diff --git a/Testing/script/regression.py b/Testing/script/regression.py
index ce0e6ec651..7c1d68a845 100644
--- a/Testing/script/regression.py
+++ b/Testing/script/regression.py
@@ -9,6 +9,7 @@ import shutil
 import time
 import datetime
 import xml
+import re
 from xml.dom.minidom import parse, parseString
 
 #
@@ -253,7 +254,61 @@ class Database:
         for error in self.attribute_comparison:
             list.append(error)
         return ";".join(list)
+
+    def generate_autopsy_artifacts(self):
+        if not self.autopsy_artifacts:
+            autopsy_db_file = os.path.join("./", case.output_dir, case.image_name,
+                                           "AutopsyTestCase", "autopsy.db")
+            autopsy_con = sqlite3.connect(autopsy_db_file)
+            autopsy_cur = autopsy_con.cursor()
+            for type_id in range(1, 13):
+                autopsy_cur.execute("SELECT COUNT(*) FROM blackboard_artifacts WHERE artifact_type_id=%d" % type_id)
+                self.autopsy_artifacts.append(autopsy_cur.fetchone()[0])
+
+    def generate_autopsy_attributes(self):
+        if self.autopsy_attributes == 0:
+            autopsy_db_file = os.path.join("./", case.output_dir, case.image_name,
+                                           "AutopsyTestCase", "autopsy.db")
+            autopsy_con = sqlite3.connect(autopsy_db_file)
+            autopsy_cur = autopsy_con.cursor()
+            autopsy_cur.execute("SELECT COUNT(*) FROM blackboard_attributes")
+            autopsy_attributes = autopsy_cur.fetchone()[0]
+            self.autopsy_attributes = autopsy_attributes
+    def generate_autopsy_objects(self):
+        if self.autopsy_objects == 0:
+            autopsy_db_file = os.path.join("./", case.output_dir, case.image_name,
+                                           "AutopsyTestCase", "autopsy.db")
+            autopsy_con = sqlite3.connect(autopsy_db_file)
+            autopsy_cur = autopsy_con.cursor()
+            autopsy_cur.execute("SELECT COUNT(*) FROM tsk_objects")
+            autopsy_objects = autopsy_cur.fetchone()[0]
+            self.autopsy_objects = autopsy_objects
+
+    def generate_gold_artifacts(self):
+        if not self.gold_artifacts:
+            gold_db_file = os.path.join("./", case.gold, case.image_name, "standard.db")
+            gold_con = sqlite3.connect(gold_db_file)
+            gold_cur = gold_con.cursor()
+            for type_id in range(1, 13):
+                gold_cur.execute("SELECT COUNT(*) FROM blackboard_artifacts WHERE artifact_type_id=%d" % type_id)
+                self.gold_artifacts.append(gold_cur.fetchone()[0])
+
+    def generate_gold_attributes(self):
+        if self.gold_attributes == 0:
+            gold_db_file = os.path.join("./", case.gold, case.image_name, "standard.db")
+            gold_con = sqlite3.connect(gold_db_file)
+            gold_cur = gold_con.cursor()
+            gold_cur.execute("SELECT COUNT(*) FROM blackboard_attributes")
+            self.gold_attributes = gold_cur.fetchone()[0]
+
+    def generate_gold_objects(self):
+        if self.gold_objects == 0:
+            gold_db_file = os.path.join("./", case.gold, case.image_name, "standard.db")
+            gold_con = sqlite3.connect(gold_db_file)
+            gold_cur = gold_con.cursor()
+            gold_cur.execute("SELECT COUNT(*) FROM tsk_objects")
+            self.gold_objects = gold_cur.fetchone()[0]
 
 
 
 #----------------------------------#
@@ -262,7 +317,7 @@ class Database:
 
 # Iterates through an XML configuration file to find all given elements
 def run_config_test(config_file):
-    #try:
+    try:
         parsed = parse(config_file)
         if parsed.getElementsByTagName("indir"):
             case.input_dir = parsed.getElementsByTagName("indir")[0].getAttribute("value").encode()
@@ -285,9 +340,9 @@ def run_config_test(config_file):
             else:
                 printerror("Warning: Image file listed in the configuration does not exist:")
                 printerror(value + "\n")
-    #except Exception as e:
-        #printerror("Error: There was an error running with the configuration file.")
-        #printerror(str(e) + "\n")
+    except Exception as e:
+        printerror("Error: There was an error running with the configuration file.")
+        printerror(str(e) + "\n")
 
 # Runs the test on the single given file.
 # The path must be guarenteed to be a correct path.
@@ -312,8 +367,6 @@ def run_test(image_file):
     generate_common_log()
     try:
         fill_case_data()
-    except StringNotFoundException as e:
-        e.print_error()
     except Exception as e:
         printerror("Error: Unknown fatal error when filling case data.")
         printerror(str(e) + "\n")
@@ -324,8 +377,8 @@ def run_test(image_file):
     # If NOT keeping Solr index (-k)
     if not args.keep:
         solr_index = make_local_path(case.output_dir, case.image_name, "AutopsyTestCase", "KeywordSearch")
-        clear_dir(solr_index)
-        print_report([], "DELETE SOLR INDEX", "Solr index deleted.")
+        if clear_dir(solr_index):
+            print_report([], "DELETE SOLR INDEX", "Solr index deleted.")
     elif args.keep:
         print_report([], "KEEP SOLR INDEX", "Solr index has been kept.")
     # If running in verbose mode (-v)
@@ -335,7 +388,7 @@ def run_test(image_file):
         print_report(errors, "VERBOSE", okay)
     # If running in exception mode (-e)
    if args.exception:
-        exceptions = report_with(args.exception_string)
+        exceptions = search_logs(args.exception_string)
         okay = "No warnings or exceptions found containing text '" + args.exception_string + "'."
         print_report(exceptions, "EXCEPTION", okay)
 
@@ -455,6 +508,24 @@ def compare_to_gold_db():
     gold_db_file = os.path.join("./", case.gold, case.image_name, "standard.db")
     autopsy_db_file = os.path.join("./", case.output_dir, case.image_name,
                                    "AutopsyTestCase", "autopsy.db")
+    # Try to query the databases. Ignore any exceptions, the function will
+    # return an error later on if these do fail
+    database.clear()
+    try:
+        database.generate_gold_objects()
+        database.generate_gold_artifacts()
+        database.generate_gold_attributes()
+    except:
+        pass
+    try:
+        database.generate_autopsy_objects()
+        database.generate_autopsy_artifacts()
+        database.generate_autopsy_attributes()
+    except:
+        pass
+    # This is where we return if a file doesn't exist, because we don't want to
+    # compare faulty databases, but we do however want to try to run all queries
+    # regardless of the other database
     if not file_exists(autopsy_db_file):
         printerror("Error: Database file does not exist at:")
         printerror(autopsy_db_file + "\n")
@@ -471,13 +542,12 @@ def compare_to_gold_db():
         autopsy_cur = autopsy_con.cursor()
 
         exceptions = []
-        database.clear()
         # Testing tsk_objects
-        exceptions.append(compare_tsk_objects(gold_cur, autopsy_cur))
+        exceptions.append(compare_tsk_objects())
         # Testing blackboard_artifacts
-        exceptions.append(compare_bb_artifacts(gold_cur, autopsy_cur))
+        exceptions.append(compare_bb_artifacts())
         # Testing blackboard_attributes
-        exceptions.append(compare_bb_attributes(gold_cur, autopsy_cur))
+        exceptions.append(compare_bb_attributes())
         database.artifact_comparison = exceptions[1]
         database.attribute_comparison = exceptions[2]
 
@@ -516,61 +586,53 @@ def compare_to_gold_html():
     except DirNotFoundException as e:
         e.print_error()
     except Exception as e:
-        printerror("Error: Unknown fatal error comparing databases.")
+        printerror("Error: Unknown fatal error comparing reports.")
         printerror(str(e) + "\n")
 
 # Compares the blackboard artifact counts of two databases
 # given the two database cursors
-def compare_bb_artifacts(gold_cur, autopsy_cur):
+def compare_bb_artifacts():
     exceptions = []
-    for type_id in range(1, 13):
-        gold_cur.execute("SELECT COUNT(*) FROM blackboard_artifacts WHERE artifact_type_id=%d" % type_id)
-        gold_artifacts = gold_cur.fetchone()[0]
-        autopsy_cur.execute("SELECT COUNT(*) FROM blackboard_artifacts WHERE artifact_type_id=%d" % type_id)
-        autopsy_artifacts = autopsy_cur.fetchone()[0]
-        # Append the results to the global database
-        database.gold_artifacts.append(gold_artifacts)
-        database.autopsy_artifacts.append(autopsy_artifacts)
-        # Report every discrepancy
-        if gold_artifacts != autopsy_artifacts:
-            error = str("Artifact counts do not match for type id %d. " % type_id)
-            error += str("Gold: %d, Test: %d" % (gold_artifacts, autopsy_artifacts))
-            exceptions.append(error)
-    return exceptions
+    try:
+        for type_id in range(1, 13):
+            if database.gold_artifacts != database.autopsy_artifacts:
+                error = str("Artifact counts do not match for type id %d. " % type_id)
+                error += str("Gold: %d, Test: %d" %
+                             (database.gold_artifacts[type_id],
+                              database.autopsy_artifacts[type_id]))
+                exceptions.append(error)
+        return exceptions
+    except Exception as e:
+        exceptions.append("Error: Unable to compare blackboard_artifacts.\n")
+        return exceptions
 
 # Compares the blackboard atribute counts of two databases
 # given the two database cursors
-def compare_bb_attributes(gold_cur, autopsy_cur):
+def compare_bb_attributes():
     exceptions = []
-    gold_cur.execute("SELECT COUNT(*) FROM blackboard_attributes")
-    gold_attributes = gold_cur.fetchone()[0]
-    autopsy_cur.execute("SELECT COUNT(*) FROM blackboard_attributes")
-    autopsy_attributes = autopsy_cur.fetchone()[0]
-    # Give the database the attributes
-    database.gold_attributes = gold_attributes
-    database.autopsy_attributes = autopsy_attributes
-    if gold_attributes != autopsy_attributes:
-        error = "Attribute counts do not match. "
-        error += str("Gold: %d, Test: %d" % (gold_attributes, autopsy_attributes))
-        exceptions.append(error)
-    return exceptions
+    try:
+        if database.gold_attributes != database.autopsy_attributes:
+            error = "Attribute counts do not match. "
+            error += str("Gold: %d, Test: %d" % (database.gold_attributes, database.autopsy_attributes))
+            exceptions.append(error)
+        return exceptions
+    except Exception as e:
+        exceptions.append("Error: Unable to compare blackboard_attributes.\n")
+        return exceptions
 
 # Compares the tsk object counts of two databases
 # given the two database cursors
-def compare_tsk_objects(gold_cur, autopsy_cur):
+def compare_tsk_objects():
     exceptions = []
-    gold_cur.execute("SELECT COUNT(*) FROM tsk_objects")
-    gold_objects = gold_cur.fetchone()[0]
-    autopsy_cur.execute("SELECT COUNT(*) FROM tsk_objects")
-    autopsy_objects = autopsy_cur.fetchone()[0]
-    # Give the database the attributes
-    database.gold_objects = gold_objects
-    database.autopsy_objects = autopsy_objects
-    if gold_objects != autopsy_objects:
-        error = "TSK Object counts do not match. "
-        error += str("Gold: %d, Test: %d" % (gold_objects, autopsy_objects))
-        exceptions.append(error)
-    return exceptions
+    try:
+        if database.gold_objects != database.autopsy_objects:
+            error = "TSK Object counts do not match. "
+            error += str("Gold: %d, Test: %d" % (database.gold_objects, database.autopsy_objects))
+            exceptions.append(error)
+        return exceptions
+    except Exception as e:
+        exceptions.append("Error: Unable to compare tsk_objects.\n")
+        return exceptions
 
 
@@ -581,22 +643,28 @@ def compare_tsk_objects(gold_cur, autopsy_cur):
 # Generate the "common log": a log of all exceptions and warnings
 # from each log file generated by Autopsy
 def generate_common_log():
-    logs_path = make_local_path(case.output_dir, case.image_name, "logs")
-    common_log = open(case.common_log, "a")
-    common_log.write("--------------------------------------------------\n")
-    common_log.write(case.image_name + "\n")
-    common_log.write("--------------------------------------------------\n")
-    for file in os.listdir(logs_path):
-        log = open(make_path(logs_path, file), "r")
-        lines = log.readlines()
-        for line in lines:
-            if ("exception" in line) or ("EXCEPTION" in line) or ("Exception" in line):
-                common_log.write("From " + log.name[log.name.rfind("/")+1:] +":\n" + line + "\n")
-            if ("warning" in line) or ("WARNING" in line) or ("Warning" in line):
-                common_log.write("From " + log.name[log.name.rfind("/")+1:] +":\n" + line + "\n")
-        log.close()
-    common_log.write("\n\n")
-    common_log.close()
+    try:
+        logs_path = make_local_path(case.output_dir, case.image_name, "logs")
+        common_log = open(case.common_log, "a")
+        common_log.write("--------------------------------------------------\n")
+        common_log.write(case.image_name + "\n")
+        common_log.write("--------------------------------------------------\n")
+        for file in os.listdir(logs_path):
+            log = open(make_path(logs_path, file), "r")
+            lines = log.readlines()
+            for line in lines:
+                if "exception" in line.lower():
+                    common_log.write("From " + log.name[log.name.rfind("/")+1:] +":\n" + line + "\n")
+                if "warning" in line.lower():
+                    common_log.write("From " + log.name[log.name.rfind("/")+1:] +":\n" + line + "\n")
+                if "error" in line.lower():
+                    common_log.write("From " + log.name[log.name.rfind("/")+1:] +":\n" + line + "\n")
+            log.close()
+        common_log.write("\n\n")
+        common_log.close()
+    except Exception as e:
+        printerror("Error: Unable to generate the common log.")
+        printerror(str(e))
 
 # Fill in the global case's variables that require the log files
 def fill_case_data():
@@ -668,6 +736,8 @@ def generate_csv(csv_path):
         total_ingest_time = case.total_ingest_time
         service_times = case.service_times
         exceptions_count = str(len(get_exceptions()))
+        mem_exceptions_count = str(len(search_common_log("OutOfMemoryException")) +
+                                   len(search_common_log("OutOfMemoryError")))
         tsk_objects_count = str(database.autopsy_objects)
         artifacts_count = str(database.get_artifacts_count())
         attributes_count = str(database.autopsy_attributes)
@@ -680,7 +750,7 @@ def generate_csv(csv_path):
         # Make a list with all the strings in it
         seq = (image_path, name, path, autopsy_version, heap_space,
                start_date, end_date, total_test_time,
-               total_ingest_time, service_times, exceptions_count, tsk_objects_count, artifacts_count,
+               total_ingest_time, service_times, exceptions_count, mem_exceptions_count, tsk_objects_count, artifacts_count,
                attributes_count, gold_db_name, artifact_comparison, attribute_comparison,
                gold_report_name, report_comparison, ant)
         # Join it together with a ", "
@@ -689,10 +759,6 @@
         # Write to the log!
         csv.write(output)
         csv.close()
-    except StringNotFoundException as e:
-        e.print_error()
-        printerror("Error: CSV file was not created at:")
-        printerror(csv_path + "\n")
     except Exception as e:
         printerror("Error: Unknown fatal error when creating CSV file at:")
         printerror(csv_path)
@@ -703,7 +769,8 @@ def csv_header(csv_path):
     csv = open(csv_path, "w")
     seq = ("Image Path", "Image Name", "Output Case Directory", "Autopsy Version",
            "Heap Space Setting", "Test Start Date", "Test End Date", "Total Test Time",
-           "Total Ingest Time", "Service Times", "Exceptions Count", "TSK Objects Count", "Artifacts Count",
+           "Total Ingest Time", "Service Times", "Exceptions Count", "OutOfMemoryExceptions",
+           "TSK Objects Count", "Artifacts Count",
            "Attributes Count", "Gold Database Name", "Artifacts Comparison", "Attributes Comparison",
           "Gold Report Name", "Report Comparison", "Ant Command Line")
     output = "|".join(seq)
@@ -713,56 +780,50 @@
 # Returns a list of all the exceptions listed in the common log
 def get_exceptions():
-    try:
-        exceptions = []
-        common_log = open(case.common_log, "r")
-        lines = common_log.readlines()
-        for line in lines:
-            if ("exception" in line) or ("EXCEPTION" in line) or ("Exception" in line):
-                exceptions.append(line)
-        common_log.close()
-        return exceptions
-    except:
-        raise FileNotFoundExeption(case.common_log)
+    exceptions = []
+    common_log = open(case.common_log, "r")
+    lines = common_log.readlines()
+    ex = re.compile("\SException")
+    er = re.compile("\SError")
+    for line in lines:
+        if ex.search(line) or er.search(line):
+            exceptions.append(line)
+    common_log.close()
+    return exceptions
 
 # Returns a list of all the warnings listed in the common log
 def get_warnings():
-    try:
-        warnings = []
-        common_log = open(case.common_log, "r")
-        lines = common_log.readlines()
-        for line in lines:
-            if ("warning" in line) or ("WARNING" in line) or ("Warning" in line):
-                warnings.append(line)
-        common_log.close()
-        return warnings
-    except:
-        raise FileNotFoundExeption(case.common_log)
+    warnings = []
+    common_log = open(case.common_log, "r")
+    lines = common_log.readlines()
+    for line in lines:
+        if "warning" in line.lower():
+            warnings.append(line)
+    common_log.close()
+    return warnings
 
 # Returns all the errors found in the common log in a list
 def report_all_errors():
     try:
         return get_warnings() + get_exceptions()
-    except FileNotFoundException as e:
-        e.print_error()
     except Exception as e:
         printerror("Error: Unknown fatal error when reporting all errors.")
         printerror(str(e) + "\n")
 
-# Returns any lines from the common log with the given string in them (case sensitive)
-def report_with(exception):
-    try:
-        exceptions = []
-        common_log = open(case.common_log, "r")
-        lines = common_log.readlines()
+# Searched all the known logs for the given regex
+# The function expects regex = re.compile(...)
+def regex_search_logs(regex):
+    logs_path = make_local_path(case.output_dir, case.image_name, "logs")
+    results = []
+    for file in os.listdir(logs_path):
+        log = open(make_path(logs_path, file), "r")
+        lines = log.readlines()
         for line in lines:
-            if exception in line:
-                exceptions.append(line)
-        common_log.close()
-        return exceptions
-    except:
-        printerror("Error: Cannot find the common log at:")
-        printerror(case.common_log + "\n")
+            if regex.search(line):
+                results.append(line)
        log.close()
+    if results:
+        return results
 
 # Search through all the known log files for a specific string.
 # Returns a list of all lines with that string
@@ -776,11 +837,18 @@ def search_logs(string):
             if string in line:
                 results.append(line)
         log.close()
-    if results:
-        return results
-    raise StringNotFoundException(string)
-# Search through all the known log files for a specific string.
-# Returns a list of all lines with that string
+    return results
+
+# Searches the common log for any instances of a specific string.
+def search_common_log(string):
+    results = []
+    log = open(case.common_log, "r")
+    lines = log.readlines()
+    for line in lines:
+        if string in line:
+            results.append(line)
+    log.close()
+    return results
 
 # Searches the given log for the given string
 # Returns a list of all lines with that string
@@ -796,7 +864,6 @@ def search_log(log, string):
         log.close()
         if results:
             return results
-        raise StringNotFoundException(string)
     except:
         raise FileNotFoundException(logs_path)
 
@@ -829,82 +896,90 @@ def generate_html():
     # this test, so we need to make the start of the html log
     if not file_exists(case.html_log):
         write_html_head()
-    html = open(case.html_log, "a")
-    # The image title
-    title = "
" + error.replace("<", "<").replace(">", ">") + "
" - # If there is a \n, we probably want aImage Path: | " - info += "" + case.image_file + " |
Image Name: | " - info += "" + case.image_name + " |
Case Output Directory: | " - info += "" + case.output_dir + " |
Autopsy Version: | " - info += "" + case.autopsy_version + " |
Heap Space: | " - info += "" + case.heap_space + " |
Test Start Date: | " - info += "" + case.start_date + " |
Test End Date: | " - info += "" + case.end_date + " |
Total Test Time: | " - info += "" + case.total_test_time + " |
Total Ingest Time: | " - info += "" + case.total_ingest_time + " |
Exceptions Count: | " - info += "" + str(len(get_exceptions())) + " |
TSK Objects Count: | " - info += "" + str(database.autopsy_objects) + " |
Artifacts Count: | " - info += "" + str(database.get_artifacts_count()) + " |
Attributes Count: | " - info += "" + str(database.autopsy_attributes) + " |
" + out + "
" - # If there was a \n it probably means we want a