From a6128d406e0a08f8b3738b6871819286a7e88ce1 Mon Sep 17 00:00:00 2001 From: Sean-M Date: Wed, 8 May 2013 16:22:23 -0400 Subject: [PATCH 1/7] database diff for regression.py --- test/script/regression.py | 33 +++++++++++++++++++++++---------- 1 file changed, 23 insertions(+), 10 deletions(-) diff --git a/test/script/regression.py b/test/script/regression.py index 7b4d277754..4b45a6d287 100644 --- a/test/script/regression.py +++ b/test/script/regression.py @@ -328,12 +328,25 @@ class Database: for type_id in range(1, length): autopsy_cur.execute("SELECT COUNT(*) FROM blackboard_artifacts WHERE artifact_type_id=%d" % type_id) self.autopsy_artifacts.append(autopsy_cur.fetchone()[0]) - autopsy_cur.execute("SELECT blackboard_artifact_types.display_name FROM blackboard_artifact_types INNER JOIN blackboard_artifacts ON blackboard_artifact_types.artifact_type_id = blackboard_artifacts.artifact_type_id INNER JOIN tsk_objects ON tsk_objects.obj_id = blackboard_artifacts.obj_id INNER JOIN blackboard_attributes ON blackboard_attributes.artifact_id = blackboard_artifacts.artifact_id") - self.autopsy_artifacts_list = [] - for row in autopsy_cur.fetchall(): - for item in row: - self.autopsy_artifacts_list.append(item) - + autopsy_cur2 = autopsy_con.cursor() + autopsy_cur2.execute("SELECT tsk_files.parent_path, tsk_files.name, blackboard_artifact_types.display_name, blackboard_artifacts.artifact_id FROM blackboard_artifact_types INNER JOIN blackboard_artifacts ON blackboard_artifact_types.artifact_type_id = blackboard_artifacts.artifact_type_id INNER JOIN tsk_objects ON tsk_objects.obj_id = blackboard_artifacts.obj_id INNER JOIN tsk_files ON tsk_files.obj_id = tsk_objects.obj_id") + self.databaselist = [] + rw = autopsy_cur2.fetchone() + print(rw) + while (rw != None): + autopsy_cur1 = autopsy_con.cursor() + autopsy_cur1.execute("SELECT * FROM blackboard_attributes WHERE artifact_id = " + str(rw[3])) + addstrng = rw[0] + rw[1] + ' ' + attributes = autopsy_cur1.fetchall() + 
attributes.sort() + for attr in attributes: + val = 6 + attr[4] + addstrng += '< type = "' + attrs[1] + '" value = "' + attrs[val] + '" />' + addstrng += '' + print(addstrng) + self.databaselist.append(addstrng) + rw = autopsy_cur2.fetchone() + #print(self.databaselist) def generate_autopsy_attributes(self): if self.autopsy_attributes == 0: @@ -718,14 +731,14 @@ def compare_to_gold_db(): database.generate_gold_objects() database.generate_gold_artifacts() database.generate_gold_attributes() - except: - pass + except Exception as e: + print(str(e)) try: database.generate_autopsy_objects() database.generate_autopsy_artifacts() database.generate_autopsy_attributes() - except: - pass + except Exception as e: + print(str(e)) # This is where we return if a file doesn't exist, because we don't want to # compare faulty databases, but we do however want to try to run all queries # regardless of the other database From aa06dc4154e05e371709f47fa866c2e8512f5b7a Mon Sep 17 00:00:00 2001 From: Sean-M Date: Thu, 9 May 2013 16:44:28 -0400 Subject: [PATCH 2/7] Continued testing with database diff --- test/script/regression.py | 41 +++++++++++++++++++++++++++++---------- 1 file changed, 31 insertions(+), 10 deletions(-) diff --git a/test/script/regression.py b/test/script/regression.py index 4b45a6d287..75142eb2e2 100644 --- a/test/script/regression.py +++ b/test/script/regression.py @@ -193,6 +193,7 @@ class TestAutopsy: self.ingest_messages = 0 self.indexed_files = 0 self.indexed_chunks = 0 + self.autopsy_data_file = "" # Infinite Testing info timer = 0 @@ -330,22 +331,41 @@ class Database: self.autopsy_artifacts.append(autopsy_cur.fetchone()[0]) autopsy_cur2 = autopsy_con.cursor() autopsy_cur2.execute("SELECT tsk_files.parent_path, tsk_files.name, blackboard_artifact_types.display_name, blackboard_artifacts.artifact_id FROM blackboard_artifact_types INNER JOIN blackboard_artifacts ON blackboard_artifact_types.artifact_type_id = blackboard_artifacts.artifact_type_id INNER JOIN 
tsk_objects ON tsk_objects.obj_id = blackboard_artifacts.obj_id INNER JOIN tsk_files ON tsk_files.obj_id = tsk_objects.obj_id") - self.databaselist = [] + database_log = codecs.open(case.autopsy_data_file, "w", "utf_8") rw = autopsy_cur2.fetchone() print(rw) while (rw != None): autopsy_cur1 = autopsy_con.cursor() - autopsy_cur1.execute("SELECT * FROM blackboard_attributes WHERE artifact_id = " + str(rw[3])) - addstrng = rw[0] + rw[1] + ' ' + autopsy_cur1.execute("SELECT blackboard_attributes.source, blackboard_attribute_types.display_name, blackboard_attributes.value_type, blackboard_attributes.value_text, blackboard_attributes.value_int32, blackboard_attributes.value_int64, blackboard_attributes.value_double FROM blackboard_attributes INNER JOIN blackboard_attribute_types ON blackboard_attributes.attribute_type_id = blackboard_attribute_types.attribute_type_id WHERE artifact_id = " + str(rw[3])) + database_log.write(rw[0] + rw[1] + ' ') attributes = autopsy_cur1.fetchall() attributes.sort() - for attr in attributes: - val = 6 + attr[4] - addstrng += '< type = "' + attrs[1] + '" value = "' + attrs[val] + '" />' - addstrng += '' - print(addstrng) - self.databaselist.append(addstrng) - rw = autopsy_cur2.fetchone() + print(attributes) + try: + for attr in attributes: + print(attr) + val = 3 + attr[2] + numvals = 0 + for x in range(3, 6): + if(attr[x] != None): + numvals += 1 + if(numvals > 1): + global failedbool + global errorem + global attachl + errorem += "There were too many values for attribute type: " + attr[1] + "for artifact with id #" + str(rw[3]) + ".\n" + failedbool = True + attachl.append(autopsy_db_file) + database_log.write('< type = "' + attr[1] + '" value = "') + inpval = attr[val] + if((type(inpval) != 'unicode') or (type(inpval) != 'str')): + inpval = str(inpval) + database_log.write(inpval) + database_log.write('" />') + database_log.write(' \n') + rw = autopsy_cur2.fetchone() + except Exception as e: + print(str(e)) #print(self.databaselist) def 
generate_autopsy_attributes(self): @@ -504,6 +524,7 @@ def run_test(image_file, count): # Set the case to work for this test case.image_file = image_file case.image_name = case.get_image_name(image_file) + "(" + str(count) + ")" + case.autopsy_data_file = Emailer.make_path(case.output_dir, case.image_name, "Autopsy_data.txt") case.image = case.get_image_name(image_file) case.common_log_path = Emailer.make_local_path(case.output_dir, case.image_name, case.image_name+case.common_log) case.warning_log = Emailer.make_local_path(case.output_dir, case.image_name, "AutopsyLogs.txt") From 361dc0df0b07e06ac775b8efd221ddf562158f71 Mon Sep 17 00:00:00 2001 From: Sean-M Date: Fri, 10 May 2013 16:01:54 -0400 Subject: [PATCH 3/7] Continued Database diff implementation --- test/script/regression.py | 80 +++++++++++++++++++++++++++------------ 1 file changed, 55 insertions(+), 25 deletions(-) diff --git a/test/script/regression.py b/test/script/regression.py index 75142eb2e2..792a15710e 100644 --- a/test/script/regression.py +++ b/test/script/regression.py @@ -329,21 +329,42 @@ class Database: for type_id in range(1, length): autopsy_cur.execute("SELECT COUNT(*) FROM blackboard_artifacts WHERE artifact_type_id=%d" % type_id) self.autopsy_artifacts.append(autopsy_cur.fetchone()[0]) - autopsy_cur2 = autopsy_con.cursor() - autopsy_cur2.execute("SELECT tsk_files.parent_path, tsk_files.name, blackboard_artifact_types.display_name, blackboard_artifacts.artifact_id FROM blackboard_artifact_types INNER JOIN blackboard_artifacts ON blackboard_artifact_types.artifact_type_id = blackboard_artifacts.artifact_type_id INNER JOIN tsk_objects ON tsk_objects.obj_id = blackboard_artifacts.obj_id INNER JOIN tsk_files ON tsk_files.obj_id = tsk_objects.obj_id") - database_log = codecs.open(case.autopsy_data_file, "w", "utf_8") - rw = autopsy_cur2.fetchone() - print(rw) + self.retrieve_data(case.autopsy_data_file, autopsy_con) + + #print(self.databaselist) + + def retrieve_data(self, data_file, 
autopsy_con): + autopsy_con.text_factory = lambda x: unicode(x, "utf-8", "ignore") + autopsy_cur2 = autopsy_con.cursor() + autopsy_cur2.execute("SELECT tsk_files.parent_path, tsk_files.name, blackboard_artifact_types.display_name, blackboard_artifacts.artifact_id FROM blackboard_artifact_types INNER JOIN blackboard_artifacts ON blackboard_artifact_types.artifact_type_id = blackboard_artifacts.artifact_type_id INNER JOIN tsk_objects ON tsk_objects.obj_id = blackboard_artifacts.obj_id INNER JOIN tsk_files ON tsk_files.obj_id = tsk_objects.obj_id") + database_log = codecs.open(data_file, "w", "utf_8") + print(database_log) + rw = autopsy_cur2.fetchone() + appnd = False + try: while (rw != None): + if(rw[0] != None): + database_log.write(rw[0] + rw[1] + ' ') + else: + database_log.write(rw[1] + ' ') autopsy_cur1 = autopsy_con.cursor() - autopsy_cur1.execute("SELECT blackboard_attributes.source, blackboard_attribute_types.display_name, blackboard_attributes.value_type, blackboard_attributes.value_text, blackboard_attributes.value_int32, blackboard_attributes.value_int64, blackboard_attributes.value_double FROM blackboard_attributes INNER JOIN blackboard_attribute_types ON blackboard_attributes.attribute_type_id = blackboard_attribute_types.attribute_type_id WHERE artifact_id = " + str(rw[3])) - database_log.write(rw[0] + rw[1] + ' ') - attributes = autopsy_cur1.fetchall() - attributes.sort() - print(attributes) + looptry = True try: + key = "" + for num in str(rw[3]): + key += num + key = key, + autopsy_cur1.execute("SELECT blackboard_attributes.source, blackboard_attribute_types.display_name, blackboard_attributes.value_type, blackboard_attributes.value_text, blackboard_attributes.value_int32, blackboard_attributes.value_int64, blackboard_attributes.value_double FROM blackboard_attributes INNER JOIN blackboard_attribute_types ON blackboard_attributes.attribute_type_id = blackboard_attribute_types.attribute_type_id WHERE artifact_id =?", key) + attributes = 
autopsy_cur1.fetchall() + attributes.sort() + except Exception as e: + print(str(e)) + print(str(rw[3])) + looptry = False + print('hello') + pass + if(looptry == True): for attr in attributes: - print(attr) val = 3 + attr[2] numvals = 0 for x in range(3, 6): @@ -354,20 +375,29 @@ class Database: global errorem global attachl errorem += "There were too many values for attribute type: " + attr[1] + "for artifact with id #" + str(rw[3]) + ".\n" + printerror("There were too many values for attribute type: " + attr[1] + "for artifact with id #" + str(rw[3]) + ".") failedbool = True - attachl.append(autopsy_db_file) - database_log.write('< type = "' + attr[1] + '" value = "') - inpval = attr[val] - if((type(inpval) != 'unicode') or (type(inpval) != 'str')): - inpval = str(inpval) - database_log.write(inpval) - database_log.write('" />') - database_log.write(' \n') - rw = autopsy_cur2.fetchone() - except Exception as e: - print(str(e)) - #print(self.databaselist) - + if(not appnd): + attachl.append(autopsy_db_file) + appnd = True + try: + database_log.write('< attribute type = "' + attr[1] + '" value = "') + inpval = attr[val] + if((type(inpval) != 'unicode') or (type(inpval) != 'str')): + inpval = str(inpval) + database_log.write(inpval) + except Exception as e: + print(str(e)) + print(attr[val]) + database_log.write('" />') + database_log.write(' \n') + rw = autopsy_cur2.fetchone() + except Exception as e: + print('outer exception: ' + str(e)) + print(rw[0]) + print(rw[1]) + print(rw[2]) + def generate_autopsy_attributes(self): if self.autopsy_attributes == 0: autopsy_db_file = Emailer.make_path(case.output_dir, case.image_name, @@ -981,7 +1011,7 @@ def compare_errors(): common_dat = common_log.read() patrn = re.compile("\d") if (not((re.sub(patrn, 'd', gold_dat)) == (re.sub(patrn, 'd', common_dat)))): - diff_dir = Emailer.make_local_path(case.output_dir, case.image_name, case.image_name+"_AutopsyErrors-Diff.txt") + diff_dir = Emailer.make_local_path(case.output_dir, 
case.image_name, case.image_name+"AutopsyErrors-Diff.txt") diff_file = open(diff_dir, "w") dffcmdlst = ["diff", case.sorted_log, gold_dir] subprocess.call(dffcmdlst, stdout = diff_file) From 6d91166dc68d4bd3e1a21a56f3e21255b4b12f47 Mon Sep 17 00:00:00 2001 From: Sean-M Date: Mon, 13 May 2013 17:16:04 -0400 Subject: [PATCH 4/7] Updated regression.py --- test/script/regression.py | 83 ++++++++++++++++++++++++++++++--------- 1 file changed, 65 insertions(+), 18 deletions(-) diff --git a/test/script/regression.py b/test/script/regression.py index 792a15710e..1faffc8673 100644 --- a/test/script/regression.py +++ b/test/script/regression.py @@ -194,6 +194,7 @@ class TestAutopsy: self.indexed_files = 0 self.indexed_chunks = 0 self.autopsy_data_file = "" + self.sorted_data_file = "" # Infinite Testing info timer = 0 @@ -329,24 +330,26 @@ class Database: for type_id in range(1, length): autopsy_cur.execute("SELECT COUNT(*) FROM blackboard_artifacts WHERE artifact_type_id=%d" % type_id) self.autopsy_artifacts.append(autopsy_cur.fetchone()[0]) + self.retrieve_data(case.autopsy_data_file, autopsy_con) - + srtcmdlst = ["sort", case.autopsy_data_file, "-o", case.sorted_data_file] + subprocess.call(srtcmdlst) #print(self.databaselist) - + def retrieve_data(self, data_file, autopsy_con): autopsy_con.text_factory = lambda x: unicode(x, "utf-8", "ignore") autopsy_cur2 = autopsy_con.cursor() - autopsy_cur2.execute("SELECT tsk_files.parent_path, tsk_files.name, blackboard_artifact_types.display_name, blackboard_artifacts.artifact_id FROM blackboard_artifact_types INNER JOIN blackboard_artifacts ON blackboard_artifact_types.artifact_type_id = blackboard_artifacts.artifact_type_id INNER JOIN tsk_objects ON tsk_objects.obj_id = blackboard_artifacts.obj_id INNER JOIN tsk_files ON tsk_files.obj_id = tsk_objects.obj_id") - database_log = codecs.open(data_file, "w", "utf_8") - print(database_log) + autopsy_cur2.execute("SELECT tsk_files.parent_path, tsk_files.name, 
blackboard_artifact_types.display_name, blackboard_artifacts.artifact_id FROM blackboard_artifact_types INNER JOIN blackboard_artifacts ON blackboard_artifact_types.artifact_type_id = blackboard_artifacts.artifact_type_id INNER JOIN tsk_files ON tsk_files.obj_id = blackboard_artifacts.obj_id") + database_log = codecs.open(data_file, "wb", "utf_8") rw = autopsy_cur2.fetchone() appnd = False + counter = 0 try: while (rw != None): if(rw[0] != None): - database_log.write(rw[0] + rw[1] + ' ') + database_log.write(rw[0] + rw[1] + ' ') else: - database_log.write(rw[1] + ' ') + database_log.write(rw[1] + ' ') autopsy_cur1 = autopsy_con.cursor() looptry = True try: @@ -354,9 +357,9 @@ class Database: for num in str(rw[3]): key += num key = key, - autopsy_cur1.execute("SELECT blackboard_attributes.source, blackboard_attribute_types.display_name, blackboard_attributes.value_type, blackboard_attributes.value_text, blackboard_attributes.value_int32, blackboard_attributes.value_int64, blackboard_attributes.value_double FROM blackboard_attributes INNER JOIN blackboard_attribute_types ON blackboard_attributes.attribute_type_id = blackboard_attribute_types.attribute_type_id WHERE artifact_id =?", key) + autopsy_cur1.execute("SELECT blackboard_attributes.source, blackboard_attribute_types.display_name, blackboard_attributes.value_type, blackboard_attributes.value_text, blackboard_attributes.value_int32, blackboard_attributes.value_int64, blackboard_attributes.value_double FROM blackboard_attributes INNER JOIN blackboard_attribute_types ON blackboard_attributes.attribute_type_id = blackboard_attribute_types.attribute_type_id WHERE artifact_id =? 
ORDER BY blackboard_attributes.source, blackboard_attribute_types.display_name, blackboard_attributes.value_type, blackboard_attributes.value_text, blackboard_attributes.value_int32, blackboard_attributes.value_int64, blackboard_attributes.value_double", key) attributes = autopsy_cur1.fetchall() - attributes.sort() + print(attributes) except Exception as e: print(str(e)) print(str(rw[3])) @@ -364,6 +367,7 @@ class Database: print('hello') pass if(looptry == True): + src = attributes[0][0] for attr in attributes: val = 3 + attr[2] numvals = 0 @@ -374,22 +378,36 @@ class Database: global failedbool global errorem global attachl - errorem += "There were too many values for attribute type: " + attr[1] + "for artifact with id #" + str(rw[3]) + ".\n" - printerror("There were too many values for attribute type: " + attr[1] + "for artifact with id #" + str(rw[3]) + ".") + errorem += "There were too many values for attribute type: " + attr[1] + " for artifact with id #" + str(rw[3]) + " for image " + case.image_name + ".\n" + printerror("There were too many values for attribute type: " + attr[1] + " for artifact with id #" + str(rw[3]) + " for image " + case.image_name + ".") + failedbool = True + if(not appnd): + attachl.append(autopsy_db_file) + appnd = True + if(not attr[0] == src): + global failedbool + global errorem + global attachl + errorem += "There were inconsistents sources for artifact with id #" + str(rw[3]) + " for image " + case.image_name + ".\n" + printerror("There were inconsistents sources for artifact with id #" + str(rw[3]) + " for image " + case.image_name + ".") failedbool = True if(not appnd): attachl.append(autopsy_db_file) appnd = True try: - database_log.write('< attribute type = "' + attr[1] + '" value = "') + database_log.write('') + for outp in inpval: + try: + database_log.write(outp) + except Exception as e: + print("Inner exception" + outp) + database_log.write("?") + except: + pass + database_log.write('" />') database_log.write(' \n') rw = 
autopsy_cur2.fetchone() except Exception as e: @@ -554,7 +572,8 @@ def run_test(image_file, count): # Set the case to work for this test case.image_file = image_file case.image_name = case.get_image_name(image_file) + "(" + str(count) + ")" - case.autopsy_data_file = Emailer.make_path(case.output_dir, case.image_name, "Autopsy_data.txt") + case.autopsy_data_file = Emailer.make_path(case.output_dir, case.image_name, case.image_name + "Autopsy_data.txt") + case.sorted_data_file = Emailer.make_path(case.output_dir, case.image_name, "Sorted_Autopsy_data.txt") case.image = case.get_image_name(image_file) case.common_log_path = Emailer.make_local_path(case.output_dir, case.image_name, case.image_name+case.common_log) case.warning_log = Emailer.make_local_path(case.output_dir, case.image_name, "AutopsyLogs.txt") @@ -625,6 +644,7 @@ def run_test(image_file, count): compare_to_gold_db() compare_to_gold_html() compare_errors() + compare_data() del_dir(img_gold) except Exception as e: print("Tests failed due to an error, try rebuilding or creating gold standards.\n") @@ -717,6 +737,7 @@ def rebuild(): tmpdir = Emailer.make_path(gold_dir, case.image_name) dbinpth = Emailer.make_path(case.output_dir, case.image_name, "AutopsyTestCase", "autopsy.db") dboutpth = Emailer.make_path(tmpdir, "autopsy.db") + dataoutpth = Emailer.make_path(tmpdir, case.image_name + "SortedData.txt") if not os.path.exists(case.gold_parse): os.makedirs(case.gold_parse) if not os.path.exists(gold_dir): @@ -724,6 +745,7 @@ def rebuild(): if not os.path.exists(tmpdir): os.makedirs(tmpdir) copy_file(dbinpth, dboutpth) + copy_file(case.sorted_data_file, dataoutpth) error_pth = Emailer.make_path(tmpdir, case.image_name+"SortedErrors.txt") copy_file(case.sorted_log, error_pth) # Rebuild the HTML report @@ -1001,6 +1023,31 @@ def generate_common_log(): printerror(str(e) + "\n") logging.critical(traceback.format_exc()) +def compare_data(): + gold_dir = Emailer.make_path(case.gold, case.image_name, case.image_name 
+ "SortedData.txt") + if(not file_exists(gold_dir)): + gold_dir = Emailer.make_path(case.gold_parse, case.image_name, case.image_name + "SortedData.txt") + srtd_data = codecs.open(case.sorted_data_file, "r", "utf_8") + gold_data = codecs.open(gold_dir, "r", "utf_8") + gold_dat = gold_data.read() + srtd_dat = common_log.read() + patrn = re.compile("\d") + if (not(gold_dat == common_dat)): + diff_dir = Emailer.make_local_path(case.output_dir, case.image_name, case.image_name+"AutopsyErrors-Diff.txt") + diff_file = codecs.open(diff_dir, "wb", "utf_8") + dffcmdlst = ["diff", case.sorted_data_file, gold_dir] + subprocess.call(dffcmdlst, stdout = diff_file) + global attachl + global errorem + global failedbool + attachl.append(case.sorted_data_file) + attachl.append(diff_dir) + errorem += "There was a difference in the Database data for " + case.image_name + ".\n" + print("Databases didn't match.\n") + failedbool = True + global imgfail + imgfail = True + def compare_errors(): gold_dir = Emailer.make_path(case.gold, case.image_name, case.image_name + "SortedErrors.txt") if(not file_exists(gold_dir)): From ac2ac5e021439b8796938f16bcec72b42919eca3 Mon Sep 17 00:00:00 2001 From: Sean-M Date: Tue, 14 May 2013 12:36:22 -0400 Subject: [PATCH 5/7] Added db dump to regression.py --- test/script/regression.py | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/test/script/regression.py b/test/script/regression.py index 1faffc8673..ecdc85a040 100644 --- a/test/script/regression.py +++ b/test/script/regression.py @@ -354,8 +354,7 @@ class Database: looptry = True try: key = "" - for num in str(rw[3]): - key += num + key = str(rw[3]) key = key, autopsy_cur1.execute("SELECT blackboard_attributes.source, blackboard_attribute_types.display_name, blackboard_attributes.value_type, blackboard_attributes.value_text, blackboard_attributes.value_int32, blackboard_attributes.value_int64, blackboard_attributes.value_double FROM blackboard_attributes INNER 
JOIN blackboard_attribute_types ON blackboard_attributes.attribute_type_id = blackboard_attribute_types.attribute_type_id WHERE artifact_id =? ORDER BY blackboard_attributes.source, blackboard_attribute_types.display_name, blackboard_attributes.value_type, blackboard_attributes.value_text, blackboard_attributes.value_int32, blackboard_attributes.value_int64, blackboard_attributes.value_double", key) attributes = autopsy_cur1.fetchall() @@ -415,6 +414,18 @@ class Database: print(rw[0]) print(rw[1]) print(rw[2]) + print(rw[3]) + + def dbDump(self): + autopsy_db_file = Emailer.make_path(case.output_dir, case.image_name, + "AutopsyTestCase", "autopsy.db") + autopsy_con = sqlite3.connect(autopsy_db_file) + dump_file = Emailer.make_path(case.output_dir, case.image_name, "Dump.txt") + database_log = codecs.open(dump_file, "wb", "utf_8") + for line in autopsy_con.iterdump(): + database_log.write(line + "\n") + + def generate_autopsy_attributes(self): if self.autopsy_attributes == 0: @@ -809,6 +820,7 @@ def compare_to_gold_db(): try: database.generate_autopsy_objects() database.generate_autopsy_artifacts() + database.dbDump() database.generate_autopsy_attributes() except Exception as e: print(str(e)) @@ -1030,9 +1042,9 @@ def compare_data(): srtd_data = codecs.open(case.sorted_data_file, "r", "utf_8") gold_data = codecs.open(gold_dir, "r", "utf_8") gold_dat = gold_data.read() - srtd_dat = common_log.read() + srtd_dat = srtd_data.read() patrn = re.compile("\d") - if (not(gold_dat == common_dat)): + if (not(gold_dat == srtd_dat)): diff_dir = Emailer.make_local_path(case.output_dir, case.image_name, case.image_name+"AutopsyErrors-Diff.txt") diff_file = codecs.open(diff_dir, "wb", "utf_8") dffcmdlst = ["diff", case.sorted_data_file, gold_dir] From 58a2995bd6ae8e2c9cd3464691b0fc6dd2b7d3a1 Mon Sep 17 00:00:00 2001 From: Sean-M Date: Thu, 16 May 2013 16:11:27 -0400 Subject: [PATCH 6/7] updated regression.py --- test/script/regression.py | 330 ++++++++++++++++++++------------------ 
1 file changed, 172 insertions(+), 158 deletions(-) diff --git a/test/script/regression.py b/test/script/regression.py index ecdc85a040..6b4a87c4d2 100644 --- a/test/script/regression.py +++ b/test/script/regression.py @@ -20,9 +20,6 @@ import smtplib from email.mime.image import MIMEImage from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText -from email.MIMEBase import MIMEBase -from email import Encoders -import urllib2 import re import zipfile import zlib @@ -77,7 +74,7 @@ class Args: def parse(self): global nxtproc nxtproc = [] - nxtproc.append("python") + nxtproc.append("python3") nxtproc.append(sys.argv.pop(0)) while sys.argv: arg = sys.argv.pop(0) @@ -160,7 +157,7 @@ class TestAutopsy: # Paths: self.input_dir = Emailer.make_local_path("..","input") self.output_dir = "" - self.gold = Emailer.make_path("..", "output", "gold", "tmp") + self.gold = Emailer.make_path("..", "output", "gold") # Logs: self.antlog_dir = "" self.common_log = "" @@ -195,6 +192,8 @@ class TestAutopsy: self.indexed_chunks = 0 self.autopsy_data_file = "" self.sorted_data_file = "" + self.gold_dbdump = "" + self.autopsy_dbdump = "" # Infinite Testing info timer = 0 @@ -329,103 +328,7 @@ class Database: length = autopsy_cur.fetchone()[0] + 1 for type_id in range(1, length): autopsy_cur.execute("SELECT COUNT(*) FROM blackboard_artifacts WHERE artifact_type_id=%d" % type_id) - self.autopsy_artifacts.append(autopsy_cur.fetchone()[0]) - - self.retrieve_data(case.autopsy_data_file, autopsy_con) - srtcmdlst = ["sort", case.autopsy_data_file, "-o", case.sorted_data_file] - subprocess.call(srtcmdlst) - #print(self.databaselist) - - def retrieve_data(self, data_file, autopsy_con): - autopsy_con.text_factory = lambda x: unicode(x, "utf-8", "ignore") - autopsy_cur2 = autopsy_con.cursor() - autopsy_cur2.execute("SELECT tsk_files.parent_path, tsk_files.name, blackboard_artifact_types.display_name, blackboard_artifacts.artifact_id FROM blackboard_artifact_types INNER JOIN 
blackboard_artifacts ON blackboard_artifact_types.artifact_type_id = blackboard_artifacts.artifact_type_id INNER JOIN tsk_files ON tsk_files.obj_id = blackboard_artifacts.obj_id") - database_log = codecs.open(data_file, "wb", "utf_8") - rw = autopsy_cur2.fetchone() - appnd = False - counter = 0 - try: - while (rw != None): - if(rw[0] != None): - database_log.write(rw[0] + rw[1] + ' ') - else: - database_log.write(rw[1] + ' ') - autopsy_cur1 = autopsy_con.cursor() - looptry = True - try: - key = "" - key = str(rw[3]) - key = key, - autopsy_cur1.execute("SELECT blackboard_attributes.source, blackboard_attribute_types.display_name, blackboard_attributes.value_type, blackboard_attributes.value_text, blackboard_attributes.value_int32, blackboard_attributes.value_int64, blackboard_attributes.value_double FROM blackboard_attributes INNER JOIN blackboard_attribute_types ON blackboard_attributes.attribute_type_id = blackboard_attribute_types.attribute_type_id WHERE artifact_id =? ORDER BY blackboard_attributes.source, blackboard_attribute_types.display_name, blackboard_attributes.value_type, blackboard_attributes.value_text, blackboard_attributes.value_int32, blackboard_attributes.value_int64, blackboard_attributes.value_double", key) - attributes = autopsy_cur1.fetchall() - print(attributes) - except Exception as e: - print(str(e)) - print(str(rw[3])) - looptry = False - print('hello') - pass - if(looptry == True): - src = attributes[0][0] - for attr in attributes: - val = 3 + attr[2] - numvals = 0 - for x in range(3, 6): - if(attr[x] != None): - numvals += 1 - if(numvals > 1): - global failedbool - global errorem - global attachl - errorem += "There were too many values for attribute type: " + attr[1] + " for artifact with id #" + str(rw[3]) + " for image " + case.image_name + ".\n" - printerror("There were too many values for attribute type: " + attr[1] + " for artifact with id #" + str(rw[3]) + " for image " + case.image_name + ".") - failedbool = True - if(not appnd): 
- attachl.append(autopsy_db_file) - appnd = True - if(not attr[0] == src): - global failedbool - global errorem - global attachl - errorem += "There were inconsistents sources for artifact with id #" + str(rw[3]) + " for image " + case.image_name + ".\n" - printerror("There were inconsistents sources for artifact with id #" + str(rw[3]) + " for image " + case.image_name + ".") - failedbool = True - if(not appnd): - attachl.append(autopsy_db_file) - appnd = True - try: - database_log.write('') - database_log.write(' \n') - rw = autopsy_cur2.fetchone() - except Exception as e: - print('outer exception: ' + str(e)) - print(rw[0]) - print(rw[1]) - print(rw[2]) - print(rw[3]) - - def dbDump(self): - autopsy_db_file = Emailer.make_path(case.output_dir, case.image_name, - "AutopsyTestCase", "autopsy.db") - autopsy_con = sqlite3.connect(autopsy_db_file) - dump_file = Emailer.make_path(case.output_dir, case.image_name, "Dump.txt") - database_log = codecs.open(dump_file, "wb", "utf_8") - for line in autopsy_con.iterdump(): - database_log.write(line + "\n") - - + self.autopsy_artifacts.append(autopsy_cur.fetchone()[0]) def generate_autopsy_attributes(self): if self.autopsy_attributes == 0: @@ -449,9 +352,9 @@ class Database: def generate_gold_artifacts(self): if not self.gold_artifacts: - gold_db_file = Emailer.make_path(case.gold, case.image_name, "autopsy.db") + gold_db_file = Emailer.make_path(case.gold, 'tmp', case.image_name, "autopsy.db") if(not file_exists(gold_db_file)): - gold_db_file = Emailer.make_path(case.gold_parse, case.image_name, "autopsy.db") + gold_db_file = Emailer.make_path(case.gold_parse, 'tmp', case.image_name, "autopsy.db") gold_con = sqlite3.connect(gold_db_file) gold_cur = gold_con.cursor() gold_cur.execute("SELECT COUNT(*) FROM blackboard_artifact_types") @@ -467,9 +370,9 @@ class Database: def generate_gold_attributes(self): if self.gold_attributes == 0: - gold_db_file = Emailer.make_path(case.gold, case.image_name, "autopsy.db") + gold_db_file = 
Emailer.make_path(case.gold, 'tmp', case.image_name, "autopsy.db") if(not file_exists(gold_db_file)): - gold_db_file = Emailer.make_path(case.gold_parse, case.image_name, "autopsy.db") + gold_db_file = Emailer.make_path(case.gold_parse, 'tmp', case.image_name, "autopsy.db") gold_con = sqlite3.connect(gold_db_file) gold_cur = gold_con.cursor() gold_cur.execute("SELECT COUNT(*) FROM blackboard_attributes") @@ -477,9 +380,9 @@ class Database: def generate_gold_objects(self): if self.gold_objects == 0: - gold_db_file = Emailer.make_path(case.gold, case.image_name, "autopsy.db") + gold_db_file = Emailer.make_path(case.gold, 'tmp', case.image_name, "autopsy.db") if(not file_exists(gold_db_file)): - gold_db_file = Emailer.make_path(case.gold_parse, case.image_name, "autopsy.db") + gold_db_file = Emailer.make_path(case.gold_parse, 'tmp', case.image_name, "autopsy.db") gold_con = sqlite3.connect(gold_db_file) gold_cur = gold_con.cursor() gold_cur.execute("SELECT COUNT(*) FROM tsk_objects") @@ -491,6 +394,97 @@ class Database: # Main testing functions # #----------------------------------# +def retrieve_data(data_file, autopsy_con,autopsy_db_file): + autopsy_cur2 = autopsy_con.cursor() + autopsy_cur2.execute("SELECT tsk_files.parent_path, tsk_files.name, blackboard_artifact_types.display_name, blackboard_artifacts.artifact_id FROM blackboard_artifact_types INNER JOIN blackboard_artifacts ON blackboard_artifact_types.artifact_type_id = blackboard_artifacts.artifact_type_id INNER JOIN tsk_files ON tsk_files.obj_id = blackboard_artifacts.obj_id") + database_log = codecs.open(data_file, "wb", "utf_8") + rw = autopsy_cur2.fetchone() + appnd = False + counter = 0 + try: + while (rw != None): + if(rw[0] != None): + database_log.write(rw[0] + rw[1] + ' ') + else: + database_log.write(rw[1] + ' ') + autopsy_cur1 = autopsy_con.cursor() + looptry = True + try: + key = "" + key = str(rw[3]) + key = key, + autopsy_cur1.execute("SELECT blackboard_attributes.source, 
blackboard_attribute_types.display_name, blackboard_attributes.value_type, blackboard_attributes.value_text, blackboard_attributes.value_int32, blackboard_attributes.value_int64, blackboard_attributes.value_double FROM blackboard_attributes INNER JOIN blackboard_attribute_types ON blackboard_attributes.attribute_type_id = blackboard_attribute_types.attribute_type_id WHERE artifact_id =? ORDER BY blackboard_attributes.source, blackboard_attribute_types.display_name, blackboard_attributes.value_type, blackboard_attributes.value_text, blackboard_attributes.value_int32, blackboard_attributes.value_int64, blackboard_attributes.value_double", key) + attributes = autopsy_cur1.fetchall() + except Exception as e: + print(str(e)) + print(str(rw[3])) + looptry = False + pass + if(looptry == True): + src = attributes[0][0] + for attr in attributes: + val = 3 + attr[2] + numvals = 0 + for x in range(3, 6): + if(attr[x] != None): + numvals += 1 + if(numvals > 1): + global failedbool + global errorem + global attachl + errorem += "There were too many values for attribute type: " + attr[1] + " for artifact with id #" + str(rw[3]) + " for image " + case.image_name + ".\n" + printerror("There were too many values for attribute type: " + attr[1] + " for artifact with id #" + str(rw[3]) + " for image " + case.image_name + ".") + failedbool = True + if(not appnd): + attachl.append(autopsy_db_file) + appnd = True + if(not attr[0] == src): + global failedbool + global errorem + global attachl + errorem += "There were inconsistents sources for artifact with id #" + str(rw[3]) + " for image " + case.image_name + ".\n" + printerror("There were inconsistents sources for artifact with id #" + str(rw[3]) + " for image " + case.image_name + ".") + failedbool = True + if(not appnd): + attachl.append(autopsy_db_file) + appnd = True + try: + database_log.write('') + database_log.write(' \n') + rw = autopsy_cur2.fetchone() + except Exception as e: + print('outer exception: ' + str(e)) + +def 
dbDump(): + autopsy_db_file = Emailer.make_path(case.output_dir, case.image_name, + "AutopsyTestCase", "autopsy.db") + backup_db_file = Emailer.make_path(case.output_dir, case.image_name, + "AutopsyTestCase", "autopsy_backup.db") + copy_file(autopsy_db_file,backup_db_file) + autopsy_con = sqlite3.connect(backup_db_file) + autopsy_con.execute("DROP TABLE blackboard_artifacts") + autopsy_con.execute("DROP TABLE blackboard_attributes") + dump_file = Emailer.make_path(case.output_dir, case.image_name, case.image_name + "Dump.txt") + database_log = codecs.open(dump_file, "wb", "utf_8") + dump_list = autopsy_con.iterdump() + try: + for line in dump_list: + try: + database_log.write(line + "\n") + except: + print("Inner dump Exception:" + str(e)) + except Exception as e: + print("Outer dump Exception:" + str(e)) # Iterates through an XML configuration file to find all given elements @@ -510,7 +504,6 @@ def run_config_test(config_file): case.global_csv = Emailer.make_local_path(case.global_csv) if parsed.getElementsByTagName("golddir"): case.gold_parse = parsed.getElementsByTagName("golddir")[0].getAttribute("value").encode().decode("utf_8") - case.gold_parse = Emailer.make_path(case.gold_parse, "tmp") else: case.gold_parse = case.gold # Generate the top navbar of the HTML for easy access to all images @@ -535,12 +528,12 @@ def run_config_test(config_file): images = [] # Run the test for each file in the configuration global args - + if(args.contin): #set all times an image has been processed to 0 for element in parsed.getElementsByTagName("image"): value = element.getAttribute("value").encode().decode("utf_8") - images.append(value) + images.append(str(value)) #Begin infiniloop if(newDay()): global daycount @@ -559,7 +552,7 @@ def run_config_test(config_file): else: for img in values: if file_exists(img): - run_test(img, 0) + run_test(str(img), 0) else: printerror("Warning: Image file listed in configuration does not exist:") printrttot(value + "\n") @@ -637,15 +630,16 @@ 
def run_test(image_file, count): exceptions = search_logs(args.exception_string) okay = "No warnings or exceptions found containing text '" + args.exception_string + "'." print_report(exceptions, "EXCEPTION", okay) - + case.autopsy_dbdump = Emailer.make_path(case.output_dir, case.image_name, + case.image_name + "Dump.txt") # Now test in comparison to the gold standards if not args.gold_creation: try: - gold_path = Emailer.make_path(case.gold, "..") - img_gold = Emailer.make_path(case.gold, case.image_name) - img_archive = Emailer.make_local_path("..", "output", "gold", case.image_name+"-archive.zip") + gold_path = case.gold + img_gold = Emailer.make_path(case.gold, "tmp", case.image_name) + img_archive = Emailer.make_path("..", "output", "gold", case.image_name+"-archive.zip") if(not file_exists(img_archive)): - img_archive = Emailer.make_path(case.gold_parse, "..", case.image_name+"-archive.zip") + img_archive = Emailer.make_path(case.gold_parse, case.image_name+"-archive.zip") gold_path = case.gold_parse img_gold = Emailer.make_path(gold_path, case.image_name) extrctr = zipfile.ZipFile(img_archive, 'r', compression=zipfile.ZIP_DEFLATED) @@ -655,12 +649,22 @@ def run_test(image_file, count): compare_to_gold_db() compare_to_gold_html() compare_errors() - compare_data() + gold_nm = "SortedData" + compare_data(case.sorted_data_file, gold_nm) + gold_nm = "DBDump" + compare_data(case.autopsy_dbdump, gold_nm) del_dir(img_gold) except Exception as e: print("Tests failed due to an error, try rebuilding or creating gold standards.\n") print(str(e) + "\n") # Make the CSV log and the html log viewer + autopsy_db_file = Emailer.make_path(case.output_dir, case.image_name, + "AutopsyTestCase", "autopsy.db") + autopsy_con = sqlite3.connect(autopsy_db_file) + retrieve_data(case.autopsy_data_file, autopsy_con,autopsy_db_file) + srtcmdlst = ["sort", case.autopsy_data_file, "-o", case.sorted_data_file] + subprocess.call(srtcmdlst) + dbDump() generate_csv(case.csv) if 
case.global_csv: generate_csv(case.global_csv) @@ -713,20 +717,20 @@ def run_ant(): # Returns the type of image file, based off extension class IMGTYPE: - RAW, ENCASE, SPLIT, UNKNOWN = range(4) + RAW, ENCASE, SPLIT, UNKNOWN = range(4) def image_type(image_file): - ext_start = image_file.rfind(".") - if (ext_start == -1): - return IMGTYPE.UNKNOWN - ext = image_file[ext_start:].lower() - if (ext == ".img" or ext == ".dd"): - return IMGTYPE.RAW - elif (ext == ".e01"): - return IMGTYPE.ENCASE - elif (ext == ".aa" or ext == ".001"): - return IMGTYPE.SPLIT - else: - return IMGTYPE.UNKNOWN + ext_start = image_file.rfind(".") + if (ext_start == -1): + return IMGTYPE.UNKNOWN + ext = image_file[ext_start:].lower() + if (ext == ".img" or ext == ".dd"): + return IMGTYPE.RAW + elif (ext == ".e01"): + return IMGTYPE.ENCASE + elif (ext == ".aa" or ext == ".001"): + return IMGTYPE.SPLIT + else: + return IMGTYPE.UNKNOWN @@ -743,21 +747,27 @@ def rebuild(): if(case.gold_parse == None): case.gold_parse = case.gold # Delete the current gold standards - gold_dir = Emailer.make_path(case.gold_parse) + gold_dir = Emailer.make_path(case.gold_parse,'tmp') clear_dir(gold_dir) tmpdir = Emailer.make_path(gold_dir, case.image_name) dbinpth = Emailer.make_path(case.output_dir, case.image_name, "AutopsyTestCase", "autopsy.db") dboutpth = Emailer.make_path(tmpdir, "autopsy.db") dataoutpth = Emailer.make_path(tmpdir, case.image_name + "SortedData.txt") + dbdumpinpth = case.autopsy_dbdump + dbdumpoutpth = Emailer.make_path(tmpdir, case.image_name + "DBDump.txt") if not os.path.exists(case.gold_parse): os.makedirs(case.gold_parse) if not os.path.exists(gold_dir): os.makedirs(gold_dir) if not os.path.exists(tmpdir): os.makedirs(tmpdir) - copy_file(dbinpth, dboutpth) - copy_file(case.sorted_data_file, dataoutpth) - error_pth = Emailer.make_path(tmpdir, case.image_name+"SortedErrors.txt") + try: + copy_file(dbinpth, dboutpth) + copy_file(case.sorted_data_file, dataoutpth) + copy_file(dbdumpinpth, 
dbdumpoutpth) + error_pth = Emailer.make_path(tmpdir, case.image_name+"SortedErrors.txt") + except Exception as e: + print(str(e)) copy_file(case.sorted_log, error_pth) # Rebuild the HTML report htmlfolder = "" @@ -769,7 +779,8 @@ def rebuild(): html_path = Emailer.make_path(case.output_dir, case.image_name, "AutopsyTestCase", "Reports") try: - os.makedirs(os.path.join(tmpdir, htmlfolder)) + if not os.path.exists(Emailer.make_path(tmpdir, htmlfolder)): + os.makedirs(Emailer.make_path(tmpdir, htmlfolder)) for file in os.listdir(autopsy_html_path): html_to = Emailer.make_path(tmpdir, file.replace("HTML Report", "Report")) copy_dir(get_file_in_dir(autopsy_html_path, file), html_to) @@ -778,8 +789,9 @@ except Exception as e: errors.append("Error: Unknown fatal error when rebuilding the gold html report.") errors.append(str(e) + "\n") + traceback.print_exc() oldcwd = os.getcwd() - zpdir = case.gold_parse + zpdir = gold_dir os.chdir(zpdir) os.chdir("..") img_gold = "tmp" @@ -803,7 +815,7 @@ def zipdir(path, zip): # from queries while comparing def compare_to_gold_db(): # SQLITE needs unix style pathing - gold_db_file = Emailer.make_path(case.gold, case.image_name, "autopsy.db") + gold_db_file = Emailer.make_path(case.gold, 'tmp', case.image_name, "autopsy.db") if(not file_exists(gold_db_file)): gold_db_file = Emailer.make_path(case.gold_parse, case.image_name, "autopsy.db") autopsy_db_file = Emailer.make_path(case.output_dir, case.image_name, @@ -816,14 +828,13 @@ def compare_to_gold_db(): database.generate_gold_artifacts() database.generate_gold_attributes() except Exception as e: - print(str(e)) + print("Way out:" + str(e)) try: database.generate_autopsy_objects() database.generate_autopsy_artifacts() - database.dbDump() database.generate_autopsy_attributes() except Exception as e: - print(str(e)) + print("Way outA:" + str(e)) # This is where we return if a file doesn't exist, because we don't want to # compare faulty databases, but we do however want to 
try to run all queries # regardless of the other database @@ -861,9 +872,9 @@ def compare_to_gold_db(): # Using the global case's variables, compare the html report file made by # the regression test against the gold standard html report def compare_to_gold_html(): - gold_html_file = Emailer.make_path(case.gold, case.image_name, "Report", "index.html") + gold_html_file = Emailer.make_path(case.gold, 'tmp', case.image_name, "Report", "index.html") if(not file_exists(gold_html_file)): - gold_html_file = Emailer.make_path(case.gold_parse, case.image_name, "Report", "index.html") + gold_html_file = Emailer.make_path(case.gold_parse, 'tmp', case.image_name, "Report", "index.html") htmlfolder = "" for fs in os.listdir(Emailer.make_path(case.output_dir, case.image_name, "AutopsyTestCase", "Reports")): if os.path.isdir(Emailer.make_path(case.output_dir, case.image_name, "AutopsyTestCase", "Reports", fs)): @@ -885,18 +896,18 @@ def compare_to_gold_html(): ListGoldHTML = [] for fs in os.listdir(Emailer.make_path(case.output_dir, case.image_name, "AutopsyTestCase", "Reports", htmlfolder)): if(fs.endswith(".html")): - ListGoldHTML.append(os.path.join(case.output_dir, case.image_name, "AutopsyTestCase", "Reports", htmlfolder, fs)) + ListGoldHTML.append(Emailer.make_path(case.output_dir, case.image_name, "AutopsyTestCase", "Reports", htmlfolder, fs)) #Find all new .html files belonging to this case ListNewHTML = [] - if(os.path.exists(Emailer.make_path(case.gold, case.image_name))): - for fs in os.listdir(Emailer.make_path(case.gold, case.image_name)): + if(os.path.exists(Emailer.make_path(case.gold, 'tmp', case.image_name))): + for fs in os.listdir(Emailer.make_path(case.gold, 'tmp', case.image_name)): if (fs.endswith(".html")): - ListNewHTML.append(Emailer.make_path(case.gold, case.image_name, fs)) + ListNewHTML.append(Emailer.make_path(case.gold, 'tmp', case.image_name, fs)) if(not case.gold_parse == None or case.gold == case.gold_parse): - 
if(file_exists(Emailer.make_path(case.gold_parse, case.image_name))): - for fs in os.listdir(Emailer.make_path(case.gold_parse, case.image_name)): + if(file_exists(Emailer.make_path(case.gold_parse, 'tmp', case.image_name))): + for fs in os.listdir(Emailer.make_path(case.gold_parse, 'tmp',case.image_name)): if (fs.endswith(".html")): - ListNewHTML.append(Emailer.make_path(case.gold_parse, case.image_name, fs)) + ListNewHTML.append(Emailer.make_path(case.gold_parse, 'tmp', case.image_name, fs)) #ensure both reports have the same number of files and are in the same order if(len(ListGoldHTML) != len(ListNewHTML)): printerror("The reports did not have the same number of files. One of the reports may have been corrupted") @@ -1035,17 +1046,19 @@ def generate_common_log(): printerror(str(e) + "\n") logging.critical(traceback.format_exc()) -def compare_data(): - gold_dir = Emailer.make_path(case.gold, case.image_name, case.image_name + "SortedData.txt") +def compare_data(aut, gld): + gold_dir = Emailer.make_path(case.gold, case.image_name, case.image_name + gld + ".txt") if(not file_exists(gold_dir)): - gold_dir = Emailer.make_path(case.gold_parse, case.image_name, case.image_name + "SortedData.txt") - srtd_data = codecs.open(case.sorted_data_file, "r", "utf_8") + gold_dir = Emailer.make_path(case.gold_parse, case.image_name, case.image_name + gld + ".txt") + if(not file_exists(aut)): + return + srtd_data = codecs.open(aut, "r", "utf_8") gold_data = codecs.open(gold_dir, "r", "utf_8") gold_dat = gold_data.read() srtd_dat = srtd_data.read() patrn = re.compile("\d") if (not(gold_dat == srtd_dat)): - diff_dir = Emailer.make_local_path(case.output_dir, case.image_name, case.image_name+"AutopsyErrors-Diff.txt") + diff_dir = Emailer.make_local_path(case.output_dir, case.image_name, case.image_name+gld+"-Diff.txt") diff_file = codecs.open(diff_dir, "wb", "utf_8") dffcmdlst = ["diff", aut, gold_dir] subprocess.call(dffcmdlst, stdout = diff_file) @@ -1063,7 
+1076,7 @@ def compare_data(): def compare_errors(): gold_dir = Emailer.make_path(case.gold, case.image_name, case.image_name + "SortedErrors.txt") if(not file_exists(gold_dir)): - gold_dir = Emailer.make_path(case.gold_parse, case.image_name, case.image_name + "SortedErrors.txt") + gold_dir = Emailer.make_path(case.gold_parse, 'tmp', case.image_name, case.image_name + "SortedErrors.txt") common_log = codecs.open(case.sorted_log, "r", "utf_8") gold_log = codecs.open(gold_dir, "r", "utf_8") gold_dat = gold_log.read() @@ -1507,7 +1520,8 @@ def generate_html(): # Writed the top of the HTML log file def write_html_head(): - html = open(case.html_log, "a") + print(case.html_log) + html = open(str(case.html_log), "a") head = "\ \ AutopsyTestCase Output\ @@ -1781,7 +1795,7 @@ def execute_test(): os.makedirs(case.output_dir) case.common_log = "AutopsyErrors.txt" case.csv = Emailer.make_local_path(case.output_dir, "CSV.txt") - case.html_log = Emailer.make_local_path(case.output_dir, "AutopsyTestCase.html") + case.html_log = Emailer.make_path(case.output_dir, "AutopsyTestCase.html") log_name = case.output_dir + "\\regression.log" logging.basicConfig(filename=log_name, level=logging.DEBUG) # If user wants to do a single file and a list (contradictory?) 
From 0219dc24724b629a6eb65ce687298cd1e433eff5 Mon Sep 17 00:00:00 2001 From: Sean-M Date: Thu, 16 May 2013 16:11:44 -0400 Subject: [PATCH 7/7] updated scripts --- test/script/Emailer.py | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/test/script/Emailer.py b/test/script/Emailer.py index 0a6a3749c4..d98da2e074 100644 --- a/test/script/Emailer.py +++ b/test/script/Emailer.py @@ -2,9 +2,6 @@ import smtplib from email.mime.image import MIMEImage from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText -from email.MIMEBase import MIMEBase -from email import Encoders -import urllib2 import xml from time import localtime, strftime from xml.dom.minidom import parse, parseString @@ -55,16 +52,16 @@ def Build_email(msg, attachl): # Returns a Windows style path starting with the cwd and # ending with the list of directories given def make_local_path(*dirs): - path = wgetcwd() + path = wgetcwd().decode("utf-8") for dir in dirs: - path += ("\\" + dir) + path += ("\\" + str(dir)) return path_fix(path) # Returns a Windows style path based only off the given directories def make_path(*dirs): path = dirs[0] for dir in dirs[1:]: - path += ("\\" + dir) + path += ("\\" + str(dir)) return path_fix(path) # Fix a standard os.path by making it Windows format