Merge branch 'develop' of github.com:sleuthkit/autopsy into 7264d_refresh

Greg DiCristofaro 2021-02-04 14:47:57 -05:00
commit 12f99dcace
10 changed files with 579 additions and 84 deletions

.gitignore

@@ -102,3 +102,4 @@ hs_err_pid*.log
 /thirdparty/yara/YaraJNIWrapper/nbproject/private/
 /thirdparty/yara/yarabridge/.vs/
+*/path_list.txt


@@ -61,19 +61,19 @@ final public class TagNameDefinition implements Comparable<TagNameDefinition> {
     private final TskData.FileKnown knownStatus;
     private static final List<TagNameDefinition> STANDARD_TAGS_DEFINITIONS = new ArrayList<>();
-    private static final List<String> OLD_CATEGORY_TAG_NAMES = new ArrayList<>();
+    private static final List<String> PROJECT_VIC_NAMES_NO_LONGER_USED = new ArrayList<>();
     static {
         STANDARD_TAGS_DEFINITIONS.add(new TagNameDefinition(Bundle.TagNameDefinition_predefTagNames_bookmark_text(), "", TagName.HTML_COLOR.NONE, TskData.FileKnown.UNKNOWN));
         STANDARD_TAGS_DEFINITIONS.add(new TagNameDefinition(Bundle.TagNameDefinition_predefTagNames_followUp_text(), "", TagName.HTML_COLOR.NONE, TskData.FileKnown.UNKNOWN));
         STANDARD_TAGS_DEFINITIONS.add(new TagNameDefinition(Bundle.TagNameDefinition_predefTagNames_notableItem_text(), "", TagName.HTML_COLOR.NONE, TskData.FileKnown.BAD));
-        OLD_CATEGORY_TAG_NAMES.add("CAT-1: Child Exploitation (Illegal)");
-        OLD_CATEGORY_TAG_NAMES.add("CAT-2: Child Exploitation (Non-Illegal/Age Difficult)");
-        OLD_CATEGORY_TAG_NAMES.add("CAT-3: CGI/Animation (Child Exploitive)");
-        OLD_CATEGORY_TAG_NAMES.add("CAT-4: Exemplar/Comparison (Internal Use Only)");
-        OLD_CATEGORY_TAG_NAMES.add("CAT-5: Non-pertinent");
-        OLD_CATEGORY_TAG_NAMES.add("CAT-0: Uncategorized");
+        PROJECT_VIC_NAMES_NO_LONGER_USED.add("CAT-1: Child Exploitation (Illegal)");
+        PROJECT_VIC_NAMES_NO_LONGER_USED.add("CAT-2: Child Exploitation (Non-Illegal/Age Difficult)");
+        PROJECT_VIC_NAMES_NO_LONGER_USED.add("CAT-3: CGI/Animation (Child Exploitive)");
+        PROJECT_VIC_NAMES_NO_LONGER_USED.add("CAT-4: Exemplar/Comparison (Internal Use Only)");
+        PROJECT_VIC_NAMES_NO_LONGER_USED.add("CAT-5: Non-pertinent");
+        PROJECT_VIC_NAMES_NO_LONGER_USED.add("CAT-0: Uncategorized");
     }
     /**
@@ -259,7 +259,7 @@ final public class TagNameDefinition implements Comparable<TagNameDefinition> {
      */
     static synchronized Set<TagNameDefinition> getTagNameDefinitions() {
         if (needsVersionUpdate()) {
-            updateTagDefinitions();
+            updatePropertyFile();
         }
         String tagsProperty = ModuleSettings.getConfigSetting(TAGS_SETTINGS_NAME, TAG_NAMES_SETTING_KEY);
@@ -311,7 +311,7 @@ final public class TagNameDefinition implements Comparable<TagNameDefinition> {
     /**
      * Updates the Tag Definition file to the current format.
      */
-    private static void updateTagDefinitions() {
+    private static void updatePropertyFile() {
         Integer version = getPropertyFileVersion();
         List<TagNameDefinition> definitions = new ArrayList<>();
@@ -355,18 +355,18 @@ final public class TagNameDefinition implements Comparable<TagNameDefinition> {
         }
         // Remove the standard and Project VIC tags from the list
-        List<String> tagStrings = new ArrayList<>();
+        List<String> tagStringsToKeep = new ArrayList<>();
         List<String> standardTags = getStandardTagNames();
         for (TagNameDefinition def : definitions) {
             if (!standardTags.contains(def.getDisplayName())
-                    && !OLD_CATEGORY_TAG_NAMES.contains(def.getDisplayName())) {
-                tagStrings.add(def.toSettingsFormat());
+                    && !PROJECT_VIC_NAMES_NO_LONGER_USED.contains(def.getDisplayName())) {
+                tagStringsToKeep.add(def.toSettingsFormat());
             }
         }
         // Write out the version and the new tag list.
         ModuleSettings.setConfigSetting(TAGS_SETTINGS_NAME, TAG_SETTING_VERSION_KEY, Integer.toString(TAG_SETTINGS_VERSION));
-        ModuleSettings.setConfigSetting(TAGS_SETTINGS_NAME, TAG_NAMES_SETTING_KEY, String.join(";", tagStrings));
+        ModuleSettings.setConfigSetting(TAGS_SETTINGS_NAME, TAG_NAMES_SETTING_KEY, String.join(";", tagStringsToKeep));
     }
     /**


@@ -88,7 +88,7 @@ final public class TagSetDefinition {
     }
     /**
-     * Returns a list of the defined TagSet objects.
+     * Returns a list of configured TagSets (from the user's config folder)
      *
      * @return A list of TagSetDefinition objects or empty list if none were
      *         found.


@@ -55,7 +55,9 @@ public class TagsManager implements Closeable {
     private static final Logger LOGGER = Logger.getLogger(TagsManager.class.getName());
     private final SleuthkitCase caseDb;
-    private static String DEFAULT_TAG_SET_NAME = "Project VIC";
+    // NOTE: This name is also hard coded in Image Gallery and Projet Vic module.
+    // They need to stay in sync
+    private static String PROJECT_VIC_TAG_SET_NAME = "Project VIC";
     private static final Object lock = new Object();
@@ -196,7 +198,7 @@ public class TagsManager implements Closeable {
         try {
             List<TagSet> tagSetList = Case.getCurrentCaseThrows().getSleuthkitCase().getTaggingManager().getTagSets();
             for (TagSet tagSet : tagSetList) {
-                if (tagSet.getName().equals(DEFAULT_TAG_SET_NAME)) {
+                if (tagSet.getName().equals(PROJECT_VIC_TAG_SET_NAME)) {
                     for (TagName tagName : tagSet.getTagNames()) {
                         tagList.add(tagName.getDisplayName());
                     }
@@ -237,7 +239,7 @@ public class TagsManager implements Closeable {
     }
     /**
-     * Creates a new TagSetDefinition file.
+     * Creates a new TagSetDefinition file that will be used for future cases
      *
      * @param tagSetDef The tag set definition.
      *
@@ -258,23 +260,26 @@ public class TagsManager implements Closeable {
     TagsManager(SleuthkitCase caseDb) {
         this.caseDb = caseDb;
-        // Add standard tags and the Project VIC default tag set and tags.
+        // Add standard tags and any configured tag sets.
         TaggingManager taggingMgr = caseDb.getTaggingManager();
         try {
-            List<TagSet> setList = taggingMgr.getTagSets();
-            if (setList.isEmpty()) {
+            List<TagSet> tagSetsInCase = taggingMgr.getTagSets();
+            if (tagSetsInCase.isEmpty()) {
+                // add the standard tag names
                 for (TagNameDefinition def : TagNameDefinition.getStandardTagNameDefinitions()) {
                     caseDb.addOrUpdateTagName(def.getDisplayName(), def.getDescription(), def.getColor(), def.getKnownStatus());
                 }
-                //Assume new case and add tag sets
+                //Assume new case and add all tag sets
                 for (TagSetDefinition setDef : TagSetDefinition.readTagSetDefinitions()) {
-                    List<TagName> tagNameList = new ArrayList<>();
+                    List<TagName> tagNamesInSet = new ArrayList<>();
                     for (TagNameDefinition tagNameDef : setDef.getTagNameDefinitions()) {
-                        tagNameList.add(caseDb.addOrUpdateTagName(tagNameDef.getDisplayName(), tagNameDef.getDescription(), tagNameDef.getColor(), tagNameDef.getKnownStatus()));
+                        tagNamesInSet.add(caseDb.addOrUpdateTagName(tagNameDef.getDisplayName(), tagNameDef.getDescription(), tagNameDef.getColor(), tagNameDef.getKnownStatus()));
                     }
-                    if (!tagNameList.isEmpty()) {
-                        taggingMgr.addTagSet(setDef.getName(), tagNameList);
+                    if (!tagNamesInSet.isEmpty()) {
+                        taggingMgr.addTagSet(setDef.getName(), tagNamesInSet);
                     }
                 }
             }
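For context, the lookup pattern TagsManager uses for the "Project VIC" set can be reproduced from other code. Below is a minimal sketch, assuming an open case and using only calls that appear in this commit (getTaggingManager().getTagSets(), TagSet.getName(), TagSet.getTagNames(), TagName.getDisplayName()); the helper name is illustrative and not part of the change:

    import java.util.ArrayList;
    import java.util.List;
    import org.sleuthkit.autopsy.casemodule.Case;
    import org.sleuthkit.datamodel.TagName;
    import org.sleuthkit.datamodel.TagSet;

    // Illustrative helper (not from this commit): collect the display names in the
    // "Project VIC" tag set of the current case, if such a set exists.
    static List<String> projectVicTagNames() throws Exception {
        List<String> names = new ArrayList<>();
        for (TagSet tagSet : Case.getCurrentCaseThrows().getSleuthkitCase().getTaggingManager().getTagSets()) {
            if (tagSet.getName().equals("Project VIC")) { // same literal as PROJECT_VIC_TAG_SET_NAME
                for (TagName tagName : tagSet.getTagNames()) {
                    names.add(tagName.getDisplayName());
                }
            }
        }
        return names;
    }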


@@ -30,6 +30,7 @@ import org.openide.util.NbBundle;
 /**
  * Enum to represent the six categories in the DHS image categorization scheme.
+ * NOTE: This appears to not be used anywhere anymore after the ImageGallery refactoring
  */
 @NbBundle.Messages({
     "Category.one=CAT-1: Child Exploitation (Illegal)",


@@ -66,7 +66,7 @@ Optional Solr Configuration Parameters:
 \subsubsection install_sorl_index_file_loc Solr Text Index File Location
-<b>Important note:</b> previous versions of Autopsy (Autopsy 4.17.0 and earlier) stored the Solr text indexes in the case output directory. As a result, the Solr indexes would get deleted if a user deleted the case output directory. Solr 8 (i.e. Autpsy 4.18.0 and later) no longer stores the Solr text index files in the case output directory but instead stores them in location defined by the <b>SOLR_DATA_HOME</b> parameter. As a consequence, if a user choses to manually delete case output directories (for example, to free up disk space), the Solr index directories located in <b>SOLR_DATA_HOME</b> need to be manually deleted as well.
+<b>Important note:</b> previous versions of Autopsy (Autopsy 4.17.0 and earlier) stored the Solr text indexes in the case output directory. As a result, the Solr indexes would get deleted if a user deleted the case output directory. Solr 8 (i.e. Autopsy 4.18.0 and later) no longer stores the Solr text index files in the case output directory but instead stores them in location defined by the <b>SOLR_DATA_HOME</b> parameter. As a consequence, if a user choses to manually delete case output directories (for example, to free up disk space), the Solr index directories located in <b>SOLR_DATA_HOME</b> need to be manually deleted as well.
 Text index for an Autopsy case will follow a naming structure according to following rules: \c "[Autopsy case name] [Case creation time stamp] [Text index creation time stamp] [shardX_replica_nY]". For example, the text index for an Autopsy case "Test Case" will be located in the following directory inside <b>SOLR_DATA_HOME</b>:
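To make the naming rule concrete: applying it to the "Test Case" example would yield a directory roughly like the line below. The timestamps here are invented for illustration (the real values come from the case and index creation times), and the exact example in the full documentation page is not part of this hunk:

    [SOLR_DATA_HOME]\Test Case 2021_02_04_11_30_15 2021_02_04_11_30_42 shard1_replica_n1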


@@ -16,7 +16,25 @@ If you are experiencing an error, we encourage you to post on the forum (https:/
 <li>If there were any errors in the \ref troubleshooting_logs "logs"
 </ul>
-\section troubleshooting_user_folder Deleting the Autopsy User Folder
+\section troubleshooting_specific_issues Specific Issues
+\subsection troubleshooting_fond_size Font Size Too Small in Windows
+Make the following changes if the application is hard to navigate in High DPI systems:
+<ol>
+<li>Right-click on the application icon on your Desktop, Start Menu, etc.
+<li>Choose Properties.
+<li>Go to Compatibility tab.
+<li>Click "Change high DPI settings" button.
+<li>Select "Override high DPI scaling behavior".
+<li>Change the "Scaling performed by:" drop down box to "System".
+<li>Restart Autopsy.
+</ol>
+\section troubleshooting_general General Troubleshooting
+\subsection troubleshooting_user_folder Deleting the Autopsy User Folder
 If Autopsy starts behaving strangely, stops loading entirely, or menu items go missing, you probably need to delete your user folder. Doing so essenitally gives you a fresh installation. On Windows the user folder is located in "C:\Users\(user name)\AppData\Roaming\autopsy".
@@ -30,7 +48,7 @@ Note that if you delete this folder you will lose all your Autopsy settings incl
 Alternately, you could copy the fresh user folder somewhere, move your old version back, and replace folders until it works again.
-\section troubleshooting_logs Viewing the Logs
+\subsection troubleshooting_logs Viewing the Logs
 The logs are generally the most helpful in figuring out why an error is occurring. There are two sets of logs - the system logs and the case logs. There is an option in the UI to open the log folder:
@@ -70,7 +88,7 @@ Caused by: java.sql.SQLException: ResultSet closed
 If the error message doesn't help you solve the problem yourself, please post to the <a href="https://sleuthkit.discourse.group/">forum</a> including the full stack trace (if available).
-\section troubleshooting_stack Creating a Thread Dump
+\subsection troubleshooting_stack Creating a Thread Dump
 You can also generate a thread dump of the current state. This is useful if an ingest module or other process seems to be stuck. To generate a thread dump, go to "Help" then "Thread Dump" in the UI.


@@ -0,0 +1,178 @@
# Sample module in the public domain. Feel free to use this as a template
# for your modules (and you can remove this header and take complete credit
# and liability)
#
# Contact: Brian Carrier [carrier <at> sleuthkit [dot] org]
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
# Simple data source-level ingest module for Autopsy.
# Used as part of Python tutorials from Basis Technology - August 2015
#
# Looks for files of a given name, opens then in SQLite, queries the DB,
# and makes artifacts
import jarray
import inspect
from java.lang import Class
from java.lang import System
from java.util.logging import Level
from java.util import ArrayList
from java.io import File
from org.sleuthkit.datamodel import SleuthkitCase
from org.sleuthkit.datamodel import AbstractFile
from org.sleuthkit.autopsy.ingest import IngestModule
from org.sleuthkit.autopsy.ingest.IngestModule import IngestModuleException
from org.sleuthkit.autopsy.ingest import DataSourceIngestModule
from org.sleuthkit.autopsy.ingest import IngestModuleFactoryAdapter
from org.sleuthkit.autopsy.ingest import IngestMessage
from org.sleuthkit.autopsy.ingest import IngestServices
from org.sleuthkit.autopsy.ingest import ModuleDataEvent
from org.sleuthkit.autopsy.coreutils import Logger
from org.sleuthkit.autopsy.casemodule import Case
from org.sleuthkit.datamodel import TskCoreException
from org.sleuthkit.datamodel.Blackboard import BlackboardException
from org.sleuthkit.autopsy.casemodule import NoCurrentCaseException
from org.sleuthkit.datamodel import Account
from org.sleuthkit.datamodel.blackboardutils import CommunicationArtifactsHelper
from java.sql import ResultSet
from java.sql import SQLException
from org.sleuthkit.autopsy.coreutils import AppSQLiteDB
# Factory that defines the name and details of the module and allows Autopsy
# to create instances of the modules that will do the analysis.
class ContactsDbIngestModuleFactory(IngestModuleFactoryAdapter):

    # TODO - Replace with your modules name
    moduleName = "Contacts Db Analyzer"

    def getModuleDisplayName(self):
        return self.moduleName

    def getModuleDescription(self):
        return "Sample module that parses contacts.db"

    def getModuleVersionNumber(self):
        return "1.0"

    def isDataSourceIngestModuleFactory(self):
        return True

    def createDataSourceIngestModule(self, ingestOptions):
        return ContactsDbIngestModule()


# Data Source-level ingest module. One gets created per data source.
class ContactsDbIngestModule(DataSourceIngestModule):

    _logger = Logger.getLogger(ContactsDbIngestModuleFactory.moduleName)

    def log(self, level, msg):
        self._logger.logp(level, self.__class__.__name__, inspect.stack()[1][3], msg)

    def __init__(self):
        self.context = None

    # Where any setup and configuration is done
    # 'context' is an instance of org.sleuthkit.autopsy.ingest.IngestJobContext.
    # See: http://sleuthkit.org/autopsy/docs/api-docs/latest/classorg_1_1sleuthkit_1_1autopsy_1_1ingest_1_1_ingest_job_context.html
    def startUp(self, context):
        self.context = context

    # Where the analysis is done.
    # The 'data_source' object being passed in is of type org.sleuthkit.datamodel.Content.
    # See: http://www.sleuthkit.org/sleuthkit/docs/jni-docs/latest/interfaceorg_1_1sleuthkit_1_1datamodel_1_1_content.html
    # 'progress_bar' is of type org.sleuthkit.autopsy.ingest.DataSourceIngestModuleProgress
    # See: http://sleuthkit.org/autopsy/docs/api-docs/latest/classorg_1_1sleuthkit_1_1autopsy_1_1ingest_1_1_data_source_ingest_module_progress.html
    def process(self, data_source, progress_bar):

        # we don't know how much work there is yet
        progress_bar.switchToIndeterminate()

        # Find files named contacts.db anywhere in the data source.
        # TODO - replace with your database name and parent path.
        app_databases = AppSQLiteDB.findAppDatabases(data_source, "contacts.db", True, "")

        num_databases = len(app_databases)
        progress_bar.switchToDeterminate(num_databases)
        databases_processed = 0

        try:
            # Iterate through all the database files returned
            for app_database in app_databases:

                # Check if the user pressed cancel while we were busy
                if self.context.isJobCancelled():
                    return IngestModule.ProcessResult.OK

                self.log(Level.INFO, "Processing file: " + app_database.getDBFile().getName())

                # Query the contacts table in the database and get all columns.
                try:
                    # TODO - replace with your query
                    result_set = app_database.runQuery("SELECT * FROM contacts")
                except SQLException as e:
                    self.log(Level.INFO, "Error querying database for contacts table (" + e.getMessage() + ")")
                    return IngestModule.ProcessResult.OK

                try:
                    #Get the current case for the CommunicationArtifactsHelper.
                    current_case = Case.getCurrentCaseThrows()
                except NoCurrentCaseException as ex:
                    self.log(Level.INFO, "Case is closed (" + ex.getMessage() + ")")
                    return IngestModule.ProcessResult.OK

                # Create an instance of the helper class
                # TODO - Replace with your parser name and Account.Type
                helper = CommunicationArtifactsHelper(current_case.getSleuthkitCase(),
                                                      ContactsDbIngestModuleFactory.moduleName, app_database.getDBFile(), Account.Type.DEVICE)

                # Iterate through each row and create artifacts
                while result_set.next():
                    try:
                        # TODO - Replace these calls with your column names and types
                        # Ex of other types: result_set.getInt("contact_type") or result_set.getLong("datetime")
                        name = result_set.getString("name")
                        email = result_set.getString("email")
                        phone = result_set.getString("phone")
                    except SQLException as e:
                        self.log(Level.INFO, "Error getting values from contacts table (" + e.getMessage() + ")")

                    helper.addContact(name, phone, "", "", email)

                app_database.close()
                databases_processed += 1
                progress_bar.progress(databases_processed)

        except TskCoreException as e:
            self.log(Level.INFO, "Error inserting or reading from the Sleuthkit case (" + e.getMessage() + ")")
        except BlackboardException as e:
            self.log(Level.INFO, "Error posting artifact to the Blackboard (" + e.getMessage() + ")")

        # After all databases, post a message to the ingest messages in box.
        # TODO - update your module name here
        message = IngestMessage.createMessage(IngestMessage.MessageType.DATA,
                                              ContactsDbIngestModuleFactory.moduleName, "Found %d files" % num_databases)
        IngestServices.getInstance().postMessage(message)

        return IngestModule.ProcessResult.OK


@@ -0,0 +1,284 @@
# Sample module in the public domain. Feel free to use this as a template
# for your modules (and you can remove this header and take complete credit
# and liability)
#
# Contact: Brian Carrier [carrier <at> sleuthkit [dot] org]
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
# Simple data source-level ingest module for Autopsy.
# Search for TODO for the things that you need to change
# See http://sleuthkit.org/autopsy/docs/api-docs/4.6.0/index.html for documentation
import inspect
import os
import shutil
import ntpath
from com.williballenthin.rejistry import RegistryHiveFile
from com.williballenthin.rejistry import RegistryKey
from com.williballenthin.rejistry import RegistryParseException
from com.williballenthin.rejistry import RegistryValue
from java.io import File
from java.lang import Class
from java.lang import System
from java.sql import DriverManager, SQLException
from java.util.logging import Level
from java.util import ArrayList
from org.sleuthkit.datamodel import SleuthkitCase
from org.sleuthkit.datamodel import AbstractFile
from org.sleuthkit.datamodel import ReadContentInputStream
from org.sleuthkit.datamodel import BlackboardArtifact
from org.sleuthkit.datamodel import BlackboardAttribute
from org.sleuthkit.datamodel import TskData
from org.sleuthkit.autopsy.ingest import IngestModule
from org.sleuthkit.autopsy.ingest.IngestModule import IngestModuleException
from org.sleuthkit.autopsy.ingest import DataSourceIngestModule
from org.sleuthkit.autopsy.ingest import IngestModuleFactoryAdapter
from org.sleuthkit.autopsy.ingest import IngestModuleIngestJobSettings
from org.sleuthkit.autopsy.ingest import IngestModuleIngestJobSettingsPanel
from org.sleuthkit.autopsy.ingest import IngestMessage
from org.sleuthkit.autopsy.ingest import IngestServices
from org.sleuthkit.autopsy.ingest import ModuleDataEvent
from org.sleuthkit.autopsy.coreutils import Logger
from org.sleuthkit.autopsy.coreutils import PlatformUtil
from org.sleuthkit.autopsy.casemodule import Case
from org.sleuthkit.autopsy.casemodule.services import Services
from org.sleuthkit.autopsy.casemodule.services import FileManager
from org.sleuthkit.autopsy.datamodel import ContentUtils
from org.sleuthkit.autopsy.modules.interestingitems import FilesSetsManager
# Factory that defines the name and details of the module and allows Autopsy
# to create instances of the modules that will do the analysis.
class RegistryExampleIngestModuleFactory(IngestModuleFactoryAdapter):

    def __init__(self):
        self.settings = None

    moduleName = "Registy Example Module"

    def getModuleDisplayName(self):
        return self.moduleName

    def getModuleDescription(self):
        return "Extract Run Keys To Look For Interesting Items"

    def getModuleVersionNumber(self):
        return "1.0"

    def hasIngestJobSettingsPanel(self):
        return False

    def isDataSourceIngestModuleFactory(self):
        return True

    def createDataSourceIngestModule(self, ingestOptions):
        return RegistryExampleIngestModule(self.settings)


# Data Source-level ingest module. One gets created per data source.
class RegistryExampleIngestModule(DataSourceIngestModule):

    _logger = Logger.getLogger(RegistryExampleIngestModuleFactory.moduleName)

    def log(self, level, msg):
        self._logger.logp(level, self.__class__.__name__, inspect.stack()[1][3], msg)

    def __init__(self, settings):
        self.context = None

    # Where any setup and configuration is done
    def startUp(self, context):
        self.context = context

        # Hive Keys to parse, use / as it is easier to parse out then \\
        self.registryNTUserRunKeys = ('Software/Microsoft/Windows/CurrentVersion/Run', 'Software/Microsoft/Windows/CurrentVersion/RunOnce')
        self.registrySoftwareRunKeys = ('Microsoft/Windows/CurrentVersion/Run', 'Microsoft/Windows/CurrentVersion/RunOnce')
        self.registryKeysFound = []

    # Where the analysis is done.
    def process(self, dataSource, progressBar):

        # we don't know how much work there is yet
        progressBar.switchToIndeterminate()

        # Hive files to extract
        filesToExtract = ("NTUSER.DAT", "SOFTWARE")

        # Create ExampleRegistry directory in temp directory, if it exists then continue on processing
        tempDir = os.path.join(Case.getCurrentCase().getTempDirectory(), "RegistryExample")
        self.log(Level.INFO, "create Directory " + tempDir)
        try:
            os.mkdir(tempDir)
        except:
            self.log(Level.INFO, "ExampleRegistry Directory already exists " + tempDir)

        # Set the database to be read to the once created by the prefetch parser program
        skCase = Case.getCurrentCase().getSleuthkitCase()
        fileManager = Case.getCurrentCase().getServices().getFileManager()

        # Look for files to process
        for fileName in filesToExtract:
            files = fileManager.findFiles(dataSource, fileName)
            numFiles = len(files)

            for file in files:
                # Check if the user pressed cancel while we were busy
                if self.context.isJobCancelled():
                    return IngestModule.ProcessResult.OK

                # Check path to only get the hive files in the config directory and no others
                if ((file.getName() == 'SOFTWARE') and (file.getParentPath().upper() == '/WINDOWS/SYSTEM32/CONFIG/') and (file.getSize() > 0)):
                    # Save the file locally in the temp folder.
                    self.writeHiveFile(file, file.getName(), tempDir)

                    # Process this file looking thru the run keys
                    self.processSoftwareHive(os.path.join(tempDir, file.getName()), file)

                elif ((file.getName() == 'NTUSER.DAT') and ('/USERS' in file.getParentPath().upper()) and (file.getSize() > 0)):
                    # Found a NTUSER.DAT file to process only want files in User directories
                    # Filename may not be unique so add file id to the name
                    fileName = str(file.getId()) + "-" + file.getName()

                    # Save the file locally in the temp folder.
                    self.writeHiveFile(file, fileName, tempDir)

                    # Process this file looking thru the run keys
                    self.processNTUserHive(os.path.join(tempDir, fileName), file)

        # Setup Artifact and Attributes
        try:
            artID = skCase.addArtifactType( "TSK_REGISTRY_RUN_KEYS", "Registry Run Keys")
        except:
            self.log(Level.INFO, "Artifacts Creation Error, some artifacts may not exist now. ==> ")

        artId = skCase.getArtifactTypeID("TSK_REGISTRY_RUN_KEYS")

        try:
            attributeIdRunKeyName = skCase.addArtifactAttributeType("TSK_REG_RUN_KEY_NAME", BlackboardAttribute.TSK_BLACKBOARD_ATTRIBUTE_VALUE_TYPE.STRING, "Run Key Name")
        except:
            self.log(Level.INFO, "Attributes Creation Error, TSK_REG_RUN_KEY_NAME, May already exist. ")
        try:
            attributeIdRunKeyValue = skCase.addArtifactAttributeType("TSK_REG_RUN_KEY_VALUE", BlackboardAttribute.TSK_BLACKBOARD_ATTRIBUTE_VALUE_TYPE.STRING, "Run Key Value")
        except:
            self.log(Level.INFO, "Attributes Creation Error, TSK_REG_RUN_KEY_VALUE, May already exist. ")
        try:
            attributeIdRegKeyLoc = skCase.addArtifactAttributeType("TSK_REG_KEY_LOCATION", BlackboardAttribute.TSK_BLACKBOARD_ATTRIBUTE_VALUE_TYPE.STRING, "Registry Key Location")
        except:
            self.log(Level.INFO, "Attributes Creation Error, TSK_REG_KEY_LOCATION, May already exist. ")

        attributeIdRunKeyName = skCase.getAttributeType("TSK_REG_RUN_KEY_NAME")
        attributeIdRunKeyValue = skCase.getAttributeType("TSK_REG_RUN_KEY_VALUE")
        attributeIdRegKeyLoc = skCase.getAttributeType("TSK_REG_KEY_LOCATION")

        moduleName = RegistryExampleIngestModuleFactory.moduleName

        # RefistryKeysFound is a list that contains a list with the following records abstractFile, Registry Key Location, Key Name, Key value
        for registryKey in self.registryKeysFound:
            attributes = ArrayList()
            art = registryKey[0].newArtifact(artId)

            attributes.add(BlackboardAttribute(attributeIdRegKeyLoc, moduleName, registryKey[1]))
            attributes.add(BlackboardAttribute(attributeIdRunKeyName, moduleName, registryKey[2]))
            attributes.add(BlackboardAttribute(attributeIdRunKeyValue, moduleName, registryKey[3]))
            art.addAttributes(attributes)

            # index the artifact for keyword search
            try:
                blackboard.indexArtifact(art)
            except:
                self._logger.log(Level.WARNING, "Error indexing artifact " + art.getDisplayName())

        #Clean up registryExample directory and files
        try:
            shutil.rmtree(tempDir)
        except:
            self.log(Level.INFO, "removal of directory tree failed " + tempDir)

        # After all databases, post a message to the ingest messages in box.
        message = IngestMessage.createMessage(IngestMessage.MessageType.DATA,
                                              "RegistryExample", " RegistryExample Files Have Been Analyzed " )
        IngestServices.getInstance().postMessage(message)

        return IngestModule.ProcessResult.OK

    def writeHiveFile(self, file, fileName, tempDir):
        # Write the file to the temp directory.
        filePath = os.path.join(tempDir, fileName)
        ContentUtils.writeToFile(file, File(filePath))

    def processSoftwareHive(self, softwareHive, abstractFile):
        # Open the registry hive file
        softwareRegFile = RegistryHiveFile(File(softwareHive))
        for runKey in self.registrySoftwareRunKeys:
            currentKey = self.findRegistryKey(softwareRegFile, runKey)
            if len(currentKey.getValueList()) > 0:
                skValues = currentKey.getValueList()
                for skValue in skValues:
                    regKey = []
                    regKey.append(abstractFile)
                    regKey.append(runKey)
                    skName = skValue.getName()
                    skVal = skValue.getValue()
                    regKey.append(str(skName))
                    regKey.append(skVal.getAsString())
                    self.registryKeysFound.append(regKey)

    def processNTUserHive(self, ntuserHive, abstractFile):
        # Open the registry hive file
        ntuserRegFile = RegistryHiveFile(File(ntuserHive))
        for runKey in self.registryNTUserRunKeys:
            currentKey = self.findRegistryKey(ntuserRegFile, runKey)
            if len(currentKey.getValueList()) > 0:
                skValues = currentKey.getValueList()
                for skValue in skValues:
                    regKey = []
                    regKey.append(abstractFile)
                    regKey.append(runKey)
                    skName = skValue.getName()
                    skVal = skValue.getValue()
                    regKey.append(str(skName))
                    regKey.append(skVal.getAsString())
                    self.registryKeysFound.append(regKey)

    def findRegistryKey(self, registryHiveFile, registryKey):
        # Search for the registry key
        rootKey = registryHiveFile.getRoot()
        regKeyList = registryKey.split('/')
        currentKey = rootKey
        try:
            for key in regKeyList:
                currentKey = currentKey.getSubkey(key)
            return currentKey
        except:
            # Key not found
            return None


@@ -273,19 +273,24 @@ class TestRunner(object):
         # Compare output with gold and display results
         TestResultsDiffer.run_diff(test_data)
-        print("Html report passed: ", test_data.html_report_passed)
+        # NOTE: commented out html version items
+        # print("Html report passed: ", test_data.html_report_passed)
         print("Errors diff passed: ", test_data.errors_diff_passed)
         print("DB diff passed: ", test_data.db_diff_passed)
         # run time test only for the specific jenkins test
         if test_data.main_config.timing:
             print("Run time test passed: ", test_data.run_time_passed)
-            test_data.overall_passed = (test_data.html_report_passed and
-                test_data.errors_diff_passed and test_data.db_diff_passed)
+            # NOTE: commented out html version items
+            #test_data.overall_passed = (test_data.html_report_passed and
+            #test_data.errors_diff_passed and test_data.db_diff_passed)
+            test_data.overall_passed = (test_data.errors_diff_passed and test_data.db_diff_passed)
         # otherwise, do the usual
         else:
-            test_data.overall_passed = (test_data.html_report_passed and
-                test_data.errors_diff_passed and test_data.db_diff_passed)
+            # NOTE: commented out html version items
+            #test_data.overall_passed = (test_data.html_report_passed and
+            #test_data.errors_diff_passed and test_data.db_diff_passed)
+            test_data.overall_passed = (test_data.errors_diff_passed and test_data.db_diff_passed)
         Reports.generate_reports(test_data)
         if(not test_data.overall_passed):
@@ -1009,10 +1014,11 @@ class TestResultsDiffer(object):
         test_data.errors_diff_passed = passed
         # Compare html output
-        gold_report_path = test_data.get_html_report_path(DBType.GOLD)
-        output_report_path = test_data.get_html_report_path(DBType.OUTPUT)
-        passed = TestResultsDiffer._html_report_diff(test_data)
-        test_data.html_report_passed = passed
+        # NOTE: commented out html version items
+        # gold_report_path = test_data.get_html_report_path(DBType.GOLD)
+        # output_report_path = test_data.get_html_report_path(DBType.OUTPUT)
+        # passed = TestResultsDiffer._html_report_diff(test_data)
+        # test_data.html_report_passed = passed
         # Compare time outputs
         if test_data.main_config.timing:
@@ -1070,51 +1076,52 @@ class TestResultsDiffer(object):
         else:
             return True
-    def _html_report_diff(test_data):
-        """Compare the output and gold html reports. Diff util is used for this purpose.
-        Diff -r -N -x <non-textual files> --ignore-matching-lines <regex> <folder-location-1> <folder-location-2>
-        is executed.
-        Diff is recursively used to scan through the HTML report directories. Modify the <regex> to suit the needs.
-        Currently, the regex is set to match certain lines found on index.html and summary.html, and skip (read ignore)
-        them.
-        Diff returns 0 when there is no difference, 1 when there is difference, and 2 when there is trouble (trouble not
-        defined in the official documentation).
-
-        Args:
-            test_data TestData object which contains initialized report_paths.
-
-        Returns:
-            true, if the reports match, false otherwise.
-        """
-        gold_report_path = test_data.get_html_report_path(DBType.GOLD)
-        output_report_path = test_data.get_html_report_path(DBType.OUTPUT)
-        try:
-            # Ensure gold is passed before output
-            (subprocess.check_output(["diff", '-r', '-N', '-x', '*.png', '-x', '*.ico', '--ignore-matching-lines',
-                'HTML Report Generated on \|Autopsy Report for case \|Case:\|Case Number:'
-                '\|Examiner:\|Unalloc_', gold_report_path, output_report_path]))
-            print_report("", "REPORT COMPARISON", "The test html reports matched the gold reports")
-            return True
-        except subprocess.CalledProcessError as e:
-            if e.returncode == 1:
-                Errors.print_error("Error Code: 1\nThe HTML reports did not match.")
-                diff_file = codecs.open(test_data.output_path + "\HTML-Report-Diff.txt", "wb", "utf_8")
-                diff_file.write(str(e.output.decode("utf-8")))
-                return False
-            if e.returncode == 2:
-                Errors.print_error("Error Code: 2\nTrouble executing the Diff Utility.")
-                diff_file = codecs.open(test_data.output_path + "\HTML-Report-Diff.txt", "wb", "utf_8")
-                diff_file.write(str(e.output.decode("utf-8")))
-                return False
-        except OSError as e:
-            Errors.print_error("Error: OSError while performing html report diff")
-            Errors.print_error(str(e) + "\n")
-            return False
-        except Exception as e:
-            Errors.print_error("Error: Unknown fatal error comparing reports.")
-            Errors.print_error(str(e) + "\n")
-            logging.critical(traceback.format_exc())
-            return False
+    # NOTE: commented out html version items
+    # def _html_report_diff(test_data):
+    #     """Compare the output and gold html reports. Diff util is used for this purpose.
+    #     Diff -r -N -x <non-textual files> --ignore-matching-lines <regex> <folder-location-1> <folder-location-2>
+    #     is executed.
+    #     Diff is recursively used to scan through the HTML report directories. Modify the <regex> to suit the needs.
+    #     Currently, the regex is set to match certain lines found on index.html and summary.html, and skip (read ignore)
+    #     them.
+    #     Diff returns 0 when there is no difference, 1 when there is difference, and 2 when there is trouble (trouble not
+    #     defined in the official documentation).
+    #
+    #     Args:
+    #         test_data TestData object which contains initialized report_paths.
+    #
+    #     Returns:
+    #         true, if the reports match, false otherwise.
+    #     """
+    #     gold_report_path = test_data.get_html_report_path(DBType.GOLD)
+    #     output_report_path = test_data.get_html_report_path(DBType.OUTPUT)
+    #     try:
+    #         # Ensure gold is passed before output
+    #         (subprocess.check_output(["diff", '-r', '-N', '-x', '*.png', '-x', '*.ico', '--ignore-matching-lines',
+    #             'HTML Report Generated on \|Autopsy Report for case \|Case:\|Case Number:'
+    #             '\|Examiner:\|Unalloc_', gold_report_path, output_report_path]))
+    #         print_report("", "REPORT COMPARISON", "The test html reports matched the gold reports")
+    #         return True
+    #     except subprocess.CalledProcessError as e:
+    #         if e.returncode == 1:
+    #             Errors.print_error("Error Code: 1\nThe HTML reports did not match.")
+    #             diff_file = codecs.open(test_data.output_path + "\HTML-Report-Diff.txt", "wb", "utf_8")
+    #             diff_file.write(str(e.output.decode("utf-8")))
+    #             return False
+    #         if e.returncode == 2:
+    #             Errors.print_error("Error Code: 2\nTrouble executing the Diff Utility.")
+    #             diff_file = codecs.open(test_data.output_path + "\HTML-Report-Diff.txt", "wb", "utf_8")
+    #             diff_file.write(str(e.output.decode("utf-8")))
+    #             return False
+    #     except OSError as e:
+    #         Errors.print_error("Error: OSError while performing html report diff")
+    #         Errors.print_error(str(e) + "\n")
+    #         return False
+    #     except Exception as e:
+    #         Errors.print_error("Error: Unknown fatal error comparing reports.")
+    #         Errors.print_error(str(e) + "\n")
+    #         logging.critical(traceback.format_exc())
+    #         return False
     def _run_time_diff(test_data, old_time_path):
         """ Compare run times for this run, and the run previous.
@@ -1371,7 +1378,8 @@ class Reports(object):
         vars.append( str(len(search_log_set("autopsy", "Stopping ingest due to low disk space on disk", test_data))) )
         vars.append( make_local_path("gold", test_data.image_name, DB_FILENAME) )
         vars.append( make_local_path("gold", test_data.image_name, "standard.html") )
-        vars.append( str(test_data.html_report_passed) )
+        # NOTE: commented out html version items
+        # vars.append( str(test_data.html_report_passed) )
         vars.append( test_data.ant_to_string() )
         # Join it together with a ", "
         output = "|".join(vars)