Merge in rc-211 branch

Richard Cordovano 2015-03-30 09:36:51 -04:00
commit bcc43af925
142 changed files with 1204 additions and 466 deletions

View File

@ -29,6 +29,8 @@ URL_ON_IMG=http://www.sleuthkit.org/
URL_ON_HELP=http://sleuthkit.org/autopsy/docs/user-docs/3.1/
FILE_FOR_LOCAL_HELP=file:///
INDEX_FOR_LOCAL_HELP=/docs/index.html
LBL_Close=Close
DataContentViewerString.copyMenuItem.text=Copy
DataContentViewerHex.copyMenuItem.text=Copy
@ -137,7 +139,7 @@ AutopsyOptionsPanel.useGMTTimeRB.text=Use GMT
AutopsyOptionsPanel.useLocalTimeRB.text=Use local time zone
AutopsyOptionsPanel.keepCurrentViewerRB.toolTipText=For example, stay in Hex view when a JPEG is selected.
AutopsyOptionsPanel.keepCurrentViewerRB.text=Stay on the same file viewer
AutopsyOptionsPanel.restartRequiredLabel.text=For this computer, a maximum of {0} file ingest threads should be used. Restart required to take effect.
AutopsyOptionsPanel.restartRequiredLabel.text=For this computer, a maximum of {0} file ingest threads should be used. Application restart required to take effect.
AutopsyOptionsPanel.jLabelSelectFile.text=When selecting a file:
AutopsyOptionsPanel.jLabelHideKnownFiles.text=Hide known files (i.e. those in the NIST NSRL) in the:
AutopsyOptionsPanel.jLabelTimeDisplay.text=When displaying times:

View File

@ -0,0 +1,108 @@
/*
* Autopsy Forensic Browser
*
* Copyright 2011-2015 Basis Technology Corp.
* Contact: carrier <at> sleuthkit <dot> org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.sleuthkit.autopsy.corecomponents;
import java.awt.Desktop;
import java.awt.event.ActionEvent;
import java.awt.event.ActionListener;
import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URI;
import org.netbeans.core.actions.HTMLViewAction;
import org.openide.awt.ActionID;
import org.openide.awt.ActionReference;
import org.openide.awt.ActionReferences;
import org.openide.awt.ActionRegistration;
import org.openide.awt.HtmlBrowser;
import org.openide.util.NbBundle;
import org.openide.util.NbBundle.Messages;
import java.util.logging.Level;
import java.util.logging.Logger;
/**
* Implements a hyperlink to the Offline Documentation.
*/
@ActionID(
category = "Help",
id = "org.sleuthkit.autopsy.corecomponents.OfflineHelpAction"
)
@ActionRegistration(
displayName = "#CTL_OfflineHelpAction"
)
@ActionReferences({
@ActionReference(path = "Menu/Help", position = 1),
@ActionReference(path = "Shortcuts", name = "F2")
})
@Messages("CTL_OfflineHelpAction=Offline Autopsy Documentation")
public final class OfflineHelpAction implements ActionListener {
private URI uri;
private static final Logger Logger =
org.sleuthkit.autopsy.coreutils.Logger.getLogger(AboutWindowPanel.class.getName());
@Override
public void actionPerformed(ActionEvent e) {
viewOfflineHelp();
}
/**
* Displays the Offline Documentation in the system browser. If not
* available, displays it in the built-in OpenIDE HTML Browser.
*
* Tested and working: Chrome, Firefox, IE
* Not tested: Opera, Safari
*/
private void viewOfflineHelp() {
String fileForHelp = "";
String indexForHelp = "";
String currentDirectory = "";
try {
// Match the form: file:///C:/some/directory/AutopsyXYZ/docs/index.html
fileForHelp = NbBundle.getMessage(OfflineHelpAction.class, "FILE_FOR_LOCAL_HELP");
indexForHelp = NbBundle.getMessage(OfflineHelpAction.class, "INDEX_FOR_LOCAL_HELP");
currentDirectory = System.getProperty("user.dir").replace("\\", "/").replace(" ", "%20"); //NON-NLS
uri = new URI(fileForHelp + currentDirectory + indexForHelp);
} catch (Exception ex) {
Logger.log(Level.SEVERE, "Unable to load Offline Documentation: "
+ fileForHelp + currentDirectory + indexForHelp, ex); //NON-NLS
}
if (uri != null) {
// Display URL in the System browser
if (Desktop.isDesktopSupported()) {
Desktop desktop = Desktop.getDesktop();
try {
desktop.browse(uri);
} catch (IOException ex) {
Logger.log(Level.SEVERE, "Unable to launch the system browser: "
+ fileForHelp + currentDirectory + indexForHelp, ex); //NON-NLS
}
} else {
org.openide.awt.StatusDisplayer.getDefault().setStatusText(
NbBundle.getMessage(HTMLViewAction.class, "CTL_OpeningBrowser")); //NON-NLS
try {
HtmlBrowser.URLDisplayer.getDefault().showURL(uri.toURL());
} catch (MalformedURLException ex) {
Logger.log(Level.SEVERE, "Unable to launch the built-in browser: "
+ fileForHelp + currentDirectory + indexForHelp, ex); //NON-NLS
}
}
}
}
}
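For reference, the URL assembled above simply concatenates the two bundle values shown earlier (FILE_FOR_LOCAL_HELP=file:/// and INDEX_FOR_LOCAL_HELP=/docs/index.html) around the escaped working directory. A standalone sketch, using a hypothetical install directory containing a space (user.dir supplies the real value):

import java.net.URI;

public class HelpUriDemo {
    public static void main(String[] args) throws Exception {
        // Hypothetical install directory; the real action reads System.getProperty("user.dir").
        String dir = "C:\\Program Files\\Autopsy".replace("\\", "/").replace(" ", "%20");
        URI uri = new URI("file:///" + dir + "/docs/index.html");
        System.out.println(uri); // file:///C:/Program%20Files/Autopsy/docs/index.html
    }
}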

View File

@ -51,7 +51,7 @@ import java.util.logging.Logger;
@ActionReference(path = "Menu/Help", position = 0),
@ActionReference(path = "Shortcuts", name = "F1")
})
@Messages("CTL_OnlineHelpAction=Online Documentation")
@Messages("CTL_OnlineHelpAction=Online Autopsy Documentation")
public final class OnlineHelpAction implements ActionListener {
private URI uri;

View File

@ -28,6 +28,7 @@ import org.sleuthkit.datamodel.AbstractFile;
import org.sleuthkit.datamodel.BlackboardArtifact;
import org.sleuthkit.datamodel.BlackboardAttribute;
import org.sleuthkit.datamodel.SleuthkitCase;
import org.sleuthkit.datamodel.SleuthkitCase.CaseDbQuery;
import org.sleuthkit.datamodel.TskCoreException;
/**
@ -228,40 +229,33 @@ public abstract class AbstractAbstractFileNode<T extends AbstractFile> extends A
}
@SuppressWarnings("deprecation")
private static String getHashSetHitsForFile(AbstractFile content) {
ResultSet rs = null;
String strList = "";
SleuthkitCase skCase = content.getSleuthkitCase();
long objId = content.getId();
try {
int setNameId = BlackboardAttribute.ATTRIBUTE_TYPE.TSK_SET_NAME.getTypeID();
int artId = BlackboardArtifact.ARTIFACT_TYPE.TSK_HASHSET_HIT.getTypeID();
int setNameId = BlackboardAttribute.ATTRIBUTE_TYPE.TSK_SET_NAME.getTypeID();
int artId = BlackboardArtifact.ARTIFACT_TYPE.TSK_HASHSET_HIT.getTypeID();
String query = "SELECT value_text,blackboard_attributes.artifact_id,attribute_type_id " //NON-NLS
+ "FROM blackboard_attributes,blackboard_artifacts WHERE " //NON-NLS
+ "attribute_type_id=" + setNameId //NON-NLS
+ " AND blackboard_attributes.artifact_id=blackboard_artifacts.artifact_id" //NON-NLS
+ " AND blackboard_artifacts.artifact_type_id=" + artId //NON-NLS
+ " AND blackboard_artifacts.obj_id=" + objId; //NON-NLS
rs = skCase.runQuery(query);
String query = "SELECT value_text,blackboard_attributes.artifact_id,attribute_type_id " //NON-NLS
+ "FROM blackboard_attributes,blackboard_artifacts WHERE " //NON-NLS
+ "attribute_type_id=" + setNameId //NON-NLS
+ " AND blackboard_attributes.artifact_id=blackboard_artifacts.artifact_id" //NON-NLS
+ " AND blackboard_artifacts.artifact_type_id=" + artId //NON-NLS
+ " AND blackboard_artifacts.obj_id=" + objId; //NON-NLS
try (CaseDbQuery dbQuery = skCase.executeQuery(query)) {
ResultSet resultSet = dbQuery.getResultSet();
int i = 0;
while (rs.next()) {
while (resultSet.next()) {
if (i++ > 0) {
strList += ", ";
}
strList += rs.getString("value_text"); //NON-NLS
}
} catch (SQLException ex) {
logger.log(Level.WARNING, "SQL Exception occurred: ", ex); //NON-NLS
} finally {
if (rs != null) {
try {
skCase.closeRunQuery(rs);
} catch (SQLException ex) {
logger.log(Level.WARNING, "Error closing result set after getting hashset hits", ex); //NON-NLS
}
strList += resultSet.getString("value_text"); //NON-NLS
}
} catch (TskCoreException | SQLException ex) {
logger.log(Level.WARNING, "Error getting hashset hits: ", ex); //NON-NLS
}
return strList;
}
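This is the first of several files in this merge that move from the deprecated SleuthkitCase.runQuery()/closeRunQuery() pair to the AutoCloseable SleuthkitCase.CaseDbQuery, so the result set is released even when an exception is thrown. A minimal sketch of the recurring shape, assuming an open SleuthkitCase (skCase), a java.util.logging logger, and a hypothetical handle() consumer:

String query = "SELECT value_text FROM blackboard_attributes"; // placeholder query
try (CaseDbQuery dbQuery = skCase.executeQuery(query)) {       // closed automatically
    ResultSet resultSet = dbQuery.getResultSet();
    while (resultSet.next()) {
        handle(resultSet.getString("value_text"));             // hypothetical consumer
    }
} catch (TskCoreException | SQLException ex) {
    logger.log(Level.WARNING, "Query failed: " + query, ex);
}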

View File

@ -44,6 +44,8 @@ import org.sleuthkit.autopsy.ingest.ModuleDataEvent;
import org.sleuthkit.datamodel.BlackboardArtifact;
import org.sleuthkit.datamodel.BlackboardAttribute;
import org.sleuthkit.datamodel.SleuthkitCase;
import org.sleuthkit.datamodel.SleuthkitCase.CaseDbQuery;
import org.sleuthkit.datamodel.TskCoreException;
import org.sleuthkit.datamodel.TskException;
/**
@ -95,18 +97,19 @@ public class EmailExtracted implements AutopsyVisitableItem {
return;
}
try {
int artId = BlackboardArtifact.ARTIFACT_TYPE.TSK_EMAIL_MSG.getTypeID();
int pathAttrId = BlackboardAttribute.ATTRIBUTE_TYPE.TSK_PATH.getTypeID();
String query = "SELECT value_text,blackboard_attributes.artifact_id,attribute_type_id " //NON-NLS
+ "FROM blackboard_attributes,blackboard_artifacts WHERE " //NON-NLS
+ "attribute_type_id=" + pathAttrId //NON-NLS
+ " AND blackboard_attributes.artifact_id=blackboard_artifacts.artifact_id" //NON-NLS
+ " AND blackboard_artifacts.artifact_type_id=" + artId; //NON-NLS
ResultSet rs = skCase.runQuery(query);
while (rs.next()) {
final String path = rs.getString("value_text"); //NON-NLS
final long artifactId = rs.getLong("artifact_id"); //NON-NLS
int artId = BlackboardArtifact.ARTIFACT_TYPE.TSK_EMAIL_MSG.getTypeID();
int pathAttrId = BlackboardAttribute.ATTRIBUTE_TYPE.TSK_PATH.getTypeID();
String query = "SELECT value_text,blackboard_attributes.artifact_id,attribute_type_id " //NON-NLS
+ "FROM blackboard_attributes,blackboard_artifacts WHERE " //NON-NLS
+ "attribute_type_id=" + pathAttrId //NON-NLS
+ " AND blackboard_attributes.artifact_id=blackboard_artifacts.artifact_id" //NON-NLS
+ " AND blackboard_artifacts.artifact_type_id=" + artId; //NON-NLS
try (CaseDbQuery dbQuery = skCase.executeQuery(query)) {
ResultSet resultSet = dbQuery.getResultSet();
while (resultSet.next()) {
final String path = resultSet.getString("value_text"); //NON-NLS
final long artifactId = resultSet.getLong("artifact_id"); //NON-NLS
final Map<String, String> parsedPath = parsePath(path);
final String account = parsedPath.get(MAIL_ACCOUNT);
final String folder = parsedPath.get(MAIL_FOLDER);
@ -123,10 +126,8 @@ public class EmailExtracted implements AutopsyVisitableItem {
}
messages.add(artifactId);
}
skCase.closeRunQuery(rs);
} catch (SQLException ex) {
logger.log(Level.WARNING, "Cannot initialize email extraction", ex); //NON-NLS
} catch (TskCoreException | SQLException ex) {
logger.log(Level.WARNING, "Cannot initialize email extraction: ", ex); //NON-NLS
}
}

View File

@ -46,6 +46,8 @@ import org.sleuthkit.datamodel.BlackboardArtifact;
import org.sleuthkit.datamodel.BlackboardArtifact.ARTIFACT_TYPE;
import org.sleuthkit.datamodel.BlackboardAttribute.ATTRIBUTE_TYPE;
import org.sleuthkit.datamodel.SleuthkitCase;
import org.sleuthkit.datamodel.SleuthkitCase.CaseDbQuery;
import org.sleuthkit.datamodel.TskCoreException;
import org.sleuthkit.datamodel.TskException;
/**
@ -99,35 +101,28 @@ public class HashsetHits implements AutopsyVisitableItem {
return;
}
ResultSet rs = null;
try {
int setNameId = ATTRIBUTE_TYPE.TSK_SET_NAME.getTypeID();
int artId = ARTIFACT_TYPE.TSK_HASHSET_HIT.getTypeID();
String query = "SELECT value_text,blackboard_attributes.artifact_id,attribute_type_id " //NON-NLS
+ "FROM blackboard_attributes,blackboard_artifacts WHERE " //NON-NLS
+ "attribute_type_id=" + setNameId //NON-NLS
+ " AND blackboard_attributes.artifact_id=blackboard_artifacts.artifact_id" //NON-NLS
+ " AND blackboard_artifacts.artifact_type_id=" + artId; //NON-NLS
rs = skCase.runQuery(query);
while (rs.next()) {
String setName = rs.getString("value_text"); //NON-NLS
long artifactId = rs.getLong("artifact_id"); //NON-NLS
int setNameId = ATTRIBUTE_TYPE.TSK_SET_NAME.getTypeID();
int artId = ARTIFACT_TYPE.TSK_HASHSET_HIT.getTypeID();
String query = "SELECT value_text,blackboard_attributes.artifact_id,attribute_type_id " //NON-NLS
+ "FROM blackboard_attributes,blackboard_artifacts WHERE " //NON-NLS
+ "attribute_type_id=" + setNameId //NON-NLS
+ " AND blackboard_attributes.artifact_id=blackboard_artifacts.artifact_id" //NON-NLS
+ " AND blackboard_artifacts.artifact_type_id=" + artId; //NON-NLS
try (CaseDbQuery dbQuery = skCase.executeQuery(query)) {
ResultSet resultSet = dbQuery.getResultSet();
while (resultSet.next()) {
String setName = resultSet.getString("value_text"); //NON-NLS
long artifactId = resultSet.getLong("artifact_id"); //NON-NLS
if (!hashSetHitsMap.containsKey(setName)) {
hashSetHitsMap.put(setName, new HashSet<Long>());
}
hashSetHitsMap.get(setName).add(artifactId);
}
} catch (SQLException ex) {
} catch (TskCoreException | SQLException ex) {
logger.log(Level.WARNING, "SQL Exception occurred: ", ex); //NON-NLS
} finally {
if (rs != null) {
try {
skCase.closeRunQuery(rs);
} catch (SQLException ex) {
logger.log(Level.WARNING, "Error closing result set after getting hashset hits", ex); //NON-NLS
}
}
}
setChanged();
notifyObservers();
}

View File

@ -47,6 +47,7 @@ import org.sleuthkit.autopsy.ingest.ModuleDataEvent;
import org.sleuthkit.datamodel.BlackboardArtifact;
import org.sleuthkit.datamodel.BlackboardAttribute;
import org.sleuthkit.datamodel.SleuthkitCase;
import org.sleuthkit.datamodel.SleuthkitCase.CaseDbQuery;
import org.sleuthkit.datamodel.TskCoreException;
@ -94,36 +95,27 @@ public class InterestingHits implements AutopsyVisitableItem {
return;
}
ResultSet rs = null;
try {
int setNameId = BlackboardAttribute.ATTRIBUTE_TYPE.TSK_SET_NAME.getTypeID();
int artId = artType.getTypeID();
String query = "SELECT value_text,blackboard_attributes.artifact_id,attribute_type_id " //NON-NLS
+ "FROM blackboard_attributes,blackboard_artifacts WHERE " //NON-NLS
+ "attribute_type_id=" + setNameId //NON-NLS
+ " AND blackboard_attributes.artifact_id=blackboard_artifacts.artifact_id" //NON-NLS
+ " AND blackboard_artifacts.artifact_type_id=" + artId; //NON-NLS
rs = skCase.runQuery(query);
while (rs.next()) {
String value = rs.getString("value_text"); //NON-NLS
long artifactId = rs.getLong("artifact_id"); //NON-NLS
int setNameId = BlackboardAttribute.ATTRIBUTE_TYPE.TSK_SET_NAME.getTypeID();
int artId = artType.getTypeID();
String query = "SELECT value_text,blackboard_attributes.artifact_id,attribute_type_id " //NON-NLS
+ "FROM blackboard_attributes,blackboard_artifacts WHERE " //NON-NLS
+ "attribute_type_id=" + setNameId //NON-NLS
+ " AND blackboard_attributes.artifact_id=blackboard_artifacts.artifact_id" //NON-NLS
+ " AND blackboard_artifacts.artifact_type_id=" + artId; //NON-NLS
try (CaseDbQuery dbQuery = skCase.executeQuery(query)) {
ResultSet resultSet = dbQuery.getResultSet();
while (resultSet.next()) {
String value = resultSet.getString("value_text"); //NON-NLS
long artifactId = resultSet.getLong("artifact_id"); //NON-NLS
if (!interestingItemsMap.containsKey(value)) {
interestingItemsMap.put(value, new HashSet<>());
}
interestingItemsMap.get(value).add(artifactId);
}
} catch (SQLException ex) {
} catch (TskCoreException | SQLException ex) {
logger.log(Level.WARNING, "SQL Exception occurred: ", ex); //NON-NLS
}
finally {
if (rs != null) {
try {
skCase.closeRunQuery(rs);
} catch (SQLException ex) {
logger.log(Level.WARNING, "Error closing result set after getting artifacts", ex); //NON-NLS
}
}
}
}
}

View File

@ -46,6 +46,7 @@ import org.sleuthkit.datamodel.AbstractFile;
import org.sleuthkit.datamodel.BlackboardArtifact;
import org.sleuthkit.datamodel.BlackboardAttribute;
import org.sleuthkit.datamodel.SleuthkitCase;
import org.sleuthkit.datamodel.SleuthkitCase.CaseDbQuery;
import org.sleuthkit.datamodel.TskCoreException;
import org.sleuthkit.datamodel.TskException;
@ -163,24 +164,24 @@ public class KeywordHits implements AutopsyVisitableItem {
return;
}
ResultSet rs = null;
try {
int setId = BlackboardAttribute.ATTRIBUTE_TYPE.TSK_SET_NAME.getTypeID();
int wordId = BlackboardAttribute.ATTRIBUTE_TYPE.TSK_KEYWORD.getTypeID();
int regexId = BlackboardAttribute.ATTRIBUTE_TYPE.TSK_KEYWORD_REGEXP.getTypeID();
int artId = BlackboardArtifact.ARTIFACT_TYPE.TSK_KEYWORD_HIT.getTypeID();
String query = "SELECT blackboard_attributes.value_text,blackboard_attributes.artifact_id," //NON-NLS
+ "blackboard_attributes.attribute_type_id FROM blackboard_attributes,blackboard_artifacts WHERE " //NON-NLS
+ "(blackboard_attributes.artifact_id=blackboard_artifacts.artifact_id AND " //NON-NLS
+ "blackboard_artifacts.artifact_type_id=" + artId //NON-NLS
+ ") AND (attribute_type_id=" + setId + " OR " //NON-NLS
+ "attribute_type_id=" + wordId + " OR " //NON-NLS
+ "attribute_type_id=" + regexId + ")"; //NON-NLS
rs = skCase.runQuery(query);
while (rs.next()) {
String value = rs.getString("value_text"); //NON-NLS
long artifactId = rs.getLong("artifact_id"); //NON-NLS
long typeId = rs.getLong("attribute_type_id"); //NON-NLS
int setId = BlackboardAttribute.ATTRIBUTE_TYPE.TSK_SET_NAME.getTypeID();
int wordId = BlackboardAttribute.ATTRIBUTE_TYPE.TSK_KEYWORD.getTypeID();
int regexId = BlackboardAttribute.ATTRIBUTE_TYPE.TSK_KEYWORD_REGEXP.getTypeID();
int artId = BlackboardArtifact.ARTIFACT_TYPE.TSK_KEYWORD_HIT.getTypeID();
String query = "SELECT blackboard_attributes.value_text,blackboard_attributes.artifact_id," //NON-NLS
+ "blackboard_attributes.attribute_type_id FROM blackboard_attributes,blackboard_artifacts WHERE " //NON-NLS
+ "(blackboard_attributes.artifact_id=blackboard_artifacts.artifact_id AND " //NON-NLS
+ "blackboard_artifacts.artifact_type_id=" + artId //NON-NLS
+ ") AND (attribute_type_id=" + setId + " OR " //NON-NLS
+ "attribute_type_id=" + wordId + " OR " //NON-NLS
+ "attribute_type_id=" + regexId + ")"; //NON-NLS
try (CaseDbQuery dbQuery = skCase.executeQuery(query)) {
ResultSet resultSet = dbQuery.getResultSet();
while (resultSet.next()) {
String value = resultSet.getString("value_text"); //NON-NLS
long artifactId = resultSet.getLong("artifact_id"); //NON-NLS
long typeId = resultSet.getLong("attribute_type_id"); //NON-NLS
if (!artifactIds.containsKey(artifactId)) {
artifactIds.put(artifactId, new LinkedHashMap<Long, String>());
}
@ -188,17 +189,10 @@ public class KeywordHits implements AutopsyVisitableItem {
artifactIds.get(artifactId).put(typeId, value);
}
}
} catch (SQLException ex) {
} catch (TskCoreException | SQLException ex) {
logger.log(Level.WARNING, "SQL Exception occurred: ", ex); //NON-NLS
} finally {
if (rs != null) {
try {
skCase.closeRunQuery(rs);
} catch (SQLException ex) {
logger.log(Level.WARNING, "Error closing result set after getting keyword hits", ex); //NON-NLS
}
}
}
populateMaps(artifactIds);
}
}

View File

@ -29,6 +29,8 @@ import org.sleuthkit.autopsy.coreutils.Logger;
import org.openide.nodes.ChildFactory;
import org.openide.nodes.Node;
import org.sleuthkit.datamodel.SleuthkitCase;
import org.sleuthkit.datamodel.SleuthkitCase.CaseDbQuery;
import org.sleuthkit.datamodel.TskCoreException;
/**
*
@ -82,21 +84,14 @@ import org.sleuthkit.datamodel.SleuthkitCase;
@SuppressWarnings("deprecation")
private long runTimeQuery(String query) {
long result = 0;
ResultSet rs = null;
try {
rs = skCase.runQuery(query);
result = rs.getLong(1);
} catch (SQLException ex) {
logger.log(Level.WARNING, "Couldn't get recent files results", ex); //NON-NLS
} finally {
if (rs != null) {
try {
skCase.closeRunQuery(rs);
} catch (SQLException ex) {
logger.log(Level.WARNING, "Error closing result set after getting recent files results", ex); //NON-NLS
}
}
try (CaseDbQuery dbQuery = skCase.executeQuery(query)) {
ResultSet resultSet = dbQuery.getResultSet();
result = resultSet.getLong(1);
} catch (TskCoreException | SQLException ex) {
logger.log(Level.WARNING, "Couldn't get recent files results: ", ex); //NON-NLS
}
return result;
}
}

View File

@ -74,7 +74,6 @@ class CacheLocationAnalyzer {
}
private static void findGeoLocationsInFile(File file, AbstractFile f) {
byte[] bytes; // will temporarily hold bytes to be converted into the correct data types
try {
@ -95,7 +94,9 @@ class CacheLocationAnalyzer {
bytes = new byte[1];
inputStream.read(bytes);
while (new BigInteger(bytes).intValue() != 0) { //pass through non-important values until the start of accuracy (around 7-10 bytes)
inputStream.read(bytes);
if (0 > inputStream.read(bytes)) {
break; // we've passed the end of the file, so stop
}
}
bytes = new byte[3];
inputStream.read(bytes);
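The underlying idiom: InputStream.read(byte[]) returns the number of bytes read, or -1 at end of stream, so a scan loop must test the return value before trusting the buffer (for a one-byte buffer, new BigInteger(bytes).intValue() != 0 is equivalent to bytes[0] != 0). A minimal sketch of the guarded loop, with an illustrative stream name:

byte[] bytes = new byte[1];
while (inputStream.read(bytes) >= 0) { // -1 means end of stream
    if (bytes[0] == 0) {
        break;                         // found the 0x00 sentinel
    }
}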

View File

@ -43,6 +43,7 @@ InterestingItemDefsPanel.fileNameRegexCheckbox.text=Regex
InterestingItemDefsPanel.fileNameExtensionRadioButton.text=Extension Only
InterestingItemDefsPanel.fileNameTextField.text=
InterestingItemDefsPanel.fileNameRadioButton.text=File Name
InterestingItemDefsPanel.doFileSetsDialog.duplicateRuleSet.text=Rule set with name {0} already exists.
FilesSetRulePanel.pathSeparatorInfoLabel.text=Use / as path separator
FilesSetRulePanel.filesAndDirsRadioButton.text=Files and Directories
InterestingItemDefsPanel.rulePathFilterTextField.text=

View File

@ -22,6 +22,7 @@ import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import java.util.regex.Pattern;
import org.sleuthkit.datamodel.AbstractFile;
import org.sleuthkit.datamodel.TskData;
@ -135,6 +136,7 @@ final class FilesSet {
*/
static class Rule {
private final String uuid;
private final String ruleName;
private final FileNameFilter fileNameFilter;
private final MetaTypeFilter metaTypeFilter;
@ -150,8 +152,11 @@ final class FilesSet {
* @param pathFilter A file path filter, may be null.
*/
Rule(String ruleName, FileNameFilter fileNameFilter, MetaTypeFilter metaTypeFilter, ParentPathFilter pathFilter) {
// since ruleName is optional, ruleUUID can be used to uniquely identify a rule.
this.uuid = UUID.randomUUID().toString();
if (ruleName == null) {
throw new NullPointerException("Interesting files set rule name cannot be null");
throw new IllegalArgumentException("Interesting files set rule name cannot be null");
}
if (fileNameFilter == null) {
throw new IllegalArgumentException("Interesting files set rule file name filter cannot be null");
@ -235,6 +240,13 @@ final class FilesSet {
return this.ruleName + " (" + fileNameFilter.getTextToMatch() + ")";
}
/**
* @return the ruleUUID
*/
public String getUuid() {
return this.uuid;
}
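Why a UUID key: the rule name is user-supplied and may repeat, so a randomly generated UUID gives each rule a collision-free map key. A tiny illustration with hypothetical rule names:

import java.util.HashMap;
import java.util.Map;
import java.util.UUID;

public class RuleKeyDemo {
    public static void main(String[] args) {
        Map<String, String> rules = new HashMap<>();
        // Two rules with the same display name no longer overwrite each other.
        rules.put(UUID.randomUUID().toString(), "Images rule");
        rules.put(UUID.randomUUID().toString(), "Images rule");
        System.out.println(rules.size()); // 2
    }
}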
/**
* An interface for the file attribute filters of which interesting
* files set membership rules are composed.
@ -345,8 +357,11 @@ final class FilesSet {
*
* @param text The text to be matched.
*/
AbstractTextFilter(String text) {
this.textMatcher = new FilesSet.Rule.CaseInsensitiveStringComparisionMatcher(text);
AbstractTextFilter(String text, Boolean partialMatch) {
if(partialMatch)
this.textMatcher = new FilesSet.Rule.CaseInsensitivePartialStringComparisionMatcher(text);
else
this.textMatcher = new FilesSet.Rule.CaseInsensitiveStringComparisionMatcher(text);
}
/**
@ -412,7 +427,7 @@ final class FilesSet {
* @param path The path to be matched.
*/
ParentPathFilter(String path) {
super(path);
super(path, true);
}
/**
@ -429,7 +444,7 @@ final class FilesSet {
*/
@Override
public boolean passes(AbstractFile file) {
return this.textMatches(file.getParentPath());
return this.textMatches(file.getParentPath() + "/");
}
}
@ -454,7 +469,7 @@ final class FilesSet {
* @param name The file name to be matched.
*/
FullNameFilter(String name) {
super(name);
super(name, false);
}
/**
@ -492,7 +507,7 @@ final class FilesSet {
// If there is a leading ".", strip it since
// AbstractFile.getFileNameExtension() returns just the
// extension chars and not the dot.
super(extension.startsWith(".") ? extension.substring(1) : extension);
super(extension.startsWith(".") ? extension.substring(1) : extension, false);
}
/**
@ -502,7 +517,7 @@ final class FilesSet {
* matched.
*/
ExtensionFilter(Pattern extension) {
super(extension.pattern());
super(extension.pattern(), false);
}
/**
@ -590,6 +605,50 @@ final class FilesSet {
}
/**
* A text matcher that does a case-insensitive partial (substring) string comparison.
*/
private static class CaseInsensitivePartialStringComparisionMatcher implements TextMatcher {
private final String textToMatch;
private final Pattern pattern;
/**
* Construct a text matcher that does a case-insensitive string
* comparison.
*
* @param textToMatch The text to match.
*/
CaseInsensitivePartialStringComparisionMatcher(String textToMatch) {
this.textToMatch = textToMatch;
this.pattern = Pattern.compile(Pattern.quote(textToMatch), Pattern.CASE_INSENSITIVE);
}
/**
* @inheritDoc
*/
@Override
public String getTextToMatch() {
return this.textToMatch;
}
/**
* @inheritDoc
*/
@Override
public boolean isRegex() {
return false;
}
/**
* @inheritDoc
*/
@Override
public boolean textMatches(String subject) {
return pattern.matcher(subject).find();
}
}
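The design choice here: Pattern.quote() treats the filter text as a literal rather than a regex, and Matcher.find() accepts a match anywhere in the subject. That is why ParentPathFilter can pass partialMatch=true and append "/" to the parent path, so trailing-slash filters also match the final directory. A small standalone illustration with hypothetical paths:

import java.util.regex.Pattern;

public class PartialMatchDemo {
    public static void main(String[] args) {
        Pattern p = Pattern.compile(Pattern.quote("cache/"), Pattern.CASE_INSENSITIVE);
        System.out.println(p.matcher("/data/app/Cache/wifi/").find()); // true: literal substring, any case
        System.out.println(p.matcher("/data/app/maps/").find());       // false
    }
}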
/**
* A text matcher that does regular expression matching.
*/

View File

@ -121,6 +121,7 @@ final class InterestingItemDefsManager extends Observable {
private static final String NAME_RULE_TAG = "NAME"; //NON-NLS
private static final String EXTENSION_RULE_TAG = "EXTENSION"; //NON-NLS
private static final String NAME_ATTR = "name"; //NON-NLS
private static final String RULE_UUID_ATTR = "ruleUUID"; //NON-NLS
private static final String DESC_ATTR = "description"; //NON-NLS
private static final String IGNORE_KNOWN_FILES_ATTR = "ignoreKnown"; //NON-NLS
private static final String TYPE_FILTER_ATTR = "typeFilter"; //NON-NLS
@ -234,10 +235,10 @@ final class InterestingItemDefsManager extends Observable {
Element elem = (Element) nameRuleElems.item(j);
FilesSet.Rule rule = FilesSetXML.readFileNameRule(elem);
if (rule != null) {
if (!rules.containsKey(rule.getName())) {
rules.put(rule.getName(), rule);
if (!rules.containsKey(rule.getUuid())) {
rules.put(rule.getUuid(), rule);
} else {
logger.log(Level.SEVERE, "Found duplicate rule {0} for set named {1} in interesting file sets definition file at {2}, discarding malformed set", new Object[]{rule.getName(), setName, filePath}); // NON-NLS
logger.log(Level.SEVERE, "Found duplicate rule {0} for set named {1} in interesting file sets definition file at {2}, discarding malformed set", new Object[]{rule.getUuid(), setName, filePath}); // NON-NLS
return;
}
} else {
@ -252,10 +253,10 @@ final class InterestingItemDefsManager extends Observable {
Element elem = (Element) extRuleElems.item(j);
FilesSet.Rule rule = FilesSetXML.readFileExtensionRule(elem);
if (rule != null) {
if (!rules.containsKey(rule.getName())) {
rules.put(rule.getName(), rule);
if (!rules.containsKey(rule.getUuid())) {
rules.put(rule.getUuid(), rule);
} else {
logger.log(Level.SEVERE, "Found duplicate rule {0} for set named {1} in interesting file sets definition file at {2}, discarding malformed set", new Object[]{rule.getName(), setName, filePath}); //NOI18N
logger.log(Level.SEVERE, "Found duplicate rule {0} for set named {1} in interesting file sets definition file at {2}, discarding malformed set", new Object[]{rule.getUuid(), setName, filePath}); //NOI18N
return;
}
} else {
@ -517,6 +518,7 @@ final class InterestingItemDefsManager extends Observable {
ruleElement = doc.createElement(FilesSetXML.EXTENSION_RULE_TAG);
}
// Add the rule name attribute.
ruleElement.setAttribute(FilesSetXML.NAME_ATTR, rule.getName());

View File

@ -29,6 +29,7 @@ import javax.swing.event.ListSelectionEvent;
import javax.swing.event.ListSelectionListener;
import org.openide.util.NbBundle;
import org.sleuthkit.autopsy.corecomponents.OptionsPanel;
import org.sleuthkit.autopsy.coreutils.MessageNotifyUtil;
import org.sleuthkit.autopsy.ingest.IngestModuleGlobalSettingsPanel;
/**
@ -255,6 +256,14 @@ final class InterestingItemDefsPanel extends IngestModuleGlobalSettingsPanel imp
option = JOptionPane.showConfirmDialog(null, panel, NbBundle.getMessage(FilesSetPanel.class, "FilesSetPanel.title"), JOptionPane.OK_CANCEL_OPTION, JOptionPane.PLAIN_MESSAGE);
} while (option == JOptionPane.OK_OPTION && !panel.isValidDefinition());
// If rule set with same name already exists, do not add to the filesSets hashMap.
if(this.filesSets.containsKey(panel.getFilesSetName())) {
MessageNotifyUtil.Message.error(NbBundle.getMessage(this.getClass(),
"InterestingItemDefsPanel.doFileSetsDialog.duplicateRuleSet.text",
panel.getFilesSetName()));
return;
}
if (option == JOptionPane.OK_OPTION) {
Map<String, FilesSet.Rule> rules = new HashMap<>();
if (selectedSet != null) {
@ -303,7 +312,7 @@ final class InterestingItemDefsPanel extends IngestModuleGlobalSettingsPanel imp
// Remove the "old" rule definition and add the new/edited
// definition.
if (selectedRule != null) {
rules.remove(selectedRule.getName());
rules.remove(selectedRule.getUuid());
}
FilesSet.Rule newRule = new FilesSet.Rule(panel.getRuleName(), panel.getFileNameFilter(), panel.getMetaTypeFilter(), panel.getPathFilter());
rules.put(Integer.toString(newRule.hashCode()), newRule);
@ -725,7 +734,7 @@ final class InterestingItemDefsPanel extends IngestModuleGlobalSettingsPanel imp
FilesSet oldSet = this.setsList.getSelectedValue();
Map<String, FilesSet.Rule> rules = new HashMap<>(oldSet.getRules());
FilesSet.Rule selectedRule = this.rulesList.getSelectedValue();
rules.remove(selectedRule.getName());
rules.remove(selectedRule.getUuid());
this.replaceFilesSet(oldSet, oldSet.getName(), oldSet.getDescription(), oldSet.ignoresKnownFiles(), rules);
}//GEN-LAST:event_deleteRuleButtonActionPerformed

View File

@ -63,6 +63,7 @@ import org.sleuthkit.datamodel.BlackboardAttribute.ATTRIBUTE_TYPE;
import org.sleuthkit.datamodel.Content;
import org.sleuthkit.datamodel.ContentTag;
import org.sleuthkit.datamodel.SleuthkitCase;
import org.sleuthkit.datamodel.SleuthkitCase.CaseDbQuery;
import org.sleuthkit.datamodel.TskCoreException;
import org.sleuthkit.datamodel.TskData;
@ -892,19 +893,22 @@ import org.sleuthkit.datamodel.TskData;
*/
@SuppressWarnings("deprecation")
private void writeKeywordHits(List<TableReportModule> tableModules, String comment, HashSet<String> tagNamesFilter) {
ResultSet listsRs = null;
try {
// Query for keyword lists-only so that we can tell modules what lists
// will exist for their index.
// @@@ There is a bug in here. We should use the tags in the below code
// so that we only report the lists that we will later provide with real
// hits. If no keyword hits are tagged, then we make the page for nothing.
listsRs = skCase.runQuery("SELECT att.value_text AS list " + //NON-NLS
"FROM blackboard_attributes AS att, blackboard_artifacts AS art " + //NON-NLS
"WHERE att.attribute_type_id = " + ATTRIBUTE_TYPE.TSK_SET_NAME.getTypeID() + " " + //NON-NLS
"AND art.artifact_type_id = " + ARTIFACT_TYPE.TSK_KEYWORD_HIT.getTypeID() + " " + //NON-NLS
"AND att.artifact_id = art.artifact_id " + //NON-NLS
"GROUP BY list"); //NON-NLS
// Query for keyword lists-only so that we can tell modules what lists
// will exist for their index.
// @@@ There is a bug in here. We should use the tags in the below code
// so that we only report the lists that we will later provide with real
// hits. If no keyword hits are tagged, then we make the page for nothing.
String keywordListQuery =
"SELECT att.value_text AS list " + //NON-NLS
"FROM blackboard_attributes AS att, blackboard_artifacts AS art " + //NON-NLS
"WHERE att.attribute_type_id = " + ATTRIBUTE_TYPE.TSK_SET_NAME.getTypeID() + " " + //NON-NLS
"AND art.artifact_type_id = " + ARTIFACT_TYPE.TSK_KEYWORD_HIT.getTypeID() + " " + //NON-NLS
"AND att.artifact_id = art.artifact_id " + //NON-NLS
"GROUP BY list"; //NON-NLS
try (CaseDbQuery dbQuery = skCase.executeQuery(keywordListQuery)) {
ResultSet listsRs = dbQuery.getResultSet();
List<String> lists = new ArrayList<>();
while(listsRs.next()) {
String list = listsRs.getString("list"); //NON-NLS
@ -923,36 +927,32 @@ import org.sleuthkit.datamodel.TskData;
ARTIFACT_TYPE.TSK_KEYWORD_HIT.getDisplayName()));
}
}
catch (SQLException ex) {
catch (TskCoreException | SQLException ex) {
errorList.add(NbBundle.getMessage(this.getClass(), "ReportGenerator.errList.failedQueryKWLists"));
logger.log(Level.SEVERE, "Failed to query keyword lists.", ex); //NON-NLS
logger.log(Level.SEVERE, "Failed to query keyword lists: ", ex); //NON-NLS
return;
} finally {
if (listsRs != null) {
try {
skCase.closeRunQuery(listsRs);
} catch (SQLException ex) {
}
}
}
ResultSet rs = null;
try {
// Query for keywords, grouped by list
rs = skCase.runQuery("SELECT art.artifact_id, art.obj_id, att1.value_text AS keyword, att2.value_text AS preview, att3.value_text AS list, f.name AS name, f.parent_path AS parent_path " + //NON-NLS
"FROM blackboard_artifacts AS art, blackboard_attributes AS att1, blackboard_attributes AS att2, blackboard_attributes AS att3, tsk_files AS f " + //NON-NLS
"WHERE (att1.artifact_id = art.artifact_id) " + //NON-NLS
"AND (att2.artifact_id = art.artifact_id) " + //NON-NLS
"AND (att3.artifact_id = art.artifact_id) " + //NON-NLS
"AND (f.obj_id = art.obj_id) " + //NON-NLS
"AND (att1.attribute_type_id = " + ATTRIBUTE_TYPE.TSK_KEYWORD.getTypeID() + ") " + //NON-NLS
"AND (att2.attribute_type_id = " + ATTRIBUTE_TYPE.TSK_KEYWORD_PREVIEW.getTypeID() + ") " + //NON-NLS
"AND (att3.attribute_type_id = " + ATTRIBUTE_TYPE.TSK_SET_NAME.getTypeID() + ") " + //NON-NLS
"AND (art.artifact_type_id = " + ARTIFACT_TYPE.TSK_KEYWORD_HIT.getTypeID() + ") " + //NON-NLS
"ORDER BY list, keyword, parent_path, name"); //NON-NLS
// Query for keywords, grouped by list
String keywordsQuery =
"SELECT art.artifact_id, art.obj_id, att1.value_text AS keyword, att2.value_text AS preview, att3.value_text AS list, f.name AS name, f.parent_path AS parent_path " + //NON-NLS
"FROM blackboard_artifacts AS art, blackboard_attributes AS att1, blackboard_attributes AS att2, blackboard_attributes AS att3, tsk_files AS f " + //NON-NLS
"WHERE (att1.artifact_id = art.artifact_id) " + //NON-NLS
"AND (att2.artifact_id = art.artifact_id) " + //NON-NLS
"AND (att3.artifact_id = art.artifact_id) " + //NON-NLS
"AND (f.obj_id = art.obj_id) " + //NON-NLS
"AND (att1.attribute_type_id = " + ATTRIBUTE_TYPE.TSK_KEYWORD.getTypeID() + ") " + //NON-NLS
"AND (att2.attribute_type_id = " + ATTRIBUTE_TYPE.TSK_KEYWORD_PREVIEW.getTypeID() + ") " + //NON-NLS
"AND (att3.attribute_type_id = " + ATTRIBUTE_TYPE.TSK_SET_NAME.getTypeID() + ") " + //NON-NLS
"AND (art.artifact_type_id = " + ARTIFACT_TYPE.TSK_KEYWORD_HIT.getTypeID() + ") " + //NON-NLS
"ORDER BY list, keyword, parent_path, name"; //NON-NLS
try (CaseDbQuery dbQuery = skCase.executeQuery(keywordsQuery)) {
ResultSet resultSet = dbQuery.getResultSet();
String currentKeyword = "";
String currentList = "";
while (rs.next()) {
while (resultSet.next()) {
// Check to see if all the TableReportModules have been canceled
if (tableModules.isEmpty()) {
break;
@ -966,16 +966,16 @@ import org.sleuthkit.datamodel.TskData;
}
// Get any tags that associated with this artifact and apply the tag filter.
HashSet<String> uniqueTagNames = getUniqueTagNames(rs.getLong("artifact_id")); //NON-NLS
HashSet<String> uniqueTagNames = getUniqueTagNames(resultSet.getLong("artifact_id")); //NON-NLS
if(failsTagFilter(uniqueTagNames, tagNamesFilter)) {
continue;
}
String tagsList = makeCommaSeparatedList(uniqueTagNames);
Long objId = rs.getLong("obj_id"); //NON-NLS
String keyword = rs.getString("keyword"); //NON-NLS
String preview = rs.getString("preview"); //NON-NLS
String list = rs.getString("list"); //NON-NLS
Long objId = resultSet.getLong("obj_id"); //NON-NLS
String keyword = resultSet.getString("keyword"); //NON-NLS
String preview = resultSet.getString("preview"); //NON-NLS
String list = resultSet.getString("list"); //NON-NLS
String uniquePath = "";
try {
@ -1029,16 +1029,9 @@ import org.sleuthkit.datamodel.TskData;
tableProgress.get(module).increment();
module.endDataType();
}
} catch (SQLException ex) {
} catch (TskCoreException | SQLException ex) {
errorList.add(NbBundle.getMessage(this.getClass(), "ReportGenerator.errList.failedQueryKWs"));
logger.log(Level.SEVERE, "Failed to query keywords.", ex); //NON-NLS
} finally {
if (rs != null) {
try {
skCase.closeRunQuery(rs);
} catch (SQLException ex) {
}
}
logger.log(Level.SEVERE, "Failed to query keywords: ", ex); //NON-NLS
}
}
@ -1048,15 +1041,17 @@ import org.sleuthkit.datamodel.TskData;
*/
@SuppressWarnings("deprecation")
private void writeHashsetHits(List<TableReportModule> tableModules, String comment, HashSet<String> tagNamesFilter) {
ResultSet listsRs = null;
try {
String hashsetsQuery =
"SELECT att.value_text AS list " + //NON-NLS
"FROM blackboard_attributes AS att, blackboard_artifacts AS art " + //NON-NLS
"WHERE att.attribute_type_id = " + ATTRIBUTE_TYPE.TSK_SET_NAME.getTypeID() + " " + //NON-NLS
"AND art.artifact_type_id = " + ARTIFACT_TYPE.TSK_HASHSET_HIT.getTypeID() + " " + //NON-NLS
"AND att.artifact_id = art.artifact_id " + //NON-NLS
"GROUP BY list"; //NON-NLS
try (CaseDbQuery dbQuery = skCase.executeQuery(hashsetsQuery)) {
// Query for hashsets
listsRs = skCase.runQuery("SELECT att.value_text AS list " + //NON-NLS
"FROM blackboard_attributes AS att, blackboard_artifacts AS art " + //NON-NLS
"WHERE att.attribute_type_id = " + ATTRIBUTE_TYPE.TSK_SET_NAME.getTypeID() + " " + //NON-NLS
"AND art.artifact_type_id = " + ARTIFACT_TYPE.TSK_HASHSET_HIT.getTypeID() + " " + //NON-NLS
"AND att.artifact_id = art.artifact_id " + //NON-NLS
"GROUP BY list"); //NON-NLS
ResultSet listsRs = dbQuery.getResultSet();
List<String> lists = new ArrayList<>();
while(listsRs.next()) {
lists.add(listsRs.getString("list")); //NON-NLS
@ -1069,31 +1064,26 @@ import org.sleuthkit.datamodel.TskData;
NbBundle.getMessage(this.getClass(), "ReportGenerator.progress.processing",
ARTIFACT_TYPE.TSK_HASHSET_HIT.getDisplayName()));
}
} catch (SQLException ex) {
} catch (TskCoreException | SQLException ex) {
errorList.add(NbBundle.getMessage(this.getClass(), "ReportGenerator.errList.failedQueryHashsetLists"));
logger.log(Level.SEVERE, "Failed to query hashset lists.", ex); //NON-NLS
logger.log(Level.SEVERE, "Failed to query hashset lists: ", ex); //NON-NLS
return;
} finally {
if (listsRs != null) {
try {
skCase.closeRunQuery(listsRs);
} catch (SQLException ex) {
}
}
}
ResultSet rs = null;
try {
String hashsetHitsQuery =
"SELECT art.artifact_id, art.obj_id, att.value_text AS setname, f.name AS name, f.size AS size, f.parent_path AS parent_path " + //NON-NLS
"FROM blackboard_artifacts AS art, blackboard_attributes AS att, tsk_files AS f " + //NON-NLS
"WHERE (att.artifact_id = art.artifact_id) " + //NON-NLS
"AND (f.obj_id = art.obj_id) " + //NON-NLS
"AND (att.attribute_type_id = " + ATTRIBUTE_TYPE.TSK_SET_NAME.getTypeID() + ") " + //NON-NLS
"AND (art.artifact_type_id = " + ARTIFACT_TYPE.TSK_HASHSET_HIT.getTypeID() + ") " + //NON-NLS
"ORDER BY setname, parent_path, name, size"; //NON-NLS
try (CaseDbQuery dbQuery = skCase.executeQuery(hashsetHitsQuery)) {
// Query for hashset hits
rs = skCase.runQuery("SELECT art.artifact_id, art.obj_id, att.value_text AS setname, f.name AS name, f.size AS size, f.parent_path AS parent_path " + //NON-NLS
"FROM blackboard_artifacts AS art, blackboard_attributes AS att, tsk_files AS f " + //NON-NLS
"WHERE (att.artifact_id = art.artifact_id) " + //NON-NLS
"AND (f.obj_id = art.obj_id) " + //NON-NLS
"AND (att.attribute_type_id = " + ATTRIBUTE_TYPE.TSK_SET_NAME.getTypeID() + ") " + //NON-NLS
"AND (art.artifact_type_id = " + ARTIFACT_TYPE.TSK_HASHSET_HIT.getTypeID() + ") " + //NON-NLS
"ORDER BY setname, parent_path, name, size"); //NON-NLS
ResultSet resultSet = dbQuery.getResultSet();
String currentSet = "";
while (rs.next()) {
while (resultSet.next()) {
// Check to see if all the TableReportModules have been canceled
if (tableModules.isEmpty()) {
break;
@ -1107,15 +1097,15 @@ import org.sleuthkit.datamodel.TskData;
}
// Get any tags that associated with this artifact and apply the tag filter.
HashSet<String> uniqueTagNames = getUniqueTagNames(rs.getLong("artifact_id")); //NON-NLS
HashSet<String> uniqueTagNames = getUniqueTagNames(resultSet.getLong("artifact_id")); //NON-NLS
if(failsTagFilter(uniqueTagNames, tagNamesFilter)) {
continue;
}
String tagsList = makeCommaSeparatedList(uniqueTagNames);
Long objId = rs.getLong("obj_id"); //NON-NLS
String set = rs.getString("setname"); //NON-NLS
String size = rs.getString("size"); //NON-NLS
Long objId = resultSet.getLong("obj_id"); //NON-NLS
String set = resultSet.getString("setname"); //NON-NLS
String size = resultSet.getString("size"); //NON-NLS
String uniquePath = "";
try {
@ -1156,16 +1146,9 @@ import org.sleuthkit.datamodel.TskData;
tableProgress.get(module).increment();
module.endDataType();
}
} catch (SQLException ex) {
} catch (TskCoreException | SQLException ex) {
errorList.add(NbBundle.getMessage(this.getClass(), "ReportGenerator.errList.failedQueryHashsetHits"));
logger.log(Level.SEVERE, "Failed to query hashsets hits.", ex); //NON-NLS
} finally {
if (rs != null) {
try {
skCase.closeRunQuery(rs);
} catch (SQLException ex) {
}
}
logger.log(Level.SEVERE, "Failed to query hashsets hits: ", ex); //NON-NLS
}
}
@ -1878,14 +1861,22 @@ import org.sleuthkit.datamodel.TskData;
* @throws SQLException
*/
@SuppressWarnings("deprecation")
private HashSet<String> getUniqueTagNames(long artifactId) throws SQLException {
private HashSet<String> getUniqueTagNames(long artifactId) throws TskCoreException {
HashSet<String> uniqueTagNames = new HashSet<>();
ResultSet tagNameRows = skCase.runQuery("SELECT display_name, artifact_id FROM tag_names AS tn, blackboard_artifact_tags AS bat " + //NON-NLS
"WHERE tn.tag_name_id = bat.tag_name_id AND bat.artifact_id = " + artifactId); //NON-NLS
while (tagNameRows.next()) {
uniqueTagNames.add(tagNameRows.getString("display_name")); //NON-NLS
String query = "SELECT display_name, artifact_id FROM tag_names AS tn, blackboard_artifact_tags AS bat " + //NON-NLS
"WHERE tn.tag_name_id = bat.tag_name_id AND bat.artifact_id = " + artifactId; //NON-NLS
try (CaseDbQuery dbQuery = skCase.executeQuery(query)) {
ResultSet tagNameRows = dbQuery.getResultSet();
while (tagNameRows.next()) {
uniqueTagNames.add(tagNameRows.getString("display_name")); //NON-NLS
}
}
skCase.closeRunQuery(tagNameRows);
catch (TskCoreException | SQLException ex) {
throw new TskCoreException("Error getting tag names for artifact: ", ex);
}
return uniqueTagNames;
}

View File

@ -78,6 +78,7 @@ import org.sleuthkit.autopsy.timeline.zooming.DescriptionLOD;
import org.sleuthkit.autopsy.timeline.zooming.EventTypeZoomLevel;
import org.sleuthkit.autopsy.timeline.zooming.ZoomParams;
import org.sleuthkit.datamodel.SleuthkitCase;
import org.sleuthkit.datamodel.SleuthkitCase.CaseDbQuery;
import org.sleuthkit.datamodel.TskCoreException;
/** Controller in the MVC design along with model = {@link FilteredEventsModel}
@ -357,13 +358,15 @@ public class TimeLineController {
@SuppressWarnings("deprecation")
private long getCaseLastArtifactID(final SleuthkitCase sleuthkitCase) {
long caseLastArtfId = -1;
try (ResultSet runQuery = sleuthkitCase.runQuery("select Max(artifact_id) as max_id from blackboard_artifacts")) { // NON-NLS
while (runQuery.next()) {
caseLastArtfId = runQuery.getLong("max_id"); // NON-NLS
String query = "select Max(artifact_id) as max_id from blackboard_artifacts"; // NON-NLS
try (CaseDbQuery dbQuery = sleuthkitCase.executeQuery(query)) {
ResultSet resultSet = dbQuery.getResultSet();
while (resultSet.next()) {
caseLastArtfId = resultSet.getLong("max_id"); // NON-NLS
}
sleuthkitCase.closeRunQuery(runQuery);
} catch (SQLException ex) {
Exceptions.printStackTrace(ex);
} catch (TskCoreException | SQLException ex) {
LOGGER.log(Level.SEVERE, "Error getting last artifact id: ", ex); // NON-NLS
}
return caseLastArtfId;
}

View File

@ -69,9 +69,12 @@
<copy file="${basedir}/LICENSE-2.0.txt" tofile="${zip-tmp}/${app.name}/LICENSE-2.0.txt"/>
<copy file="${basedir}/NEWS.txt" tofile="${zip-tmp}/${app.name}/NEWS.txt"/>
<copy file="${basedir}/KNOWN_ISSUES.txt" tofile="${zip-tmp}/${app.name}/KNOWN_ISSUES.txt"/>
<unzip src="${thirdparty.dir}/gstreamer/${os.family}/i386/0.10.7/gstreamer.zip" dest="${zip-tmp}/${app.name}/gstreamer"/>
<copy file="${basedir}/icons/icon.ico" tofile="${zip-tmp}/${app.name}/icon.ico" overwrite="true"/>
<!-- Copy the Autopsy documentation to the docs folder -->
<copy flatten="true" todir="${zip-tmp}/${app.name}/docs">
<fileset dir="${basedir}/docs/doxygen-user/user-docs"/>
</copy>
<antcall target="copyLibsToZip"/>
@ -229,7 +232,7 @@
<target name="versioning-script" depends="check-release, versioning-script-if-release, versioning-script-if-not-release"/>
<target name="build-installer" depends="getProps, build-zip" description="Builds Autopsy installer.">
<target name="build-installer" depends="getProps, doxygen, build-zip" description="Builds Autopsy installer.">
<delete dir="${nbdist.dir}/${app.name}-installer" quiet="true"/>
<unzip src="${nbdist.dir}/${app.name}-${app.version}.zip" dest="${nbdist.dir}/${app.name}-installer"/>
<antcall target="build-installer-${os.family}" />

docs/doxygen-user/Doxyfile Normal file → Executable file
View File

@ -1372,7 +1372,7 @@ DISABLE_INDEX = NO
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
GENERATE_TREEVIEW = YES
GENERATE_TREEVIEW = NO
# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that
# doxygen will group on one line in the generated HTML documentation.
@ -1486,7 +1486,7 @@ MATHJAX_CODEFILE =
# The default value is: YES.
# This tag requires that the tag GENERATE_HTML is set to YES.
SEARCHENGINE = YES
SEARCHENGINE = NO
# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
# implemented using a web server instead of a web client using Javascript. There
@ -2117,7 +2117,7 @@ DOT_NUM_THREADS = 0
# The default value is: Helvetica.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_FONTNAME = FreeSans
DOT_FONTNAME =
# The DOT_FONTSIZE tag can be used to set the size (in points) of the font of
# dot graphs.

View File

@ -0,0 +1,29 @@
/*! \page EXIF_parser_page EXIF Parser Module
What Does It Do
========
The EXIF Parser module extracts EXIF (Exchangeable Image File Format) information from ingested pictures. This information can contain geolocation data for the picture, the time and date it was taken, the camera model and settings (exposure values, resolution, etc.), and other information. The discovered attributes are added to the blackboard.
This can tell you where and when a picture was taken, and give clues to the camera that took it.
Configuration
=======
There is no configuration required.
Using the Module
======
Select the checkbox in the Ingest Modules settings screen to enable the EXIF Parser.
Ingest Settings
------
There are no runtime ingest settings required.
Seeing Results
------
Results are shown in the Results tree.
\image html EXIF-tree.PNG
*/

docs/doxygen-user/README.txt Normal file → Executable file
View File

View File

@ -0,0 +1,47 @@
/*! \page android_analyzer_page Android Analyzer Module
What Does It Do
========
The Android Analyzer module allows you to analyze SQLite and other files from an Android device. It works on physical dumps from most Android devices (note that we do not provide an acquisition method). Autopsy does not support older Android devices that lack a volume system; such devices often ship as a single physical image file with no information describing the layout of the file systems, so Autopsy cannot detect what the image contains.
The module should be able to extract the following:
- Text messages / SMS / MMS
- Call Logs
- Contacts
- Tango Messages
- Words with Friends Messages
- GPS from the browser and Google Maps
- GPS from cache.wifi and cache.cell files
NOTE: These database formats vary by OS version, and different vendors can place the databases in different places. Autopsy may not support all versions and vendors.
NOTE: This module is not exhaustive with its support for Android. It was created as a starting point for others to contribute plug-ins for 3rd party apps. See the <a href="http://sleuthkit.org/autopsy/docs/api-docs/3.1/mod_mobile_page.html">Developer docs</a> for information on writing modules.
Configuration
=======
There is no configuration required.
Using the Module
======
Simply add your physical images or file system dumps as data sources and enable the Android Analyzer module.
Ingest Settings
------
There are no runtime ingest settings required.
Seeing Results
------
The results show up in the tree under "Results", "Extracted Content".
\image html android_analyzer_output.PNG
*/

View File

@ -0,0 +1,33 @@
/*! \page archive_extractor_page Archive Extractor Module
What Does It Do
========
The Archive Extractor module opens ZIP, RAR, and other archive formats and sends the files from those archive files back through the ingest pipeline for analysis.
This module expands archive files to enable Autopsy to analyze all files on the system. It enables keyword search and hash lookup to analyze files inside of archives.
Configuration
=======
There is no configuration required.
Using the Module
======
Select the checkbox in the Ingest Modules settings screen to enable the Archive Extractor.
Ingest Settings
------
There are no runtime ingest settings required.
Seeing Results
------
Each file extracted shows up in the data source tree view as a child of the archive containing it,
\image html zipped_children_1.PNG
<br>
<br>
and as an archive under "Views", "File Types", "Archives".
\image html zipped_children_2.PNG
*/

docs/doxygen-user/case_management.dox Normal file → Executable file
View File

@ -2,15 +2,17 @@
You need to create a case before you can analyze data in Autopsy. A case can contain one or more data sources (disk images, disk devices, logical files). The data sources can be from multiple drives in a single computer or from multiple computers. It's up to you.
Each case has its own directory that is named based on the case name. The directory will contain configuration files, a database, reports, and other files that modules generate. The main Autopsy case configuration file has a .aut extension.
Each case has its own directory that is named based on the case name. The directory will contain configuration files, a database, reports, and other files that modules generate. The main Autopsy case configuration file has an ".aut" extension.
\section case_create Creating a Case
There are several ways to create a new case:
- The opening window has a button to create a new case.
- The "File" -> "New Case..." menu item
\image html splashscreen.PNG
The "New Case" wizard dialog will open and you will need to enter the case name and base directory. A directory for the case will be created inside of the "base directory". If the directory already exists, you will need to either delete the existing directory or choose a different combination of names.
There are several ways to create a new case:
- The opening splash screen has a button to create a new case.
- The "File", "Create New Case" menu item
The New Case wizard dialog will open and you will need to enter the case name and base directory. A directory for the case will be created inside of the "base directory". If the directory already exists, you will need to either delete the existing directory or choose a different combination of names.
\image html case-newcase.png
@ -21,8 +23,8 @@ After you create the case, you will be prompted to add a data source, as describ
\section case_open Opening a Case
To open a case, either:
- Choose "Open Case" or "Open Recent Case" from the opening window.
- The "File" -> "Open Case" menu item or "File" -> "Open Recent Case"
- Choose "Open Existing Case" or "Open Recent Case" from the opening splash screen.
- Choose the "File", "Open Case" menu item or "File", "Open Recent Case"
Navigate to the case directory and select the ".aut" file.

View File

@ -0,0 +1,23 @@
/*! \page content_viewer_page Content Viewer
The Content Viewer lives in the lower right-hand side of the Autopsy main screen and shows pictures, video, hex, text, extracted strings, metadata, etc. It is enabled when you select a file in the file list above it.
The Content Viewer is context-aware, meaning it will present different views of the content based on the type of file selected. For example, a .JPG would show up as a picture, a text file would show up as text, and a .bin file would show up as hex output.
The screenshots below show some examples of content viewers in action.
<br>
\image html content-viewer-1.PNG
<br>
<br>
\image html content-viewer-2.PNG
<br>
<br>
\image html content-viewer-3.PNG
<br>
<br>
\image html content-viewer-4.PNG
<br>
<br>
\image html content-viewer-5.PNG
<br>
*/

docs/doxygen-user/data_sources.dox Normal file → Executable file
View File

@ -1,7 +1,7 @@
/*! \page ds_page Data Sources
Data source is the term that we use in Autopsy to refer to disk images, logical files, etc. This is the data that you want to add in to analyze. You must have a case open before you can add a data source.
A data source is the thing you want to analyze. It can be a disk image, some logical files, a local drive, etc. You must open a case prior to adding a data source to Autopsy.
Autopsy supports three types of data sources:
- Disk Image: A file (or set of files) that is a byte-for-byte copy of a hard drive or media card. (see \ref ds_img)
@ -15,13 +15,9 @@ Autopsy supports three types of data sources:
You can add a data source in several ways:
- After you create a case, it automatically prompts you to add a data source.
- There is a toolbar item to add a Data Source when a case is open.
- The "File" -> "Add Data Source" menu item when a case is open.
- The "File", "Add Data Source" menu item when a case is open.
The data source must remain accessible for the duration of the analysis because the case contains only a reference to the data source. It does not copy the data source into the case folder.
\section ds_process Data Source Adding Process
The data source must remain accessible for the duration of the analysis because the case contains a reference to the data source. It does <b>not</b> copy the data source into the case folder.
Regardless of the type of data source, there are some common steps in the process:
@ -41,12 +37,10 @@ Regardless of the type of data source, there are some common steps in the proces
5) After the ingest modules have been configured and the basic examination of the data source is complete, the ingest modules will begin to analyze the file contents.
You cannot remove a data source from a case.
\section ds_img Adding a Disk Image
Supported Image Formats
Autopsy supports disk images in the following formats:
- Raw Single (For example: *.img, *.dd, *.raw, etc)
- Raw Split (For example: *.001, *.002, *.aa, *.ab, etc)
@ -55,7 +49,7 @@ Autopsy supports disk images in the following formats:
To add a disk image:
-# Choose "Image File" from the pull down.
-# Browse to the first file in the disk image. You need to specify only the first file and it will find the rest.
-# Browse to the first file in the disk image. You need to specify only the first file and Autopsy will find the rest.
-# Choose the timezone that the disk image came from. This is most important when adding FAT file systems because FAT does not store timezone information and Autopsy will not know how to normalize to UTC.
-# Choose to perform orphan file finding on FAT file systems. This can be a time intensive process because it will require that Autopsy look at each sector in the device.
@ -80,7 +74,7 @@ You can add files or folders that are on your local computer (or on a shared dri
Some things to note when doing this:
- Autopsy ignores the time stamps on files that it adds this way because they could be the timestamps when they were copied onto your examination device.
- If you have a USB-attached device that you are analyzing and you choose to add the device's contents using this method, then note that it will not look at unallocated space or deleted files. Autopsy will only be able to see the allocated files. You should add the device as a "Logical Drive" to get the unallocated space.
- If you have a USB-attached device that you are analyzing and you choose to add the device's contents using this method, then note that it will not look at unallocated space or deleted files. Autopsy will only be able to see the allocated files. You should add the device as a "Logical Drive" to analyze the unallocated space.
To add logical files:
-# Choose "Logical Files" from the pull down.
@ -89,8 +83,4 @@ To add logical files:
All of the files that you added in the panel will be grouped together into a single data source, called "LogicalFileSet" in the main UI.
\section ds_rem Removing a Data Source
You cannot currently remove a data source from a case.
*/

View File

@ -0,0 +1,31 @@
/*! \page e01_verifier_page E01 Verifier Module
What Does It Do
========
The E01 Verifier module computes a checksum on E01 files and compares it with the E01 file's internal checksum to ensure they match.
This can detect if the E01 file is corrupted.
Configuration
=======
There is no configuration required.
Using the Module
======
Select the checkbox in the Ingest Modules list to use this module.
Ingest Settings
------
There are no runtime ingest settings required.
Seeing Results
------
You only see results from this module if the E01 is corrupted. A failure to load is shown below.
\image html e01-verifier.png
*/

@ -0,0 +1,30 @@
/*! \page email_parser_page Email Parser Module
What Does It Do
========
The Email Parser module identifies Thunderbird MBOX files and PST format files based on file signatures, extracts the e-mails from them, and adds the results to the Blackboard. The module skips known files, creates a Blackboard artifact for each message, and adds email attachments as derived files.
This allows the user to identify email-based communications from the system being analyzed.
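Signature-based identification means the module inspects the first bytes of a file rather than trusting its extension. A simplified sketch of that idea follows; the class is hypothetical, and the magic values shown are the classic mbox "From " separator and the PST "!BDN" header:

\code
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;

public class MailFileSniffer {

    private static final byte[] MBOX_MAGIC = "From ".getBytes(StandardCharsets.US_ASCII);
    private static final byte[] PST_MAGIC = { '!', 'B', 'D', 'N' };

    // True if the file begins with the given signature bytes.
    private static boolean startsWith(Path file, byte[] magic) throws IOException {
        byte[] head = new byte[magic.length];
        try (InputStream in = Files.newInputStream(file)) {
            if (in.read(head) != head.length) {
                return false;            // too short to hold the signature
            }
        }
        for (int i = 0; i < magic.length; i++) {
            if (head[i] != magic[i]) {
                return false;
            }
        }
        return true;
    }

    public static boolean looksLikeMbox(Path file) throws IOException {
        return startsWith(file, MBOX_MAGIC);
    }

    public static boolean looksLikePst(Path file) throws IOException {
        return startsWith(file, PST_MAGIC);
    }
}
\endcode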
Configuration
=======
There is no configuration required.
Using the Module
======
Explore the "Results", "E-Mail Messages" portion of the tree to review the results of this module.
Ingest Settings
------
There are no runtime ingest settings required.
Seeing Results
------
The results of this module show up in the "Results", "E-Mail Messages" portion of the tree.
\image html email_results.PNG
*/

@ -0,0 +1,35 @@
/*! \page extension_mismatch_detector_page Extension Mismatch Detector Module
What Does It Do
========
The Extension Mismatch Detector module uses the results from the File Type Identification module and flags files that have an extension not traditionally associated with the file's detected type. It ignores 'known' (NSRL) files. You can customize the MIME types and the file extensions per MIME type in "Tools", "Options", "File Extension Mismatch".
This detects files that someone may be trying to hide.
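At its core, the check is a table lookup: map each detected MIME type to its customary extensions and flag anything outside the set. A minimal sketch under assumed example rules (Autopsy's real table is user-configurable and much larger):

\code
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Locale;
import java.util.Map;
import java.util.Set;

public class ExtensionMismatchChecker {

    // Illustrative defaults only; the real rule set is edited in the options panel.
    private final Map<String, Set<String>> mimeToExtensions = new HashMap<>();

    public ExtensionMismatchChecker() {
        mimeToExtensions.put("image/jpeg", new HashSet<>(Arrays.asList("jpg", "jpeg")));
        mimeToExtensions.put("application/pdf", new HashSet<>(Arrays.asList("pdf")));
    }

    // Flags a file whose detected MIME type has a rule and whose extension is not in it.
    public boolean isMismatch(String fileName, String detectedMimeType) {
        Set<String> allowed = mimeToExtensions.get(detectedMimeType);
        if (allowed == null) {
            return false;                // no rule for this type, nothing to flag
        }
        int dot = fileName.lastIndexOf('.');
        String ext = (dot < 0) ? "" : fileName.substring(dot + 1).toLowerCase(Locale.US);
        return !allowed.contains(ext);
    }
}
\endcode

The ingest options described below (skipping extension-less files and text files) would simply short-circuit a check like this.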
Configuration
=======
One can add and remove MIME types in the "Tools", "Options", "File Extension Mismatch" dialog box, as well as add and remove extensions to particular MIME types.
<br>
\image html extension-mismatch-detected-configuration.PNG
<br>
Using the Module
======
Note that you can get a lot of false positives with this module. You can add your own rules to Autopsy to reduce unwanted hits.
Ingest Settings
------
In the ingest settings, the user can choose whether the module should skip files without extensions and skip text files. Both of these options are enabled by default.
\image html extension-mismatch-detected-ingest-settings.PNG
Seeing Results
------
Results are shown in the Results tree under "Extension Mismatch Detected".
\image html extension-mismatch-detected.PNG
*/

docs/doxygen-user/file_search.dox Normal file → Executable file
@ -1,31 +1,18 @@
/*! \page file_search_page File Search
\section about_file_search About File Search
The File Search tool can be accessed either from the Tools menu or by right-clicking on a data source node in the Data Explorer / Directory Tree. Using File Search, you can specify, filter, and display the directories and files that you want to see from the images in the currently opened case. The File Search results are populated in a new Table Result viewer on the right-hand side.
Currently, Autopsy only supports four search categories in File Search: Name, Size, Date, and Known Status.
<b>Note: Currently File Search doesn't support regular expression, however the Keyword Search feature of Autopsy does also look in file names and it does support regular expressions, which can complimentary to the File Search.</b>
<b>How to Open File Search:</b>\n
To see how to open File Search, click \ref how_to_open_file_search "here".\n
<b>Note: The File Search Window is opened and closed automatically. If there's a case opened and there is at least one image inside that case, File Search Window can't be closed.</b>
<b>How to Use File Search:</b> \n
To see how to use File Search, click \ref how_to_use_file_search "here".
<b>Example</b>
Here's an example of a File Search window:
\image html file-search-top-component.PNG
Note: Currently File Search doesn't support regular expressions. The Keyword Search feature of Autopsy does support regular expressions and can be used to search for files and/or directories by name.
\section how_to_open_file_search How To Open File Search
To open the File Search, you can do one of the following:
Right-click a data source and choose "Open File Search by Attributes".
\image html open-file-search-component-1.PNG
Select the "Tools" > "File Search by Attributes".
or select the "Tools", "File Search by Attributes".
\image html open-file-search-component-2.PNG
<b>Note: The File Search Window is opened and closed automatically. If there's a case opened and there is at least one image inside that case, File Search Window can't be closed.</b>
\section how_to_use_file_search How To Use File Search
@ -41,8 +28,8 @@ Search for all files and directory whose "date property" is within the date rang
\li Known Status:
Search for all files and directories whose known status is recognized as either Unknown, Known, or Known Bad. For more on known status, see Hash Database Management.
To use any of these filters, check the box next to the category and click the "Search" button to start the search process. The results will show up in the "Result Viewer".
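Conceptually, each checked filter is a predicate and the search is their conjunction. A minimal sketch with a stand-in file record (not Autopsy's data model):

\code
import java.util.function.Predicate;

public class FileSearchQuery {

    // Minimal stand-in for a file's searchable properties.
    static class FileInfo {
        String name;
        long sizeBytes;
        long crtimeEpoch;    // creation time, seconds since the epoch
        String knownStatus;  // "Unknown", "Known", or "Known Bad"
    }

    // Each checked filter narrows the result set; unchecked filters are simply omitted.
    public static Predicate<FileInfo> build(String nameSubstring, long minSizeBytes,
                                            long fromEpoch, long toEpoch, String status) {
        Predicate<FileInfo> byName  = f -> f.name.contains(nameSubstring);
        Predicate<FileInfo> bySize  = f -> f.sizeBytes > minSizeBytes;
        Predicate<FileInfo> byDate  = f -> f.crtimeEpoch >= fromEpoch && f.crtimeEpoch <= toEpoch;
        Predicate<FileInfo> byKnown = f -> f.knownStatus.equalsIgnoreCase(status);
        return byName.and(bySize).and(byDate).and(byKnown);
    }
}
\endcode

The example below corresponds to something like build("hello", 1000, from, to, "Unknown").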
Example
Here's an example where we try to get all the directories and files whose names contain "hello", are larger than 1000 bytes, were created between 06/15/2010 and 06/16/2010 (in the GMT-5 time zone), and are unknown files:
\image html example-of-file-sarch.PNG
*/

@ -1,4 +1,4 @@
/*! \page file_type_identification_page File Type Identification Module
What Does It Do
========
@ -7,17 +7,18 @@ The File Type ID module identifies files based on their internal signatures and
You should enable this module because many other modules depend on its results to determine if they should analyze a file. Some examples include:
- \subpage extension_mismatch_detector_page
- \subpage keyword_search_page
Configuration
=======
You do not need to configure anything with this module unless you want to define your own types. To define your own types, go to "Tools", "Options", "File Type Id" panel.
From there, you can define rules based on the offset of the signature and whether the signature is a byte sequence or an ASCII string.
\image html filetype.png
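A user-defined rule therefore boils down to three pieces: a MIME type, an offset, and the signature bytes (an ASCII-string rule is just a byte rule over the string's ASCII encoding). A hypothetical sketch of how such a rule could be evaluated, not Autopsy's implementation:

\code
import java.util.Arrays;

public class CustomFileTypeRule {

    private final String mimeType;
    private final int offset;
    private final byte[] signature;

    public CustomFileTypeRule(String mimeType, int offset, byte[] signature) {
        this.mimeType = mimeType;
        this.offset = offset;
        this.signature = signature;
    }

    // Matches if the signature bytes appear at the configured offset in the header.
    public boolean matches(byte[] fileHeader) {
        if (fileHeader.length < offset + signature.length) {
            return false;
        }
        byte[] window = Arrays.copyOfRange(fileHeader, offset, offset + signature.length);
        return Arrays.equals(window, signature);
    }

    public String getMimeType() {
        return mimeType;
    }
}
\endcode

For example, a PDF rule could be new CustomFileTypeRule("application/pdf", 0, "%PDF".getBytes()), "%PDF" at offset 0 being the standard PDF signature.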
Using the Module
======
@ -31,7 +32,7 @@ a data source. All user-defined and Tika rules are always applied.
Seeing Results
------
This module does not have obvious impacts in the user interface, though it is used by many other modules.
To see the file type of an individual file, view the "Results" tab in the lower right when you navigate to the file. You should see a page in there that mentions the file type.

docs/doxygen-user/footer.html Normal file → Executable file
docs/doxygen-user/hashdb_lookup.dox Normal file → Executable file
@ -1,33 +1,68 @@
/*! \page hash_db_page Hash Database Lookup Module
What Does It Do
========
The Hash Database Lookup Module calculates MD5 hash values for files and looks up hash values in a database to determine if the file is known bad, known (in general), or unknown.
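The lookup half of the module can be sketched as follows, assuming the MD5 has already been computed; the enum and set parameters are illustrative, not Autopsy's data model:

\code
import java.util.Set;

public class HashLookup {

    public enum KnownStatus { KNOWN, KNOWN_BAD, UNKNOWN }

    // Notable ("known bad") sets are consulted first so a hash that appears in
    // both kinds of sets is still flagged. Set contents come from the
    // configured hash databases.
    public static KnownStatus classify(String md5Hex,
                                       Set<String> knownBadHashes,
                                       Set<String> knownHashes) {
        if (knownBadHashes.contains(md5Hex)) {
            return KnownStatus.KNOWN_BAD;
        }
        if (knownHashes.contains(md5Hex)) {
            return KnownStatus.KNOWN;
        }
        return KnownStatus.UNKNOWN;
    }
}
\endcode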
Configuration
=======
The Hash Database Management window is where you can set and update your hash database information. Hash databases are used to identify files that are 'known'.
\li Known good files are those that can be safely ignored. This set of files frequently includes standard OS and application files. Ignoring such uninteresting-to-the-investigator files can greatly reduce image analysis time.
\li Known bad (also called notable) files are those that should raise awareness. This set will vary depending on the type of investigation, but common examples include contraband images and malware.
\section notable_known_bad_hashsets Notable / Known Bad Hashsets
Autopsy allows for multiple known bad hash databases to be set. Autopsy supports the following formats:
\li EnCase: An EnCase hashset file.
\li MD5sum: Output from running the md5, md5sum, or md5deep program on a set of files.
\li NSRL: The format of the NSRL database.
\li HashKeeper: Hashset file conforming to the HashKeeper standard.
<b>NIST_NSRL:</b>
Autopsy can use the <A HREF="http://www.nsrl.nist.gov">NIST NSRL</A> to detect 'known files'. Note that the NSRL contains hashes of 'known files' that may be good or bad depending on your perspective and investigation type. For example, the existence of a piece of financial software may be interesting to your investigation and that software could be in the NSRL. Therefore, Autopsy treats files that are found in the NSRL as simply 'known' and does not specify good or bad. Ingest modules have the option of ignoring files that were found in the NSRL.
To use the NSRL, you must concatenate all of the NSRLFile.txt files together. You can use 'cat' on a Unix system or from within Cygwin to do this.
\section adding_hashsets Adding Hashsets
Autopsy needs an index of the hashset to actually use a hash database. It can create the index if you import only the hashset. When you select the database from within this window, it will tell you if the index needs to be created. Autopsy uses the hash database management system from The Sleuth Kit. You can manually create an index using the 'hfind' command line tool, or you can use Autopsy. If you attempt to proceed without indexing a database, Autopsy will offer to automatically produce an index for you.
You can also specify only the index file and not use the full hashset - the index file is sufficient to identify known files. This can save space. To do this, specify the .idx file from the Hash Database Management window.
<br>
\section using_hashsets Using Hashsets
There is an \ref ingest_page "ingest module" that will hash the files and look them up in the hashsets. It will flag files that were in the notable hashset and those results will be shown in the Results tree of the \ref tree_viewer_page.
Other ingest modules are able to use the known status of a file to decide if they should ignore the file or process it.
You can also see the results in the \ref how_to_open_file_search "File Search" window. There is an option to choose the 'known status'. From here, you can do a search to see all 'known bad' files, and you can also choose to ignore all 'known' files that were found in the NSRL. The known status of a file is also shown in a column when the file is listed.
\image html hash-database-configuration.PNG
<br>
NIST NSRL
------
Autopsy can use the <A HREF="http://www.nsrl.nist.gov">NIST NSRL</A> to detect 'known files'. The NSRL contains hashes of 'known files' that may be good or bad depending on your perspective and investigation type. For example, the existence of a piece of financial software may be interesting to your investigation and that software could be in the NSRL. Therefore, Autopsy treats files that are found in the NSRL as simply 'known' and does not specify good or bad. Ingest modules have the option of ignoring files that were found in the NSRL.
To use the NSRL, you may download a pre-made index from <A HREF="http://sourceforge.net/projects/autopsy/files/NSRL/">http://sourceforge.net/projects/autopsy/files/NSRL</A>. Download <b>NSRL-XYZm-autopsy.zip</b> (where 'XYZ' is the version number; as of this writing, it is 247) and unzip the file. Use the "Tools", "Options" menu and select the "Hash Database" tab. Click "Import Database" and browse to the location of the unzipped NSRL file. You can change the Hash Set Name if desired. Select the desired database type, check "Send ingest inbox message for each hit" if you want those notifications, and then click "OK".
<br>
\image html nsrl_import_process.PNG
<br>
<br>
The screenshot below shows an imported NSRL.
<br>
\image html nsrl_imported.PNG
<br>
<br>
Using the Module
======
Ingest Settings
------
When hashsets are configured, the user can select the hashsets to use during the ingest process.
\image html hash-lookup.png
Seeing Results
------
Results show up in the tree as "Hashset Hits", grouped by the name of the hash set.
\image html hashset-hits.png
*/

@ -1,4 +1,4 @@
/*! \page image_gallery_page Image Gallery Module
Overview
========
This document outlines the use of the new Image Gallery feature of Autopsy. This feature was funded by DHS S&T to help provide free and open source digital forensics tools to law enforcement.
@ -8,12 +8,11 @@ The new image gallery feature has been designed specifically with child-exploita
- Allows examiner to start viewing images immediately upon adding them to the case. As images are hashed, they are updated in the interface. You do not need to wait until the entire image is ingested.
This document assumes basic familiarity with Autopsy.
Quick Start
===========
1. The Image Gallery tool can be configured to collect data about images/videos as ingest runs or all at once after ingest. To change this setting go to "Tools", "Options", "Image/Video Gallery". This setting is saved per case, but cannot be changed during ingest.
2. Create a case as normal and add a disk image (or folder of files) as a data source. Ensure that you have the hash lookup module enabled with NSRL and known bad hashsets, the EXIF module enabled, and the File Type module enabled.
3. Click Tools->View Images/Videos in the menu. This will open the Autopsy Image/Video Gallery tool in a new window.
3. Click "Tools", "Analyze Images/Videos" in the menu. This will open the Autopsy Image/Video Analysis tool in a new window.
4. Groups of images will be presented as they are analyzed by the background ingest modules. You can later re-sort and regroup, but grouping by folder is required while ingest is still ongoing.
5. As each group is reviewed, the next highest priority group is presented, according to a sorting criterion (the default is the density of hash set hits; see the sketch after this list).
6. Images that were hits from hash sets will have a dashed border around them.
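The default prioritization described in step 5 could look roughly like this (a hypothetical sketch; the real Image Gallery model is more involved):

\code
import java.util.Comparator;
import java.util.List;

public class GroupPrioritizer {

    // Minimal stand-in for a gallery group.
    static class Group {
        String folder;
        int fileCount;
        int hashHitCount;

        // Default priority metric: fraction of files in the group with hash set hits.
        double hashHitDensity() {
            return fileCount == 0 ? 0.0 : (double) hashHitCount / fileCount;
        }
    }

    // Orders groups so the group densest in hash set hits is reviewed first.
    public static void prioritize(List<Group> groups) {
        groups.sort(Comparator.comparingDouble(Group::hashHitDensity).reversed());
    }
}
\endcode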

(The remainder of this diff consists of binary image changes under docs/doxygen-user/images/: screenshots such as add-data-source.PNG, case-newcase.png, file-search-top-component.PNG, inbox-main-screen.PNG, keyword-search-bar.PNG, and select-ingest-modules.PNG were added, updated, or had their file mode changed. Binary content is not shown.)