Merge branch 'master' of https://github.com/sleuthkit/autopsy into viewInContext

This commit is contained in:
Jeff Wallace 2013-11-26 12:15:07 -05:00
commit f022875a04
16 changed files with 570 additions and 367 deletions

View File

@ -26,8 +26,8 @@ import javafx.embed.swing.JFXPanel;
import org.sleuthkit.autopsy.coreutils.Logger; import org.sleuthkit.autopsy.coreutils.Logger;
import org.openide.modules.ModuleInstall; import org.openide.modules.ModuleInstall;
import org.openide.windows.WindowManager; import org.openide.windows.WindowManager;
import org.sleuthkit.autopsy.casemodule.Case;
import org.sleuthkit.autopsy.coreutils.MessageNotifyUtil; import org.sleuthkit.autopsy.coreutils.MessageNotifyUtil;
import org.sleuthkit.autopsy.coreutils.PlatformUtil;
/** /**
* Wrapper over Installers in packages in Core module This is the main * Wrapper over Installers in packages in Core module This is the main
@ -39,6 +39,41 @@ public class Installer extends ModuleInstall {
private static final Logger logger = Logger.getLogger(Installer.class.getName()); private static final Logger logger = Logger.getLogger(Installer.class.getName());
private static volatile boolean javaFxInit = false; private static volatile boolean javaFxInit = false;
static {
    loadDynLibraries();
}

/**
 * Eagerly loads the native libraries the core depends on (MS C runtime,
 * zlib, libewf) so later JNI linking does not fail. Only needed on
 * Windows, where the dynamic linker may not find the bundled DLLs on its
 * own. Load failures are logged but do not abort startup: the linker may
 * still resolve the libraries through its normal search mechanism.
 */
private static void loadDynLibraries() {
    if (PlatformUtil.isWindowsOS()) {
        try {
            //on windows force loading ms crt dependencies first
            //in case linker can't find them on some systems
            //Note: if shipping with a different CRT version, this will only print a warning
            //and try to use linker mechanism to find the correct versions of libs.
            //We should update this if we officially switch to a new version of CRT/compiler
            System.loadLibrary("msvcr100");
            System.loadLibrary("msvcp100");
            logger.log(Level.INFO, "MS CRT libraries loaded");
        } catch (UnsatisfiedLinkError e) {
            logger.log(Level.SEVERE, "Error loading ms crt libraries, ", e);
        }
        try {
            System.loadLibrary("zlib");
            // fixed duplicated word in the original message ("loaded loaded")
            logger.log(Level.INFO, "ZLIB library loaded");
        } catch (UnsatisfiedLinkError e) {
            logger.log(Level.SEVERE, "Error loading ZLIB library, ", e);
        }
        try {
            System.loadLibrary("libewf");
            logger.log(Level.INFO, "EWF library loaded");
        } catch (UnsatisfiedLinkError e) {
            logger.log(Level.SEVERE, "Error loading EWF library, ", e);
        }
    }
}
public Installer() { public Installer() {
logger.log(Level.INFO, "core installer created"); logger.log(Level.INFO, "core installer created");
javaFxInit = false; javaFxInit = false;

View File

@ -299,7 +299,6 @@ public class DataContentViewerHex extends javax.swing.JPanel implements DataCont
currentPage = page; currentPage = page;
long offset = (currentPage - 1) * pageLength; long offset = (currentPage - 1) * pageLength;
// change the cursor to "waiting cursor" for this operation // change the cursor to "waiting cursor" for this operation
this.setCursor(Cursor.getPredefinedCursor(Cursor.WAIT_CURSOR)); this.setCursor(Cursor.getPredefinedCursor(Cursor.WAIT_CURSOR));
@ -344,13 +343,13 @@ public class DataContentViewerHex extends javax.swing.JPanel implements DataCont
// set the output view // set the output view
if (errorText == null) { if (errorText == null) {
int showLength = bytesRead < pageLength ? bytesRead : (int) pageLength; int showLength = bytesRead < pageLength ? bytesRead : (int) pageLength;
outputViewPane.setText(DataConversion.byteArrayToHex(data, showLength, offset, outputViewPane.getFont())); outputViewPane.setText(DataConversion.byteArrayToHex(data, showLength, offset));
} }
else { else {
outputViewPane.setText(errorText); outputViewPane.setText(errorText);
} }
outputViewPane.moveCaretPosition(0); outputViewPane.setCaretPosition(0);
this.setCursor(null); this.setCursor(null);
} }

View File

@ -20,79 +20,106 @@ package org.sleuthkit.autopsy.datamodel;
import java.awt.Font; import java.awt.Font;
import java.util.Arrays; import java.util.Arrays;
import java.util.Formatter;
/** /**
* Helper methods for converting data. * Helper methods for converting data.
*/ */
public class DataConversion { public class DataConversion {
public static String byteArrayToHex(byte[] array, int length, long offset, Font font) { final private static char[] hexArray = "0123456789ABCDEF".toCharArray();
/**
 * Return the hex-dump layout of the passed in byte array.
 *
 * @param array Data to display
 * @param length Amount of data in array to display
 * @param arrayOffset Offset of where data in array begins as part of a bigger file (used for arrayOffset column)
 * @param font Font that will be used to display the text (no longer used)
 * @return hex-dump text for the given bytes
 * @deprecated The font is not needed to render the dump; use
 *             the three-argument {@code byteArrayToHex(byte[], int, long)} overload instead.
 */
@Deprecated
public static String byteArrayToHex(byte[] array, int length, long arrayOffset, Font font) {
    return byteArrayToHex(array, length, arrayOffset);
}
/**
* Return the hex-dump layout of the passed in byte array.
* @param array Data to display
* @param length Amount of data in array to display
* @param arrayOffset Offset of where data in array begins as part of a bigger file (used for arrayOffset column)
* @return
*/
public static String byteArrayToHex(byte[] array, int length, long arrayOffset) {
if (array == null) { if (array == null) {
return ""; return "";
} else { }
String base = new String(array, 0, length); else {
StringBuilder outputStringBuilder = new StringBuilder();
// loop through the file in 16-byte increments
for (int curOffset = 0; curOffset < length; curOffset += 16) {
// how many bytes are we displaying on this line
int lineLen = 16;
if (length - curOffset < 16) {
lineLen = length - curOffset;
}
// print the offset column
//outputStringBuilder.append("0x");
outputStringBuilder.append(String.format("0x%08x: ", arrayOffset + curOffset));
//outputStringBuilder.append(": ");
StringBuilder buff = new StringBuilder(); // print the hex columns
int count = 0;
int extra = base.length() % 16;
String sub = "";
char subchar;
//commented out code can be used as a base for generating hex length based on
//offset/length/file size
//String hex = Long.toHexString(length + offset);
//double hexMax = Math.pow(16, hex.length());
double hexMax = Math.pow(16, 6);
while (count < base.length() - extra) {
buff.append("0x");
buff.append(Long.toHexString((long) (offset + count + hexMax)).substring(1));
buff.append(": ");
for (int i = 0; i < 16; i++) { for (int i = 0; i < 16; i++) {
buff.append(Integer.toHexString((((int) base.charAt(count + i)) & 0xff) + 256).substring(1).toUpperCase()); if (i < lineLen) {
buff.append(" "); int v = array[curOffset + i] & 0xFF;
if (i == 7) { outputStringBuilder.append(hexArray[v >>> 4]);
buff.append(" "); outputStringBuilder.append(hexArray[v & 0x0F]);
}
else {
outputStringBuilder.append(" ");
}
// someday we'll offer the option of these two styles...
if (true) {
outputStringBuilder.append(" ");
if (i % 4 == 3) {
outputStringBuilder.append(" ");
}
if (i == 7) {
outputStringBuilder.append(" ");
}
}
// xxd style
else {
if (i % 2 == 1) {
outputStringBuilder.append(" ");
}
} }
} }
sub = base.substring(count, count + 16);
outputStringBuilder.append(" ");
// print the ascii columns
String ascii = new String(array, curOffset, lineLen);
for (int i = 0; i < 16; i++) { for (int i = 0; i < 16; i++) {
subchar = sub.charAt(i); char c = ' ';
if (!font.canDisplay(subchar)) { if (i < lineLen) {
sub.replace(subchar, '.'); c = ascii.charAt(i);
} int dec = (int) c;
// replace all unprintable characters with "." if (dec < 32 || dec > 126) {
int dec = (int) subchar; c = '.';
if (dec < 32 || dec > 126) { }
sub = sub.replace(subchar, '.');
} }
outputStringBuilder.append(c);
} }
buff.append(" " + sub + "\n");
count += 16; outputStringBuilder.append("\n");
} }
if (base.length() % 16 != 0) {
buff.append("0x" + Long.toHexString((long) (offset + count + hexMax)).substring(1) + ": "); return outputStringBuilder.toString();
}
for (int i = 0; i < 16; i++) {
if (i < extra) {
buff.append(Integer.toHexString((((int) base.charAt(count + i)) & 0xff) + 256).substring(1) + " ");
} else {
buff.append(" ");
}
if (i == 7) {
buff.append(" ");
}
}
sub = base.substring(count, count + extra);
for (int i = 0; i < extra; i++) {
subchar = sub.charAt(i);
if (!font.canDisplay(subchar)) {
sub.replace(subchar, '.');
}
}
buff.append(" " + sub);
return buff.toString();
} }
} }

View File

@ -65,9 +65,15 @@ public class GeneralIngestConfigurator implements IngestConfigurator {
String[] enabledModuleNames = ModuleSettings.getConfigSetting(moduleContext, ENABLED_INGEST_MODULES_KEY).split(", "); String[] enabledModuleNames = ModuleSettings.getConfigSetting(moduleContext, ENABLED_INGEST_MODULES_KEY).split(", ");
List<IngestModuleAbstract> enabledModules = new ArrayList<>(); List<IngestModuleAbstract> enabledModules = new ArrayList<>();
for (String moduleName : enabledModuleNames) { for (String moduleName : enabledModuleNames) {
// if no modules were enabled, we get an empty string in here
if (moduleName.trim().equals("")) {
continue;
}
// we renamed this module in 3.0.9 -> update it to prevent an error message
if (moduleName.equals("Thunderbird Parser")) { if (moduleName.equals("Thunderbird Parser")) {
moduleName = "MBox Parser"; moduleName = "MBox Parser";
} }
IngestModuleAbstract moduleFound = null; IngestModuleAbstract moduleFound = null;
for (IngestModuleAbstract module : allModules) { for (IngestModuleAbstract module : allModules) {
if (moduleName.equals(module.getName())) { if (moduleName.equals(module.getName())) {
@ -79,7 +85,7 @@ public class GeneralIngestConfigurator implements IngestConfigurator {
enabledModules.add(moduleFound); enabledModules.add(moduleFound);
} }
else { else {
messages.add("Unable to enable ingest module: " + moduleName); messages.add(moduleName + " was previously enabled, but could not be found");
} }
} }
ingestDialogPanel.setEnabledIngestModules(enabledModules); ingestDialogPanel.setEnabledIngestModules(enabledModules);

View File

@ -3,7 +3,128 @@
<!-- Need a way to specify TSK Debug versus Release --> <!-- Need a way to specify TSK Debug versus Release -->
<property name="TSK_BUILD_TYPE">Release</property> <property name="TSK_BUILD_TYPE">Release</property>
<!-- Directory paths -->
<property name="amd64" location="${basedir}/Core/release/modules/lib/amd64" />
<property name="x86" location="${basedir}/Core/release/modules/lib/x86" />
<property name="x86_64" location="${basedir}/Core/release/modules/lib/x86_64" />
<property name="i386" location="${basedir}/Core/release/modules/lib/i386" />
<property name="i586" location="${basedir}/Core/release/modules/lib/i586" />
<property name="i686" location="${basedir}/Core/release/modules/lib/i686"/>
<property name="crt" location="${basedir}/thirdparty/crt" />
<!-- NATIVE LIB TARGETS -->
<!-- Aggregate entry point for staging the Windows native DLLs; invoked from
     the getExternals target when building on Windows. -->
<target name="copyLibs" depends="copyWinLibs64,copyWinLibs32" description="Copy windows dlls to the correct location." />
<target name="init-lib-path" description="Set up folder hierarchy under release/modules/lib">
<!-- One directory per possible JVM os.arch value (amd64/x86_64 for 64-bit,
     x86/i386/i586/i686 for 32-bit) so the native-library loader finds the
     DLLs regardless of which JVM variant is running. -->
<mkdir dir="${amd64}"/>
<mkdir dir="${x86_64}"/>
<mkdir dir="${x86}"/>
<mkdir dir="${i386}"/>
<mkdir dir="${i586}"/>
<mkdir dir="${i686}"/>
</target>
<target name="checkLibDirs" depends="init-lib-path">
<!-- Fails fast when LIBEWF_HOME is not set, then probes which of the
     TSK win32/win64 Release output directories exist; the win64.exists /
     win32.exists properties gate the copyWinLibs64 / copyWinLibs32 targets. -->
<property environment="env"/>
<condition property="ewfFound">
<isset property="env.LIBEWF_HOME"/>
</condition>
<fail unless="ewfFound" message="LIBEWF_HOME must be set as an environment variable."/>
<property name="win64.lib.path" value="${env.TSK_HOME}/win32/x64/Release"/>
<property name="win32.lib.path" value="${env.TSK_HOME}/win32/Release" />
<available property="win64.exists" type="dir" file="${win64.lib.path}" />
<available property="win32.exists" type="dir" file="${win32.lib.path}" />
</target>
<!-- Copies the 64-bit libewf/zlib DLLs into both 64-bit arch directories.
     Runs only when the TSK 64-bit Release directory exists (win64.exists). -->
<target name="copyWinLibs64" depends="checkLibDirs" if="win64.exists">
<property name="win64dir" location="${win64.lib.path}" />
<fileset dir="${win64dir}" id="win64dlls">
<include name="libewf.dll" />
<include name="zlib.dll"/>
</fileset>
<copy todir="${amd64}" overwrite="true">
<fileset refid="win64dlls" />
</copy>
<copy todir="${x86_64}" overwrite="true">
<fileset refid="win64dlls" />
</copy>
</target>
<!-- Copies the 32-bit libewf/zlib DLLs into all four 32-bit arch directories.
     Runs only when the TSK 32-bit Release directory exists (win32.exists). -->
<target name="copyWinLibs32" depends="checkLibDirs" if="win32.exists">
<property name="win32dir" location="${win32.lib.path}" />
<fileset dir="${win32dir}" id="win32dlls">
<include name="zlib.dll" />
<include name="libewf.dll"/>
</fileset>
<copy todir="${i386}" overwrite="true">
<fileset refid="win32dlls" />
</copy>
<copy todir="${x86}" overwrite="true">
<fileset refid="win32dlls" />
</copy>
<copy todir="${i586}" overwrite="true">
<fileset refid="win32dlls" />
</copy>
<copy todir="${i686}" overwrite="true">
<fileset refid="win32dlls" />
</copy>
</target>
<!-- CRT LIBS TO ZIP -->
<!-- Stages both 32- and 64-bit Microsoft CRT DLLs into the ZIP staging
     area; invoked during the windows packaging step. -->
<target name="copyExternalLibsToZip" depends="copyCRT32,copyCRT64"/>
<!-- Unzips the 32-bit CRT into the x86 staging dir and mirrors the DLLs
     into the other 32-bit arch directories (i386/i586/i686). -->
<target name="copyCRT32">
<property name="CRT.path" value="${thirdparty.dir}/crt/win32/crt.zip"/>
<available file="${CRT.path}" property="crtFound"/>
<fail unless="crtFound" message="CRT not found in the thirdparty repo in path: ${CRT.path}"/>
<property name="zip-lib-path" value="${zip-tmp}/${app.name}/${app.name}/modules/lib/"/>
<unzip src="${CRT.path}" dest="${zip-lib-path}/x86" overwrite="true"/>
<fileset dir="${zip-lib-path}/x86" id="crt32dlls">
<include name="*.dll"/>
</fileset>
<copy todir="${zip-lib-path}/i386" overwrite="true">
<fileset refid="crt32dlls"/>
</copy>
<copy todir="${zip-lib-path}/i586" overwrite="true">
<fileset refid="crt32dlls"/>
</copy>
<copy todir="${zip-lib-path}/i686" overwrite="true">
<fileset refid="crt32dlls"/>
</copy>
</target>
<!-- Unzips the 64-bit CRT into the x86_64 staging dir and mirrors the DLLs
     into amd64. -->
<target name="copyCRT64">
<!-- BUGFIX: Ant properties are immutable, so reusing the names CRT.path /
     crtFound already set by copyCRT32 meant that when both targets ran
     (as copyExternalLibsToZip does), this target silently unzipped the
     32-bit crt.zip into the 64-bit directories. Target-local names avoid
     the collision; all references are inside this target. -->
<property name="CRT64.path" value="${thirdparty.dir}/crt/win64/crt.zip"/>
<available file="${CRT64.path}" property="crt64Found"/>
<fail unless="crt64Found" message="CRT not found in the thirdparty repo in path: ${CRT64.path}"/>
<property name="zip-lib-path" value="${zip-tmp}/${app.name}/${app.name}/modules/lib/"/>
<unzip src="${CRT64.path}" dest="${zip-lib-path}/x86_64" overwrite="true"/>
<!-- Renamed from crt32dlls: the old id collided with the fileset declared
     in copyCRT32, making the refid resolution order-dependent. -->
<fileset dir="${zip-lib-path}/x86_64" id="crt64dlls">
<include name="*.dll"/>
</fileset>
<copy todir="${zip-lib-path}/amd64" overwrite="true">
<fileset refid="crt64dlls"/>
</copy>
</target>
<!-- ADVANCED INSTALLER TARGETS -->
<target name="build-installer-windows" depends="init-advanced-installer" <target name="build-installer-windows" depends="init-advanced-installer"
description="Makes an installer from the opened ZIP file"> description="Makes an installer from the opened ZIP file">
<antcall target="run-advanced-installer" /> <antcall target="run-advanced-installer" />

View File

@ -72,6 +72,12 @@
<unzip src="${thirdparty.dir}/gstreamer/${os.family}/i386/0.10.7/gstreamer.zip" dest="${zip-tmp}/${app.name}/gstreamer"/> <unzip src="${thirdparty.dir}/gstreamer/${os.family}/i386/0.10.7/gstreamer.zip" dest="${zip-tmp}/${app.name}/gstreamer"/>
<copy file="${basedir}/branding_${app.name}/icon.ico" tofile="${zip-tmp}/${app.name}/icon.ico" overwrite="true"/> <copy file="${basedir}/branding_${app.name}/icon.ico" tofile="${zip-tmp}/${app.name}/icon.ico" overwrite="true"/>
<if>
<equals arg1="${os.family}" arg2="windows"/>
<then>
<antcall target="copyExternalLibsToZip"/>
</then>
</if>
<property name="app.property.file" value="${zip-tmp}/${app.name}/etc/${app.name}.conf" /> <property name="app.property.file" value="${zip-tmp}/${app.name}/etc/${app.name}.conf" />
<property name="jvm.options" value="&quot;--branding ${app.name} -J-Xms24m -J-XX:MaxPermSize=128M -J-Xverify:none -J-Xdock:name=${app.title}&quot;" /> <property name="jvm.options" value="&quot;--branding ${app.name} -J-Xms24m -J-XX:MaxPermSize=128M -J-Xverify:none -J-Xdock:name=${app.title}&quot;" />
@ -126,7 +132,16 @@
<property name="app.version" value="${DSTAMP}"/> <property name="app.version" value="${DSTAMP}"/>
</target> </target>
<target name="-init" depends="-taskdefs,-convert-old-project,getProps,getJunit"> <target name="getExternals">
<if>
<equals arg1="${os.family}" arg2="windows"/>
<then>
<antcall target="copyLibs"/>
</then>
</if>
</target>
<target name="-init" depends="-taskdefs,-convert-old-project,getProps,getJunit,getExternals">
<convertclusterpath from="${cluster.path.evaluated}" to="cluster.path.final" id="cluster.path.id"/> <convertclusterpath from="${cluster.path.evaluated}" to="cluster.path.final" id="cluster.path.id"/>
<sortsuitemodules unsortedmodules="${modules}" sortedmodulesproperty="modules.sorted"/> <sortsuitemodules unsortedmodules="${modules}" sortedmodulesproperty="modules.sorted"/>
<property name="cluster" location="build/cluster"/> <property name="cluster" location="build/cluster"/>

View File

@ -1,221 +1,221 @@
<html> <html>
<head> <head>
<link rel="stylesheet" href="nbdocs:/org/sleuthkit/autopsy/core/docs/ide.css" type="text/css"> <link rel="stylesheet" href="nbdocs:/org/sleuthkit/autopsy/core/docs/ide.css" type="text/css">
<style> <style>
h1 { font-size: 145%; color: #666666; } h1 { font-size: 145%; color: #666666; }
h2 { font-size: 120%; color: #666666; } h2 { font-size: 120%; color: #666666; }
</style> </style>
<title>Autopsy 3 Quick Start Guide</title> <title>Autopsy 3 Quick Start Guide</title>
</head> </head>
<body> <body>
<p align="center" style="font-size: 145%;"><strong>Autopsy 3 Quick Start Guide</strong></p> <p align="center" style="font-size: 145%;"><strong>Autopsy 3 Quick Start Guide</strong></p>
<p align="center" style="font-size: 120%;">June 2013</p> <p align="center" style="font-size: 120%;">June 2013</p>
<p align="center"><a href="http://www.sleuthkit.org/autopsy/">www.sleuthkit.org/autopsy/</a></p> <p align="center"><a href="http://www.sleuthkit.org/autopsy/">www.sleuthkit.org/autopsy/</a></p>
<h1>Installation</h1> <h1>Installation</h1>
<p> <p>
The current version of Autopsy 3 runs only on Microsoft Windows. The current version of Autopsy 3 runs only on Microsoft Windows.
We have gotten it to run on other platforms, such as Linux and OS X, but we do not have it in a state that makes it easy to distribute and find the needed libraries. We have gotten it to run on other platforms, such as Linux and OS X, but we do not have it in a state that makes it easy to distribute and find the needed libraries.
</p> </p>
<p> <p>
The Windows installer will make a directory for Autopsy and place all of the needed files inside of it. The Windows installer will make a directory for Autopsy and place all of the needed files inside of it.
The installer includes all dependencies, including Sleuth Kit and Java. The installer includes all dependencies, including Sleuth Kit and Java.
</p> </p>
<p>Note that Autopsy 3 is a complete rewrite from Autopsy 2 and none of this document is relevant to Autopsy 2.</p> <p>Note that Autopsy 3 is a complete rewrite from Autopsy 2 and none of this document is relevant to Autopsy 2.</p>
<h1>Adding a Data Source (image, local disk, logical files)</h1> <h1>Adding a Data Source (image, local disk, logical files)</h1>
<p> <p>
Data sources are added to a <strong>case</strong>. A case can have a single data source or it can have multiple data sources if they are related.
Currently, a single report is generated for an entire case, so if you need to report on individual data sources, then you should use one data source per case. Currently, a single report is generated for an entire case, so if you need to report on individual data sources, then you should use one data source per case.
</p> </p>
<h2>Creating a Case</h2> <h2>Creating a Case</h2>
<p> <p>
To create a case, use either the &quot;Create New Case&quot; option on the Welcome screen or from the &quot;File&quot; menu. To create a case, use either the &quot;Create New Case&quot; option on the Welcome screen or from the &quot;File&quot; menu.
This will start the <strong>New Case Wizard</strong>. You will need to supply it with the name of the case and a directory to store the case results into. This will start the <strong>New Case Wizard</strong>. You will need to supply it with the name of the case and a directory to store the case results into.
You can optionally provide case numbers and other details. You can optionally provide case numbers and other details.
</p> </p>
<h2>Adding a Data Source</h2> <h2>Adding a Data Source</h2>
<p> <p>
The next step is to add input data source to the case. The next step is to add input data source to the case.
The <strong>Add Data Source Wizard</strong> will start automatically after the case is created or you can manually start it from the &quot;File&quot; menu or toolbar. The <strong>Add Data Source Wizard</strong> will start automatically after the case is created or you can manually start it from the &quot;File&quot; menu or toolbar.
You will need to choose the type of input data source to add (image, local disk or logical files and folders). You will need to choose the type of input data source to add (image, local disk or logical files and folders).
Next, supply it with the location of the source to add. Next, supply it with the location of the source to add.
</p> </p>
<ul> <ul>
<li>For a disk image, browse to the first file in the set (Autopsy will find the rest of the files). Autopsy currently supports E01 and raw (dd) files. <li>For a disk image, browse to the first file in the set (Autopsy will find the rest of the files). Autopsy currently supports E01 and raw (dd) files.
</li> </li>
<li> <li>
For local disk, select one of the detected disks. For local disk, select one of the detected disks.
Autopsy will add the current view of the disk to the case (i.e. snapshot of the meta-data). Autopsy will add the current view of the disk to the case (i.e. snapshot of the meta-data).
However, the individual file content (not meta-data) does get updated with the changes made to the disk. However, the individual file content (not meta-data) does get updated with the changes made to the disk.
Note, you may need to run Autopsy as an Administrator to detect all disks.
</li> </li>
<li>For logical files (a single file or folder of files), use the "Add" button to add one or more files or folders on your system to the case. Folders will be recursively added to the case.</li> <li>For logical files (a single file or folder of files), use the "Add" button to add one or more files or folders on your system to the case. Folders will be recursively added to the case.</li>
</ul> </ul>
<p> <p>
There are a couple of options in the wizard that will allow you to make the ingest process faster. There are a couple of options in the wizard that will allow you to make the ingest process faster.
These typically deal with deleted files. These typically deal with deleted files.
It will take longer if unallocated space is analyzed and the entire drive is searched for deleted files. It will take longer if unallocated space is analyzed and the entire drive is searched for deleted files.
In some scenarios, these recovery steps must be performed and in other scenarios these steps are not needed and instead fast results on the allocated files are needed. In some scenarios, these recovery steps must be performed and in other scenarios these steps are not needed and instead fast results on the allocated files are needed.
Use these options to control how long the analysis will take. Use these options to control how long the analysis will take.
</p> </p>
<p> <p>
Autopsy will start to analyze these data sources and add them to the case and internal database. While it is doing that, it will prompt you to configure the Ingest Modules. </p> Autopsy will start to analyze these data sources and add them to the case and internal database. While it is doing that, it will prompt you to configure the Ingest Modules. </p>
<h2>Ingest Modules</h2> <h2>Ingest Modules</h2>
<p> <p>
You will next be prompted to configure the Ingest Modules. You will next be prompted to configure the Ingest Modules.
Ingest modules will run in the background and perform specific tasks. Ingest modules will run in the background and perform specific tasks.
The Ingest Modules analyze files in a prioritized order so that files in a user's directory are analyzed before files in other folders. The Ingest Modules analyze files in a prioritized order so that files in a user's directory are analyzed before files in other folders.
Ingest modules can be developed by third-parties and here are some of the standard ingest modules that come with Autopsy: Ingest modules can be developed by third-parties and here are some of the standard ingest modules that come with Autopsy:
</p> </p>
<ul> <ul>
<li><strong>Recent Activity</strong> <li><strong>Recent Activity</strong>
extracts user activity as saved by web browsers and the OS. Also runs regripper on the registry hive. extracts user activity as saved by web browsers and the OS. Also runs regripper on the registry hive.
</li> </li>
<li><strong>Hash Lookup</strong> <li><strong>Hash Lookup</strong>
uses hash databases to ignore known files from the NIST NSRL and flag known bad files. uses hash databases to ignore known files from the NIST NSRL and flag known bad files.
Use the "Advanced" button to add and configure the hash databases to use during this process. Use the "Advanced" button to add and configure the hash databases to use during this process.
You will get updates on known bad file hits as the ingest occurs. You can later add hash databases You will get updates on known bad file hits as the ingest occurs. You can later add hash databases
via the Tools -&gt; Options menu in the main UI. You can download an index of the NIST NSRL from via the Tools -&gt; Options menu in the main UI. You can download an index of the NIST NSRL from
<a href="http://sourceforge.net/projects/autopsy/files/NSRL/">here</a>. <a href="http://sourceforge.net/projects/autopsy/files/NSRL/">here</a>.
</li> </li>
<li><strong>Keyword Search</strong> <li><strong>Keyword Search</strong>
uses keyword lists to identify files with specific words in them. uses keyword lists to identify files with specific words in them.
You can select the keyword lists to search for automatically and you can create new lists using the "Advanced" button. You can select the keyword lists to search for automatically and you can create new lists using the "Advanced" button.
Note that with keyword search, you can always conduct searches after ingest has finished. Note that with keyword search, you can always conduct searches after ingest has finished.
The keyword lists that you select during ingest will be searched for at periodic intervals and you will get the results in real-time. The keyword lists that you select during ingest will be searched for at periodic intervals and you will get the results in real-time.
You do not need to wait for all files to be indexed. You do not need to wait for all files to be indexed.
</li> </li>
<li><strong>Archive Extractor</strong> opens ZIP, RAR, and other archive formats and sends the files from those archive files back <li><strong>Archive Extractor</strong> opens ZIP, RAR, and other archive formats and sends the files from those archive files back
through the pipelines for analysis.</li> through the pipelines for analysis.</li>
<li><strong>Exif Image Parser</strong> extracts EXIF information from JPEG files and posts the results into the tree in the main UI.</li> <li><strong>Exif Image Parser</strong> extracts EXIF information from JPEG files and posts the results into the tree in the main UI.</li>
<li><strong>Thunderbird Parser</strong> Identifies Thunderbird MBOX files and extracts the e-mails from them.</li> <li><strong>Thunderbird Parser</strong> Identifies Thunderbird MBOX files and extracts the e-mails from them.</li>
</ul> </ul>
<p> <p>
When you select a module, you will have the option to change its settings. When you select a module, you will have the option to change its settings.
For example, you can configure which keyword search lists to use during ingest and which hash databases to use. For example, you can configure which keyword search lists to use during ingest and which hash databases to use.
Refer to the help system inside of Autopsy for details on configuring each module. Refer to the help system inside of Autopsy for details on configuring each module.
</p> </p>
<p> <p>
While ingest modules are running in the background, you will see a progress bar in the lower right. While ingest modules are running in the background, you will see a progress bar in the lower right.
You can use the GUI to review incoming results and perform other tasks while ingest is running.
</p> </p>
<h1>Analysis Basics</h1> <h1>Analysis Basics</h1>
<img src="screenshot.png" alt="Autopsy Screenshot" /> <img src="screenshot.png" alt="Autopsy Screenshot" />
<p>You will start all of your analysis techniques from the tree on the left.</p> <p>You will start all of your analysis techniques from the tree on the left.</p>
<ul> <ul>
<li>The Data Sources root node shows all data in the case.</li> <li>The Data Sources root node shows all data in the case.</li>
<ul> <ul>
<li>The individual image nodes show the file system structure of the disk images or local disks in the case.</li> <li>The individual image nodes show the file system structure of the disk images or local disks in the case.</li>
<li>The LogicalFileSet nodes show the logical files in the case.</li> <li>The LogicalFileSet nodes show the logical files in the case.</li>
</ul> </ul>
<li>The Views node shows the same data from a file type or timeline perspective.</li> <li>The Views node shows the same data from a file type or timeline perspective.</li>
<li>The Results node shows the output from the ingest modules.</li> <li>The Results node shows the output from the ingest modules.</li>
</ul> </ul>
<p> <p>
When you select a node from the tree on the left, a list of files will be shown in the upper right. When you select a node from the tree on the left, a list of files will be shown in the upper right.
You can use the Thumbnail view in the upper right to view the pictures. You can use the Thumbnail view in the upper right to view the pictures.
When you select a file from the upper right, its contents will be shown in the lower right. When you select a file from the upper right, its contents will be shown in the lower right.
You can use the tabs in the lower right to view the text of the file, an image, or the hex data. You can use the tabs in the lower right to view the text of the file, an image, or the hex data.
</p> </p>
<p> <p>
If you are viewing files from the Views and Results nodes, you can right-click on a file to go to its file system location. If you are viewing files from the Views and Results nodes, you can right-click on a file to go to its file system location.
This feature is useful to see what else the user stored in the same folder as the file that you are currently looking at. This feature is useful to see what else the user stored in the same folder as the file that you are currently looking at.
You can also right click on a file to extract it to the local system. You can also right click on a file to extract it to the local system.
</p> </p>
<p> <p>
If you want to search for single keywords, then you can use the search box in the upper right of the program. If you want to search for single keywords, then you can use the search box in the upper right of the program.
The results will be shown in a table in the upper right. The results will be shown in a table in the upper right.
</p> </p>
<p> You can tag (or bookmark) arbitrary files so that you can more quickly find them later or so that you can include them specifically in a report.</p> <p> You can tag (or bookmark) arbitrary files so that you can more quickly find them later or so that you can include them specifically in a report.</p>
<h2>Ingest Inbox</h2> <h2>Ingest Inbox</h2>
<p> <p>
As you are going through the results in the tree, the ingest modules are running in the background. As you are going through the results in the tree, the ingest modules are running in the background.
The results are shown in the tree as soon as the ingest modules find them and report them. The results are shown in the tree as soon as the ingest modules find them and report them.
</p> </p>
<p> <p>
The Ingest Inbox receives messages from the ingest modules as they find results. The Ingest Inbox receives messages from the ingest modules as they find results.
You can open the inbox to see what has been recently found. You can open the inbox to see what has been recently found.
It keeps track of what messages you have read. It keeps track of what messages you have read.
</p> </p>
<p> <p>
The intended use of this inbox is that you can focus on some data for a while and then check back on the inbox at a time that is convenient for you. The intended use of this inbox is that you can focus on some data for a while and then check back on the inbox at a time that is convenient for you.
You can then see what else was found while you were focused on the previous task. You can then see what else was found while you were focused on the previous task.
You may learn that a known bad file was found or that a file was found with a relevant keyword and then decide to focus on that for a while. You may learn that a known bad file was found or that a file was found with a relevant keyword and then decide to focus on that for a while.
</p> </p>
<p> When you select a message, you can then jump to the Results tree where more details can be found or jump to the file's location in the filesystem.</p> <p> When you select a message, you can then jump to the Results tree where more details can be found or jump to the file's location in the filesystem.</p>
<h2>Timeline (Beta)</h2> <h2>Timeline (Beta)</h2>
<p>There is a basic timeline view that you can access via the Tools -&gt; Make Timeline feature. This will take a few minutes to create the timeline for analysis. Its features are still in development.</p> <p>There is a basic timeline view that you can access via the Tools -&gt; Make Timeline feature. This will take a few minutes to create the timeline for analysis. Its features are still in development.</p>
<h1>Example Use Cases</h1> <h1>Example Use Cases</h1>
<p>In this section, we will provide examples of how to do common analysis tasks.</p> <p>In this section, we will provide examples of how to do common analysis tasks.</p>
<h2>Web Artifacts</h2> <h2>Web Artifacts</h2>
<p> <p>
If you want to view the user's recent web activity, make sure that the Recent Activity ingest module was enabled. If you want to view the user's recent web activity, make sure that the Recent Activity ingest module was enabled.
You can then go to the &quot;Results&quot; node in the tree on the left and then into the &quot;Extracted Data&quot; node. You can then go to the &quot;Results&quot; node in the tree on the left and then into the &quot;Extracted Data&quot; node.
There, you can find bookmarks, cookies, downloads, and history. There, you can find bookmarks, cookies, downloads, and history.
</p> </p>
<h2>Known Bad Hash Files</h2> <h2>Known Bad Hash Files</h2>
<p> <p>
If you want to see if the data source had known bad files, make sure that the Hash Lookup ingest module was enabled. If you want to see if the data source had known bad files, make sure that the Hash Lookup ingest module was enabled.
You can then view the &quot;Hashset Hits&quot; section in the &quot;Results&quot; area of the tree on the left. You can then view the &quot;Hashset Hits&quot; section in the &quot;Results&quot; area of the tree on the left.
Note that hash lookup can take a long time, so this section will be updated as long as the ingest process is occurring. Note that hash lookup can take a long time, so this section will be updated as long as the ingest process is occurring.
Use the Ingest Inbox to keep track of what known bad files were recently found. Use the Ingest Inbox to keep track of what known bad files were recently found.
</p> </p>
<p> <p>
When you find a known bad file in this interface, you may want to right click on the file to also view the file's original location. When you find a known bad file in this interface, you may want to right click on the file to also view the file's original location.
You may find additional files that are relevant and stored in the same folder as this file. You may find additional files that are relevant and stored in the same folder as this file.
</p> </p>
<h2>Media: Images and Videos</h2> <h2>Media: Images and Videos</h2>
<p> <p>
If you want to see all images and video on the disk image, then go to the &quot;Views&quot; section in the tree on the left and then &quot;File Types&quot;. If you want to see all images and video on the disk image, then go to the &quot;Views&quot; section in the tree on the left and then &quot;File Types&quot;.
Select either &quot;Images&quot; or &quot;Videos&quot;. Select either &quot;Images&quot; or &quot;Videos&quot;.
You can use the thumbnail option in the upper right to view thumbnails of all images. You can use the thumbnail option in the upper right to view thumbnails of all images.
</p> </p>
<ul class="note"> <ul class="note">
<li><strong>Note</strong>: <li><strong>Note</strong>:
We are working on making this more efficient when there are lots of images and we are working on the feature to display video thumbnails. We are working on making this more efficient when there are lots of images and we are working on the feature to display video thumbnails.
</li> </li>
</ul> </ul>
<p>You can select an image or video from the upper right and view the video or image in the lower right. Video will be played with sound.</p> <p>You can select an image or video from the upper right and view the video or image in the lower right. Video will be played with sound.</p>
<h1>Reporting</h1> <h1>Reporting</h1>
<p> <p>
A final report can be generated that will include all analysis results. A final report can be generated that will include all analysis results.
Use the &quot;Generate Report&quot; button to create this. Use the &quot;Generate Report&quot; button to create this.
It will create an HTML or XLS report in the Reports folder of the case folder. It will create an HTML or XLS report in the Reports folder of the case folder.
If you forgot the location of your case folder, you can determine it using the &quot;Case Properties&quot; option in the &quot;File&quot; menu. If you forgot the location of your case folder, you can determine it using the &quot;Case Properties&quot; option in the &quot;File&quot; menu.
There is also an option to export report files to a separate folder outside of the case folder. There is also an option to export report files to a separate folder outside of the case folder.
</p> </p>
<hr> <hr>
<p><i>Copyright &#169; 2012-2013 Basis Technology.</i></p> <p><i>Copyright &#169; 2012-2013 Basis Technology.</i></p>
<p><i> <p><i>
This work is licensed under a This work is licensed under a
<a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/us/">Creative Commons Attribution-Share Alike 3.0 United States License</a>. <a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/us/">Creative Commons Attribution-Share Alike 3.0 United States License</a>.
</i></p> </i></p>
</body> </body>
</html> </html>

View File

@ -1,30 +1,30 @@
<!-- @@@ MOVE THIS SOMEWHERE ELSE -- the directory tree package maybe?? --> <!-- @@@ MOVE THIS SOMEWHERE ELSE -- the directory tree package maybe?? -->
The component is by default registered with the ingest manager as an ingest event listener. The component is by default registered with the ingest manager as an ingest event listener.
The viewer first loads all the viewer-supported data currently in the blackboard when Autopsy starts. The viewer first loads all the viewer-supported data currently in the blackboard when Autopsy starts.
During the ingest process the viewer receives events from ingest modules During the ingest process the viewer receives events from ingest modules
(relayed by ingest manager) and it selectively refreshes parts of the tree providing real-time updates to the user. (relayed by ingest manager) and it selectively refreshes parts of the tree providing real-time updates to the user.
When ingest is completed, the viewer responds to the final ingest data event generated by the ingest manager, When ingest is completed, the viewer responds to the final ingest data event generated by the ingest manager,
and performs a final refresh of all viewer-supported data in the blackboard. and performs a final refresh of all viewer-supported data in the blackboard.
Node content support capabilities are registered in the node's Lookup. Node content support capabilities are registered in the node's Lookup.
<!-- @@@ This is too detailed for here, but maybe should be broken up and put into the sections on making a result viewer and such… <!-- @@@ This is too detailed for here, but maybe should be broken up and put into the sections on making a result viewer and such…
--> -->
\section design_data_flow Data Flow \section design_data_flow Data Flow
\subsection design_data_flow_create Creating Nodes in DataExplorer \subsection design_data_flow_create Creating Nodes in DataExplorer
Data flows between the UI zones using a NetBeans node. The DataExplorer modules create the NetBeans nodes. They query the SQLite database or do whatever they want to identify the set of files that are of interest. They create the NetBeans nodes based on Sleuthkit data model objects. See the org.sleuthkit.autopsy.datamodel package for more details on this. Data flows between the UI zones using a NetBeans node. The DataExplorer modules create the NetBeans nodes. They query the SQLite database or do whatever they want to identify the set of files that are of interest. They create the NetBeans nodes based on Sleuthkit data model objects. See the org.sleuthkit.autopsy.datamodel package for more details on this.
\subsection design_data_flow_toResult Getting Nodes to DataResult \subsection design_data_flow_toResult Getting Nodes to DataResult
Each DataExplorer TopComponent is responsible for creating its own DataResult TopComponent to display its results. It can choose to re-use the same TopComponent for multiple searches (as DirectoryTree does) or it can choose to make a new one each time (as FileSearch does). The setNode() method on the DataResult object is used to set the root node to display. A dummy root node must be created as the parent if a parent does not already exist. Each DataExplorer TopComponent is responsible for creating its own DataResult TopComponent to display its results. It can choose to re-use the same TopComponent for multiple searches (as DirectoryTree does) or it can choose to make a new one each time (as FileSearch does). The setNode() method on the DataResult object is used to set the root node to display. A dummy root node must be created as the parent if a parent does not already exist.
The DataExplorer is responsible for setting the double-click and right-click actions associated with the node. The default single click action is to pass data to DataContent. To override this, you must create a new DataResultViewer instance that overrides the propertyChange() method. The DataExplorer adds actions to wrapping the node in a FilterNode variant. The FilterNode then defines the actions for the node by overriding the getPreferredAction() and getActions() methods. As an example, org.sleuthkit.autopsy.directorytree.DataResultFilterNode and org.sleuthkit.autopsy.directorytree.DataResultFilterChildren wraps the nodes that are passed over by the DirectoryTree DataExplorer. The DataExplorer is responsible for setting the double-click and right-click actions associated with the node. The default single click action is to pass data to DataContent. To override this, you must create a new DataResultViewer instance that overrides the propertyChange() method. The DataExplorer adds actions to wrapping the node in a FilterNode variant. The FilterNode then defines the actions for the node by overriding the getPreferredAction() and getActions() methods. As an example, org.sleuthkit.autopsy.directorytree.DataResultFilterNode and org.sleuthkit.autopsy.directorytree.DataResultFilterChildren wraps the nodes that are passed over by the DirectoryTree DataExplorer.
DataResult can send data back to its DataExplorer by making a custom action that looks up its instance (DataExplorer.getInstance()). DataResult can send data back to its DataExplorer by making a custom action that looks up its instance (DataExplorer.getInstance()).

View File

@ -1,53 +1,53 @@
/*! \page workflow_page General Workflow and Design /*! \page workflow_page General Workflow and Design
\section design_overview Overview \section design_overview Overview
This section outlines the internal Autopsy design from the typical analysis work flow perspective. This section outlines the internal Autopsy design from the typical analysis work flow perspective.
This page is organized based on these phases: This page is organized based on these phases:
- A Case is created. - A Case is created.
- Images are added to the case and ingest modules are run. - Images are added to the case and ingest modules are run.
- Results are manually reviewed and searched. - Results are manually reviewed and searched.
- Reports are generated. - Reports are generated.
\section design_case Creating a Case \section design_case Creating a Case
The first step in Autopsy work flow is creating a case. This is done in the org.sleuthkit.autopsy.casemodule package (see \ref casemodule_overview for details). This module contains the wizards needed and deals with how to store the information. You should not need to make many modifications in this package. But, you will want to use the org.sleuthkit.autopsy.casemodule.Case object to access all data related to this case. The first step in Autopsy work flow is creating a case. This is done in the org.sleuthkit.autopsy.casemodule package (see \ref casemodule_overview for details). This module contains the wizards needed and deals with how to store the information. You should not need to make many modifications in this package. But, you will want to use the org.sleuthkit.autopsy.casemodule.Case object to access all data related to this case.
\section design_image Adding an Image and Running Ingest Modules \section design_image Adding an Image and Running Ingest Modules
After a case is created, one or more disk images can be added to the case. There is a wizard to guide that process and it is located in the org.sleuthkit.autopsy.casemodule package. Refer to the package section \ref casemodule_add_image for more details on the wizard. Most developers will not need to touch this code though. An important concept though is that adding an image to a case means that Autopsy uses The Sleuth Kit to enumerate all of the files in the file system and make a database entry for them in the embedded SQLite database that was created for the case. The database will be used for all further analysis. After a case is created, one or more disk images can be added to the case. There is a wizard to guide that process and it is located in the org.sleuthkit.autopsy.casemodule package. Refer to the package section \ref casemodule_add_image for more details on the wizard. Most developers will not need to touch this code though. An important concept though is that adding an image to a case means that Autopsy uses The Sleuth Kit to enumerate all of the files in the file system and make a database entry for them in the embedded SQLite database that was created for the case. The database will be used for all further analysis.
After an image has been added to the case, the user can select one or more ingest modules to be executed on the image. Ingest modules focus on a specific type of analysis task and run in the background. They either analyze the entire disk image or individual files. The user will see the results from the modules in the result tree and in the ingest inbox. After an image has been added to the case, the user can select one or more ingest modules to be executed on the image. Ingest modules focus on a specific type of analysis task and run in the background. They either analyze the entire disk image or individual files. The user will see the results from the modules in the result tree and in the ingest inbox.
The org.sleuthkit.autopsy.ingest package provides the basic infrastructure for the ingest module management. The org.sleuthkit.autopsy.ingest package provides the basic infrastructure for the ingest module management.
If you want to develop a module that analyzes drive data, then this is probably the type of module that you want to build. See \ref mod_ingest_page for more details on making an ingest module. If you want to develop a module that analyzes drive data, then this is probably the type of module that you want to build. See \ref mod_ingest_page for more details on making an ingest module.
\section design_view Viewing Results \section design_view Viewing Results
The UI has three main areas. The tree on the left-hand side, the result viewers in the upper right, and the content viewers in the lower right. Data passes between these areas by encapsulating them in Netbeans Node objects (see org.openide.nodes.Node). These allow Autopsy to generically handle all types of data. The org.sleuthkit.autopsy.datamodel package wraps the generic org.sleuthkit.datamodel Sleuth Kit objects as Netbeans Nodes. The UI has three main areas. The tree on the left-hand side, the result viewers in the upper right, and the content viewers in the lower right. Data passes between these areas by encapsulating them in Netbeans Node objects (see org.openide.nodes.Node). These allow Autopsy to generically handle all types of data. The org.sleuthkit.autopsy.datamodel package wraps the generic org.sleuthkit.datamodel Sleuth Kit objects as Netbeans Nodes.
Nodes are modeled in a parent-child hierarchy with other nodes. All data within a Case is represented in a hierarchy with the disk images being one level below the case and volumes and such below the image. Nodes are modeled in a parent-child hierarchy with other nodes. All data within a Case is represented in a hierarchy with the disk images being one level below the case and volumes and such below the image.
The tree on the left hand-side shows the analysis results. The tree on the left hand-side shows the analysis results.
Its contents are populated from the central database. Its contents are populated from the central database.
This is where you can browse the file system contents and see the results from the blackboard. This is where you can browse the file system contents and see the results from the blackboard.
<!-- @@@(see \ref blackboard_page). --> <!-- @@@(see \ref blackboard_page). -->
The tree is implemented in the org.sleuthkit.autopsy.directorytree package. The tree is implemented in the org.sleuthkit.autopsy.directorytree package.
The area in the upper right is the result viewer area. When a node is selected from the tree, the node and its children are sent to this area. This area is used to view a set of nodes. The viewer is itself a framework with modules that display the data in different layouts. For example, the standard version comes with a table viewer and a thumbnail viewer. Refer to \ref mod_result_page for details on building a data result module. The area in the upper right is the result viewer area. When a node is selected from the tree, the node and its children are sent to this area. This area is used to view a set of nodes. The viewer is itself a framework with modules that display the data in different layouts. For example, the standard version comes with a table viewer and a thumbnail viewer. Refer to \ref mod_result_page for details on building a data result module.
When an item is selected from the result viewer area, it is passed to the bottom right content viewers. It too is a framework with many modules that know how to show information about a specific file in different ways. For example, there are viewers that show the data in a hex dump format, extract the strings, and display pictures and movies. When an item is selected from the result viewer area, it is passed to the bottom right content viewers. It too is a framework with many modules that know how to show information about a specific file in different ways. For example, there are viewers that show the data in a hex dump format, extract the strings, and display pictures and movies.
See \ref mod_content_page for details on building new content viewers. See \ref mod_content_page for details on building new content viewers.
\section design_report Report generation \section design_report Report generation
When ingest is complete, the user can generate reports. When ingest is complete, the user can generate reports.
There is a reporting framework to enable many different formats. Autopsy currently comes with generic html, xml and Excel reports. See the org.sleuthkit.autopsy.report package for details on the framework and There is a reporting framework to enable many different formats. Autopsy currently comes with generic html, xml and Excel reports. See the org.sleuthkit.autopsy.report package for details on the framework and
\ref mod_report_page for details on building a new report module. \ref mod_report_page for details on building a new report module.
*/ */

BIN
thirdparty/crt/win32/crt.zip vendored Executable file

Binary file not shown.

BIN
thirdparty/crt/win64/crt.zip vendored Executable file

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.