Merge branch 'develop' of https://github.com/sleuthkit/autopsy into 4792-OtherOccurrences4thColumn
@@ -81,6 +81,7 @@ import org.sleuthkit.autopsy.casemodule.events.ContentTagDeletedEvent;
 import org.sleuthkit.autopsy.casemodule.events.DataSourceAddedEvent;
 import org.sleuthkit.autopsy.casemodule.events.DataSourceNameChangedEvent;
 import org.sleuthkit.autopsy.casemodule.events.ReportAddedEvent;
+import org.sleuthkit.autopsy.casemodule.multiusercases.CaseNodeData.CaseNodeDataException;
 import org.sleuthkit.autopsy.casemodule.multiusercases.CoordinationServiceUtils;
 import org.sleuthkit.autopsy.casemodule.services.Services;
 import org.sleuthkit.autopsy.commonpropertiessearch.CommonAttributeSearchAction;
@@ -1620,11 +1621,10 @@ public class Case {
         }
         if (getCaseType() == CaseType.MULTI_USER_CASE && !oldCaseDetails.getCaseDisplayName().equals(caseDetails.getCaseDisplayName())) {
             try {
-                CoordinationService coordinationService = CoordinationService.getInstance();
-                CaseNodeData nodeData = new CaseNodeData(coordinationService.getNodeData(CategoryNode.CASES, metadata.getCaseDirectory()));
+                CaseNodeData nodeData = CaseNodeData.readCaseNodeData(metadata.getCaseDirectory());
                 nodeData.setDisplayName(caseDetails.getCaseDisplayName());
-                coordinationService.setNodeData(CategoryNode.CASES, metadata.getCaseDirectory(), nodeData.toArray());
-            } catch (CoordinationServiceException | InterruptedException | IOException ex) {
+                CaseNodeData.writeCaseNodeData(nodeData);
+            } catch (CaseNodeDataException | InterruptedException ex) {
                 throw new CaseActionException(Bundle.Case_exceptionMessage_couldNotUpdateCaseNodeData(ex.getLocalizedMessage()), ex);
             }
         }
@@ -2005,10 +2005,8 @@ public class Case {
         if (getCaseType() == CaseType.MULTI_USER_CASE) {
             progressIndicator.progress(Bundle.Case_progressMessage_creatingCaseNodeData());
             try {
-                CoordinationService coordinationService = CoordinationService.getInstance();
-                CaseNodeData nodeData = new CaseNodeData(metadata);
-                coordinationService.setNodeData(CategoryNode.CASES, metadata.getCaseDirectory(), nodeData.toArray());
-            } catch (CoordinationServiceException | InterruptedException | ParseException | IOException ex) {
+                CaseNodeData.createCaseNodeData(metadata);
+            } catch (CaseNodeDataException | InterruptedException ex) {
                 throw new CaseActionException(Bundle.Case_exceptionMessage_couldNotCreateCaseNodeData(ex.getLocalizedMessage()), ex);
             }
         }
@@ -2033,27 +2031,10 @@ public class Case {
         if (getCaseType() == CaseType.MULTI_USER_CASE) {
             progressIndicator.progress(Bundle.Case_progressMessage_updatingCaseNodeData());
             try {
-                CaseNodeData nodeData;
-                CoordinationService coordinationService = CoordinationService.getInstance();
-                byte[] nodeBytes = coordinationService.getNodeData(CategoryNode.CASES, metadata.getCaseDirectory());
-                if (nodeBytes != null && nodeBytes.length > 0) {
-                    /*
-                     * Update the last access date in the coordination service
-                     * node data for the case.
-                     */
-                    nodeData = new CaseNodeData(nodeBytes);
-                    nodeData.setLastAccessDate(new Date());
-                } else {
-                    /*
-                     * This is a "legacy" case with no data stored in its case
-                     * directory coordination service node yet, or the node is
-                     * empty due to some error, so create the coordination
-                     * service node data from the case metadata.
-                     */
-                    nodeData = new CaseNodeData(metadata);
-                }
-                coordinationService.setNodeData(CategoryNode.CASES, metadata.getCaseDirectory(), nodeData.toArray());
-            } catch (CoordinationServiceException | InterruptedException | ParseException | IOException ex) {
+                CaseNodeData nodeData = CaseNodeData.readCaseNodeData(metadata.getCaseDirectory());
+                nodeData.setLastAccessDate(new Date());
+                CaseNodeData.writeCaseNodeData(nodeData);
+            } catch (CaseNodeDataException | InterruptedException ex) {
                 throw new CaseActionException(Bundle.Case_exceptionMessage_couldNotUpdateCaseNodeData(ex.getLocalizedMessage()), ex);
             }
         }
@@ -2633,9 +2614,8 @@ public class Case {
 
         progressIndicator.progress(Bundle.Case_progressMessage_fetchingCoordSvcNodeData());
         try {
-            byte[] nodeBytes = coordinationService.getNodeData(CoordinationService.CategoryNode.CASES, metadata.getCaseDirectory());
-            caseNodeData = new CaseNodeData(nodeBytes);
-        } catch (CoordinationServiceException | InterruptedException | IOException ex) {
+            caseNodeData = CaseNodeData.readCaseNodeData(metadata.getCaseDirectory());
+        } catch (CaseNodeDataException | InterruptedException ex) {
             logger.log(Level.SEVERE, String.format("Failed to get coordination service node data %s (%s) in %s", metadata.getCaseDisplayName(), metadata.getCaseName(), metadata.getCaseDirectory()), ex); //NON-NLS
             throw new CaseActionException(Bundle.Case_exceptionMessage_failedToFetchCoordSvcNodeData(ex.getLocalizedMessage()));
         }
@@ -2899,9 +2879,8 @@ public class Case {
     private static void setDeletedItemFlag(CaseNodeData caseNodeData, CaseNodeData.DeletedFlags flag) throws InterruptedException {
         try {
             caseNodeData.setDeletedFlag(flag);
-            CoordinationService coordinationService = CoordinationService.getInstance();
-            coordinationService.setNodeData(CategoryNode.CASES, caseNodeData.getDirectory().toString(), caseNodeData.toArray());
-        } catch (IOException | CoordinationServiceException ex) {
+            CaseNodeData.writeCaseNodeData(caseNodeData);
+        } catch (CaseNodeDataException ex) {
             logger.log(Level.SEVERE, String.format("Error updating deleted item flag %s for %s (%s) in %s", flag.name(), caseNodeData.getDisplayName(), caseNodeData.getName(), caseNodeData.getDirectory()), ex);
         }
     }

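The Case.java hunks above all make the same substitution: direct CoordinationService calls plus hand-rolled byte-array conversion give way to the new static CaseNodeData helpers, and the exception surface narrows to CaseNodeDataException | InterruptedException. A minimal sketch of the resulting read-modify-write sequence (the touchCase method and its caller are hypothetical, not part of this diff):

    import org.sleuthkit.autopsy.casemodule.multiusercases.CaseNodeData;
    import org.sleuthkit.autopsy.casemodule.multiusercases.CaseNodeData.CaseNodeDataException;

    // Read the node data for a case directory, update one field, write it back.
    // readCaseNodeData() hides the coordination service access, corruption
    // recovery, and version upgrade that these hunks previously did inline.
    static void touchCase(String caseDirectory) throws CaseNodeDataException, InterruptedException {
        CaseNodeData nodeData = CaseNodeData.readCaseNodeData(caseDirectory);
        nodeData.setLastAccessDate(new java.util.Date());
        CaseNodeData.writeCaseNodeData(nodeData);
    }
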
@@ -22,25 +22,33 @@ import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
 import java.io.DataInputStream;
 import java.io.DataOutputStream;
+import java.io.File;
 import java.io.IOException;
 import java.nio.file.Path;
 import java.nio.file.Paths;
 import java.text.ParseException;
 import java.util.Date;
+import java.util.logging.Level;
 import org.sleuthkit.autopsy.casemodule.CaseMetadata;
+import org.sleuthkit.autopsy.casemodule.CaseMetadata.CaseMetadataException;
+import org.sleuthkit.autopsy.coordinationservice.CoordinationService;
+import org.sleuthkit.autopsy.coordinationservice.CoordinationService.CoordinationServiceException;
+import org.sleuthkit.autopsy.coreutils.Logger;
 
 /**
- * An object that converts data for a case directory lock coordination service
- * node to and from byte arrays.
+ * Case data stored in a case directory coordination service node.
  */
 public final class CaseNodeData {
 
-    private static final int CURRENT_VERSION = 1;
+    private static final int MAJOR_VERSION = 2;
+    private static final int MINOR_VERSION = 0;
+    private static final Logger logger = Logger.getLogger(CaseNodeData.class.getName());
 
     /*
-     * Version 0 fields.
+     * Version 0 fields. Note that version 0 node data was only written to the
+     * coordination service node if an auto ingest job error occurred.
      */
-    private final int version;
+    private int version;
     private boolean errorsOccurred;
 
     /*
@@ -53,28 +61,191 @@ public final class CaseNodeData {
     private String displayName;
     private short deletedItemFlags;
 
-    /**
-     * Gets the current version of the case directory lock coordination service
-     * node data.
-     *
-     * @return The version number.
+    /*
+     * Version 2 fields.
      */
-    public static int getCurrentVersion() {
-        return CaseNodeData.CURRENT_VERSION;
+    private int minorVersion;
+
+    /**
+     * Creates case node data from the metadata for a case and writes it to the
+     * appropriate case directory coordination service node, which must already
+     * exist.
+     *
+     * @param metadata The case metadata.
+     *
+     * @return The case node data that was written to the coordination service
+     *         node.
+     *
+     * @throws CaseNodeDataException If there is an error creating or writing
+     *                               the case node data.
+     * @throws InterruptedException  If the current thread is interrupted while
+     *                               waiting for the coordination service.
+     */
+    public static CaseNodeData createCaseNodeData(final CaseMetadata metadata) throws CaseNodeDataException, InterruptedException {
+        try {
+            final CaseNodeData nodeData = new CaseNodeData(metadata);
+            CoordinationService.getInstance().setNodeData(CoordinationService.CategoryNode.CASES, nodeData.getDirectory().toString(), nodeData.toArray());
+            return nodeData;
+
+        } catch (ParseException | IOException | CoordinationServiceException ex) {
+            throw new CaseNodeDataException(String.format("Error creating case node data for coordination service node with path %s", metadata.getCaseDirectory().toUpperCase()), ex); //NON-NLS
+        }
     }
 
     /**
-     * Uses a CaseMetadata object to construct an object that converts data for
-     * a case directory lock coordination service node to and from byte arrays.
+     * Reads case data from a case directory coordination service node. If the
+     * data is missing, corrupted, or from an older version of the software, an
+     * attempt is made to remedy the situation using the case metadata.
      *
+     * @param nodePath The case directory coordination service node path.
+     *
+     * @return The case node data.
+     *
+     * @throws CaseNodeDataException If there is an error reading or writing the
+     *                               case node data.
+     * @throws InterruptedException  If the current thread is interrupted while
+     *                               waiting for the coordination service.
+     */
+    public static CaseNodeData readCaseNodeData(String nodePath) throws CaseNodeDataException, InterruptedException {
+        try {
+            CaseNodeData nodeData;
+            final byte[] nodeBytes = CoordinationService.getInstance().getNodeData(CoordinationService.CategoryNode.CASES, nodePath);
+            if (nodeBytes != null && nodeBytes.length > 0) {
+                try {
+                    nodeData = new CaseNodeData(nodeBytes);
+                } catch (IOException ex) {
+                    /*
+                     * The existing case node data is corrupted.
+                     */
+                    logger.log(Level.WARNING, String.format("Error reading node data for coordination service node with path %s, will attempt to replace it", nodePath.toUpperCase()), ex); //NON-NLS
+                    final CaseMetadata metadata = getCaseMetadata(nodePath);
+                    nodeData = createCaseNodeData(metadata);
+                    logger.log(Level.INFO, String.format("Replaced corrupt node data for coordination service node with path %s", nodePath.toUpperCase())); //NON-NLS
+                }
+            } else {
+                /*
+                 * The case node data is missing. Version 0 node data was only
+                 * written to the coordination service node if an auto ingest
+                 * job error occurred.
+                 */
+                logger.log(Level.INFO, String.format("Missing node data for coordination service node with path %s, will attempt to create it", nodePath.toUpperCase())); //NON-NLS
+                final CaseMetadata metadata = getCaseMetadata(nodePath);
+                nodeData = createCaseNodeData(metadata);
+                logger.log(Level.INFO, String.format("Created node data for coordination service node with path %s", nodePath.toUpperCase())); //NON-NLS
+            }
+            if (nodeData.getVersion() < CaseNodeData.MAJOR_VERSION) {
+                nodeData = upgradeCaseNodeData(nodePath, nodeData);
+            }
+            return nodeData;
+
+        } catch (CaseNodeDataException | CaseMetadataException | ParseException | IOException | CoordinationServiceException ex) {
+            throw new CaseNodeDataException(String.format("Error reading/writing node data coordination service node with path %s", nodePath.toUpperCase()), ex); //NON-NLS
+        }
+    }
+
+    /**
+     * Writes case data to a case directory coordination service node. Obtain
+     * the case data to be updated and written by calling createCaseNodeData()
+     * or readCaseNodeData().
+     *
+     * @param nodeData The case node data.
+     *
+     * @throws CaseNodeDataException If there is an error writing the case node
+     *                               data.
+     * @throws InterruptedException  If the current thread is interrupted while
+     *                               waiting for the coordination service.
+     */
+    public static void writeCaseNodeData(CaseNodeData nodeData) throws CaseNodeDataException, InterruptedException {
+        try {
+            CoordinationService.getInstance().setNodeData(CoordinationService.CategoryNode.CASES, nodeData.getDirectory().toString(), nodeData.toArray());
+
+        } catch (IOException | CoordinationServiceException ex) {
+            throw new CaseNodeDataException(String.format("Error writing node data coordination service node with path %s", nodeData.getDirectory().toString().toUpperCase()), ex); //NON-NLS
+        }
+    }
+
+    /**
+     * Upgrades older versions of node data to the current version and writes
+     * the data back to the case directory coordination service node.
+     *
+     * @param nodePath    The case directory coordination service node path.
+     * @param oldNodeData The outdated node data.
+     *
+     * @return The updated node data.
+     *
+     * @throws CaseNodeDataException If the case meta data file or case
+     *                               directory do not exist.
+     * @throws CaseMetadataException If the case metadata cannot be read.
+     */
+    private static CaseNodeData upgradeCaseNodeData(String nodePath, CaseNodeData oldNodeData) throws CaseNodeDataException, CaseMetadataException, ParseException, IOException, CoordinationServiceException, InterruptedException {
+        CaseNodeData nodeData;
+        switch (oldNodeData.getVersion()) {
+            case 0:
+                /*
+                 * Version 0 node data consisted of only the version number and
+                 * the errors occurred flag and was only written when an auto
+                 * ingest job error occurred. To upgrade from version 0, the
+                 * version 1 fields need to be set from the case metadata and
+                 * the errors occurred flag needs to be carried forward. Note
+                 * that the last accessed date gets advanced to now, since it is
+                 * otherwise unknown.
+                 */
+                final CaseMetadata metadata = getCaseMetadata(nodePath);
+                nodeData = new CaseNodeData(metadata);
+                nodeData.setErrorsOccurred(oldNodeData.getErrorsOccurred());
+                break;
+            case 1:
+                /*
+                 * Version 1 node data did not have a minor version number
+                 * field.
+                 */
+                oldNodeData.setMinorVersion(MINOR_VERSION);
+                nodeData = oldNodeData;
+                break;
+            default:
+                nodeData = oldNodeData;
+                break;
+        }
+        writeCaseNodeData(nodeData);
+        return nodeData;
+    }
+
+    /**
+     * Gets the metadata for a case.
+     *
+     * @param nodePath The case directory coordination service node path for the
+     *                 case.
+     *
+     * @return The case metadata.
+     *
+     * @throws CaseNodeDataException If the case metadata file or the case
+     *                               directory does not exist.
+     * @throws CaseMetadataException If the case metadata cannot be read.
+     */
+    private static CaseMetadata getCaseMetadata(String nodePath) throws CaseNodeDataException, CaseMetadataException {
+        final Path caseDirectoryPath = Paths.get(nodePath);
+        final File caseDirectory = caseDirectoryPath.toFile();
+        if (!caseDirectory.exists()) {
+            throw new CaseNodeDataException("Case directory does not exist"); // NON-NLS
+        }
+        final Path metadataFilePath = CaseMetadata.getCaseMetadataFilePath(caseDirectoryPath);
+        if (metadataFilePath == null) {
+            throw new CaseNodeDataException("Case meta data file does not exist"); // NON-NLS
+        }
+        return new CaseMetadata(metadataFilePath);
+    }
+
+    /**
+     * Uses case metadata to construct the case data to store in a case
+     * directory coordination service node.
+     *
     * @param metadata The case meta data.
     *
-     * @throws java.text.ParseException If there is an error parsing dates from
-     *                                  string representations of dates in the
-     *                                  meta data.
+     * @throws ParseException If there is an error parsing dates from string
+     *                        representations of dates in the meta data.
      */
-    public CaseNodeData(CaseMetadata metadata) throws ParseException {
-        this.version = CURRENT_VERSION;
+    private CaseNodeData(CaseMetadata metadata) throws ParseException {
+        this.version = MAJOR_VERSION;
         this.errorsOccurred = false;
         this.directory = Paths.get(metadata.getCaseDirectory());
         this.createDate = CaseMetadata.getDateFormat().parse(metadata.getCreatedDate());
@@ -82,51 +253,64 @@ public final class CaseNodeData {
         this.name = metadata.getCaseName();
         this.displayName = metadata.getCaseDisplayName();
         this.deletedItemFlags = 0;
+        this.minorVersion = MINOR_VERSION;
     }
 
     /**
-     * Uses coordination service node data to construct an object that converts
-     * data for a case directory lock coordination service node to and from byte
-     * arrays.
+     * Uses the raw bytes from a case directory coordination service node to
+     * construct a case node data object.
     *
     * @param nodeData The raw bytes received from the coordination service.
     *
     * @throws IOException If there is an error reading the node data.
     */
-    public CaseNodeData(byte[] nodeData) throws IOException {
+    private CaseNodeData(byte[] nodeData) throws IOException {
        if (nodeData == null || nodeData.length == 0) {
            throw new IOException(null == nodeData ? "Null node data byte array" : "Zero-length node data byte array");
        }
-        DataInputStream inputStream = new DataInputStream(new ByteArrayInputStream(nodeData));
-        this.version = inputStream.readInt();
-        if (this.version > 0) {
-            this.errorsOccurred = inputStream.readBoolean();
-        } else {
-            short legacyErrorsOccurred = inputStream.readByte();
-            this.errorsOccurred = (legacyErrorsOccurred < 0);
-        }
-        if (this.version > 0) {
-            this.directory = Paths.get(inputStream.readUTF());
-            this.createDate = new Date(inputStream.readLong());
-            this.lastAccessDate = new Date(inputStream.readLong());
-            this.name = inputStream.readUTF();
-            this.displayName = inputStream.readUTF();
-            this.deletedItemFlags = inputStream.readShort();
+        try (ByteArrayInputStream byteStream = new ByteArrayInputStream(nodeData); DataInputStream inputStream = new DataInputStream(byteStream)) {
+            this.version = inputStream.readInt();
+            if (this.version == 1) {
+                this.errorsOccurred = inputStream.readBoolean();
+            } else {
+                byte errorsOccurredByte = inputStream.readByte();
+                this.errorsOccurred = (errorsOccurredByte < 0);
+            }
+            if (this.version > 0) {
+                this.directory = Paths.get(inputStream.readUTF());
+                this.createDate = new Date(inputStream.readLong());
+                this.lastAccessDate = new Date(inputStream.readLong());
+                this.name = inputStream.readUTF();
+                this.displayName = inputStream.readUTF();
+                this.deletedItemFlags = inputStream.readShort();
             }
+            if (this.version > 1) {
+                this.minorVersion = inputStream.readInt();
+            }
+        }
     }
 
     /**
-     * Gets the node data version number of this node.
+     * Gets the version number of this node data.
     *
     * @return The version number.
     */
-    public int getVersion() {
+    private int getVersion() {
         return this.version;
     }
 
+    /**
+     * Sets the minor version number of this node data.
+     *
+     * @param minorVersion The version number.
+     */
+    private void setMinorVersion(int minorVersion) {
+        this.minorVersion = minorVersion;
+    }
+
     /**
      * Gets whether or not any errors occurred during the processing of any auto
-     * ingest job for the case represented by this node data.
+     * ingest job for the case.
     *
     * @return True or false.
     */
@@ -136,7 +320,7 @@ public final class CaseNodeData {
 
     /**
      * Sets whether or not any errors occurred during the processing of any auto
-     * ingest job for the case represented by this node data.
+     * ingest job for the case.
      *
      * @param errorsOccurred True or false.
      */
@@ -145,8 +329,7 @@ public final class CaseNodeData {
     }
 
     /**
-     * Gets the path of the case directory of the case represented by this node
-     * data.
+     * Gets the path of the case directory.
      *
      * @return The case directory path.
      */
@@ -155,17 +338,7 @@ public final class CaseNodeData {
     }
 
     /**
-     * Sets the path of the case directory of the case represented by this node
-     * data.
-     *
-     * @param caseDirectory The case directory path.
-     */
-    public void setDirectory(Path caseDirectory) {
-        this.directory = caseDirectory;
-    }
-
-    /**
-     * Gets the date the case represented by this node data was created.
+     * Gets the date the case was created.
     *
     * @return The create date.
     */
@@ -174,16 +347,7 @@ public final class CaseNodeData {
     }
 
     /**
-     * Sets the date the case represented by this node data was created.
-     *
-     * @param createDate The create date.
-     */
-    public void setCreateDate(Date createDate) {
-        this.createDate = new Date(createDate.getTime());
-    }
-
-    /**
-     * Gets the date the case represented by this node data last accessed.
+     * Gets the date the case was last accessed.
     *
     * @return The last access date.
     */
@@ -192,7 +356,7 @@ public final class CaseNodeData {
     }
 
     /**
-     * Sets the date the case represented by this node data was last accessed.
+     * Sets the date the case was last accessed.
     *
     * @param lastAccessDate The last access date.
     */
@@ -201,8 +365,7 @@ public final class CaseNodeData {
     }
 
     /**
-     * Gets the unique and immutable (user cannot change it) name of the case
-     * represented by this node data.
+     * Gets the unique and immutable name of the case.
     *
     * @return The case name.
     */
@@ -211,17 +374,7 @@ public final class CaseNodeData {
     }
 
     /**
-     * Sets the unique and immutable (user cannot change it) name of the case
-     * represented by this node data.
-     *
-     * @param name The case name.
-     */
-    public void setName(String name) {
-        this.name = name;
-    }
-
-    /**
-     * Gets the display name of the case represented by this node data.
+     * Gets the display name of the case.
     *
     * @return The case display name.
     */
@@ -230,7 +383,7 @@ public final class CaseNodeData {
     }
 
     /**
-     * Sets the display name of the case represented by this node data.
+     * Sets the display name of the case.
     *
     * @param displayName The case display name.
     */
@@ -239,19 +392,18 @@ public final class CaseNodeData {
     }
 
     /**
-     * Checks whether a deleted item flag is set for the case represented by
-     * this node data.
+     * Checks whether a given deleted item flag is set for the case.
      *
      * @param flag The flag to check.
      *
-     * @return
+     * @return True or false.
      */
     public boolean isDeletedFlagSet(DeletedFlags flag) {
         return (this.deletedItemFlags & flag.getValue()) == flag.getValue();
     }
 
     /**
-     * Sets a deleted item flag for the case represented by this node data.
+     * Sets a given deleted item flag.
     *
     * @param flag The flag to set.
     */
@@ -265,22 +417,24 @@ public final class CaseNodeData {
      *
      * @return The node data as a byte array.
      *
-     * @throws IOException If there is an error writing the node data.
+     * @throws IOException If there is an error writing the node data to the
+     *                     array.
      */
-    public byte[] toArray() throws IOException {
-        ByteArrayOutputStream byteStream = new ByteArrayOutputStream();
-        DataOutputStream outputStream = new DataOutputStream(byteStream);
-        outputStream.writeInt(this.version);
-        outputStream.writeBoolean(this.errorsOccurred);
-        outputStream.writeUTF(this.directory.toString());
-        outputStream.writeLong(this.createDate.getTime());
-        outputStream.writeLong(this.lastAccessDate.getTime());
-        outputStream.writeUTF(this.name);
-        outputStream.writeUTF(this.displayName);
-        outputStream.writeShort(this.deletedItemFlags);
-        outputStream.flush();
-        byteStream.flush();
-        return byteStream.toByteArray();
+    private byte[] toArray() throws IOException {
+        try (ByteArrayOutputStream byteStream = new ByteArrayOutputStream(); DataOutputStream outputStream = new DataOutputStream(byteStream)) {
+            outputStream.writeInt(this.version);
+            outputStream.writeByte((byte) (this.errorsOccurred ? 0x80 : 0));
+            outputStream.writeUTF(this.directory.toString());
+            outputStream.writeLong(this.createDate.getTime());
+            outputStream.writeLong(this.lastAccessDate.getTime());
+            outputStream.writeUTF(this.name);
+            outputStream.writeUTF(this.displayName);
+            outputStream.writeShort(this.deletedItemFlags);
+            outputStream.writeInt(this.minorVersion);
+            outputStream.flush();
+            byteStream.flush();
+            return byteStream.toByteArray();
+        }
     }
 
     /**
@@ -316,4 +470,34 @@ public final class CaseNodeData {
 
     }
 
+    /**
+     * Exception thrown when there is an error reading or writing case node
+     * data.
+     */
+    public static final class CaseNodeDataException extends Exception {
+
+        private static final long serialVersionUID = 1L;
+
+        /**
+         * Constructs an exception to throw when there is an error reading or
+         * writing case node data.
+         *
+         * @param message The exception message.
+         */
+        private CaseNodeDataException(String message) {
+            super(message);
+        }
+
+        /**
+         * Constructs an exception to throw when there is an error reading or
+         * writing case node data.
+         *
+         * @param message The exception message.
+         * @param cause   The cause of the exception.
+         */
+        private CaseNodeDataException(String message, Throwable cause) {
+            super(message, cause);
+        }
+    }
+
 }

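The new on-disk format is append-only: version 2 writes the minor version as a trailing int, so a version 1 reader simply stops early and the version 2 reader gates each optional read on the version it has already read. A self-contained sketch of the same technique (illustrative only; simplified relative to the CaseNodeData fields above):

    import java.io.ByteArrayInputStream;
    import java.io.DataInputStream;
    import java.io.IOException;

    // Append-only record versioning: new fields are written after the old ones,
    // and readers gate each optional read on the version number read first.
    final class VersionedRecord {

        int version;
        boolean errorsOccurred;
        int minorVersion; // only present in version 2+ byte arrays

        static VersionedRecord fromBytes(byte[] bytes) throws IOException {
            VersionedRecord rec = new VersionedRecord();
            try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes))) {
                rec.version = in.readInt();
                rec.errorsOccurred = in.readByte() < 0; // sign bit carries the flag
                if (rec.version > 1) {
                    rec.minorVersion = in.readInt();    // absent in older data
                }
            }
            return rec;
        }
    }
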
@@ -18,31 +18,28 @@
  */
 package org.sleuthkit.autopsy.casemodule.multiusercases;
 
-import java.io.File;
-import java.io.IOException;
-import java.nio.file.LinkOption;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.text.ParseException;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.logging.Level;
-import org.sleuthkit.autopsy.casemodule.CaseMetadata;
+import org.sleuthkit.autopsy.casemodule.multiusercases.CaseNodeData.CaseNodeDataException;
+import static org.sleuthkit.autopsy.casemodule.multiusercases.CoordinationServiceUtils.isCaseAutoIngestLogNodePath;
+import static org.sleuthkit.autopsy.casemodule.multiusercases.CoordinationServiceUtils.isCaseNameNodePath;
+import static org.sleuthkit.autopsy.casemodule.multiusercases.CoordinationServiceUtils.isCaseResourcesNodePath;
 import org.sleuthkit.autopsy.coordinationservice.CoordinationService;
 import org.sleuthkit.autopsy.coordinationservice.CoordinationService.CoordinationServiceException;
 import org.sleuthkit.autopsy.coreutils.Logger;
 
 /**
- * Queries the coordination service to collect the multi-user case node data
- * stored in the case directory lock ZooKeeper nodes.
+ * Collects the multi-user case node data stored in the case directory
+ * coordination service nodes.
  */
 final public class CaseNodeDataCollector {
 
     private static final Logger logger = Logger.getLogger(CaseNodeDataCollector.class.getName());
 
     /**
-     * Queries the coordination service to collect the multi-user case node data
-     * stored in the case directory lock ZooKeeper nodes.
+     * Collects the multi-user case node data stored in the case directory
+     * coordination service nodes.
      *
     * @return The node data for the multi-user cases known to the coordination
     *         service.
@@ -54,128 +51,30 @@ final public class CaseNodeDataCollector {
      *         service.
      */
     public static List<CaseNodeData> getNodeData() throws CoordinationServiceException, InterruptedException {
-        final List<CaseNodeData> cases = new ArrayList<>();
+        final List<CaseNodeData> nodeDataList = new ArrayList<>();
         final CoordinationService coordinationService = CoordinationService.getInstance();
-        final List<String> nodeList = coordinationService.getNodeList(CoordinationService.CategoryNode.CASES);
-        for (String nodeName : nodeList) {
-            if (CoordinationServiceUtils.isCaseNameNodePath(nodeName)
-                    || CoordinationServiceUtils.isCaseResourcesNodePath(nodeName)
-                    || CoordinationServiceUtils.isCaseAutoIngestLogNodePath(nodeName)) {
+        final List<String> nodePaths = coordinationService.getNodeList(CoordinationService.CategoryNode.CASES);
+        for (String nodePath : nodePaths) {
+            /*
+             * Skip the case name, case resources, and case auto ingest log
+             * coordination service nodes. They are not used to store case data.
+             */
+            if (isCaseNameNodePath(nodePath) || isCaseResourcesNodePath(nodePath) || isCaseAutoIngestLogNodePath(nodePath)) {
                 continue;
             }
 
             /*
-             * Get the data from the case directory lock node. This data may not
-             * exist or may exist only in an older version. If it is missing or
-             * incomplete, create or update it.
+             * Get the case node data from the case directory coordination service node.
             */
             try {
-                CaseNodeData nodeData;
-                final byte[] nodeBytes = coordinationService.getNodeData(CoordinationService.CategoryNode.CASES, nodeName);
-                if (nodeBytes != null && nodeBytes.length > 0) {
-                    nodeData = new CaseNodeData(nodeBytes);
-                    if (nodeData.getVersion() < CaseNodeData.getCurrentVersion()) {
-                        nodeData = updateNodeData(nodeName, nodeData);
-                    }
-                } else {
-                    nodeData = updateNodeData(nodeName, null);
-                }
-                if (nodeData != null) {
-                    cases.add(nodeData);
-                }
-
-            } catch (CoordinationService.CoordinationServiceException | InterruptedException | IOException | ParseException | CaseMetadata.CaseMetadataException ex) {
-                logger.log(Level.SEVERE, String.format("Error getting coordination service node data for %s", nodeName), ex);
+                final CaseNodeData nodeData = CaseNodeData.readCaseNodeData(nodePath);
+                nodeDataList.add(nodeData);
+            } catch (CaseNodeDataException | InterruptedException ex) {
+                logger.log(Level.WARNING, String.format("Error reading case node data from %s", nodePath), ex);
             }
 
         }
-        return cases;
-    }
-
-    /**
-     * Updates the case directory lock coordination service node data for a
-     * case.
-     *
-     * @param nodeName    The coordination service node name, i.e., the case
-     *                    directory path.
-     * @param oldNodeData The node data to be updated.
-     *
-     * @return A CaseNodedata object or null if the coordination service node is
-     *         an "orphan" with no corresponding case directry.
-     *
-     * @throws IOException                  If there is an error writing the
-     *                                      node data to a byte array.
-     * @throws CaseMetadataException        If there is an error reading the
-     *                                      case metadata file.
-     * @throws ParseException               If there is an error parsing a date
-     *                                      from the case metadata file.
-     * @throws CoordinationServiceException If there is an error interacting
-     *                                      with the coordination service.
-     * @throws InterruptedException         If a coordination service operation
-     *                                      is interrupted.
-     */
-    private static CaseNodeData updateNodeData(String nodeName, CaseNodeData oldNodeData) throws IOException, CaseMetadata.CaseMetadataException, ParseException, CoordinationService.CoordinationServiceException, InterruptedException {
-        Path caseDirectoryPath = Paths.get(nodeName).toRealPath(LinkOption.NOFOLLOW_LINKS);
-        File caseDirectory = caseDirectoryPath.toFile();
-        if (!caseDirectory.exists()) {
-            logger.log(Level.WARNING, String.format("Found orphan coordination service node %s, attempting clean up", caseDirectoryPath));
-            deleteLockNodes(CoordinationService.getInstance(), caseDirectoryPath);
-            return null;
-        }
-
-        CaseNodeData nodeData = null;
-        if (oldNodeData == null || oldNodeData.getVersion() == 0) {
-            File[] files = caseDirectory.listFiles();
-            for (File file : files) {
-                String name = file.getName().toLowerCase();
-                if (name.endsWith(CaseMetadata.getFileExtension())) {
-                    CaseMetadata metadata = new CaseMetadata(Paths.get(file.getAbsolutePath()));
-                    nodeData = new CaseNodeData(metadata);
-                    if (oldNodeData != null) {
-                        /*
-                         * Version 0 case node data was only written if errors
-                         * occurred during an auto ingest job.
-                         */
-                        nodeData.setErrorsOccurred(true);
-                    }
-                    break;
-                }
-            }
-        }
-
-        if (nodeData != null) {
-            CoordinationService.getInstance().setNodeData(CoordinationService.CategoryNode.CASES, nodeName, nodeData.toArray());
-        }
-
-        return nodeData;
-    }
-
-    /**
-     * Attempts to delete the coordination service lock nodes for a case,
-     * logging any failures.
-     *
-     * @param coordinationService The coordination service.
-     * @param caseDirectoryPath   The case directory path.
-     */
-    private static void deleteLockNodes(CoordinationService coordinationService, Path caseDirectoryPath) {
-        deleteCoordinationServiceNode(coordinationService, CoordinationServiceUtils.getCaseResourcesNodePath(caseDirectoryPath));
-        deleteCoordinationServiceNode(coordinationService, CoordinationServiceUtils.getCaseAutoIngestLogNodePath(caseDirectoryPath));
-        deleteCoordinationServiceNode(coordinationService, CoordinationServiceUtils.getCaseDirectoryNodePath(caseDirectoryPath));
-        deleteCoordinationServiceNode(coordinationService, CoordinationServiceUtils.getCaseNameNodePath(caseDirectoryPath));
-    }
-
-    /**
-     * Attempts to delete a coordination service node, logging failure.
-     *
-     * @param coordinationService The coordination service.
-     * @param nodeName            A node name.
-     */
-    private static void deleteCoordinationServiceNode(CoordinationService coordinationService, String nodeName) {
-        try {
-            coordinationService.deleteNode(CoordinationService.CategoryNode.CASES, nodeName);
-        } catch (CoordinationService.CoordinationServiceException | InterruptedException ex) {
-            logger.log(Level.WARNING, String.format("Error deleting coordination service node %s", nodeName), ex);
-        }
+        return nodeDataList;
     }
 
     /**
@@ -183,5 +82,5 @@ final public class CaseNodeDataCollector {
      */
     private CaseNodeDataCollector() {
     }
-
+
 }

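With the repair logic moved into CaseNodeData.readCaseNodeData(), the collector is reduced to a read-only loop, and nodes that fail to read are logged and skipped instead of aborting the whole query. Typical use (hypothetical caller, not part of the diff):

    import java.util.List;
    import org.sleuthkit.autopsy.casemodule.multiusercases.CaseNodeData;
    import org.sleuthkit.autopsy.casemodule.multiusercases.CaseNodeDataCollector;
    import org.sleuthkit.autopsy.coordinationservice.CoordinationService.CoordinationServiceException;

    // Print the display name and directory of every known multi-user case.
    static void listCases() throws CoordinationServiceException, InterruptedException {
        List<CaseNodeData> cases = CaseNodeDataCollector.getNodeData();
        for (CaseNodeData nodeData : cases) {
            System.out.println(nodeData.getDisplayName() + " @ " + nodeData.getDirectory());
        }
    }
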
@@ -170,13 +170,13 @@ public class MediaPlayerPanel extends JPanel implements MediaFileViewer.MediaViewPanel {
             "GstVideoPanel.cannotProcFile.err");
 
     //Video playback components
-    private PlayBin gstPlayBin;
+    private volatile PlayBin gstPlayBin;
     private JavaFxAppSink fxAppSink;
-    private JFXPanel fxPanel;
-    private volatile boolean livePlayBin;
-    private volatile boolean hasError;
+    private Bus.ERROR errorListener;
+    private Bus.STATE_CHANGED stateChangeListener;
+    private Bus.EOS endOfStreamListener;
 
-    //When a video is playing, update the UI every 75 ms
+    //Update progress bar and time label during video playback
     private final Timer timer = new Timer(75, new VideoPanelUpdater());
     private static final int PROGRESS_SLIDER_SIZE = 2000;
 
@@ -196,7 +196,6 @@ public class MediaPlayerPanel extends JPanel implements MediaFileViewer.MediaViewPanel {
         progressSlider.setMinimum(0);
         progressSlider.setMaximum(PROGRESS_SLIDER_SIZE);
         progressSlider.setValue(0);
 
-        //Manage the gstreamer video position when a user is dragging the slider in the panel.
         progressSlider.addChangeListener(new ChangeListener() {
             @Override
@@ -212,24 +211,49 @@ public class MediaPlayerPanel extends JPanel implements MediaFileViewer.MediaViewPanel {
                 }
             }
         });
 
         //Manage the audio level when the user is adjusting the volumn slider
         audioSlider.addChangeListener((ChangeEvent event) -> {
             if (audioSlider.getValueIsAdjusting()) {
-                int audioPercent = audioSlider.getValue() * 2;
-                gstPlayBin.setVolumePercent(audioPercent);
+                double audioPercent = (audioSlider.getValue() * 2.0) / 100.0;
+                gstPlayBin.setVolume(audioPercent);
             }
         });
 
-        videoPanel.setLayout(new BoxLayout(videoPanel, BoxLayout.Y_AXIS));
-        fxPanel = new JFXPanel();
-        videoPanel.add(fxPanel);//add jfx ui to JPanel
+        errorListener = new Bus.ERROR() {
+            @Override
+            public void errorMessage(GstObject go, int i, String string) {
+                enableComponents(false);
+                infoLabel.setText(String.format(
+                        "<html><font color='red'>%s</font></html>",
+                        MEDIA_PLAYER_ERROR_STRING));
+                timer.stop();
+            }
+        };
+        stateChangeListener = new Bus.STATE_CHANGED() {
+            @Override
+            public void stateChanged(GstObject go, State oldState, State currentState, State pendingState) {
+                if (State.PLAYING.equals(currentState)) {
+                    playButton.setText("||");
+                } else {
+                    playButton.setText("►");
+                }
+            }
+        };
+        endOfStreamListener = new Bus.EOS() {
+            @Override
+            public void endOfStream(GstObject go) {
+                gstPlayBin.seek(ClockTime.ZERO);
+                progressSlider.setValue(0);
+                /**
+                 * Keep the video from automatically playing
+                 */
+                Gst.getExecutor().submit(() -> gstPlayBin.pause());
+            }
+        };
     }
 
-    private void initGst() throws GstException, UnsatisfiedLinkError {
-        logger.log(Level.INFO, "Attempting initializing of gstreamer for video/audio viewing"); //NON-NLS
-        Gst.init();
-        gstPlayBin = new PlayBin("VideoPlayer");
-    }
-
     /**
@@ -241,8 +265,6 @@ public class MediaPlayerPanel extends JPanel implements MediaFileViewer.MediaViewPanel {
     @NbBundle.Messages({"GstVideoPanel.noOpenCase.errMsg=No open case available."})
     void loadFile(final AbstractFile file) {
-        //Ensure everything is back in the initial state
-        reset();
 
         infoLabel.setText("");
         if (file.isDirNameFlagSet(TskData.TSK_FS_NAME_FLAG_ENUM.UNALLOC)) {
             infoLabel.setText(NbBundle.getMessage(this.getClass(), "GstVideoPanel.setupVideo.infoLabel.text"));
@@ -252,7 +274,7 @@ public class MediaPlayerPanel extends JPanel implements MediaFileViewer.MediaViewPanel {
         try {
             //Pushing off initialization to the background
             extractMediaWorker = new ExtractMedia(file, VideoUtils.getVideoFileInTempDir(file));
-           extractMediaWorker.execute();
+            extractMediaWorker.execute();
        } catch (NoCurrentCaseException ex) {
             logger.log(Level.SEVERE, "Exception while getting open case.", ex); //NON-NLS
             infoLabel.setText(String.format("<html><font color='red'>%s</font></html>", Bundle.GstVideoPanel_noOpenCase_errMsg()));
@@ -261,14 +283,14 @@ public class MediaPlayerPanel extends JPanel implements MediaFileViewer.MediaViewPanel {
     }
 
     /**
-     * Assume no support on a fresh reset until we begin loading the file
-     * for play.
+     * Assume no support on a fresh reset until we begin loading the file for
+     * play.
      */
     @NbBundle.Messages({
         "MediaPlayerPanel.noSupport=File not supported."
     })
     void resetComponents() {
-        progressLabel.setText(String.format("%s/%s", Bundle.MediaPlayerPanel_unknownTime(),
+        progressLabel.setText(String.format("%s/%s", Bundle.MediaPlayerPanel_unknownTime(),
                 Bundle.MediaPlayerPanel_unknownTime()));
         infoLabel.setText(Bundle.MediaPlayerPanel_noSupport());
         progressSlider.setValue(0);
@@ -278,39 +300,24 @@ public class MediaPlayerPanel extends JPanel implements MediaFileViewer.MediaViewPanel {
      * Return this panel to its initial state.
      */
     void reset() {
-        timer.stop();
-        if(livePlayBin && !hasError) {
-            gstPlayBin.stop();
-        }
-
-        hasError = false;
-        livePlayBin = false;
-        gstPlayBin.dispose();
-
-        if (fxAppSink != null) {
-            fxAppSink.clear();
-        }
-
-        videoPanel.removeAll();
-
         if (extractMediaWorker != null) {
             extractMediaWorker.cancel(true);
         }
 
+        timer.stop();
+        if (gstPlayBin != null) {
+            gstPlayBin.stop();
+            gstPlayBin.getBus().disconnect(endOfStreamListener);
+            gstPlayBin.getBus().disconnect(stateChangeListener);
+            gstPlayBin.getBus().disconnect(errorListener);
+            gstPlayBin.dispose();
+            fxAppSink.clear();
+            gstPlayBin = null;
+        }
+        videoPanel.removeAll();
         resetComponents();
         enableComponents(false);
     }
-
-    /**
-     * If the node has been reset but messages from the previous PlayBin are
-     * still firing, ignore them.
-     */
-    synchronized void setLabelText(String msg) {
-        if (livePlayBin) {
-            infoLabel.setText(msg);
-        }
-    }
 
     private void enableComponents(boolean isEnabled) {
         playButton.setEnabled(isEnabled);
         progressSlider.setEnabled(isEnabled);
@@ -442,31 +449,37 @@ public class MediaPlayerPanel extends JPanel implements MediaFileViewer.MediaViewPanel {
         protected void done() {
             try {
                 super.get();
 
-                //Video is ready for playback. Clean up previous components and create new ones
-
+                if(this.isCancelled()) {
+                    return;
+                }
+                //Video is ready for playback. Create new components
+                gstPlayBin = new PlayBin("VideoPlayer", tempFile.toURI());
-                //Create a custom AppSink that hooks into JavaFx panels for video display
-                fxPanel = new JFXPanel();
+                //Configure event handling
+                Bus playBinBus = gstPlayBin.getBus();
+                playBinBus.connect(endOfStreamListener);
+                playBinBus.connect(stateChangeListener);
+                playBinBus.connect(errorListener);
+
+                if(this.isCancelled()) {
+                    return;
+                }
+
+                JFXPanel fxPanel = new JFXPanel();
+                videoPanel.removeAll();
+                videoPanel.setLayout(new BoxLayout(videoPanel, BoxLayout.Y_AXIS));
+                videoPanel.add(fxPanel);
                 fxAppSink = new JavaFxAppSink("JavaFxAppSink", fxPanel);
                 gstPlayBin.setVideoSink(fxAppSink);
 
-                videoPanel.setLayout(new BoxLayout(videoPanel, BoxLayout.Y_AXIS));
-                videoPanel.add(fxPanel);//add jfx ui to JPanel
+                if(this.isCancelled()) {
+                    return;
+                }
 
-                //Configure event handling
-                attachEOSListener(gstPlayBin); //Handle end of video events
-                attachStateListener(gstPlayBin); //Handle syncing play/pause button to the stream state
-                attachErrorListener(gstPlayBin); //Handle errors gracefully when they are encountered
-
-                //Customize components
-                gstPlayBin.setVolumePercent(audioSlider.getValue() * 2);
-
-                /**
-                 * Prepare the PlayBin for playback.
-                 */
-                gstPlayBin.ready();
-                livePlayBin = true;
+                //Customize components
+                gstPlayBin.setVolume((audioSlider.getValue() * 2.0) / 100.0);
+                gstPlayBin.pause();
 
                 timer.start();
                 enableComponents(true);
             } catch (CancellationException ex) {
                 logger.log(Level.INFO, "Media buffering was canceled."); //NON-NLS
@@ -476,69 +489,6 @@ public class MediaPlayerPanel extends JPanel implements MediaFileViewer.MediaViewPanel {
                 logger.log(Level.SEVERE, "Fatal error during media buffering.", ex); //NON-NLS
             }
         }
-
-        /**
-         * Listens for the end of stream event, in which case we conveniently
-         * reset the video for the user.
-         */
-        private void attachEOSListener(PlayBin gstPlayBin) {
-            gstPlayBin.getBus().connect(new Bus.EOS() {
-                @Override
-                public void endOfStream(GstObject go) {
-                    gstPlayBin.seek(ClockTime.ZERO);
-                    progressSlider.setValue(0);
-                    /**
-                     * Keep the video from automatically playing
-                     */
-                    Gst.getExecutorService().submit(() -> gstPlayBin.pause());
-                }
-            });
-        }
-
-        /**
-         * Listen for state changes and update the play/pause button
-         * accordingly. In addition, handle the state transition from
-         * READY -> PAUSED.
-         */
-        private void attachStateListener(PlayBin gstPlayBin) {
-            gstPlayBin.getBus().connect(new Bus.STATE_CHANGED() {
-                @Override
-                public void stateChanged(GstObject go, State oldState, State currentState, State pendingState) {
-                    /**
-                     * If we are ready, it is safe to transition to the pause state
-                     * to initiate data-flow for pre-roll frame and duration
-                     * information.
-                     */
-                    if (State.READY.equals(currentState)) {
-                        Gst.getExecutorService().submit(() -> gstPlayBin.pause());
-                        timer.start();
-                    }
-
-                    if (State.PLAYING.equals(currentState)) {
-                        playButton.setText("||");
-                    } else {
-                        playButton.setText("►");
-                    }
-                }
-            });
-        }
-
-        /**
-         * On error messages disable the UI and show the user an error was
-         * encountered.
-         */
-        private void attachErrorListener(PlayBin gstPlayBin) {
-            gstPlayBin.getBus().connect(new Bus.ERROR() {
-                @Override
-                public void errorMessage(GstObject go, int i, String string) {
-                    enableComponents(false);
-                    setLabelText(String.format("<html><font color='red'>%s</font></html>",
-                            MEDIA_PLAYER_ERROR_STRING));
-                    timer.stop();
-                    hasError = true;
-                }
-            });
-        }
     }
 
     /**
@@ -549,21 +499,19 @@ public class MediaPlayerPanel extends JPanel implements MediaFileViewer.MediaViewPanel {
         @Override
         public void actionPerformed(ActionEvent e) {
             if (!progressSlider.getValueIsAdjusting()) {
-                if(livePlayBin) {
-                    long position = gstPlayBin.queryPosition(TimeUnit.NANOSECONDS);
-                    long duration = gstPlayBin.queryDuration(TimeUnit.NANOSECONDS);
-                    /**
-                     * Duration may not be known until there is video data in the
-                     * pipeline. We start this updater when data-flow has just been
-                     * initiated so buffering may still be in progress.
-                     */
-                    if (duration != -1) {
-                        double relativePosition = (double) position / duration;
-                        progressSlider.setValue((int) (relativePosition * PROGRESS_SLIDER_SIZE));
-                    }
-
-                    updateTimeLabel(position, duration);
-                }
+                long position = gstPlayBin.queryPosition(TimeUnit.NANOSECONDS);
+                long duration = gstPlayBin.queryDuration(TimeUnit.NANOSECONDS);
+                /**
+                 * Duration may not be known until there is video data in the
+                 * pipeline. We start this updater when data-flow has just been
+                 * initiated so buffering may still be in progress.
+                 */
+                if (duration != -1) {
+                    double relativePosition = (double) position / duration;
+                    progressSlider.setValue((int) (relativePosition * PROGRESS_SLIDER_SIZE));
+                }
+
+                updateTimeLabel(position, duration);
             }
         }
     }
@@ -681,7 +629,7 @@ public class MediaPlayerPanel extends JPanel implements MediaFileViewer.MediaViewPanel {
     }// </editor-fold>//GEN-END:initComponents
 
     private void playButtonActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_playButtonActionPerformed
-        if(gstPlayBin.isPlaying()) {
+        if (gstPlayBin.isPlaying()) {
             gstPlayBin.pause();
         } else {
             gstPlayBin.play();

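The MediaPlayerPanel hunks track the move from gst1-java-core 0.9.x to 1.0.0: setVolumePercent() becomes setVolume() on a 0.0-1.0 scale, Gst.getExecutorService() becomes Gst.getExecutor(), and the anonymous bus listeners become fields so reset() can disconnect them before disposing the PlayBin. A minimal sketch of that connect/disconnect lifecycle (assumes gst1-java-core 1.0.0; the class and file path are placeholders, not Autopsy code):

    import java.io.File;
    import org.freedesktop.gstreamer.Bus;
    import org.freedesktop.gstreamer.Gst;
    import org.freedesktop.gstreamer.elements.PlayBin;

    final class PlayBinLifecycle {

        private PlayBin playBin;
        // Held in a field so the exact same instance can be disconnected later.
        private final Bus.EOS eosListener = source -> Gst.getExecutor().submit(() -> playBin.pause());

        void load(File mediaFile) {
            Gst.init();
            playBin = new PlayBin("Player", mediaFile.toURI());
            playBin.getBus().connect(eosListener); // symmetric with disconnect() in reset()
            playBin.setVolume(0.5);                // 1.0 API: a fraction, not a percentage
            playBin.pause();                       // pre-roll without auto-playing
        }

        void reset() {
            if (playBin != null) {
                playBin.stop();
                playBin.getBus().disconnect(eosListener);
                playBin.dispose();
                playBin = null;
            }
        }
    }
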
@@ -58,7 +58,7 @@ public class HealthMonitorDashboard {
 
     private final static Logger logger = Logger.getLogger(HealthMonitorDashboard.class.getName());
 
-    private final static String ADMIN_ACCESS_FILE_NAME = "_aiaa"; // NON-NLS
+    private final static String ADMIN_ACCESS_FILE_NAME = "admin"; // NON-NLS
     private final static String ADMIN_ACCESS_FILE_PATH = Paths.get(PlatformUtil.getUserConfigDirectory(), ADMIN_ACCESS_FILE_NAME).toString();
 
     Map<String, List<HealthMonitor.DatabaseTimingResult>> timingData;

@@ -52,6 +52,7 @@ final class EncryptionDetectionDataSourceIngestModule implements DataSourceIngestModule {
     private Blackboard blackboard;
     private double calculatedEntropy;
     private final double minimumEntropy;
+    private IngestJobContext context;
 
     /**
      * Create an EncryptionDetectionDataSourceIngestModule object that will
@@ -67,6 +68,7 @@ final class EncryptionDetectionDataSourceIngestModule implements DataSourceIngestModule {
     public void startUp(IngestJobContext context) throws IngestModule.IngestModuleException {
         validateSettings();
         blackboard = Case.getCurrentCase().getServices().getBlackboard();
+        this.context = context;
     }
 
     @Messages({
@@ -77,8 +79,6 @@ final class EncryptionDetectionDataSourceIngestModule implements DataSourceIngestModule {
     @Override
     public ProcessResult process(Content dataSource, DataSourceIngestModuleProgress progressBar) {
-
-
 
         try {
             if (dataSource instanceof Image) {
 
@@ -92,10 +92,23 @@ final class EncryptionDetectionDataSourceIngestModule implements DataSourceIngestModule {
             int numVolSystemsChecked = 0;
             progressBar.progress(Bundle.EncryptionDetectionDataSourceIngestModule_processing_message(), 0);
             for (VolumeSystem volumeSystem : volumeSystems) {
+
+                if (context.dataSourceIngestIsCancelled()) {
+                    return ProcessResult.OK;
+                }
+
                 for (Volume volume : volumeSystem.getVolumes()) {
+
+                    if (context.dataSourceIngestIsCancelled()) {
+                        return ProcessResult.OK;
+                    }
                     if (BitlockerDetection.isBitlockerVolume(volume)) {
                         return flagVolume(volume, BlackboardArtifact.ARTIFACT_TYPE.TSK_ENCRYPTION_DETECTED, Bundle.EncryptionDetectionDataSourceIngestModule_artifactComment_bitlocker());
                     }
+
+                    if (context.dataSourceIngestIsCancelled()) {
+                        return ProcessResult.OK;
+                    }
                     if (isVolumeEncrypted(volume)) {
                         return flagVolume(volume, BlackboardArtifact.ARTIFACT_TYPE.TSK_ENCRYPTION_SUSPECTED, String.format(Bundle.EncryptionDetectionDataSourceIngestModule_artifactComment_suspected(), calculatedEntropy));
                     }
@@ -139,6 +152,11 @@ final class EncryptionDetectionDataSourceIngestModule implements DataSourceIngestModule {
      *         there was a problem.
      */
     private IngestModule.ProcessResult flagVolume(Volume volume, BlackboardArtifact.ARTIFACT_TYPE artifactType, String comment) {
+
+        if (context.dataSourceIngestIsCancelled()) {
+            return ProcessResult.OK;
+        }
+
         try {
             BlackboardArtifact artifact = volume.newArtifact(artifactType);
             artifact.addAttribute(new BlackboardAttribute(BlackboardAttribute.ATTRIBUTE_TYPE.TSK_COMMENT, EncryptionDetectionModuleFactory.getModuleName(), comment));
@@ -198,7 +216,7 @@ final class EncryptionDetectionDataSourceIngestModule implements DataSourceIngestModule {
          * http://www.forensicswiki.org/wiki/TrueCrypt#Detection
          */
         if (volume.getFileSystems().isEmpty()) {
-            calculatedEntropy = EncryptionDetectionTools.calculateEntropy(volume);
+            calculatedEntropy = EncryptionDetectionTools.calculateEntropy(volume, context);
             if (calculatedEntropy >= minimumEntropy) {
                 return true;
             }

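Every ingest-module hunk in this file adds the same guard: poll the IngestJobContext before each unit of work and return ProcessResult.OK so a cancel takes effect promptly and is treated as a clean exit rather than a failure. The shape of the pattern, schematically (examine() and getVolumes() are placeholders, not module methods):

    // Poll for cancellation between units of work; returning OK signals a
    // clean stop, not an error, so partial results are kept.
    public ProcessResult process(Content dataSource, DataSourceIngestModuleProgress progressBar) {
        for (Volume volume : getVolumes(dataSource)) {
            if (context.dataSourceIngestIsCancelled()) {
                return ProcessResult.OK;
            }
            examine(volume);
        }
        return ProcessResult.OK;
    }
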
@@ -82,6 +82,7 @@ final class EncryptionDetectionFileIngestModule extends FileIngestModuleAdapter {
     private final Logger logger = services.getLogger(EncryptionDetectionModuleFactory.getModuleName());
     private FileTypeDetector fileTypeDetector;
     private Blackboard blackboard;
+    private IngestJobContext context;
     private double calculatedEntropy;
 
     private final double minimumEntropy;
@@ -107,6 +108,7 @@ final class EncryptionDetectionFileIngestModule extends FileIngestModuleAdapter {
     public void startUp(IngestJobContext context) throws IngestModule.IngestModuleException {
         try {
             validateSettings();
+            this.context = context;
             blackboard = Case.getCurrentCaseThrows().getServices().getBlackboard();
             fileTypeDetector = new FileTypeDetector();
         } catch (FileTypeDetector.FileTypeDetectorInitException ex) {
@@ -194,6 +196,10 @@ final class EncryptionDetectionFileIngestModule extends FileIngestModuleAdapter {
      */
     private IngestModule.ProcessResult flagFile(AbstractFile file, BlackboardArtifact.ARTIFACT_TYPE artifactType, String comment) {
         try {
+            if (context.fileIngestIsCancelled()) {
+                return IngestModule.ProcessResult.OK;
+            }
+
             BlackboardArtifact artifact = file.newArtifact(artifactType);
             artifact.addAttribute(new BlackboardAttribute(BlackboardAttribute.ATTRIBUTE_TYPE.TSK_COMMENT,
                     EncryptionDetectionModuleFactory.getModuleName(), comment));
@@ -397,7 +403,7 @@ final class EncryptionDetectionFileIngestModule extends FileIngestModuleAdapter {
         /*
          * Qualify the entropy.
          */
-        calculatedEntropy = EncryptionDetectionTools.calculateEntropy(file);
+        calculatedEntropy = EncryptionDetectionTools.calculateEntropy(file, context);
         if (calculatedEntropy >= minimumEntropy) {
             possiblyEncrypted = true;
         }

@@ -22,6 +22,7 @@ import java.io.BufferedInputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import org.openide.util.NbBundle;
+import org.sleuthkit.autopsy.ingest.IngestJobContext;
 import org.sleuthkit.autopsy.ingest.IngestModule;
 import org.sleuthkit.datamodel.ReadContentInputStream;
 import org.sleuthkit.datamodel.Content;
@@ -69,6 +70,7 @@ final class EncryptionDetectionTools {
      * content as possibly encrypted.
      *
      * @param content The content to be calculated against.
+     * @param context The ingest job context for cancellation checks.
      *
      * @return The entropy of the content.
      *
@@ -77,7 +79,7 @@ final class EncryptionDetectionTools {
      * @throws IOException If there is a failure closing or
      *                     reading from the InputStream.
      */
-    static double calculateEntropy(Content content) throws ReadContentInputStream.ReadContentInputStreamException, IOException {
+    static double calculateEntropy(Content content, IngestJobContext context) throws ReadContentInputStream.ReadContentInputStreamException, IOException {
        /*
         * Logic in this method is based on
         * https://github.com/willjasen/entropy/blob/master/entropy.java
@ -95,8 +97,17 @@ final class EncryptionDetectionTools {
|
||||
*/
|
||||
int[] byteOccurences = new int[BYTE_OCCURENCES_BUFFER_SIZE];
|
||||
int readByte;
|
||||
long bytesRead = 0;
|
||||
while ((readByte = bin.read()) != -1) {
|
||||
byteOccurences[readByte]++;
|
||||
|
||||
// Do a cancellation check every 10,000 bytes
|
||||
bytesRead++;
|
||||
if (bytesRead % 10000 == 0) {
|
||||
if (context.dataSourceIngestIsCancelled() || context.fileIngestIsCancelled()) {
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
|
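The loop above only tallies byte occurrences; the entropy itself is the Shannon entropy of the resulting byte distribution, H = -sum over b of p(b) * log2 p(b), which approaches 8 bits per byte for well-encrypted or compressed data. A minimal sketch of the finishing step the counts feed into (the method name and parameters are illustrative, not the actual Autopsy fields):

    static double entropyFromCounts(int[] byteOccurrences, long totalBytes) {
        double entropy = 0.0;
        for (int count : byteOccurrences) {
            if (count > 0) {
                double p = (double) count / totalBytes;
                entropy -= p * (Math.log(p) / Math.log(2)); // log base 2
            }
        }
        return entropy; // values near 8.0 suggest random-looking, possibly encrypted data
    }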
@ -72,6 +72,7 @@ import org.sleuthkit.autopsy.timeline.zooming.ZoomParams;
import org.sleuthkit.datamodel.AbstractFile;
import org.sleuthkit.datamodel.BlackboardArtifact;
import org.sleuthkit.datamodel.BlackboardArtifactTag;
import org.sleuthkit.datamodel.Content;
import org.sleuthkit.datamodel.ContentTag;
import org.sleuthkit.datamodel.SleuthkitCase;
import org.sleuthkit.datamodel.Tag;
@ -711,10 +712,10 @@ public class EventsRepository {
            // if the time is legitimate ( greater than zero ) insert it into the db
            if (eventDescription != null && eventDescription.getTime() > 0) {
                long objectID = bbart.getObjectID();
                AbstractFile f = skCase.getAbstractFileById(objectID);
                long datasourceID = f.getDataSource().getId();
                Content content = skCase.getContentById(objectID);
                long datasourceID = content.getDataSource().getId();
                long artifactID = bbart.getArtifactID();
                Set<String> hashSets = f.getHashSetNames();
                Set<String> hashSets = content.getHashSetNames();
                List<BlackboardArtifactTag> tags = tagsManager.getBlackboardArtifactTagsByArtifact(bbart);
                String fullDescription = eventDescription.getFullDescription();
                String medDescription = eventDescription.getMedDescription();
@ -13,7 +13,7 @@
    <dependency org="com.apple" name="AppleJavaExtensions" rev="1.4"/>

    <!-- for viewers -->
    <dependency conf="autopsy_core->*" org="org.freedesktop.gstreamer" name="gst1-java-core" rev="0.9.3"/>
    <dependency conf="autopsy_core->*" org="org.freedesktop.gstreamer" name="gst1-java-core" rev="1.0.0"/>
    <dependency conf="autopsy_core->*" org="net.java.dev.jna" name="jna" rev="3.4.0"/>
    <dependency conf="autopsy_core->*" org="net.java.dev.jna" name="platform" rev="3.4.0"/>

@ -23,7 +23,7 @@ file.reference.controlsfx-8.40.11.jar=release/modules/ext/controlsfx-8.40.11.jar
file.reference.dom4j-1.6.1.jar=release/modules/ext/dom4j-1.6.1.jar
file.reference.geronimo-jms_1.1_spec-1.0.jar=release/modules/ext/geronimo-jms_1.1_spec-1.0.jar
file.reference.gson-2.8.1.jar=release/modules/ext/gson-2.8.1.jar
file.reference.gst1-java-core-0.9.3.jar=release/modules/ext/gst1-java-core-0.9.3.jar
file.reference.gst1-java-core-1.0.0.jar=release\\modules\\ext\\gst1-java-core-1.0.0.jar
file.reference.jna-3.4.0.jar=release/modules/ext/jna-3.4.0.jar
file.reference.guava-19.0.jar=release/modules/ext/guava-19.0.jar
file.reference.imageio-bmp-3.2.jar=release/modules/ext/imageio-bmp-3.2.jar
@ -589,23 +589,16 @@
                <package>org.dom4j.xpath</package>
                <package>org.dom4j.xpp</package>
                <package>org.freedesktop.gstreamer</package>
                <package>org.freedesktop.gstreamer.controller</package>
                <package>org.freedesktop.gstreamer.device</package>
                <package>org.freedesktop.gstreamer.elements</package>
                <package>org.freedesktop.gstreamer.elements.good</package>
                <package>org.freedesktop.gstreamer.event</package>
                <package>org.freedesktop.gstreamer.example</package>
                <package>org.freedesktop.gstreamer.glib</package>
                <package>org.freedesktop.gstreamer.interfaces</package>
                <package>org.freedesktop.gstreamer.io</package>
                <package>org.freedesktop.gstreamer.lowlevel</package>
                <package>org.freedesktop.gstreamer.lowlevel.annotations</package>
                <package>org.freedesktop.gstreamer.media</package>
                <package>org.freedesktop.gstreamer.media.event</package>
                <package>org.freedesktop.gstreamer.message</package>
                <package>org.freedesktop.gstreamer.query</package>
                <package>org.freedesktop.gstreamer.swing</package>
                <package>org.freedesktop.gstreamer.swt</package>
                <package>org.freedesktop.gstreamer.swt.overlay</package>
                <package>org.freedesktop.gstreamer.webrtc</package>
                <package>org.hyperic.jni</package>
                <package>org.hyperic.sigar</package>
                <package>org.hyperic.sigar.cmd</package>
@ -707,6 +700,10 @@
                <runtime-relative-path>ext/logkit-1.0.1.jar</runtime-relative-path>
                <binary-origin>release/modules/ext/logkit-1.0.1.jar</binary-origin>
            </class-path-extension>
            <class-path-extension>
                <runtime-relative-path>ext/gst1-java-core-1.0.0.jar</runtime-relative-path>
                <binary-origin>release\modules\ext\gst1-java-core-1.0.0.jar</binary-origin>
            </class-path-extension>
            <class-path-extension>
                <runtime-relative-path>ext/imageio-jpeg-3.2.jar</runtime-relative-path>
                <binary-origin>release/modules/ext/imageio-jpeg-3.2.jar</binary-origin>
@ -967,10 +964,6 @@
                <runtime-relative-path>ext/common-lang-3.2.jar</runtime-relative-path>
                <binary-origin>release/modules/ext/common-lang-3.2.jar</binary-origin>
            </class-path-extension>
            <class-path-extension>
                <runtime-relative-path>ext/gst1-java-core-0.9.3.jar</runtime-relative-path>
                <binary-origin>release/modules/ext/gst1-java-core-0.9.3.jar</binary-origin>
            </class-path-extension>
            <class-path-extension>
                <runtime-relative-path>ext/dd-plist-1.20.jar</runtime-relative-path>
                <binary-origin>release/modules/ext/dd-plist-1.20.jar</binary-origin>
@ -51,7 +51,7 @@ import org.sleuthkit.autopsy.experimental.autoingest.AutoIngestNodeRefreshEvents
@SuppressWarnings("PMD.SingularField") // UI widgets cause lots of false positives
final class AutoIngestDashboard extends JPanel implements Observer {

    private final static String ADMIN_ACCESS_FILE_NAME = "_aiaa"; // NON-NLS
    private final static String ADMIN_ACCESS_FILE_NAME = "admin"; // NON-NLS
    private final static String ADMIN_ACCESS_FILE_PATH = Paths.get(PlatformUtil.getUserConfigDirectory(), ADMIN_ACCESS_FILE_NAME).toString();
    private final static String AID_REFRESH_THREAD_NAME = "AID-refresh-jobs-%d";
    private final static int AID_REFRESH_INTERVAL_SECS = 30;

@ -77,7 +77,7 @@ public final class AutoIngestDashboardTopComponent extends TopComponent {
        AutoIngestDashboard dashboard = AutoIngestDashboard.createDashboard();
        tc.add(dashboard);
        dashboard.setSize(dashboard.getPreferredSize());
        //if the user has administrator access enabled open the Node Status top component as well
        //if the user has administrator access enabled open the Node Status and cases top components as well
        if (AutoIngestDashboard.isAdminAutoIngestDashboard()) {
            EventQueue.invokeLater(() -> {
                AinStatusDashboardTopComponent.openTopComponent(dashboard.getMonitor());
@ -0,0 +1,58 @@
/*
 * Autopsy Forensic Browser
 *
 * Copyright 2019-2019 Basis Technology Corp.
 * Contact: carrier <at> sleuthkit <dot> org
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.sleuthkit.autopsy.experimental.autoingest;

import java.util.ArrayList;
import java.util.List;
import java.util.logging.Level;
import org.sleuthkit.autopsy.coordinationservice.CoordinationService;
import org.sleuthkit.autopsy.coordinationservice.CoordinationService.CoordinationServiceException;
import org.sleuthkit.autopsy.coreutils.Logger;

/**
 * Collects the auto ingest job node data stored in the manifest file
 * coordination service nodes.
 */
final class AutoIngestJobNodeDataCollector {

    private static final Logger logger = Logger.getLogger(AutoIngestJobNodeDataCollector.class.getName());

    static List<AutoIngestJobNodeData> getNodeData() throws CoordinationServiceException, InterruptedException {
        final CoordinationService coordinationService = CoordinationService.getInstance();
        final List<String> nodePaths = coordinationService.getNodeList(CoordinationService.CategoryNode.MANIFESTS);
        final List<AutoIngestJobNodeData> nodeDataList = new ArrayList<>();
        for (String nodePath : nodePaths) {
            try {
                final byte[] nodeBytes = coordinationService.getNodeData(CoordinationService.CategoryNode.MANIFESTS, nodePath);
                AutoIngestJobNodeData nodeData = new AutoIngestJobNodeData(nodeBytes);
                nodeDataList.add(nodeData);
            } catch (AutoIngestJobNodeData.InvalidDataException ex) {
                logger.log(Level.WARNING, String.format("Error reading node data from manifest file coordination service node %s", nodePath), ex); // NON-NLS
            }
        }
        return nodeDataList;
    }

    /**
     * Prevents instantiation of this utility class.
     */
    private AutoIngestJobNodeDataCollector() {
    }

}
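A hypothetical caller, to show the intended usage of the new collector; the loop body is illustrative, but the accessors it calls (getManifestFilePath(), getProcessingStatus()) appear elsewhere in this patch:

    try {
        for (AutoIngestJobNodeData nodeData : AutoIngestJobNodeDataCollector.getNodeData()) {
            logger.log(Level.INFO, String.format("Job for manifest %s has status %s",
                    nodeData.getManifestFilePath(), nodeData.getProcessingStatus())); // NON-NLS
        }
    } catch (CoordinationServiceException | InterruptedException ex) {
        logger.log(Level.SEVERE, "Failed to collect auto ingest job node data", ex); // NON-NLS
    }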
@ -62,6 +62,7 @@ import org.sleuthkit.autopsy.casemodule.CaseActionException;
import org.sleuthkit.autopsy.casemodule.CaseDetails;
import org.sleuthkit.autopsy.casemodule.CaseMetadata;
import org.sleuthkit.autopsy.casemodule.multiusercases.CaseNodeData;
import org.sleuthkit.autopsy.casemodule.multiusercases.CaseNodeData.CaseNodeDataException;
import org.sleuthkit.autopsy.coordinationservice.CoordinationService;
import org.sleuthkit.autopsy.coordinationservice.CoordinationService.CoordinationServiceException;
import org.sleuthkit.autopsy.coordinationservice.CoordinationService.Lock;
@ -126,7 +127,6 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
    private static final int NUM_INPUT_SCAN_SCHEDULING_THREADS = 1;
    private static final String INPUT_SCAN_SCHEDULER_THREAD_NAME = "AIM-input-scan-scheduler-%d";
    private static final String INPUT_SCAN_THREAD_NAME = "AIM-input-scan-%d";
    private static final int INPUT_SCAN_LOCKING_TIMEOUT_MINS = 5;
    private static final String AUTO_INGEST_THREAD_NAME = "AIM-job-processing-%d";
    private static final String LOCAL_HOST_NAME = NetworkUtils.getLocalHostName();
    private static final String EVENT_CHANNEL_NAME = "Auto-Ingest-Manager-Events";
@ -174,6 +174,12 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen

    private volatile AutoIngestNodeStateEvent lastPublishedStateEvent;

    /**
     * Gets the name of the file in a case directory that is used to record the
     * manifest file paths for the auto ingest jobs for the case.
     *
     * @return The file name.
     */
    static String getCaseManifestsListFileName() {
        return CASE_MANIFESTS_LIST_FILE_NAME;
    }
@ -1012,26 +1018,24 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
    }

    /**
     * Sets the error flag for case node data given a case directory path.
     * Sets the error flag in the case node data stored in a case directory
     * coordination service node.
     *
     * @param caseDirectoryPath The case directory path.
     *
     * @throws CoordinationServiceException If there was an error getting the
     *                                      node data from the coordination
     *                                      service.
     * @throws IOException                  If the node data was missing or
     *                                      there was an error interpreting it.
     * @throws InterruptedException         If the thread running the input
     *                                      directory scan task is interrupted
     *                                      while blocked, i.e., if auto ingest
     *                                      is shutting down.
     * @throws InterruptedException If the thread running the input directory
     *                              scan task is interrupted while blocked,
     *                              i.e., if auto ingest is shutting down.
     */
    private void setCaseNodeDataErrorsOccurred(Path caseDirectoryPath) throws IOException, CoordinationServiceException, InterruptedException {
        byte[] rawData = coordinationService.getNodeData(CoordinationService.CategoryNode.CASES, caseDirectoryPath.toString());
        CaseNodeData caseNodeData = new CaseNodeData(rawData);
        caseNodeData.setErrorsOccurred(true);
        rawData = caseNodeData.toArray();
        coordinationService.setNodeData(CoordinationService.CategoryNode.CASES, caseDirectoryPath.toString(), rawData);
    private void setErrorsOccurredFlagForCase(Path caseDirectoryPath) throws InterruptedException {
        try {
            CaseNodeData caseNodeData = CaseNodeData.readCaseNodeData(caseDirectoryPath.toString());
            caseNodeData.setErrorsOccurred(true);
            CaseNodeData.writeCaseNodeData(caseNodeData);
        } catch (CaseNodeDataException ex) {
            sysLogger.log(Level.WARNING, String.format("Error attempting to set error flag in case node data for %s", caseDirectoryPath), ex);
        }

    }

    /**
@ -1160,8 +1164,8 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
     * @param filePath The path of the file.
     * @param attrs    The file system attributes of the file.
     *
     * @return TERMINATE if auto ingest is shutting down, CONTINUE if it has
     *         not.
     * @return TERMINATE if auto ingest is shutting down, CONTINUE
     *         otherwise.
     */
    @Override
    public FileVisitResult visitFile(Path filePath, BasicFileAttributes attrs) {
@ -1171,9 +1175,8 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen

        try {
            /*
             * Determine whether or not the file is an auto ingest job
             * manifest file. If it is, then parse it. Otherwise, move on to
             * the next file in the directory.
             * Determine whether or not the file is a manifest file. If it
             * is, then parse it.
             */
            Manifest manifest = null;
            for (ManifestFileParser parser : Lookup.getDefault().lookupAll(ManifestFileParser.class)) {
@ -1197,138 +1200,118 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
            if (manifest == null) {
                return CONTINUE;
            }

            /*
             * If a manifest file has been found, get a manifest file lock,
             * analyze the job state, and put a job into the appropriate job
             * list. There is a short wait here in case the input directory
             * scanner file visitor of another auto ingest node (AIN) has
             * the lock. If the lock ultimately can't be obtained, the wait
             * was not long enough, or another auto ingest node (AIN) is
             * holding the lock because it is executing the job, or a case
             * deletion task has acquired the lock. In all of these cases the
             * manifest can be skipped for this scan.
             * If a manifest file has been found, get the corresponding auto
             * ingest job state from the manifest file coordination service
             * node and put the job in the appropriate jobs list.
             *
             * There can be a race condition between queuing jobs and case
             * deletion. However, in practice eliminating the race condition
             * by acquiring a manifest file coordination service lock when
             * analyzing job state here appears to have a significant
             * performance cost for both input directory scanning and
             * dequeuing jobs. Therefore, job state must be checked again
             * during job dequeuing, while actually holding the lock, before
             * executing the job.
             */
            try (Lock manifestLock = coordinationService.tryGetExclusiveLock(CoordinationService.CategoryNode.MANIFESTS, manifest.getFilePath().toString(), INPUT_SCAN_LOCKING_TIMEOUT_MINS, TimeUnit.MINUTES)) {
                if (null != manifestLock) {

                    /*
                     * Now that the lock has been acquired, make sure the
                     * manifest is still here. This is a way to resolve the
                     * race condition between this task and case deletion
                     * tasks without resorting to a protocol using locking
                     * of the input directory.
                     */
                    if (!filePath.toFile().exists()) {
                        return CONTINUE;
                    }

                    byte[] rawData = coordinationService.getNodeData(CoordinationService.CategoryNode.MANIFESTS, manifest.getFilePath().toString());
                    if (null != rawData && rawData.length > 0) {
                        AutoIngestJobNodeData nodeData = new AutoIngestJobNodeData(rawData);
                        AutoIngestJob.ProcessingStatus processingStatus = nodeData.getProcessingStatus();
                        switch (processingStatus) {
                            case PENDING:
                                addPendingJob(manifest, nodeData);
                                break;
                            case PROCESSING:
                                /*
                                 * If an exclusive manifest file lock was
                                 * obtained for an auto ingest job in the
                                 * processing state, the auto ingest node
                                 * (AIN) executing the job crashed and the
                                 * lock was released when the coordination
                                 * service detected that the AIN was no
                                 * longer alive.
                                 */
                                doCrashRecovery(manifest, nodeData);
                                break;
                            case COMPLETED:
                                addCompletedJob(manifest, nodeData);
                                break;
                            case DELETED:
                                /*
                                 * Ignore jobs marked as deleted. Note that
                                 * this state is no longer used and is
                                 * retained for legacy jobs only.
                                 */
                                break;
                            default:
                                sysLogger.log(Level.SEVERE, "Unknown ManifestNodeData.ProcessingStatus");
                                break;
                        }
                    } else {
                        try {
                            addNewPendingJob(manifest);
                        } catch (AutoIngestJobException ex) {
                            sysLogger.log(Level.SEVERE, String.format("Invalid manifest data for %s", manifest.getFilePath()), ex);
                        }
                    }
            String manifestFilePath = manifest.getFilePath().toString();
            byte[] rawData = coordinationService.getNodeData(CoordinationService.CategoryNode.MANIFESTS, manifestFilePath);
            if (null != rawData && rawData.length > 0) {
                AutoIngestJobNodeData nodeData = new AutoIngestJobNodeData(rawData);
                AutoIngestJob.ProcessingStatus processingStatus = nodeData.getProcessingStatus();
                switch (processingStatus) {
                    case PENDING:
                        addPendingJob(manifest, nodeData);
                        break;
                    case PROCESSING:
                        doRecoveryIfCrashed(manifest, nodeData);
                        break;
                    case COMPLETED:
                        addCompletedJob(manifest, nodeData);
                        break;
                    case DELETED:
                        break;
                    default:
                        sysLogger.log(Level.SEVERE, "Unknown ManifestNodeData.ProcessingStatus");
                        break;
                }
            } catch (CoordinationServiceException | AutoIngestJobException | AutoIngestJobNodeData.InvalidDataException ex) {
                sysLogger.log(Level.SEVERE, String.format("Error handling manifest at %s", manifest.getFilePath()), ex);
            } catch (InterruptedException ex) {
                /*
                 * The thread running the input directory scan task was
                 * interrupted while blocked, i.e., auto ingest is shutting
                 * down.
                 */
                return TERMINATE;
            } else {
                addNewPendingJob(manifest);
            }

        } catch (CoordinationServiceException | AutoIngestJobException | AutoIngestJobNodeData.InvalidDataException ex) {
            sysLogger.log(Level.SEVERE, String.format("Error visiting %s", filePath), ex);

        } catch (InterruptedException ex) {
            return TERMINATE;

        } catch (Exception ex) {
            /*
             * This is an exception firewall so that an unexpected runtime
             * exception from the handling of a single manifest file does
             * not take out the input directory scanner.
             * exception from the handling of a single file does not stop
             * the input directory scan.
             */
            sysLogger.log(Level.SEVERE, String.format("Unexpected exception handling %s", filePath), ex);
            sysLogger.log(Level.SEVERE, String.format("Unexpected exception visiting %s", filePath), ex);
        }

        if (!Thread.currentThread().isInterrupted()) {
            return CONTINUE;
        } else {
            sysLogger.log(Level.WARNING, String.format("Auto ingest shut down while visiting %s", filePath));
            return TERMINATE;
        }
    }

    /**
     * Adds an auto ingest job to the pending jobs queue.
     * Adds an existing auto ingest job to the pending jobs queue. If the
     * version of the coordination service node data is out of date, it is
     * upgraded to the current version.
     *
     * @param manifest The manifest for the job.
     * @param nodeData The data stored in the manifest file lock
     *                 coordination service node for the job.
     * @param nodeData The data stored in the manifest file coordination
     *                 service node for the job.
     *
     * @throws AutoIngestJobException       If there was an error working
     *                                      with the node data.
     * @throws CoordinationServiceException If a lock node data version
     *                                      update was required and there
     *                                      was an error writing the node
     *                                      data by the coordination
     *                                      service.
     * @throws InterruptedException         If the thread running the input
     *                                      directory scan task is
     *                                      interrupted while blocked, i.e.,
     *                                      if auto ingest is shutting down.
     */
    private void addPendingJob(Manifest manifest, AutoIngestJobNodeData nodeData) throws AutoIngestJobException, CoordinationServiceException, InterruptedException {
    private void addPendingJob(Manifest manifest, AutoIngestJobNodeData nodeData) throws AutoIngestJobException, InterruptedException {
        AutoIngestJob job;
        if (nodeData.getVersion() == AutoIngestJobNodeData.getCurrentVersion()) {
            job = new AutoIngestJob(nodeData);
        } else {
            /*
             * Upgrade the auto ingest node data to the current version.
             */
            job = new AutoIngestJob(manifest);
            job.setPriority(nodeData.getPriority());
            Path caseDirectory = PathUtils.findCaseDirectory(rootOutputDirectory, manifest.getCaseName());
            if (null != caseDirectory) {
                job.setCaseDirectoryPath(caseDirectory);
            }
            updateAutoIngestJobData(job);

            /*
             * Try to write the upgraded node data to the coordination service
             * manifest node data for the job. If the lock cannot be
             * obtained, assume that the auto ingest node holding the lock
             * is taking care of this.
             */
            try (Lock manifestLock = coordinationService.tryGetExclusiveLock(CoordinationService.CategoryNode.MANIFESTS, manifest.getFilePath().toString())) {
                if (null != manifestLock) {
                    updateAutoIngestJobData(job);
                }
            } catch (CoordinationServiceException ex) {
                sysLogger.log(Level.SEVERE, String.format("Error attempting to set node data for %s", manifest.getFilePath()), ex);
            }
        }
        newPendingJobsList.add(job);
    }

    /**
     * Adds a new job to the pending jobs queue.
     * Adds a new auto ingest job to the pending jobs queue.
     *
     * @param manifest The manifest for the job.
     *
@ -1343,14 +1326,28 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
     *                                      if auto ingest is shutting down.
     */
    private void addNewPendingJob(Manifest manifest) throws AutoIngestJobException, CoordinationServiceException, InterruptedException {
        AutoIngestJob job = new AutoIngestJob(manifest);
        updateAutoIngestJobData(job);
        newPendingJobsList.add(job);
        /*
         * Create the coordination service manifest file node data for the
         * job. Getting the lock both guards the writing of the new node
         * data and creates the coordination service node if it does not
         * already exist. Note that if this auto ingest node cannot get the
         * lock, it is assumed that the auto ingest node holding the lock is
         * taking care of this. In this case, this auto ingest node will not
         * add the new job to its pending queue during this scan of the
         * input directory, but it will be picked up during the next scan.
         */
        try (Lock manifestLock = coordinationService.tryGetExclusiveLock(CoordinationService.CategoryNode.MANIFESTS, manifest.getFilePath().toString())) {
            if (null != manifestLock) {
                AutoIngestJob job = new AutoIngestJob(manifest);
                updateAutoIngestJobData(job);
                newPendingJobsList.add(job);
            }
        }
    }

    /**
     * Does recovery for an auto ingest job that was left in the processing
     * state by an auto ingest node (AIN) that crashed.
     * If required, does recovery for an auto ingest job that was left in
     * the processing state by an auto ingest node (AIN) that crashed.
     *
     * @param manifest The manifest for the job.
     * @param nodeData The data stored in the manifest file lock
@ -1366,62 +1363,73 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
     *                                      interrupted while blocked, i.e.,
     *                                      if auto ingest is shutting down.
     */
    private void doCrashRecovery(Manifest manifest, AutoIngestJobNodeData jobNodeData) throws AutoIngestJobException, CoordinationServiceException, InterruptedException {
    private void doRecoveryIfCrashed(Manifest manifest, AutoIngestJobNodeData jobNodeData) throws AutoIngestJobException, CoordinationServiceException, InterruptedException {
        String manifestPath = manifest.getFilePath().toString();
        sysLogger.log(Level.SEVERE, "Attempting crash recovery for {0}", manifestPath);
        AutoIngestJob job = new AutoIngestJob(jobNodeData);
        try (Lock manifestLock = coordinationService.tryGetExclusiveLock(CoordinationService.CategoryNode.MANIFESTS, manifestPath)) {
            if (null != manifestLock) {
                AutoIngestJob job = new AutoIngestJob(jobNodeData);
                if (job.getProcessingStatus() == AutoIngestJob.ProcessingStatus.PROCESSING) {
                    /*
                     * If the lock can be obtained with the job status set
                     * to processing, then an auto ingest node crashed while
                     * executing the job and was unable to update the job
                     * status.
                     */
                    sysLogger.log(Level.SEVERE, "Attempting crash recovery for {0}", manifestPath);

        /*
         * Try to set the error flags that indicate incomplete or messy data
         * in displays for the job and the case. Note that if the job
         * crashed before a case directory was created, the job was a no-op,
         * so the data quality flags do not need to be set.
         */
        Path caseDirectoryPath = PathUtils.findCaseDirectory(rootOutputDirectory, manifest.getCaseName());
        if (null != caseDirectoryPath) {
            job.setCaseDirectoryPath(caseDirectoryPath);
            job.setErrorsOccurred(true);
            try {
                setCaseNodeDataErrorsOccurred(caseDirectoryPath);
            } catch (IOException ex) {
                sysLogger.log(Level.WARNING, String.format("Error attempting to set error flag in case node data for %s", caseDirectoryPath), ex);
            }
        } else {
            job.setErrorsOccurred(false);
        }
                    /*
                     * First, try to set the case node data error flag that
                     * indicates there was an auto ingest job error. If the
                     * auto ingest node that was executing the job crashed
                     * before the case directory was created, the job was a
                     * no-op, so the error flag does not need to be set.
                     * However, note that if another auto ingest job
                     * subsequently completed, the failed job may still have
                     * been a no-op, but in this case the flag will be set
                     * anyway, because a case directory will be found.
                     */
                    Path caseDirectoryPath = PathUtils.findCaseDirectory(rootOutputDirectory, manifest.getCaseName());
                    if (null != caseDirectoryPath) {
                        job.setCaseDirectoryPath(caseDirectoryPath);
                        job.setErrorsOccurred(true);
                        setErrorsOccurredFlagForCase(caseDirectoryPath);
                    } else {
                        job.setErrorsOccurred(false);
                    }

        /*
         * Update the crash count for the job, determine whether or not to
         * retry processing its data source, and deal with the job
         * accordingly.
         */
        int numberOfCrashes = job.getNumberOfCrashes();
        ++numberOfCrashes;
        job.setNumberOfCrashes(numberOfCrashes);
        if (numberOfCrashes < AutoIngestUserPreferences.getMaxNumTimesToProcessImage()) {
            job.setProcessingStatus(AutoIngestJob.ProcessingStatus.PENDING);
            job.setCompletedDate(new Date(0));
            if (null != caseDirectoryPath) {
                try {
                    new AutoIngestJobLogger(manifest.getFilePath(), manifest.getDataSourceFileName(), caseDirectoryPath).logCrashRecoveryWithRetry();
                } catch (AutoIngestJobLoggerException ex) {
                    sysLogger.log(Level.SEVERE, String.format("Error writing case auto ingest log entry for crashed job for %s", manifestPath), ex);
                    /*
                     * Update the crash count for the job, determine whether
                     * or not to retry processing its data source, and deal
                     * with the job accordingly.
                     */
                    int numberOfCrashes = job.getNumberOfCrashes();
                    ++numberOfCrashes;
                    job.setNumberOfCrashes(numberOfCrashes);
                    if (numberOfCrashes < AutoIngestUserPreferences.getMaxNumTimesToProcessImage()) {
                        job.setProcessingStatus(AutoIngestJob.ProcessingStatus.PENDING);
                        job.setCompletedDate(new Date(0));
                        if (null != caseDirectoryPath) {
                            try {
                                new AutoIngestJobLogger(manifest.getFilePath(), manifest.getDataSourceFileName(), caseDirectoryPath).logCrashRecoveryWithRetry();
                            } catch (AutoIngestJobLoggerException ex) {
                                sysLogger.log(Level.SEVERE, String.format("Error writing case auto ingest log entry for crashed job for %s", manifestPath), ex);
                            }
                        }
                    } else {
                        job.setProcessingStatus(AutoIngestJob.ProcessingStatus.COMPLETED);
                        job.setCompletedDate(Date.from(Instant.now()));
                        if (null != caseDirectoryPath) {
                            try {
                                new AutoIngestJobLogger(manifest.getFilePath(), manifest.getDataSourceFileName(), caseDirectoryPath).logCrashRecoveryNoRetry();
                            } catch (AutoIngestJobLoggerException ex) {
                                sysLogger.log(Level.SEVERE, String.format("Error writing case auto ingest log entry for crashed job for %s", manifestPath), ex);
                            }
                        }
                    }
                    updateAutoIngestJobData(job);
                    newPendingJobsList.add(job);
                }
            }
            updateAutoIngestJobData(job);
            newPendingJobsList.add(job);
        } else {
            job.setProcessingStatus(AutoIngestJob.ProcessingStatus.COMPLETED);
            job.setCompletedDate(Date.from(Instant.now()));
            if (null != caseDirectoryPath) {
                try {
                    new AutoIngestJobLogger(manifest.getFilePath(), manifest.getDataSourceFileName(), caseDirectoryPath).logCrashRecoveryNoRetry();
                } catch (AutoIngestJobLoggerException ex) {
                    sysLogger.log(Level.SEVERE, String.format("Error writing case auto ingest log entry for crashed job for %s", manifestPath), ex);
                }
            }
            updateAutoIngestJobData(job);
            newCompletedJobsList.add(new AutoIngestJob(jobNodeData));
        }
    }

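The crash-detection idiom in doRecoveryIfCrashed() can be summed up in a small hedged sketch: the coordination service releases a dead node's exclusive locks, so a job still marked PROCESSING whose manifest lock can be acquired must have been orphaned by a crash. The helper name and shape below are illustrative, not part of the patch:

    private boolean wasOrphanedByCrash(String manifestPath, AutoIngestJob job)
            throws CoordinationServiceException, InterruptedException {
        try (Lock manifestLock = coordinationService.tryGetExclusiveLock(CoordinationService.CategoryNode.MANIFESTS, manifestPath)) {
            // A live auto ingest node executing the job would still hold this lock.
            return null != manifestLock
                    && job.getProcessingStatus() == AutoIngestJob.ProcessingStatus.PROCESSING;
        }
    }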
@ -1434,15 +1442,12 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
     *
     * @throws AutoIngestJobException       If there was an error working
     *                                      with the node data.
     * @throws CoordinationServiceException If there was an error writing
     *                                      updated node data by the
     *                                      coordination service.
     * @throws InterruptedException         If the thread running the input
     *                                      directory scan task is
     *                                      interrupted while blocked, i.e.,
     *                                      if auto ingest is shutting down.
     */
    private void addCompletedJob(Manifest manifest, AutoIngestJobNodeData nodeData) throws AutoIngestJobException, CoordinationServiceException, InterruptedException {
    private void addCompletedJob(Manifest manifest, AutoIngestJobNodeData nodeData) throws AutoIngestJobException, InterruptedException {
        Path caseDirectoryPath = nodeData.getCaseDirectoryPath();
        if (!caseDirectoryPath.toFile().exists()) {
            sysLogger.log(Level.WARNING, String.format("Job completed for %s, but cannot find case directory %s, ignoring job", nodeData.getManifestFilePath(), caseDirectoryPath.toString()));
@ -1454,19 +1459,11 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
            job = new AutoIngestJob(nodeData);
            job.setCaseDirectoryPath(caseDirectoryPath);
        } else {
            /**
             * Use the manifest rather than the node data here to create a
             * new AutoIngestJob instance because the AutoIngestJob
             * constructor that takes a node data object expects the node
             * data to have fields that do not exist in earlier versions.
            /*
             * Upgrade the auto ingest node data to the current version.
             */
            job = new AutoIngestJob(manifest);
            job.setCaseDirectoryPath(caseDirectoryPath);

            /**
             * Update the job with the fields that exist in all versions of
             * the nodeData.
             */
            job.setCompletedDate(nodeData.getCompletedDate());
            job.setErrorsOccurred(nodeData.getErrorsOccurred());
            job.setPriority(nodeData.getPriority());
@ -1474,7 +1471,19 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
            job.setProcessingStage(AutoIngestJob.Stage.COMPLETED, nodeData.getCompletedDate());
            job.setProcessingStatus(AutoIngestJob.ProcessingStatus.COMPLETED);

            updateAutoIngestJobData(job);
            /*
             * Try to write the upgraded node data to the coordination service
             * manifest node data for the job. If the lock cannot be
             * obtained, assume that the auto ingest node holding the lock
             * is taking care of this.
             */
            try (Lock manifestLock = coordinationService.tryGetExclusiveLock(CoordinationService.CategoryNode.MANIFESTS, manifest.getFilePath().toString())) {
                if (null != manifestLock) {
                    updateAutoIngestJobData(job);
                }
            } catch (CoordinationServiceException ex) {
                sysLogger.log(Level.SEVERE, String.format("Error attempting to set node data for %s", manifest.getFilePath()), ex);
            }
        }

        newCompletedJobsList.add(job);
@ -1972,6 +1981,17 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
                    }

                    try {
                        /*
                         * There can be a race condition between queuing jobs
                         * and case deletion. However, in practice eliminating
                         * the race condition by acquiring a manifest file
                         * coordination service lock when analyzing job state
                         * during the input directory scan appears to have a
                         * significant performance cost for both input directory
                         * scanning and dequeuing jobs. Therefore, job state
                         * must be checked again here, while actually holding
                         * the lock, before executing the job.
                         */
                        AutoIngestJobNodeData nodeData = new AutoIngestJobNodeData(coordinationService.getNodeData(CoordinationService.CategoryNode.MANIFESTS, manifestPath.toString()));
                        if (!nodeData.getProcessingStatus().equals(PENDING)) {
                            iterator.remove();
@ -2105,7 +2125,7 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
                if (currentJob.isCanceled()) {
                    Path caseDirectoryPath = currentJob.getCaseDirectoryPath();
                    if (null != caseDirectoryPath) {
                        setCaseNodeDataErrorsOccurred(caseDirectoryPath);
                        setErrorsOccurredFlagForCase(caseDirectoryPath);
                        AutoIngestJobLogger jobLogger = new AutoIngestJobLogger(manifestPath, currentJob.getManifest().getDataSourceFileName(), caseDirectoryPath);
                        jobLogger.logJobCancelled();
                    }
@ -2463,7 +2483,7 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
            if (!dataSource.exists()) {
                sysLogger.log(Level.SEVERE, "Missing data source for {0}", manifestPath);
                currentJob.setErrorsOccurred(true);
                setCaseNodeDataErrorsOccurred(caseDirectoryPath);
                setErrorsOccurredFlagForCase(caseDirectoryPath);
                jobLogger.logMissingDataSource();
                return null;
            }
@ -2508,7 +2528,7 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
            // did we find a data source processor that can process the data source
            if (validDataSourceProcessors.isEmpty()) {
                // This should never happen. We should add all unsupported data sources as logical files.
                setCaseNodeDataErrorsOccurred(caseDirectoryPath);
                setErrorsOccurredFlagForCase(caseDirectoryPath);
                currentJob.setErrorsOccurred(true);
                jobLogger.logFailedToIdentifyDataSource();
                sysLogger.log(Level.WARNING, "Unsupported data source {0} for {1}", new Object[]{dataSource.getPath(), manifestPath}); // NON-NLS
@ -2543,7 +2563,7 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
            // If we get to this point, none of the processors were successful
            sysLogger.log(Level.SEVERE, "All data source processors failed to process {0}", dataSource.getPath());
            jobLogger.logFailedToAddDataSource();
            setCaseNodeDataErrorsOccurred(caseDirectoryPath);
            setErrorsOccurredFlagForCase(caseDirectoryPath);
            currentJob.setErrorsOccurred(true);
            // Throw an exception. It will get caught & handled upstream and will result in AIM auto-pause.
            throw new AutoIngestDataSourceProcessor.AutoIngestDataSourceProcessorException("Failed to process " + dataSource.getPath() + " with all data source processors");
@ -2662,7 +2682,7 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
            if (!cancelledModules.isEmpty()) {
                sysLogger.log(Level.WARNING, String.format("Ingest module(s) cancelled for %s", manifestPath));
                currentJob.setErrorsOccurred(true);
                setCaseNodeDataErrorsOccurred(caseDirectoryPath);
                setErrorsOccurredFlagForCase(caseDirectoryPath);
                for (String module : snapshot.getCancelledDataSourceIngestModules()) {
                    sysLogger.log(Level.WARNING, String.format("%s ingest module cancelled for %s", module, manifestPath));
                    nestedJobLogger.logIngestModuleCancelled(module);
@ -2672,7 +2692,7 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
            } else {
                currentJob.setProcessingStage(AutoIngestJob.Stage.CANCELLING, Date.from(Instant.now()));
                currentJob.setErrorsOccurred(true);
                setCaseNodeDataErrorsOccurred(caseDirectoryPath);
                setErrorsOccurredFlagForCase(caseDirectoryPath);
                nestedJobLogger.logAnalysisCancelled();
                CancellationReason cancellationReason = snapshot.getCancellationReason();
                if (CancellationReason.NOT_CANCELLED != cancellationReason && CancellationReason.USER_CANCELLED != cancellationReason) {
@ -2685,13 +2705,13 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
                    sysLogger.log(Level.SEVERE, String.format("%s ingest module startup error for %s", error.getModuleDisplayName(), manifestPath), error.getThrowable());
                }
                currentJob.setErrorsOccurred(true);
                setCaseNodeDataErrorsOccurred(caseDirectoryPath);
                setErrorsOccurredFlagForCase(caseDirectoryPath);
                jobLogger.logIngestModuleStartupErrors();
                throw new AnalysisStartupException(String.format("Error(s) during ingest module startup for %s", manifestPath));
            } else {
                sysLogger.log(Level.SEVERE, String.format("Ingest manager ingest job start error for %s", manifestPath), ingestJobStartResult.getStartupException());
                currentJob.setErrorsOccurred(true);
                setCaseNodeDataErrorsOccurred(caseDirectoryPath);
                setErrorsOccurredFlagForCase(caseDirectoryPath);
                jobLogger.logAnalysisStartupError();
                throw new AnalysisStartupException("Ingest manager error starting job", ingestJobStartResult.getStartupException());
            }
@ -2700,7 +2720,7 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
                sysLogger.log(Level.SEVERE, "Ingest job settings error for {0}: {1}", new Object[]{manifestPath, warning});
            }
            currentJob.setErrorsOccurred(true);
            setCaseNodeDataErrorsOccurred(caseDirectoryPath);
            setErrorsOccurredFlagForCase(caseDirectoryPath);
            jobLogger.logIngestJobSettingsErrors();
            throw new AnalysisStartupException("Error(s) in ingest job settings");
        }
@ -2783,7 +2803,7 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
        } catch (FileExportException ex) {
            sysLogger.log(Level.SEVERE, String.format("Error doing file export for %s", manifestPath), ex);
            currentJob.setErrorsOccurred(true);
            setCaseNodeDataErrorsOccurred(caseDirectoryPath);
            setErrorsOccurredFlagForCase(caseDirectoryPath);
            jobLogger.logFileExportError();
        }
    }
@ -72,13 +72,13 @@ final class AutoIngestMetricsCollector {
                    switch (processingStatus) {
                        case PENDING:
                        case PROCESSING:
                        case DELETED: // No longer used, retained for legacy jobs only.
                            /*
                             * These are not jobs we care about for metrics, so
                             * we will ignore them.
                             */
                            break;
                        case COMPLETED:
                        case DELETED: // Assuming deleted jobs were completed before they were deleted.
                            newMetricsSnapshot.addCompletedJobMetric(job.getCompletedDate(), job.getDataSourceSize());
                            break;
                        default:

@ -354,7 +354,10 @@ final class AutoIngestMonitor extends Observable implements PropertyChangeListen
                    case COMPLETED:
                        newJobsSnapshot.addOrReplaceCompletedJob(job);
                        break;
                    case DELETED: // No longer used, retained for legacy jobs only.
                    case DELETED:
                        /*
                         * Ignore jobs marked as deleted.
                         */
                        break;
                    default:
                        LOGGER.log(Level.SEVERE, "Unknown AutoIngestJobData.ProcessingStatus");
@ -0,0 +1,80 @@
/*
 * Autopsy Forensic Browser
 *
 * Copyright 2019-2019 Basis Technology Corp.
 * Contact: carrier <at> sleuthkit <dot> org
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.sleuthkit.autopsy.experimental.autoingest;

import java.awt.event.ActionEvent;
import java.util.concurrent.FutureTask;
import javax.swing.AbstractAction;
import org.sleuthkit.autopsy.progress.AppFrameProgressBar;
import org.sleuthkit.autopsy.progress.ProgressIndicator;
import org.sleuthkit.autopsy.progress.TaskCancellable;

/**
 * A base class for action classes that kick off a cancellable task that runs in
 * a background thread and reports progress using an application frame progress
 * bar.
 */
abstract class BackgroundTaskAction extends AbstractAction {

    private static final long serialVersionUID = 1L;
    private final String progressDisplayName;

    /**
     * Constructs the base class part of action classes that kick off a
     * cancellable task that runs in a background thread and reports progress
     * using an application frame progress bar.
     *
     * @param actionName          The name of the action.
     * @param progressDisplayName The display name for the progress bar.
     */
    BackgroundTaskAction(String actionName, String progressDisplayName) {
        super(actionName);
        this.progressDisplayName = progressDisplayName;
    }

    @Override
    public void actionPerformed(ActionEvent event) {
        final AppFrameProgressBar progress = new AppFrameProgressBar(progressDisplayName);
        final TaskCancellable taskCanceller = new TaskCancellable(progress);
        progress.setCancellationBehavior(taskCanceller);
        final Runnable task = getTask(progress);
        final FutureTask<Void> future = new FutureTask<>(task, null);
        taskCanceller.setFuture(future);
        new Thread(future).start();
    }

    /**
     * Gets the background task to be executed. The task is expected to report
     * its progress using the supplied progress indicator and to check for
     * cancellation by checking to see if the thread it is running in has been
     * interrupted.
     *
     * @param progress A progress indicator for the task.
     *
     * @return The Runnable task.
     */
    abstract Runnable getTask(ProgressIndicator progress);

    @Override
    public BackgroundTaskAction clone() throws CloneNotSupportedException {
        super.clone();
        throw new CloneNotSupportedException();
    }

}
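A hypothetical concrete subclass, to show how BackgroundTaskAction is meant to be extended; the class name and task body are illustrative, not part of the patch:

    final class ExampleCleanupAction extends BackgroundTaskAction {

        private static final long serialVersionUID = 1L;

        ExampleCleanupAction() {
            super("Example Cleanup", "Example Cleanup");
        }

        @Override
        Runnable getTask(ProgressIndicator progress) {
            return () -> {
                progress.progress("Cleaning up...");
                // Do the long-running work here, checking
                // Thread.currentThread().isInterrupted() periodically so that
                // cancellation via the progress bar is honored.
            };
        }
    }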
@ -255,3 +255,5 @@ AinStatusDashboard.nodeStatusTableTitle.text=Auto Ingest Nodes
AinStatusDashboard.healthMonitorButton.text=Health Monitor
CasesDashboardTopComponent.refreshButton.text=Refresh
AutoIngestCasesDeletionDialog.jLabel1.text=Progress
CasesDashboardTopComponent.deleteOrphanCaseNodesButton.text=Delete Orphan Case Znodes
CasesDashboardTopComponent.deleteOrphanManifestNodesButton.text=Delete Orphan Manifest Znodes

@ -164,6 +164,10 @@ CTL_AutoIngestDashboardOpenAction=Auto Ingest Dashboard
CTL_AutoIngestDashboardTopComponent=Auto Ingest Jobs
CTL_CasesDashboardAction=Multi-User Cases Dashboard
CTL_CasesDashboardTopComponent=Cases
DeleteCaseAction.confirmationText=Are you sure you want to delete the following for the case(s):\n\tManifest file znodes\n\tCase database\n\tCore.properties file\n\tCase directory\n\tCase znodes
DeleteCaseAction.menuItemText=Delete Case(s)
DeleteCaseAction.progressDisplayName=Delete Case(s)
DeleteCaseAction.taskName=app-input-and-output
DeleteCaseInputAction.confirmationText=Are you sure you want to delete the following for the case(s):\n\tManifest files\n\tData sources\n
DeleteCaseInputAction.menuItemText=Delete Input
DeleteCaseInputAction.progressDisplayName=Delete Input
@ -182,9 +186,8 @@ DeleteCaseTask.progress.acquiringManifestLocks=Acquiring exclusive manifest file
DeleteCaseTask.progress.connectingToCoordSvc=Connecting to the coordination service...
DeleteCaseTask.progress.deletingCaseDirCoordSvcNode=Deleting case directory znode...
DeleteCaseTask.progress.deletingCaseNameCoordSvcNode=Deleting case name znode...
# {0} - data source name
# {1} - device id
DeleteCaseTask.progress.deletingDataSource=Deleting data source {0} with device id {1}...
# {0} - data source path
DeleteCaseTask.progress.deletingDataSource=Deleting data source {0}...
DeleteCaseTask.progress.deletingJobLogLockNode=Deleting case auto ingest log znode...
# {0} - manifest file path
DeleteCaseTask.progress.deletingManifest=Deleting manifest file {0}...
@ -201,6 +204,18 @@ DeleteCaseTask.progress.parsingManifest=Parsing manifest file {0}...
# {0} - manifest file path
DeleteCaseTask.progress.releasingManifestLock=Releasing lock on the manifest file {0}...
DeleteCaseTask.progress.startMessage=Starting deletion...
DeleteOrphanCaseNodesAction.progressDisplayName=Cleanup Case Znodes
DeleteOrphanCaseNodesTask.progress.connectingToCoordSvc=Connecting to the coordination service
# {0} - node path
DeleteOrphanCaseNodesTask.progress.deletingOrphanedCaseNode=Deleting orphaned case znode {0}
DeleteOrphanCaseNodesTask.progress.gettingCaseNodesListing=Querying coordination service for case znodes
DeleteOrphanCaseNodesTask.progress.startMessage=Starting orphaned case znode cleanup
DeleteOrphanManifestNodesAction.progressDisplayName=Cleanup Manifest File Znodes
DeleteOrphanManifestNodesTask.progress.connectingToCoordSvc=Connecting to the coordination service
# {0} - node path
DeleteOrphanManifestNodesTask.progress.deletingOrphanedManifestNode=Deleting orphaned manifest file znode {0}
DeleteOrphanManifestNodesTask.progress.gettingManifestNodes=Querying the coordination service for manifest file znodes
DeleteOrphanManifestNodesTask.progress.startMessage=Starting orphaned manifest file znode cleanup
HINT_CasesDashboardTopComponent=This is an administrative dashboard for multi-user cases
OpenAutoIngestLogAction.deletedLogErrorMsg=The case auto ingest log has been deleted.
OpenAutoIngestLogAction.logOpenFailedErrorMsg=Failed to open case auto ingest log. See application log for details.
@ -437,3 +452,5 @@ AinStatusDashboard.nodeStatusTableTitle.text=Auto Ingest Nodes
AinStatusDashboard.healthMonitorButton.text=Health Monitor
CasesDashboardTopComponent.refreshButton.text=Refresh
AutoIngestCasesDeletionDialog.jLabel1.text=Progress
CasesDashboardTopComponent.deleteOrphanCaseNodesButton.text=Delete Orphan Case Znodes
CasesDashboardTopComponent.deleteOrphanManifestNodesButton.text=Delete Orphan Manifest Znodes
@ -18,11 +18,14 @@
 */
package org.sleuthkit.autopsy.experimental.autoingest;

import java.io.File;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.List;
import javax.swing.Action;
import org.sleuthkit.autopsy.casemodule.multiusercases.CaseNodeData;
import org.sleuthkit.autopsy.casemodule.multiusercasesbrowser.MultiUserCaseBrowserCustomizer;
import org.sleuthkit.autopsy.coreutils.PlatformUtil;

/**
 * A customizer for the multi-user case browser panel used in the administrative
@ -31,9 +34,12 @@ import org.sleuthkit.autopsy.casemodule.multiusercasesbrowser.MultiUserCaseBrows
 */
final class CasesDashboardCustomizer implements MultiUserCaseBrowserCustomizer {

    private final static String ADMIN_EXT_ACCESS_FILE_NAME = "adminext"; // NON-NLS
    private final static String ADMIN_EXT_ACCESS_FILE_PATH = Paths.get(PlatformUtil.getUserConfigDirectory(), ADMIN_EXT_ACCESS_FILE_NAME).toString();
    private final DeleteCaseAction deleteCaseAction;
    private final DeleteCaseInputAction deleteCaseInputAction;
    private final DeleteCaseOutputAction deleteCaseOutputAction;
    private final DeleteCaseInputAndOutputAction deleteCaseAction;
    private final DeleteCaseInputAndOutputAction deleteCaseInputAndOutputAction;

    /**
     * Constructs a customizer for the multi-user case browser panel used in the
@ -48,9 +54,10 @@ final class CasesDashboardCustomizer implements MultiUserCaseBrowserCustomizer {
         * These actions are shared by all nodes in order to support multiple
         * selection.
         */
        deleteCaseAction = new DeleteCaseAction();
        deleteCaseInputAction = new DeleteCaseInputAction();
        deleteCaseOutputAction = new DeleteCaseOutputAction();
        deleteCaseAction = new DeleteCaseInputAndOutputAction();
        deleteCaseInputAndOutputAction = new DeleteCaseInputAndOutputAction();
    }

    @Override
@ -60,7 +67,9 @@ final class CasesDashboardCustomizer implements MultiUserCaseBrowserCustomizer {
        properties.add(Column.LAST_ACCESS_DATE);
        properties.add(Column.DIRECTORY);
        properties.add(Column.MANIFEST_FILE_ZNODES_DELETE_STATUS);
        properties.add(Column.DATA_SOURCES_DELETE_STATUS);
        if (CasesDashboardCustomizer.extendedFeaturesAreEnabled()) {
            properties.add(Column.DATA_SOURCES_DELETE_STATUS);
        }
        properties.add(Column.TEXT_INDEX_DELETE_STATUS);
        properties.add(Column.CASE_DB_DELETE_STATUS);
        properties.add(Column.CASE_DIR_DELETE_STATUS);
@ -84,9 +93,13 @@ final class CasesDashboardCustomizer implements MultiUserCaseBrowserCustomizer {
        List<Action> actions = new ArrayList<>();
        actions.add(new OpenCaseAction(nodeData));
        actions.add(new OpenAutoIngestLogAction(nodeData));
        actions.add(deleteCaseInputAction);
        actions.add(deleteCaseOutputAction);
        actions.add(deleteCaseAction);
        if (CasesDashboardCustomizer.extendedFeaturesAreEnabled()) {
            actions.add(deleteCaseInputAction);
            actions.add(deleteCaseOutputAction);
            actions.add(deleteCaseInputAndOutputAction);
        } else {
            actions.add(deleteCaseAction);
        }
        return actions;
    }

@ -95,4 +108,15 @@ final class CasesDashboardCustomizer implements MultiUserCaseBrowserCustomizer {
        return new OpenCaseAction(nodeData);
    }

    /**
     * Determines whether the extended system administrator features of the
     * cases dashboard are enabled.
     *
     * @return True or false.
     */
    static boolean extendedFeaturesAreEnabled() {
        File f = new File(ADMIN_EXT_ACCESS_FILE_PATH);
        return f.exists();
    }

}
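Per the constants above, the extended dashboard features are gated on the presence of a marker file in the user configuration directory. A hedged sketch of how an administrator (or a setup script) could enable them; the helper name is illustrative, while the "adminext" file name and PlatformUtil call come from the patch:

    static void enableExtendedDashboardFeatures() throws java.io.IOException {
        // Creating the empty "adminext" marker file in the user config
        // directory turns on the extra delete actions and columns the next
        // time the dashboard customizer is constructed.
        java.nio.file.Path marker = java.nio.file.Paths.get(
                PlatformUtil.getUserConfigDirectory(), "adminext");
        if (!java.nio.file.Files.exists(marker)) {
            java.nio.file.Files.createFile(marker);
        }
    }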
@ -20,8 +20,12 @@
              <EmptySpace max="-2" attributes="0"/>
              <Group type="103" groupAlignment="0" attributes="0">
                  <Group type="102" attributes="0">
                      <Component id="refreshButton" min="-2" max="-2" attributes="0"/>
                      <EmptySpace min="0" pref="458" max="32767" attributes="0"/>
                      <Component id="refreshButton" linkSize="3" min="-2" max="-2" attributes="0"/>
                      <EmptySpace max="-2" attributes="0"/>
                      <Component id="deleteOrphanCaseNodesButton" linkSize="3" min="-2" max="-2" attributes="0"/>
                      <EmptySpace max="-2" attributes="0"/>
                      <Component id="deleteOrphanManifestNodesButton" linkSize="3" min="-2" max="-2" attributes="0"/>
                      <EmptySpace min="0" pref="0" max="32767" attributes="0"/>
                  </Group>
                  <Group type="102" alignment="1" attributes="0">
                      <Component id="caseBrowserScrollPane" max="32767" attributes="0"/>
@ -37,7 +41,11 @@
              <EmptySpace max="-2" attributes="0"/>
              <Component id="caseBrowserScrollPane" pref="246" max="32767" attributes="0"/>
              <EmptySpace type="unrelated" max="-2" attributes="0"/>
              <Component id="refreshButton" min="-2" max="-2" attributes="0"/>
              <Group type="103" groupAlignment="3" attributes="0">
                  <Component id="refreshButton" linkSize="2" alignment="3" min="-2" max="-2" attributes="0"/>
                  <Component id="deleteOrphanCaseNodesButton" linkSize="2" alignment="3" min="-2" max="-2" attributes="0"/>
                  <Component id="deleteOrphanManifestNodesButton" linkSize="2" alignment="3" min="-2" max="-2" attributes="0"/>
              </Group>
              <EmptySpace max="-2" attributes="0"/>
          </Group>
      </Group>
@ -58,5 +66,25 @@

      <Layout class="org.netbeans.modules.form.compat2.layouts.support.JScrollPaneSupportLayout"/>
    </Container>
    <Component class="javax.swing.JButton" name="deleteOrphanCaseNodesButton">
      <Properties>
        <Property name="text" type="java.lang.String" editor="org.netbeans.modules.i18n.form.FormI18nStringEditor">
          <ResourceString bundle="org/sleuthkit/autopsy/experimental/autoingest/Bundle.properties" key="CasesDashboardTopComponent.deleteOrphanCaseNodesButton.text" replaceFormat="org.openide.util.NbBundle.getMessage({sourceFileName}.class, &quot;{key}&quot;)"/>
        </Property>
      </Properties>
      <Events>
        <EventHandler event="actionPerformed" listener="java.awt.event.ActionListener" parameters="java.awt.event.ActionEvent" handler="deleteOrphanCaseNodesButtonActionPerformed"/>
      </Events>
    </Component>
    <Component class="javax.swing.JButton" name="deleteOrphanManifestNodesButton">
      <Properties>
        <Property name="text" type="java.lang.String" editor="org.netbeans.modules.i18n.form.FormI18nStringEditor">
          <ResourceString bundle="org/sleuthkit/autopsy/experimental/autoingest/Bundle.properties" key="CasesDashboardTopComponent.deleteOrphanManifestNodesButton.text" replaceFormat="org.openide.util.NbBundle.getMessage({sourceFileName}.class, &quot;{key}&quot;)"/>
        </Property>
      </Properties>
      <Events>
        <EventHandler event="actionPerformed" listener="java.awt.event.ActionListener" parameters="java.awt.event.ActionEvent" handler="deleteOrphanManifestNodesButtonActionPerformed"/>
      </Events>
    </Component>
  </SubComponents>
</Form>
@ -119,6 +119,8 @@ public final class CasesDashboardTopComponent extends TopComponent implements Ex

        refreshButton = new javax.swing.JButton();
        caseBrowserScrollPane = new javax.swing.JScrollPane();
        deleteOrphanCaseNodesButton = new javax.swing.JButton();
        deleteOrphanManifestNodesButton = new javax.swing.JButton();

        org.openide.awt.Mnemonics.setLocalizedText(refreshButton, org.openide.util.NbBundle.getMessage(CasesDashboardTopComponent.class, "CasesDashboardTopComponent.refreshButton.text")); // NOI18N
        refreshButton.addActionListener(new java.awt.event.ActionListener() {
@ -127,6 +129,20 @@ public final class CasesDashboardTopComponent extends TopComponent implements Ex
            }
        });

        org.openide.awt.Mnemonics.setLocalizedText(deleteOrphanCaseNodesButton, org.openide.util.NbBundle.getMessage(CasesDashboardTopComponent.class, "CasesDashboardTopComponent.deleteOrphanCaseNodesButton.text")); // NOI18N
        deleteOrphanCaseNodesButton.addActionListener(new java.awt.event.ActionListener() {
            public void actionPerformed(java.awt.event.ActionEvent evt) {
                deleteOrphanCaseNodesButtonActionPerformed(evt);
            }
        });

        org.openide.awt.Mnemonics.setLocalizedText(deleteOrphanManifestNodesButton, org.openide.util.NbBundle.getMessage(CasesDashboardTopComponent.class, "CasesDashboardTopComponent.deleteOrphanManifestNodesButton.text")); // NOI18N
        deleteOrphanManifestNodesButton.addActionListener(new java.awt.event.ActionListener() {
            public void actionPerformed(java.awt.event.ActionEvent evt) {
                deleteOrphanManifestNodesButtonActionPerformed(evt);
            }
        });

        javax.swing.GroupLayout layout = new javax.swing.GroupLayout(this);
        this.setLayout(layout);
        layout.setHorizontalGroup(
@ -136,28 +152,51 @@ public final class CasesDashboardTopComponent extends TopComponent implements Ex
            .addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
                .addGroup(layout.createSequentialGroup()
                    .addComponent(refreshButton)
                    .addGap(0, 458, Short.MAX_VALUE))
                    .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
                    .addComponent(deleteOrphanCaseNodesButton)
                    .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
                    .addComponent(deleteOrphanManifestNodesButton)
                    .addGap(0, 0, Short.MAX_VALUE))
                .addGroup(javax.swing.GroupLayout.Alignment.TRAILING, layout.createSequentialGroup()
                    .addComponent(caseBrowserScrollPane)
                    .addContainerGap())))
        );

        layout.linkSize(javax.swing.SwingConstants.HORIZONTAL, new java.awt.Component[] {deleteOrphanCaseNodesButton, deleteOrphanManifestNodesButton, refreshButton});

        layout.setVerticalGroup(
            layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
            .addGroup(layout.createSequentialGroup()
                .addContainerGap()
                .addComponent(caseBrowserScrollPane, javax.swing.GroupLayout.DEFAULT_SIZE, 246, Short.MAX_VALUE)
                .addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.UNRELATED)
                .addComponent(refreshButton)
                .addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
                    .addComponent(refreshButton)
                    .addComponent(deleteOrphanCaseNodesButton)
                    .addComponent(deleteOrphanManifestNodesButton))
                .addContainerGap())
        );

        layout.linkSize(javax.swing.SwingConstants.VERTICAL, new java.awt.Component[] {deleteOrphanCaseNodesButton, deleteOrphanManifestNodesButton, refreshButton});

    }// </editor-fold>//GEN-END:initComponents

    private void refreshButtonActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_refreshButtonActionPerformed
        caseBrowserPanel.displayCases();
    }//GEN-LAST:event_refreshButtonActionPerformed

    private void deleteOrphanCaseNodesButtonActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_deleteOrphanCaseNodesButtonActionPerformed
        new DeleteOrphanCaseNodesAction().actionPerformed(evt);
    }//GEN-LAST:event_deleteOrphanCaseNodesButtonActionPerformed

    private void deleteOrphanManifestNodesButtonActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_deleteOrphanManifestNodesButtonActionPerformed
        new DeleteOrphanManifestNodesAction().actionPerformed(evt);
    }//GEN-LAST:event_deleteOrphanManifestNodesButtonActionPerformed

    // Variables declaration - do not modify//GEN-BEGIN:variables
    private javax.swing.JScrollPane caseBrowserScrollPane;
    private javax.swing.JButton deleteOrphanCaseNodesButton;
    private javax.swing.JButton deleteOrphanManifestNodesButton;
    private javax.swing.JButton refreshButton;
    // End of variables declaration//GEN-END:variables

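The generated handlers above do nothing except forward each button press to a standalone action object, which keeps the Matisse-generated UI class free of business logic. A condensed sketch of that delegation, with a hypothetical action standing in for DeleteOrphanCaseNodesAction:

import java.awt.event.ActionEvent;
import javax.swing.AbstractAction;
import javax.swing.JButton;

public class ButtonDelegationDemo {

    public static void main(String[] args) {
        JButton cleanupButton = new JButton("Cleanup");
        // Forward the button press to a reusable Action, mirroring the
        // deleteOrphanCaseNodesButtonActionPerformed handler above.
        cleanupButton.addActionListener(evt -> new CleanupAction().actionPerformed(evt));
    }

    // Hypothetical stand-in for DeleteOrphanCaseNodesAction.
    static final class CleanupAction extends AbstractAction {

        private static final long serialVersionUID = 1L;

        @Override
        public void actionPerformed(ActionEvent evt) {
            System.out.println("Kicking off background cleanup task...");
        }
    }
}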
@ -18,89 +18,57 @@
 */
package org.sleuthkit.autopsy.experimental.autoingest;

import com.google.common.util.concurrent.ThreadFactoryBuilder;
import java.awt.event.ActionEvent;
import java.util.ArrayList;
import java.util.Collection;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import javax.swing.AbstractAction;
import org.openide.util.Utilities;
import org.openide.util.NbBundle;
import org.sleuthkit.autopsy.casemodule.multiusercases.CaseNodeData;
import org.sleuthkit.autopsy.progress.AppFrameProgressBar;
import org.sleuthkit.autopsy.progress.TaskCancellable;
import org.sleuthkit.autopsy.coreutils.MessageNotifyUtil;
import org.sleuthkit.autopsy.progress.ProgressIndicator;

/**
 * An abstract class for an action that deletes one or more auto ingest cases
 * using a thread pool, one task per case. Uses the Template Method design
 * pattern to allow subclasses to specify the deletion task to be performed.
 *
 * The cases to delete are discovered by querying the actions global context
 * lookup for CaseNodeData objects. See
 * https://platform.netbeans.org/tutorials/nbm-selection-1.html and
 * https://platform.netbeans.org/tutorials/nbm-selection-2.html for details.
 * An action that completely deletes one or more multi-user cases. Only the
 * components created by the application are deleted: the case output and the
 * coordination service nodes. Note that manifest file coordination service
 * nodes are only marked as deleted by setting the processing status field for
 * the corresponding auto ingest job to DELETED. This is done to avoid imposing
 * the requirement that the manifests be deleted before deleting the cases,
 * since at this time manifests are not considered to be case components created
 * by the application.
 */
abstract class DeleteCaseAction extends AbstractAction {
final class DeleteCaseAction extends DeleteCaseComponentsAction {

    private static final long serialVersionUID = 1L;
    private static final int NUMBER_OF_THREADS = 4;
    private static final String THREAD_NAME_SUFFIX = "-task-%d"; //NON-NLS
    private static final String PROGRESS_DISPLAY_NAME = "%s for %s"; //NON-NLS
    private final String taskDisplayName;
    private final ExecutorService executor;

    /**
     * Constructs an abstract class for an action that deletes one or more auto
     * ingest cases using a thread pool, one task per case. Uses the Template
     * Method design pattern to allow subclasses to specify the deletion task to
     * be performed.
     *
     * @param menuItemText    The menu item text for the action.
     * @param taskDisplayName The task display name for the progress indicator
     *                        for the task, to be inserted in the first position
     *                        of "%s for %s", where the second substitution is
     *                        the case name.
     * @param taskName        The task name, to be inserted in the first
     *                        position of "%s-task-%d", where the second
     *                        substitution is the pool thread number.
     * Constructs an action that completely deletes one or more multi-user cases.
     * Only the components created by the application are deleted: the case
     * output and the coordination service nodes. Note that manifest file
     * coordination service nodes are only marked as deleted by setting the
     * processing status field for the corresponding auto ingest job to DELETED.
     * This is done to avoid imposing the requirement that the manifests be
     * deleted before deleting the cases, since at this time manifests are not
     * considered to be case components created by the application.
     */
    DeleteCaseAction(String menuItemText, String taskDisplayName, String taskName) {
        super(menuItemText);
        this.taskDisplayName = taskDisplayName;
        String threadNameFormat = taskName + THREAD_NAME_SUFFIX;
        executor = Executors.newFixedThreadPool(NUMBER_OF_THREADS, new ThreadFactoryBuilder().setNameFormat(threadNameFormat).build());
    @NbBundle.Messages({
        "DeleteCaseAction.menuItemText=Delete Case(s)",
        "DeleteCaseAction.progressDisplayName=Delete Case(s)",
        "DeleteCaseAction.taskName=app-input-and-output"
    })
    DeleteCaseAction() {
        super(Bundle.DeleteCaseAction_menuItemText(), Bundle.DeleteCaseAction_progressDisplayName(), Bundle.DeleteCaseAction_taskName());
    }

    @NbBundle.Messages({
        "DeleteCaseAction.confirmationText=Are you sure you want to delete the following for the case(s):\n\tManifest file znodes\n\tCase database\n\tCore.properties file\n\tCase directory\n\tCase znodes"
    })
    @Override
    public void actionPerformed(ActionEvent event) {
        Collection<CaseNodeData> selectedNodeData = new ArrayList<>(Utilities.actionsGlobalContext().lookupAll(CaseNodeData.class));
        for (CaseNodeData nodeData : selectedNodeData) {
            AppFrameProgressBar progress = new AppFrameProgressBar(String.format(PROGRESS_DISPLAY_NAME, taskDisplayName, nodeData.getDisplayName()));
            TaskCancellable taskCanceller = new TaskCancellable(progress);
            progress.setCancellationBehavior(taskCanceller);
            Future<?> future = executor.submit(getTask(nodeData, progress));
            taskCanceller.setFuture(future);
        if (MessageNotifyUtil.Message.confirm(Bundle.DeleteCaseAction_confirmationText())) {
            super.actionPerformed(event);
        }
    }

    /**
     * Uses the Template Method design pattern to allow subclasses to specify
     * the deletion task to be performed in a worker thread by this action.
     *
     * @param caseNodeData The case directory lock coordination service node
     *                     data for the case to be deleted.
     * @param progress     A progress indicator for the task.
     *
     * @return A case deletion task, ready to be executed.
     */
    abstract DeleteCaseTask getTask(CaseNodeData caseNodeData, ProgressIndicator progress);

    @Override
    public DeleteCaseAction clone() throws CloneNotSupportedException {
        super.clone();
        throw new CloneNotSupportedException();
    DeleteCaseTask getTask(CaseNodeData caseNodeData, ProgressIndicator progress) {
        return new DeleteCaseTask(caseNodeData, DeleteCaseTask.DeleteOptions.DELETE_CASE, progress);
    }

}
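The reworked actionPerformed() above now asks for confirmation and only then defers to the inherited deletion logic. A sketch of that confirm-then-delegate flow, using a plain JOptionPane in place of MessageNotifyUtil.Message.confirm(), which is assumed to return true on a Yes answer:

import java.awt.event.ActionEvent;
import javax.swing.JOptionPane;

// Sketch of the confirm-then-delegate flow above; the helper names here are
// hypothetical stand-ins for the real action hierarchy.
final class ConfirmedDeleteDemo {

    static void onActionPerformed(ActionEvent event, Runnable superActionPerformed) {
        int choice = JOptionPane.showConfirmDialog(null,
                "Are you sure you want to delete the selected case(s)?",
                "Delete Case(s)", JOptionPane.YES_NO_OPTION);
        if (choice == JOptionPane.YES_OPTION) {
            // Only the confirmed path reaches the inherited deletion logic.
            superActionPerformed.run();
        }
    }
}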

@ -0,0 +1,107 @@
/*
 * Autopsy Forensic Browser
 *
 * Copyright 2019 Basis Technology Corp. Contact: carrier <at> sleuthkit
 * <dot> org
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */
package org.sleuthkit.autopsy.experimental.autoingest;

import com.google.common.util.concurrent.ThreadFactoryBuilder;
import java.awt.event.ActionEvent;
import java.util.ArrayList;
import java.util.Collection;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import javax.swing.AbstractAction;
import org.openide.util.Utilities;
import org.sleuthkit.autopsy.casemodule.multiusercases.CaseNodeData;
import org.sleuthkit.autopsy.progress.AppFrameProgressBar;
import org.sleuthkit.autopsy.progress.TaskCancellable;
import org.sleuthkit.autopsy.progress.ProgressIndicator;

/**
 * An abstract class for an action that deletes components of one or more
 * multi-user cases using a thread pool, one task per case. Uses the Template
 * Method design pattern to allow subclasses to specify the deletion task to be
 * performed.
 *
 * The cases to delete are discovered by querying the actions global context
 * lookup for CaseNodeData objects. See
 * https://platform.netbeans.org/tutorials/nbm-selection-1.html and
 * https://platform.netbeans.org/tutorials/nbm-selection-2.html for details.
 */
abstract class DeleteCaseComponentsAction extends AbstractAction {

    private static final long serialVersionUID = 1L;
    private static final int NUMBER_OF_THREADS = 4;
    private static final String THREAD_NAME_SUFFIX = "-task-%d"; //NON-NLS
    private static final String PROGRESS_DISPLAY_NAME = "%s for %s"; //NON-NLS
    private final String taskDisplayName;
    private final ExecutorService executor;

    /**
     * Constructs an abstract class for an action that deletes components of one
     * or more multi-user cases using a thread pool, one task per case. Uses the
     * Template Method design pattern to allow subclasses to specify the
     * deletion task to be performed.
     *
     * @param menuItemText    The menu item text for the action.
     * @param taskDisplayName The task display name for the progress indicator
     *                        for the task, to be inserted in the first position
     *                        of "%s for %s", where the second substitution is
     *                        the case name.
     * @param taskName        The task name, to be inserted in the first
     *                        position of "%s-task-%d", where the second
     *                        substitution is the pool thread number.
     */
    DeleteCaseComponentsAction(String menuItemText, String taskDisplayName, String taskName) {
        super(menuItemText);
        this.taskDisplayName = taskDisplayName;
        String threadNameFormat = taskName + THREAD_NAME_SUFFIX;
        executor = Executors.newFixedThreadPool(NUMBER_OF_THREADS, new ThreadFactoryBuilder().setNameFormat(threadNameFormat).build());
    }

    @Override
    public void actionPerformed(ActionEvent event) {
        Collection<CaseNodeData> selectedNodeData = new ArrayList<>(Utilities.actionsGlobalContext().lookupAll(CaseNodeData.class));
        for (CaseNodeData nodeData : selectedNodeData) {
            AppFrameProgressBar progress = new AppFrameProgressBar(String.format(PROGRESS_DISPLAY_NAME, taskDisplayName, nodeData.getDisplayName()));
            TaskCancellable taskCanceller = new TaskCancellable(progress);
            progress.setCancellationBehavior(taskCanceller);
            Future<?> future = executor.submit(getTask(nodeData, progress));
            taskCanceller.setFuture(future);
        }
    }

    /**
     * Uses the Template Method design pattern to allow subclasses to specify
     * the deletion task to be performed in a worker thread by this action.
     *
     * @param caseNodeData The case directory lock coordination service node
     *                     data for the case to be deleted.
     * @param progress     A progress indicator for the task.
     *
     * @return A case deletion task, ready to be executed.
     */
    abstract DeleteCaseTask getTask(CaseNodeData caseNodeData, ProgressIndicator progress);

    @Override
    public DeleteCaseComponentsAction clone() throws CloneNotSupportedException {
        super.clone();
        throw new CloneNotSupportedException();
    }

}
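DeleteCaseComponentsAction fixes the looping, progress, and thread-pool plumbing and leaves a single hook, getTask(), for subclasses to fill in. A stripped-down sketch of that Template Method shape, with hypothetical class names standing in for the real action/task pair:

// Base class fixes the algorithm; subclasses supply only the variable step.
abstract class BaseDeleteAction {

    final void run() {
        Runnable task = getTask();   // the Template Method hook
        new Thread(task).start();    // fixed plumbing (a thread pool in the real class)
    }

    abstract Runnable getTask();     // the step each subclass must define
}

final class DeleteEverythingAction extends BaseDeleteAction {

    @Override
    Runnable getTask() {
        return () -> System.out.println("deleting everything...");
    }
}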
@ -32,7 +32,7 @@ import org.sleuthkit.autopsy.progress.ProgressIndicator;
 * may need to be directed to reclaim space, but the option to restore the
 * directories without having the jobs be reprocessed is retained.
 */
final class DeleteCaseInputAction extends DeleteCaseAction {
final class DeleteCaseInputAction extends DeleteCaseComponentsAction {

    private static final long serialVersionUID = 1L;

@ -31,7 +31,7 @@ import org.sleuthkit.autopsy.progress.ProgressIndicator;
 * associated auto ingest job input directories and all coordination service
 * nodes.
 */
final class DeleteCaseInputAndOutputAction extends DeleteCaseAction {
final class DeleteCaseInputAndOutputAction extends DeleteCaseComponentsAction {

    private static final long serialVersionUID = 1L;

@ -61,7 +61,7 @@ final class DeleteCaseInputAndOutputAction extends DeleteCaseAction {

    @Override
    DeleteCaseTask getTask(CaseNodeData caseNodeData, ProgressIndicator progress) {
        return new DeleteCaseTask(caseNodeData, DeleteOptions.DELETE_ALL, progress);
        return new DeleteCaseTask(caseNodeData, DeleteOptions.DELETE_INPUT_AND_OUTPUT, progress);
    }

}

@ -32,7 +32,7 @@ import org.sleuthkit.autopsy.progress.ProgressIndicator;
 * deleted even though the coordination service nodes for the auto ingest jobs
 * are deleted.
 */
final class DeleteCaseOutputAction extends DeleteCaseAction {
final class DeleteCaseOutputAction extends DeleteCaseComponentsAction {

    private static final long serialVersionUID = 1L;

@ -37,6 +37,7 @@ import org.sleuthkit.datamodel.SleuthkitCase;
import org.sleuthkit.autopsy.casemodule.Case;
import org.sleuthkit.autopsy.casemodule.CaseMetadata;
import org.sleuthkit.autopsy.casemodule.multiusercases.CaseNodeData;
import org.sleuthkit.autopsy.casemodule.multiusercases.CaseNodeData.CaseNodeDataException;
import org.sleuthkit.autopsy.casemodule.multiusercases.CoordinationServiceUtils;
import org.sleuthkit.autopsy.coordinationservice.CoordinationService;
import org.sleuthkit.autopsy.coordinationservice.CoordinationService.CategoryNode;
@ -61,13 +62,11 @@ final class DeleteCaseTask implements Runnable {

    private static final int MANIFEST_FILE_LOCKING_TIMEOUT_MINS = 5;
    private static final int MANIFEST_DELETE_TRIES = 3;
    private static final String NO_NODE_ERROR_MSG_FRAGMENT = "KeeperErrorCode = NoNode";
    private static final Logger logger = AutoIngestDashboardLogger.getLogger();
    private final CaseNodeData caseNodeData;
    private final DeleteOptions deleteOption;
    private final ProgressIndicator progress;
    private final List<Path> manifestFilePaths;
    private final List<Lock> manifestFileLocks;
    private final List<ManifestFileLock> manifestFileLocks;
    private CoordinationService coordinationService;
    private CaseMetadata caseMetadata;

@ -94,7 +93,16 @@ final class DeleteCaseTask implements Runnable {
        /**
         * Delete everything.
         */
        DELETE_ALL
        DELETE_INPUT_AND_OUTPUT,
        /**
         * Delete only the case components that the application created. This is
         * DELETE_OUTPUT with the additional feature that manifest file
         * coordination service nodes are marked as deleted, rather than
         * actually deleted. This eliminates the requirement that manifests and
         * data sources have to be deleted before deleting the case to avoid an
         * unwanted, automatic reprocessing of the case.
         */
        DELETE_CASE
    }

    /**
@ -111,7 +119,6 @@ final class DeleteCaseTask implements Runnable {
        this.caseNodeData = caseNodeData;
        this.deleteOption = deleteOption;
        this.progress = progress;
        manifestFilePaths = new ArrayList<>();
        manifestFileLocks = new ArrayList<>();
    }

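The rest of the task branches on this enum to decide how far deletion goes. A compact sketch of that dispatch, using the option names that appear in this diff (DELETE_INPUT and DELETE_OUTPUT predate this change):

enum DeleteOptions { DELETE_INPUT, DELETE_OUTPUT, DELETE_INPUT_AND_OUTPUT, DELETE_CASE }

final class DeleteDispatchDemo {

    static void describe(DeleteOptions option) {
        switch (option) {
            case DELETE_INPUT_AND_OUTPUT:
                // Everything goes: manifests, data sources, output, and znodes.
                System.out.println("Delete input, output, and all coordination service nodes.");
                break;
            case DELETE_CASE:
                // App-created output goes; manifest znodes are only marked DELETED.
                System.out.println("Delete app-created components; mark manifest znodes as deleted.");
                break;
            default:
                System.out.println("Partial deletion: " + option);
        }
    }
}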
@ -214,8 +221,6 @@ final class DeleteCaseTask implements Runnable {
            }
            checkForCancellation();

            getManifestFilePaths();
            checkForCancellation();
            /*
             * Acquire exclusive locks for the auto ingest job manifest
             * files for the case, if any. Manifest file locks are acquired
@ -231,18 +236,24 @@ final class DeleteCaseTask implements Runnable {
                return;
            }
            checkForCancellation();

            deleteCaseContents();
            checkForCancellation();

            deleteCaseResourcesNode();
            checkForCancellation();

            deleteCaseAutoIngestLogNode();
            checkForCancellation();

            deleteManifestFileNodes();
            checkForCancellation();
        }

        deleteCaseDirectoryNode();
        checkForCancellation();
    }

        deleteCaseNameNode();
    }

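run() deliberately interleaves checkForCancellation() calls between the deletion steps so a cancel request takes effect at the next step boundary rather than mid-operation. A minimal sketch of that checkpoint technique, assuming cancellation is signaled through thread interruption (the real helper's implementation is not shown in this diff):

final class CancellationCheckpointDemo {

    // Assumed implementation of a checkForCancellation() helper; the real one
    // in DeleteCaseTask may differ.
    private static void checkForCancellation() throws InterruptedException {
        if (Thread.currentThread().isInterrupted()) {
            throw new InterruptedException("Task cancelled");
        }
    }

    static void runSteps(Runnable... steps) throws InterruptedException {
        for (Runnable step : steps) {
            step.run();
            checkForCancellation(); // honor cancellation between long-running steps
        }
    }
}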
@ -258,20 +269,16 @@ final class DeleteCaseTask implements Runnable {
     * @throws IOException          If there is an error reading the
     *                              manifests list file.
     */
    private void getManifestFilePaths() throws IOException, CoordinationServiceException, InterruptedException {
    private List<Path> getManifestFilePaths() throws IOException, CoordinationServiceException, InterruptedException {
        progress.progress(Bundle.DeleteCaseTask_progress_gettingManifestPaths());
        logger.log(Level.INFO, String.format("Getting manifest file paths for %s", caseNodeData.getDisplayName()));
        final Path manifestsListFilePath = Paths.get(caseNodeData.getDirectory().toString(), AutoIngestManager.getCaseManifestsListFileName());
        final File manifestListsFile = manifestsListFilePath.toFile();
        if (manifestListsFile.exists()) {
            getManifestPathsFromFile(manifestsListFilePath);
            return getManifestPathsFromFile(manifestsListFilePath);
        } else {
            getManifestPathsFromNodes();
            return getManifestPathsFromNodes();
        }
        if (manifestFilePaths.isEmpty()) {
            setDeletedItemFlag(CaseNodeData.DeletedFlags.MANIFEST_FILE_NODES);
        }
        logger.log(Level.INFO, String.format("Found %d manifest file path(s) for %s", manifestFilePaths.size(), caseNodeData.getDisplayName()));
    }

    /**
@ -280,13 +287,16 @@ final class DeleteCaseTask implements Runnable {
     *
     * @param manifestsListFilePath The path of the manifests list file.
     *
     * @return A list of manifest file paths, possibly empty.
     *
     * @throws IOException          If there is an error reading the manifests
     *                              list file.
     * @throws InterruptedException If the thread in which this task is running
     *                              is interrupted while blocked waiting for a
     *                              coordination service operation to complete.
     */
    private void getManifestPathsFromFile(Path manifestsListFilePath) throws IOException, InterruptedException {
    private List<Path> getManifestPathsFromFile(Path manifestsListFilePath) throws IOException, InterruptedException {
        final List<Path> manifestFilePaths = new ArrayList<>();
        try (final Scanner manifestsListFileScanner = new Scanner(manifestsListFilePath)) {
            while (manifestsListFileScanner.hasNextLine()) {
                checkForCancellation();
@ -296,6 +306,7 @@ final class DeleteCaseTask implements Runnable {
                }
            }
        }
        return manifestFilePaths;
    }

    /**
@ -303,6 +314,8 @@ final class DeleteCaseTask implements Runnable {
     * the node data of the manifest file coordination service nodes and
     * matching on case name.
     *
     * @return A list of manifest file paths, possibly empty.
     *
     * @throws CoordinationServiceException If there is an error completing a
     *                                      coordination service operation.
     * @throws InterruptedException         If the thread in which this task is
@ -310,7 +323,7 @@ final class DeleteCaseTask implements Runnable {
     *                                      waiting for a coordination service
     *                                      operation to complete.
     */
    private void getManifestPathsFromNodes() throws CoordinationServiceException, InterruptedException {
    private List<Path> getManifestPathsFromNodes() throws CoordinationServiceException, InterruptedException {
        /*
         * Get the original, undecorated case name from the case directory. This
         * is necessary because the case display name can be changed and the
@ -320,6 +333,7 @@ final class DeleteCaseTask implements Runnable {
         * data.
         */
        String caseName = CoordinationServiceUtils.getCaseNameNodePath(caseNodeData.getDirectory());
        final List<Path> manifestFilePaths = new ArrayList<>();
        final List<String> nodeNames = coordinationService.getNodeList(CoordinationService.CategoryNode.MANIFESTS);
        for (String manifestNodeName : nodeNames) {
            checkForCancellation();
@ -336,6 +350,7 @@ final class DeleteCaseTask implements Runnable {
                logger.log(Level.WARNING, String.format("Error getting coordination service node data from %s", manifestNodeName), ex);
            }
        }
        return manifestFilePaths;
    }

    /**
@ -356,6 +371,8 @@ final class DeleteCaseTask implements Runnable {
    })
    private boolean acquireManifestFileLocks() throws IOException, CoordinationServiceException, InterruptedException {
        boolean allLocksAcquired = true;
        List<Path> manifestFilePaths = getManifestFilePaths();
        logger.log(Level.INFO, String.format("Found %d manifest file path(s) for %s", manifestFilePaths.size(), caseNodeData.getDisplayName()));
        if (!manifestFilePaths.isEmpty()) {
            progress.progress(Bundle.DeleteCaseTask_progress_acquiringManifestLocks());
            logger.log(Level.INFO, String.format("Acquiring exclusive manifest file locks for %s", caseNodeData.getDisplayName()));
@ -376,7 +393,7 @@ final class DeleteCaseTask implements Runnable {
                    logger.log(Level.INFO, String.format("Exclusively locking the manifest %s for %s", manifestPath, caseNodeData.getDisplayName()));
                    CoordinationService.Lock manifestLock = coordinationService.tryGetExclusiveLock(CoordinationService.CategoryNode.MANIFESTS, manifestPath.toString(), MANIFEST_FILE_LOCKING_TIMEOUT_MINS, TimeUnit.MINUTES);
                    if (null != manifestLock) {
                        manifestFileLocks.add(manifestLock);
                        manifestFileLocks.add(new ManifestFileLock(manifestPath, manifestLock));
                    } else {
                        logger.log(Level.INFO, String.format("Failed to exclusively lock the manifest %s because it was already held by another host", manifestPath, caseNodeData.getDisplayName()));
                        allLocksAcquired = false;
@ -388,6 +405,8 @@ final class DeleteCaseTask implements Runnable {
                releaseManifestFileLocks();
                throw ex;
            }
        } else {
            setDeletedItemFlag(CaseNodeData.DeletedFlags.MANIFEST_FILE_NODES);
        }
        return allLocksAcquired;
    }
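acquireManifestFileLocks() attempts each manifest lock with a timeout and backs out cleanly if anything fails. A generic sketch of the acquire-all-or-release-all pattern, using java.util.concurrent locks in place of the coordination service locks above:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Lock;

final class AcquireAllOrNothingDemo {

    // Returns true only if every lock was acquired; otherwise releases any
    // locks already held, mirroring the manifest-lock logic above.
    static boolean acquireAll(List<Lock> locks, long timeoutMinutes) throws InterruptedException {
        List<Lock> acquired = new ArrayList<>();
        try {
            for (Lock lock : locks) {
                if (lock.tryLock(timeoutMinutes, TimeUnit.MINUTES)) {
                    acquired.add(lock);
                } else {
                    releaseAll(acquired); // partial success is treated as failure
                    return false;
                }
            }
            return true;
        } catch (InterruptedException ex) {
            releaseAll(acquired);
            throw ex;
        }
    }

    private static void releaseAll(List<Lock> acquired) {
        for (Lock lock : acquired) {
            lock.unlock();
        }
    }
}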
@ -409,11 +428,15 @@ final class DeleteCaseTask implements Runnable {
        try {
            caseMetadata = new CaseMetadata(caseMetadataPath);
            checkForCancellation();
            if (!manifestFilePaths.isEmpty() && (deleteOption == DeleteOptions.DELETE_INPUT || deleteOption == DeleteOptions.DELETE_ALL)) {
                deleteAutoIngestInput();
            if (!manifestFileLocks.isEmpty()) {
                if (deleteOption == DeleteOptions.DELETE_INPUT || deleteOption == DeleteOptions.DELETE_INPUT_AND_OUTPUT) {
                    deleteAutoIngestInput();
                } else if (deleteOption == DeleteOptions.DELETE_CASE) {
                    markManifestFileNodesAsDeleted();
                }
            }
            checkForCancellation();
            if (deleteOption == DeleteOptions.DELETE_OUTPUT || deleteOption == DeleteOptions.DELETE_ALL) {
            if (deleteOption == DeleteOptions.DELETE_OUTPUT || deleteOption == DeleteOptions.DELETE_INPUT_AND_OUTPUT || deleteOption == DeleteOptions.DELETE_CASE) {
                Case.deleteMultiUserCase(caseNodeData, caseMetadata, progress, logger);
            }

@ -441,8 +464,7 @@ final class DeleteCaseTask implements Runnable {
     */
    @NbBundle.Messages({
        "DeleteCaseTask.progress.openingCaseDatabase=Opening the case database...",
        "# {0} - manifest file path", "DeleteCaseTask.progress.parsingManifest=Parsing manifest file {0}...",
        "# {0} - manifest file path", "DeleteCaseTask.progress.deletingManifest=Deleting manifest file {0}..."
        "# {0} - manifest file path", "DeleteCaseTask.progress.parsingManifest=Parsing manifest file {0}..."
    })
    private void deleteAutoIngestInput() throws InterruptedException {
        SleuthkitCase caseDb = null;
@ -459,19 +481,16 @@ final class DeleteCaseTask implements Runnable {
             * manifest.
             */
            boolean allInputDeleted = true;
            for (Path manifestFilePath : manifestFilePaths) {
            for (ManifestFileLock lock : manifestFileLocks) {
                checkForCancellation();
                Path manifestFilePath = lock.getManifestFilePath();
                final File manifestFile = manifestFilePath.toFile();
                if (manifestFile.exists()) {
                    Manifest manifest = parseManifestFile(manifestFilePath);
                    if (manifest != null) {
                        if (deleteDataSources(manifest, dataSources)) {
                            if (!deleteManifestFile(manifestFile)) {
                                logger.log(Level.WARNING, String.format("Failed to delete manifest file %s for %s", manifestFilePath, caseNodeData.getDisplayName()));
                                allInputDeleted = false;
                            }
                        if (deleteDataSources(manifest, dataSources) && deleteManifestFile(manifestFile)) {
                            lock.setInputDeleted();
                        } else {
                            logger.log(Level.WARNING, String.format("Failed to delete manifest file %s for %s", manifestFilePath, caseNodeData.getDisplayName()));
                            allInputDeleted = false;
                        }
                    } else {
@ -531,6 +550,9 @@ final class DeleteCaseTask implements Runnable {
     *                              is interrupted while blocked waiting for a
     *                              coordination service operation to complete.
     */
    @NbBundle.Messages({
        "# {0} - manifest file path", "DeleteCaseTask.progress.deletingManifest=Deleting manifest file {0}..."
    })
    private boolean deleteManifestFile(File manifestFile) throws InterruptedException {
        /*
         * Delete the manifest file, allowing a few retries. This is a way to
@ -551,6 +573,9 @@ final class DeleteCaseTask implements Runnable {
                Thread.sleep(1000);
            }
        }
        if (!deleted) {
            logger.log(Level.WARNING, String.format("Failed to delete manifest file %s for %s", manifestFilePath, caseNodeData.getDisplayName()));
        }
        return deleted;
    }

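deleteManifestFile() retries the delete a few times with a one-second pause between attempts, a simple way to ride out transient failures such as another process briefly holding the file. A self-contained sketch of the same retry loop:

import java.io.File;

final class RetryingDeleteDemo {

    private static final int MAX_TRIES = 3; // mirrors MANIFEST_DELETE_TRIES above

    // Attempts to delete a file, sleeping briefly between tries, as in
    // deleteManifestFile() above.
    static boolean deleteWithRetries(File file) throws InterruptedException {
        boolean deleted = false;
        for (int tries = 0; !deleted && tries < MAX_TRIES; ++tries) {
            deleted = file.delete();
            if (!deleted) {
                Thread.sleep(1000); // wait out a transient lock or sharing violation
            }
        }
        return deleted;
    }
}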
@ -565,12 +590,12 @@ final class DeleteCaseTask implements Runnable {
     *         otherwise.
     */
    @NbBundle.Messages({
        "# {0} - data source name", "# {1} - device id", "DeleteCaseTask.progress.deletingDataSource=Deleting data source {0} with device id {1}...",})
        "# {0} - data source path", "DeleteCaseTask.progress.deletingDataSource=Deleting data source {0}..."
    })
    private boolean deleteDataSources(Manifest manifest, List<DataSource> dataSources) {
        final String dataSourceFileName = manifest.getDataSourceFileName();
        final String dataSourceDeviceId = manifest.getDeviceId();
        progress.progress(Bundle.DeleteCaseTask_progress_deletingDataSource(dataSourceFileName, dataSourceDeviceId));
        logger.log(Level.INFO, String.format("Deleting data source %s with device id %s from %s", dataSourceFileName, dataSourceDeviceId, caseNodeData.getDisplayName()));
        final Path dataSourcePath = manifest.getDataSourcePath();
        progress.progress(Bundle.DeleteCaseTask_progress_deletingDataSource(dataSourcePath));
        logger.log(Level.INFO, String.format("Deleting data source %s from %s", dataSourcePath, caseNodeData.getDisplayName()));

        /*
         * There are two possibilities here. The data source may be an image,
@ -580,37 +605,88 @@ final class DeleteCaseTask implements Runnable {
         * set, report file, archive file, etc.). In this case, just the file
         * referenced by the manifest will be deleted.
         */
        boolean allFilesDeleted = true;
        Set<Path> filesToDelete = new HashSet<>();
        for (DataSource dataSource : dataSources) {
        int index = 0;
        while (index < dataSources.size() && filesToDelete.isEmpty()) {
            DataSource dataSource = dataSources.get(index);
            if (dataSource instanceof Image) {
                Image image = (Image) dataSource;
                if (image.getName().equals(dataSourceFileName) && image.getDeviceId().equals(dataSourceDeviceId)) {
                    String[] imageFilePaths = image.getPaths();
                    for (String path : imageFilePaths) {
                        Path imageFilePath = Paths.get(path);
                        filesToDelete.add(imageFilePath);
                String[] imageFilePaths = image.getPaths();
                /*
                 * Check for a match between one of the paths for the image
                 * files and the data source file path in the manifest.
                 */
                for (String imageFilePath : imageFilePaths) {
                    Path candidatePath = Paths.get(imageFilePath);
                    if (candidatePath.equals(dataSourcePath)) {
                        /*
                         * If a match is found, add all of the file paths for
                         * the image to the set of files to be deleted.
                         */
                        for (String path : imageFilePaths) {
                            filesToDelete.add(Paths.get(path));
                        }
                        break;
                    }
                    break;
                }
            }
        }
        if (filesToDelete.isEmpty()) {
            final Path dataSourcePath = manifest.getDataSourcePath();
            filesToDelete.add(dataSourcePath);
            ++index;
        }

        /*
         * At a minimum, the data source at the file path given in the manifest
         * should be deleted. If the data source is not a disk image, this will
         * be the path of an archive, a logical file, or a logical directory.
         * TODO-4933: Currently, the contents extracted from an archive are not
         * deleted, nor are any additional files associated with a report data
         * source.
         */
        filesToDelete.add(dataSourcePath);

        /*
         * Delete the file(s).
         */
        boolean allFilesDeleted = true;
        for (Path path : filesToDelete) {
            File fileOrDir = path.toFile();
            if (fileOrDir.exists() && !FileUtil.deleteFileDir(fileOrDir)) {
                allFilesDeleted = false;
                logger.log(Level.INFO, String.format("Failed to delete data source file at %s for %s", path, caseNodeData.getDisplayName()));
                logger.log(Level.WARNING, String.format("Failed to delete data source file at %s for %s", path, caseNodeData.getDisplayName()));
            }
        }

        return allFilesDeleted;
    }

    /**
     * Marks the manifest file coordination service nodes as deleted by setting
     * the auto ingest job processing status field to deleted.
     *
     * @throws InterruptedException If the thread in which this task is running
     *                              is interrupted while blocked waiting for a
     *                              coordination service operation to complete.
     */
    private void markManifestFileNodesAsDeleted() throws InterruptedException {
        boolean allNodesMarked = true;
        for (ManifestFileLock manifestFileLock : manifestFileLocks) {
            String manifestFilePath = manifestFileLock.getManifestFilePath().toString();
            try {
                progress.progress(Bundle.DeleteCaseTask_progress_deletingManifestFileNode(manifestFilePath));
                logger.log(Level.INFO, String.format("Marking as deleted the manifest file znode for %s for %s", manifestFilePath, caseNodeData.getDisplayName()));
                final byte[] nodeBytes = coordinationService.getNodeData(CoordinationService.CategoryNode.MANIFESTS, manifestFilePath);
                AutoIngestJobNodeData nodeData = new AutoIngestJobNodeData(nodeBytes);
                nodeData.setProcessingStatus(AutoIngestJob.ProcessingStatus.DELETED);
                coordinationService.setNodeData(CategoryNode.MANIFESTS, manifestFilePath, nodeData.toArray());
            } catch (CoordinationServiceException | InvalidDataException ex) {
                logger.log(Level.WARNING, String.format("Error marking as deleted the manifest file znode for %s for %s", manifestFilePath, caseNodeData.getDisplayName()), ex);
                allNodesMarked = false;
            }
        }
        if (allNodesMarked) {
            setDeletedItemFlag(CaseNodeData.DeletedFlags.MANIFEST_FILE_NODES);
        }
    }

    /**
     * Deletes the case resources coordination service node.
     *
@ -619,14 +695,14 @@ final class DeleteCaseTask implements Runnable {
     *                              coordination service operation to complete.
     */
    private void deleteCaseResourcesNode() throws InterruptedException {
        if (deleteOption == DeleteOptions.DELETE_OUTPUT || deleteOption == DeleteOptions.DELETE_ALL) {
        if (deleteOption == DeleteOptions.DELETE_OUTPUT || deleteOption == DeleteOptions.DELETE_INPUT_AND_OUTPUT || deleteOption == DeleteOptions.DELETE_CASE) {
            progress.progress(Bundle.DeleteCaseTask_progress_deletingResourcesLockNode());
            logger.log(Level.INFO, String.format("Deleting case resources log znode for %s", caseNodeData.getDisplayName()));
            String resourcesNodePath = CoordinationServiceUtils.getCaseResourcesNodePath(caseNodeData.getDirectory());
            try {
                coordinationService.deleteNode(CategoryNode.CASES, resourcesNodePath);
            } catch (CoordinationServiceException ex) {
                if (!isNoNodeException(ex)) {
                if (!DeleteCaseUtils.isNoNodeException(ex)) {
                    logger.log(Level.SEVERE, String.format("Error deleting case resources znode for %s", caseNodeData.getDisplayName()), ex);
                }
            }
@ -641,14 +717,14 @@ final class DeleteCaseTask implements Runnable {
     *                              coordination service operation to complete.
     */
    private void deleteCaseAutoIngestLogNode() throws InterruptedException {
        if (deleteOption == DeleteOptions.DELETE_OUTPUT || deleteOption == DeleteOptions.DELETE_ALL) {
        if (deleteOption == DeleteOptions.DELETE_OUTPUT || deleteOption == DeleteOptions.DELETE_INPUT_AND_OUTPUT || deleteOption == DeleteOptions.DELETE_CASE) {
            progress.progress(Bundle.DeleteCaseTask_progress_deletingJobLogLockNode());
            logger.log(Level.INFO, String.format("Deleting case auto ingest job log znode for %s", caseNodeData.getDisplayName()));
            String logFilePath = CoordinationServiceUtils.getCaseAutoIngestLogNodePath(caseNodeData.getDirectory());
            try {
                coordinationService.deleteNode(CategoryNode.CASES, logFilePath);
            } catch (CoordinationServiceException ex) {
                if (!isNoNodeException(ex)) {
                if (!DeleteCaseUtils.isNoNodeException(ex)) {
                    logger.log(Level.SEVERE, String.format("Error deleting case auto ingest job log znode for %s", caseNodeData.getDisplayName()), ex);
                }
            }
@ -665,11 +741,17 @@ final class DeleteCaseTask implements Runnable {
     *                              coordination service operation to complete.
     */
    private void deleteCaseDirectoryNode() throws InterruptedException {
        if ((deleteOption == DeleteOptions.DELETE_OUTPUT || deleteOption == DeleteOptions.DELETE_ALL)
        if (((deleteOption == DeleteOptions.DELETE_OUTPUT || deleteOption == DeleteOptions.DELETE_INPUT_AND_OUTPUT)
                && caseNodeData.isDeletedFlagSet(CaseNodeData.DeletedFlags.DATA_SOURCES)
                && caseNodeData.isDeletedFlagSet(CaseNodeData.DeletedFlags.CASE_DB)
                && caseNodeData.isDeletedFlagSet(CaseNodeData.DeletedFlags.TEXT_INDEX)
                && caseNodeData.isDeletedFlagSet(CaseNodeData.DeletedFlags.CASE_DIR)
                && caseNodeData.isDeletedFlagSet(CaseNodeData.DeletedFlags.MANIFEST_FILE_NODES)) {
                && caseNodeData.isDeletedFlagSet(CaseNodeData.DeletedFlags.MANIFEST_FILE_NODES))
                || (deleteOption == DeleteOptions.DELETE_CASE
                && caseNodeData.isDeletedFlagSet(CaseNodeData.DeletedFlags.CASE_DB)
                && caseNodeData.isDeletedFlagSet(CaseNodeData.DeletedFlags.TEXT_INDEX)
                && caseNodeData.isDeletedFlagSet(CaseNodeData.DeletedFlags.CASE_DIR)
                && caseNodeData.isDeletedFlagSet(CaseNodeData.DeletedFlags.MANIFEST_FILE_NODES))) {
            progress.progress(Bundle.DeleteCaseTask_progress_deletingCaseDirCoordSvcNode());
            logger.log(Level.INFO, String.format("Deleting case directory znode for %s", caseNodeData.getDisplayName()));
            String caseDirNodePath = CoordinationServiceUtils.getCaseDirectoryNodePath(caseNodeData.getDirectory());
@ -689,7 +771,7 @@ final class DeleteCaseTask implements Runnable {
     *                              coordination service operation to complete.
     */
    private void deleteCaseNameNode() throws InterruptedException {
        if (deleteOption == DeleteOptions.DELETE_OUTPUT || deleteOption == DeleteOptions.DELETE_ALL) {
        if (deleteOption == DeleteOptions.DELETE_OUTPUT || deleteOption == DeleteOptions.DELETE_INPUT_AND_OUTPUT || deleteOption == DeleteOptions.DELETE_CASE) {
            progress.progress(Bundle.DeleteCaseTask_progress_deletingCaseNameCoordSvcNode());
            logger.log(Level.INFO, String.format("Deleting case name znode for %s", caseNodeData.getDisplayName()));
            try {
@ -701,24 +783,6 @@ final class DeleteCaseTask implements Runnable {
        }
    }

    /**
     * Examines a coordination service exception to try to determine if it is a
     * no node exception.
     *
     * @param ex A coordination service exception.
     *
     * @return True or false.
     */
    private boolean isNoNodeException(CoordinationServiceException ex) {
        boolean isNodeNodeEx = false;
        Throwable cause = ex.getCause();
        if (cause != null) {
            String causeMessage = cause.getMessage();
            isNodeNodeEx = causeMessage.contains(NO_NODE_ERROR_MSG_FRAGMENT);
        }
        return isNodeNodeEx;
    }

    /**
     * Releases all of the manifest file locks that have been acquired by this
     * task.
@ -727,8 +791,8 @@ final class DeleteCaseTask implements Runnable {
        "# {0} - manifest file path", "DeleteCaseTask.progress.releasingManifestLock=Releasing lock on the manifest file {0}..."
    })
    private void releaseManifestFileLocks() {
        for (Lock manifestFileLock : manifestFileLocks) {
            String manifestFilePath = manifestFileLock.getNodePath();
        for (ManifestFileLock manifestFileLock : manifestFileLocks) {
            String manifestFilePath = manifestFileLock.getManifestFilePath().toString();
            try {
                progress.progress(Bundle.DeleteCaseTask_progress_releasingManifestLock(manifestFilePath));
                logger.log(Level.INFO, String.format("Releasing the exclusive coordination service lock on the manifest file %s for %s", manifestFilePath, caseNodeData.getDisplayName()));
@ -755,19 +819,23 @@ final class DeleteCaseTask implements Runnable {
        "# {0} - manifest file path", "DeleteCaseTask.progress.deletingManifestFileNode=Deleting the manifest file znode for {0}..."
    })
    private void deleteManifestFileNodes() throws InterruptedException {
        if (deleteOption == DeleteOptions.DELETE_OUTPUT || deleteOption == DeleteOptions.DELETE_ALL) {
        if (deleteOption == DeleteOptions.DELETE_OUTPUT || deleteOption == DeleteOptions.DELETE_INPUT_AND_OUTPUT) {
            boolean allINodesDeleted = true;
            Iterator<Lock> iterator = manifestFileLocks.iterator();
            Iterator<ManifestFileLock> iterator = manifestFileLocks.iterator();
            while (iterator.hasNext()) {
                Lock manifestFileLock = iterator.next();
                String manifestFilePath = manifestFileLock.getNodePath();
                ManifestFileLock manifestFileLock = iterator.next();
                String manifestFilePath = manifestFileLock.getManifestFilePath().toString();
                try {
                    progress.progress(Bundle.DeleteCaseTask_progress_releasingManifestLock(manifestFilePath));
                    logger.log(Level.INFO, String.format("Releasing the lock on the manifest file %s for %s", manifestFilePath, caseNodeData.getDisplayName()));
                    manifestFileLock.release();
                    progress.progress(Bundle.DeleteCaseTask_progress_deletingManifestFileNode(manifestFilePath));
                    logger.log(Level.INFO, String.format("Deleting the manifest file znode for %s for %s", manifestFilePath, caseNodeData.getDisplayName()));
                    coordinationService.deleteNode(CoordinationService.CategoryNode.MANIFESTS, manifestFilePath);
                    if (manifestFileLock.isInputDeleted()) {
                        progress.progress(Bundle.DeleteCaseTask_progress_deletingManifestFileNode(manifestFilePath));
                        logger.log(Level.INFO, String.format("Deleting the manifest file znode for %s for %s", manifestFilePath, caseNodeData.getDisplayName()));
                        coordinationService.deleteNode(CoordinationService.CategoryNode.MANIFESTS, manifestFilePath);
                    } else {
                        allINodesDeleted = false;
                    }
                } catch (CoordinationServiceException ex) {
                    allINodesDeleted = false;
                    logger.log(Level.WARNING, String.format("Error deleting the manifest file znode for %s for %s", manifestFilePath, caseNodeData.getDisplayName()), ex);
@ -785,12 +853,14 @@ final class DeleteCaseTask implements Runnable {
     * case.
     *
     * @param flag The flag to set.
     *
     * @throws InterruptedException If the interrupted flag is set.
     */
    private void setDeletedItemFlag(CaseNodeData.DeletedFlags flag) {
    private void setDeletedItemFlag(CaseNodeData.DeletedFlags flag) throws InterruptedException {
        try {
            caseNodeData.setDeletedFlag(flag);
            coordinationService.setNodeData(CategoryNode.CASES, caseNodeData.getDirectory().toString(), caseNodeData.toArray());
        } catch (IOException | CoordinationServiceException | InterruptedException ex) {
            CaseNodeData.writeCaseNodeData(caseNodeData);
        } catch (CaseNodeDataException ex) {
            logger.log(Level.SEVERE, String.format("Error updating deleted item flag %s for %s", flag.name(), caseNodeData.getDisplayName()), ex);
        }
    }
@ -806,4 +876,70 @@ final class DeleteCaseTask implements Runnable {
        }
    }

    /**
     * A wrapper class that bundles a manifest file coordination service lock
     * with a manifest file path and a flag indicating whether or not the case
     * input (manifest file and data source) associated with the lock has been
     * deleted.
     */
    private static class ManifestFileLock {

        private final Path manifestFilePath;
        private final Lock lock;
        private boolean inputDeleted;

        /**
         * Constructs an instance of a wrapper class that bundles a manifest
         * file coordination service lock with a manifest file path and a flag
         * indicating whether or not the case input (manifest file and data
         * source) associated with the lock has been deleted.
         *
         * @param manifestFilePath The manifest file path.
         * @param lock             The coordination service lock.
         */
        private ManifestFileLock(Path manifestFilePath, Lock lock) {
            this.manifestFilePath = manifestFilePath;
            this.lock = lock;
            this.inputDeleted = false;
        }

        /**
         * Gets the path of the manifest file associated with the lock.
         *
         * @return The manifest file path.
         */
        Path getManifestFilePath() {
            return this.manifestFilePath;
        }

        /**
         * Sets the flag that indicates whether or not the case input (manifest
         * file and data source) associated with the lock has been deleted.
         */
        private void setInputDeleted() {
            this.inputDeleted = true;
        }

        /**
         * Gets the value of the flag that indicates whether or not the case
         * input (manifest file and data source) associated with the lock has
         * been deleted.
         *
         * @return True or false.
         */
        private boolean isInputDeleted() {
            return this.inputDeleted;
        }

        /**
         * Releases the manifest file lock.
         *
         * @throws CoordinationServiceException If an error occurs while
         *                                      releasing the lock.
         */
        private void release() throws CoordinationServiceException {
            lock.release();
        }
    }

}
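ManifestFileLock bundles the lock with the path it guards and an input-deleted flag, so deleteManifestFileNodes() can later decide per manifest whether the znode may go. A condensed, hypothetical sketch of that consuming loop:

import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Arrays;
import java.util.List;

final class ManifestLockSweepDemo {

    public static void main(String[] args) {
        DemoLock removable = new DemoLock(Paths.get("/cases/manifest1.xml"));
        removable.inputDeleted = true; // its manifest and data source are gone
        DemoLock retained = new DemoLock(Paths.get("/cases/manifest2.xml"));
        sweep(Arrays.asList(removable, retained));
    }

    // Condensed sketch: only locks whose input was actually deleted have
    // their znodes removed; the rest are skipped, as in the method above.
    static void sweep(List<DemoLock> locks) {
        for (DemoLock lock : locks) {
            if (lock.inputDeleted) {
                System.out.println("delete znode for " + lock.manifestFilePath);
            } else {
                System.out.println("keep znode for " + lock.manifestFilePath);
            }
        }
    }

    // Hypothetical stand-in for the private ManifestFileLock wrapper.
    static final class DemoLock {
        final Path manifestFilePath;
        boolean inputDeleted;

        DemoLock(Path manifestFilePath) {
            this.manifestFilePath = manifestFilePath;
        }
    }
}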
@ -0,0 +1,55 @@
/*
 * Autopsy Forensic Browser
 *
 * Copyright 2019 Basis Technology Corp.
 * Contact: carrier <at> sleuthkit <dot> org
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.sleuthkit.autopsy.experimental.autoingest;

import org.sleuthkit.autopsy.coordinationservice.CoordinationService;

/**
 * A utility class supplying helper methods for case deletion.
 */
final class DeleteCaseUtils {

    private static final String NO_NODE_ERROR_MSG_FRAGMENT = "KeeperErrorCode = NoNode";

    /**
     * Examines a coordination service exception to try to determine if it is a
     * no node exception.
     *
     * @param ex A coordination service exception.
     *
     * @return True or false.
     */
    static boolean isNoNodeException(CoordinationService.CoordinationServiceException ex) {
        boolean isNoNodeEx = false;
        Throwable cause = ex.getCause();
        if (cause != null) {
            String causeMessage = cause.getMessage();
            isNoNodeEx = causeMessage.contains(NO_NODE_ERROR_MSG_FRAGMENT);
        }
        return isNoNodeEx;
    }

    /**
     * A private constructor to prevent instantiation.
     */
    private DeleteCaseUtils() {
    }

}
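A typical caller uses the helper to downgrade an expected "node already gone" failure to a no-op while still reporting real errors. A sketch of that usage, reusing the CoordinationService API that appears elsewhere in this diff; the deleteQuietly wrapper itself is hypothetical:

import java.util.logging.Level;
import java.util.logging.Logger;
import org.sleuthkit.autopsy.coordinationservice.CoordinationService;
import org.sleuthkit.autopsy.coordinationservice.CoordinationService.CategoryNode;

// Hypothetical caller in the same package as DeleteCaseUtils.
final class NoNodeHandlingDemo {

    private static final Logger logger = Logger.getLogger(NoNodeHandlingDemo.class.getName());

    static void deleteQuietly(CoordinationService coordinationService, String nodePath) throws InterruptedException {
        try {
            coordinationService.deleteNode(CategoryNode.CASES, nodePath);
        } catch (CoordinationService.CoordinationServiceException ex) {
            if (!DeleteCaseUtils.isNoNodeException(ex)) {
                // A missing node just means there is nothing left to delete;
                // anything else is a real error worth logging.
                logger.log(Level.SEVERE, String.format("Error deleting znode %s", nodePath), ex);
            }
        }
    }
}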
@ -0,0 +1,56 @@
/*
 * Autopsy Forensic Browser
 *
 * Copyright 2019 Basis Technology Corp.
 * Contact: carrier <at> sleuthkit <dot> org
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.sleuthkit.autopsy.experimental.autoingest;

import org.openide.util.NbBundle;
import org.sleuthkit.autopsy.progress.ProgressIndicator;

/**
 * An action class that kicks off a cancellable orphaned case nodes deletion
 * task that runs in a background thread and reports progress using an
 * application frame progress bar.
 */
final class DeleteOrphanCaseNodesAction extends BackgroundTaskAction {

    private static final long serialVersionUID = 1L;

    /**
     * Constructs an instance of an action class that kicks off a cancellable
     * orphaned case nodes deletion task that runs in a background thread and
     * reports progress using an application frame progress bar.
     */
    @NbBundle.Messages({
        "DeleteOrphanCaseNodesAction.progressDisplayName=Cleanup Case Znodes"
    })
    DeleteOrphanCaseNodesAction() {
        super(Bundle.DeleteOrphanCaseNodesAction_progressDisplayName(), Bundle.DeleteOrphanCaseNodesAction_progressDisplayName());
    }

    @Override
    Runnable getTask(ProgressIndicator progress) {
        return new DeleteOrphanCaseNodesTask(progress);
    }

    @Override
    public DeleteOrphanCaseNodesAction clone() throws CloneNotSupportedException {
        super.clone();
        throw new CloneNotSupportedException();
    }

}
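The clone() override above is a defensive idiom repeated across these action classes: satisfy the contract by calling super.clone(), then refuse cloning outright. A tiny self-contained sketch of the idiom:

import java.awt.event.ActionEvent;
import javax.swing.AbstractAction;

final class NonCloneableActionDemo extends AbstractAction {

    private static final long serialVersionUID = 1L;

    @Override
    public void actionPerformed(ActionEvent event) {
        // Real work would go here.
    }

    // AbstractAction implements Cloneable, so super.clone() would succeed;
    // throwing afterwards makes the no-cloning intent explicit to readers
    // and to static analyzers.
    @Override
    public NonCloneableActionDemo clone() throws CloneNotSupportedException {
        super.clone();
        throw new CloneNotSupportedException();
    }
}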
@ -0,0 +1,149 @@
/*
 * Autopsy Forensic Browser
 *
 * Copyright 2019 Basis Technology Corp.
 * Contact: carrier <at> sleuthkit <dot> org
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.sleuthkit.autopsy.experimental.autoingest;

import java.io.File;
import java.nio.file.Path;
import java.util.List;
import java.util.logging.Level;
import org.openide.util.NbBundle;
import org.sleuthkit.autopsy.casemodule.multiusercases.CaseNodeData;
import org.sleuthkit.autopsy.casemodule.multiusercases.CaseNodeDataCollector;
import org.sleuthkit.autopsy.casemodule.multiusercases.CoordinationServiceUtils;
import org.sleuthkit.autopsy.coordinationservice.CoordinationService;
import org.sleuthkit.autopsy.coreutils.Logger;
import org.sleuthkit.autopsy.progress.ProgressIndicator;

/**
 * Task for deleting case coordination service nodes for which there is no
 * longer a corresponding case.
 */
final class DeleteOrphanCaseNodesTask implements Runnable {

    private static final Logger logger = AutoIngestDashboardLogger.getLogger();
    private final ProgressIndicator progress;

    /**
     * Constructs an instance of a task for deleting case coordination service
     * nodes for which there is no longer a corresponding case.
     *
     * @param progress A progress indicator for the task.
     */
    DeleteOrphanCaseNodesTask(ProgressIndicator progress) {
        this.progress = progress;
    }

    @Override
    @NbBundle.Messages({
        "DeleteOrphanCaseNodesTask.progress.startMessage=Starting orphaned case znode cleanup",
        "DeleteOrphanCaseNodesTask.progress.connectingToCoordSvc=Connecting to the coordination service",
        "DeleteOrphanCaseNodesTask.progress.gettingCaseNodesListing=Querying coordination service for case znodes"
    })
    public void run() {
        progress.start(Bundle.DeleteOrphanCaseNodesTask_progress_startMessage());
        try {
            progress.progress(Bundle.DeleteOrphanCaseNodesTask_progress_connectingToCoordSvc());
            logger.log(Level.INFO, Bundle.DeleteOrphanCaseNodesTask_progress_connectingToCoordSvc());
            CoordinationService coordinationService;
            try {
                coordinationService = CoordinationService.getInstance();
            } catch (CoordinationService.CoordinationServiceException ex) {
                logger.log(Level.SEVERE, "Error connecting to the coordination service", ex); //NON-NLS
                return;
            }

            progress.progress(Bundle.DeleteOrphanCaseNodesTask_progress_gettingCaseNodesListing());
            logger.log(Level.INFO, Bundle.DeleteOrphanCaseNodesTask_progress_gettingCaseNodesListing());
            List<CaseNodeData> nodeDataList;
            try {
                nodeDataList = CaseNodeDataCollector.getNodeData();
            } catch (CoordinationService.CoordinationServiceException ex) {
                logger.log(Level.SEVERE, "Error collecting case node data", ex); //NON-NLS
                return;
            } catch (InterruptedException unused) {
                logger.log(Level.WARNING, "Task cancelled while collecting case node data"); //NON-NLS
                return;
            }

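            /*
             * Check each case znode: if the case directory it references no
             * longer exists, the case name, resources, auto ingest log, and
             * case directory znodes for that case are orphaned and can be
             * deleted.
             */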
            for (CaseNodeData nodeData : nodeDataList) {
                final Path caseDirectoryPath = nodeData.getDirectory();
                final File caseDirectory = caseDirectoryPath.toFile();
                if (!caseDirectory.exists()) {
                    String caseName = nodeData.getDisplayName();
                    String nodePath = ""; // NON-NLS
                    try {
                        nodePath = CoordinationServiceUtils.getCaseNameNodePath(caseDirectoryPath);
                        deleteNode(coordinationService, caseName, nodePath);

                        nodePath = CoordinationServiceUtils.getCaseResourcesNodePath(caseDirectoryPath);
                        deleteNode(coordinationService, caseName, nodePath);

                        nodePath = CoordinationServiceUtils.getCaseAutoIngestLogNodePath(caseDirectoryPath);
                        deleteNode(coordinationService, caseName, nodePath);

                        nodePath = CoordinationServiceUtils.getCaseDirectoryNodePath(caseDirectoryPath);
                        deleteNode(coordinationService, caseName, nodePath);

                    } catch (InterruptedException unused) {
                        logger.log(Level.WARNING, String.format("Task cancelled while deleting orphaned znode %s for %s", nodePath, caseName)); //NON-NLS
                        return;
                    }
                }
            }
        } catch (Exception ex) {
            /*
             * This is a firewall for unexpected runtime exceptions. It is here
             * because this task is designed to run in scenarios where there is
             * no call to get() on a Future<Void> associated with the task, so
             * this ensures that any such errors get logged.
             */
            logger.log(Level.SEVERE, "Unexpected error during orphan case znode cleanup", ex); //NON-NLS
            throw ex;

        } finally {
            progress.finish();
        }
    }

    /**
     * Attempts to delete a case coordination service node.
     *
     * @param coordinationService The coordination service.
     * @param caseName            The case name.
     * @param nodePath            The path of the node to delete.
     *
     * @throws InterruptedException If the thread executing this task is
     *                              interrupted during the delete operation.
     */
    @NbBundle.Messages({
        "# {0} - node path", "DeleteOrphanCaseNodesTask.progress.deletingOrphanedCaseNode=Deleting orphaned case znode {0}"
    })
    private void deleteNode(CoordinationService coordinationService, String caseName, String nodePath) throws InterruptedException {
        try {
            progress.progress(Bundle.DeleteOrphanCaseNodesTask_progress_deletingOrphanedCaseNode(nodePath));
            logger.log(Level.INFO, String.format("Deleting orphaned case node %s for %s", nodePath, caseName)); //NON-NLS
            coordinationService.deleteNode(CoordinationService.CategoryNode.CASES, nodePath);
        } catch (CoordinationService.CoordinationServiceException ex) {
            if (!DeleteCaseUtils.isNoNodeException(ex)) {
                logger.log(Level.SEVERE, String.format("Error deleting orphaned case node %s for %s", nodePath, caseName), ex); //NON-NLS
            }
        }
    }

}
@ -0,0 +1,56 @@
/*
 * Autopsy Forensic Browser
 *
 * Copyright 2019 Basis Technology Corp.
 * Contact: carrier <at> sleuthkit <dot> org
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.sleuthkit.autopsy.experimental.autoingest;

import org.openide.util.NbBundle;
import org.sleuthkit.autopsy.progress.ProgressIndicator;

/**
 * An action class that kicks off a cancellable orphaned manifest file nodes
 * deletion task that runs in a background thread and reports progress using an
 * application frame progress bar.
 */
public class DeleteOrphanManifestNodesAction extends BackgroundTaskAction {

    private static final long serialVersionUID = 1L;

    /**
     * Constructs an instance of an action class that kicks off a cancellable
     * orphaned manifest file nodes deletion task that runs in a background
     * thread and reports progress using an application frame progress bar.
     */
    @NbBundle.Messages({
        "DeleteOrphanManifestNodesAction.progressDisplayName=Cleanup Manifest File Znodes"
    })
    DeleteOrphanManifestNodesAction() {
        super(Bundle.DeleteOrphanManifestNodesAction_progressDisplayName(), Bundle.DeleteOrphanManifestNodesAction_progressDisplayName());
    }

    @Override
    Runnable getTask(ProgressIndicator progress) {
        return new DeleteOrphanManifestNodesTask(progress);
    }

    @Override
    public DeleteOrphanManifestNodesAction clone() throws CloneNotSupportedException {
        super.clone();
        throw new CloneNotSupportedException();
    }

}
@ -0,0 +1,116 @@
/*
 * Autopsy Forensic Browser
 *
 * Copyright 2019 Basis Technology Corp.
 * Contact: carrier <at> sleuthkit <dot> org
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.sleuthkit.autopsy.experimental.autoingest;

import java.io.File;
import java.nio.file.Path;
import java.util.List;
import java.util.logging.Level;
import org.openide.util.NbBundle;
import org.sleuthkit.autopsy.coordinationservice.CoordinationService;
import org.sleuthkit.autopsy.coreutils.Logger;
import org.sleuthkit.autopsy.progress.ProgressIndicator;

/**
 * A task class for cleaning up auto ingest job coordination service nodes for
 * which there is no longer a corresponding manifest file.
 */
final class DeleteOrphanManifestNodesTask implements Runnable {

    private static final Logger logger = Logger.getLogger(DeleteOrphanManifestNodesTask.class.getName());
    private final ProgressIndicator progress;

    /**
     * Constructs an instance of a task for cleaning up auto ingest job
     * coordination service nodes for which there is no longer a corresponding
     * manifest file.
     *
     * @param progress A progress indicator for the task.
     */
    DeleteOrphanManifestNodesTask(ProgressIndicator progress) {
        this.progress = progress;
    }

    @Override
    @NbBundle.Messages({
        "DeleteOrphanManifestNodesTask.progress.startMessage=Starting orphaned manifest file znode cleanup",
        "DeleteOrphanManifestNodesTask.progress.connectingToCoordSvc=Connecting to the coordination service",
        "DeleteOrphanManifestNodesTask.progress.gettingManifestNodes=Querying the coordination service for manifest file znodes",
        "# {0} - node path", "DeleteOrphanManifestNodesTask.progress.deletingOrphanedManifestNode=Deleting orphaned manifest file znode {0}"
    })
    public void run() {
        progress.start(Bundle.DeleteOrphanManifestNodesTask_progress_startMessage());
        try {
            progress.progress(Bundle.DeleteOrphanManifestNodesTask_progress_connectingToCoordSvc());
            logger.log(Level.INFO, Bundle.DeleteOrphanManifestNodesTask_progress_connectingToCoordSvc());
            CoordinationService coordinationService;
            try {
                coordinationService = CoordinationService.getInstance();
            } catch (CoordinationService.CoordinationServiceException ex) {
                logger.log(Level.SEVERE, "Error connecting to the coordination service", ex); // NON-NLS
                return;
            }

            progress.progress(Bundle.DeleteOrphanManifestNodesTask_progress_gettingManifestNodes());
            logger.log(Level.INFO, Bundle.DeleteOrphanManifestNodesTask_progress_gettingManifestNodes());
            List<AutoIngestJobNodeData> nodeDataList;
            try {
                nodeDataList = AutoIngestJobNodeDataCollector.getNodeData();
            } catch (CoordinationService.CoordinationServiceException ex) {
                logger.log(Level.SEVERE, "Error collecting auto ingest job node data", ex); // NON-NLS
                return;
            } catch (InterruptedException unused) {
                logger.log(Level.WARNING, "Task cancelled while collecting auto ingest job node data"); // NON-NLS
                return;
            }

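            /*
             * Examine each auto ingest job znode: if the manifest file it
             * references no longer exists, the znode is orphaned and can be
             * deleted.
             */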
            for (AutoIngestJobNodeData nodeData : nodeDataList) {
                final String caseName = nodeData.getCaseName();
                final Path manifestFilePath = nodeData.getManifestFilePath();
                final File manifestFile = manifestFilePath.toFile();
                if (!manifestFile.exists()) {
                    try {
                        progress.progress(Bundle.DeleteOrphanManifestNodesTask_progress_deletingOrphanedManifestNode(manifestFilePath));
                        logger.log(Level.INFO, String.format("Deleting orphaned manifest file znode %s for %s", manifestFilePath, caseName));
                        coordinationService.deleteNode(CoordinationService.CategoryNode.MANIFESTS, manifestFilePath.toString());
                    } catch (CoordinationService.CoordinationServiceException ex) {
                        if (!DeleteCaseUtils.isNoNodeException(ex)) {
                            logger.log(Level.SEVERE, String.format("Error deleting %s znode for %s", manifestFilePath, caseName), ex); // NON-NLS
                        }
                    } catch (InterruptedException unused) {
                        logger.log(Level.WARNING, String.format("Task cancelled while deleting %s znode for %s", manifestFilePath, caseName)); // NON-NLS
                        return;
                    }
                }
            }
        } catch (Exception ex) {
            /*
             * This is a firewall for unexpected runtime exceptions. It is here
             * because this task is designed to run in scenarios where there is
             * no call to get() on a Future<Void> associated with the task, so
             * this ensures that any such errors get logged.
             */
            logger.log(Level.SEVERE, "Unexpected error deleting orphan manifest file znodes", ex); // NON-NLS
            throw ex;

        } finally {
            progress.finish();
        }
    }

}
@ -108,21 +108,39 @@ class Chrome extends Extract {

        progressBar.progress(Bundle.Progress_Message_Chrome_History());
        this.getHistory();
        if (context.dataSourceIngestIsCancelled()) {
            return;
        }

        progressBar.progress(Bundle.Progress_Message_Chrome_Bookmarks());
        this.getBookmark();
        if (context.dataSourceIngestIsCancelled()) {
            return;
        }

        progressBar.progress(Bundle.Progress_Message_Chrome_Cookies());
        this.getCookie();
        if (context.dataSourceIngestIsCancelled()) {
            return;
        }

        progressBar.progress(Bundle.Progress_Message_Chrome_Logins());
        this.getLogins();
        if (context.dataSourceIngestIsCancelled()) {
            return;
        }

        progressBar.progress(Bundle.Progress_Message_Chrome_AutoFill());
        this.getAutofill();
        if (context.dataSourceIngestIsCancelled()) {
            return;
        }

        progressBar.progress(Bundle.Progress_Message_Chrome_Downloads());
        this.getDownload();
        if (context.dataSourceIngestIsCancelled()) {
            return;
        }

        progressBar.progress(Bundle.Progress_Message_Chrome_Cache());
        ChromeCacheExtractor chromeCacheExtractor = new ChromeCacheExtractor(dataSource, context, progressBar);

@ -261,7 +261,12 @@ final class ChromeCacheExtractor {
        indexFiles = findCacheIndexFiles();

        // Process each of the caches
        for (AbstractFile indexFile: indexFiles) {

            if (context.dataSourceIngestIsCancelled()) {
                return;
            }

            processCacheIndexFile(indexFile);
        }

@ -325,6 +330,12 @@ final class ChromeCacheExtractor {

        // Process each address in the table
        for (int i = 0; i < indexHdr.getTableLen(); i++) {

            if (context.dataSourceIngestIsCancelled()) {
                cleanup();
                return;
            }

            CacheAddress addr = new CacheAddress(indexFileROBuffer.getInt() & UINT32_MASK, cachePath);
            if (addr.isInitialized()) {
                progressBar.progress( NbBundle.getMessage(this.getClass(),

@ -339,6 +350,11 @@ final class ChromeCacheExtractor {
            }
        }
    }

    if (context.dataSourceIngestIsCancelled()) {
        cleanup();
        return;
    }

    derivedFiles.forEach((derived) -> {
        services.fireModuleContentEvent(new ModuleContentEvent(derived));

@ -311,6 +311,10 @@ final class ExtractEdge extends Extract {
    }

    for (File file : historyFiles) {
        if (context.dataSourceIngestIsCancelled()) {
            return;
        }

        Scanner fileScanner;
        try {
            fileScanner = new Scanner(new FileInputStream(file.toString()));

@ -324,6 +328,10 @@ final class ExtractEdge extends Extract {
        try {
            List<String> headers = null;
            while (fileScanner.hasNext()) {
                if (context.dataSourceIngestIsCancelled()) {
                    return;
                }

                String line = fileScanner.nextLine();
                if (headers == null) {
                    headers = Arrays.asList(line.toLowerCase().split(","));

@ -413,6 +421,10 @@ final class ExtractEdge extends Extract {
    }

    for (File file : containerFiles) {
        if (context.dataSourceIngestIsCancelled()) {
            return;
        }

        Scanner fileScanner;
        try {
            fileScanner = new Scanner(new FileInputStream(file.toString()));

@ -426,6 +438,10 @@ final class ExtractEdge extends Extract {
        try {
            List<String> headers = null;
            while (fileScanner.hasNext()) {
                if (context.dataSourceIngestIsCancelled()) {
                    return;
                }

                String line = fileScanner.nextLine();
                if (headers == null) {
                    headers = Arrays.asList(line.toLowerCase().split(","));

@ -468,6 +484,10 @@ final class ExtractEdge extends Extract {
    }

    for (File file : downloadFiles) {
        if (context.dataSourceIngestIsCancelled()) {
            return;
        }

        Scanner fileScanner;
        try {
            fileScanner = new Scanner(new FileInputStream(file.toString()));

@ -480,6 +500,10 @@ final class ExtractEdge extends Extract {
        try {
            List<String> headers = null;
            while (fileScanner.hasNext()) {
                if (context.dataSourceIngestIsCancelled()) {
                    return;
                }

                String line = fileScanner.nextLine();
                if (headers == null) {
                    headers = Arrays.asList(line.toLowerCase().split(","));

@ -364,6 +364,9 @@ class ExtractIE extends Extract {
        bbartifacts.addAll(parsePascoOutput(indexFile, filename).stream()
                .filter(bbart -> bbart.getArtifactTypeID() == ARTIFACT_TYPE.TSK_WEB_HISTORY.getTypeID())
                .collect(Collectors.toList()));
        if (context.dataSourceIngestIsCancelled()) {
            return;
        }
        foundHistory = true;

        //Delete index<n>.dat file since it was successfully parsed by Pasco

@ -465,6 +468,11 @@ class ExtractIE extends Extract {
            return bbartifacts;
        }
        while (fileScanner.hasNext()) {

            if (context.dataSourceIngestIsCancelled()) {
                return bbartifacts;
            }

            String line = fileScanner.nextLine();
            if (!line.startsWith("URL")) { //NON-NLS
                continue;

@ -565,7 +573,7 @@ class ExtractIE extends Extract {
                this.indexArtifact(bbart);
                bbartifacts.add(bbart);
            } catch (TskCoreException ex) {
                logger.log(Level.SEVERE, "Error writing Internet Explorer web history artifact to the blackboard.", ex); //NON-NLS
                logger.log(Level.SEVERE, "Error writing Internet Explorer web history artifact to the blackboard. Pasco results will be incomplete", ex); //NON-NLS
            }
        }
        fileScanner.close();

@ -391,6 +391,11 @@ class ExtractRegistry extends Extract {
        // that we will submit in a ModuleDataEvent for additional processing.
        Collection<BlackboardArtifact> wifiBBartifacts = new ArrayList<>();
        for (int i = 0; i < len; i++) {

            if (context.dataSourceIngestIsCancelled()) {
                return false;
            }

            Element tempnode = (Element) children.item(i);

            String dataType = tempnode.getNodeName();

@ -809,11 +814,11 @@ class ExtractRegistry extends Extract {
        } catch (FileNotFoundException ex) {
            logger.log(Level.SEVERE, "Error finding the registry file.", ex); //NON-NLS
        } catch (SAXException ex) {
            logger.log(Level.SEVERE, "Error parsing the registry XML: {0}", ex); //NON-NLS
            logger.log(Level.SEVERE, "Error parsing the registry XML.", ex); //NON-NLS
        } catch (IOException ex) {
            logger.log(Level.SEVERE, "Error building the document parser: {0}", ex); //NON-NLS
            logger.log(Level.SEVERE, "Error building the document parser.", ex); //NON-NLS
        } catch (ParserConfigurationException ex) {
            logger.log(Level.SEVERE, "Error configuring the registry parser: {0}", ex); //NON-NLS
            logger.log(Level.SEVERE, "Error configuring the registry parser.", ex); //NON-NLS
        } finally {
            try {
                if (fstream != null) {

@ -28,6 +28,7 @@ import com.dd.plist.PropertyListParser;
import java.io.File;
import java.io.IOException;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.text.ParseException;
import java.util.ArrayList;
import java.util.Collection;

@ -49,7 +50,6 @@ import org.sleuthkit.autopsy.ingest.ModuleDataEvent;
import org.sleuthkit.autopsy.recentactivity.BinaryCookieReader.Cookie;
import org.sleuthkit.datamodel.AbstractFile;
import org.sleuthkit.datamodel.BlackboardArtifact;
import org.sleuthkit.datamodel.BlackboardAttribute;
import org.sleuthkit.datamodel.Content;
import org.sleuthkit.datamodel.TskCoreException;
import org.xml.sax.SAXException;

@ -121,7 +121,7 @@ final class ExtractSafari extends Extract {

        } catch (IOException | TskCoreException ex) {
            this.addErrorMessage(Bundle.ExtractSafari_Error_Getting_History());
            LOG.log(Level.SEVERE, "Exception thrown while processing history file: {0}", ex); //NON-NLS
            LOG.log(Level.SEVERE, "Exception thrown while processing history file.", ex); //NON-NLS
        }

        progressBar.progress(Bundle.Progress_Message_Safari_Bookmarks());

@ -129,7 +129,7 @@ final class ExtractSafari extends Extract {
            processBookmarkPList(dataSource, context);
        } catch (IOException | TskCoreException | SAXException | PropertyListFormatException | ParseException | ParserConfigurationException ex) {
            this.addErrorMessage(Bundle.ExtractSafari_Error_Parsing_Bookmark());
            LOG.log(Level.SEVERE, "Exception thrown while parsing Safari Bookmarks file: {0}", ex); //NON-NLS
            LOG.log(Level.SEVERE, "Exception thrown while parsing Safari Bookmarks file.", ex); //NON-NLS
        }

        progressBar.progress(Bundle.Progress_Message_Safari_Downloads());

@ -137,15 +137,15 @@ final class ExtractSafari extends Extract {
            processDownloadsPList(dataSource, context);
        } catch (IOException | TskCoreException | SAXException | PropertyListFormatException | ParseException | ParserConfigurationException ex) {
            this.addErrorMessage(Bundle.ExtractSafari_Error_Parsing_Bookmark());
            LOG.log(Level.SEVERE, "Exception thrown while parsing Safari Download.plist file: {0}", ex); //NON-NLS
            LOG.log(Level.SEVERE, "Exception thrown while parsing Safari Download.plist file.", ex); //NON-NLS
        }

        progressBar.progress(Bundle.Progress_Message_Safari_Cookies());
        try {
            processBinaryCookieFile(dataSource, context);
        } catch (IOException | TskCoreException ex) {
        } catch (TskCoreException ex) {
            this.addErrorMessage(Bundle.ExtractSafari_Error_Parsing_Cookies());
            LOG.log(Level.SEVERE, "Exception thrown while processing Safari cookies file: {0}", ex); //NON-NLS
            LOG.log(Level.SEVERE, "Exception thrown while processing Safari cookies file.", ex); //NON-NLS
        }
    }

@ -246,7 +246,7 @@ final class ExtractSafari extends Extract {
     * @throws TskCoreException
     * @throws IOException
     */
    private void processBinaryCookieFile(Content dataSource, IngestJobContext context) throws TskCoreException, IOException {
    private void processBinaryCookieFile(Content dataSource, IngestJobContext context) throws TskCoreException {
        FileManager fileManager = getCurrentCase().getServices().getFileManager();

        List<AbstractFile> files = fileManager.findFiles(dataSource, COOKIE_FILE_NAME, COOKIE_FOLDER);

@ -261,7 +261,11 @@ final class ExtractSafari extends Extract {
            if (context.dataSourceIngestIsCancelled()) {
                break;
            }
            getCookies(context, file);
            try {
                getCookies(context, file);
            } catch (IOException ex) {
                LOG.log(Level.WARNING, String.format("Failed to get cookies from file %s", Paths.get(file.getUniquePath(), file.getName()).toString()), ex);
            }
        }
    }

@ -287,7 +291,7 @@ final class ExtractSafari extends Extract {
        }

        try {
            Collection<BlackboardArtifact> bbartifacts = getHistoryArtifacts(historyFile, tempHistoryFile.toPath());
            Collection<BlackboardArtifact> bbartifacts = getHistoryArtifacts(historyFile, tempHistoryFile.toPath(), context);
            if (!bbartifacts.isEmpty()) {
                services.fireModuleDataEvent(new ModuleDataEvent(
                        RecentActivityExtracterModuleFactory.getModuleName(),

@ -319,7 +323,7 @@ final class ExtractSafari extends Extract {
        File tempFile = createTemporaryFile(context, file);

        try {
            Collection<BlackboardArtifact> bbartifacts = getBookmarkArtifacts(file, tempFile);
            Collection<BlackboardArtifact> bbartifacts = getBookmarkArtifacts(file, tempFile, context);
            if (!bbartifacts.isEmpty()) {
                services.fireModuleDataEvent(new ModuleDataEvent(
                        RecentActivityExtracterModuleFactory.getModuleName(),

@ -385,7 +389,7 @@ final class ExtractSafari extends Extract {
        try {
            tempFile = createTemporaryFile(context, file);

            Collection<BlackboardArtifact> bbartifacts = getCookieArtifacts(file, tempFile);
            Collection<BlackboardArtifact> bbartifacts = getCookieArtifacts(file, tempFile, context);

            if (!bbartifacts.isEmpty()) {
                services.fireModuleDataEvent(new ModuleDataEvent(

@ -409,7 +413,7 @@ final class ExtractSafari extends Extract {
     *         history artifacts
     * @throws TskCoreException
     */
    private Collection<BlackboardArtifact> getHistoryArtifacts(AbstractFile origFile, Path tempFilePath) throws TskCoreException {
    private Collection<BlackboardArtifact> getHistoryArtifacts(AbstractFile origFile, Path tempFilePath, IngestJobContext context) throws TskCoreException {
        List<HashMap<String, Object>> historyList = this.dbConnect(tempFilePath.toString(), HISTORY_QUERY);

        if (historyList == null || historyList.isEmpty()) {

@ -418,6 +422,10 @@ final class ExtractSafari extends Extract {

        Collection<BlackboardArtifact> bbartifacts = new ArrayList<>();
        for (HashMap<String, Object> row : historyList) {
            if (context.dataSourceIngestIsCancelled()) {
                return bbartifacts;
            }

            String url = row.get(HEAD_URL).toString();
            String title = row.get(HEAD_TITLE).toString();
            Long time = (Double.valueOf(row.get(HEAD_TIME).toString())).longValue();

@ -444,13 +452,13 @@ final class ExtractSafari extends Extract {
     * @throws SAXException
     * @throws TskCoreException
     */
    private Collection<BlackboardArtifact> getBookmarkArtifacts(AbstractFile origFile, File tempFile) throws IOException, PropertyListFormatException, ParseException, ParserConfigurationException, SAXException, TskCoreException {
    private Collection<BlackboardArtifact> getBookmarkArtifacts(AbstractFile origFile, File tempFile, IngestJobContext context) throws IOException, PropertyListFormatException, ParseException, ParserConfigurationException, SAXException, TskCoreException {
        Collection<BlackboardArtifact> bbartifacts = new ArrayList<>();

        try {
            NSDictionary root = (NSDictionary) PropertyListParser.parse(tempFile);

            parseBookmarkDictionary(bbartifacts, origFile, root);
            parseBookmarkDictionary(bbartifacts, origFile, root, context);
        } catch (PropertyListFormatException ex) {
            PropertyListFormatException plfe = new PropertyListFormatException(origFile.getName() + ": " + ex.getMessage());
            plfe.setStackTrace(ex.getStackTrace());

@ -542,7 +550,7 @@ final class ExtractSafari extends Extract {
     * @throws TskCoreException
     * @throws IOException
     */
    private Collection<BlackboardArtifact> getCookieArtifacts(AbstractFile origFile, File tempFile) throws TskCoreException, IOException {
    private Collection<BlackboardArtifact> getCookieArtifacts(AbstractFile origFile, File tempFile, IngestJobContext context) throws TskCoreException, IOException {
        Collection<BlackboardArtifact> bbartifacts = null;
        BinaryCookieReader reader = BinaryCookieReader.initalizeReader(tempFile);

@ -551,6 +559,10 @@ final class ExtractSafari extends Extract {

        Iterator<Cookie> iter = reader.iterator();
        while (iter.hasNext()) {
            if (context.dataSourceIngestIsCancelled()) {
                return bbartifacts;
            }

            Cookie cookie = iter.next();

            BlackboardArtifact bbart = origFile.newArtifact(BlackboardArtifact.ARTIFACT_TYPE.TSK_WEB_COOKIE);

@ -571,13 +583,18 @@ final class ExtractSafari extends Extract {
     * @param root NSDictionary object to parse
     * @throws TskCoreException
     */
    private void parseBookmarkDictionary(Collection<BlackboardArtifact> bbartifacts, AbstractFile origFile, NSDictionary root) throws TskCoreException {
    private void parseBookmarkDictionary(Collection<BlackboardArtifact> bbartifacts, AbstractFile origFile, NSDictionary root, IngestJobContext context) throws TskCoreException {

        if (context.dataSourceIngestIsCancelled()) {
            return;
        }

        if (root.containsKey(PLIST_KEY_CHILDREN)) {
            NSArray children = (NSArray) root.objectForKey(PLIST_KEY_CHILDREN);

            if (children != null) {
                for (NSObject obj : children.getArray()) {
                    parseBookmarkDictionary(bbartifacts, origFile, (NSDictionary) obj);
                    parseBookmarkDictionary(bbartifacts, origFile, (NSDictionary) obj, context);
                }
            }
        } else if (root.containsKey(PLIST_KEY_URL)) {

@ -100,6 +100,11 @@ final class ExtractZoneIdentifier extends Extract {
        Collection<BlackboardArtifact> downloadArtifacts = new ArrayList<>();

        for (AbstractFile zoneFile : zoneFiles) {

            if (context.dataSourceIngestIsCancelled()) {
                return;
            }

            try {
                processZoneFile(context, dataSource, zoneFile, sourceArtifacts, downloadArtifacts, knownPathIDs);
            } catch (TskCoreException ex) {

@ -153,6 +153,11 @@ class Firefox extends Extract {
        Collection<BlackboardArtifact> bbartifacts = new ArrayList<>();
        int j = 0;
        for (AbstractFile historyFile : historyFiles) {

            if (context.dataSourceIngestIsCancelled()) {
                return;
            }

            if (historyFile.getSize() == 0) {
                continue;
            }

@ -184,6 +189,11 @@ class Firefox extends Extract {
        List<HashMap<String, Object>> tempList = this.dbConnect(temps, HISTORY_QUERY);
        logger.log(Level.INFO, "{0} - Now getting history from {1} with {2} artifacts identified.", new Object[]{moduleName, temps, tempList.size()}); //NON-NLS
        for (HashMap<String, Object> result : tempList) {

            if (context.dataSourceIngestIsCancelled()) {
                return;
            }

            String url = result.get("url").toString();

            Collection<BlackboardAttribute> bbattributes = new ArrayList<>();

@ -277,6 +287,11 @@ class Firefox extends Extract {
        List<HashMap<String, Object>> tempList = this.dbConnect(temps, BOOKMARK_QUERY);
        logger.log(Level.INFO, "{0} - Now getting bookmarks from {1} with {2} artifacts identified.", new Object[]{moduleName, temps, tempList.size()}); //NON-NLS
        for (HashMap<String, Object> result : tempList) {

            if (context.dataSourceIngestIsCancelled()) {
                break;
            }

            String url = result.get("url").toString();

            Collection<BlackboardAttribute> bbattributes = new ArrayList<>();

@ -376,6 +391,11 @@ class Firefox extends Extract {
        List<HashMap<String, Object>> tempList = this.dbConnect(temps, query);
        logger.log(Level.INFO, "{0} - Now getting cookies from {1} with {2} artifacts identified.", new Object[]{moduleName, temps, tempList.size()}); //NON-NLS
        for (HashMap<String, Object> result : tempList) {

            if (context.dataSourceIngestIsCancelled()) {
                break;
            }

            String host = result.get("host").toString();

            Collection<BlackboardAttribute> bbattributes = new ArrayList<>();

@ -487,6 +507,11 @@ class Firefox extends Extract {
        List<HashMap<String, Object>> tempList = this.dbConnect(temps, DOWNLOAD_QUERY);
        logger.log(Level.INFO, "{0}- Now getting downloads from {1} with {2} artifacts identified.", new Object[]{moduleName, temps, tempList.size()}); //NON-NLS
        for (HashMap<String, Object> result : tempList) {

            if (context.dataSourceIngestIsCancelled()) {
                break;
            }

            String source = result.get("source").toString();

            Collection<BlackboardAttribute> bbattributes = new ArrayList<>();

@ -621,6 +646,11 @@ class Firefox extends Extract {

        logger.log(Level.INFO, "{0} - Now getting downloads from {1} with {2} artifacts identified.", new Object[]{moduleName, temps, tempList.size()}); //NON-NLS
        for (HashMap<String, Object> result : tempList) {

            if (context.dataSourceIngestIsCancelled()) {
                break;
            }

            String url = result.get("url").toString();

            Collection<BlackboardAttribute> bbattributes = new ArrayList<>();

@ -764,6 +794,11 @@ class Firefox extends Extract {
        List<HashMap<String, Object>> tempList = this.dbConnect(tempFilePath, formHistoryQuery);
        logger.log(Level.INFO, "{0} - Now getting history from {1} with {2} artifacts identified.", new Object[]{moduleName, tempFilePath, tempList.size()}); //NON-NLS
        for (HashMap<String, Object> result : tempList) {

            if (context.dataSourceIngestIsCancelled()) {
                break;
            }

            Collection<BlackboardAttribute> bbattributes = new ArrayList<>();

            String fieldName = ((result.get("fieldname").toString() != null) ? result.get("fieldname").toString() : "");

docs/doxygen-user/auto_ingest.dox
@ -0,0 +1,92 @@
/*! \page auto_ingest_page Automated Ingest

\section auto_ingest_overview Overview

Auto ingest allows one or many computers to process \ref ds_page "data sources" automatically with minimal support from a user. The resulting \ref multiuser_page "multi-user cases" can be opened and reviewed by analysts, using any of the normal functions in Autopsy.

There are three types of computers in an Automated Processing Deployment:
<ul>
<li><b>Automated Ingest Node:</b> These computers are responsible for monitoring the Shared Images Folder and detecting when new images have been copied in. Each writes its results to the Shared Cases Folder.
<li><b>Examiner Node:</b> These computers can open a case during processing or after it has been analyzed by the Automated Ingest Node. They allow the examiner to review the results, tag files, and perform additional analysis as needed.
<li><b>Services/Storage Node:</b> These computers run the services needed for \ref multiuser_page "multi-user cases", hold the images to be processed, and store the analyzed Autopsy cases.
</ul>

The general workflow is as follows:
<ol>
<li>Disk images or other types of data sources are added to the <b>shared images folder</b>. This folder will contain all the disk and phone images that are copied into the system. They must be copied here before they can be analyzed. As more than one machine may need to access this folder across the network, use UNC paths (if possible) to refer to this folder.
<li>A \ref auto_ingest_manifest_creation "manifest file" is added for each data source that is to be processed.
<li>An auto ingest node finds that manifest file and begins processing the data source. It will make a case in the <b>shared cases folder</b> if there is not one there already. This folder will contain all of the analysis results after automated analysis has been performed on the images. This folder will not contain the images; those will stay in the Shared Images Folder. As more than one machine may need to access this folder across the network, use UNC paths (if possible) to refer to this folder.
<li>An analyst on an examiner node opens the case and starts their analysis. This can happen while an auto ingest node is processing data or afterwards.
</ol>

An Automated Processing Deployment could have an architecture such as this:

\image html AutoIngest\overview_pic1.png

Another illustration, including the network infrastructure, is shown below:

\image html AutoIngest\overview_pic2.png

\section auto_ingest_setup_section Configuration

Configuring a group of computers for auto ingest is described on the \ref auto_ingest_setup_page page.

\section auto_ingest_ex_usage Examiner Node Usage

An examiner node in an auto ingest environment is generally the same as any normal Autopsy client set up for \ref multiuser_page "multi-user cases." Any number of examiner nodes can open cases that have been created by the auto ingest nodes. The cases do not need to be complete.

The examiner can open the auto ingest dashboard through the Tools menu. This allows the user to see what cases and data sources are scheduled, in progress, or done.

\image html AutoIngest\examiner_dashboard.png

\section auto_ingest_ain_usage Auto Ingest Node Usage

\subsection auto_ingest_manifest_creation Preparing Data for Auto Ingest

Users manually copy images to the source images folder (using subfolders if desired) and schedule them for ingest by creating a manifest file in the folder alongside each image. The manifest file describes the image, and its name must end in "_Manifest.xml".

\image html AutoIngest\manifest_file_in_file_explorer.png

The following is an example of an Autopsy manifest file. Line breaks/spaces are not required, but are shown here for better human readability.
\verbatim<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<AutopsyManifest>
<CaseName>XperiaCase</CaseName>
<DeviceId>50549</DeviceId>
<DataSource>mtd3_userdata.bin</DataSource>
</AutopsyManifest>\endverbatim

The following is a description of each field:
<ul><li><b>CaseName</b>: Case name. Multiple data sources can belong to the same case.
<li><b>DeviceId</b>: (Optional) A globally unique ID representing the device this data source came from. This can be an integer or a UUID.
<li><b>DataSource</b>: File name of the data source. Does not include the path.
</ul>
Any amount of additional data may be included in the XML file as long as the fields above are present.
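As a rough illustration, a minimal Java sketch along the following lines could emit a manifest of this form. The case name, device ID, data source name, and output folder below are hypothetical example values, not anything mandated by Autopsy:

\verbatim
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;

public class WriteManifest {

    public static void main(String[] args) throws IOException {
        // Hypothetical example values; adjust for your deployment.
        String caseName = "XperiaCase";
        String deviceId = "50549";
        String dataSource = "mtd3_userdata.bin";
        String xml = "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?>\n"
                + "<AutopsyManifest>\n"
                + "<CaseName>" + caseName + "</CaseName>\n"
                + "<DeviceId>" + deviceId + "</DeviceId>\n"
                + "<DataSource>" + dataSource + "</DataSource>\n"
                + "</AutopsyManifest>\n";
        // The manifest must sit alongside the image and its name must end in "_Manifest.xml".
        Files.write(Paths.get("\\\\server\\SharedImages\\XperiaCase", dataSource + "_Manifest.xml"),
                xml.getBytes(StandardCharsets.UTF_8));
    }
}
\endverbatim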
Manifest files can be automatically generated by using the \ref manifest_tool_page.

\subsection auto_ingest_running Running an Auto Ingest Node

When auto ingest mode is enabled, Autopsy will open with a different UI than normal, allowing the user to see what cases are being processed, which are done, and which are next in the queue. You can also change the priority of cases and reprocess cases that may have had an error.

\image html AutoIngest\auto_ingest_in_progress.png

The user must press the "Start" button to begin the auto ingest process. Note that if the computer running Autopsy in auto ingest mode is restarted, someone must log into it to restart Autopsy; it does not start by itself. When "Start" is pressed, the node will scan through the Shared Images folder looking for manifest files. This scan happens periodically when ingest is running. It can also be started manually using the "Refresh" button.

The UI for the auto ingest node will display what images are scheduled for analysis, what is currently running, and what has been completed. If a newly added image should be the highest priority, you can select it and choose "Prioritize Case". This will prioritize all images within the same case to be top priority. You may also prioritize only a single data source (job) using the "Prioritize Job" button in the same manner. If you have prioritized something by mistake, the "Deprioritize" buttons will undo it.

In the middle area, you can see the currently running jobs. You have the option of cancelling analysis of an entire image or cancelling only the currently running module. The latter is useful when a module has been running for too long and you think it is having trouble with the image and will never complete. If the auto ingest node loses connection to either the database or Solr services, it will automatically cancel the currently running job and pause processing. Once the connectivity issue has been resolved, you must manually resume processing.

If an error occurs while processing a job, or if a job was set up incorrectly, the "Reprocess Job" button can be used to move a completed job back into the Pending Jobs table, where it can be prioritized if desired. No case data is deleted, which may result in some duplication in the results.

"Delete Case" will remove a case from the list and remove all of its data. This will not remove the original image, manifest file, or anything else from the input directory. A case cannot be deleted if it is currently open in any Examiner Node or if an auto ingest node is currently working on a job related to the case. Use care with the delete case button. Note that once a case is deleted, the path to its data sources must be changed before they can be reprocessed (i.e., rename the base folder).

The "Auto Ingest Metrics" button displays processing data for all of the auto ingest nodes in the system from a user-entered starting date.

\image html AutoIngest\metrics.png

\section auto_ingest_administration_section Auto Ingest Node Administration

See the \ref auto_ingest_admin_page for information on how to enable administrator features.

*/
docs/doxygen-user/auto_ingest_administration.dox
@ -0,0 +1,91 @@
/*! \page auto_ingest_admin_page Auto Ingest Administration

\section auto_ingest_admin_overview Overview

Examiner nodes in an \ref auto_ingest_page environment can be given a type of administrator access. This allows an admin to:

<ul><li>Access admin-only options on the Auto Ingest Jobs Panel, including:
<ul>
<li>Prioritizing jobs and cases
<li>Cancelling jobs
<li>Deleting and reprocessing jobs
</ul>
<li>Access the Auto Ingest Nodes Panel, which allows the user to:
<ul><li>View the currently active auto ingest nodes
<li>Pause/resume/shut down the active auto ingest nodes
<li>View/enable the health monitor
<li>View auto ingest metrics
</ul></ul>

\section auto_ingest_admin_setup Setup

The admin panel is enabled by creating a file named "admin" in the user config directory. Note that the name must be exactly that, with no extension. It also works to make a folder named "admin" instead of a file, which can be easier on machines where file extensions are hidden. No restart is needed; simply reopen the Auto Ingest Dashboard after creating the file.

For an installed copy of Autopsy, the file will go under \c "C:\Users\<user name>\AppData\Roaming\Autopsy\config".
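
For convenience, a command along the following lines, run from a Windows command prompt as the Autopsy user, should create the empty file. It assumes the default config location described above (%APPDATA% resolves to the AppData\Roaming folder):
\verbatim type nul > "%APPDATA%\Autopsy\config\admin"\endverbatim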

\image html AutoIngest\admin_file.png

\section auto_ingest_admin_jobs_panel Auto Ingest Jobs Panel

With the admin file in place, the user can right-click on jobs in each of the tables of the jobs panel to perform different actions. In the Pending Jobs table, the context menu allows cases and individual jobs to be prioritized.

\image html AutoIngest\admin_jobs_panel.png

In the Running Jobs table, the ingest progress can be viewed and the current job can be cancelled. Note that cancellation can take some time.

\image html AutoIngest\admin_jobs_cancel.png

In the Completed Jobs table, the user can reprocess a job (generally useful when a job had errors), delete a case (if no other machines are using it), and view the case log.

\image html AutoIngest\admin_jobs_completed.png

\section auto_ingest_admin_nodes_panel Auto Ingest Nodes Panel

The Nodes panel displays the status of every online auto ingest node. Additionally, an admin can pause or resume a node, or shut down a node entirely (i.e., exit the Autopsy app).

\image html AutoIngest\admin_nodes_panel.png

\section auto_ingest_admin_cases_panel Cases Panel

The Cases panel shows information about each auto ingest case - the name, creation and last accessed times, the case directory, and flags for which parts of the case have been deleted.

\image html AutoIngest\cases_panel.png

If you right-click on a case, you can open it, see the log, delete the case, or view properties of the case.

\image html AutoIngest\cases_context_menu.png

Note that you can select multiple cases at once to delete. If you choose to delete a case (or cases), you'll see the following confirmation dialog:

\image html case_delete_confirm.png

\section auto_ingest_admin_health_monitor Health Monitor

The health monitor shows timing stats and the general state of the system. The Health Monitor is accessed from the Auto Ingest Nodes panel. To enable health monitoring, click on the Health Monitor button to get the following screen and then press the "Enable monitor" button.

\image html AutoIngest\health_monitor_disabled.png

This will enable the health monitor metrics on every node (both auto ingest nodes and examiner nodes) that is using this PostgreSQL server. Once enabled, the monitor will display the collected metrics.

\image html AutoIngest\health_monitor.png

By default, the graphs will show all metrics collected in the last day.

The Timing Metrics area shows how long various tasks took to perform. There are several options in the Timing Metrics section:
<ul><li><b>Max days to display</b>: Choose to show the last day, week, two weeks, or month
<li><b>Filter by host</b>: Show only metrics that came from the selected host
<li><b>Show trend line</b>: Show or hide the red trend line
<li><b>Do not plot outliers</b>: Redraws the graph, allowing very high metrics to go off the screen. This can be helpful with data where a couple of entries took an exceptionally long time.
</ul>

The User Metrics section shows open cases and logged-on nodes. For the open cases graph, the count is the number of distinct cases open; if ten nodes have the same case open, the count will be one. The logged in users graph shows the total number of active nodes, with auto ingest nodes on the bottom in green and examiner nodes on top in blue. The User Metrics section has only one option:
<ul><li><b>Max days to display</b>: Choose to show the last day, week, two weeks, or month
</ul>

\section auto_ingest_admin_metrics Auto Ingest Metrics

The Auto Ingest Metrics can be accessed from the Auto Ingest Nodes panel and show data about the jobs completed in a selected time frame.

\image html AutoIngest\metrics.png

*/
104
docs/doxygen-user/auto_ingest_setup.dox
Normal file
@ -0,0 +1,104 @@
|
||||
/*! \page auto_ingest_setup_page Auto Ingest Configuration
|
||||
|
||||
\section auto_ingest_setup_overview Overview
|
||||
|
||||
A multi-user installation requires several network-based services, such as a central database and a messaging system, and automated ingest requires one or more auto ingest nodes. While you may run all of the external services on a single node, this is not likely to be ideal - spreading the services out across several machines can improve throughput. Keeping in mind that all the following machines need to be able to communicate with each other with network visibility to the shared drive, here is a description of a possible configuration:
|
||||
|
||||
<table>
|
||||
<tr><th>Number of Machines</th><th>Services</th></tr>
|
||||
<tr><td>One</td><td><ul><li><b>Solr</b> - Install Solr on the highest-powered machine; the more CPUs the better.</li>
|
||||
<li>The <b>case output folders</b> can also be put on this machine.</li></ul></td></tr>
|
||||
<tr><td>One</td><td><ul><li><b>ActiveMQ</b> - This service has minimal memory and disk requirements.</li>
|
||||
<li><b>PostgreSQL</b> - This service has minimal memory and disk requirements.</li></ul></td></tr>
|
||||
<tr><td>One</td><td><ul><li><b>Shared image folder</b> - This machine needs a large amount of disk space but doesn't need the fastest hardware.</li></ul></td></tr>
|
||||
<tr><td>One or more</td><td><ul><li><b>Automated Ingest Node(s)</b> - These machines don't need much disk space but benefit from additional memory and processing power.</li></ul></td></tr>
|
||||
<tr><td>One or more</td><td><ul><li><b>Examiner Node(s)</b> - See \ref installation_page for recommended system requirements.</li></ul></td></tr>
|
||||
</table>
|
||||
|
||||
Solr is going to be a sizeable resource hog. A big performance increase will be seen if you put solid state drives (SSD) in the machine running Solr, and have that machine also host the large network drive on the SSDs as a place to store case output. The source images to can be on SAS drives (slower than SSD) with very little impact on performance. This idea here is to have the most resource-intensive operations on the fastest hardware. Using this strategy, there are actually two large network stores, one for input images and one for output cases.
|
||||
|
||||
\section auto_ingest_setup_services Installing Services and Configuring Autopsy
|
||||
Follow the instructions on the \ref install_multiuser_page page to set up the necessary services and configure your Autopsy clients to use them. After this is complete, you should be able to \ref multiuser_page "create and use multi-user cases".
|
||||
|
||||
\section auto_ingest_setup_ain_config Auto Ingest Node Configuration
|
||||
|
||||
While Examiner nodes only require multi-user cases to be set up, the auto ingest nodes need additional configuration. To start, go to the "Auto Ingest" tab on the Options menu and select the "Auto Ingest mode" radio button. If you haven't saved your multi-user settings there will be a warning message displayed here - if you see it, go back to the "Multi-User" tab and make sure you've entered all the required fields and then hit the "Apply" button.
|
||||
|
||||
\image html AutoIngest\auto_ingest_mode_setup.png
|
||||
|
||||
\subsection auto_ingest_config_folders Folder Configuration
|
||||
|
||||
The first thing to do is to set two folder locations. The shared images folder is the base folder for all data that will be ingested through the auto ingest node. The shared cases folder is the base folder for the cases that will be created by the auto ingest node.
|
||||
|
||||
\subsection auto_ingest_config_ingest_settings Ingest Module Settings
|
||||
The "Ingest Module Settings" button is used to configure the \ref ingest_page you want to run during auto-ingest. One note is that on auto-ingest nodes, we recommend that you configure the Keyword Search module to not perform periodic keyword searches. When a user is in front of the computer, this feature exists to provide frequent updates, but it is not needed on this node. To configure this, choose the Keyword Search item in the Options window. Select the "General" tab and choose the option for no periodic search.
|
||||
|
||||
\image html AutoIngest\no_periodic_searches.png
|
||||
|
||||
\subsection auto_ingest_advanced_settings Advanced Settings
|
||||
|
||||
The "Advanced Settings" button will bring up the automated ingest job settings. As expressed in the warning statement, care must be used when making changes on this panel.
|
||||
|
||||
\image html AutoIngest\advanced_settings.png
|
||||
|
||||
The Automated Ingest Job Settings section contains the following options:
|
||||
<dl>
|
||||
<dt>System synchronization wait time</dt>
|
||||
<dd>A wait time used by auto ingest nodes to ensure proper synchronization of node operations in circumstances where delays may occur, e.g., a wait to compensate for network file system latency effects on the visibility of newly created shared directories and files.</dd>
|
||||
<dt>External processes time out</dt>
|
||||
<dd>Autopsy components that spawn potentially long-running processes have the option to use this setting, if it is enabled, to terminate those processes if the specified time out period has elapsed. Each component that uses this feature is responsible for implementing its own policy for the handling of incomplete processing when an external process time out occurs. Core components that use external process time outs include the \ref recent_activity_page and \ref photorec_carver_page ingest modules.</dd>
|
||||
<dt>Interval between input scans</dt>
|
||||
<dd>The interval between scans of the auto ingest input directories for manifest files. Note that the actual timing of input scans by each node depends on both this setting and node startup time.</dd>
|
||||
<dt>Maximum job retries allowed</dt>
|
||||
<dd>The maximum number of times a crashed auto ingest job will be automatically retried. No distinction is made between jobs that crash due to system error conditions such as power outages and jobs that crash due to input data source corruption. In general, input data source corruption should be handled gracefully by Autopsy, but this setting provides insurance against unforeseen issues with input data viability.</dd>
|
||||
<dt>Target concurrent jobs per case</dt>
|
||||
<dd>A soft limit on the number of concurrent jobs per case when multiple cases are processed simultaneously by a group of auto ingest nodes. This setting specifies a target rather than a hard limit because nodes are never idled if there are ingest jobs to do and nodes work cooperatively rather than rely on a centralized, load-balancing job scheduling service.</dd>
|
||||
<dt>Number of threads to use for file ingest</dt>
|
||||
<dd>The number of threads an auto ingest node dedicates to analyzing files from input data sources in parallel. Note that analysis of input data source files themselves is always single-threaded.</dd>
|
||||
</dl>
|
||||
|
||||
\subsection auto_ingest_file_export File Export
|
||||
|
||||
The "File Export" button will bring up the \ref file_export_page settings. This allows certain types of files to be automatically exported during auto ingest. Setting up this feature requires knowledge of internal Autopsy data structures and can be ignored for users.
|
||||
|
||||
\subsection auto_ingest_shared_config Shared Configuration
|
||||
|
||||
When using multiple auto ingest nodes, configuration can be centralized and shared with any auto ingest node that desires to use it. This is called Shared Configuration. The general idea is that you will set up one node (the "master") and upload that configuration to a central location. Then the other auto ingest nodes (the "secondary" nodes) will download that configuration whenever they start a new job. This saves time because you only need to configure one node, and ensures consistency across the auto ingest nodes.
|
||||
|
||||
\subsubsection auto_ingest_shared_config_master Master Node
|
||||
|
||||
On the computer that is going to be the configuration master automated ingest node, follow the configuration steps described in above to configure the node.
|
||||
If you would like every automated ingest node to share the configuration settings, check the first checkbox in the Shared Configuration section of the Auto Ingest settings panel. Next select a folder to store the shared configuration in. This folder must be a path to a network share that the other machines in the system will have access to. Use a UNC path if possible. Next, check the "Use this node as a master node that can upload settings" checkbox which should enable the "Save & Upload Config" button. If this does not happen, look for a red error message explaining what settings are missing.
|
||||
|
||||
\image html AutoIngest\master_node.png
|
||||
|
||||
After saving and uploading the configuration, hit the "Save" button to exit the Options panel.
|
||||
|
||||
\subsubsection auto_ingest_shared_config_secondary Secondary Node

Once one node has uploaded shared configuration data, the remaining nodes can be set up to download it, skipping over some of the configuration steps above.

To set up a secondary node, start by going through the \ref install_multiuser_page "multi-user configuration." Apply those changes, then switch to the Auto Ingest tab on the Options panel. Check the box to enable auto ingest, then the box to enable shared configuration, and enter the same folder used on the master node. The "Download Config" button should now be enabled and can be used to get the rest of the configuration automatically. Afterwards a dialog will likely appear telling you to restart Autopsy.

\subsubsection auto_ingest_shared_config_notes Notes

Some notes on shared configuration:
<ul>
<li>The \ref auto_ingest_error_suppression "error suppression registry edit" below will need to be done on each node.
<li>After the initial setup, the current shared configuration data will be updated before each job (there is no need to manually download it again).
<li>A few options require a restart to take effect (for example, most of the multi-user settings). If these are downloaded automatically while automated ingest is running, they will not be used until the automated ingest node is restarted.
<li>There is currently a limitation on where hash databases can be saved. Each database will be downloaded to the same folder it was in on the master node, which will cause errors if that drive letter is not present or the folder is not writeable on every node.
<li>Shared copies of the hash databases are also not currently supported. Each node will download its own copy of each database.
</ul>

\subsection auto_ingest_error_suppression Error Suppression

On an auto ingest node, we also strongly recommend that you configure the system to suppress the error dialogs that Windows may display if an application crashes. Some of the modules that Autopsy runs have crashed on test data in the past, and if an error dialog is displayed, all processing stops.

Disabling the error messages is done by setting the following registry value to "1", as shown in the screenshot below.

\verbatim HKCU\Software\Microsoft\Windows\Windows Error Reporting\DontShowUI \endverbatim
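
If you prefer to make the change from a command prompt rather than the registry editor, a command along the following lines should work. This uses the standard Windows reg tool, not an Autopsy-specific command; run it as the user account that runs auto ingest:

\verbatim
reg add "HKCU\Software\Microsoft\Windows\Windows Error Reporting" /v DontShowUI /t REG_DWORD /d 1 /f
\endverbatim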

\image html AutoIngest\error_suppression.png

*/

@ -38,15 +38,24 @@ To open a case, either:

\image html multi_user_case_select.png

\section case_properties Viewing Case Details and the Data Source Summary

You can view the case properties by going to the "Case" menu and clicking "Case Details".

\image html case_properties.png

Most of the case properties can be edited through the "Edit Details" button.

You can view the data source summary by going to the "Case" menu and clicking "Data Source Summary". The table at the top shows general information about each data source in the case. In the lower half, the first tab shows more detailed information about the selected data source.

\image html data_source_summary_details.png

The second tab, "Counts", shows the number of files found of various types and the number of extracted results.

\image html data_source_summary_counts.png

The third tab, "Ingest History", shows each ingest job, the time it was completed, and which modules were run as part of the job.

\image html data_source_summary_ingest.png

*/
@ -143,7 +143,7 @@ If you would like to prevent the Interesting Items from being created in a parti
through the run time ingest properties. Note that this only disables the Interesting Item results - all properties
are still added to the central repository.

\image html central_repo_ingest_settings.png

\section cr_viewing_results Viewing Results
@ -161,20 +161,19 @@ properties from the central repository. If the selected file or artifact is asso
to one or more properties in the database, the associated properties will be displayed. Note: the Content
Viewer will display ALL associated properties available in the database. It ignores the user's enabled/disabled Correlation Properties.

The other occurrences are grouped by case and then data source. The rows in the content viewer have background colors to indicate if they are known to be of interest. Properties that are notable
will have a Red background, all others will have a White background. The notable status will also be displayed in the "Known" column.

\image html central_repo_content_viewer.png

The user can click on any column heading to sort by the values in that column.

If the user selects a row and then right-clicks, a menu will be displayed.
This menu has several options.
-# Select All
-# Export Selected Rows to CSV
-# Show Case Details
-# Show Frequency
-# Add/Edit Comment

<b>Select All</b>

@ -206,9 +205,9 @@ the Case -> Case Properties menu.
This shows how common the selected file is. The value is the percentage of case/data source tuples that have the selected property. For example, if the property appears in two of ten case/data source tuples, the frequency shown is 20%.

\subsection central_repo_comment Add/Edit Comment

This allows you to add a comment for this entry or edit an existing comment. If you want instead to edit the comment of a node, it can be done by right clicking on the original item in the result viewer and selecting "Add/Edit Central Repository Comment".

\image html central_repo_comment_menu.png

docs/doxygen-user/command_line_ingest.dox (new file, 49 lines)
@ -0,0 +1,49 @@

/*! \page command_line_ingest_page Command Line Ingest

\section command_line_ingest_overview Overview

The Command Line Ingest feature allows you to process a \ref ds_page "data source" with Autopsy from the command line. Autopsy will automatically create a case with the settings you specify and will generate a \ref report_case_uco report.

\section command_line_ingest_config Configuration

Go to Tools->Options and then select the "Command Line Ingest" tab.

\image html command_line_ingest_options.png

First, enter the output folder for the cases. Next, use the button to open the ingest module settings. Here you can configure the \ref ingest_page settings that will be used when running from the command line.

\section command_line_ingest_run Running Autopsy

In a command prompt, navigate to the Autopsy bin folder. This is normally located at "C:\Program Files\Autopsy-version\bin".

\image html command_line_ingest_bin_dir.png

Now run Autopsy with the following parameters, substituting the path to your data source and your desired case name. Both \ref ds_img "disk images" and \ref ds_log "logical files" are supported. Note that the case name must be unique for each run.

\verbatim
autopsy64.exe --inputPath=(data source path) --caseName=(case name) --runFromCommandLine=true
\endverbatim

In the example below, we're going to process a disk image with path "R:\work\images\xp-sp3-v4.E01" and name the case "xpCase".

\image html command_line_ingest_command_entry.png
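
Spelled out, the command shown in the screenshot would be entered as:

\verbatim
autopsy64.exe --inputPath=R:\work\images\xp-sp3-v4.E01 --caseName=xpCase --runFromCommandLine=true
\endverbatim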

You'll start seeing output in the command prompt and the Autopsy UI will open. In the middle of the UI you'll see the following dialog:

\image html command_line_ingest_dialog.png

Once Autopsy finishes processing, you'll be back at the command window. Press Enter to return to the command prompt.

\image html command_line_ingest_console_output.png
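
Because each run requires a unique case name, processing a folder of images is easy to script. The following batch file is a minimal sketch under stated assumptions, not an official Autopsy tool: the install path and the image folder R:\work\images are hypothetical, and each file's base name is used as the (unique) case name.

\verbatim
@echo off
rem Sketch: run command line ingest on every E01 image in a folder.
rem The install path and image folder below are assumptions - adjust them.
cd /d "C:\Program Files\Autopsy-version\bin"
for %%F in (R:\work\images\*.E01) do (
    rem %%~nF expands to the file name without extension, giving a unique case name
    autopsy64.exe --inputPath="%%F" --caseName=%%~nF --runFromCommandLine=true
)
\endverbatim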
\section command_line_ingest_results Viewing Results

You can open the case created on the command line like any other Autopsy case. Simply go to "Open Case" and then browse to the output folder you set up in the \ref command_line_ingest_config section and look for the folder starting with your case name. It will have a timestamp appended to the name you specified.

\image html command_line_ingest_open_case.png

If you are only interested in the \ref report_case_uco report, then you don't need to open Autopsy. The report can be found in the case folder under "Reports\CASE-UCO", in an automatically generated folder whose name contains the data source ID and a timestamp.

\image html command_line_ingest_report.png

*/

@ -54,7 +54,7 @@ You can choose to hide matches that appear with a high frequency in the Central

Each search displays its results in a new tab. The title of the tab will include the search parameters.

\subsection common_properties_sort_by_count Sort by number of data sources

\image html common_properties_result.png

@ -10,10 +10,18 @@ When a Result type is selected in the Result Viewer (as opposed to a file), most
\section cv_hex Hex

The Hex Content Viewer is nearly always available and shows you the raw and exact contents of a file. In this content viewer, the data of the file is represented as hexadecimal values, grouped in two groups of 8 bytes, followed by one group of 16 ASCII characters which are derived from each pair of hex values (each byte). Non-printable ASCII characters and characters that would take more than one character space are typically represented by a dot (".") in the ASCII field.
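
For example, a single row of the viewer for a Windows executable might look like the following illustrative (not tool-generated) line, with the offset, the two 8-byte groups, and the ASCII rendering:

\verbatim
0x00000000: 4D 5A 90 00 03 00 00 00   04 00 00 00 FF FF 00 00   MZ..............
\endverbatim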

\image html content_viewer_hex.png

If desired, you can open the file in an external hex editor. This is configured through the "External Viewer" tab on the options panel. HxD has been tested to work, but alternate hex editors may also be compatible.

\image html content_viewer_hex_editor_setup.png

Note that this process saves the file to disk before launching the hex editor. A progress indicator will be displayed in the lower right corner of the application. If you wish to cancel the file export, click the 'X' to the right of the progress bar.

\image html content_viewer_hxd_progress.png

\section cv_strings Strings

The Strings tab shows all text strings found in the file. Different scripts can be chosen from the drop-down menu to display results for non-Latin alphabets.

@ -24,7 +32,7 @@ The Strings tab shows all text strings found in the file. Different scripts can

For certain file types, the Application tab can display the contents in a user-friendly format. The following screenshots show some examples of what the Application tab will display.

It will display most image types, which can be scaled and rotated:

\image html content_viewer_app_image.png

@ -36,6 +44,10 @@ And plist file data will be shown and can be exported:

\image html content_viewer_app_plist.png

HTML files can be displayed closer to their original form:

\image html content_viewer_html.png

\section cv_indexed_text Indexed Text

The Indexed Text tab shows the text that has been indexed by the Keyword Search module. You can switch the "Text Source" field to "Result Text" to see which text has been indexed for associated results.

@ -54,11 +54,13 @@ Autopsy supports disk images in the following formats:

To add a disk image:

<ol>
<li>Choose "Disk Image or VM File" from the data source types.
<li>Browse to the first file in the disk image. You need to specify only the first file and Autopsy will find the rest.
<li>Choose to perform orphan file finding on FAT file systems. This can be a time intensive process because it will require that Autopsy look at each sector in the device.
<li>Choose the timezone that the disk image came from. This is most important when adding FAT file systems because FAT does not store timezone information and Autopsy will not know how to normalize to UTC.
<li>Optionally choose the sector size. The Auto Detect mode will work correctly on the majority of images, but if adding the data source fails you may want to try the other sector sizes.
<li>Optionally enter one or more hashes for the image. These will be saved under the image metadata and can be verified using the \ref data_source_integrity_page.
</ol>

\section ds_local Adding a Local Disk
@ -6,13 +6,13 @@ The Experimental module, as the name implies, contains code that is not yet part

\section exp_setup Enabling the Experimental Module

To start, go to Tools->Plugins and select the "Installed" tab, then check the box next to "Experimental", click "Activate", and go through the next couple of screens. A restart should not be required.

\image html experimental_plugins_menu.png

\section exp_features Current Experimental Features

- \ref auto_ingest_page
- \ref object_detection_page
- \ref volatility_dsp_page

docs/doxygen-user/file_export.dox (new file, 69 lines)
@ -0,0 +1,69 @@

/*! \page file_export_page File Export

\section file_export_overview Overview

If enabled, the File Exporter will run after each \ref auto_ingest_page job and export any files from that data source that match the supplied rules. Most users will not need to use this feature - analysts can open the auto ingest cases in an examiner node and look through the data there.

\section file_export_setup Configuration

After enabling the file exporter, the first thing to do is set two output folders. The "Files Folder" is the base directory for all exported files, and the "Reports Folder" is the base directory for reports (lists of every file exported for each data source). If possible, it is best to use UNC paths.

\image html AutoIngest\file_exporter_main.png

Next you'll make rules for the files you want to export. Each rule must have a name and at least one condition set. If more than one condition is set, then all conditions must be true to export the file. When you're done setting up your rule, press the "Save" button to save it. You'll see the new rule in the list on the left side.

All of the saved rules will be run against each data source. There's no way to set a rule as inactive, so if you make a rule and don't want it to run, you'll have to use the "Delete Rule" button to remove it.

You'll need to run at least the \ref hash_db_page and \ref file_type_identification_page to use the file exporter. You may need to run additional modules based on any attributes in your rules.

\subsection file_exporter_mime MIME Type

The first condition is based on MIME type. To enable it, check the box before "MIME Type", then select a MIME type from the list and choose whether you want to match it or not match it. Multiple MIME types cannot be selected at this time. The following shows a rule that will match all PNG images.

\image html AutoIngest\file_export_png.png

\subsection file_exporter_size File Size

The second condition is based on file size. You can choose a file size (using the list on the right to change the units) and then select whether files should be larger, smaller, equal to, or not equal to that size. The following shows a rule that will match plain text files that are over 1 kB.

\image html AutoIngest\file_export_size.png

\subsection file_exporter_attributes Attributes

The third condition is based on blackboard artifacts and attributes, which is how Autopsy stores most of its analysis results. A file will be exported if it is linked to a matching attribute. Using this type of condition will require some familiarity with exactly how these attributes are being created and what data we expect to see in them. There's some information to get started in the <a href="http://sleuthkit.org/sleuthkit/docs/jni-docs/4.6.0/mod_bbpage.html">Sleuthkit documentation</a>. You will most likely also have to open an Autopsy case database file to verify the exact attribute types being used to hold the data you're interested in.
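
One convenient way to do that inspection is to query a single-user case database (autopsy.db in the case folder) with the sqlite3 command line tool. The command below is a sketch, not part of Autopsy; the table names follow the published Sleuth Kit schema (verify against your schema version), and the case path is a hypothetical example:

\verbatim
sqlite3 "C:\Cases\xpCase\autopsy.db" "SELECT DISTINCT art.type_name, attr_type.type_name FROM blackboard_attributes attr JOIN blackboard_artifacts a ON attr.artifact_id = a.artifact_id JOIN blackboard_artifact_types art ON a.artifact_type_id = art.artifact_type_id JOIN blackboard_attribute_types attr_type ON attr.attribute_type_id = attr_type.attribute_type_id;"
\endverbatim

This lists each artifact/attribute type pair actually present in the case, which is exactly what a rule condition needs to match on.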

To make an attribute condition, select the artifact type and then the attribute type that you are interested in. On the next line you can enter a value and set what relation you want the attribute to have to it (equals, not equals, greater/less than). Not all options will make sense with all data types. Then use the "Add Attribute" button to add it to the attribute list. If you make a mistake, use the "Delete Attribute" button to erase it. The following shows a rule that will export any files that had a keyword hit for the word "bomb" in them.

\image html AutoIngest\file_export_keyword.png

It's possible to do more general matching on the artifacts. Suppose you wanted to export all files that the \ref encryption_page flagged as "Encryption Suspected". These files will have a TSK_ENCRYPTION_SUSPECTED artifact with a single "TSK_COMMENT" attribute that contains the entropy calculated for the file. In this case we can use the "not equals" operator on a string that we wouldn't expect to see in the TSK_COMMENT field to effectively change the condition to "has an associated TSK_ENCRYPTION_SUSPECTED artifact."

\image html AutoIngest\file_export_encrypton.png

\section file_export_output Output

The exported files are found under the files folder that was specified in the \ref file_export_setup step, organized at the top layer by the device ID of the data source.

\image html AutoIngest\file_export_dir_structure.png

Exported files are named with their hash and stored in subfolders based on parts of that hash, to prevent any single folder from becoming very large.

\image html AutoIngest\file_export_file_loc.png

The report files are also found in subfolders under the device ID and then the rule name.

\image html AutoIngest\file_export_json_loc.png

This JSON file contains information about the file and any associated artifacts that were part of the rule's conditions.
\verbatim
{"7C89F280C337AB3E997D20527B8EC6F8":{"Filename":"\\\\WIN-4913\\AutopsyData\\FileExportFiles\\37567\\text-plain\\7C\\89\\F2\\80\\7C89F280C337AB3E997D20527B8EC6F8",
"Type":"text/plain","MD5":"7C89F280C337AB3E997D20527B8EC6F8","File data":{"Modified":["0000-00-00 00:00:00"],"Changed":["0000-00-00 00:00:00"],
"Accessed":["0000-00-00 00:00:00"],"Created":["0000-00-00 00:00:00"],"Extension":["txt"],"Filename":["File about explosions.txt"],"Size":["54"],
"Source Path":["/kwTest_2019_03_14_12_53_33//File about explosions.txt"],"Flags (Dir)":["Allocated"],"Flags (Meta)":["Allocated"],
"Mode":["r---------"],"User ID":["0"],"Group ID":["0"],"Meta Addr":["0"],"Attr Addr":["1-0"],"Dir Type":["r"],"MetaType":["r"],
"Known":["unknown"]},"TSK_KEYWORD_HIT":{"TSK_KEYWORD":["bomb"]},
"TSK_KEYWORD_HIT":{"TSK_KEYWORD_PREVIEW":["keyword search for the word bomb in this file.\n\n\n------"]},
"TSK_KEYWORD_HIT":{"TSK_SET_NAME":["bomb"]},"TSK_KEYWORD_HIT":{"TSK_KEYWORD_SEARCH_TYPE":["0"]}}}
\endverbatim

*/

Binary image files added under docs/doxygen-user/images (diff viewer size residue trimmed):

BIN docs/doxygen-user/images/AutoIngest/admin_file.png (new file)
BIN docs/doxygen-user/images/AutoIngest/admin_jobs_cancel.png (new file)
BIN docs/doxygen-user/images/AutoIngest/admin_jobs_completed.png (new file)
BIN docs/doxygen-user/images/AutoIngest/admin_jobs_panel.png (new file)
BIN docs/doxygen-user/images/AutoIngest/admin_nodes_panel.png (new file)
BIN docs/doxygen-user/images/AutoIngest/advanced_settings.png (new file)
BIN docs/doxygen-user/images/AutoIngest/auto_ingest_in_progress.png (new file)
BIN docs/doxygen-user/images/AutoIngest/auto_ingest_mode_setup.png (new file)
BIN docs/doxygen-user/images/AutoIngest/case_delete_confirm.png (new file)
BIN docs/doxygen-user/images/AutoIngest/cases_context_menu.png (new file)
BIN docs/doxygen-user/images/AutoIngest/cases_panel.png (new file)
BIN docs/doxygen-user/images/AutoIngest/error_suppression.png (new file)
BIN docs/doxygen-user/images/AutoIngest/examiner_dashboard.png (new file)
BIN docs/doxygen-user/images/AutoIngest/file_export_encrypton.png (new file)
BIN docs/doxygen-user/images/AutoIngest/file_export_file_loc.png (new file)
BIN docs/doxygen-user/images/AutoIngest/file_export_json_loc.png (new file)
BIN docs/doxygen-user/images/AutoIngest/file_export_keyword.png (new file)
BIN docs/doxygen-user/images/AutoIngest/file_export_png.png (new file)
BIN docs/doxygen-user/images/AutoIngest/file_export_size.png (new file)
BIN docs/doxygen-user/images/AutoIngest/file_exporter_main.png (new file)
BIN docs/doxygen-user/images/AutoIngest/health_monitor.png (new file)
BIN docs/doxygen-user/images/AutoIngest/health_monitor_disabled.png (new file)
BIN docs/doxygen-user/images/AutoIngest/master_node.png (new file)
BIN docs/doxygen-user/images/AutoIngest/metrics.png (new file)
BIN docs/doxygen-user/images/AutoIngest/no_periodic_searches.png (new file)
BIN docs/doxygen-user/images/AutoIngest/overview_pic1.png (new file)
BIN docs/doxygen-user/images/AutoIngest/overview_pic2.png (new file)
BIN docs/doxygen-user/images/command_line_ingest_bin_dir.png (new file)
BIN docs/doxygen-user/images/command_line_ingest_command_entry.png (new file)
BIN docs/doxygen-user/images/command_line_ingest_console_output.png (new file)
BIN docs/doxygen-user/images/command_line_ingest_dialog.png (new file)
BIN docs/doxygen-user/images/command_line_ingest_open_case.png (new file)
BIN docs/doxygen-user/images/command_line_ingest_options.png (new file)
BIN docs/doxygen-user/images/command_line_ingest_report.png (new file)
BIN docs/doxygen-user/images/content_viewer_hex_editor_setup.png (new file)

Several existing images were also modified or removed; their paths were not captured in this extract.