newCompletedJobsList = new ArrayList<>();
+ private Lock currentDirLock;
/**
* Searches the input directories for manifest files. The search results
@@ -1227,9 +1117,9 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
} catch (Exception ex) {
/*
- * NOTE: Need to catch all exceptions here. Otherwise
- * uncaught exceptions will propagate up to the calling
- * thread and may stop it from running.
+ * NOTE: Need to catch all unhandled exceptions here.
+ * Otherwise uncaught exceptions will propagate up to the
+ * calling thread and may stop it from running.
*/
sysLogger.log(Level.SEVERE, String.format("Error scanning the input directory %s", rootInputDirectory), ex);
}
@@ -1263,20 +1153,15 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
}
/**
- * Invoked for a file in a directory. If the file is a manifest file,
- * creates a pending pending or completed auto ingest job for the
- * manifest, based on the data stored in the coordination service node
- * for the manifest.
- *
- * Note that the mapping of case names to manifest paths that is used
- * for case deletion is updated as well.
+ * Creates a pending or completed auto ingest job if the file visited is
+ * a manifest file, based on the data stored in the coordination service
+ * node for the manifest.
*
* @param filePath The path of the file.
* @param attrs The file system attributes of the file.
*
* @return TERMINATE if auto ingest is shutting down, CONTINUE if it has
* not.
- *
*/
@Override
public FileVisitResult visitFile(Path filePath, BasicFileAttributes attrs) {
@@ -1285,6 +1170,11 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
}
try {
+ /*
+ * Determine whether or not the file is an auto ingest job
+ * manifest file. If it is, then parse it. Otherwise, move on to
+ * the next file in the directory.
+ */
Manifest manifest = null;
for (ManifestFileParser parser : Lookup.getDefault().lookupAll(ManifestFileParser.class)) {
if (parser.fileIsManifest(filePath)) {
@@ -1304,76 +1194,95 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
return TERMINATE;
}
- if (null != manifest) {
- /*
- * Update the mapping of case names to manifest paths that
- * is used for case deletion.
- */
- String caseName = manifest.getCaseName();
- Path manifestPath = manifest.getFilePath();
- if (casesToManifests.containsKey(caseName)) {
- Set manifestPaths = casesToManifests.get(caseName);
- manifestPaths.add(manifestPath);
- } else {
- Set manifestPaths = new HashSet<>();
- manifestPaths.add(manifestPath);
- casesToManifests.put(caseName, manifestPaths);
- }
+ if (manifest == null) {
+ return CONTINUE;
+ }
- /*
- * Add a job to the pending jobs queue, the completed jobs
- * list, or do crashed job recovery, as required.
- */
- try {
- byte[] rawData = coordinationService.getNodeData(CoordinationService.CategoryNode.MANIFESTS, manifestPath.toString());
+ /*
+ * If a manifest file has been found, get a manifest file lock,
+ * analyze the job state, and put a job into the appropriate job
+ * list. There is a short wait here in case the input directory
+ * scanner file visitor of another auto ingest node (AIN) has
+ * the lock. If the lock ultimately can't be obtained, the wait
+ * was not long enough, or another auto ingest node (AIN) is
+ * holding the lock because it is executing the job, or a case
+ * deletion task has acquired the lock. In all of these cases the
+ * manifest can be skipped for this scan.
+ */
+ try (Lock manifestLock = coordinationService.tryGetExclusiveLock(CoordinationService.CategoryNode.MANIFESTS, manifest.getFilePath().toString(), INPUT_SCAN_LOCKING_TIMEOUT_MINS, TimeUnit.MINUTES)) {
+ if (null != manifestLock) {
+
+ /*
+ * Now that the lock has been acquired, make sure the
+ * manifest is still here. This is a way to resolve the
+ * race condition between this task and case deletion
+ * tasks without resorting to a protocol using locking
+ * of the input directory.
+ */
+ if (!filePath.toFile().exists()) {
+ return CONTINUE;
+ }
+
+ byte[] rawData = coordinationService.getNodeData(CoordinationService.CategoryNode.MANIFESTS, manifest.getFilePath().toString());
if (null != rawData && rawData.length > 0) {
- try {
- AutoIngestJobNodeData nodeData = new AutoIngestJobNodeData(rawData);
- AutoIngestJob.ProcessingStatus processingStatus = nodeData.getProcessingStatus();
- switch (processingStatus) {
- case PENDING:
- addPendingJob(manifest, nodeData);
- break;
- case PROCESSING:
- doRecoveryIfCrashed(manifest, nodeData);
- break;
- case COMPLETED:
- addCompletedJob(manifest, nodeData);
- break;
- case DELETED:
- /*
- * Ignore jobs marked as "deleted."
- */
- break;
- default:
- sysLogger.log(Level.SEVERE, "Unknown ManifestNodeData.ProcessingStatus");
- break;
- }
- } catch (AutoIngestJobNodeData.InvalidDataException | AutoIngestJobException ex) {
- sysLogger.log(Level.SEVERE, String.format("Invalid auto ingest job node data for %s", manifestPath), ex);
+ AutoIngestJobNodeData nodeData = new AutoIngestJobNodeData(rawData);
+ AutoIngestJob.ProcessingStatus processingStatus = nodeData.getProcessingStatus();
+ switch (processingStatus) {
+ case PENDING:
+ addPendingJob(manifest, nodeData);
+ break;
+ case PROCESSING:
+ /*
+ * If an exclusive manifest file lock was
+ * obtained for an auto ingest job in the
+ * processing state, the auto ingest node
+ * (AIN) executing the job crashed and the
+ * lock was released when the coordination
+ * service detected that the AIN was no
+ * longer alive.
+ */
+ doCrashRecovery(manifest, nodeData);
+ break;
+ case COMPLETED:
+ addCompletedJob(manifest, nodeData);
+ break;
+ case DELETED:
+ /*
+ * Ignore jobs marked as deleted. Note that
+ * this state is no longer used and is
+ * retained for legacy jobs only.
+ */
+ break;
+ default:
+ sysLogger.log(Level.SEVERE, "Unknown ManifestNodeData.ProcessingStatus");
+ break;
}
} else {
try {
addNewPendingJob(manifest);
} catch (AutoIngestJobException ex) {
- sysLogger.log(Level.SEVERE, String.format("Invalid manifest data for %s", manifestPath), ex);
+ sysLogger.log(Level.SEVERE, String.format("Invalid manifest data for %s", manifest.getFilePath()), ex);
}
}
- } catch (CoordinationServiceException ex) {
- sysLogger.log(Level.SEVERE, String.format("Error transmitting node data for %s", manifestPath), ex);
- return CONTINUE;
- } catch (InterruptedException ex) {
- Thread.currentThread().interrupt();
- return TERMINATE;
}
+ } catch (CoordinationServiceException | AutoIngestJobException | AutoIngestJobNodeData.InvalidDataException ex) {
+ sysLogger.log(Level.SEVERE, String.format("Error handling manifest at %s", manifest.getFilePath()), ex);
+ } catch (InterruptedException ex) {
+ /*
+ * The thread running the input directory scan task was
+ * interrupted while blocked, i.e., auto ingest is shutting
+ * down.
+ */
+ return TERMINATE;
}
} catch (Exception ex) {
- // Catch all unhandled and unexpected exceptions. Otherwise one bad file
- // can stop the entire input folder scanning. Given that the exception is unexpected,
- // I'm hesitant to add logging which requires accessing or de-referencing data.
- sysLogger.log(Level.SEVERE, "Unexpected exception in file visitor", ex);
- return CONTINUE;
+ /*
+ * This is an exception firewall so that an unexpected runtime
+ * exception from the handling of a single manifest file does
+ * not take out the input directory scanner.
+ */
+ sysLogger.log(Level.SEVERE, String.format("Unexpected exception handling %s", filePath), ex);
}
if (!Thread.currentThread().isInterrupted()) {
@@ -1384,49 +1293,36 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
}
/**
- * Adds an existing job to the pending jobs queue.
+ * Adds an auto ingest job to the pending jobs queue.
*
* @param manifest The manifest for the job.
- * @param nodeData The data stored in the coordination service node for
- * the job.
+ * @param nodeData The data stored in the manifest file lock
+ * coordination service node for the job.
*
- * @throws InterruptedException if the thread running the input
- * directory scan task is interrupted while
- * blocked, i.e., if auto ingest is
- * shutting down.
+ * @throws AutoIngestJobException If there was an error working
+ * with the node data.
+ * @throws CoordinationServiceException If a lock node data version
+ * update was required and there
+ * was an error writing the node
+ * data by the coordination
+ * service.
+ * @throws InterruptedException If the thread running the input
+ * directory scan task is
+ * interrupted while blocked, i.e.,
+ * if auto ingest is shutting down.
*/
- private void addPendingJob(Manifest manifest, AutoIngestJobNodeData nodeData) throws InterruptedException, AutoIngestJobException {
+ private void addPendingJob(Manifest manifest, AutoIngestJobNodeData nodeData) throws AutoIngestJobException, CoordinationServiceException, InterruptedException {
AutoIngestJob job;
if (nodeData.getVersion() == AutoIngestJobNodeData.getCurrentVersion()) {
job = new AutoIngestJob(nodeData);
} else {
job = new AutoIngestJob(manifest);
- job.setPriority(nodeData.getPriority()); // Retain priority, present in all versions of the node data.
+ job.setPriority(nodeData.getPriority());
Path caseDirectory = PathUtils.findCaseDirectory(rootOutputDirectory, manifest.getCaseName());
if (null != caseDirectory) {
job.setCaseDirectoryPath(caseDirectory);
}
-
- /*
- * Try to upgrade/update the coordination service manifest node
- * data for the job.
- *
- * An exclusive lock is obtained before doing so because another
- * host may have already found the job, obtained an exclusive
- * lock, and started processing it. However, this locking does
- * make it possible that two processing hosts will both try to
- * obtain the lock to do the upgrade operation at the same time.
- * If this happens, the host that is holding the lock will
- * complete the upgrade operation, so there is nothing more for
- * this host to do.
- */
- try (Lock manifestLock = coordinationService.tryGetExclusiveLock(CoordinationService.CategoryNode.MANIFESTS, manifest.getFilePath().toString())) {
- if (null != manifestLock) {
- updateCoordinationServiceManifestNode(job);
- }
- } catch (CoordinationServiceException ex) {
- sysLogger.log(Level.SEVERE, String.format("Error attempting to set node data for %s", manifest.getFilePath()), ex);
- }
+ updateAutoIngestJobData(job);
}
newPendingJobsList.add(job);
}
@@ -1436,150 +1332,117 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
*
* @param manifest The manifest for the job.
*
- * @throws InterruptedException if the thread running the input
- * directory scan task is interrupted while
- * blocked, i.e., if auto ingest is
- * shutting down.
+ * @throws AutoIngestJobException If there was an error creating
+ * the node data.
+ * @throws CoordinationServiceException If there was an error writing
+ * the node data by the
+ * coordination service.
+ * @throws InterruptedException If the thread running the input
+ * directory scan task is
+ * interrupted while blocked, i.e.,
+ * if auto ingest is shutting down.
*/
- private void addNewPendingJob(Manifest manifest) throws InterruptedException, AutoIngestJobException {
- /*
- * Create the coordination service manifest node data for the job.
- * Note that getting the lock will create the node for the job (with
- * no data) if it does not already exist.
- *
- * An exclusive lock is obtained before creating the node data
- * because another host may have already found the job, obtained an
- * exclusive lock, and started processing it. However, this locking
- * does make it possible that two hosts will both try to obtain the
- * lock to do the create operation at the same time. If this
- * happens, the host that is locked out will not add the job to its
- * pending queue for this scan of the input directory, but it will
- * be picked up on the next scan.
- */
- try (Lock manifestLock = coordinationService.tryGetExclusiveLock(CoordinationService.CategoryNode.MANIFESTS, manifest.getFilePath().toString())) {
- if (null != manifestLock) {
- AutoIngestJob job = new AutoIngestJob(manifest);
- updateCoordinationServiceManifestNode(job);
- newPendingJobsList.add(job);
- }
- } catch (CoordinationServiceException ex) {
- sysLogger.log(Level.SEVERE, String.format("Error attempting to set node data for %s", manifest.getFilePath()), ex);
- }
+ private void addNewPendingJob(Manifest manifest) throws AutoIngestJobException, CoordinationServiceException, InterruptedException {
+ AutoIngestJob job = new AutoIngestJob(manifest);
+ updateAutoIngestJobData(job);
+ newPendingJobsList.add(job);
}
/**
- * Does crash recovery for a manifest, if required. The criterion for
- * crash recovery is a manifest with coordination service node data
- * indicating it is being processed for which an exclusive lock on the
- * node can be acquired. If this condition is true, it is probable that
- * the node that was processing the job crashed and the processing
- * status was not updated.
+ * Does recovery for an auto ingest job that was left in the processing
+ * state by an auto ingest node (AIN) that crashed.
*
- * @param manifest The manifest for upgrading the node.
- * @param jobNodeData The auto ingest job node data.
+ * @param manifest The manifest for the job.
+ * @param nodeData The data stored in the manifest file lock
+ * coordination service node for the job.
*
- * @throws InterruptedException if the thread running the input
- * directory scan task is interrupted
- * while blocked, i.e., if auto ingest is
- * shutting down.
- * @throws AutoIngestJobException if there is an issue creating a new
- * AutoIngestJob object.
+ * @throws AutoIngestJobException If there was an error working
+ * with the node data.
+ * @throws CoordinationServiceException If there was an error writing
+ * updated node data by the
+ * coordination service.
+ * @throws InterruptedException If the thread running the input
+ * directory scan task is
+ * interrupted while blocked, i.e.,
+ * if auto ingest is shutting down.
*/
- private void doRecoveryIfCrashed(Manifest manifest, AutoIngestJobNodeData jobNodeData) throws InterruptedException, AutoIngestJobException {
- /*
- * Try to get an exclusive lock on the coordination service node for
- * the job. If the lock cannot be obtained, another host in the auto
- * ingest cluster is already doing the recovery, so there is nothing
- * to do.
- */
+ private void doCrashRecovery(Manifest manifest, AutoIngestJobNodeData jobNodeData) throws AutoIngestJobException, CoordinationServiceException, InterruptedException {
String manifestPath = manifest.getFilePath().toString();
- try (Lock manifestLock = coordinationService.tryGetExclusiveLock(CoordinationService.CategoryNode.MANIFESTS, manifestPath)) {
- if (null != manifestLock) {
- sysLogger.log(Level.SEVERE, "Attempting crash recovery for {0}", manifestPath);
- Path caseDirectoryPath = PathUtils.findCaseDirectory(rootOutputDirectory, manifest.getCaseName());
+ sysLogger.log(Level.SEVERE, "Attempting crash recovery for {0}", manifestPath);
+ AutoIngestJob job = new AutoIngestJob(jobNodeData);
- /*
- * Create the recovery job.
- */
- AutoIngestJob job = new AutoIngestJob(jobNodeData);
- int numberOfCrashes = job.getNumberOfCrashes();
- if (numberOfCrashes <= AutoIngestUserPreferences.getMaxNumTimesToProcessImage()) {
- ++numberOfCrashes;
- job.setNumberOfCrashes(numberOfCrashes);
- if (numberOfCrashes <= AutoIngestUserPreferences.getMaxNumTimesToProcessImage()) {
- job.setCompletedDate(new Date(0));
- } else {
- job.setCompletedDate(Date.from(Instant.now()));
- }
- }
+ /*
+ * Try to set the error flags that indicate incomplete or messy data
+ * in displays for the job and the case. Note that if the job
+ * crashed before a case directory was created, the job was a no-op,
+ * so the data quality flags do not need to be set.
+ */
+ Path caseDirectoryPath = PathUtils.findCaseDirectory(rootOutputDirectory, manifest.getCaseName());
+ if (null != caseDirectoryPath) {
+ job.setCaseDirectoryPath(caseDirectoryPath);
+ job.setErrorsOccurred(true);
+ try {
+ setCaseNodeDataErrorsOccurred(caseDirectoryPath);
+ } catch (IOException ex) {
+ sysLogger.log(Level.WARNING, String.format("Error attempting to set error flag in case node data for %s", caseDirectoryPath), ex);
+ }
+ } else {
+ job.setErrorsOccurred(false);
+ }
- if (null != caseDirectoryPath) {
- job.setCaseDirectoryPath(caseDirectoryPath);
- job.setErrorsOccurred(true);
- try {
- setCaseNodeDataErrorsOccurred(caseDirectoryPath);
- } catch (IOException ex) {
- sysLogger.log(Level.SEVERE, String.format("Error attempting to set error flag in case node data for %s", caseDirectoryPath), ex);
- }
- } else {
- job.setErrorsOccurred(false);
- }
-
- if (numberOfCrashes <= AutoIngestUserPreferences.getMaxNumTimesToProcessImage()) {
- job.setProcessingStatus(AutoIngestJob.ProcessingStatus.PENDING);
- if (null != caseDirectoryPath) {
- try {
- new AutoIngestJobLogger(manifest.getFilePath(), manifest.getDataSourceFileName(), caseDirectoryPath).logCrashRecoveryWithRetry();
- } catch (AutoIngestJobLoggerException ex) {
- sysLogger.log(Level.SEVERE, String.format("Error creating case auto ingest log entry for crashed job for %s", manifestPath), ex);
- }
- }
- } else {
- job.setProcessingStatus(AutoIngestJob.ProcessingStatus.COMPLETED);
- if (null != caseDirectoryPath) {
- try {
- new AutoIngestJobLogger(manifest.getFilePath(), manifest.getDataSourceFileName(), caseDirectoryPath).logCrashRecoveryNoRetry();
- } catch (AutoIngestJobLoggerException ex) {
- sysLogger.log(Level.SEVERE, String.format("Error creating case auto ingest log entry for crashed job for %s", manifestPath), ex);
- }
- }
- }
-
- /*
- * Update the coordination service node for the job. If this
- * fails, leave the recovery to another host.
- */
+ /*
+ * Update the crash count for the job, determine whether or not to
+ * retry processing its data source, and deal with the job
+ * accordingly.
+ */
+ int numberOfCrashes = job.getNumberOfCrashes();
+ ++numberOfCrashes;
+ job.setNumberOfCrashes(numberOfCrashes);
+ if (numberOfCrashes < AutoIngestUserPreferences.getMaxNumTimesToProcessImage()) {
+ job.setProcessingStatus(AutoIngestJob.ProcessingStatus.PENDING);
+ job.setCompletedDate(new Date(0));
+ if (null != caseDirectoryPath) {
try {
- updateCoordinationServiceManifestNode(job);
- } catch (CoordinationServiceException ex) {
- sysLogger.log(Level.SEVERE, String.format("Error attempting to set node data for %s", manifestPath), ex);
- return;
- }
-
- jobNodeData = new AutoIngestJobNodeData(job);
-
- if (numberOfCrashes <= AutoIngestUserPreferences.getMaxNumTimesToProcessImage()) {
- newPendingJobsList.add(job);
- } else {
- newCompletedJobsList.add(new AutoIngestJob(jobNodeData));
+ new AutoIngestJobLogger(manifest.getFilePath(), manifest.getDataSourceFileName(), caseDirectoryPath).logCrashRecoveryWithRetry();
+ } catch (AutoIngestJobLoggerException ex) {
+ sysLogger.log(Level.SEVERE, String.format("Error writing case auto ingest log entry for crashed job for %s", manifestPath), ex);
}
}
- } catch (CoordinationServiceException ex) {
- sysLogger.log(Level.SEVERE, String.format("Error attempting to get exclusive lock for %s", manifestPath), ex);
+ updateAutoIngestJobData(job);
+ newPendingJobsList.add(job);
+ } else {
+ job.setProcessingStatus(AutoIngestJob.ProcessingStatus.COMPLETED);
+ job.setCompletedDate(Date.from(Instant.now()));
+ if (null != caseDirectoryPath) {
+ try {
+ new AutoIngestJobLogger(manifest.getFilePath(), manifest.getDataSourceFileName(), caseDirectoryPath).logCrashRecoveryNoRetry();
+ } catch (AutoIngestJobLoggerException ex) {
+ sysLogger.log(Level.SEVERE, String.format("Error writing case auto ingest log entry for crashed job for %s", manifestPath), ex);
+ }
+ }
+ updateAutoIngestJobData(job);
+ newCompletedJobsList.add(new AutoIngestJob(jobNodeData));
}
}
/**
* Adds a job to process a manifest to the completed jobs list.
*
- * @param nodeData The data stored in the coordination service node for
- * the manifest.
- * @param manifest The manifest for upgrading the node.
+ * @param manifest The manifest for the job.
+ * @param nodeData The data stored in the manifest file lock
+ * coordination service node for the job.
*
- * @throws CoordinationServiceException
- * @throws InterruptedException
+ * @throws AutoIngestJobException If there was an error working
+ * with the node data.
+ * @throws CoordinationServiceException If there was an error writing
+ * updated node data by the
+ * coordination service.
+ * @throws InterruptedException If the thread running the input
+ * directory scan task is
+ * interrupted while blocked, i.e.,
+ * if auto ingest is shutting down.
*/
- private void addCompletedJob(Manifest manifest, AutoIngestJobNodeData nodeData) throws CoordinationServiceException, InterruptedException, AutoIngestJobException {
+ private void addCompletedJob(Manifest manifest, AutoIngestJobNodeData nodeData) throws AutoIngestJobException, CoordinationServiceException, InterruptedException {
Path caseDirectoryPath = nodeData.getCaseDirectoryPath();
if (!caseDirectoryPath.toFile().exists()) {
sysLogger.log(Level.WARNING, String.format("Job completed for %s, but cannot find case directory %s, ignoring job", nodeData.getManifestFilePath(), caseDirectoryPath.toString()));
@@ -1611,21 +1474,9 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
job.setProcessingStage(AutoIngestJob.Stage.COMPLETED, nodeData.getCompletedDate());
job.setProcessingStatus(AutoIngestJob.ProcessingStatus.COMPLETED);
- /*
- * Try to upgrade/update the coordination service manifest node
- * data for the job. It is possible that two hosts will both try
- * to obtain the lock to do the upgrade operation at the same
- * time. If this happens, the host that is holding the lock will
- * complete the upgrade operation.
- */
- try (Lock manifestLock = coordinationService.tryGetExclusiveLock(CoordinationService.CategoryNode.MANIFESTS, manifest.getFilePath().toString())) {
- if (null != manifestLock) {
- updateCoordinationServiceManifestNode(job);
- }
- } catch (CoordinationServiceException ex) {
- sysLogger.log(Level.SEVERE, String.format("Error attempting to set node data for %s", manifest.getFilePath()), ex);
- }
+ updateAutoIngestJobData(job);
}
+
newCompletedJobsList.add(job);
}
@@ -1654,17 +1505,17 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
}
/**
- * Invoked for an input directory after entries in the directory are
+ * Invoked for an input directory after the files in the directory are
* visited. Checks if the task thread has been interrupted because auto
* ingest is shutting down and terminates the scan if that is the case.
*
* @param dirPath The directory about to be visited.
* @param unused Unused.
*
- * @return TERMINATE if the task thread has been interrupted, CONTINUE
- * if it has not.
+ * @return FileVisitResult.TERMINATE if the task thread has been
+ * interrupted, FileVisitResult.CONTINUE if it has not.
*
- * @throws IOException if an I/O error occurs, but this implementation
+ * @throws IOException If an I/O error occurs, but this implementation
* does not throw.
*/
@Override
@@ -2123,11 +1974,12 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
try {
AutoIngestJobNodeData nodeData = new AutoIngestJobNodeData(coordinationService.getNodeData(CoordinationService.CategoryNode.MANIFESTS, manifestPath.toString()));
if (!nodeData.getProcessingStatus().equals(PENDING)) {
- /*
- * Due to a timing issue or a missed event, a
- * non-pending job has ended up on the pending
- * queue. Skip the job and remove it from the queue.
- */
+ iterator.remove();
+ continue;
+ }
+
+ File manifestFile = nodeData.getManifestFilePath().toFile();
+ if (!manifestFile.exists()) {
iterator.remove();
continue;
}
@@ -2145,11 +1997,13 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
continue;
}
}
+
iterator.remove();
currentJob = job;
break;
+
} catch (AutoIngestJobNodeData.InvalidDataException ex) {
- sysLogger.log(Level.WARNING, String.format("Unable to use node data for %s", manifestPath), ex); // JCTODO: Is this right?
+ sysLogger.log(Level.WARNING, String.format("Unable to use node data for %s", manifestPath), ex);
}
}
}
@@ -2220,7 +2074,7 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
currentJob.setProcessingStatus(AutoIngestJob.ProcessingStatus.PROCESSING);
currentJob.setProcessingStage(AutoIngestJob.Stage.STARTING, Date.from(Instant.now()));
currentJob.setProcessingHostName(AutoIngestManager.LOCAL_HOST_NAME);
- updateCoordinationServiceManifestNode(currentJob);
+ updateAutoIngestJobData(currentJob);
setChanged();
notifyObservers(Event.JOB_STARTED);
eventPublisher.publishRemotely(new AutoIngestJobStartedEvent(currentJob));
@@ -2244,7 +2098,7 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
currentJob.setProcessingStatus(AutoIngestJob.ProcessingStatus.PENDING);
}
currentJob.setProcessingHostName("");
- updateCoordinationServiceManifestNode(currentJob);
+ updateAutoIngestJobData(currentJob);
boolean retry = (!currentJob.isCanceled() && !currentJob.isCompleted());
sysLogger.log(Level.INFO, "Completed processing of {0}, retry = {1}", new Object[]{manifestPath, retry});
@@ -2410,9 +2264,9 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
sysLogger.log(Level.INFO, "Opening case {0} for {1}", new Object[]{caseName, manifest.getFilePath()});
currentJob.setProcessingStage(AutoIngestJob.Stage.OPENING_CASE, Date.from(Instant.now()));
/*
- * Acquire and hold a case name lock so that only one node at as
- * time can scan the output directory at a time. This prevents
- * making duplicate cases for the saem auto ingest case.
+ * Acquire and hold a case name lock so that only one node at a time
+ * can search the output directory for an existing case. This
+ * prevents making duplicate cases for the same auto ingest case.
*/
try (Lock caseLock = coordinationService.tryGetExclusiveLock(CoordinationService.CategoryNode.CASES, caseName, 30, TimeUnit.MINUTES)) {
if (null != caseLock) {
@@ -2440,13 +2294,16 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
Thread.sleep(AutoIngestUserPreferences.getSecondsToSleepBetweenCases() * 1000);
}
currentJob.setCaseDirectoryPath(caseDirectoryPath);
- updateCoordinationServiceManifestNode(currentJob); // update case directory path
+ updateAutoIngestJobData(currentJob);
+ recordManifest(caseDirectoryPath, manifest.getFilePath());
Case caseForJob = Case.getCurrentCase();
sysLogger.log(Level.INFO, "Opened case {0} for {1}", new Object[]{caseForJob.getName(), manifest.getFilePath()});
return caseForJob;
} catch (KeywordSearchModuleException ex) {
throw new CaseManagementException(String.format("Error creating solr settings file for case %s for %s", caseName, manifest.getFilePath()), ex);
+ } catch (IOException ex) {
+ throw new CaseManagementException(String.format("Error recording manifest file path for case %s for %s", caseName, manifest.getFilePath()), ex);
} catch (CaseActionException ex) {
throw new CaseManagementException(String.format("Error creating or opening case %s for %s", caseName, manifest.getFilePath()), ex);
}
@@ -2456,6 +2313,22 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
}
}
+ /**
+ * Writes the path of the manifest file for the current job to a list of
+ * manifest file paths for the case in a file in the case directory.
+ *
+ * @param caseDirectoryPath The case directory path.
+ *
+ * @throws IOException If the file cannot be created or opened and
+ * updated.
+ */
+ private void recordManifest(Path caseDirectoryPath, Path manifestFilePath) throws IOException {
+ final Path manifestsListFilePath = Paths.get(caseDirectoryPath.toString(), AutoIngestManager.getCaseManifestsListFileName());
+ try (FileWriter fileWriter = new FileWriter(manifestsListFilePath.toString(), true)) {
+ fileWriter.write(manifestFilePath.toString() + "\n");
+ }
+ }
+
/**
* Runs the ingest process for the current job.
*
@@ -3096,7 +2969,7 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
currentJob.setModuleRuntimesSnapshot(IngestManager.getInstance().getModuleRunTimes());
setChanged();
notifyObservers(Event.JOB_STATUS_UPDATED);
- updateCoordinationServiceManifestNode(currentJob);
+ updateAutoIngestJobData(currentJob);
eventPublisher.publishRemotely(new AutoIngestJobStatusEvent(currentJob));
}
}
@@ -3256,12 +3129,6 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
}
- enum CaseDeletionResult {
- FAILED,
- PARTIALLY_DELETED,
- FULLY_DELETED
- }
-
static final class AutoIngestManagerException extends Exception {
private static final long serialVersionUID = 1L;
diff --git a/Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/AutoIngestMetricsCollector.java b/Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/AutoIngestMetricsCollector.java
index 7b07a15aec..402714a021 100644
--- a/Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/AutoIngestMetricsCollector.java
+++ b/Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/AutoIngestMetricsCollector.java
@@ -72,7 +72,7 @@ final class AutoIngestMetricsCollector {
switch (processingStatus) {
case PENDING:
case PROCESSING:
- case DELETED:
+ case DELETED: // No longer used, retained for legacy jobs only.
/*
* These are not jobs we care about for metrics, so
* we will ignore them.
@@ -96,7 +96,7 @@ final class AutoIngestMetricsCollector {
return newMetricsSnapshot;
- } catch (CoordinationService.CoordinationServiceException ex) {
+ } catch (CoordinationService.CoordinationServiceException | InterruptedException ex) {
LOGGER.log(Level.SEVERE, "Failed to get node list from coordination service", ex);
return new MetricsSnapshot();
}
diff --git a/Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/AutoIngestMonitor.java b/Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/AutoIngestMonitor.java
index 8f7a9c0696..9fbd222cad 100644
--- a/Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/AutoIngestMonitor.java
+++ b/Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/AutoIngestMonitor.java
@@ -21,7 +21,6 @@ package org.sleuthkit.autopsy.experimental.autoingest;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import java.beans.PropertyChangeEvent;
import java.beans.PropertyChangeListener;
-import java.nio.file.Path;
import java.time.Duration;
import java.time.Instant;
import java.util.ArrayList;
@@ -38,20 +37,15 @@ import java.util.concurrent.TimeUnit;
import java.util.logging.Level;
import java.util.stream.Collectors;
import javax.annotation.concurrent.GuardedBy;
-import org.sleuthkit.autopsy.casemodule.Case;
-import org.sleuthkit.autopsy.casemodule.CaseActionException;
-import org.sleuthkit.autopsy.casemodule.CaseMetadata;
import org.sleuthkit.autopsy.coordinationservice.CoordinationService;
import org.sleuthkit.autopsy.coordinationservice.CoordinationService.CoordinationServiceException;
import org.sleuthkit.autopsy.coreutils.Logger;
import org.sleuthkit.autopsy.coreutils.NetworkUtils;
-import org.sleuthkit.autopsy.coreutils.StopWatch;
import org.sleuthkit.autopsy.events.AutopsyEventException;
import org.sleuthkit.autopsy.events.AutopsyEventPublisher;
import org.sleuthkit.autopsy.experimental.autoingest.AutoIngestJob.ProcessingStatus;
import static org.sleuthkit.autopsy.experimental.autoingest.AutoIngestJob.ProcessingStatus.DELETED;
import static org.sleuthkit.autopsy.experimental.autoingest.AutoIngestJob.ProcessingStatus.PENDING;
-import org.sleuthkit.autopsy.experimental.autoingest.AutoIngestManager.CaseDeletionResult;
import org.sleuthkit.autopsy.experimental.autoingest.AutoIngestManager.Event;
import org.sleuthkit.autopsy.experimental.autoingest.AutoIngestNodeControlEvent.ControlEventType;
@@ -360,7 +354,7 @@ final class AutoIngestMonitor extends Observable implements PropertyChangeListen
case COMPLETED:
newJobsSnapshot.addOrReplaceCompletedJob(job);
break;
- case DELETED:
+ case DELETED: // No longer used, retained for legacy jobs only.
break;
default:
LOGGER.log(Level.SEVERE, "Unknown AutoIngestJobData.ProcessingStatus");
@@ -378,7 +372,7 @@ final class AutoIngestMonitor extends Observable implements PropertyChangeListen
return newJobsSnapshot;
- } catch (CoordinationServiceException ex) {
+ } catch (CoordinationServiceException | InterruptedException ex) {
LOGGER.log(Level.SEVERE, "Failed to get node list from coordination service", ex);
return new JobsSnapshot();
}
@@ -659,88 +653,6 @@ final class AutoIngestMonitor extends Observable implements PropertyChangeListen
}
}
- /**
- * Deletes a case. This includes deleting the case directory, the text
- * index, and the case database. This does not include the directories
- * containing the data sources and their manifests.
- *
- * @param job The job whose case you want to delete
- *
- * @return A result code indicating success, partial success, or failure.
- */
- CaseDeletionResult deleteCase(AutoIngestJob job) {
- String caseName = job.getManifest().getCaseName();
- Path caseDirectoryPath = job.getCaseDirectoryPath();
- Path metadataFilePath = caseDirectoryPath.resolve(caseName + CaseMetadata.getFileExtension());
- StopWatch stopWatch = new StopWatch();
- stopWatch.start();
- synchronized (jobsLock) {
- stopWatch.stop();
- LOGGER.log(Level.INFO, String.format("Used %d s to acquire jobsLock (Java monitor in AutoIngestMonitor class) for case %s at %s", stopWatch.getElapsedTimeSecs(), caseName, caseDirectoryPath));
- stopWatch.reset();
- stopWatch.start();
- try {
- CaseMetadata metadata = new CaseMetadata(metadataFilePath);
- stopWatch.stop();
- LOGGER.log(Level.INFO, String.format("Used %d s to read case metadata for case %s at %s", stopWatch.getElapsedTimeSecs(), caseName, caseDirectoryPath));
- stopWatch.reset();
- stopWatch.start();
- Case.deleteCase(metadata);
- } catch (CaseMetadata.CaseMetadataException ex) {
- LOGGER.log(Level.SEVERE, String.format("Failed to read case metadata file %s for case %s at %s", metadataFilePath, caseName, caseDirectoryPath), ex);
- stopWatch.stop();
- LOGGER.log(Level.INFO, String.format("Used %d s to fail to read case metadata file %s for case %s at %s", stopWatch.getElapsedTimeSecs(), metadataFilePath, caseName, caseDirectoryPath));
- return CaseDeletionResult.FAILED;
- } catch (CaseActionException ex) {
- LOGGER.log(Level.SEVERE, String.format("Failed to delete case %s at %s", caseName, caseDirectoryPath), ex);
- return CaseDeletionResult.FAILED;
- }
-
- // Update the state of completed jobs associated with this case to indicate
- // that the case has been deleted
- stopWatch.reset();
- stopWatch.start();
- List<AutoIngestJob> completedJobs = getCompletedJobs();
- stopWatch.stop();
- LOGGER.log(Level.INFO, String.format("Used %d s to get completed jobs listing for case %s at %s", stopWatch.getElapsedTimeSecs(), caseName, caseDirectoryPath));
- stopWatch.reset();
- stopWatch.start();
- for (AutoIngestJob completedJob : completedJobs) {
- if (caseName.equals(completedJob.getManifest().getCaseName())) {
- try {
- completedJob.setProcessingStatus(DELETED);
- AutoIngestJobNodeData nodeData = new AutoIngestJobNodeData(completedJob);
- coordinationService.setNodeData(CoordinationService.CategoryNode.MANIFESTS, completedJob.getManifest().getFilePath().toString(), nodeData.toArray());
- } catch (CoordinationServiceException | InterruptedException ex) {
- LOGGER.log(Level.SEVERE, String.format("Failed to update completed job node data for %s when deleting case %s at %s", completedJob.getManifest().getFilePath(), caseName, caseDirectoryPath), ex);
- stopWatch.stop();
- LOGGER.log(Level.INFO, String.format("Used %d s to fail to update job node data for completed jobs for case %s at %s", stopWatch.getElapsedTimeSecs(), caseName, caseDirectoryPath));
- return CaseDeletionResult.PARTIALLY_DELETED;
- }
- }
- }
- stopWatch.stop();
- LOGGER.log(Level.INFO, String.format("Used %d s to update job node data for completed jobs for case %s at %s", stopWatch.getElapsedTimeSecs(), caseName, caseDirectoryPath));
-
- // Remove jobs associated with this case from the completed jobs collection.
- stopWatch.reset();
- stopWatch.start();
- completedJobs.removeIf((AutoIngestJob completedJob)
- -> completedJob.getManifest().getCaseName().equals(caseName));
- stopWatch.stop();
- LOGGER.log(Level.INFO, String.format("Used %d s to remove completed jobs for case %s at %s from current jobs snapshot", stopWatch.getElapsedTimeSecs(), caseName, caseDirectoryPath));
-
- // Publish a message to update auto ingest nodes.
- stopWatch.reset();
- stopWatch.start();
- eventPublisher.publishRemotely(new AutoIngestCaseDeletedEvent(caseName, LOCAL_HOST_NAME, AutoIngestManager.getSystemUserNameProperty()));
- stopWatch.stop();
- LOGGER.log(Level.INFO, String.format("Used %d s to publish job deletion event for case %s at %s", stopWatch.getElapsedTimeSecs(), caseName, caseDirectoryPath));
- }
-
- return CaseDeletionResult.FULLY_DELETED;
- }
-
/**
* Send the given control event to the given node.
*
diff --git a/Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/Bundle.properties b/Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/Bundle.properties
index 0f074ca11c..8a66bb8764 100644
--- a/Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/Bundle.properties
+++ b/Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/Bundle.properties
@@ -228,7 +228,6 @@ AutoIngestControlPanel.bnShowProgress.text=Ingest Progress
AutoIngestControlPanel.bnCancelJob.text=&Cancel Job
AutoIngestControlPanel.bnCancelModule.text=Cancel &Module
AutoIngestControlPanel.bnReprocessJob.text=Reprocess Job
-AutoIngestControlPanel.bnDeleteCase.text=&Delete Case
AutoIngestControlPanel.bnShowCaseLog.text=Show Case &Log
AutoIngestControlPanel.bnPause.text=Pause
AutoIngestControlPanel.bnRefresh.text=&Refresh
@@ -255,3 +254,4 @@ AinStatusDashboard.clusterMetricsButton.text=Auto Ingest &Metrics
AinStatusDashboard.nodeStatusTableTitle.text=Auto Ingest Nodes
AinStatusDashboard.healthMonitorButton.text=Health Monitor
CasesDashboardTopComponent.refreshButton.text=Refresh
+AutoIngestCasesDeletionDialog.jLabel1.text=Progress
diff --git a/Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/Bundle.properties-MERGED b/Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/Bundle.properties-MERGED
index a7336e26a6..9dacb71384 100755
--- a/Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/Bundle.properties-MERGED
+++ b/Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/Bundle.properties-MERGED
@@ -10,8 +10,6 @@ AinStatusNode.status.title=Status
AinStatusNode.status.unknown=Unknown
AutoIngestAdminActions.cancelJobAction.title=Cancel Job
AutoIngestAdminActions.cancelModuleAction.title=Cancel Module
-AutoIngestAdminActions.deleteCaseAction.error=Failed to delete case.
-AutoIngestAdminActions.deleteCaseAction.title=Delete Case
AutoIngestAdminActions.pause.title=Pause Node
AutoIngestAdminActions.progressDialogAction.title=Ingest Progress
AutoIngestAdminActions.reprocessJobAction.error=Failed to reprocess job
@@ -56,7 +54,6 @@ AutoIngestControlPanel.Cancelling=Cancelling...
AutoIngestControlPanel.completedTable.toolTipText=The Completed table shows all Jobs that have been processed already
AutoIngestControlPanel.ConfigLocked=The shared configuration directory is locked because upload from another node is in progress. \nIf this is an error, you can unlock the directory and then retry the upload.
AutoIngestControlPanel.ConfigLockedTitle=Configuration directory locked
-AutoIngestControlPanel.DeletionFailed=Deletion failed for job
AutoIngestControlPanel.EnableConfigurationSettings=Enable shared configuration from the options panel before uploading
AutoIngestControlPanel.errorMessage.caseDeprioritization=An error occurred when deprioritizing the case. Some or all jobs may not have been deprioritized.
AutoIngestControlPanel.errorMessage.casePrioritization=An error occurred when prioritizing the case. Some or all jobs may not have been prioritized.
@@ -167,9 +164,43 @@ CTL_AutoIngestDashboardOpenAction=Auto Ingest Dashboard
CTL_AutoIngestDashboardTopComponent=Auto Ingest Jobs
CTL_CasesDashboardAction=Multi-User Cases Dashboard
CTL_CasesDashboardTopComponent=Cases
-DeleteCaseInputDirectoriesAction.menuItemText=Delete Input Directories
-DeleteCasesAction.menuItemText=Delete Case and Jobs
-DeleteCasesForReprocessingAction.menuItemText=Delete for Reprocessing
+DeleteCaseInputAction.confirmationText=Are you sure you want to delete the following for the case(s):\n\tManifest files\n\tData sources\n
+DeleteCaseInputAction.menuItemText=Delete Input
+DeleteCaseInputAction.progressDisplayName=Delete Input
+DeleteCaseInputAction.taskName=input
+DeleteCaseInputAndOutputAction.confirmationText=Are you sure you want to delete the following for the case(s):\n\tManifest files\n\tData sources\n\tManifest file znodes\n\tCase database\n\tCore.properties file\n\tCase directory\n\tCase znodes
+DeleteCaseInputAndOutputAction.menuItemText=Delete Input and Output
+DeleteCaseInputAndOutputAction.progressDisplayName=Delete Input and Output
+DeleteCaseInputAndOutputAction.taskName=input-and-output
+DeleteCaseOutputAction.confirmationText=Are you sure you want to delete the following for the case(s):\n\tManifest file znodes\n\tCase database\n\tCore.properties file\n\tCase directory\n\tCase znodes
+DeleteCaseOutputAction.menuItemText=Delete Output
+DeleteCaseOutputAction.progressDisplayName=Delete Output
+DeleteCaseOutputAction.taskName=output
+DeleteCaseTask.progress.acquiringCaseDirLock=Acquiring exclusive case directory lock...
+DeleteCaseTask.progress.acquiringCaseNameLock=Acquiring exclusive case name lock...
+DeleteCaseTask.progress.acquiringManifestLocks=Acquiring exclusive manifest file locks...
+DeleteCaseTask.progress.connectingToCoordSvc=Connecting to the coordination service...
+DeleteCaseTask.progress.deletingCaseDirCoordSvcNode=Deleting case directory znode...
+DeleteCaseTask.progress.deletingCaseNameCoordSvcNode=Deleting case name znode...
+# {0} - data source name
+# {1} - device id
+DeleteCaseTask.progress.deletingDataSource=Deleting data source {0} with device id {1}...
+DeleteCaseTask.progress.deletingJobLogLockNode=Deleting case auto ingest log znode...
+# {0} - manifest file path
+DeleteCaseTask.progress.deletingManifest=Deleting manifest file {0}...
+# {0} - manifest file path
+DeleteCaseTask.progress.deletingManifestFileNode=Deleting the manifest file znode for {0}...
+DeleteCaseTask.progress.deletingResourcesLockNode=Deleting case resources znode...
+DeleteCaseTask.progress.gettingManifestPaths=Getting manifest file paths...
+# {0} - manifest file path
+DeleteCaseTask.progress.lockingManifest=Locking manifest file {0}...
+DeleteCaseTask.progress.openingCaseDatabase=Opening the case database...
+DeleteCaseTask.progress.openingCaseMetadataFile=Opening case metadata file...
+# {0} - manifest file path
+DeleteCaseTask.progress.parsingManifest=Parsing manifest file {0}...
+# {0} - manifest file path
+DeleteCaseTask.progress.releasingManifestLock=Releasing lock on the manifest file {0}...
+DeleteCaseTask.progress.startMessage=Starting deletion...
HINT_CasesDashboardTopComponent=This is an adminstrative dashboard for multi-user cases
OpenAutoIngestLogAction.deletedLogErrorMsg=The case auto ingest log has been deleted.
OpenAutoIngestLogAction.logOpenFailedErrorMsg=Failed to open case auto ingest log. See application log for details.
@@ -286,7 +317,6 @@ PrioritizationAction.prioritizeJobAction.error=Failed to prioritize job "%s".
PrioritizationAction.prioritizeJobAction.title=Prioritize Job
PrioritizedIconCellRenderer.notPrioritized.tooltiptext=This job has not been prioritized.
PrioritizedIconCellRenderer.prioritized.tooltiptext=This job has been prioritized. The most recently prioritized job should be processed next.
-ShowCaseDeletionStatusAction.menuItemText=Show Deletion Status
SingleUserCaseImporter.NonUniqueOutputFolder=Output folder not unique. Skipping
SingleUserCaseImporter.WillImport=Will import:
SingleUserCaseImporter.None=None
@@ -380,7 +410,6 @@ AutoIngestControlPanel.bnShowProgress.text=Ingest Progress
AutoIngestControlPanel.bnCancelJob.text=&Cancel Job
AutoIngestControlPanel.bnCancelModule.text=Cancel &Module
AutoIngestControlPanel.bnReprocessJob.text=Reprocess Job
-AutoIngestControlPanel.bnDeleteCase.text=&Delete Case
AutoIngestControlPanel.bnShowCaseLog.text=Show Case &Log
AutoIngestControlPanel.bnPause.text=Pause
AutoIngestControlPanel.bnRefresh.text=&Refresh
@@ -407,3 +436,4 @@ AinStatusDashboard.clusterMetricsButton.text=Auto Ingest &Metrics
AinStatusDashboard.nodeStatusTableTitle.text=Auto Ingest Nodes
AinStatusDashboard.healthMonitorButton.text=Health Monitor
CasesDashboardTopComponent.refreshButton.text=Refresh
+AutoIngestCasesDeletionDialog.jLabel1.text=Progress
diff --git a/Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/CasesDashboardCustomizer.java b/Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/CasesDashboardCustomizer.java
index 498a4e67ad..56545625a9 100755
--- a/Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/CasesDashboardCustomizer.java
+++ b/Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/CasesDashboardCustomizer.java
@@ -31,23 +31,26 @@ import org.sleuthkit.autopsy.casemodule.multiusercasesbrowser.MultiUserCaseBrows
*/
final class CasesDashboardCustomizer implements MultiUserCaseBrowserCustomizer {
- private final DeleteCaseInputDirectoriesAction deleteCaseInputAction;
- private final DeleteCasesForReprocessingAction deleteCaseOutputAction;
- private final DeleteCasesAction deleteCaseAction;
+ private final DeleteCaseInputAction deleteCaseInputAction;
+ private final DeleteCaseOutputAction deleteCaseOutputAction;
+ private final DeleteCaseInputAndOutputAction deleteCaseAction;
/**
* Constructs a customizer for the multi-user case browser panel used in the
* administrative dashboard for auto ingest cases to present a tabular view
* of the multi-user cases known to the coordination service.
+ *
+ * Note: each case deletion action constructs and manages its own
+ * background task executor.
*/
CasesDashboardCustomizer() {
/*
* These actions are shared by all nodes in order to support multiple
* selection.
*/
- deleteCaseInputAction = new DeleteCaseInputDirectoriesAction();
- deleteCaseOutputAction = new DeleteCasesForReprocessingAction();
- deleteCaseAction = new DeleteCasesAction();
+ deleteCaseInputAction = new DeleteCaseInputAction();
+ deleteCaseOutputAction = new DeleteCaseOutputAction();
+ deleteCaseAction = new DeleteCaseInputAndOutputAction();
}
@Override
@@ -56,6 +59,11 @@ final class CasesDashboardCustomizer implements MultiUserCaseBrowserCustomizer {
properties.add(Column.CREATE_DATE);
properties.add(Column.LAST_ACCESS_DATE);
properties.add(Column.DIRECTORY);
+ properties.add(Column.MANIFEST_FILE_ZNODES_DELETE_STATUS);
+ properties.add(Column.DATA_SOURCES_DELETE_STATUS);
+ properties.add(Column.TEXT_INDEX_DELETE_STATUS);
+ properties.add(Column.CASE_DB_DELETE_STATUS);
+ properties.add(Column.CASE_DIR_DELETE_STATUS);
return properties;
}
@@ -79,7 +87,6 @@ final class CasesDashboardCustomizer implements MultiUserCaseBrowserCustomizer {
actions.add(deleteCaseInputAction);
actions.add(deleteCaseOutputAction);
actions.add(deleteCaseAction);
- actions.add(new ShowCaseDeletionStatusAction(nodeData));
return actions;
}
diff --git a/Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/CasesDashboardTopComponent.java b/Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/CasesDashboardTopComponent.java
index 5df5dba929..ec2d1c0c17 100755
--- a/Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/CasesDashboardTopComponent.java
+++ b/Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/CasesDashboardTopComponent.java
@@ -34,7 +34,7 @@ import org.sleuthkit.autopsy.coreutils.Logger;
*/
@TopComponent.Description(
preferredID = "CasesDashboardTopComponent",
- persistenceType = TopComponent.PERSISTENCE_ALWAYS
+ persistenceType = TopComponent.PERSISTENCE_NEVER
)
@TopComponent.Registration(
mode = "dashboard",
@@ -62,8 +62,6 @@ public final class CasesDashboardTopComponent extends TopComponent implements Ex
* for multi-user cases. The top component is docked into the "dashboard
* mode" defined by the auto ingest jobs top component.
*/
- // RJCTODO: Consider moving all of the dashboard code into its own
- // admindashboards or dashboards package.
public static void openTopComponent() {
CasesDashboardTopComponent topComponent = (CasesDashboardTopComponent) WindowManager.getDefault().findTopComponent("CasesDashboardTopComponent"); // NON-NLS
if (topComponent == null) {
diff --git a/Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/DeleteCaseAction.java b/Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/DeleteCaseAction.java
new file mode 100755
index 0000000000..176ced102a
--- /dev/null
+++ b/Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/DeleteCaseAction.java
@@ -0,0 +1,106 @@
+/*
+ * Autopsy Forensic Browser
+ *
+ * Copyright 2019-2019 Basis Technology Corp. Contact: carrier <at> sleuthkit
+ * <dot> org
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.sleuthkit.autopsy.experimental.autoingest;
+
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import java.awt.event.ActionEvent;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import javax.swing.AbstractAction;
+import org.openide.util.Utilities;
+import org.sleuthkit.autopsy.casemodule.multiusercases.CaseNodeData;
+import org.sleuthkit.autopsy.progress.AppFrameProgressBar;
+import org.sleuthkit.autopsy.progress.TaskCancellable;
+import org.sleuthkit.autopsy.progress.ProgressIndicator;
+
+/**
+ * An abstract class for an action that deletes one or more auto ingest cases
+ * using a thread pool, one task per case. Uses the Template Method design
+ * pattern to allow subclasses to specify the deletion task to be performed.
+ *
+ * The cases to delete are discovered by querying the actions global context
+ * lookup for CaseNodeData objects. See
+ * https://platform.netbeans.org/tutorials/nbm-selection-1.html and
+ * https://platform.netbeans.org/tutorials/nbm-selection-2.html for details.
+ */
+abstract class DeleteCaseAction extends AbstractAction {
+
+ private static final long serialVersionUID = 1L;
+ private static final int NUMBER_OF_THREADS = 4;
+ private static final String THREAD_NAME_SUFFIX = "-task-%d"; //NON-NLS
+ private static final String PROGRESS_DISPLAY_NAME = "%s for %s"; //NON-NLS
+ private final String taskDisplayName;
+ private final ExecutorService executor;
+
+ /**
+ * Constructs an abstract class for an action that deletes one or more auto
+ * ingest cases using a thread pool, one task per case. Uses the Template
+ * Method design pattern to allow subclasses to specify the deletion task to
+ * be performed.
+ *
+ * @param menuItemText The menu item text for the action.
+ * @param taskDisplayName The task display name for the progress indicator
+ * for the task, to be inserted in the first position
+ * of "%s for %s", where the second substitution is
+ * the case name.
+ * @param taskName The task name, to be inserted in the first
+ * position of "%s-task-%d", where the second
+ * substitution is the pool thread number.
+ */
+ DeleteCaseAction(String menuItemText, String taskDisplayName, String taskName) {
+ super(menuItemText);
+ this.taskDisplayName = taskDisplayName;
+ String threadNameFormat = taskName + THREAD_NAME_SUFFIX;
+ executor = Executors.newFixedThreadPool(NUMBER_OF_THREADS, new ThreadFactoryBuilder().setNameFormat(threadNameFormat).build());
+ }
+
+ @Override
+ public void actionPerformed(ActionEvent event) {
+ Collection<CaseNodeData> selectedNodeData = new ArrayList<>(Utilities.actionsGlobalContext().lookupAll(CaseNodeData.class));
+ for (CaseNodeData nodeData : selectedNodeData) {
+ AppFrameProgressBar progress = new AppFrameProgressBar(String.format(PROGRESS_DISPLAY_NAME, taskDisplayName, nodeData.getDisplayName()));
+ TaskCancellable taskCanceller = new TaskCancellable(progress);
+ progress.setCancellationBehavior(taskCanceller);
+ Future<?> future = executor.submit(getTask(nodeData, progress));
+ taskCanceller.setFuture(future);
+ }
+ }
+
+ /**
+ * Uses the Template Method design pattern to allow subclasses to specify
+ * the deletion task to be performed in a worker thread by this action.
+ *
+ * @param caseNodeData The case directory lock coordination service node
+ * data for the case to be deleted.
+ * @param progress A progress indicator for the task.
+ *
+ * @return A case deletion task, ready to be executed.
+ */
+ abstract DeleteCaseTask getTask(CaseNodeData caseNodeData, ProgressIndicator progress);
+
+ @Override
+ public DeleteCaseAction clone() throws CloneNotSupportedException {
+ super.clone();
+ throw new CloneNotSupportedException();
+ }
+
+}
diff --git a/Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/DeleteCaseInputDirectoriesAction.java b/Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/DeleteCaseInputAction.java
similarity index 60%
rename from Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/DeleteCaseInputDirectoriesAction.java
rename to Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/DeleteCaseInputAction.java
index aab830be8f..689b7311d5 100755
--- a/Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/DeleteCaseInputDirectoriesAction.java
+++ b/Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/DeleteCaseInputAction.java
@@ -19,12 +19,11 @@
package org.sleuthkit.autopsy.experimental.autoingest;
import java.awt.event.ActionEvent;
-import java.util.ArrayList;
-import java.util.Collection;
-import javax.swing.AbstractAction;
import org.openide.util.NbBundle;
-import org.openide.util.Utilities;
import org.sleuthkit.autopsy.casemodule.multiusercases.CaseNodeData;
+import org.sleuthkit.autopsy.coreutils.MessageNotifyUtil;
+import org.sleuthkit.autopsy.experimental.autoingest.DeleteCaseTask.DeleteOptions;
+import org.sleuthkit.autopsy.progress.ProgressIndicator;
/**
* An action that deletes the auto ingest job input directories associated with
@@ -32,13 +31,8 @@ import org.sleuthkit.autopsy.casemodule.multiusercases.CaseNodeData;
* ingest jobs are not deleted. This supports the use case where the directories
* may need to be directed to reclaim space, but the option to restore the
* directories without having the jobs be reprocessed is retained.
- *
- * This cases to delete are discovered by querying the actions global context
- * lookup for CaseNodeData objects. See
- * https://platform.netbeans.org/tutorials/nbm-selection-1.html and
- * https://platform.netbeans.org/tutorials/nbm-selection-2.html for details.
*/
-final class DeleteCaseInputDirectoriesAction extends AbstractAction {
+final class DeleteCaseInputAction extends DeleteCaseAction {
private static final long serialVersionUID = 1L;
@@ -51,27 +45,27 @@ final class DeleteCaseInputDirectoriesAction extends AbstractAction {
* reprocessed is retained.
*/
@NbBundle.Messages({
- "DeleteCaseInputDirectoriesAction.menuItemText=Delete Input Directories"
+ "DeleteCaseInputAction.menuItemText=Delete Input",
+ "DeleteCaseInputAction.progressDisplayName=Delete Input",
+ "DeleteCaseInputAction.taskName=input"
})
- DeleteCaseInputDirectoriesAction() {
- super(Bundle.DeleteCaseInputDirectoriesAction_menuItemText());
- setEnabled(false); // RJCTODO: Enable when implemented
+ DeleteCaseInputAction() {
+ super(Bundle.DeleteCaseInputAction_menuItemText(), Bundle.DeleteCaseInputAction_progressDisplayName(), Bundle.DeleteCaseInputAction_taskName());
}
+ @NbBundle.Messages({
+ "DeleteCaseInputAction.confirmationText=Are you sure you want to delete the following for the case(s):\n\tManifest files\n\tData sources\n"
+ })
@Override
public void actionPerformed(ActionEvent event) {
- final Collection<CaseNodeData> selectedNodeData = new ArrayList<>(Utilities.actionsGlobalContext().lookupAll(CaseNodeData.class));
-// if (!selectedNodeData.isEmpty()) {
-// /*
-// * RJCTODO: Create a background task that does the deletion and
-// * displays results in a dialog with a scrolling text pane.
-// */
-// }
+ if (MessageNotifyUtil.Message.confirm(Bundle.DeleteCaseInputAction_confirmationText())) {
+ super.actionPerformed(event);
+ }
}
@Override
- public DeleteCaseInputDirectoriesAction clone() throws CloneNotSupportedException {
- throw new CloneNotSupportedException();
+ DeleteCaseTask getTask(CaseNodeData caseNodeData, ProgressIndicator progress) {
+ return new DeleteCaseTask(caseNodeData, DeleteOptions.DELETE_INPUT, progress);
}
}
diff --git a/Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/DeleteCaseInputAndOutputAction.java b/Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/DeleteCaseInputAndOutputAction.java
new file mode 100755
index 0000000000..0e38e85f40
--- /dev/null
+++ b/Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/DeleteCaseInputAndOutputAction.java
@@ -0,0 +1,67 @@
+/*
+ * Autopsy Forensic Browser
+ *
+ * Copyright 2019-2019 Basis Technology Corp.
+ * Contact: carrier <at> sleuthkit <dot> org
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.sleuthkit.autopsy.experimental.autoingest;
+
+import java.awt.event.ActionEvent;
+import org.openide.util.NbBundle;
+import org.openide.util.NbBundle.Messages;
+import org.sleuthkit.autopsy.casemodule.multiusercases.CaseNodeData;
+import org.sleuthkit.autopsy.coreutils.MessageNotifyUtil;
+import org.sleuthkit.autopsy.experimental.autoingest.DeleteCaseTask.DeleteOptions;
+import org.sleuthkit.autopsy.progress.ProgressIndicator;
+
+/**
+ * An action that completely deletes one or more multi-user cases, including any
+ * associated auto ingest job input directories and all coordination service
+ * nodes.
+ */
+final class DeleteCaseInputAndOutputAction extends DeleteCaseAction {
+
+ private static final long serialVersionUID = 1L;
+
+ /**
+ * Constructs an action that completely deletes one or more multi-user
+ * cases, including any associated auto ingest job input directories and
+ * coordination service nodes.
+ */
+ @Messages({
+ "DeleteCaseInputAndOutputAction.menuItemText=Delete Input and Output",
+ "DeleteCaseInputAndOutputAction.progressDisplayName=Delete Input and Output",
+ "DeleteCaseInputAndOutputAction.taskName=input-and-output"
+ })
+ DeleteCaseInputAndOutputAction() {
+ super(Bundle.DeleteCaseInputAndOutputAction_menuItemText(), Bundle.DeleteCaseInputAndOutputAction_progressDisplayName(), Bundle.DeleteCaseInputAndOutputAction_taskName());
+ }
+
+ @NbBundle.Messages({
+ "DeleteCaseInputAndOutputAction.confirmationText=Are you sure you want to delete the following for the case(s):\n\tManifest files\n\tData sources\n\tManifest file znodes\n\tCase database\n\tCore.properties file\n\tCase directory\n\tCase znodes"
+ })
+ @Override
+ public void actionPerformed(ActionEvent event) {
+ if (MessageNotifyUtil.Message.confirm(Bundle.DeleteCaseInputAndOutputAction_confirmationText())) {
+ super.actionPerformed(event);
+ }
+ }
+
+ @Override
+ DeleteCaseTask getTask(CaseNodeData caseNodeData, ProgressIndicator progress) {
+ return new DeleteCaseTask(caseNodeData, DeleteOptions.DELETE_ALL, progress);
+ }
+
+}
diff --git a/Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/DeleteCasesForReprocessingAction.java b/Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/DeleteCaseOutputAction.java
similarity index 58%
rename from Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/DeleteCasesForReprocessingAction.java
rename to Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/DeleteCaseOutputAction.java
index 9a0c4d50fb..ceb2abaa4b 100755
--- a/Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/DeleteCasesForReprocessingAction.java
+++ b/Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/DeleteCaseOutputAction.java
@@ -19,12 +19,11 @@
package org.sleuthkit.autopsy.experimental.autoingest;
import java.awt.event.ActionEvent;
-import java.util.ArrayList;
-import java.util.Collection;
-import javax.swing.AbstractAction;
import org.openide.util.NbBundle;
-import org.openide.util.Utilities;
import org.sleuthkit.autopsy.casemodule.multiusercases.CaseNodeData;
+import org.sleuthkit.autopsy.coreutils.MessageNotifyUtil;
+import org.sleuthkit.autopsy.experimental.autoingest.DeleteCaseTask.DeleteOptions;
+import org.sleuthkit.autopsy.progress.ProgressIndicator;
/**
* An action that deletes everything except the auto ingest job input
@@ -32,13 +31,8 @@ import org.sleuthkit.autopsy.casemodule.multiusercases.CaseNodeData;
* where a case needs to be reprocessed, so the input directories are not
* deleted even though the coordination service nodes for the auto ingest jobs
* are deleted.
- *
- * This cases to delete are discovered by querying the actions global context
- * lookup for CaseNodeData objects. See
- * https://platform.netbeans.org/tutorials/nbm-selection-1.html and
- * https://platform.netbeans.org/tutorials/nbm-selection-2.html for details.
*/
-final class DeleteCasesForReprocessingAction extends AbstractAction {
+final class DeleteCaseOutputAction extends DeleteCaseAction {
private static final long serialVersionUID = 1L;
@@ -50,27 +44,27 @@ final class DeleteCasesForReprocessingAction extends AbstractAction {
* ingest jobs are deleted.
*/
@NbBundle.Messages({
- "DeleteCasesForReprocessingAction.menuItemText=Delete for Reprocessing"
+ "DeleteCaseOutputAction.menuItemText=Delete Output",
+ "DeleteCaseOutputAction.progressDisplayName=Delete Output",
+ "DeleteCaseOutputAction.taskName=output"
})
- DeleteCasesForReprocessingAction() {
- super(Bundle.DeleteCasesForReprocessingAction_menuItemText());
- setEnabled(false); // RJCTODO: Enable when implemented
+ DeleteCaseOutputAction() {
+ super(Bundle.DeleteCaseOutputAction_menuItemText(), Bundle.DeleteCaseOutputAction_progressDisplayName(), Bundle.DeleteCaseOutputAction_taskName());
}
+ @NbBundle.Messages({
+ "DeleteCaseOutputAction.confirmationText=Are you sure you want to delete the following for the case(s):\n\tManifest file znodes\n\tCase database\n\tCore.properties file\n\tCase directory\n\tCase znodes"
+ })
@Override
public void actionPerformed(ActionEvent event) {
- final Collection selectedNodeData = new ArrayList<>(Utilities.actionsGlobalContext().lookupAll(CaseNodeData.class));
-// if (!selectedNodeData.isEmpty()) {
-// /*
-// * RJCTODO: Create a background task that does the deletion and
-// * displays results in a dialog with a scrolling text pane.
-// */
-// }
- }
-
+ if (MessageNotifyUtil.Message.confirm(Bundle.DeleteCaseOutputAction_confirmationText())) {
+ super.actionPerformed(event);
+ }
+ }
+
@Override
- public DeleteCasesForReprocessingAction clone() throws CloneNotSupportedException {
- throw new CloneNotSupportedException();
+ DeleteCaseTask getTask(CaseNodeData caseNodeData, ProgressIndicator progress) {
+ return new DeleteCaseTask(caseNodeData, DeleteOptions.DELETE_OUTPUT, progress);
}
}
diff --git a/Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/DeleteCaseTask.java b/Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/DeleteCaseTask.java
new file mode 100755
index 0000000000..31447ce2a1
--- /dev/null
+++ b/Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/DeleteCaseTask.java
@@ -0,0 +1,809 @@
+/*
+ * Autopsy Forensic Browser
+ *
+ * Copyright 2019-2019 Basis Technology Corp.
+ * Contact: carrier sleuthkit org
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.sleuthkit.autopsy.experimental.autoingest;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Scanner;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+import java.util.logging.Level;
+import org.openide.util.Lookup;
+import org.openide.util.NbBundle;
+import org.openide.util.NbBundle.Messages;
+import org.sleuthkit.datamodel.SleuthkitCase;
+import org.sleuthkit.autopsy.casemodule.Case;
+import org.sleuthkit.autopsy.casemodule.CaseMetadata;
+import org.sleuthkit.autopsy.casemodule.multiusercases.CaseNodeData;
+import org.sleuthkit.autopsy.casemodule.multiusercases.CoordinationServiceUtils;
+import org.sleuthkit.autopsy.coordinationservice.CoordinationService;
+import org.sleuthkit.autopsy.coordinationservice.CoordinationService.CategoryNode;
+import org.sleuthkit.autopsy.coordinationservice.CoordinationService.CoordinationServiceException;
+import org.sleuthkit.autopsy.coordinationservice.CoordinationService.Lock;
+import org.sleuthkit.autopsy.core.UserPreferences;
+import org.sleuthkit.autopsy.core.UserPreferencesException;
+import org.sleuthkit.autopsy.coreutils.FileUtil;
+import org.sleuthkit.autopsy.progress.ProgressIndicator;
+import org.sleuthkit.autopsy.coreutils.Logger;
+import org.sleuthkit.autopsy.experimental.autoingest.AutoIngestJobNodeData.InvalidDataException;
+import org.sleuthkit.datamodel.DataSource;
+import org.sleuthkit.datamodel.Image;
+import org.sleuthkit.datamodel.TskCoreException;
+
+/**
+ * A task that deletes part or all of a given case. Note that all logging is
+ * directed to the dedicated auto ingest dashboard log instead of to the general
+ * application log.
+ */
+final class DeleteCaseTask implements Runnable {
+
+ private static final int MANIFEST_FILE_LOCKING_TIMEOUT_MINS = 5;
+ private static final int MANIFEST_DELETE_TRIES = 3;
+ private static final String NO_NODE_ERROR_MSG_FRAGMENT = "KeeperErrorCode = NoNode";
+ private static final Logger logger = AutoIngestDashboardLogger.getLogger();
+ private final CaseNodeData caseNodeData;
+ private final DeleteOptions deleteOption;
+ private final ProgressIndicator progress;
+ private final List manifestFilePaths;
+ private final List manifestFileLocks;
+ private CoordinationService coordinationService;
+ private CaseMetadata caseMetadata;
+
+ /**
+ * Options to support implementing different case deletion use cases.
+ */
+ enum DeleteOptions {
+ /**
+ * Delete the auto ingest job manifests and corresponding data sources,
+ * while leaving the manifest file coordination service nodes and the
+ * rest of the case intact. The use case is freeing auto ingest input
+ * directory space while retaining the option to restore the data
+ * sources, effectively restoring the case.
+ */
+ DELETE_INPUT,
+ /**
+ * Delete the manifest file coordination service nodes and the output
+ * for a case, while leaving the auto ingest job manifests and
+ * corresponding data sources intact. The use case is auto ingest
+ * reprocessing of a case with a clean slate without having to restore
+ * the manifests and data sources.
+ */
+ DELETE_OUTPUT,
+ /**
+ * Delete everything.
+ */
+ DELETE_ALL
+ }
+
+ /**
+ * Constructs a task that deletes part or all of a given case. Note that all
+ * logging is directed to the dedicated auto ingest dashboard log instead of
+ * to the general application log.
+ *
+ * @param caseNodeData The case directory coordination service node data for
+ * the case.
+ * @param deleteOption The deletion option for the task.
+ * @param progress A progress indicator.
+ */
+ DeleteCaseTask(CaseNodeData caseNodeData, DeleteOptions deleteOption, ProgressIndicator progress) {
+ this.caseNodeData = caseNodeData;
+ this.deleteOption = deleteOption;
+ this.progress = progress;
+ manifestFilePaths = new ArrayList<>();
+ manifestFileLocks = new ArrayList<>();
+ }
+
+ @Override
+ @NbBundle.Messages({
+ "DeleteCaseTask.progress.startMessage=Starting deletion..."
+ })
+ public void run() {
+ try {
+ progress.start(Bundle.DeleteCaseTask_progress_startMessage());
+ logger.log(Level.INFO, String.format("Starting deletion of %s (%s)", caseNodeData.getDisplayName(), deleteOption));
+ deleteCase();
+ logger.log(Level.INFO, String.format("Finished deletion of %s (%s)", caseNodeData.getDisplayName(), deleteOption));
+
+ } catch (CoordinationServiceException | IOException ex) {
+ logger.log(Level.SEVERE, String.format("Error deleting %s (%s) in %s", caseNodeData.getDisplayName(), caseNodeData.getName(), caseNodeData.getDirectory()), ex);
+
+ } catch (InterruptedException ex) {
+ logger.log(Level.WARNING, String.format("Deletion of %s cancelled while incomplete", caseNodeData.getDisplayName()), ex);
+ Thread.currentThread().interrupt();
+
+ } catch (Exception ex) {
+ /*
+ * This is an unexpected runtime exceptions firewall. It is here
+ * because this task is designed to be able to be run in scenarios
+ * where there is no call to get() on a Future associated with
+ * the task, so this ensures that any such errors get logged.
+ */
+ logger.log(Level.SEVERE, String.format("Unexpected error deleting %s", caseNodeData.getDisplayName()), ex);
+ throw ex;
+
+ } finally {
+ releaseManifestFileLocks();
+ progress.finish();
+ }
+ }
+
+ /**
+ * Deletes part or all of the given case.
+ *
+ * @throws InterruptedException If the thread in which this task is running
+ * is interrupted while blocked waiting for a
+ * coordination service operation to complete.
+ */
+ @NbBundle.Messages({
+ "DeleteCaseTask.progress.connectingToCoordSvc=Connecting to the coordination service...",
+ "DeleteCaseTask.progress.acquiringCaseNameLock=Acquiring exclusive case name lock...",
+ "DeleteCaseTask.progress.acquiringCaseDirLock=Acquiring exclusive case directory lock...",
+ "DeleteCaseTask.progress.gettingManifestPaths=Getting manifest file paths...",
+ "DeleteCaseTask.progress.acquiringManifestLocks=Acquiring exclusive manifest file locks...",
+ "DeleteCaseTask.progress.openingCaseMetadataFile=Opening case metadata file...",
+ "DeleteCaseTask.progress.deletingResourcesLockNode=Deleting case resources znode...",
+ "DeleteCaseTask.progress.deletingJobLogLockNode=Deleting case auto ingest log znode...",
+ "DeleteCaseTask.progress.deletingCaseDirCoordSvcNode=Deleting case directory znode...",
+ "DeleteCaseTask.progress.deletingCaseNameCoordSvcNode=Deleting case name znode..."
+ })
+ private void deleteCase() throws CoordinationServiceException, IOException, InterruptedException {
+ progress.progress(Bundle.DeleteCaseTask_progress_connectingToCoordSvc());
+ logger.log(Level.INFO, String.format("Connecting to the coordination service for deletion of %s", caseNodeData.getDisplayName()));
+ coordinationService = CoordinationService.getInstance();
+ checkForCancellation();
+
+ /*
+ * Acquire an exclusive case name lock. The case name lock is the lock
+ * that auto ingest node (AIN) job processing tasks acquire exclusively
+ * when creating or opening a case specified in an auto ingest job
+ * manifest file. The reason AINs do this is to ensure that only one of
+ * them at a time can search the auto ingest output directory for an
+ * existing case matching the one in the manifest file. If a matching
+ * case is found, it is opened, otherwise the case is created. Acquiring
+ * this lock effectively disables this AIN job processing task behavior
+ * while the case is being deleted.
+ */
+ progress.progress(Bundle.DeleteCaseTask_progress_acquiringCaseNameLock());
+ logger.log(Level.INFO, String.format("Acquiring an exclusive case name lock for %s", caseNodeData.getDisplayName()));
+ String caseNameLockName = CoordinationServiceUtils.getCaseNameNodePath(caseNodeData.getDirectory());
+ try (CoordinationService.Lock nameLock = coordinationService.tryGetExclusiveLock(CategoryNode.CASES, caseNameLockName)) {
+ if (nameLock == null) {
+ logger.log(Level.INFO, String.format("Could not delete %s because a case name lock was already held by another host", caseNodeData.getDisplayName()));
+ return;
+ }
+ checkForCancellation();
+
+ /*
+ * Acquire an exclusive case directory lock. A shared case directory
+ * lock is acquired by each auto ingest node (AIN) and examiner node
+ * (EIN) when it opens a case. The shared locks are held by the AINs
+ * and EINs for as long as they have the case open. Acquiring this
+ * lock exclusively ensures that no AIN or EIN has the case to be
+ * deleted open and prevents another node from trying to open the
+ * case while it is being deleted.
+ */
+ progress.progress(Bundle.DeleteCaseTask_progress_acquiringCaseDirLock());
+ logger.log(Level.INFO, String.format("Acquiring an exclusive case directory lock for %s", caseNodeData.getDisplayName()));
+ String caseDirLockName = CoordinationServiceUtils.getCaseDirectoryNodePath(caseNodeData.getDirectory());
+ try (CoordinationService.Lock caseDirLock = coordinationService.tryGetExclusiveLock(CoordinationService.CategoryNode.CASES, caseDirLockName)) {
+ if (caseDirLock == null) {
+ logger.log(Level.INFO, String.format("Could not delete %s because a case directory lock was already held by another host", caseNodeData.getDisplayName()));
+ return;
+ }
+ checkForCancellation();
+
+ getManifestFilePaths();
+ checkForCancellation();
+ /*
+ * Acquire exclusive locks for the auto ingest job manifest
+ * files for the case, if any. Manifest file locks are acquired
+ * by the auto ingest node (AIN) input directory scanning tasks
+ * when they look for auto ingest jobs to enqueue, and by the
+ * AIN job execution tasks when they do a job. Acquiring these
+ * locks here ensures that the scanning tasks and job execution
+ * tasks cannot do anything with the auto ingest jobs for a case
+ * during case deletion.
+ */
+ if (!acquireManifestFileLocks()) {
+ logger.log(Level.INFO, String.format("Could not delete %s because at least one manifest file lock was already held by another host", caseNodeData.getDisplayName()));
+ return;
+ }
+ checkForCancellation();
+ deleteCaseContents();
+ checkForCancellation();
+ deleteCaseResourcesNode();
+ checkForCancellation();
+ deleteCaseAutoIngestLogNode();
+ checkForCancellation();
+ deleteManifestFileNodes();
+ checkForCancellation();
+ }
+ deleteCaseDirectoryNode();
+ checkForCancellation();
+ }
+ deleteCaseNameNode();
+ }
+
+ /**
+ * Gets the manifest file paths for the case, if there are any.
+ *
+ * @throws CoordinationServiceException If there is an error completing a
+ * coordination service operation.
+ * @throws InterruptedException If the thread in which this task is
+ * running is interrupted while blocked
+ * waiting for a coordination service
+ * operation to complete.
+ * @throws IOException If there is an error reading the
+ * manifests list file.
+ */
+ private void getManifestFilePaths() throws IOException, CoordinationServiceException, InterruptedException {
+ progress.progress(Bundle.DeleteCaseTask_progress_gettingManifestPaths());
+ logger.log(Level.INFO, String.format("Getting manifest file paths for %s", caseNodeData.getDisplayName()));
+ final Path manifestsListFilePath = Paths.get(caseNodeData.getDirectory().toString(), AutoIngestManager.getCaseManifestsListFileName());
+ final File manifestListsFile = manifestsListFilePath.toFile();
+ if (manifestListsFile.exists()) {
+ getManifestPathsFromFile(manifestsListFilePath);
+ } else {
+ getManifestPathsFromNodes();
+ }
+ if (manifestFilePaths.isEmpty()) {
+ setDeletedItemFlag(CaseNodeData.DeletedFlags.MANIFEST_FILE_NODES);
+ }
+ logger.log(Level.INFO, String.format("Found %d manifest file path(s) for %s", manifestFilePaths.size(), caseNodeData.getDisplayName()));
+ }
+
+ /**
+ * Gets a list of the manifest file paths for the case by reading them from
+ * the manifests list file for the case.
+ *
+ * @param manifestsListFilePath The path of the manifests list file.
+ *
+ * @throws IOException If there is an error reading the manifests
+ * list file.
+ * @throws InterruptedException If the thread in which this task is running
+ * is interrupted while blocked waiting for a
+ * coordination service operation to complete.
+ */
+ private void getManifestPathsFromFile(Path manifestsListFilePath) throws IOException, InterruptedException {
+ try (final Scanner manifestsListFileScanner = new Scanner(manifestsListFilePath)) {
+ while (manifestsListFileScanner.hasNextLine()) {
+ checkForCancellation();
+ final Path manifestFilePath = Paths.get(manifestsListFileScanner.nextLine());
+ if (manifestFilePath.toFile().exists()) {
+ manifestFilePaths.add(manifestFilePath);
+ }
+ }
+ }
+ }
+
+ /**
+ * Gets a list of the manifest file paths for the case by sifting through
+ * the node data of the manifest file coordination service nodes and
+ * matching on case name.
+ *
+ * @throws CoordinationServiceException If there is an error completing a
+ * coordination service operation.
+ * @throws InterruptedException If the thread in which this task is
+ * running is interrupted while blocked
+ * waiting for a coordination service
+ * operation to complete.
+ */
+ private void getManifestPathsFromNodes() throws CoordinationServiceException, InterruptedException {
+ /*
+ * Get the original, undecorated case name from the case directory. This
+ * is necessary because the case display name can be changed and the
+ * original case name may have a time stamp added to make it unique,
+ * depending on how the case was created. An alternative approach would
+ * be to strip off any time stamp from the case name in the case node
+ * data.
+ */
+ String caseName = CoordinationServiceUtils.getCaseNameNodePath(caseNodeData.getDirectory());
+ final List nodeNames = coordinationService.getNodeList(CoordinationService.CategoryNode.MANIFESTS);
+ for (String manifestNodeName : nodeNames) {
+ checkForCancellation();
+ try {
+ final byte[] nodeBytes = coordinationService.getNodeData(CoordinationService.CategoryNode.MANIFESTS, manifestNodeName);
+ AutoIngestJobNodeData nodeData = new AutoIngestJobNodeData(nodeBytes);
+ if (caseName.equals(nodeData.getCaseName())) {
+ Path manifestFilePath = nodeData.getManifestFilePath();
+ if (manifestFilePath.toFile().exists()) {
+ manifestFilePaths.add(manifestFilePath);
+ }
+ }
+ } catch (CoordinationServiceException | InvalidDataException ex) {
+ logger.log(Level.WARNING, String.format("Error getting coordination service node data from %s", manifestNodeName), ex);
+ }
+ }
+ }
+
+ /**
+ * Acquires either all or none of the auto ingest job manifest file locks
+ * for a case.
+ *
+ * @return True if all of the locks were acquired; false otherwise.
+ *
+ * @throws CoordinationServiceException If there is an error completing a
+ * coordination service operation.
+ * @throws InterruptedException If the thread in which this task is
+ * running is interrupted while blocked
+ * waiting for a coordination service
+ * operation to complete.
+ */
+ @NbBundle.Messages({
+ "# {0} - manifest file path", "DeleteCaseTask.progress.lockingManifest=Locking manifest file {0}..."
+ })
+ private boolean acquireManifestFileLocks() throws IOException, CoordinationServiceException, InterruptedException {
+ boolean allLocksAcquired = true;
+ if (!manifestFilePaths.isEmpty()) {
+ progress.progress(Bundle.DeleteCaseTask_progress_acquiringManifestLocks());
+ logger.log(Level.INFO, String.format("Acquiring exclusive manifest file locks for %s", caseNodeData.getDisplayName()));
+ /*
+ * When acquiring the locks, it is reasonable to block briefly,
+ * since the auto ingest node (AIN) input directory scanning tasks
+ * do a lot of short-term acquiring and releasing of the same locks.
+ * The assumption here is that the originator of this case deletion
+ * task is not asking for deletion of a case that has a job that an
+ * auto ingest node (AIN) job execution task is working on and that
+ * MANIFEST_FILE_LOCKING_TIMEOUT_MINS is not very long anyway, so
+ * waiting a bit should be fine.
+ */
+ try {
+ for (Path manifestPath : manifestFilePaths) {
+ checkForCancellation();
+ progress.progress(Bundle.DeleteCaseTask_progress_lockingManifest(manifestPath.toString()));
+ logger.log(Level.INFO, String.format("Exclusively locking the manifest %s for %s", manifestPath, caseNodeData.getDisplayName()));
+ CoordinationService.Lock manifestLock = coordinationService.tryGetExclusiveLock(CoordinationService.CategoryNode.MANIFESTS, manifestPath.toString(), MANIFEST_FILE_LOCKING_TIMEOUT_MINS, TimeUnit.MINUTES);
+ if (null != manifestLock) {
+ manifestFileLocks.add(manifestLock);
+ } else {
+ logger.log(Level.INFO, String.format("Failed to exclusively lock the manifest %s because it was already held by another host", manifestPath, caseNodeData.getDisplayName()));
+ allLocksAcquired = false;
+ releaseManifestFileLocks();
+ break;
+ }
+ }
+ } catch (CoordinationServiceException | InterruptedException ex) {
+ releaseManifestFileLocks();
+ throw ex;
+ }
+ }
+ return allLocksAcquired;
+ }
+
+ /**
+ * Deletes case contents, based on the specified deletion option.
+ *
+ * @throws InterruptedException If the thread in which this task is running
+ * is interrupted while blocked waiting for a
+ * coordination service operation to complete.
+ */
+ private void deleteCaseContents() throws InterruptedException {
+ final File caseDirectory = caseNodeData.getDirectory().toFile();
+ if (caseDirectory.exists()) {
+ progress.progress(Bundle.DeleteCaseTask_progress_openingCaseMetadataFile());
+ logger.log(Level.INFO, String.format("Opening case metadata file for %s", caseNodeData.getDisplayName()));
+ Path caseMetadataPath = CaseMetadata.getCaseMetadataFilePath(caseNodeData.getDirectory());
+ if (caseMetadataPath != null) {
+ try {
+ caseMetadata = new CaseMetadata(caseMetadataPath);
+ checkForCancellation();
+ if (!manifestFilePaths.isEmpty() && (deleteOption == DeleteOptions.DELETE_INPUT || deleteOption == DeleteOptions.DELETE_ALL)) {
+ deleteAutoIngestInput();
+ }
+ checkForCancellation();
+ if (deleteOption == DeleteOptions.DELETE_OUTPUT || deleteOption == DeleteOptions.DELETE_ALL) {
+ Case.deleteMultiUserCase(caseNodeData, caseMetadata, progress, logger);
+ }
+
+ } catch (CaseMetadata.CaseMetadataException ex) {
+ logger.log(Level.SEVERE, String.format("Error reading metadata file for %s", caseNodeData.getDisplayName()), ex);
+ }
+
+ } else {
+ logger.log(Level.WARNING, String.format("No case metadata file found for %s", caseNodeData.getDisplayName()));
+ }
+
+ } else {
+ setDeletedItemFlag(CaseNodeData.DeletedFlags.CASE_DIR);
+ logger.log(Level.INFO, String.format("No case directory found for %s", caseNodeData.getDisplayName()));
+ }
+ }
+
+ /**
+ * Deletes the auto ingest job input manifests for the case along with the
+ * corresponding data sources.
+ *
+ * @throws InterruptedException If the thread in which this task is running
+ * is interrupted while blocked waiting for a
+ * coordination service operation to complete.
+ */
+ @NbBundle.Messages({
+ "DeleteCaseTask.progress.openingCaseDatabase=Opening the case database...",
+ "# {0} - manifest file path", "DeleteCaseTask.progress.parsingManifest=Parsing manifest file {0}...",
+ "# {0} - manifest file path", "DeleteCaseTask.progress.deletingManifest=Deleting manifest file {0}..."
+ })
+ private void deleteAutoIngestInput() throws InterruptedException {
+ SleuthkitCase caseDb = null;
+ try {
+ progress.progress(Bundle.DeleteCaseTask_progress_openingCaseDatabase());
+ logger.log(Level.INFO, String.format("Opening the case database for %s", caseNodeData.getDisplayName()));
+ caseDb = SleuthkitCase.openCase(caseMetadata.getCaseDatabaseName(), UserPreferences.getDatabaseConnectionInfo(), caseMetadata.getCaseDirectory());
+ List dataSources = caseDb.getDataSources();
+ checkForCancellation();
+
+ /*
+ * For every manifest file associated with the case, attempt to
+ * delete both the data source referenced by the manifest and the
+ * manifest.
+ */
+ boolean allInputDeleted = true;
+ for (Path manifestFilePath : manifestFilePaths) {
+ checkForCancellation();
+ final File manifestFile = manifestFilePath.toFile();
+ if (manifestFile.exists()) {
+ Manifest manifest = parseManifestFile(manifestFilePath);
+ if (manifest != null) {
+ if (deleteDataSources(manifest, dataSources)) {
+ if (!deleteManifestFile(manifestFile)) {
+ logger.log(Level.WARNING, String.format("Failed to delete manifest file %s for %s", manifestFilePath, caseNodeData.getDisplayName()));
+ allInputDeleted = false;
+ }
+ } else {
+ logger.log(Level.WARNING, String.format("Failed to delete manifest file %s for %s", manifestFilePath, caseNodeData.getDisplayName()));
+ allInputDeleted = false;
+ }
+ } else {
+ logger.log(Level.WARNING, String.format("Failed to parse manifest file %s for %s", manifestFilePath, caseNodeData.getDisplayName()));
+ allInputDeleted = false;
+ }
+ } else {
+ logger.log(Level.WARNING, String.format("Did not find manifest file %s for %s", manifestFilePath, caseNodeData.getDisplayName()));
+ }
+ }
+ if (allInputDeleted) {
+ setDeletedItemFlag(CaseNodeData.DeletedFlags.DATA_SOURCES);
+ }
+
+ } catch (TskCoreException | UserPreferencesException ex) {
+ logger.log(Level.INFO, String.format("Failed to open or query the case database for %s", caseNodeData.getDisplayName()), ex);
+
+ } finally {
+ if (caseDb != null) {
+ caseDb.close();
+ }
+ }
+ }
+
+ /**
+ * Parses a manifest file.
+ *
+ * @param manifestFilePath The manifest file path.
+ *
+ * @return A manifest, if the parsing is successful, null otherwise.
+ */
+ private Manifest parseManifestFile(Path manifestFilePath) {
+ progress.progress(Bundle.DeleteCaseTask_progress_parsingManifest(manifestFilePath));
+ logger.log(Level.INFO, String.format("Parsing manifest file %s for %s", manifestFilePath, caseNodeData.getDisplayName()));
+ Manifest manifest = null;
+ for (ManifestFileParser parser : Lookup.getDefault().lookupAll(ManifestFileParser.class)) {
+ if (parser.fileIsManifest(manifestFilePath)) {
+ try {
+ manifest = parser.parse(manifestFilePath);
+ break;
+ } catch (ManifestFileParser.ManifestFileParserException ex) {
+ logger.log(Level.WARNING, String.format("Error parsing manifest file %s for %s", manifestFilePath, caseNodeData.getDisplayName()), ex);
+ }
+ }
+ }
+ return manifest;
+ }
+
+ /**
+ * Deletes a manifest file.
+ *
+ * @param manifestFile The manifest file.
+ *
+ * @return True if the file was deleted, false otherwise.
+ *
+ * @throws InterruptedException If the thread in which this task is running
+ * is interrupted while blocked waiting for a
+ * coordination service operation to complete.
+ */
+ private boolean deleteManifestFile(File manifestFile) throws InterruptedException {
+ /*
+ * Delete the manifest file, allowing a few retries. This is a way to
+ * resolve the race condition between this task and auto ingest node
+ * (AIN) input directory scanning tasks, which parse manifests (actually
+ * all files) before getting a coordination service lock, without
+ * resorting to a protocol using locking of the input directory.
+ */
+ Path manifestFilePath = manifestFile.toPath();
+ progress.progress(Bundle.DeleteCaseTask_progress_deletingManifest(manifestFilePath));
+ logger.log(Level.INFO, String.format("Deleting manifest file %s for %s", manifestFilePath, caseNodeData.getDisplayName()));
+ int tries = 0;
+ boolean deleted = false;
+ while (!deleted && tries < MANIFEST_DELETE_TRIES) {
+ deleted = manifestFile.delete();
+ if (!deleted) {
+ ++tries;
+ Thread.sleep(1000);
+ }
+ }
+ return deleted;
+ }
+
+ /**
+ * Locates and deletes the data source files referenced by a manifest.
+ *
+ * @param manifest A manifest.
+ * @param dataSources The data sources in the case as obtained from the case
+ * database.
+ *
+ * @return True if all of the data source files were deleted, false
+ * otherwise.
+ */
+ @NbBundle.Messages({
+ "# {0} - data source name", "# {1} - device id", "DeleteCaseTask.progress.deletingDataSource=Deleting data source {0} with device id {1}...",})
+ private boolean deleteDataSources(Manifest manifest, List dataSources) {
+ final String dataSourceFileName = manifest.getDataSourceFileName();
+ final String dataSourceDeviceId = manifest.getDeviceId();
+ progress.progress(Bundle.DeleteCaseTask_progress_deletingDataSource(dataSourceFileName, dataSourceDeviceId));
+ logger.log(Level.INFO, String.format("Deleting data source %s with device id %s from %s", dataSourceFileName, dataSourceDeviceId, caseNodeData.getDisplayName()));
+
+ /*
+ * There are two possibilities here. The data source may be an image,
+ * and if so, it may be split into multiple files. In this case, all of
+ * the files for the image need to be deleted. Otherwise, the data
+ * source is a single directory or file (a logical file, logical file
+ * set, report file, archive file, etc.). In this case, just the file
+ * referenced by the manifest will be deleted.
+ */
+ boolean allFilesDeleted = true;
+ Set filesToDelete = new HashSet<>();
+ for (DataSource dataSource : dataSources) {
+ if (dataSource instanceof Image) {
+ Image image = (Image) dataSource;
+ if (image.getName().equals(dataSourceFileName) && image.getDeviceId().equals(dataSourceDeviceId)) {
+ String[] imageFilePaths = image.getPaths();
+ for (String path : imageFilePaths) {
+ Path imageFilePath = Paths.get(path);
+ filesToDelete.add(imageFilePath);
+ }
+ break;
+ }
+ }
+ }
+ if (filesToDelete.isEmpty()) {
+ final Path dataSourcePath = manifest.getDataSourcePath();
+ filesToDelete.add(dataSourcePath);
+ }
+
+ for (Path path : filesToDelete) {
+ File fileOrDir = path.toFile();
+ if (fileOrDir.exists() && !FileUtil.deleteFileDir(fileOrDir)) {
+ allFilesDeleted = false;
+ logger.log(Level.INFO, String.format("Failed to delete data source file at %s for %s", path, caseNodeData.getDisplayName()));
+ }
+ }
+
+ return allFilesDeleted;
+ }
+
+ /**
+ * Deletes the case resources coordination service node.
+ *
+ * @throws InterruptedException If the thread in which this task is running
+ * is interrupted while blocked waiting for a
+ * coordination service operation to complete.
+ */
+ private void deleteCaseResourcesNode() throws InterruptedException {
+ if (deleteOption == DeleteOptions.DELETE_OUTPUT || deleteOption == DeleteOptions.DELETE_ALL) {
+ progress.progress(Bundle.DeleteCaseTask_progress_deletingResourcesLockNode());
+ logger.log(Level.INFO, String.format("Deleting case resources log znode for %s", caseNodeData.getDisplayName()));
+ String resourcesNodePath = CoordinationServiceUtils.getCaseResourcesNodePath(caseNodeData.getDirectory());
+ try {
+ coordinationService.deleteNode(CategoryNode.CASES, resourcesNodePath);
+ } catch (CoordinationServiceException ex) {
+ if (!isNoNodeException(ex)) {
+ logger.log(Level.SEVERE, String.format("Error deleting case resources znode for %s", caseNodeData.getDisplayName()), ex);
+ }
+ }
+ }
+ }
+
+ /**
+ * Deletes the case auto ingest log coordination service node.
+ *
+ * @throws InterruptedException If the thread in which this task is running
+ * is interrupted while blocked waiting for a
+ * coordination service operation to complete.
+ */
+ private void deleteCaseAutoIngestLogNode() throws InterruptedException {
+ if (deleteOption == DeleteOptions.DELETE_OUTPUT || deleteOption == DeleteOptions.DELETE_ALL) {
+ progress.progress(Bundle.DeleteCaseTask_progress_deletingJobLogLockNode());
+ logger.log(Level.INFO, String.format("Deleting case auto ingest job log znode for %s", caseNodeData.getDisplayName()));
+ String logFilePath = CoordinationServiceUtils.getCaseAutoIngestLogNodePath(caseNodeData.getDirectory());
+ try {
+ coordinationService.deleteNode(CategoryNode.CASES, logFilePath);
+ } catch (CoordinationServiceException ex) {
+ if (!isNoNodeException(ex)) {
+ logger.log(Level.SEVERE, String.format("Error deleting case auto ingest job log znode for %s", caseNodeData.getDisplayName()), ex);
+ }
+ }
+ }
+ }
+
+ /**
+ * Deletes the case directory coordination service node if everything that
+ * was supposed to be deleted was deleted. Otherwise, leave the node so that
+ * what was and was not deleted can be inspected.
+ *
+ * @throws InterruptedException If the thread in which this task is running
+ * is interrupted while blocked waiting for a
+ * coordination service operation to complete.
+ */
+ private void deleteCaseDirectoryNode() throws InterruptedException {
+ if ((deleteOption == DeleteOptions.DELETE_OUTPUT || deleteOption == DeleteOptions.DELETE_ALL)
+ && caseNodeData.isDeletedFlagSet(CaseNodeData.DeletedFlags.DATA_SOURCES)
+ && caseNodeData.isDeletedFlagSet(CaseNodeData.DeletedFlags.CASE_DB)
+ && caseNodeData.isDeletedFlagSet(CaseNodeData.DeletedFlags.CASE_DIR)
+ && caseNodeData.isDeletedFlagSet(CaseNodeData.DeletedFlags.MANIFEST_FILE_NODES)) {
+ progress.progress(Bundle.DeleteCaseTask_progress_deletingCaseDirCoordSvcNode());
+ logger.log(Level.INFO, String.format("Deleting case directory znode for %s", caseNodeData.getDisplayName()));
+ String caseDirNodePath = CoordinationServiceUtils.getCaseDirectoryNodePath(caseNodeData.getDirectory());
+ try {
+ coordinationService.deleteNode(CategoryNode.CASES, caseDirNodePath);
+ } catch (CoordinationServiceException ex) {
+ logger.log(Level.SEVERE, String.format("Error deleting case directory lock node for %s", caseNodeData.getDisplayName()), ex);
+ }
+ }
+ }
+
+ /**
+ * Deletes the case name coordination service node.
+ *
+ * @throws InterruptedException If the thread in which this task is running
+ * is interrupted while blocked waiting for a
+ * coordination service operation to complete.
+ */
+ private void deleteCaseNameNode() throws InterruptedException {
+ if (deleteOption == DeleteOptions.DELETE_OUTPUT || deleteOption == DeleteOptions.DELETE_ALL) {
+ progress.progress(Bundle.DeleteCaseTask_progress_deletingCaseNameCoordSvcNode());
+ logger.log(Level.INFO, String.format("Deleting case name znode for %s", caseNodeData.getDisplayName()));
+ try {
+ String caseNameLockNodeName = CoordinationServiceUtils.getCaseNameNodePath(caseNodeData.getDirectory());
+ coordinationService.deleteNode(CategoryNode.CASES, caseNameLockNodeName);
+ } catch (CoordinationServiceException ex) {
+ logger.log(Level.SEVERE, String.format("Error deleting case name lock node for %s", caseNodeData.getDisplayName()), ex);
+ }
+ }
+ }
+
+ /**
+ * Examines a coordination service exception to try to determine if it is a
+ * no node exception.
+ *
+ * @param ex A coordination service exception.
+ *
+ * @return True if the exception's cause indicates a no node error, false otherwise.
+ */
+ private boolean isNoNodeException(CoordinationServiceException ex) {
+ boolean isNodeNodeEx = false;
+ Throwable cause = ex.getCause();
+ if (cause != null) {
+ String causeMessage = cause.getMessage();
+ isNodeNodeEx = causeMessage.contains(NO_NODE_ERROR_MSG_FRAGMENT);
+ }
+ return isNodeNodeEx;
+ }
+
+ /**
+ * Releases all of the manifest file locks that have been acquired by this
+ * task.
+ */
+ @NbBundle.Messages({
+ "# {0} - manifest file path", "DeleteCaseTask.progress.releasingManifestLock=Releasing lock on the manifest file {0}..."
+ })
+ private void releaseManifestFileLocks() {
+ for (Lock manifestFileLock : manifestFileLocks) {
+ String manifestFilePath = manifestFileLock.getNodePath();
+ try {
+ progress.progress(Bundle.DeleteCaseTask_progress_releasingManifestLock(manifestFilePath));
+ logger.log(Level.INFO, String.format("Releasing the exclusive coordination service lock on the manifest file %s for %s", manifestFilePath, caseNodeData.getDisplayName()));
+ manifestFileLock.release();
+ } catch (CoordinationServiceException ex) {
+ logger.log(Level.WARNING, String.format("Error releasing the exclusive coordination service lock on the manifest file %s for %s", manifestFilePath, caseNodeData.getDisplayName()), ex);
+ }
+ }
+ manifestFileLocks.clear();
+ }
+
+ /**
+ * Releases all of the manifest file locks that have been acquired by this
+ * task and attempts to delete the corresponding coordination service nodes.
+ *
+ * If all of the manifest file coordination service nodes are deleted,
+ * the MANIFEST_FILE_NODES deleted-item flag is set for the case.
+ *
+ * @throws InterruptedException If the thread in which this task is running
+ * is interrupted while blocked waiting for a
+ * coordination service operation to complete.
+ */
+ @Messages({
+ "# {0} - manifest file path", "DeleteCaseTask.progress.deletingManifestFileNode=Deleting the manifest file znode for {0}..."
+ })
+ private void deleteManifestFileNodes() throws InterruptedException {
+ if (deleteOption == DeleteOptions.DELETE_OUTPUT || deleteOption == DeleteOptions.DELETE_ALL) {
+ boolean allINodesDeleted = true;
+ Iterator iterator = manifestFileLocks.iterator();
+ while (iterator.hasNext()) {
+ Lock manifestFileLock = iterator.next();
+ String manifestFilePath = manifestFileLock.getNodePath();
+ try {
+ progress.progress(Bundle.DeleteCaseTask_progress_releasingManifestLock(manifestFilePath));
+ logger.log(Level.INFO, String.format("Releasing the lock on the manifest file %s for %s", manifestFilePath, caseNodeData.getDisplayName()));
+ manifestFileLock.release();
+ progress.progress(Bundle.DeleteCaseTask_progress_deletingManifestFileNode(manifestFilePath));
+ logger.log(Level.INFO, String.format("Deleting the manifest file znode for %s for %s", manifestFilePath, caseNodeData.getDisplayName()));
+ coordinationService.deleteNode(CoordinationService.CategoryNode.MANIFESTS, manifestFilePath);
+ } catch (CoordinationServiceException ex) {
+ allINodesDeleted = false;
+ logger.log(Level.WARNING, String.format("Error deleting the manifest file znode for %s for %s", manifestFilePath, caseNodeData.getDisplayName()), ex);
+ }
+ iterator.remove();
+ }
+ if (allINodesDeleted) {
+ setDeletedItemFlag(CaseNodeData.DeletedFlags.MANIFEST_FILE_NODES);
+ }
+ }
+ }
+
+ /**
+ * Sets a deleted item flag in the coordination service node data for the
+ * case.
+ *
+ * @param flag The flag to set.
+ */
+ private void setDeletedItemFlag(CaseNodeData.DeletedFlags flag) {
+ try {
+ caseNodeData.setDeletedFlag(flag);
+ coordinationService.setNodeData(CategoryNode.CASES, caseNodeData.getDirectory().toString(), caseNodeData.toArray());
+ } catch (IOException | CoordinationServiceException | InterruptedException ex) {
+ logger.log(Level.SEVERE, String.format("Error updating deleted item flag %s for %s", flag.name(), caseNodeData.getDisplayName()), ex);
+ }
+ }
+
+ /**
+ * Checks whether the interrupted flag of the current thread is set.
+ *
+ * @throws InterruptedException If the interrupted flag is set.
+ */
+ private void checkForCancellation() throws InterruptedException {
+ if (Thread.currentThread().isInterrupted()) {
+ throw new InterruptedException("Interrupt detected");
+ }
+ }
+
+}
diff --git a/Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/DeleteCasesAction.java b/Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/DeleteCasesAction.java
deleted file mode 100755
index a1fcc632bb..0000000000
--- a/Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/DeleteCasesAction.java
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Autopsy Forensic Browser
- *
- * Copyright 2019-2019 Basis Technology Corp.
- * Contact: carrier sleuthkit org
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.sleuthkit.autopsy.experimental.autoingest;
-
-import java.awt.event.ActionEvent;
-import java.util.ArrayList;
-import java.util.Collection;
-import javax.swing.AbstractAction;
-import org.openide.util.NbBundle;
-import org.openide.util.Utilities;
-import org.sleuthkit.autopsy.casemodule.multiusercases.CaseNodeData;
-
-/**
- * An action that completely deletes one or more multi-user cases, including any
- * associated auto ingest job input directories and coordination service nodes.
- *
- * This cases to delete are discovered by querying the actions global context
- * lookup for CaseNodeData objects. See
- * https://platform.netbeans.org/tutorials/nbm-selection-1.html and
- * https://platform.netbeans.org/tutorials/nbm-selection-2.html for details.
- */
-final class DeleteCasesAction extends AbstractAction {
-
- private static final long serialVersionUID = 1L;
-
- /**
- * Constructs an action that completely deletes one or more multi-user
- * cases, including any associated auto ingest job input directories and
- * coordination service nodes.
- */
- @NbBundle.Messages({
- "DeleteCasesAction.menuItemText=Delete Case and Jobs"
- })
- DeleteCasesAction() {
- super(Bundle.DeleteCasesAction_menuItemText());
- setEnabled(false); // RJCTODO: Enable when implemented
- }
-
- @Override
- public void actionPerformed(ActionEvent event) {
-// final Collection selectedNodeData = new ArrayList<>(Utilities.actionsGlobalContext().lookupAll(CaseNodeData.class));
-// if (!selectedNodeData.isEmpty()) {
-// /*
-// * RJCTODO: Create a background task that does the deletion and
-// * displays results in a dialog with a scrolling text pane.
-// */
-// }
- }
-
- @Override
- public DeleteCasesAction clone() throws CloneNotSupportedException {
- throw new CloneNotSupportedException();
- }
-
-}
diff --git a/Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/ShowCaseDeletionStatusAction.java b/Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/ShowCaseDeletionStatusAction.java
deleted file mode 100755
index c691cd2038..0000000000
--- a/Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/ShowCaseDeletionStatusAction.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * To change this license header, choose License Headers in Project Properties.
- * To change this template file, choose Tools | Templates
- * and open the template in the editor.
- */
-package org.sleuthkit.autopsy.experimental.autoingest;
-
-import java.awt.event.ActionEvent;
-import javax.swing.AbstractAction;
-import org.openide.util.NbBundle;
-import org.sleuthkit.autopsy.casemodule.multiusercases.CaseNodeData;
-
-/**
- * An action that shows a popup that enumerates the deletion status of the
- * various parts of a multi-user case known to the coordination service.
- */
-final class ShowCaseDeletionStatusAction extends AbstractAction {
-
- private static final long serialVersionUID = 1L;
- // private final CaseNodeData caseNodeData;
-
- /**
- * Constructs an action that shows a popup that enumerates the deletion
- * status of the various parts of a multi-user case known to the
- * coordination service.
- *
- * @param caseNodeData The coordination service node data for the case.
- */
- @NbBundle.Messages({
- "ShowCaseDeletionStatusAction.menuItemText=Show Deletion Status"
- })
- ShowCaseDeletionStatusAction(CaseNodeData caseNodeData) {
- super(Bundle.ShowCaseDeletionStatusAction_menuItemText());
- // this.caseNodeData = caseNodeData; // RJCTODO: Use
- setEnabled(false); // RJCTODO: Enable when implemented
- }
-
- @Override
- public void actionPerformed(ActionEvent e) {
- // RJCTODO: Implement
- }
-
- @Override
- public ShowCaseDeletionStatusAction clone() throws CloneNotSupportedException {
- throw new CloneNotSupportedException();
- }
-
-}
diff --git a/KeywordSearch/src/org/sleuthkit/autopsy/keywordsearch/multicase/MultiCaseSearcher.java b/KeywordSearch/src/org/sleuthkit/autopsy/keywordsearch/multicase/MultiCaseSearcher.java
new file mode 100755
index 0000000000..80395a0599
--- /dev/null
+++ b/KeywordSearch/src/org/sleuthkit/autopsy/keywordsearch/multicase/MultiCaseSearcher.java
@@ -0,0 +1,775 @@
+/*
+ * Autopsy Forensic Browser
+ *
+ * Copyright 2019 Basis Technology Corp.
+ * Contact: carrier sleuthkit org
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.sleuthkit.autopsy.keywordsearch.multicase;
+
+import com.google.common.eventbus.EventBus;
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.LinkOption;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+import java.util.logging.Level;
+import java.util.stream.Collectors;
+import javax.xml.parsers.DocumentBuilder;
+import javax.xml.parsers.DocumentBuilderFactory;
+import javax.xml.parsers.ParserConfigurationException;
+import javax.xml.xpath.XPath;
+import javax.xml.xpath.XPathConstants;
+import javax.xml.xpath.XPathExpression;
+import javax.xml.xpath.XPathExpressionException;
+import javax.xml.xpath.XPathFactory;
+import org.apache.commons.lang.StringUtils;
+import org.apache.solr.client.solrj.SolrQuery;
+import org.apache.solr.client.solrj.SolrRequest;
+import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.impl.HttpSolrServer;
+import org.apache.solr.client.solrj.request.CoreAdminRequest;
+import org.apache.solr.client.solrj.response.CoreAdminResponse;
+import org.apache.solr.client.solrj.response.QueryResponse;
+import org.apache.solr.common.SolrDocument;
+import org.apache.solr.common.SolrDocumentList;
+import org.apache.solr.common.params.CoreAdminParams;
+import org.apache.solr.common.params.CursorMarkParams;
+import org.openide.util.Exceptions;
+import org.openide.util.NbBundle;
+import org.sleuthkit.autopsy.casemodule.CaseMetadata;
+import org.sleuthkit.autopsy.casemodule.multiusercases.CaseNodeData;
+import org.sleuthkit.autopsy.coordinationservice.CoordinationService;
+import org.sleuthkit.autopsy.core.UserPreferences;
+import org.sleuthkit.autopsy.core.UserPreferencesException;
+import org.sleuthkit.autopsy.coreutils.Logger;
+import org.sleuthkit.autopsy.coreutils.UNCPathUtilities;
+import org.sleuthkit.autopsy.keywordsearch.Server;
+import org.sleuthkit.autopsy.progress.ProgressIndicator;
+import org.sleuthkit.datamodel.AbstractFile;
+import org.sleuthkit.datamodel.BlackboardArtifact;
+import org.sleuthkit.datamodel.CaseDbConnectionInfo;
+import org.sleuthkit.datamodel.Content;
+import org.sleuthkit.datamodel.Report;
+import org.sleuthkit.datamodel.SleuthkitCase;
+import org.sleuthkit.datamodel.TskCoreException;
+import org.w3c.dom.Document;
+import org.xml.sax.SAXException;
+
+/**
+ * Performs keyword searches across multiple cases
+ */
+final class MultiCaseSearcher {
+
+ private static final String CASE_AUTO_INGEST_LOG_NAME = "AUTO_INGEST_LOG.TXT"; //NON-NLS
+ private static final String SEARCH_COMPLETE_MESSAGE = "SEARCH_COMPLETE";
+ private static final String RESOURCES_LOCK_SUFFIX = "_RESOURCES"; //NON-NLS
+ private static final int CASE_DIR_READ_LOCK_TIMEOUT_HOURS = 12; //NON-NLS
+ private static final String SOLR_SERVER_URL_FORMAT_STRING = "http://%s:%s/solr"; //NON-NLS
+ private static final String SOLR_CORE_URL_FORMAT_STRING = "http://%s:%s/solr/%s"; //NON-NLS
+ private final static String SOLR_METADATA_FILE_NAME = "SolrCore.properties"; //NON-NLS
+ private static final String SOLR_CORE_NAME_XPATH = "/SolrCores/Core/CoreName/text()"; //NON-NLS
+ private static final String TEXT_INDEX_NAME_XPATH = "/SolrCores/Core/TextIndexPath/text()"; //NON-NLS
+ private static final String SOLR_CORE_INSTANCE_PATH_PROPERTY = "instanceDir"; //NON-NLS
+ private static final String SOLR_CONFIG_SET_NAME = "AutopsyConfig"; //NON-NLS
+ private static final int MAX_RESULTS_PER_CURSOR_MARK = 512;
+ private static final String SOLR_DOC_ID_FIELD = Server.Schema.ID.toString(); //NON-NLS
+ private static final String SOLR_DOC_CONTENT_STR_FIELD = Server.Schema.CONTENT_STR.toString(); //NON-NLS
+ private static final String SOLR_DOC_CHUNK_SIZE_FIELD = Server.Schema.CHUNK_SIZE.toString(); //NON-NLS
+ private static final String SOLR_DOC_ID_PARTS_SEPARATOR = "_";
+ private static final Logger logger = Logger.getLogger(MultiCaseSearcher.class.getName());
+ private final EventBus eventBus = new EventBus("MultiCaseSearcherEventBus");
+ private static final UNCPathUtilities pathUtils = new UNCPathUtilities();
+ private volatile boolean searchStopped = true;
+
+ MultiCaseSearcher() {
+
+ }
+
+ static String getSearchCompleteMessage() {
+ return SEARCH_COMPLETE_MESSAGE;
+ }
+
+ /**
+ *
+ * Performs keyword searches across multiple cases
+ *
+ * @param caseNames The names of the cases to search.
+ * @param query The keyword search query to perform.
+ * @param progressIndicator A progress indicator for the search.
+ *
+ * @return The search results.
+ *
+ * @throws MultiCaseSearcherException
+ * @throws InterruptedException
+ */
+ @NbBundle.Messages({
+ "MultiCaseSearcher.progressMessage.findingCases=Finding selected cases",
+ "MultiCaseSearcher.progressMessage.creatingSolrQuery=Creating search query for Solr server",
+ "# {0} - total cases",
+ "MultiCaseSearcher.progressMessage.startingCaseSearches=Searching {0} case(s)",
+ "# {0} - case name",
+ "# {1} - case counter",
+ "# {2} - total cases",
+ "MultiCaseSearcher.progressMessage.acquiringSharedLockForCase=Acquiring shared lock for \"{0}\" ({1} of {2} case(s))",
+ "# {0} - case name",
+ "# {1} - case counter",
+ "# {2} - total cases",
+ "MultiCaseSearcher.progressMessage.loadingSolrCoreForCase=Loading Solr core for \"{0}\" ({1} of {2} case(s))",
+ "# {0} - case name",
+ "# {1} - case counter",
+ "# {2} - total cases",
+ "MultiCaseSearcher.progressMessage.openingCaseDbForCase=Opening case database for \"{0}\" ({1} of {2} case(s))",
+ "# {0} - case name",
+ "# {1} - case counter",
+ "# {2} - total cases",
+ "MultiCaseSearcher.progressMessage.executingSolrQueryForCase=Getting keyword hits for \"{0}\" ({1} of {2} case(s))",
+ "# {0} - case directory path",
+ "MultiCaseSearcher.exceptionMessage.failedToGetCaseDirReadlock=Failed to obtain read lock for case directory at {0}",
+ "MultiCaseSearcher.exceptionMessage.cancelledMessage=Search cancelled"
+ })
+ void performKeywordSearch(final Collection caseNodes, final SearchQuery query, final ProgressIndicator progressIndicator) {
+ progressIndicator.start(Bundle.MultiCaseSearcher_progressMessage_findingCases());
+ try {
+ searchStopped = false; //mark the search as started
+ final List caseMetadata = getMultiCaseMetadata(caseNodes);
+ checkForCancellation();
+ //eventBus.post("number of cases to search determined");
+ progressIndicator.progress(Bundle.MultiCaseSearcher_progressMessage_creatingSolrQuery());
+ final SolrQuery solrQuery = createSolrQuery(query);
+ checkForCancellation();
+ final int totalCases = caseMetadata.size();
+ int caseCounter = 1;
+ progressIndicator.progress(Bundle.MultiCaseSearcher_progressMessage_startingCaseSearches(totalCases));
+ int totalSteps = 5;
+ progressIndicator.switchToDeterminate(Bundle.MultiCaseSearcher_progressMessage_startingCaseSearches(totalCases), 0, totalCases * totalSteps);
+ int caseNumber = 0;
+ for (MultiCaseMetadata aCase : caseMetadata) {
+ CaseMetadata metadata = aCase.getCaseMetadata();
+ String caseName = metadata.getCaseDisplayName();
+ SleuthkitCase caseDatabase = null;
+
+ int stepsCompleted = 0;
+ progressIndicator.progress(Bundle.MultiCaseSearcher_progressMessage_acquiringSharedLockForCase(caseName, caseCounter, totalCases), stepsCompleted + caseNumber * totalSteps);
+ try (CoordinationService.Lock caseDirReadLock = CoordinationService.getInstance().tryGetSharedLock(CoordinationService.CategoryNode.CASES, aCase.getCaseMetadata().getCaseDirectory(), CASE_DIR_READ_LOCK_TIMEOUT_HOURS, TimeUnit.HOURS)) {
+ if (null == caseDirReadLock) {
+ throw new MultiCaseSearcherException(Bundle.MultiCaseSearcher_exceptionMessage_failedToGetCaseDirReadlock(aCase.getCaseMetadata().getCaseDirectory()));
+ }
+ checkForCancellation();
+ ++stepsCompleted;
+ progressIndicator.progress(Bundle.MultiCaseSearcher_progressMessage_loadingSolrCoreForCase(caseName, caseCounter, totalCases), stepsCompleted + caseNumber * totalSteps);
+ final HttpSolrServer solrServer = loadSolrCoreForCase(aCase);
+ checkForCancellation();
+ ++stepsCompleted;
+ progressIndicator.progress(Bundle.MultiCaseSearcher_progressMessage_openingCaseDbForCase(caseName, caseCounter, totalCases), stepsCompleted + caseNumber * totalSteps);
+ caseDatabase = openCase(aCase);
+ checkForCancellation();
+ ++stepsCompleted;
+ progressIndicator.progress(Bundle.MultiCaseSearcher_progressMessage_executingSolrQueryForCase(caseName, caseCounter, totalCases), stepsCompleted + caseNumber * totalSteps);
+ eventBus.post(executeQuery(solrServer, solrQuery, caseDatabase, aCase));
+ ++stepsCompleted;
+
+ progressIndicator.progress(stepsCompleted + caseNumber * totalSteps);
+ ++caseCounter;
+ } catch (CoordinationService.CoordinationServiceException ex) {
+ throw new MultiCaseSearcherException(Bundle.MultiCaseSearcher_exceptionMessage_failedToGetCaseDirReadlock(aCase.getCaseMetadata().getCaseDirectory()), ex);
+ } catch (MultiCaseSearcherException exception) {
+ logger.log(Level.INFO, "Exception encountered while performing multi-case keyword search", exception);
+ eventBus.post(exception);
+ } finally {
+ if (null != caseDatabase) {
+ closeCase(caseDatabase);
+ }
+ }
+ caseNumber++;
+ }
+ } catch (InterruptedException exception) {
+ logger.log(Level.INFO, Bundle.MultiCaseSearcher_exceptionMessage_cancelledMessage(), exception);
+ eventBus.post(exception);
+ } catch (MultiCaseSearcherException exception) {
+ logger.log(Level.WARNING, "Exception encountered while performing multi-case keyword search", exception);
+ eventBus.post(new InterruptedException("Exception encountered while performing multi-case keyword search"));
+ eventBus.post(exception);
+ } finally {
+ progressIndicator.finish();
+ eventBus.post(SEARCH_COMPLETE_MESSAGE);
+ }
+ }
+
+ /**
+ * Gets metadata for the cases associated with the search.
+ *
+ * @param caseNames The names of the cases to search.
+ *
+ * @return The metadata for the cases.
+ *
+ * @throws MultiCaseSearcherException
+ * @throws InterruptedException
+ */
+ private List getMultiCaseMetadata(final Collection caseNodes) throws MultiCaseSearcherException, InterruptedException {
+ final Map casesToCasePaths = caseNodes.stream()
+ .collect(Collectors.toMap(CaseNodeData::getDirectory, CaseNodeData::getName));
+ checkForCancellation();
+ final List cases = new ArrayList<>();
+ for (Map.Entry entry : casesToCasePaths.entrySet()) {
+ final Path caseDirectoryPath = entry.getKey();
+ final CaseMetadata caseMetadata = getCaseMetadata(caseDirectoryPath);
+ checkForCancellation();
+ final TextIndexMetadata textIndexMetadata = getTextIndexMetadata(caseDirectoryPath);
+ checkForCancellation();
+ cases.add(new MultiCaseMetadata(caseMetadata, textIndexMetadata));
+ }
+ return cases;
+ }
+
+ /**
+ * Gets the metadata for a case from the case metadata file in a given case
+ * directory.
+ *
+ * @param caseDirectoryPath A case directory path.
+ *
+ * @return The case metadata.
+ *
+ * @throws MultiCaseSearcherException
+ */
+ @NbBundle.Messages({
+ "# {0} - case directory", "MultiCaseSearcher.exceptionMessage.failedToFindCaseMetadata=Failed to find case metadata file in {0}",
+ "# {0} - case directory", "MultiCaseSearcher.exceptionMessage.failedToParseCaseMetadata=Failed to parse case file metadata in {0}"
+ })
+
+ private static CaseMetadata getCaseMetadata(Path caseDirectoryPath) throws MultiCaseSearcherException {
+ Path metadataPath = CaseMetadata.getCaseMetadataFilePath(caseDirectoryPath);
+ if (metadataPath != null) {
+ try {
+ return new CaseMetadata(metadataPath);
+ } catch (CaseMetadata.CaseMetadataException ex) {
+ throw new MultiCaseSearcherException(Bundle.MultiCaseSearcher_exceptionMessage_failedToParseCaseMetadata(caseDirectoryPath), ex);
+ }
+ }
+ throw new MultiCaseSearcherException(Bundle.MultiCaseSearcher_exceptionMessage_failedToFindCaseMetadata(caseDirectoryPath));
+ }
+
+ /**
+ * Gets the text index metadata from the Solr.properties file in a given
+ * case directory.
+ *
+ * @param caseDirectoryPath A case directory path.
+ *
+ * @return The text index metadata.
+ *
+ * @throws MultiCaseSearcherException
+ */
+ @NbBundle.Messages({
+ "# {0} - file name", "# {1} - case directory", "MultiCaseSearcher.exceptionMessage.missingSolrPropertiesFile=Missing {0} file in {1}",
+ "# {0} - file name", "# {1} - case directory", "MultiCaseSearcher.exceptionMessage.solrPropertiesFileParseError=Error parsing {0} file in {1}",})
+ private static TextIndexMetadata getTextIndexMetadata(Path caseDirectoryPath) throws MultiCaseSearcherException {
+ final Path solrMetaDataFilePath = Paths.get(caseDirectoryPath.toString(), SOLR_METADATA_FILE_NAME);
+ final File solrMetaDataFile = solrMetaDataFilePath.toFile();
+ if (!solrMetaDataFile.exists() || !solrMetaDataFile.canRead()) {
+ throw new MultiCaseSearcherException(Bundle.MultiCaseSearcher_exceptionMessage_missingSolrPropertiesFile(SOLR_METADATA_FILE_NAME, caseDirectoryPath));
+ }
+ try {
+ final DocumentBuilder docBuilder = DocumentBuilderFactory.newInstance().newDocumentBuilder();
+ final Document doc = docBuilder.parse(solrMetaDataFile);
+ final XPath xPath = XPathFactory.newInstance().newXPath();
+ XPathExpression xPathExpr = xPath.compile(SOLR_CORE_NAME_XPATH);
+ final String solrCoreName = (String) xPathExpr.evaluate(doc, XPathConstants.STRING);
+ xPathExpr = xPath.compile(TEXT_INDEX_NAME_XPATH);
+ final String relativeTextIndexPath = (String) xPathExpr.evaluate(doc, XPathConstants.STRING);
+ Path textIndexPath = caseDirectoryPath.resolve(relativeTextIndexPath);
+ textIndexPath = textIndexPath.getParent(); // Remove "index" path component
+ final String textIndexUNCPath = pathUtils.convertPathToUNC(textIndexPath.toString());
+ return new TextIndexMetadata(caseDirectoryPath, solrCoreName, textIndexUNCPath);
+ } catch (ParserConfigurationException | SAXException | XPathExpressionException | IOException ex) {
+ throw new MultiCaseSearcherException(Bundle.MultiCaseSearcher_exceptionMessage_solrPropertiesFileParseError(SOLR_METADATA_FILE_NAME, caseDirectoryPath), ex);
+ }
+ }
+
+ /**
+ * Converts a keyword search query into a Solr query.
+ *
+ * @param searchQuery A keyword search query.
+ *
+ * @return A Solr query.
+ */
+ private static SolrQuery createSolrQuery(SearchQuery searchQuery) {
+ final SolrQuery solrQuery = new SolrQuery();
+ solrQuery.setQuery(searchQuery.getSearchTerm());
+ solrQuery.setRows(MAX_RESULTS_PER_CURSOR_MARK);
+ /*
+ * Note that setting the sort order is necessary for cursor based paging
+ * to work.
+ */
+ solrQuery.setSort(SolrQuery.SortClause.asc(SOLR_DOC_ID_FIELD));
+ solrQuery.setFields(SOLR_DOC_ID_FIELD, SOLR_DOC_CHUNK_SIZE_FIELD, SOLR_DOC_CONTENT_STR_FIELD);
+ return solrQuery;
+ }
+
+ /**
+ * Connects to the Solr server and loads the Solr core for a given case.
+ *
+ * @param aCase
+ *
+ * @return A Solr server client object that can be used for executing
+ * queries of the specified text index.
+ *
+ * @throws MultiCaseSearcherException
+ *
+ * @throws InterruptedException
+ */
+ @NbBundle.Messages({
+ "# {0} - connection info",
+ "# {1} - case name",
+ "# {2} - case directory",
+ "MultiCaseSearcher.exceptionMessage.errorLoadingCore=Error connecting to Solr server and loading core (URL: {0}) for case {1} in {2}"
+ })
+ private HttpSolrServer loadSolrCoreForCase(MultiCaseMetadata aCase) throws MultiCaseSearcherException, InterruptedException {
+ TextIndexMetadata textIndexMetadata = aCase.getTextIndexMetadata();
+ Server.IndexingServerProperties indexServer = Server.getMultiUserServerProperties(aCase.getCaseMetadata().getCaseDirectory());
+ final String serverURL = String.format(SOLR_SERVER_URL_FORMAT_STRING, indexServer.getHost(), indexServer.getPort());
+ try {
+ /*
+ * Connect to the Solr server.
+ */
+ final HttpSolrServer solrServer = new HttpSolrServer(serverURL);
+ CoreAdminRequest statusRequest = new CoreAdminRequest();
+ statusRequest.setCoreName(null);
+ statusRequest.setAction(CoreAdminParams.CoreAdminAction.STATUS);
+ statusRequest.setIndexInfoNeeded(false);
+ checkForCancellation();
+ statusRequest.process(solrServer);
+ checkForCancellation();
+
+ /*
+ * Load the core for the text index if it is not already loaded.
+ */
+ CoreAdminResponse response = CoreAdminRequest.getStatus(textIndexMetadata.getSolrCoreName(), solrServer);
+ if (null == response.getCoreStatus(textIndexMetadata.getSolrCoreName()).get(SOLR_CORE_INSTANCE_PATH_PROPERTY)) {
+ CoreAdminRequest.Create loadCoreRequest = new CoreAdminRequest.Create();
+ loadCoreRequest.setDataDir(textIndexMetadata.getTextIndexPath());
+ loadCoreRequest.setCoreName(textIndexMetadata.getSolrCoreName());
+ loadCoreRequest.setConfigSet(SOLR_CONFIG_SET_NAME);
+ loadCoreRequest.setIsLoadOnStartup(false);
+ loadCoreRequest.setIsTransient(true);
+ solrServer.request(loadCoreRequest);
+ }
+
+ /*
+ * Create a server client object that can be used for executing
+ * queries of the specified text index.
+ */
+ final String coreURL = String.format(SOLR_CORE_URL_FORMAT_STRING, indexServer.getHost(), indexServer.getPort(), textIndexMetadata.getSolrCoreName());
+ final HttpSolrServer coreServer = new HttpSolrServer(coreURL);
+ return coreServer;
+
+ } catch (SolrServerException | IOException ex) {
+ throw new MultiCaseSearcherException(Bundle.MultiCaseSearcher_exceptionMessage_errorLoadingCore(serverURL, aCase.getCaseMetadata().getCaseName(), textIndexMetadata.getCaseDirectoryPath()), ex);
+ }
+ }
+
+ /**
+ * Opens a case database.
+ *
+ * @param caseMetadata
+ *
+ * @return A case database.
+ *
+ * @throws MultiCaseSearcherException
+ * @throws InterruptedException
+ */
+ @NbBundle.Messages({
+ "# {0} - case_name",
+ "MultiCaseSearcher.exceptionMessage.failedToGetCaseDatabaseConnectionInfo=Failed to get case database connection info for case {0}",
+ "# {0} - PostgreSQL server host",
+ "# {1} - PostgreSQL server port",
+ "# {2} - case database name",
+ "# {3} - case directory",
+ "MultiCaseSearcher.exceptionMessage.errorOpeningCaseDatabase=Error connecting to PostgreSQL server (Host/Port: [{0}:{1}] and opening case database {2} for case at {3}"
+ })
+ private SleuthkitCase openCase(MultiCaseMetadata aCase) throws MultiCaseSearcherException, InterruptedException {
+ CaseDbConnectionInfo dbConnectionInfo;
+ try {
+ dbConnectionInfo = UserPreferences.getDatabaseConnectionInfo();
+ } catch (UserPreferencesException ex) {
+ throw new MultiCaseSearcherException(Bundle.MultiCaseSearcher_exceptionMessage_failedToGetCaseDatabaseConnectionInfo(aCase.getCaseMetadata().getCaseName()), ex);
+ }
+ checkForCancellation();
+ final CaseMetadata caseMetadata = aCase.getCaseMetadata();
+ try {
+ return SleuthkitCase.openCase(caseMetadata.getCaseDatabaseName(), UserPreferences.getDatabaseConnectionInfo(), caseMetadata.getCaseDirectory());
+ } catch (UserPreferencesException | TskCoreException ex) {
+ throw new MultiCaseSearcherException(Bundle.MultiCaseSearcher_exceptionMessage_errorOpeningCaseDatabase(dbConnectionInfo.getHost(), dbConnectionInfo.getPort(), caseMetadata.getCaseDatabaseName(), caseMetadata.getCaseDirectory()), ex);
+ }
+ }
+
+ /**
+ * Closes a case database.
+ *
+ * @param aCase a case database.
+ */
+ private static void closeCase(SleuthkitCase aCase) {
+ aCase.close();
+ }
+
+ /**
+ * Executes a keyword search searchTerm in the text index of a case.
+ *
+ * @param solrServer The Solr server.
+ * @param solrQuery The Solr searchTerm.
+ * @param caseDatabase The case database.
+ * @param aCase The case metadata.
+ *
+ * @return A list of search results, possibly empty.
+ *
+ * @throws MultiCaseSearcherException
+ * @throws InterruptedException
+ */
+ @NbBundle.Messages({
+ "# {0} - query",
+ "# {1} - case_name",
+ "MultiCaseSearcher.exceptionMessage.solrQueryError=Failed to execute query \"{0}\" on case {1}"
+ })
+ private Collection executeQuery(HttpSolrServer solrServer, SolrQuery solrQuery, SleuthkitCase caseDatabase, MultiCaseMetadata aCase) throws MultiCaseSearcherException, InterruptedException {
+ final List hits = new ArrayList<>();
+ final Set uniqueObjectIds = new HashSet<>();
+ String cursorMark = CursorMarkParams.CURSOR_MARK_START;
+ boolean allResultsProcessed = false;
+ while (!allResultsProcessed) {
+ checkForCancellation();
+ solrQuery.set(CursorMarkParams.CURSOR_MARK_PARAM, cursorMark);
+ QueryResponse response;
+ try {
+ checkForCancellation();
+ response = solrServer.query(solrQuery, SolrRequest.METHOD.POST);
+ } catch (SolrServerException ex) {
+ throw new MultiCaseSearcherException(Bundle.MultiCaseSearcher_exceptionMessage_solrQueryError(solrQuery.getQuery(), aCase.getCaseMetadata().getCaseName()), ex);
+ }
+ SolrDocumentList resultDocuments = response.getResults();
+ for (SolrDocument resultDoc : resultDocuments) {
+ checkForCancellation();
+ String solrDocumentId = resultDoc.getFieldValue(SOLR_DOC_ID_FIELD).toString();
+ Long solrObjectId = parseSolrObjectId(solrDocumentId);
+ if (!uniqueObjectIds.contains(solrObjectId)) {
+ uniqueObjectIds.add(solrObjectId);
+ checkForCancellation();
+ hits.add(processHit(solrObjectId, caseDatabase, aCase));
+ }
+ }
+ checkForCancellation();
+ String nextCursorMark = response.getNextCursorMark();
+ if (cursorMark.equals(nextCursorMark)) {
+ allResultsProcessed = true;
+ }
+ cursorMark = nextCursorMark;
+ }
+ return hits;
+ }
+
+ /**
+ * Parses a Solr document id to get the Solr object id.
+ *
+ * @param solrDocumentId A Solr document id.
+ *
+ * @return A Solr object id.
+ */
+ private static Long parseSolrObjectId(String solrDocumentId) {
+ /**
+ * A Solr document id is of the form [solr_object_id] for Content object
+ * metadata documents and
+ * [solr_object_id][SOLR_DOC_ID_PARTS_SEPARATOR][chunk_id] for Content
+ * object text chunk documents.
+ */
+ final String[] solrDocumentIdParts = solrDocumentId.split(SOLR_DOC_ID_PARTS_SEPARATOR);
+ if (1 == solrDocumentIdParts.length) {
+ return Long.parseLong(solrDocumentId);
+ } else {
+ return Long.parseLong(solrDocumentIdParts[0]);
+ }
+ }
+
+ /**
+ * Creates a keyword search hit object for a Content object identified by
+ * its Solr object id.
+ *
+ * @param solrObjectId The Solr object id of a Content object.
+ * @param caseDatabase The case database of the case that has the Content.
+ * @param caseInfo Metadata about the case that has the content.
+ *
+ * @return A search hit for the content identified by the Solr object id.
+ *
+ * @throws MultiCaseSearcherException
+ */
+ @NbBundle.Messages({
+ "# {0} - Solr document id",
+ "# {1} - case database name",
+ "# {2} - case directory",
+ "MultiCaseSearcher.exceptionMessage.hitProcessingError=Failed to query case database for processing of Solr object id {0} of case {1} in {2}"
+ })
+
+ private static SearchHit processHit(Long solrObjectId, SleuthkitCase caseDatabase, MultiCaseMetadata caseInfo) throws MultiCaseSearcherException {
+ try {
+ final long objectId = getObjectIdForSolrObjectId(solrObjectId, caseDatabase);
+ final CaseMetadata caseMetadata = caseInfo.getCaseMetadata();
+ final String caseDisplayName = caseMetadata.getCaseDisplayName();
+ final String caseDirectoryPath = caseMetadata.getCaseDirectory();
+ final Content content = caseDatabase.getContentById(objectId);
+ final Content dataSource = content.getDataSource();
+ final String dataSourceName = (dataSource == null) ? "" : dataSource.getName();
+ SearchHit.SourceType sourceType = SearchHit.SourceType.FILE;
+ String sourceName = "";
+ String sourcePath = "";
+ if (content instanceof AbstractFile) {
+ AbstractFile sourceFile = (AbstractFile) content;
+ sourceName = sourceFile.getName();
+ sourcePath = sourceFile.getLocalAbsPath();
+ if (null == sourcePath) {
+ sourceType = SearchHit.SourceType.FILE;
+ sourcePath = sourceFile.getUniquePath();
+ } else {
+ sourceType = SearchHit.SourceType.LOCAL_FILE;
+ sourceName = sourceFile.getName();
+ }
+ } else if (content instanceof BlackboardArtifact) {
+ BlackboardArtifact sourceArtifact = (BlackboardArtifact) content;
+ sourceType = SearchHit.SourceType.ARTIFACT;
+ BlackboardArtifact.Type artifactType = caseDatabase.getArtifactType(sourceArtifact.getArtifactTypeName());
+ sourceName = artifactType.getDisplayName();
+ Content source = sourceArtifact.getParent();
+ if (source instanceof AbstractFile) {
+ AbstractFile sourceFile = (AbstractFile) source;
+ sourcePath = sourceFile.getLocalAbsPath();
+ if (null == sourcePath) {
+ sourcePath = sourceFile.getUniquePath();
+ }
+ } else {
+ sourcePath = source.getUniquePath();
+ }
+ } else if (content instanceof Report) {
+ Report report = (Report) content;
+ sourceType = SearchHit.SourceType.REPORT;
+ sourceName = report.getReportName();
+ sourcePath = report.getUniquePath();
+ }
+
+ return new SearchHit(caseDisplayName, caseDirectoryPath, dataSourceName, sourceType, sourceName, sourcePath);
+ } catch (SQLException | TskCoreException ex) {
+ throw new MultiCaseSearcherException(Bundle.MultiCaseSearcher_exceptionMessage_hitProcessingError(solrObjectId, caseInfo.getCaseMetadata().getCaseName(), caseInfo.getCaseMetadata().getCaseDirectory()), ex);
+ }
+ }
+
+ /**
+ * Gets the Sleuthkit object id that corresponds to the Solr object id of
+ * some content.
+ *
+ * @param solrObjectId A solr object id for some content.
+ * @param caseDatabase The case database for the case that includes the
+ * content.
+ *
+ * @return The Sleuthkit object id of the content.
+ *
+ * @throws MultiCaseSearcherException
+ * @throws TskCoreException
+ * @throws SQLException
+ */
+ private static long getObjectIdForSolrObjectId(long solrObjectId, SleuthkitCase caseDatabase) throws MultiCaseSearcherException, TskCoreException, SQLException {
+ if (0 < solrObjectId) {
+ return solrObjectId;
+ } else {
+ try (SleuthkitCase.CaseDbQuery databaseQuery = caseDatabase.executeQuery("SELECT artifact_obj_id FROM blackboard_artifacts WHERE artifact_id = " + solrObjectId)) {
+ final ResultSet resultSet = databaseQuery.getResultSet();
+ if (resultSet.next()) {
+ return resultSet.getLong("artifact_obj_id");
+ } else {
+ throw new TskCoreException("Empty result set getting obj_id for artifact with artifact_id =" + solrObjectId);
+ }
+ }
+ }
+ }
+
+ /**
+ * Checks to see if the current thread has been interrupted (i.e., the search
+ * has been cancelled) and throws an InterruptedException if it has been.
+ *
+ * @throws InterruptedException
+ */
+ private void checkForCancellation() throws InterruptedException {
+ if (Thread.currentThread().isInterrupted() || searchStopped) {
+ throw new InterruptedException("Search Cancelled");
+ }
+ }
+
+ /**
+ * A bundle of metadata for a case.
+ */
+ private final static class MultiCaseMetadata {
+
+ private final CaseMetadata caseMetadata;
+ private final TextIndexMetadata textIndexMetadata;
+
+ /**
+ * Constructs a bundle of metadata for a case.
+ *
+ * @param caseMetadata The case metadata.
+ * @param textIndexMetaData The text index metadata for the case.
+ */
+ private MultiCaseMetadata(CaseMetadata caseMetadata, TextIndexMetadata textIndexMetaData) {
+ this.caseMetadata = caseMetadata;
+ this.textIndexMetadata = textIndexMetaData;
+ }
+
+ /**
+ * Gets the case metadata.
+ *
+ * @return The case metadata.
+ */
+ private CaseMetadata getCaseMetadata() {
+ return this.caseMetadata;
+ }
+
+ /**
+ * Gets the text index metadata for the case.
+ *
+ * @return The text index metadata.
+ */
+ private TextIndexMetadata getTextIndexMetadata() {
+ return this.textIndexMetadata;
+ }
+
+ }
+
+ /**
+ * Bundles a case directory path, a Solr core name, and a text index UNC
+ * path.
+ */
+ private final static class TextIndexMetadata {
+
+ private final Path caseDirectoryPath;
+ private final String solrCoreName;
+ private final String textIndexUNCPath;
+
+ /**
+ * Constructs an object that bundles a case directory path, a Solr core
+ * name, and a text index UNC path.
+ *
+ * @param caseDirectoryPath The case directory path.
+ * @param solrCoreName The Solr core name.
+ * @param textIndexUNCPath The text index path.
+ */
+ private TextIndexMetadata(Path caseDirectoryPath, String solrCoreName, String textIndexUNCPath) {
+ this.caseDirectoryPath = caseDirectoryPath;
+ this.solrCoreName = solrCoreName;
+ this.textIndexUNCPath = textIndexUNCPath;
+ }
+
+ /**
+ * Gets the case directory path.
+ *
+ * @return The path.
+ */
+ private Path getCaseDirectoryPath() {
+ return this.caseDirectoryPath;
+ }
+
+ /**
+ * Gets the Solr core name.
+ *
+ * @return The Solr core name.
+ */
+ private String getSolrCoreName() {
+ return this.solrCoreName;
+ }
+
+ /**
+ *
+ * Gets the UNC path of the text index.
+ *
+ * @return The path.
+ */
+ private String getTextIndexPath() {
+ return this.textIndexUNCPath;
+ }
+
+ }
+
+ /**
+ * Exception thrown if there is an error executing a search.
+ */
+ static final class MultiCaseSearcherException extends Exception {
+
+ private static final long serialVersionUID = 1L;
+
+ /**
+ * Constructs an instance of the exception thrown if there is an error
+ * executing a search.
+ *
+ * @param message The exception message.
+ */
+ private MultiCaseSearcherException(String message) {
+ super(message);
+ }
+
+ /**
+ * Constructs an instance of the exception thrown if there is an error
+ * executing a search.
+ *
+ * @param message The exception message.
+ * @param cause The Throwable that caused the error.
+ */
+ private MultiCaseSearcherException(String message, Throwable cause) {
+ super(message, cause);
+ }
+
+ }
+
+ /**
+ * Tells the MultiCaseSearcher that its current search can be stopped the
+ * next time it checks for cancellation.
+ */
+ void stopMultiCaseSearch() {
+ //This is necessary because if the interrupt occurs during CoreAdminRequest.process,
+ //CoreAdminRequest.getStatus, or HttpSolrServer.query the interrupt gets ignored
+ searchStopped = true;
+ }
+
+ /**
+ * Registers an object with the MultiCaseSearcher eventBus so that its
+ * subscribe methods can receive results.
+ *
+ * @param object the object to register with the eventBus
+ */
+ void registerWithEventBus(Object object) {
+ eventBus.register(object);
+ }
+
+ /**
+ * Unregisters an object with the MultiCaseSearcher eventBus so that its
+ * subscribe methods no longer receive results.
+ *
+ * @param object the object to unregister with the eventBus
+ */
+ void unregisterWithEventBus(Object object) {
+ eventBus.unregister(object);
+ }
+
+}
diff --git a/RecentActivity/src/org/sleuthkit/autopsy/recentactivity/Bundle.properties b/RecentActivity/src/org/sleuthkit/autopsy/recentactivity/Bundle.properties
index 7c75aeb9d0..6cd8dede00 100644
--- a/RecentActivity/src/org/sleuthkit/autopsy/recentactivity/Bundle.properties
+++ b/RecentActivity/src/org/sleuthkit/autopsy/recentactivity/Bundle.properties
@@ -1,5 +1,5 @@
OpenIDE-Module-Display-Category=Ingest Module
-OpenIDE-Module-Long-Description=Recent Activity ingest module.\n\n\The module extracts useful information about the recent user activity on the disk image being ingested, such as:\n\n- Recently open documents,\n- Web acitivity (sites visited, stored cookies, bookmarked sites, search engine queries, file downloads),\n- Recently attached devices,\n- Installed programs.\n\n\The module currently supports Windows only disk images.\n\The plugin is also fully functional when deployed on Windows version of Autopsy.
+OpenIDE-Module-Long-Description=Recent Activity ingest module.\n\nThe module extracts useful information about the recent user activity on the disk image being ingested, such as:\n\n- Recently open documents,\n- Web activity (sites visited, stored cookies, bookmarked sites, search engine queries, file downloads),\n- Recently attached devices,\n- Installed programs.\n\nThe module currently supports Windows only disk images.\nThe plugin is also fully functional when deployed on Windows version of Autopsy.
OpenIDE-Module-Name=RecentActivity
OpenIDE-Module-Short-Description=Recent Activity finder ingest module
Chrome.moduleName=Chrome
@@ -93,7 +93,7 @@ RecentDocumentsByLnk.parentModuleName=Recent Activity
SearchEngineURLQueryAnalyzer.moduleName.text=Search Engine
SearchEngineURLQueryAnalyzer.engineName.none=NONE
SearchEngineURLQueryAnalyzer.domainSubStr.none=NONE
-SearchEngineURLQueryAnalyzer.toString=Name: {0}\nDomain Substring: {1}\n\count: {2}\nSplit Tokens: \n{3}
+SearchEngineURLQueryAnalyzer.toString=Name: {0}\nDomain Substring: {1}\nCount: {2}\nSplit Tokens: \n{3}
SearchEngineURLQueryAnalyzer.parentModuleName.noSpace=RecentActivity
SearchEngineURLQueryAnalyzer.parentModuleName=Recent Activity
UsbDeviceIdMapper.parseAndLookup.text=Product: {0}