Interim commit of improved case deletion

This commit is contained in:
Richard Cordovano 2019-03-19 15:12:41 -04:00
parent 8b014494da
commit 95c3cf2d74
13 changed files with 796 additions and 674 deletions

View File

@ -32,11 +32,11 @@ import java.nio.file.Path;
import java.nio.file.Paths; import java.nio.file.Paths;
import java.sql.Connection; import java.sql.Connection;
import java.sql.DriverManager; import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.SQLException; import java.sql.SQLException;
import java.sql.Statement; import java.sql.Statement;
import java.text.ParseException; import java.text.ParseException;
import java.text.SimpleDateFormat; import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Collection; import java.util.Collection;
import java.util.Date; import java.util.Date;
import java.util.HashMap; import java.util.HashMap;
@ -60,7 +60,6 @@ import javax.annotation.concurrent.GuardedBy;
import javax.annotation.concurrent.ThreadSafe; import javax.annotation.concurrent.ThreadSafe;
import javax.swing.JOptionPane; import javax.swing.JOptionPane;
import javax.swing.SwingUtilities; import javax.swing.SwingUtilities;
import org.openide.util.Exceptions;
import org.openide.util.Lookup; import org.openide.util.Lookup;
import org.openide.util.NbBundle; import org.openide.util.NbBundle;
import org.openide.util.NbBundle.Messages; import org.openide.util.NbBundle.Messages;
@ -100,7 +99,6 @@ import org.sleuthkit.autopsy.coreutils.MessageNotifyUtil;
import org.sleuthkit.autopsy.coreutils.NetworkUtils; import org.sleuthkit.autopsy.coreutils.NetworkUtils;
import org.sleuthkit.autopsy.coreutils.PlatformUtil; import org.sleuthkit.autopsy.coreutils.PlatformUtil;
import org.sleuthkit.autopsy.coreutils.ThreadUtils; import org.sleuthkit.autopsy.coreutils.ThreadUtils;
import org.sleuthkit.autopsy.coreutils.TimeStampUtils;
import org.sleuthkit.autopsy.coreutils.TimeZoneUtils; import org.sleuthkit.autopsy.coreutils.TimeZoneUtils;
import org.sleuthkit.autopsy.coreutils.Version; import org.sleuthkit.autopsy.coreutils.Version;
import org.sleuthkit.autopsy.events.AutopsyEvent; import org.sleuthkit.autopsy.events.AutopsyEvent;
@ -717,7 +715,8 @@ public class Case {
* lower-level exception. * lower-level exception.
*/ */
@Messages({ @Messages({
"Case.exceptionMessage.cannotDeleteCurrentCase=Cannot delete current case, it must be closed first." "Case.exceptionMessage.cannotDeleteCurrentCase=Cannot delete current case, it must be closed first.",
"# {0} - case display name", "Case.exceptionMessage.deletionInterrupted=Deletion of the case {0} was cancelled."
}) })
public static void deleteCase(CaseMetadata metadata) throws CaseActionException { public static void deleteCase(CaseMetadata metadata) throws CaseActionException {
synchronized (caseActionSerializationLock) { synchronized (caseActionSerializationLock) {
@ -737,7 +736,16 @@ public class Case {
if (CaseType.SINGLE_USER_CASE == metadata.getCaseType()) { if (CaseType.SINGLE_USER_CASE == metadata.getCaseType()) {
deleteSingleUserCase(metadata, progressIndicator); deleteSingleUserCase(metadata, progressIndicator);
} else { } else {
deleteMultiUserCase(metadata, progressIndicator); try {
deleteMultiUserCase(metadata, progressIndicator);
} catch (InterruptedException ex) {
/*
* Task cancellation is not currently supported for this
* code path, so this catch block is not expected to be
* executed.
*/
throw new CaseActionException(Bundle.Case_exceptionMessage_deletionInterrupted(metadata.getCaseDisplayName()), ex);
}
} }
} finally { } finally {
progressIndicator.finish(); progressIndicator.finish();
@ -978,7 +986,7 @@ public class Case {
"Case.exceptionMessage.cannotGetLockToDeleteCase=Cannot delete case because it is open for another user or host.", "Case.exceptionMessage.cannotGetLockToDeleteCase=Cannot delete case because it is open for another user or host.",
"Case.progressMessage.fetchingCoordSvcNodeData=Fetching coordination service node data for the case..." "Case.progressMessage.fetchingCoordSvcNodeData=Fetching coordination service node data for the case..."
}) })
private static void deleteMultiUserCase(CaseMetadata metadata, ProgressIndicator progressIndicator) throws CaseActionException { private static void deleteMultiUserCase(CaseMetadata metadata, ProgressIndicator progressIndicator) throws CaseActionException, InterruptedException {
progressIndicator.progress(Bundle.Case_progressMessage_connectingToCoordSvc()); progressIndicator.progress(Bundle.Case_progressMessage_connectingToCoordSvc());
CoordinationService coordinationService; CoordinationService coordinationService;
try { try {
@ -1010,7 +1018,16 @@ public class Case {
throw new CaseActionException(Bundle.Case_exceptionMessage_errorsDeletingCase()); throw new CaseActionException(Bundle.Case_exceptionMessage_errorsDeletingCase());
} }
errorsOccurred = deleteMultiUserCase(caseNodeData, metadata, progressIndicator); errorsOccurred = deleteMultiUserCase(caseNodeData, metadata, progressIndicator, logger);
try {
deleteCaseResourcesLockNode(caseNodeData, progressIndicator);
} catch (CoordinationServiceException | InterruptedException ex) {
errorsOccurred = true;
logger.log(Level.WARNING, String.format("Error deleting the case resources lock coordination service node for the case at %s (%s) in %s", metadata.getCaseDisplayName(), metadata.getCaseName(), metadata.getCaseDirectory()), ex);
}
// RJCTODO: Is this behavior implemented correctly?
} catch (CoordinationServiceException ex) { } catch (CoordinationServiceException ex) {
logger.log(Level.SEVERE, String.format("Error exclusively locking the case directory for %s (%s) in %s", metadata.getCaseDisplayName(), metadata.getCaseName(), metadata.getCaseDirectory()), ex); logger.log(Level.SEVERE, String.format("Error exclusively locking the case directory for %s (%s) in %s", metadata.getCaseDisplayName(), metadata.getCaseName(), metadata.getCaseDirectory()), ex);
@ -1037,12 +1054,13 @@ public class Case {
* *
* Attempts to delete the case database, the text index, the case directory, * Attempts to delete the case database, the text index, the case directory,
* and the case resources coordination service lock code for a case and * and the case resources coordination service lock code for a case and
* removes the case from the recent cases menu of the mian application * removes the case from the recent cases menu of the main application
* window. * window.
* *
* @param caseNodeData The coordination service node data for the case. * @param caseNodeData The coordination service node data for the case.
* @param metadata The case metadata. * @param metadata The case metadata.
* @param progressIndicator A progress indicator. * @param progressIndicator A progress indicator.
* @param logger A logger.
* *
* @return True if one or more errors occurred (see log for details), false * @return True if one or more errors occurred (see log for details), false
* otherwise. * otherwise.
@ -1053,10 +1071,10 @@ public class Case {
* during a wait. * during a wait.
*/ */
@Beta @Beta
public static boolean deleteMultiUserCase(CaseNodeData caseNodeData, CaseMetadata metadata, ProgressIndicator progressIndicator) throws InterruptedException { public static boolean deleteMultiUserCase(CaseNodeData caseNodeData, CaseMetadata metadata, ProgressIndicator progressIndicator, Logger logger) throws InterruptedException {
boolean errorsOccurred = false; boolean errorsOccurred = false;
try { try {
deleteCaseDatabase(caseNodeData, metadata, progressIndicator); deleteCaseDatabase(caseNodeData, metadata, progressIndicator, logger);
} catch (UserPreferencesException | ClassNotFoundException | SQLException ex) { } catch (UserPreferencesException | ClassNotFoundException | SQLException ex) {
errorsOccurred = true; errorsOccurred = true;
logger.log(Level.WARNING, String.format("Failed to delete the case database for %s (%s) in %s", metadata.getCaseDisplayName(), metadata.getCaseName(), metadata.getCaseDirectory()), ex); logger.log(Level.WARNING, String.format("Failed to delete the case database for %s (%s) in %s", metadata.getCaseDisplayName(), metadata.getCaseName(), metadata.getCaseDirectory()), ex);
@ -1066,7 +1084,7 @@ public class Case {
} }
try { try {
deleteTextIndex(caseNodeData, metadata, progressIndicator); deleteTextIndex(caseNodeData, metadata, progressIndicator, logger);
} catch (KeywordSearchServiceException ex) { } catch (KeywordSearchServiceException ex) {
errorsOccurred = true; errorsOccurred = true;
logger.log(Level.WARNING, String.format("Failed to delete the text index for %s (%s) in %s", metadata.getCaseDisplayName(), metadata.getCaseName(), metadata.getCaseDirectory()), ex); logger.log(Level.WARNING, String.format("Failed to delete the text index for %s (%s) in %s", metadata.getCaseDisplayName(), metadata.getCaseName(), metadata.getCaseDirectory()), ex);
@ -1076,7 +1094,7 @@ public class Case {
} }
try { try {
deleteCaseDirectory(caseNodeData, metadata, progressIndicator); deleteCaseDirectory(caseNodeData, metadata, progressIndicator, logger);
} catch (CaseActionException ex) { } catch (CaseActionException ex) {
errorsOccurred = true; errorsOccurred = true;
logger.log(Level.WARNING, String.format("Failed to delete the case directory for %s (%s) in %s", metadata.getCaseDisplayName(), metadata.getCaseName(), metadata.getCaseDirectory()), ex); logger.log(Level.WARNING, String.format("Failed to delete the case directory for %s (%s) in %s", metadata.getCaseDisplayName(), metadata.getCaseName(), metadata.getCaseDirectory()), ex);
@ -1085,23 +1103,6 @@ public class Case {
return errorsOccurred; return errorsOccurred;
} }
deleteFromRecentCases(metadata, progressIndicator);
if (Thread.currentThread().isInterrupted()) {
Thread.currentThread().interrupt();
return errorsOccurred;
}
try {
deleteCaseResourcesLockNode(caseNodeData, progressIndicator);
} catch (CoordinationServiceException ex) {
errorsOccurred = true;
logger.log(Level.WARNING, String.format("Error deleting the case resources lock coordination service node for the case at %s (%s) in %s", metadata.getCaseDisplayName(), metadata.getCaseName(), metadata.getCaseDirectory()), ex);
} catch (InterruptedException ex) {
Thread.currentThread().interrupt();
return errorsOccurred;
}
return errorsOccurred; return errorsOccurred;
} }
@ -1127,15 +1128,20 @@ public class Case {
@Messages({ @Messages({
"Case.progressMessage.deletingCaseDatabase=Deleting case database..." "Case.progressMessage.deletingCaseDatabase=Deleting case database..."
}) })
private static void deleteCaseDatabase(CaseNodeData caseNodeData, CaseMetadata metadata, ProgressIndicator progressIndicator) throws UserPreferencesException, ClassNotFoundException, SQLException, InterruptedException { private static void deleteCaseDatabase(CaseNodeData caseNodeData, CaseMetadata metadata, ProgressIndicator progressIndicator, Logger logger) throws UserPreferencesException, ClassNotFoundException, SQLException, InterruptedException {
if (!caseNodeData.isDeletedFlagSet(CaseNodeData.DeletedFlags.CASE_DB)) { if (!caseNodeData.isDeletedFlagSet(CaseNodeData.DeletedFlags.CASE_DB)) {
progressIndicator.progress(Bundle.Case_progressMessage_deletingCaseDatabase()); progressIndicator.progress(Bundle.Case_progressMessage_deletingCaseDatabase());
logger.log(Level.INFO, String.format("Deleting case database for %s (%s) in %s", caseNodeData.getDisplayName(), caseNodeData.getName(), caseNodeData.getDirectory()));
CaseDbConnectionInfo info = UserPreferences.getDatabaseConnectionInfo(); CaseDbConnectionInfo info = UserPreferences.getDatabaseConnectionInfo();
String url = "jdbc:postgresql://" + info.getHost() + ":" + info.getPort() + "/postgres"; //NON-NLS String url = "jdbc:postgresql://" + info.getHost() + ":" + info.getPort() + "/postgres"; //NON-NLS
Class.forName("org.postgresql.Driver"); //NON-NLS Class.forName("org.postgresql.Driver"); //NON-NLS
try (Connection connection = DriverManager.getConnection(url, info.getUserName(), info.getPassword()); Statement statement = connection.createStatement()) { try (Connection connection = DriverManager.getConnection(url, info.getUserName(), info.getPassword()); Statement statement = connection.createStatement()) {
String deleteCommand = "DROP DATABASE \"" + metadata.getCaseDatabaseName() + "\""; //NON-NLS String dbExistsQuery = "SELECT 1 from pg_database WHERE datname = '" + metadata.getCaseDatabaseName() + "'";
statement.execute(deleteCommand); ResultSet queryResult = statement.executeQuery(dbExistsQuery);
if (queryResult.next()) {
String deleteCommand = "DROP DATABASE \"" + metadata.getCaseDatabaseName() + "\""; //NON-NLS
statement.execute(deleteCommand);
}
} }
setDeletedItemFlag(caseNodeData, CaseNodeData.DeletedFlags.CASE_DB); setDeletedItemFlag(caseNodeData, CaseNodeData.DeletedFlags.CASE_DB);
} }
@ -1174,8 +1180,9 @@ public class Case {
* data to be written to the * data to be written to the
* coordination service node database. * coordination service node database.
*/ */
private static void deleteTextIndex(CaseNodeData caseNodeData, CaseMetadata metadata, ProgressIndicator progressIndicator) throws KeywordSearchServiceException, InterruptedException { private static void deleteTextIndex(CaseNodeData caseNodeData, CaseMetadata metadata, ProgressIndicator progressIndicator, Logger logger) throws KeywordSearchServiceException, InterruptedException {
if (!caseNodeData.isDeletedFlagSet(CaseNodeData.DeletedFlags.TEXT_INDEX)) { if (!caseNodeData.isDeletedFlagSet(CaseNodeData.DeletedFlags.TEXT_INDEX)) {
logger.log(Level.INFO, String.format("Deleting text index for %s (%s) in %s", caseNodeData.getDisplayName(), caseNodeData.getName(), caseNodeData.getDirectory()));
deleteTextIndex(metadata, progressIndicator); deleteTextIndex(metadata, progressIndicator);
setDeletedItemFlag(caseNodeData, CaseNodeData.DeletedFlags.TEXT_INDEX); setDeletedItemFlag(caseNodeData, CaseNodeData.DeletedFlags.TEXT_INDEX);
} }
@ -1198,6 +1205,7 @@ public class Case {
// when the path is >= 255 chars. Actually, deprecate this method and // when the path is >= 255 chars. Actually, deprecate this method and
// replace it with one that throws instead of returning a boolean value. // replace it with one that throws instead of returning a boolean value.
progressIndicator.progress(Bundle.Case_progressMessage_deletingCaseDirectory()); progressIndicator.progress(Bundle.Case_progressMessage_deletingCaseDirectory());
logger.log(Level.INFO, String.format("Deleting case directory for %s (%s) in %s", metadata.getCaseDisplayName(), metadata.getCaseName(), metadata.getCaseDirectory()));
if (!FileUtil.deleteDir(new File(metadata.getCaseDirectory()))) { if (!FileUtil.deleteDir(new File(metadata.getCaseDirectory()))) {
throw new CaseActionException(String.format("Failed to delete %s", metadata.getCaseDirectory())); throw new CaseActionException(String.format("Failed to delete %s", metadata.getCaseDirectory()));
} }
@ -1216,7 +1224,7 @@ public class Case {
* coordination service data to be written to * coordination service data to be written to
* the coordination service node database. * the coordination service node database.
*/ */
private static void deleteCaseDirectory(CaseNodeData caseNodeData, CaseMetadata metadata, ProgressIndicator progressIndicator) throws CaseActionException, InterruptedException { private static void deleteCaseDirectory(CaseNodeData caseNodeData, CaseMetadata metadata, ProgressIndicator progressIndicator, Logger logger) throws CaseActionException, InterruptedException {
if (!caseNodeData.isDeletedFlagSet(CaseNodeData.DeletedFlags.CASE_DIR)) { if (!caseNodeData.isDeletedFlagSet(CaseNodeData.DeletedFlags.CASE_DIR)) {
deleteCaseDirectory(metadata, progressIndicator); deleteCaseDirectory(metadata, progressIndicator);
setDeletedItemFlag(caseNodeData, CaseNodeData.DeletedFlags.CASE_DIR); setDeletedItemFlag(caseNodeData, CaseNodeData.DeletedFlags.CASE_DIR);
@ -1242,7 +1250,6 @@ public class Case {
} }
} }
// RJCTODO: Copy-paste instead
/** /**
* IMPORTANT: This is a "beta" method and is subject to change or removal * IMPORTANT: This is a "beta" method and is subject to change or removal
* without notice! * without notice!
@ -1264,14 +1271,13 @@ public class Case {
"Case.progressMessage.deletingResourcesLockNode=Deleting case resources lock node..." "Case.progressMessage.deletingResourcesLockNode=Deleting case resources lock node..."
}) })
@Beta @Beta
private static void deleteCaseResourcesLockNode(CaseNodeData caseNodeData, ProgressIndicator progressIndicator) throws CoordinationServiceException, InterruptedException { public static void deleteCaseResourcesLockNode(CaseNodeData caseNodeData, ProgressIndicator progressIndicator) throws CoordinationServiceException, InterruptedException {
progressIndicator.progress(Bundle.Case_progressMessage_deletingResourcesLockNode()); progressIndicator.progress(Bundle.Case_progressMessage_deletingResourcesLockNode());
String resourcesLockNodePath = caseNodeData.getDirectory().toString() + RESOURCES_LOCK_SUFFIX;//RJCTODO: Use utility String resourcesLockNodePath = caseNodeData.getDirectory().toString() + RESOURCES_LOCK_SUFFIX;//RJCTODO: Use utility
CoordinationService coordinationService = CoordinationService.getInstance(); CoordinationService coordinationService = CoordinationService.getInstance();
coordinationService.deleteNode(CategoryNode.CASES, resourcesLockNodePath); coordinationService.deleteNode(CategoryNode.CASES, resourcesLockNodePath);
} }
// RJCTODO: Copy-paste instead
/** /**
* IMPORTANT: This is a "beta" method and is subject to change or removal * IMPORTANT: This is a "beta" method and is subject to change or removal
* without notice! * without notice!

View File

@ -42,7 +42,7 @@ public class CaseCoordinationServiceUtils {
return caseDirectoryPath.toString(); return caseDirectoryPath.toString();
} }
public static String getCaseLockName(Path caseDirectoryPath) { public static String getCaseNameLockName(Path caseDirectoryPath) {
String caseName = caseDirectoryPath.getFileName().toString(); String caseName = caseDirectoryPath.getFileName().toString();
if (TimeStampUtils.endsWithTimeStamp(caseName)) { if (TimeStampUtils.endsWithTimeStamp(caseName)) {
caseName = TimeStampUtils.removeTimeStamp(caseName); caseName = TimeStampUtils.removeTimeStamp(caseName);

View File

@ -161,7 +161,7 @@ final public class MultiUserCaseNodeDataCollector { // RJCTODO: Shorten name aft
deleteCoordinationServiceNode(coordinationService, CaseCoordinationServiceUtils.getCaseResourcesLockName(caseDirectoryPath)); deleteCoordinationServiceNode(coordinationService, CaseCoordinationServiceUtils.getCaseResourcesLockName(caseDirectoryPath));
deleteCoordinationServiceNode(coordinationService, CaseCoordinationServiceUtils.getCaseAutoIngestLogLockName(caseDirectoryPath)); deleteCoordinationServiceNode(coordinationService, CaseCoordinationServiceUtils.getCaseAutoIngestLogLockName(caseDirectoryPath));
deleteCoordinationServiceNode(coordinationService, CaseCoordinationServiceUtils.getCaseDirectoryLockName(caseDirectoryPath)); deleteCoordinationServiceNode(coordinationService, CaseCoordinationServiceUtils.getCaseDirectoryLockName(caseDirectoryPath));
deleteCoordinationServiceNode(coordinationService, CaseCoordinationServiceUtils.getCaseLockName(caseDirectoryPath)); deleteCoordinationServiceNode(coordinationService, CaseCoordinationServiceUtils.getCaseNameLockName(caseDirectoryPath));
} }
/** /**

View File

@ -142,22 +142,22 @@ public interface MultiUserCaseBrowserCustomizer {
"MultiUserCaseBrowserCustomizer.column.createTime=Create Time", "MultiUserCaseBrowserCustomizer.column.createTime=Create Time",
"MultiUserCaseBrowserCustomizer.column.directory=Directory", "MultiUserCaseBrowserCustomizer.column.directory=Directory",
"MultiUserCaseBrowserCustomizer.column.lastAccessTime=Last Access Time", "MultiUserCaseBrowserCustomizer.column.lastAccessTime=Last Access Time",
"MultiUserCaseBrowserCustomizer.column.manifestFileZNodesDeleteStatus=Manifest Znodes Deleted",
"MultiUserCaseBrowserCustomizer.column.dataSourcesDeleteStatus=Data Sources Deleted",
"MultiUserCaseBrowserCustomizer.column.textIndexDeleteStatus=Text Index Deleted", "MultiUserCaseBrowserCustomizer.column.textIndexDeleteStatus=Text Index Deleted",
"MultiUserCaseBrowserCustomizer.column.caseDbDeleteStatus=Case Database Deleted", "MultiUserCaseBrowserCustomizer.column.caseDbDeleteStatus=Case Database Deleted",
"MultiUserCaseBrowserCustomizer.column.caseDirDeleteStatus=Case Directory Deleted", "MultiUserCaseBrowserCustomizer.column.caseDirDeleteStatus=Case Directory Deleted"
"MultiUserCaseBrowserCustomizer.column.dataSourcesDeleteStatus=Data Sources Deleted",
"MultiUserCaseBrowserCustomizer.column.manifestCoordSvcNodesDeleteStatus=Manifest ZooKeeper Node Deleted"
}) })
public enum Column { public enum Column {
DISPLAY_NAME(Bundle.MultiUserCaseBrowserCustomizer_column_displayName()), DISPLAY_NAME(Bundle.MultiUserCaseBrowserCustomizer_column_displayName()),
CREATE_DATE(Bundle.MultiUserCaseBrowserCustomizer_column_createTime()), CREATE_DATE(Bundle.MultiUserCaseBrowserCustomizer_column_createTime()),
DIRECTORY(Bundle.MultiUserCaseBrowserCustomizer_column_directory()), DIRECTORY(Bundle.MultiUserCaseBrowserCustomizer_column_directory()),
LAST_ACCESS_DATE(Bundle.MultiUserCaseBrowserCustomizer_column_lastAccessTime()), LAST_ACCESS_DATE(Bundle.MultiUserCaseBrowserCustomizer_column_lastAccessTime()),
TEXT_INDEX_DELETION_STATUS(Bundle.MultiUserCaseBrowserCustomizer_column_textIndexDeleteStatus()), MANIFEST_FILE_ZNODES_DELETE_STATUS(Bundle.MultiUserCaseBrowserCustomizer_column_manifestFileZNodesDeleteStatus()),
CASE_DB_DELETION_STATUS(Bundle.MultiUserCaseBrowserCustomizer_column_caseDbDeleteStatus()), DATA_SOURCES_DELETE_STATUS(Bundle.MultiUserCaseBrowserCustomizer_column_dataSourcesDeleteStatus()),
CASE_DIR_DELETION_STATUS(Bundle.MultiUserCaseBrowserCustomizer_column_caseDirDeleteStatus()), TEXT_INDEX_DELETE_STATUS(Bundle.MultiUserCaseBrowserCustomizer_column_textIndexDeleteStatus()),
DATA_SOURCES_DELETION_STATUS(Bundle.MultiUserCaseBrowserCustomizer_column_dataSourcesDeleteStatus()), CASE_DB_DELETE_STATUS(Bundle.MultiUserCaseBrowserCustomizer_column_caseDbDeleteStatus()),
MANIFEST_FILE_LOCK_NODES_DELETION_STATUS(Bundle.MultiUserCaseBrowserCustomizer_column_manifestCoordSvcNodesDeleteStatus()); CASE_DIR_DELETE_STATUS(Bundle.MultiUserCaseBrowserCustomizer_column_caseDirDeleteStatus());
private final String displayName; private final String displayName;

View File

@ -25,8 +25,10 @@ import javax.swing.Action;
import org.openide.nodes.AbstractNode; import org.openide.nodes.AbstractNode;
import org.openide.nodes.Children; import org.openide.nodes.Children;
import org.openide.nodes.Sheet; import org.openide.nodes.Sheet;
import org.openide.util.NbBundle;
import org.openide.util.lookup.Lookups; import org.openide.util.lookup.Lookups;
import org.sleuthkit.autopsy.casemodule.multiusercases.CaseNodeData; import org.sleuthkit.autopsy.casemodule.multiusercases.CaseNodeData;
import org.sleuthkit.autopsy.casemodule.multiusercases.CaseNodeData.DeletedFlags;
import org.sleuthkit.autopsy.casemodule.multiusercasesbrowser.MultiUserCaseBrowserCustomizer.Column; import org.sleuthkit.autopsy.casemodule.multiusercasesbrowser.MultiUserCaseBrowserCustomizer.Column;
import org.sleuthkit.autopsy.datamodel.NodeProperty; import org.sleuthkit.autopsy.datamodel.NodeProperty;
@ -75,6 +77,21 @@ final class MultiUserCaseNode extends AbstractNode {
case LAST_ACCESS_DATE: case LAST_ACCESS_DATE:
sheetSet.put(new NodeProperty<>(propName, propName, propName, caseNodeData.getLastAccessDate())); sheetSet.put(new NodeProperty<>(propName, propName, propName, caseNodeData.getLastAccessDate()));
break; break;
case MANIFEST_FILE_ZNODES_DELETE_STATUS:
sheetSet.put(new NodeProperty<>(propName, propName, propName, isDeleted(DeletedFlags.MANIFEST_FILE_LOCK_NODES)));
break;
case DATA_SOURCES_DELETE_STATUS:
sheetSet.put(new NodeProperty<>(propName, propName, propName, isDeleted(DeletedFlags.DATA_SOURCES)));
break;
case TEXT_INDEX_DELETE_STATUS:
sheetSet.put(new NodeProperty<>(propName, propName, propName, isDeleted(DeletedFlags.TEXT_INDEX)));
break;
case CASE_DB_DELETE_STATUS:
sheetSet.put(new NodeProperty<>(propName, propName, propName, isDeleted(DeletedFlags.CASE_DB)));
break;
case CASE_DIR_DELETE_STATUS:
sheetSet.put(new NodeProperty<>(propName, propName, propName, isDeleted(DeletedFlags.CASE_DIR)));
break;
default: default:
break; break;
} }
@ -95,4 +112,20 @@ final class MultiUserCaseNode extends AbstractNode {
return customizer.getPreferredAction(caseNodeData); return customizer.getPreferredAction(caseNodeData);
} }
/**
* Interprets the deletion status of part of a case.
*
* @param flag The coordination service node data deleted items flag
* to interpret.
*
* @return A string stating "True" or "False."
*/
@NbBundle.Messages({
"MultiUserCaseNode.columnValue.true=True",
"MultiUserCaseNode.column.createTime=False",
})
private String isDeleted(CaseNodeData.DeletedFlags flag) {
return caseNodeData.isDeletedFlagSet(flag) ? "True" : "False";
}
} }

View File

@ -33,7 +33,6 @@ import javax.annotation.concurrent.Immutable;
import javax.annotation.concurrent.ThreadSafe; import javax.annotation.concurrent.ThreadSafe;
import org.sleuthkit.autopsy.corecomponentinterfaces.DataSourceProcessor; import org.sleuthkit.autopsy.corecomponentinterfaces.DataSourceProcessor;
import org.sleuthkit.autopsy.coreutils.NetworkUtils; import org.sleuthkit.autopsy.coreutils.NetworkUtils;
import org.sleuthkit.autopsy.experimental.autoingest.Manifest;
import org.sleuthkit.autopsy.ingest.DataSourceIngestJob.Snapshot; import org.sleuthkit.autopsy.ingest.DataSourceIngestJob.Snapshot;
import org.sleuthkit.autopsy.ingest.IngestJob; import org.sleuthkit.autopsy.ingest.IngestJob;
import org.sleuthkit.autopsy.ingest.IngestManager.IngestThreadActivitySnapshot; import org.sleuthkit.autopsy.ingest.IngestManager.IngestThreadActivitySnapshot;
@ -194,6 +193,7 @@ final class AutoIngestJob implements Comparable<AutoIngestJob>, IngestProgressSn
this.ingestThreadsSnapshot = Collections.emptyList(); this.ingestThreadsSnapshot = Collections.emptyList();
this.ingestJobsSnapshot = Collections.emptyList(); this.ingestJobsSnapshot = Collections.emptyList();
this.moduleRunTimesSnapshot = Collections.emptyMap(); this.moduleRunTimesSnapshot = Collections.emptyMap();
} catch (Exception ex) { } catch (Exception ex) {
throw new AutoIngestJobException(String.format("Error creating automated ingest job"), ex); throw new AutoIngestJobException(String.format("Error creating automated ingest job"), ex);
} }

View File

@ -1,7 +1,7 @@
/* /*
* Autopsy Forensic Browser * Autopsy Forensic Browser
* *
* Copyright 2011-2018 Basis Technology Corp. * Copyright 2011-2019 Basis Technology Corp.
* Contact: carrier <at> sleuthkit <dot> org * Contact: carrier <at> sleuthkit <dot> org
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
@ -22,6 +22,7 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
import java.beans.PropertyChangeEvent; import java.beans.PropertyChangeEvent;
import java.beans.PropertyChangeListener; import java.beans.PropertyChangeListener;
import java.io.File; import java.io.File;
import java.io.FileWriter;
import java.io.IOException; import java.io.IOException;
import static java.nio.file.FileVisitOption.FOLLOW_LINKS; import static java.nio.file.FileVisitOption.FOLLOW_LINKS;
import java.nio.file.FileVisitResult; import java.nio.file.FileVisitResult;
@ -32,7 +33,6 @@ import java.nio.file.Files;
import java.nio.file.Path; import java.nio.file.Path;
import java.nio.file.Paths; import java.nio.file.Paths;
import java.nio.file.attribute.BasicFileAttributes; import java.nio.file.attribute.BasicFileAttributes;
import java.sql.SQLException;
import java.time.Duration; import java.time.Duration;
import java.time.Instant; import java.time.Instant;
import java.util.ArrayList; import java.util.ArrayList;
@ -40,11 +40,9 @@ import java.util.Arrays;
import java.util.Collections; import java.util.Collections;
import java.util.Date; import java.util.Date;
import java.util.EnumSet; import java.util.EnumSet;
import java.util.HashMap;
import java.util.HashSet; import java.util.HashSet;
import java.util.Iterator; import java.util.Iterator;
import java.util.List; import java.util.List;
import java.util.Map;
import java.util.Observable; import java.util.Observable;
import java.util.Set; import java.util.Set;
import java.util.UUID; import java.util.UUID;
@ -70,7 +68,6 @@ import org.sleuthkit.autopsy.coordinationservice.CoordinationService.Lock;
import org.sleuthkit.autopsy.core.RuntimeProperties; import org.sleuthkit.autopsy.core.RuntimeProperties;
import org.sleuthkit.autopsy.core.ServicesMonitor; import org.sleuthkit.autopsy.core.ServicesMonitor;
import org.sleuthkit.autopsy.core.ServicesMonitor.ServicesMonitorException; import org.sleuthkit.autopsy.core.ServicesMonitor.ServicesMonitorException;
import org.sleuthkit.autopsy.core.UserPreferencesException;
import org.sleuthkit.autopsy.corecomponentinterfaces.DataSourceProcessorCallback; import org.sleuthkit.autopsy.corecomponentinterfaces.DataSourceProcessorCallback;
import org.sleuthkit.autopsy.corecomponentinterfaces.DataSourceProcessorCallback.DataSourceProcessorResult; import org.sleuthkit.autopsy.corecomponentinterfaces.DataSourceProcessorCallback.DataSourceProcessorResult;
import static org.sleuthkit.autopsy.corecomponentinterfaces.DataSourceProcessorCallback.DataSourceProcessorResult.CRITICAL_ERRORS; import static org.sleuthkit.autopsy.corecomponentinterfaces.DataSourceProcessorCallback.DataSourceProcessorResult.CRITICAL_ERRORS;
@ -126,6 +123,7 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
private static final int NUM_INPUT_SCAN_SCHEDULING_THREADS = 1; private static final int NUM_INPUT_SCAN_SCHEDULING_THREADS = 1;
private static final String INPUT_SCAN_SCHEDULER_THREAD_NAME = "AIM-input-scan-scheduler-%d"; private static final String INPUT_SCAN_SCHEDULER_THREAD_NAME = "AIM-input-scan-scheduler-%d";
private static final String INPUT_SCAN_THREAD_NAME = "AIM-input-scan-%d"; private static final String INPUT_SCAN_THREAD_NAME = "AIM-input-scan-%d";
private static final int INPUT_SCAN_LOCKING_TIMEOUT_MINS = 5;
private static final String AUTO_INGEST_THREAD_NAME = "AIM-job-processing-%d"; private static final String AUTO_INGEST_THREAD_NAME = "AIM-job-processing-%d";
private static final String LOCAL_HOST_NAME = NetworkUtils.getLocalHostName(); private static final String LOCAL_HOST_NAME = NetworkUtils.getLocalHostName();
private static final String EVENT_CHANNEL_NAME = "Auto-Ingest-Manager-Events"; private static final String EVENT_CHANNEL_NAME = "Auto-Ingest-Manager-Events";
@ -145,6 +143,7 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
private static final String JOB_STATUS_PUBLISHING_THREAD_NAME = "AIM-job-status-event-publisher-%d"; private static final String JOB_STATUS_PUBLISHING_THREAD_NAME = "AIM-job-status-event-publisher-%d";
private static final long MAX_MISSED_JOB_STATUS_UPDATES = 10; private static final long MAX_MISSED_JOB_STATUS_UPDATES = 10;
private static final int DEFAULT_PRIORITY = 0; private static final int DEFAULT_PRIORITY = 0;
private static String CASE_MANIFESTS_LIST_FILE_NAME = "auto-ingest-job-manifests.txt";
private static final Logger sysLogger = AutoIngestSystemLogger.getLogger(); private static final Logger sysLogger = AutoIngestSystemLogger.getLogger();
private static AutoIngestManager instance; private static AutoIngestManager instance;
private final AutopsyEventPublisher eventPublisher; private final AutopsyEventPublisher eventPublisher;
@ -157,8 +156,6 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
private final ConcurrentHashMap<String, AutoIngestJob> hostNamesToRunningJobs; private final ConcurrentHashMap<String, AutoIngestJob> hostNamesToRunningJobs;
private final Object jobsLock; private final Object jobsLock;
@GuardedBy("jobsLock") @GuardedBy("jobsLock")
private final Map<String, Set<Path>> casesToManifests;
@GuardedBy("jobsLock")
private List<AutoIngestJob> pendingJobs; private List<AutoIngestJob> pendingJobs;
@GuardedBy("jobsLock") @GuardedBy("jobsLock")
private AutoIngestJob currentJob; private AutoIngestJob currentJob;
@ -174,6 +171,10 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
private volatile AutoIngestNodeStateEvent lastPublishedStateEvent; private volatile AutoIngestNodeStateEvent lastPublishedStateEvent;
static String getCaseManifestsListFileName() {
return CASE_MANIFESTS_LIST_FILE_NAME;
}
/** /**
* Gets a singleton auto ingest manager responsible for processing auto * Gets a singleton auto ingest manager responsible for processing auto
* ingest jobs defined by manifest files that can be added to any level of a * ingest jobs defined by manifest files that can be added to any level of a
@ -205,7 +206,6 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
hostNamesToRunningJobs = new ConcurrentHashMap<>(); hostNamesToRunningJobs = new ConcurrentHashMap<>();
hostNamesToLastMsgTime = new ConcurrentHashMap<>(); hostNamesToLastMsgTime = new ConcurrentHashMap<>();
jobsLock = new Object(); jobsLock = new Object();
casesToManifests = new HashMap<>();
pendingJobs = new ArrayList<>(); pendingJobs = new ArrayList<>();
completedJobs = new ArrayList<>(); completedJobs = new ArrayList<>();
try { try {
@ -694,7 +694,7 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
int oldPriority = job.getPriority(); int oldPriority = job.getPriority();
job.setPriority(DEFAULT_PRIORITY); job.setPriority(DEFAULT_PRIORITY);
try { try {
this.updateCoordinationServiceManifestNode(job); this.updateAutoIngestJobData(job);
} catch (CoordinationServiceException | InterruptedException ex) { } catch (CoordinationServiceException | InterruptedException ex) {
job.setPriority(oldPriority); job.setPriority(oldPriority);
throw new AutoIngestManagerException("Error updating case priority", ex); throw new AutoIngestManagerException("Error updating case priority", ex);
@ -744,7 +744,7 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
int oldPriority = job.getPriority(); int oldPriority = job.getPriority();
job.setPriority(maxPriority); job.setPriority(maxPriority);
try { try {
this.updateCoordinationServiceManifestNode(job); this.updateAutoIngestJobData(job);
} catch (CoordinationServiceException | InterruptedException ex) { } catch (CoordinationServiceException | InterruptedException ex) {
job.setPriority(oldPriority); job.setPriority(oldPriority);
throw new AutoIngestManagerException("Error updating case priority", ex); throw new AutoIngestManagerException("Error updating case priority", ex);
@ -796,7 +796,7 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
int oldPriority = jobToDeprioritize.getPriority(); int oldPriority = jobToDeprioritize.getPriority();
jobToDeprioritize.setPriority(DEFAULT_PRIORITY); jobToDeprioritize.setPriority(DEFAULT_PRIORITY);
try { try {
this.updateCoordinationServiceManifestNode(jobToDeprioritize); this.updateAutoIngestJobData(jobToDeprioritize);
} catch (CoordinationServiceException | InterruptedException ex) { } catch (CoordinationServiceException | InterruptedException ex) {
jobToDeprioritize.setPriority(oldPriority); jobToDeprioritize.setPriority(oldPriority);
throw new AutoIngestManagerException("Error updating job priority", ex); throw new AutoIngestManagerException("Error updating job priority", ex);
@ -854,7 +854,7 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
int oldPriority = jobToPrioritize.getPriority(); int oldPriority = jobToPrioritize.getPriority();
jobToPrioritize.setPriority(maxPriority); jobToPrioritize.setPriority(maxPriority);
try { try {
this.updateCoordinationServiceManifestNode(jobToPrioritize); this.updateAutoIngestJobData(jobToPrioritize);
} catch (CoordinationServiceException | InterruptedException ex) { } catch (CoordinationServiceException | InterruptedException ex) {
jobToPrioritize.setPriority(oldPriority); jobToPrioritize.setPriority(oldPriority);
throw new AutoIngestManagerException("Error updating job priority", ex); throw new AutoIngestManagerException("Error updating job priority", ex);
@ -909,7 +909,7 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
completedJob.setCompletedDate(new Date(0)); completedJob.setCompletedDate(new Date(0));
completedJob.setProcessingStatus(PENDING); completedJob.setProcessingStatus(PENDING);
completedJob.setProcessingStage(AutoIngestJob.Stage.PENDING, Date.from(Instant.now())); completedJob.setProcessingStage(AutoIngestJob.Stage.PENDING, Date.from(Instant.now()));
updateCoordinationServiceManifestNode(completedJob); updateAutoIngestJobData(completedJob);
pendingJobs.add(completedJob); pendingJobs.add(completedJob);
} catch (CoordinationServiceException ex) { } catch (CoordinationServiceException ex) {
sysLogger.log(Level.SEVERE, String.format("Coordination service error while reprocessing %s", manifestPath), ex); sysLogger.log(Level.SEVERE, String.format("Coordination service error while reprocessing %s", manifestPath), ex);
@ -996,15 +996,12 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
} }
/** /**
* Sets the coordination service manifest node. * Writes the node data for an auto ingest job to the job's manifest file
* * lock coordination service node.
* Note that a new auto ingest job node data object will be created from the
* job passed in. Thus, if the data version of the node has changed, the
* node will be "upgraded" as well as updated.
* *
* @param job The auto ingest job. * @param job The auto ingest job.
*/ */
void updateCoordinationServiceManifestNode(AutoIngestJob job) throws CoordinationServiceException, InterruptedException { void updateAutoIngestJobData(AutoIngestJob job) throws CoordinationServiceException, InterruptedException {
AutoIngestJobNodeData nodeData = new AutoIngestJobNodeData(job); AutoIngestJobNodeData nodeData = new AutoIngestJobNodeData(job);
String manifestNodePath = job.getManifest().getFilePath().toString(); String manifestNodePath = job.getManifest().getFilePath().toString();
byte[] rawData = nodeData.toArray(); byte[] rawData = nodeData.toArray();
@ -1016,14 +1013,21 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
* *
* @param caseDirectoryPath The case directory path. * @param caseDirectoryPath The case directory path.
* *
* @throws CoordinationService.CoordinationServiceException * @throws CoordinationServiceException If there was an error getting the
* @throws InterruptedException * node data from the cooordination
* @throws IOException * service.
* @throws IOException If the node data was missing or
* there was an error interpreting it.
* @throws InterruptedException If the thread running the input
* directory scan task is interrupted
* while blocked, i.e., if auto ingest
* is shutting down.
*/ */
private void setCaseNodeDataErrorsOccurred(Path caseDirectoryPath) throws CoordinationServiceException, InterruptedException, IOException { private void setCaseNodeDataErrorsOccurred(Path caseDirectoryPath) throws IOException, CoordinationServiceException, InterruptedException {
CaseNodeData caseNodeData = new CaseNodeData(coordinationService.getNodeData(CoordinationService.CategoryNode.CASES, caseDirectoryPath.toString())); byte[] rawData = coordinationService.getNodeData(CoordinationService.CategoryNode.CASES, caseDirectoryPath.toString());
CaseNodeData caseNodeData = new CaseNodeData(rawData);
caseNodeData.setErrorsOccurred(true); caseNodeData.setErrorsOccurred(true);
byte[] rawData = caseNodeData.toArray(); rawData = caseNodeData.toArray();
coordinationService.setNodeData(CoordinationService.CategoryNode.CASES, caseDirectoryPath.toString(), rawData); coordinationService.setNodeData(CoordinationService.CategoryNode.CASES, caseDirectoryPath.toString(), rawData);
} }
@ -1088,6 +1092,7 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
private final List<AutoIngestJob> newPendingJobsList = new ArrayList<>(); private final List<AutoIngestJob> newPendingJobsList = new ArrayList<>();
private final List<AutoIngestJob> newCompletedJobsList = new ArrayList<>(); private final List<AutoIngestJob> newCompletedJobsList = new ArrayList<>();
private Lock currentDirLock;
/** /**
* Searches the input directories for manifest files. The search results * Searches the input directories for manifest files. The search results
@ -1109,9 +1114,9 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
} catch (Exception ex) { } catch (Exception ex) {
/* /*
* NOTE: Need to catch all exceptions here. Otherwise * NOTE: Need to catch all unhandled exceptions here.
* uncaught exceptions will propagate up to the calling * Otherwise uncaught exceptions will propagate up to the
* thread and may stop it from running. * calling thread and may stop it from running.
*/ */
sysLogger.log(Level.SEVERE, String.format("Error scanning the input directory %s", rootInputDirectory), ex); sysLogger.log(Level.SEVERE, String.format("Error scanning the input directory %s", rootInputDirectory), ex);
} }
@ -1145,20 +1150,15 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
} }
/** /**
* Invoked for a file in a directory. If the file is a manifest file, * Creates a pending or completed auto ingest job if the file visited is
* creates a pending pending or completed auto ingest job for the * a manifest file, based on the data stored in the coordination service
* manifest, based on the data stored in the coordination service node * node for the manifest.
* for the manifest.
* <p>
* Note that the mapping of case names to manifest paths that is used
* for case deletion is updated as well.
* *
* @param filePath The path of the file. * @param filePath The path of the file.
* @param attrs The file system attributes of the file. * @param attrs The file system attributes of the file.
* *
* @return TERMINATE if auto ingest is shutting down, CONTINUE if it has * @return TERMINATE if auto ingest is shutting down, CONTINUE if it has
* not. * not.
*
*/ */
@Override @Override
public FileVisitResult visitFile(Path filePath, BasicFileAttributes attrs) { public FileVisitResult visitFile(Path filePath, BasicFileAttributes attrs) {
@ -1167,6 +1167,11 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
} }
try { try {
/*
* Determine whether or not the file is an auto ingest job
* manifest file. If it is, then parse it. Otherwise, move on to
* the next file in the directory.
*/
Manifest manifest = null; Manifest manifest = null;
for (ManifestFileParser parser : Lookup.getDefault().lookupAll(ManifestFileParser.class)) { for (ManifestFileParser parser : Lookup.getDefault().lookupAll(ManifestFileParser.class)) {
if (parser.fileIsManifest(filePath)) { if (parser.fileIsManifest(filePath)) {
@ -1186,76 +1191,83 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
return TERMINATE; return TERMINATE;
} }
if (null != manifest) { if (manifest == null) {
/* return CONTINUE;
* Update the mapping of case names to manifest paths that }
* is used for case deletion.
*/
String caseName = manifest.getCaseName();
Path manifestPath = manifest.getFilePath();
if (casesToManifests.containsKey(caseName)) {
Set<Path> manifestPaths = casesToManifests.get(caseName);
manifestPaths.add(manifestPath);
} else {
Set<Path> manifestPaths = new HashSet<>();
manifestPaths.add(manifestPath);
casesToManifests.put(caseName, manifestPaths);
}
/* /*
* Add a job to the pending jobs queue, the completed jobs * If a manifest file has been found, get a manifest file lock,
* list, or do crashed job recovery, as required. * analyze the job state, and put a job into the appropriate job
*/ * list. There is a short wait here in case the input directory
try { * scanner file visitor of another auto ingest node (AIN) has
byte[] rawData = coordinationService.getNodeData(CoordinationService.CategoryNode.MANIFESTS, manifestPath.toString()); * the lock. If the lock ultmiately can't be obtained, the wait
* was not long enough, or another auto ingest node (AIN) is
* holding the lock because it is executing the job, or a case
* deletion task has aquired the lock. In all of these cases the
* manifest can be skipped for this scan.
*/
try (Lock manifestLock = coordinationService.tryGetExclusiveLock(CoordinationService.CategoryNode.MANIFESTS, manifest.getFilePath().toString(), INPUT_SCAN_LOCKING_TIMEOUT_MINS, TimeUnit.MINUTES)) {
if (null != manifestLock) {
byte[] rawData = coordinationService.getNodeData(CoordinationService.CategoryNode.MANIFESTS, manifest.getFilePath().toString());
if (null != rawData && rawData.length > 0) { if (null != rawData && rawData.length > 0) {
try { AutoIngestJobNodeData nodeData = new AutoIngestJobNodeData(rawData);
AutoIngestJobNodeData nodeData = new AutoIngestJobNodeData(rawData); AutoIngestJob.ProcessingStatus processingStatus = nodeData.getProcessingStatus();
AutoIngestJob.ProcessingStatus processingStatus = nodeData.getProcessingStatus(); switch (processingStatus) {
switch (processingStatus) { case PENDING:
case PENDING: addPendingJob(manifest, nodeData);
addPendingJob(manifest, nodeData); break;
break; case PROCESSING:
case PROCESSING: /*
doRecoveryIfCrashed(manifest, nodeData); * If an exclusive manifest file lock was
break; * obtained for an auto ingest job in the
case COMPLETED: * processing state, the auto ingest node
addCompletedJob(manifest, nodeData); * (AIN) executing the job crashed and the
break; * lock was released when the coordination
case DELETED: // No longer used, retained for legacy jobs only. * service detected that the AIN was no
/* * longer alive.
* Ignore jobs marked as "deleted." */
*/ doCrashRecovery(manifest, nodeData);
break; break;
default: case COMPLETED:
sysLogger.log(Level.SEVERE, "Unknown ManifestNodeData.ProcessingStatus"); addCompletedJob(manifest, nodeData);
break; break;
} case DELETED:
} catch (AutoIngestJobNodeData.InvalidDataException | AutoIngestJobException ex) { /*
sysLogger.log(Level.SEVERE, String.format("Invalid auto ingest job node data for %s", manifestPath), ex); * Ignore jobs marked as deleted. Note that
* this state is no longer used and is
* retained for legacy jobs only.
*/
break;
default:
sysLogger.log(Level.SEVERE, "Unknown ManifestNodeData.ProcessingStatus");
break;
} }
} else { } else {
try { try {
addNewPendingJob(manifest); addNewPendingJob(manifest);
} catch (AutoIngestJobException ex) { } catch (AutoIngestJobException ex) {
sysLogger.log(Level.SEVERE, String.format("Invalid manifest data for %s", manifestPath), ex); sysLogger.log(Level.SEVERE, String.format("Invalid manifest data for %s", manifest.getFilePath()), ex);
} }
} }
} catch (CoordinationServiceException ex) {
sysLogger.log(Level.SEVERE, String.format("Error transmitting node data for %s", manifestPath), ex);
return CONTINUE;
} catch (InterruptedException ex) {
Thread.currentThread().interrupt();
return TERMINATE;
} }
} catch (CoordinationServiceException | AutoIngestJobException | AutoIngestJobNodeData.InvalidDataException ex) {
sysLogger.log(Level.SEVERE, String.format("Error handling manifest at %s", manifest.getFilePath()), ex);
} catch (InterruptedException ex) {
/*
* The thread running the input directory scan task was
* interrupted while blocked, i.e., auto ingest is shutting
* down.
*/
return TERMINATE;
} }
} catch (Exception ex) { } catch (Exception ex) {
// Catch all unhandled and unexpected exceptions. Otherwise one bad file /*
// can stop the entire input folder scanning. Given that the exception is unexpected, * This is an exception firewall so that an unexpected runtime
// I'm hesitant to add logging which requires accessing or de-referencing data. * exception from the handling of a single manifest file does
sysLogger.log(Level.SEVERE, "Unexpected exception in file visitor", ex); * not take out the input directory scanner.
return CONTINUE; */
sysLogger.log(Level.SEVERE, String.format("Unexpected exception handling %s", filePath), ex);
} }
if (!Thread.currentThread().isInterrupted()) { if (!Thread.currentThread().isInterrupted()) {
@ -1266,49 +1278,36 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
} }
/** /**
* Adds an existing job to the pending jobs queue. * Adds an auto ingest job to the pending jobs queue.
* *
* @param manifest The manifest for the job. * @param manifest The manifest for the job.
* @param nodeData The data stored in the coordination service node for * @param nodeData The data stored in the manifest file lock
* the job. * coordination service node for the job.
* *
* @throws InterruptedException if the thread running the input * @throws AutoIngestJobException If there was an error working
* directory scan task is interrupted while * with the node data.
* blocked, i.e., if auto ingest is * @throws CoordinationServiceException If a lock node data version
* shutting down. * update was required and there
* was an error writing the node
* data by the coordination
* service.
* @throws InterruptedException If the thread running the input
* directory scan task is
* interrupted while blocked, i.e.,
* if auto ingest is shutting down.
*/ */
private void addPendingJob(Manifest manifest, AutoIngestJobNodeData nodeData) throws InterruptedException, AutoIngestJobException { private void addPendingJob(Manifest manifest, AutoIngestJobNodeData nodeData) throws AutoIngestJobException, CoordinationServiceException, InterruptedException {
AutoIngestJob job; AutoIngestJob job;
if (nodeData.getVersion() == AutoIngestJobNodeData.getCurrentVersion()) { if (nodeData.getVersion() == AutoIngestJobNodeData.getCurrentVersion()) {
job = new AutoIngestJob(nodeData); job = new AutoIngestJob(nodeData);
} else { } else {
job = new AutoIngestJob(manifest); job = new AutoIngestJob(manifest);
job.setPriority(nodeData.getPriority()); // Retain priority, present in all versions of the node data. job.setPriority(nodeData.getPriority());
Path caseDirectory = PathUtils.findCaseDirectory(rootOutputDirectory, manifest.getCaseName()); Path caseDirectory = PathUtils.findCaseDirectory(rootOutputDirectory, manifest.getCaseName());
if (null != caseDirectory) { if (null != caseDirectory) {
job.setCaseDirectoryPath(caseDirectory); job.setCaseDirectoryPath(caseDirectory);
} }
updateAutoIngestJobData(job);
/*
* Try to upgrade/update the coordination service manifest node
* data for the job.
*
* An exclusive lock is obtained before doing so because another
* host may have already found the job, obtained an exclusive
* lock, and started processing it. However, this locking does
* make it possible that two processing hosts will both try to
* obtain the lock to do the upgrade operation at the same time.
* If this happens, the host that is holding the lock will
* complete the upgrade operation, so there is nothing more for
* this host to do.
*/
try (Lock manifestLock = coordinationService.tryGetExclusiveLock(CoordinationService.CategoryNode.MANIFESTS, manifest.getFilePath().toString())) {
if (null != manifestLock) {
updateCoordinationServiceManifestNode(job);
}
} catch (CoordinationServiceException ex) {
sysLogger.log(Level.SEVERE, String.format("Error attempting to set node data for %s", manifest.getFilePath()), ex);
}
} }
newPendingJobsList.add(job); newPendingJobsList.add(job);
} }
@ -1318,150 +1317,117 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
* *
* @param manifest The manifest for the job. * @param manifest The manifest for the job.
* *
* @throws InterruptedException if the thread running the input * @throws AutoIngestJobException If there was an error creating
* directory scan task is interrupted while * the node data.
* blocked, i.e., if auto ingest is * @throws CoordinationServiceException If there was an error writing
* shutting down. * the node data by the
* coordination service.
* @throws InterruptedException If the thread running the input
* directory scan task is
* interrupted while blocked, i.e.,
* if auto ingest is shutting down.
*/ */
private void addNewPendingJob(Manifest manifest) throws InterruptedException, AutoIngestJobException { private void addNewPendingJob(Manifest manifest) throws AutoIngestJobException, CoordinationServiceException, InterruptedException {
/* AutoIngestJob job = new AutoIngestJob(manifest);
* Create the coordination service manifest node data for the job. updateAutoIngestJobData(job);
* Note that getting the lock will create the node for the job (with newPendingJobsList.add(job);
* no data) if it does not already exist.
*
* An exclusive lock is obtained before creating the node data
* because another host may have already found the job, obtained an
* exclusive lock, and started processing it. However, this locking
* does make it possible that two hosts will both try to obtain the
* lock to do the create operation at the same time. If this
* happens, the host that is locked out will not add the job to its
* pending queue for this scan of the input directory, but it will
* be picked up on the next scan.
*/
try (Lock manifestLock = coordinationService.tryGetExclusiveLock(CoordinationService.CategoryNode.MANIFESTS, manifest.getFilePath().toString())) {
if (null != manifestLock) {
AutoIngestJob job = new AutoIngestJob(manifest);
updateCoordinationServiceManifestNode(job);
newPendingJobsList.add(job);
}
} catch (CoordinationServiceException ex) {
sysLogger.log(Level.SEVERE, String.format("Error attempting to set node data for %s", manifest.getFilePath()), ex);
}
} }
/** /**
* Does crash recovery for a manifest, if required. The criterion for * Does recovery for an auto ingest job that was left in the processing
* crash recovery is a manifest with coordination service node data * state by an auot ingest node (AIN) that crashed.
* indicating it is being processed for which an exclusive lock on the
* node can be acquired. If this condition is true, it is probable that
* the node that was processing the job crashed and the processing
* status was not updated.
* *
* @param manifest The manifest for upgrading the node. * @param manifest The manifest for the job.
* @param jobNodeData The auto ingest job node data. * @param nodeData The data stored in the manifest file lock
* coordination service node for the job.
* *
* @throws InterruptedException if the thread running the input * @throws AutoIngestJobException If there was an error working
* directory scan task is interrupted * with the node data.
* while blocked, i.e., if auto ingest is * @throws CoordinationServiceException If there was an error writing
* shutting down. * updated node data by the
* @throws AutoIngestJobException if there is an issue creating a new * coordination service.
* AutoIngestJob object. * @throws InterruptedException If the thread running the input
* directory scan task is
* interrupted while blocked, i.e.,
* if auto ingest is shutting down.
*/ */
private void doRecoveryIfCrashed(Manifest manifest, AutoIngestJobNodeData jobNodeData) throws InterruptedException, AutoIngestJobException { private void doCrashRecovery(Manifest manifest, AutoIngestJobNodeData jobNodeData) throws AutoIngestJobException, CoordinationServiceException, InterruptedException {
/*
* Try to get an exclusive lock on the coordination service node for
* the job. If the lock cannot be obtained, another host in the auto
* ingest cluster is already doing the recovery, so there is nothing
* to do.
*/
String manifestPath = manifest.getFilePath().toString(); String manifestPath = manifest.getFilePath().toString();
try (Lock manifestLock = coordinationService.tryGetExclusiveLock(CoordinationService.CategoryNode.MANIFESTS, manifestPath)) { sysLogger.log(Level.SEVERE, "Attempting crash recovery for {0}", manifestPath);
if (null != manifestLock) { AutoIngestJob job = new AutoIngestJob(jobNodeData);
sysLogger.log(Level.SEVERE, "Attempting crash recovery for {0}", manifestPath);
Path caseDirectoryPath = PathUtils.findCaseDirectory(rootOutputDirectory, manifest.getCaseName());
/* /*
* Create the recovery job. * Try to set the error flags that indicate incomplete or messy data
*/ * in displays for the job and the case. Note that if the job
AutoIngestJob job = new AutoIngestJob(jobNodeData); * crashed before a case directory was created, the job was a no-op,
int numberOfCrashes = job.getNumberOfCrashes(); * so the data quality flags do not need to be set.
if (numberOfCrashes <= AutoIngestUserPreferences.getMaxNumTimesToProcessImage()) { */
++numberOfCrashes; Path caseDirectoryPath = PathUtils.findCaseDirectory(rootOutputDirectory, manifest.getCaseName());
job.setNumberOfCrashes(numberOfCrashes); if (null != caseDirectoryPath) {
if (numberOfCrashes <= AutoIngestUserPreferences.getMaxNumTimesToProcessImage()) { job.setCaseDirectoryPath(caseDirectoryPath);
job.setCompletedDate(new Date(0)); job.setErrorsOccurred(true);
} else { try {
job.setCompletedDate(Date.from(Instant.now())); setCaseNodeDataErrorsOccurred(caseDirectoryPath);
} } catch (IOException ex) {
} sysLogger.log(Level.WARNING, String.format("Error attempting to set error flag in case node data for %s", caseDirectoryPath), ex);
}
} else {
job.setErrorsOccurred(false);
}
if (null != caseDirectoryPath) { /*
job.setCaseDirectoryPath(caseDirectoryPath); * Update the crash count for the job, determine whether or not to
job.setErrorsOccurred(true); * retry processing its data source, and deal with the job
try { * accordingly.
setCaseNodeDataErrorsOccurred(caseDirectoryPath); */
} catch (IOException ex) { int numberOfCrashes = job.getNumberOfCrashes();
sysLogger.log(Level.SEVERE, String.format("Error attempting to set error flag in case node data for %s", caseDirectoryPath), ex); ++numberOfCrashes;
} job.setNumberOfCrashes(numberOfCrashes);
} else { if (numberOfCrashes < AutoIngestUserPreferences.getMaxNumTimesToProcessImage()) {
job.setErrorsOccurred(false); job.setProcessingStatus(AutoIngestJob.ProcessingStatus.PENDING);
} job.setCompletedDate(new Date(0));
if (null != caseDirectoryPath) {
if (numberOfCrashes <= AutoIngestUserPreferences.getMaxNumTimesToProcessImage()) {
job.setProcessingStatus(AutoIngestJob.ProcessingStatus.PENDING);
if (null != caseDirectoryPath) {
try {
new AutoIngestJobLogger(manifest.getFilePath(), manifest.getDataSourceFileName(), caseDirectoryPath).logCrashRecoveryWithRetry();
} catch (AutoIngestJobLoggerException ex) {
sysLogger.log(Level.SEVERE, String.format("Error creating case auto ingest log entry for crashed job for %s", manifestPath), ex);
}
}
} else {
job.setProcessingStatus(AutoIngestJob.ProcessingStatus.COMPLETED);
if (null != caseDirectoryPath) {
try {
new AutoIngestJobLogger(manifest.getFilePath(), manifest.getDataSourceFileName(), caseDirectoryPath).logCrashRecoveryNoRetry();
} catch (AutoIngestJobLoggerException ex) {
sysLogger.log(Level.SEVERE, String.format("Error creating case auto ingest log entry for crashed job for %s", manifestPath), ex);
}
}
}
/*
* Update the coordination service node for the job. If this
* fails, leave the recovery to another host.
*/
try { try {
updateCoordinationServiceManifestNode(job); new AutoIngestJobLogger(manifest.getFilePath(), manifest.getDataSourceFileName(), caseDirectoryPath).logCrashRecoveryWithRetry();
} catch (CoordinationServiceException ex) { } catch (AutoIngestJobLoggerException ex) {
sysLogger.log(Level.SEVERE, String.format("Error attempting to set node data for %s", manifestPath), ex); sysLogger.log(Level.SEVERE, String.format("Error writing case auto ingest log entry for crashed job for %s", manifestPath), ex);
return;
}
jobNodeData = new AutoIngestJobNodeData(job);
if (numberOfCrashes <= AutoIngestUserPreferences.getMaxNumTimesToProcessImage()) {
newPendingJobsList.add(job);
} else {
newCompletedJobsList.add(new AutoIngestJob(jobNodeData));
} }
} }
} catch (CoordinationServiceException ex) { updateAutoIngestJobData(job);
sysLogger.log(Level.SEVERE, String.format("Error attempting to get exclusive lock for %s", manifestPath), ex); newPendingJobsList.add(job);
} else {
job.setProcessingStatus(AutoIngestJob.ProcessingStatus.COMPLETED);
job.setCompletedDate(Date.from(Instant.now()));
if (null != caseDirectoryPath) {
try {
new AutoIngestJobLogger(manifest.getFilePath(), manifest.getDataSourceFileName(), caseDirectoryPath).logCrashRecoveryNoRetry();
} catch (AutoIngestJobLoggerException ex) {
sysLogger.log(Level.SEVERE, String.format("Error writing case auto ingest log entry for crashed job for %s", manifestPath), ex);
}
}
updateAutoIngestJobData(job);
newCompletedJobsList.add(new AutoIngestJob(jobNodeData));
} }
} }
/** /**
* Adds a job to process a manifest to the completed jobs list. * Adds a job to process a manifest to the completed jobs list.
* *
* @param nodeData The data stored in the coordination service node for * @param manifest The manifest for the job.
* the manifest. * @param nodeData The data stored in the manifest file lock
* @param manifest The manifest for upgrading the node. * coordination service node for the job.
* *
* @throws CoordinationServiceException * @throws AutoIngestJobException If there was an error working
* @throws InterruptedException * with the node data.
* @throws CoordinationServiceException If there was an error writing
* updated node data by the
* coordination service.
* @throws InterruptedException If the thread running the input
* directory scan task is
* interrupted while blocked, i.e.,
* if auto ingest is shutting down.
*/ */
private void addCompletedJob(Manifest manifest, AutoIngestJobNodeData nodeData) throws CoordinationServiceException, InterruptedException, AutoIngestJobException { private void addCompletedJob(Manifest manifest, AutoIngestJobNodeData nodeData) throws AutoIngestJobException, CoordinationServiceException, InterruptedException {
Path caseDirectoryPath = nodeData.getCaseDirectoryPath(); Path caseDirectoryPath = nodeData.getCaseDirectoryPath();
if (!caseDirectoryPath.toFile().exists()) { if (!caseDirectoryPath.toFile().exists()) {
sysLogger.log(Level.WARNING, String.format("Job completed for %s, but cannot find case directory %s, ignoring job", nodeData.getManifestFilePath(), caseDirectoryPath.toString())); sysLogger.log(Level.WARNING, String.format("Job completed for %s, but cannot find case directory %s, ignoring job", nodeData.getManifestFilePath(), caseDirectoryPath.toString()));
@ -1493,21 +1459,9 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
job.setProcessingStage(AutoIngestJob.Stage.COMPLETED, nodeData.getCompletedDate()); job.setProcessingStage(AutoIngestJob.Stage.COMPLETED, nodeData.getCompletedDate());
job.setProcessingStatus(AutoIngestJob.ProcessingStatus.COMPLETED); job.setProcessingStatus(AutoIngestJob.ProcessingStatus.COMPLETED);
/* updateAutoIngestJobData(job);
* Try to upgrade/update the coordination service manifest node
* data for the job. It is possible that two hosts will both try
* to obtain the lock to do the upgrade operation at the same
* time. If this happens, the host that is holding the lock will
* complete the upgrade operation.
*/
try (Lock manifestLock = coordinationService.tryGetExclusiveLock(CoordinationService.CategoryNode.MANIFESTS, manifest.getFilePath().toString())) {
if (null != manifestLock) {
updateCoordinationServiceManifestNode(job);
}
} catch (CoordinationServiceException ex) {
sysLogger.log(Level.SEVERE, String.format("Error attempting to set node data for %s", manifest.getFilePath()), ex);
}
} }
newCompletedJobsList.add(job); newCompletedJobsList.add(job);
} }
@ -1536,17 +1490,17 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
} }
/** /**
* Invoked for an input directory after entries in the directory are * Invoked for an input directory after the files in the directory are
* visited. Checks if the task thread has been interrupted because auto * visited. Checks if the task thread has been interrupted because auto
* ingest is shutting down and terminates the scan if that is the case. * ingest is shutting down and terminates the scan if that is the case.
* *
* @param dirPath The directory about to be visited. * @param dirPath The directory about to be visited.
* @param unused Unused. * @param unused Unused.
* *
* @return TERMINATE if the task thread has been interrupted, CONTINUE * @return FileVisitResult.TERMINATE if the task thread has been
* if it has not. * interrupted, FileVisitResult.CONTINUE if it has not.
* *
* @throws IOException if an I/O error occurs, but this implementation * @throws IOException If an I/O error occurs, but this implementation
* does not throw. * does not throw.
*/ */
@Override @Override
@ -2005,11 +1959,12 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
try { try {
AutoIngestJobNodeData nodeData = new AutoIngestJobNodeData(coordinationService.getNodeData(CoordinationService.CategoryNode.MANIFESTS, manifestPath.toString())); AutoIngestJobNodeData nodeData = new AutoIngestJobNodeData(coordinationService.getNodeData(CoordinationService.CategoryNode.MANIFESTS, manifestPath.toString()));
if (!nodeData.getProcessingStatus().equals(PENDING)) { if (!nodeData.getProcessingStatus().equals(PENDING)) {
/* iterator.remove();
* Due to a timing issue or a missed event, a continue;
* non-pending job has ended up on the pending }
* queue. Skip the job and remove it from the queue.
*/ File manifestFile = nodeData.getManifestFilePath().toFile();
if (!manifestFile.exists()) {
iterator.remove(); iterator.remove();
continue; continue;
} }
@ -2027,11 +1982,13 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
continue; continue;
} }
} }
iterator.remove(); iterator.remove();
currentJob = job; currentJob = job;
break; break;
} catch (AutoIngestJobNodeData.InvalidDataException ex) { } catch (AutoIngestJobNodeData.InvalidDataException ex) {
sysLogger.log(Level.WARNING, String.format("Unable to use node data for %s", manifestPath), ex); // JCTODO: Is this right? sysLogger.log(Level.WARNING, String.format("Unable to use node data for %s", manifestPath), ex);
} }
} }
} }
@ -2102,7 +2059,7 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
currentJob.setProcessingStatus(AutoIngestJob.ProcessingStatus.PROCESSING); currentJob.setProcessingStatus(AutoIngestJob.ProcessingStatus.PROCESSING);
currentJob.setProcessingStage(AutoIngestJob.Stage.STARTING, Date.from(Instant.now())); currentJob.setProcessingStage(AutoIngestJob.Stage.STARTING, Date.from(Instant.now()));
currentJob.setProcessingHostName(AutoIngestManager.LOCAL_HOST_NAME); currentJob.setProcessingHostName(AutoIngestManager.LOCAL_HOST_NAME);
updateCoordinationServiceManifestNode(currentJob); updateAutoIngestJobData(currentJob);
setChanged(); setChanged();
notifyObservers(Event.JOB_STARTED); notifyObservers(Event.JOB_STARTED);
eventPublisher.publishRemotely(new AutoIngestJobStartedEvent(currentJob)); eventPublisher.publishRemotely(new AutoIngestJobStartedEvent(currentJob));
@ -2126,7 +2083,7 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
currentJob.setProcessingStatus(AutoIngestJob.ProcessingStatus.PENDING); currentJob.setProcessingStatus(AutoIngestJob.ProcessingStatus.PENDING);
} }
currentJob.setProcessingHostName(""); currentJob.setProcessingHostName("");
updateCoordinationServiceManifestNode(currentJob); updateAutoIngestJobData(currentJob);
boolean retry = (!currentJob.isCanceled() && !currentJob.isCompleted()); boolean retry = (!currentJob.isCanceled() && !currentJob.isCompleted());
sysLogger.log(Level.INFO, "Completed processing of {0}, retry = {1}", new Object[]{manifestPath, retry}); sysLogger.log(Level.INFO, "Completed processing of {0}, retry = {1}", new Object[]{manifestPath, retry});
@ -2322,13 +2279,16 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
Thread.sleep(AutoIngestUserPreferences.getSecondsToSleepBetweenCases() * 1000); Thread.sleep(AutoIngestUserPreferences.getSecondsToSleepBetweenCases() * 1000);
} }
currentJob.setCaseDirectoryPath(caseDirectoryPath); currentJob.setCaseDirectoryPath(caseDirectoryPath);
updateCoordinationServiceManifestNode(currentJob); // update case directory path updateAutoIngestJobData(currentJob);
recordManifest(caseDirectoryPath, manifest.getFilePath());
Case caseForJob = Case.getCurrentCase(); Case caseForJob = Case.getCurrentCase();
sysLogger.log(Level.INFO, "Opened case {0} for {1}", new Object[]{caseForJob.getName(), manifest.getFilePath()}); sysLogger.log(Level.INFO, "Opened case {0} for {1}", new Object[]{caseForJob.getName(), manifest.getFilePath()});
return caseForJob; return caseForJob;
} catch (KeywordSearchModuleException ex) { } catch (KeywordSearchModuleException ex) {
throw new CaseManagementException(String.format("Error creating solr settings file for case %s for %s", caseName, manifest.getFilePath()), ex); throw new CaseManagementException(String.format("Error creating solr settings file for case %s for %s", caseName, manifest.getFilePath()), ex);
} catch (IOException ex) {
throw new CaseManagementException(String.format("Error recording manifest file path for case %s for %s", caseName, manifest.getFilePath()), ex);
} catch (CaseActionException ex) { } catch (CaseActionException ex) {
throw new CaseManagementException(String.format("Error creating or opening case %s for %s", caseName, manifest.getFilePath()), ex); throw new CaseManagementException(String.format("Error creating or opening case %s for %s", caseName, manifest.getFilePath()), ex);
} }
@ -2338,6 +2298,22 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
} }
} }
/**
* Writes the path of the manifest file for the current job to a list of
* manifest file paths for the case in file in the case directory.
*
* @param caseDirectoryPath The case directory path.
*
* @throws IOException If the file cannot be created or opened and
* updated.
*/
private void recordManifest(Path caseDirectoryPath, Path manifestFilePath) throws IOException {
final Path manifestsListFilePath = Paths.get(caseDirectoryPath.toString(), AutoIngestManager.getCaseManifestsListFileName());
try (FileWriter fileWriter = new FileWriter(manifestsListFilePath.toString(), true)) {
fileWriter.write(manifestFilePath.toString() + "\n");
}
}
/** /**
* Runs the ingest process for the current job. * Runs the ingest process for the current job.
* *
@ -2978,7 +2954,7 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
currentJob.setModuleRuntimesSnapshot(IngestManager.getInstance().getModuleRunTimes()); currentJob.setModuleRuntimesSnapshot(IngestManager.getInstance().getModuleRunTimes());
setChanged(); setChanged();
notifyObservers(Event.JOB_STATUS_UPDATED); notifyObservers(Event.JOB_STATUS_UPDATED);
updateCoordinationServiceManifestNode(currentJob); updateAutoIngestJobData(currentJob);
eventPublisher.publishRemotely(new AutoIngestJobStatusEvent(currentJob)); eventPublisher.publishRemotely(new AutoIngestJobStatusEvent(currentJob));
} }
} }

View File

@ -59,6 +59,11 @@ final class CasesDashboardCustomizer implements MultiUserCaseBrowserCustomizer {
properties.add(Column.CREATE_DATE); properties.add(Column.CREATE_DATE);
properties.add(Column.LAST_ACCESS_DATE); properties.add(Column.LAST_ACCESS_DATE);
properties.add(Column.DIRECTORY); properties.add(Column.DIRECTORY);
properties.add(Column.MANIFEST_FILE_ZNODES_DELETE_STATUS);
properties.add(Column.DATA_SOURCES_DELETE_STATUS);
properties.add(Column.TEXT_INDEX_DELETE_STATUS);
properties.add(Column.CASE_DB_DELETE_STATUS);
properties.add(Column.CASE_DIR_DELETE_STATUS);
return properties; return properties;
} }

View File

@ -62,8 +62,6 @@ public final class CasesDashboardTopComponent extends TopComponent implements Ex
* for multi-user cases. The top component is docked into the "dashboard * for multi-user cases. The top component is docked into the "dashboard
* mode" defined by the auto ingest jobs top component. * mode" defined by the auto ingest jobs top component.
*/ */
// RJCTODO: Consider moving all of the dashboard code into its own
// autoingest.dashboard package.
public static void openTopComponent() { public static void openTopComponent() {
CasesDashboardTopComponent topComponent = (CasesDashboardTopComponent) WindowManager.getDefault().findTopComponent("CasesDashboardTopComponent"); // NON-NLS CasesDashboardTopComponent topComponent = (CasesDashboardTopComponent) WindowManager.getDefault().findTopComponent("CasesDashboardTopComponent"); // NON-NLS
if (topComponent == null) { if (topComponent == null) {

View File

@ -21,6 +21,7 @@ package org.sleuthkit.autopsy.experimental.autoingest;
import java.util.concurrent.ExecutorService; import java.util.concurrent.ExecutorService;
import org.openide.util.NbBundle; import org.openide.util.NbBundle;
import org.sleuthkit.autopsy.casemodule.multiusercases.CaseNodeData; import org.sleuthkit.autopsy.casemodule.multiusercases.CaseNodeData;
import org.sleuthkit.autopsy.experimental.autoingest.DeleteCaseTask.DeleteOptions;
import org.sleuthkit.autopsy.progress.ProgressIndicator; import org.sleuthkit.autopsy.progress.ProgressIndicator;
/** /**
@ -53,7 +54,7 @@ final class DeleteCaseInputAction extends DeleteCaseAction {
@Override @Override
DeleteCaseTask getTask(CaseNodeData caseNodeData, ProgressIndicator progress) { DeleteCaseTask getTask(CaseNodeData caseNodeData, ProgressIndicator progress) {
return new DeleteCaseInputTask(caseNodeData, progress); return new DeleteCaseTask(caseNodeData, DeleteOptions.DELETE_INPUT, progress);
} }
@Override @Override

View File

@ -20,6 +20,7 @@ package org.sleuthkit.autopsy.experimental.autoingest;
import org.openide.util.NbBundle.Messages; import org.openide.util.NbBundle.Messages;
import org.sleuthkit.autopsy.casemodule.multiusercases.CaseNodeData; import org.sleuthkit.autopsy.casemodule.multiusercases.CaseNodeData;
import org.sleuthkit.autopsy.experimental.autoingest.DeleteCaseTask.DeleteOptions;
import org.sleuthkit.autopsy.progress.ProgressIndicator; import org.sleuthkit.autopsy.progress.ProgressIndicator;
/** /**
@ -47,7 +48,7 @@ final class DeleteCaseInputAndOutputAction extends DeleteCaseAction {
@Override @Override
DeleteCaseTask getTask(CaseNodeData caseNodeData, ProgressIndicator progress) { DeleteCaseTask getTask(CaseNodeData caseNodeData, ProgressIndicator progress) {
return new DeleteCaseInputAndOutputTask(caseNodeData, progress); return new DeleteCaseTask(caseNodeData, DeleteOptions.DELETE_OUTPUT, progress);
} }
@Override @Override

View File

@ -20,6 +20,7 @@ package org.sleuthkit.autopsy.experimental.autoingest;
import org.openide.util.NbBundle; import org.openide.util.NbBundle;
import org.sleuthkit.autopsy.casemodule.multiusercases.CaseNodeData; import org.sleuthkit.autopsy.casemodule.multiusercases.CaseNodeData;
import org.sleuthkit.autopsy.experimental.autoingest.DeleteCaseTask.DeleteOptions;
import org.sleuthkit.autopsy.progress.ProgressIndicator; import org.sleuthkit.autopsy.progress.ProgressIndicator;
/** /**
@ -51,7 +52,7 @@ final class DeleteCaseOutputAction extends DeleteCaseAction {
@Override @Override
DeleteCaseTask getTask(CaseNodeData caseNodeData, ProgressIndicator progress) { DeleteCaseTask getTask(CaseNodeData caseNodeData, ProgressIndicator progress) {
return new DeleteCaseOutputTask(caseNodeData, progress); return new DeleteCaseTask(caseNodeData, DeleteOptions.DELETE_OUTPUT, progress);
} }
@Override @Override

View File

@ -23,10 +23,12 @@ import java.io.IOException;
import java.nio.file.Path; import java.nio.file.Path;
import java.nio.file.Paths; import java.nio.file.Paths;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.HashMap;
import java.util.List; import java.util.List;
import java.util.Map; import java.util.Scanner;
import java.util.concurrent.TimeUnit;
import java.util.logging.Level; import java.util.logging.Level;
import org.openide.util.Exceptions;
import org.openide.util.Lookup;
import org.openide.util.NbBundle; import org.openide.util.NbBundle;
import org.openide.util.NbBundle.Messages; import org.openide.util.NbBundle.Messages;
import org.sleuthkit.autopsy.casemodule.Case; import org.sleuthkit.autopsy.casemodule.Case;
@ -36,73 +38,111 @@ import org.sleuthkit.autopsy.casemodule.multiusercases.CaseCoordinationServiceUt
import org.sleuthkit.autopsy.coordinationservice.CoordinationService; import org.sleuthkit.autopsy.coordinationservice.CoordinationService;
import org.sleuthkit.autopsy.coordinationservice.CoordinationService.CategoryNode; import org.sleuthkit.autopsy.coordinationservice.CoordinationService.CategoryNode;
import org.sleuthkit.autopsy.coordinationservice.CoordinationService.CoordinationServiceException; import org.sleuthkit.autopsy.coordinationservice.CoordinationService.CoordinationServiceException;
import org.sleuthkit.autopsy.coordinationservice.CoordinationService.Lock;
import org.sleuthkit.autopsy.coreutils.FileUtil; import org.sleuthkit.autopsy.coreutils.FileUtil;
import org.sleuthkit.autopsy.progress.ProgressIndicator; import org.sleuthkit.autopsy.progress.ProgressIndicator;
import org.sleuthkit.autopsy.coreutils.Logger; import org.sleuthkit.autopsy.coreutils.Logger;
import org.sleuthkit.autopsy.experimental.autoingest.AutoIngestJobNodeData.InvalidDataException; import org.sleuthkit.autopsy.experimental.autoingest.AutoIngestJobNodeData.InvalidDataException;
/** /**
* A base class for tasks that delete part or all of a given case. * A task that deletes part or all of a given case. Note that all logging done
* by this task is directed to the dedicated auto ingest dashboard log instead
* of to the general application log.
*/ */
abstract class DeleteCaseTask implements Runnable { // RJCTODO:
// 1. Expand case type in case metadata to include auto ingest cases.
// Disable the delete menu item in the main app menu for auto ingest cases,
// and possibly also use this to delete the add data source capability. Could use
// this to limit the display of nodes in the in the auto ingest cases dashboard.
// 2. When an instance of this class finishes, publish an event via event bus
// so that the case browser can refresh.
// 3. Add code to file deletion utilities such that on Wimdows, for paths
// exceeding 255 chars, robocopy is invoked for the deletion. Make the new file
// deletion utility throw exceptions instead of return a boolean result code.
// 4. Make other dashbaord use the dashboard logger.
// 5. Consider moving all of the dashboard code into its own autoingest.dashboard package.
// 6. AutoIngestManager.addCompletedJob node data version updating might be out of date.
// 7. Deal with cancellation during lock releases. Look at using
// https://google.github.io/guava/releases/19.0/api/docs/com/google/common/util/concurrent/Uninterruptibles.html
// getUninterruptibly to do os.
// 8. With the removal of the auto ingest control panel, we can eliminate the
// completed jobs list and the processing list from AutoIngestManager.
final class DeleteCaseTask implements Runnable {
private static final int MANIFEST_FILE_LOCKING_TIMEOUT_MINS = 5;
private static final Logger logger = AutoIngestDashboardLogger.getLogger(); private static final Logger logger = AutoIngestDashboardLogger.getLogger();
private final CaseNodeData caseNodeData; private final CaseNodeData caseNodeData;
private final String caseDisplayName; private final DeleteOptions deleteOption;
private final String caseUniqueName;
private final Path caseDirectoryPath;
private final ProgressIndicator progress; private final ProgressIndicator progress;
private final List<AutoIngestJobNodeData> nodeDataForAutoIngestJobs; private final List<Lock> manifestFileLocks;
private final Map<String, CoordinationService.Lock> manifestFileLocks;
private CoordinationService coordinationService; private CoordinationService coordinationService;
/*
* Options to support implementing differnet case deletion uses cases.
*/
public enum DeleteOptions {
/**
* Delete the auto ingest job manifests and corresponding data sources,
* if any, while leaving the manifest file coordination service nodes
* and the rest of the case intact. The use case is freeing auto ingest
* input directory space while retaining the option to restore the data
* sources, effectively restoring the case.
*/
DELETE_INPUT,
/**
* Delete the auto ingest job coordination service nodes, if any, and
* the output for a case produced via auto ingest, while leaving the
* auto ingest job input directories intact. The use case is auto ingest
* reprocessing of a case with a clean slate without having to restore
* the input directories.
*/
DELETE_OUTPUT,
/**
* Delete everything.
*/
DELETE_ALL
}
/** /**
* Constructs the base class part of a task that deletes part or all of a * Constructs a task that deletes part or all of a given case. Note that all
* given case. * logging is directed to the dedicated auto ingest dashboard log instead of
* to the general application log.
* *
* @param caseNodeData The case directory lock coordination service node * @param caseNodeData The case directory coordination service node data for
* data for the case. * the case.
* @param deleteOption The deletion option for the task.
* @param progress A progress indicator. * @param progress A progress indicator.
*/ */
DeleteCaseTask(CaseNodeData caseNodeData, ProgressIndicator progress) { DeleteCaseTask(CaseNodeData caseNodeData, DeleteOptions deleteOption, ProgressIndicator progress) {
this.caseNodeData = caseNodeData; this.caseNodeData = caseNodeData;
this.deleteOption = deleteOption;
this.progress = progress; this.progress = progress;
/* this.manifestFileLocks = new ArrayList<>();
* Design Decision Note: It was decided to add the following state to
* instances of this class make it easier to access given that the class
* design favors instance methods over static methods.
*/
this.caseDisplayName = caseNodeData.getDisplayName();
this.caseUniqueName = caseNodeData.getName();
this.caseDirectoryPath = caseNodeData.getDirectory();
this.nodeDataForAutoIngestJobs = new ArrayList<>();
this.manifestFileLocks = new HashMap<>();
} }
@Override @Override
@NbBundle.Messages({ @NbBundle.Messages({
"DeleteCaseTask.progress.startMessage=Preparing for deletion..." "DeleteCaseTask.progress.startMessage=Starting deletion..."
}) })
public void run() { public void run() {
try { try {
progress.start(Bundle.DeleteCaseTask_progress_startMessage()); progress.start(Bundle.DeleteCaseTask_progress_startMessage());
logger.log(Level.INFO, String.format("Beginning deletion of %s (%s) in %s", caseDisplayName, caseUniqueName, caseDirectoryPath)); logger.log(Level.INFO, String.format("Starting attempt to delete %s (%s)", caseNodeData.getDisplayName(), deleteOption));
deleteCase(); deleteCase();
logger.log(Level.SEVERE, String.format("Deletion of %s (%s) in %s completed", caseDisplayName, caseUniqueName, caseDirectoryPath)); logger.log(Level.INFO, String.format("Finished attempt to delete %s (%s)", caseNodeData.getDisplayName(), deleteOption));
} catch (Throwable ex) { } catch (Throwable ex) {
/* /*
* Unexpected runtime exceptions firewall. This task is designed to * This is an unexpected runtime exceptions firewall. It is here
* be able to be run in an executor service thread pool without * because this task is designed to be able to be run in scenarios
* calling get() on the task's Future<Void>, so this ensures that * where there is no call to get() on a Future<Void> associated with
* such errors do get ignored. * the task, so this ensures that any such errors get logged.
*/ */
logger.log(Level.INFO, String.format("Unexpected error deleting %s (%s) in %s", caseDisplayName, caseUniqueName, caseDirectoryPath), ex); logger.log(Level.SEVERE, String.format("Unexpected error deleting %s", caseNodeData.getDisplayName()), ex);
} finally { } finally {
progress.finish(); progress.finish();
} }
} }
/** /**
@ -110,426 +150,487 @@ abstract class DeleteCaseTask implements Runnable {
*/ */
@NbBundle.Messages({ @NbBundle.Messages({
"DeleteCaseTask.progress.connectingToCoordSvc=Connecting to the coordination service...", "DeleteCaseTask.progress.connectingToCoordSvc=Connecting to the coordination service...",
"DeleteCaseTask.progress.acquiringCaseNameLock=Acquiring an exclusive case name lock...", "DeleteCaseTask.progress.acquiringCaseNameLock=Acquiring exclusive case name lock...",
"DeleteCaseTask.progress.acquiringCaseDirLock=Acquiring an exclusive case directory lock...", "DeleteCaseTask.progress.acquiringCaseDirLock=Acquiring exclusive case directory lock...",
"DeleteCaseTask.progress.gettingJobNodeData=Getting node data for auto ingest jobs...", "DeleteCaseTask.progress.acquiringManifestLocks=Acquiring exclusive manifest file locks...",
"DeleteCaseTask.progress.acquiringManifestLocks=Acquiring exclusive manifest file locks..." "DeleteCaseTask.progress.deletingDirLockNode=Deleting case directory lock coordination service node...",
"DeleteCaseTask.progress.deletingNameLockNode=Deleting case name lock coordination service node..."
}) })
private void deleteCase() { private void deleteCase() {
progress.progress(Bundle.DeleteCaseTask_progress_connectingToCoordSvc()); progress.progress(Bundle.DeleteCaseTask_progress_connectingToCoordSvc());
logger.log(Level.INFO, String.format("Connecting to coordination service for deletion of %s (%s) in %s", caseDisplayName, caseUniqueName, caseDirectoryPath)); logger.log(Level.INFO, String.format("Connecting to the coordination service for deletion of %s", caseNodeData.getDisplayName()));
try { try {
coordinationService = CoordinationService.getInstance(); coordinationService = CoordinationService.getInstance();
} catch (CoordinationService.CoordinationServiceException ex) { } catch (CoordinationService.CoordinationServiceException ex) {
logger.log(Level.SEVERE, String.format("Failed to connect to the coordination service, cannot delete %s (%s) in %s", caseDisplayName, caseUniqueName, caseDirectoryPath), ex); logger.log(Level.SEVERE, String.format("Could not delete %s because an error occurred connecting to the coordination service", caseNodeData.getDisplayName()), ex);
return; return;
} }
if (Thread.currentThread().isInterrupted()) { if (Thread.currentThread().isInterrupted()) {
logger.log(Level.INFO, String.format("Deletion of %s (%s) in %s cancelled", caseDisplayName, caseUniqueName, caseDirectoryPath)); logger.log(Level.INFO, String.format("Deletion of %s cancelled", caseNodeData.getDisplayName()));
return; return;
} }
/* /*
* Acquire an exclusive case name lock. This is the lock that auto * Acquire an exclusive case name lock. The case name lock is the lock
* ingest nodes acquire exclusively when creating or opening a case * that auto ingest node (AIN) job processing tasks acquire exclusively
* specified in an auto ingest job manifest file to ensure that only one * when creating or opening a case specified in an auto ingest job
* auto ingest node at a time can search the auto ingest output * manifest file. The reason AINs do this is to ensure that only one of
* directory for an existing case matching the one in the manifest file. * them at a time can search the auto ingest output directory for an
* Acquiring this lock effectively locks auto ingest node job processing * existing case matching the one in the manifest file. If a matching
* tasks out of the case to be deleted. * case is found, it is opened, otherwise the case is created. Acquiring
* this lock effectively disables this AIN job processing task behavior
* while the case is being deleted.
*/ */
progress.progress(Bundle.DeleteCaseTask_progress_acquiringCaseNameLock()); progress.progress(Bundle.DeleteCaseTask_progress_acquiringCaseNameLock());
logger.log(Level.INFO, String.format("Acquiring an exclusive case name lock for %s (%s) in %s", caseDisplayName, caseUniqueName, caseDirectoryPath)); logger.log(Level.INFO, String.format("Acquiring an exclusive case name lock for %s", caseNodeData.getDisplayName()));
String caseNameLockNodeName = CaseCoordinationServiceUtils.getCaseLockName(caseDirectoryPath); String caseNameLockName = CaseCoordinationServiceUtils.getCaseNameLockName(caseNodeData.getDirectory());
try (CoordinationService.Lock nameLock = coordinationService.tryGetExclusiveLock(CategoryNode.CASES, caseNameLockNodeName)) { try (CoordinationService.Lock nameLock = coordinationService.tryGetExclusiveLock(CategoryNode.CASES, caseNameLockName)) {
if (nameLock == null) { if (nameLock == null) {
logger.log(Level.INFO, String.format("Could not delete %s (%s) in %s because a case name lock was held by another host", caseDisplayName, caseUniqueName, caseDirectoryPath)); logger.log(Level.INFO, String.format("Could not delete %s because a case name lock was already held by another host", caseNodeData.getDisplayName()));
return; return;
} }
if (Thread.currentThread().isInterrupted()) { if (Thread.currentThread().isInterrupted()) {
logger.log(Level.INFO, String.format("Deletion of %s (%s) in %s cancelled", caseDisplayName, caseUniqueName, caseDirectoryPath)); logger.log(Level.INFO, String.format("Deletion of %s cancelled", caseNodeData.getDisplayName()));
return; return;
} }
/* /*
* Acquire an exclusive case directory lock. A shared case directory * Acquire an exclusive case directory lock. A shared case directory
* lock is acquired by any node (auto ingest or examiner) when it * lock is acquired by each auto ingest node (AIN) and examiner node
* opens a case and is held by the node for as long as the case is * (EIN) when it opens a case. The shared locks are held by the AINs
* open. Acquiring this lock exclusively ensures that no other node * and EINs for as long as they have the case open. Acquiring this
* currently has the case to be deleted open and prevents another * lock exclusively ensures that no AIN or EIN has the case to be
* node from trying to open the case while it is being deleted. * deleted open and prevents another node from trying to open the
* case while it is being deleted.
*/ */
boolean success = true; // RJCTODO: Instead of having this flag, read the casenodedata instead
progress.progress(Bundle.DeleteCaseTask_progress_acquiringCaseDirLock()); progress.progress(Bundle.DeleteCaseTask_progress_acquiringCaseDirLock());
logger.log(Level.INFO, String.format("Acquiring an exclusive case directory lock for %s (%s) in %s", caseDisplayName, caseUniqueName, caseDirectoryPath)); logger.log(Level.INFO, String.format("Acquiring an exclusive case directory lock for %s", caseNodeData.getDisplayName()));
String caseDirLockNodeName = CaseCoordinationServiceUtils.getCaseDirectoryLockName(caseDirectoryPath); String caseDirLockName = CaseCoordinationServiceUtils.getCaseDirectoryLockName(caseNodeData.getDirectory());
try (CoordinationService.Lock caseDirLock = coordinationService.tryGetExclusiveLock(CoordinationService.CategoryNode.CASES, caseDirLockNodeName)) { try (CoordinationService.Lock caseDirLock = coordinationService.tryGetExclusiveLock(CoordinationService.CategoryNode.CASES, caseDirLockName)) {
if (caseDirLock == null) { if (caseDirLock == null) {
logger.log(Level.INFO, String.format("Could not delete %s (%s) in %s because a case directory lock was held by another host", caseDisplayName, caseUniqueName, caseDirectoryPath)); logger.log(Level.INFO, String.format("Could not delete %s because a case directory lock was already held by another host", caseNodeData.getDisplayName()));
return; return;
} }
if (Thread.currentThread().isInterrupted()) { if (Thread.currentThread().isInterrupted()) {
logger.log(Level.INFO, String.format("Deletion of %s (%s) in %s cancelled", caseDisplayName, caseUniqueName, caseDirectoryPath)); logger.log(Level.INFO, String.format("Deletion of %s cancelled", caseNodeData.getDisplayName()));
return; return;
} }
progress.progress(Bundle.DeleteCaseTask_progress_gettingJobNodeData()); /*
logger.log(Level.INFO, String.format("Fetching auto ingest job node data for %s (%s) in %s", caseDisplayName, caseUniqueName, caseDirectoryPath)); * Acquire exclusive locks for the auto ingest job manifest
* files for the case, if any. Manifest file locks are acquired
* by the auto ingest node (AIN) input directory scanning tasks
* when they look for auto ingest jobs to enqueue, and by the
* AIN job processing tasks when they execute a job. Acquiring
* these locks here ensures that the scanning tasks and job
* processing tasks cannot do anything with the auto ingest jobs
* for a case during case deletion.
*/
progress.progress(Bundle.DeleteCaseTask_progress_acquiringManifestLocks());
logger.log(Level.INFO, String.format("Acquiring exclusive manifest file locks for %s", caseNodeData.getDisplayName()));
try { try {
getAutoIngestJobNodeData(); if (!acquireManifestFileLocks()) {
} catch (CoordinationServiceException ex) { logger.log(Level.INFO, String.format("Could not delete %s because a manifest file lock was already held by another host", caseNodeData.getDisplayName()));
logger.log(Level.SEVERE, String.format("Error fetching auto ingest job node data for %s (%s) in %s, cannot delete case", caseDisplayName, caseUniqueName, caseDirectoryPath), ex);
return;
} catch (InterruptedException ex) {
logger.log(Level.INFO, String.format("Deletion of %s (%s) in %s cancelled", caseDisplayName, caseUniqueName, caseDirectoryPath), ex);
return;
}
if (Thread.currentThread().isInterrupted()) {
logger.log(Level.INFO, String.format("Deletion of %s (%s) in %s cancelled", caseDisplayName, caseUniqueName, caseDirectoryPath));
return;
}
if (!nodeDataForAutoIngestJobs.isEmpty()) {
progress.progress(Bundle.DeleteCaseTask_progress_acquiringManifestLocks());
logger.log(Level.INFO, String.format("Acquiring exclusive manifest file locks for %s (%s) in %s", caseDisplayName, caseUniqueName, caseDirectoryPath));
getManifestFileLocks();
if (manifestFileLocks.isEmpty()) {
logger.log(Level.INFO, String.format("Could not delete %s (%s) in %s because a case directory lock was held by another host", caseDisplayName, caseUniqueName, caseDirectoryPath));
return; return;
} }
} else { } catch (CoordinationServiceException ex) {
logger.log(Level.INFO, String.format("No auto ingest job node data found for %s (%s) in %s", caseDisplayName, caseUniqueName, caseDirectoryPath)); logger.log(Level.WARNING, String.format("Could not delete %s because an error occurred acquiring the manifest file locks", caseNodeData.getDisplayName()), ex);
return;
} catch (InterruptedException ex) {
logger.log(Level.INFO, String.format("Deletion of %s cancelled", caseNodeData.getDisplayName()), ex);
return;
} }
if (Thread.currentThread().isInterrupted()) { if (Thread.currentThread().isInterrupted()) {
logger.log(Level.INFO, String.format("Deletion of %s (%s) in %s cancelled", caseDisplayName, caseUniqueName, caseDirectoryPath)); logger.log(Level.INFO, String.format("Deletion of %s cancelled", caseNodeData.getDisplayName()));
releaseManifestFileLocks();
return;
}
if (deleteOption == DeleteOptions.DELETE_INPUT || deleteOption == DeleteOptions.DELETE_ALL) {
try {
logger.log(Level.INFO, String.format("Deletion of %s cancelled", caseNodeData.getDisplayName()));
deleteAutoIngestInput();
} catch (IOException ex) {
// RJCTODO:
} catch (InterruptedException ex) {
logger.log(Level.INFO, String.format("Deletion of %s cancelled", caseNodeData.getDisplayName()), ex);
releaseManifestFileLocks();
return;
}
}
if (Thread.currentThread().isInterrupted()) {
logger.log(Level.INFO, String.format("Deletion of %s cancelled", caseNodeData.getDisplayName()));
releaseManifestFileLocks();
return;
}
if (deleteOption == DeleteOptions.DELETE_OUTPUT || deleteOption == DeleteOptions.DELETE_ALL) {
try {
success = deleteCaseOutput();
} catch (InterruptedException ex) {
logger.log(Level.INFO, String.format("Deletion of %s cancelled", caseNodeData.getDisplayName()), ex);
releaseManifestFileLocks();
return;
}
}
if (Thread.currentThread().isInterrupted()) {
logger.log(Level.INFO, String.format("Deletion of %s cancelled", caseNodeData.getDisplayName()));
releaseManifestFileLocks(); releaseManifestFileLocks();
return; return;
} }
try { try {
deleteWhileHoldingAllLocks(); if (deleteOption == DeleteOptions.DELETE_OUTPUT || deleteOption == DeleteOptions.DELETE_ALL) {
success = deleteManifestFileNodes();
} else {
releaseManifestFileLocks();
}
} catch (InterruptedException ex) { } catch (InterruptedException ex) {
logger.log(Level.INFO, String.format("Deletion of %s (%s) in %s cancelled", caseDisplayName, caseUniqueName, caseDirectoryPath), ex); logger.log(Level.INFO, String.format("Deletion of %s cancelled", caseNodeData.getDisplayName()), ex);
return;
}
releaseManifestFileLocks();
try {
deleteAfterManifestLocksReleased();
} catch (InterruptedException ex) {
logger.log(Level.INFO, String.format("Deletion of %s (%s) in %s cancelled", caseDisplayName, caseUniqueName, caseDirectoryPath), ex);
return; return;
} }
} catch (CoordinationServiceException ex) { } catch (CoordinationServiceException ex) {
logger.log(Level.SEVERE, String.format("Error acquiring exclusive case directory lock for %s (%s) in %s", caseDisplayName, caseUniqueName, caseDirectoryPath), ex); logger.log(Level.SEVERE, String.format("Could not delete %s because an error occurred acquiring the case directory lock", caseNodeData.getDisplayName()), ex);
return;
} }
if (Thread.currentThread().isInterrupted()) { if (Thread.currentThread().isInterrupted()) {
logger.log(Level.INFO, String.format("Deletion of %s (%s) in %s cancelled", caseDisplayName, caseUniqueName, caseDirectoryPath)); logger.log(Level.INFO, String.format("Deletion of %s cancelled", caseNodeData.getDisplayName()));
return; return;
} }
try { /*
deleteAfterCaseDirectoryLockReleased(); * Now that the case directory lock has been released, the
} catch (InterruptedException ex) { * coordination service node for it can be deleted if the use case
logger.log(Level.INFO, String.format("Deletion of %s (%s) in %s cancelled", caseDisplayName, caseUniqueName, caseDirectoryPath), ex); * requires it. However, if something to ge deleted was not deleted,
return; * leave the node so that what was and was not deleted can be
* inspected.
*/
if (success && (deleteOption == DeleteOptions.DELETE_OUTPUT || deleteOption == DeleteOptions.DELETE_ALL)) {
progress.progress(Bundle.DeleteCaseTask_progress_deletingDirLockNode());
try {
Case.deleteCaseDirectoryLockNode(caseNodeData, progress);
} catch (CoordinationServiceException ex) {
logger.log(Level.WARNING, String.format("Error deleting case directory lock node for %s", caseNodeData.getDisplayName()), ex);
} catch (InterruptedException ex) {
logger.log(Level.INFO, String.format("Deletion of %s cancelled", caseNodeData.getDisplayName()), ex);
return;
}
} }
} catch (CoordinationServiceException ex) { } catch (CoordinationServiceException ex) {
logger.log(Level.SEVERE, String.format("Error acquiring exclusive case name lock for %s (%s) in %s", caseDisplayName, caseUniqueName, caseDirectoryPath), ex); logger.log(Level.SEVERE, String.format("Could not delete %s because an error occurred acquiring the case name lock", caseNodeData.getDisplayName()), ex);
}
try {
deleteAfterCaseNameLockReleased();
} catch (InterruptedException ex) {
logger.log(Level.INFO, String.format("Deletion of %s (%s) in %s cancelled", caseDisplayName, caseUniqueName, caseDirectoryPath), ex);
return; return;
} }
}
/** if (Thread.currentThread().isInterrupted()) {
* Deletes the parts of the case that need to be deleted while holding all logger.log(Level.INFO, String.format("Deletion of %s cancelled", caseNodeData.getDisplayName()));
* of the exclusive locks: the case name lock, the case directory lock, amd return;
* the manifest file locks. Note that the locks are acquired in that order }
* and released in the opposite order.
*/
abstract void deleteWhileHoldingAllLocks() throws InterruptedException;
/** /*
* Deletes the parts of the case that need to be deleted after the release * Now that the case name lock has been released, the coordination
* of the exclusive manifest file locks, while still holding the exclusive * service node for it can be deleted if the use case requires it.
* case name and case directory locks; the manifest file locks are the first */
* locks released. if (deleteOption == DeleteOptions.DELETE_OUTPUT || deleteOption == DeleteOptions.DELETE_ALL) {
*/ progress.progress(Bundle.DeleteCaseTask_progress_deletingNameLockNode());
abstract void deleteAfterManifestLocksReleased() throws InterruptedException; try {
String caseNameLockNodeName = CaseCoordinationServiceUtils.getCaseNameLockName(caseNodeData.getDirectory());
/** coordinationService.deleteNode(CategoryNode.CASES, caseNameLockNodeName); // RJCTODO: Should this be a Case method?
* Deletes the parts of the case that need to be deleted after the release } catch (CoordinationServiceException ex) {
* of the exclusive manifest file locks and case directory lock, while still logger.log(Level.WARNING, String.format("Error deleting case name lock node for %s", caseNodeData.getDisplayName()), ex);
* holding the exclusive case name; the case name lock is the last lock } catch (InterruptedException ex) {
* released. logger.log(Level.INFO, String.format("Deletion of %s cancelled", caseNodeData.getDisplayName()), ex);
*/
abstract void deleteAfterCaseDirectoryLockReleased() throws InterruptedException;
/**
* Deletes the parts of the case that need to be deleted after the release
* of all of the exclusive locks; the case name lock is the last lock
* released.
*/
abstract void deleteAfterCaseNameLockReleased() throws InterruptedException;
/**
* Deletes the auto ingest job input directories for the case. Intended to
* be called by subclasses, if required, in their customization of the
* deleteWhileHoldingAllLocks step of the case deletion algorithm.
*/
@NbBundle.Messages({
"# {0} - input directory name", "DeleteCaseTask.progress.deletingInputDir=Deleting input directory {0}..."
})
protected void deleteInputDirectories() {
boolean allInputDirsDeleted = true;
for (AutoIngestJobNodeData jobNodeData : nodeDataForAutoIngestJobs) {
Path inputDirPath = jobNodeData.getManifestFilePath().getParent();
File inputDir = inputDirPath.toFile();
if (inputDir.exists()) {
progress.progress(Bundle.DeleteCaseTask_progress_deletingInputDir(inputDirPath));
logger.log(Level.INFO, String.format("Deleting input directory %s for %s (%s) in %s", inputDirPath, caseDisplayName, caseUniqueName, caseDirectoryPath));
if (!FileUtil.deleteDir(new File(inputDirPath.toString()))) {
logger.log(Level.WARNING, String.format("Failed to delete the input directory %s for %s (%s) in %s", inputDirPath, caseDisplayName, caseUniqueName, caseDirectoryPath));
allInputDirsDeleted = false;
}
} }
} }
if (allInputDirsDeleted) { }
setDeletedItemFlag(CaseNodeData.DeletedFlags.DATA_SOURCES);
/**
* Acquires either all or none of the auto ingest job manifest file locks
* for a case.
*
* @return True if all of the locks were acquired; false otherwise.
*
* @throws CoordinationServiceException If there is an error completing a
* coordination service operation.
* @throws InterruptedException If the thread in which this task is
* running is interrupted while blocked
* waiting for a coordination service
* operation to complete.
*/
@NbBundle.Messages({
"# {0} - manifest file path", "DeleteCaseTask.progress.lockingManifest=Locking manifest file {0}..."
})
    private boolean acquireManifestFileLocks() throws CoordinationServiceException, InterruptedException {
        /*
         * Get the "original" case name from the case directory. This is
         * necessary because the case display name can be changed and the case
         * name may have a time stamp added to make it unique, depending on how
         * the case was created. An alternative approach would be to strip the
         * time stamp from the case name in the case node data instead, but the
         * code for that is already in the utility method called here.
         */
        String caseName = CaseCoordinationServiceUtils.getCaseNameLockName(caseNodeData.getDirectory());
        try {
            boolean allLocksAcquired = true;
            // RJCTODO: Read in the list of manifests for the case instead of
            // inspecting the nodes this way, once the recording of the
            // manifests is in place.
            final List<String> nodeNames = coordinationService.getNodeList(CoordinationService.CategoryNode.MANIFESTS);
            for (String manifestPath : nodeNames) {
                // Check for cancellation between each (potentially blocking)
                // coordination service round trip.
                if (Thread.currentThread().isInterrupted()) {
                    throw new InterruptedException();
                }
                byte[] nodeBytes = coordinationService.getNodeData(CoordinationService.CategoryNode.MANIFESTS, manifestPath);
                if (nodeBytes == null || nodeBytes.length <= 0) {
                    // An empty node cannot identify its case; skip it rather
                    // than fail the whole lock acquisition.
                    logger.log(Level.WARNING, String.format("Empty coordination service node data found for %s", manifestPath));
                    continue;
                }
                if (Thread.currentThread().isInterrupted()) {
                    throw new InterruptedException();
                }
                AutoIngestJobNodeData nodeData;
                try {
                    nodeData = new AutoIngestJobNodeData(nodeBytes);
                } catch (InvalidDataException ex) {
                    // Corrupt node data: log and skip, same as the empty case.
                    logger.log(Level.WARNING, String.format("Invalid coordination service node data found for %s", manifestPath), ex);
                    continue;
                }
                if (Thread.currentThread().isInterrupted()) {
                    throw new InterruptedException();
                }
                if (caseName.equals(nodeData.getCaseName())) {
                    /*
                     * When acquiring manifest file locks, it is reasonable to
                     * block while acquiring this lock since the auto ingest
                     * node (AIN) input directory scanning tasks do a lot of
                     * short-term acquiring and releasing of manifest file
                     * locks. The assumption here is that the originator of this
                     * case deletion task is not asking for deletion of a case
                     * that has a job an auto ingest node (AIN) job processing
                     * task is working on and that
                     * MANIFEST_FILE_LOCKING_TIMEOUT_MINS is not very long,
                     * anyway, so we can and should wait a bit.
                     */
                    logger.log(Level.INFO, String.format("Exclusively locking the manifest %s for %s", manifestPath, caseNodeData.getDisplayName()));
                    progress.progress(Bundle.DeleteCaseTask_progress_lockingManifest(manifestPath));
                    CoordinationService.Lock manifestLock = coordinationService.tryGetExclusiveLock(CoordinationService.CategoryNode.MANIFESTS, manifestPath, MANIFEST_FILE_LOCKING_TIMEOUT_MINS, TimeUnit.MINUTES);
                    if (null != manifestLock) {
                        manifestFileLocks.add(manifestLock);
                    } else {
                        // All-or-nothing semantics: one unobtainable lock means
                        // every lock acquired so far must be released.
                        allLocksAcquired = false;
                        logger.log(Level.INFO, String.format("Failed to exclusively lock the manifest %s because it was already held by another host", manifestPath, caseNodeData.getDisplayName()));
                        releaseManifestFileLocks();
                        break;
                    }
                }
            }
            return allLocksAcquired;
        } catch (CoordinationServiceException | InterruptedException ex) {
            // Never leave partial lock state behind on error or cancellation.
            releaseManifestFileLocks();
            throw ex;
} }
} }
/** /**
* Deletes the case database, the text index, and the case directory for the * Deletes the auto ingest job input manifests for the case along with the
* case. Intended to be called by subclasses, if required, in their * corresponding data sources.
* customization of the deleteWhileHoldingAllLocks step of the case deletion *
* algorithm. * @throws IOException If there is an error opening the case
* manifests list file.
* @throws InterruptedException If the thread in which this task is running
* is interrupted while blocked waiting for a
* coordination service operation to complete.
*/
@NbBundle.Messages({
"# {0} - manifest file path", "DeleteCaseTask.progress.deletingManifest=Deleting manifest file {0}..."
})
private void deleteAutoIngestInput() throws IOException, InterruptedException {
boolean allInputDeleted = true;
final Path manifestsListFilePath = Paths.get(caseNodeData.getDirectory().toString(), AutoIngestManager.getCaseManifestsListFileName());
final Scanner manifestsListFileScanner = new Scanner(manifestsListFilePath);
while (manifestsListFileScanner.hasNext()) {
if (Thread.currentThread().isInterrupted()) {
throw new InterruptedException();
}
final String manifestFilePath = manifestsListFileScanner.next();
final File manifestFile = new File(manifestFilePath);
if (manifestFile.exists()) {
// RJCTODO: Parse file, open case database, delete data sources
// before deleting manifest file
progress.progress(Bundle.DeleteCaseTask_progress_deletingManifest(manifestFilePath));
logger.log(Level.INFO, String.format("Deleting manifest file %s for %s", manifestFilePath, caseNodeData.getDisplayName()));
if (manifestFile.delete()) {
logger.log(Level.WARNING, String.format("Failed to delete manifest file %s for %s", manifestFilePath, caseNodeData.getDisplayName()));
allInputDeleted = false;
}
}
if (allInputDeleted) {
setDeletedItemFlag(CaseNodeData.DeletedFlags.DATA_SOURCES);
}
}
}
/**
* Deletes the case database, the text index, the case directory, and the
* case resources and auto ingest log coordination service lock nodes for
* the case.
*
* @return If true if all of the case output that was found was deleted,
* false otherwise.
*
* @throws InterruptedException If the thread in which this task is running
* is interrupted while blocked waiting for a
* coordination service operation to complete.
*/ */
@NbBundle.Messages({ @NbBundle.Messages({
"DeleteCaseTask.progress.locatingCaseMetadataFile=Locating case metadata file...", "DeleteCaseTask.progress.locatingCaseMetadataFile=Locating case metadata file...",
"DeleteCaseTask.progress.deletingCaseOutput=Deleting case database, text index, and directory...", "DeleteCaseTask.progress.deletingResourcesLockNode=Deleting case resources coordination service node...",
"DeleteCaseTask.progress.deletingJobLogLockNode=Deleting case auto ingest job log lock node..." "DeleteCaseTask.progress.deletingJobLogLockNode=Deleting case auto ingest job coordination service node..."
}) })
protected void deleteCaseOutput() { private boolean deleteCaseOutput() throws InterruptedException {
boolean errorsOccurred = false;
progress.progress(Bundle.DeleteCaseTask_progress_locatingCaseMetadataFile()); progress.progress(Bundle.DeleteCaseTask_progress_locatingCaseMetadataFile());
logger.log(Level.INFO, String.format("Locating metadata file for %s (%s) in %s", caseDisplayName, caseUniqueName, caseDirectoryPath)); logger.log(Level.INFO, String.format("Locating metadata file for %s", caseNodeData.getDisplayName()));
CaseMetadata caseMetadata = null; CaseMetadata caseMetadata = null;
final File caseDirectory = caseDirectoryPath.toFile(); final File caseDirectory = caseNodeData.getDirectory().toFile();
final File[] filesInDirectory = caseDirectory.listFiles(); if (caseDirectory.exists()) {
if (filesInDirectory != null) { final File[] filesInDirectory = caseDirectory.listFiles();
for (File file : filesInDirectory) { if (filesInDirectory != null) {
if (file.getName().toLowerCase().endsWith(CaseMetadata.getFileExtension()) && file.isFile()) { for (File file : filesInDirectory) {
try { if (file.getName().toLowerCase().endsWith(CaseMetadata.getFileExtension()) && file.isFile()) {
caseMetadata = new CaseMetadata(Paths.get(file.getPath())); try {
} catch (CaseMetadata.CaseMetadataException ex) { caseMetadata = new CaseMetadata(Paths.get(file.getPath()));
logger.log(Level.WARNING, String.format("Error getting opening case metadata file for %s (%s) in %s", caseDisplayName, caseUniqueName, caseDirectoryPath), ex); } catch (CaseMetadata.CaseMetadataException ex) {
logger.log(Level.WARNING, String.format("Error getting opening case metadata file for %s", caseNodeData.getDisplayName()), ex);
}
break;
} }
break;
} }
} }
if (Thread.currentThread().isInterrupted()) {
throw new InterruptedException();
}
if (caseMetadata != null) {
logger.log(Level.INFO, String.format("Deleting output for %s", caseNodeData.getDisplayName()));
errorsOccurred = Case.deleteMultiUserCase(caseNodeData, caseMetadata, progress, logger); // RJCTODO: CHeck for errors occurred?
} else {
logger.log(Level.WARNING, String.format("Failed to locate metadata file for %s", caseNodeData.getDisplayName()));
}
} }
if (caseMetadata != null) { if (Thread.currentThread().isInterrupted()) {
progress.progress(Bundle.DeleteCaseTask_progress_deletingCaseOutput()); throw new InterruptedException();
logger.log(Level.INFO, String.format("Deleting output for %s (%s) in %s", caseDisplayName, caseUniqueName, caseDirectoryPath));
Case.deleteMultiUserCase(caseNodeData, caseMetadata, progress); // RJCTODO: Make this method throw the interrupted exception.
} else {
logger.log(Level.WARNING, String.format("Failed to locate metadata file for %s (%s) in %s", caseDisplayName, caseUniqueName, caseDirectoryPath));
} }
progress.progress(Bundle.DeleteCaseTask_progress_deletingResourcesLockNode());
try {
Case.deleteCaseResourcesLockNode(caseNodeData, progress);
} catch (CoordinationServiceException ex) {
logger.log(Level.WARNING, String.format("Error deleting case resources coordiation service node for %s", caseNodeData.getDisplayName()), ex);
}
if (Thread.currentThread().isInterrupted()) {
throw new InterruptedException();
}
// RJCTODO: Check to see if getNodeData return null if the node does not exist;
// if so, make use of it
progress.progress(Bundle.DeleteCaseTask_progress_deletingJobLogLockNode()); progress.progress(Bundle.DeleteCaseTask_progress_deletingJobLogLockNode());
logger.log(Level.INFO, String.format("Deleting case auto ingest job log lock node for %s (%s) in %s", caseDisplayName, caseUniqueName, caseDirectoryPath)); logger.log(Level.INFO, String.format("Deleting case auto ingest job log coordiation service node for %s", caseNodeData.getDisplayName()));
Path logFilePath = AutoIngestJobLogger.getLogPath(caseDirectoryPath); //RJCTODO: USe util here String logFilePath = CaseCoordinationServiceUtils.getCaseAutoIngestLogLockName(caseNodeData.getDirectory());
try { try {
coordinationService.deleteNode(CategoryNode.CASES, logFilePath.toString()); coordinationService.deleteNode(CategoryNode.CASES, logFilePath);
} catch (CoordinationServiceException ex) { } catch (CoordinationServiceException ex) {
logger.log(Level.WARNING, String.format("Error deleting case auto ingest job log lock node for %s (%s) in %s", caseDisplayName, caseUniqueName, caseDirectoryPath), ex); logger.log(Level.WARNING, String.format("Error deleting case auto ingest job log coordiation service node for %s", caseNodeData.getDisplayName()), ex);
} catch (InterruptedException ex) {
logger.log(Level.INFO, String.format("Deletion of %s (%s) in %s cancelled", caseDisplayName, caseUniqueName, caseDirectoryPath), ex);
} }
return errorsOccurred;
} }
/** /**
* Deletes the manifest file lock coordination service nodes for the case. * Releases all of the manifest file locks that have been acquired by this
* Intended to be called by subclasses, if required, in their customization * task.
* of the deleteAfterManifestLocksReleased step of the case deletion
* algorithm.
*/
@Messages({
"DeleteCaseTask.progress.deletingManifestFileLockNodes=Deleting manifest file lock nodes..."
})
protected void deleteManifestFileLockNodes() throws InterruptedException {
boolean allInputDirsDeleted = true;
progress.progress(Bundle.DeleteCaseTask_progress_deletingManifestFileLockNodes());
logger.log(Level.INFO, String.format("Deleting manifest file lock nodes for %s (%s) in %s", caseDisplayName, caseUniqueName, caseDirectoryPath));
for (AutoIngestJobNodeData jobNodeData : nodeDataForAutoIngestJobs) {
try {
logger.log(Level.INFO, String.format("Deleting manifest file lock node for %s for %s (%s) in %s", jobNodeData.getManifestFilePath(), caseDisplayName, caseUniqueName, caseDirectoryPath));
coordinationService.deleteNode(CoordinationService.CategoryNode.MANIFESTS, jobNodeData.getManifestFilePath().toString());
} catch (CoordinationServiceException ex) {
logger.log(Level.WARNING, String.format("Error deleting manifest file lock node %s for %s (%s) in %s", jobNodeData.getManifestFilePath(), caseDisplayName, caseUniqueName, caseDirectoryPath), ex);
allInputDirsDeleted = false;
}
}
if (allInputDirsDeleted) {
setDeletedItemFlag(CaseNodeData.DeletedFlags.MANIFEST_FILE_LOCK_NODES);
}
// RJCTODO: Expand case type in case metadata to include auto ingest cases.
// Disable delete menu item for auto ingest cases, and possibly also add data source
// capability.
}
    /**
     * Deletes the case directory coordination service lock node for the case.
     * Intended to be called by subclasses, if required, in their customization
     * of the deleteAfterCaseDirectoryLockReleased step of the case deletion
     * algorithm.
     *
     * @throws InterruptedException If the thread in which this task is running
     *                              is interrupted while waiting for the
     *                              coordination service.
     */
    @Messages({
        "DeleteCaseTask.progress.deletingDirLockNode=Deleting case directory lock coordination service node..."
    })
    protected void deleteCaseDirectoryLockNode() throws InterruptedException {
        progress.progress(Bundle.DeleteCaseTask_progress_deletingDirLockNode());
        try {
            Case.deleteCaseDirectoryLockNode(caseNodeData, progress); // RJCTODO: Case does not need to expose this?
        } catch (CoordinationServiceException ex) {
            // Failure to delete the node is not fatal to case deletion as a
            // whole; it is logged and the task proceeds.
            logger.log(Level.WARNING, String.format("Error deleting case directory lock node for %s (%s) in %s", caseDisplayName, caseUniqueName, caseDirectoryPath), ex);
        }
    }
/**
* Deletes the case name coordination service lock node for the case.
* Intended to be called by subclasses, if required, in their customization
* of the deleteAfterCaseNameLockReleased step of the case deletion
* algorithm.
*
* @throws InterruptedException
*/
@Messages({
"DeleteCaseTask.progress.deletingNameLockNode=Deleting case name lock node..." // RJCTODO: Use consistent terminology
})
protected void deleteCaseNameLockNode() throws InterruptedException {
progress.progress(Bundle.DeleteCaseTask_progress_deletingNameLockNode());
try {
String caseNameLockNodeName = CaseCoordinationServiceUtils.getCaseLockName(caseDirectoryPath);
coordinationService.deleteNode(CategoryNode.CASES, caseNameLockNodeName);
} catch (CoordinationServiceException ex) {
logger.log(Level.WARNING, String.format("Error deleting case name lock node for %s (%s) in %s", caseDisplayName, caseUniqueName, caseDirectoryPath), ex);
}
}
/**
* Fetches the auto ingest job data from the manifest file lock coordination
* service nodes for a case.
*
* @throws CoordinationServiceException If there is an error interacting
* with the coordination service.
* @throws InterruptedException If the current thread is interrupted
* while waiting for the coordination
* service.
*/
private void getAutoIngestJobNodeData() throws CoordinationServiceException, InterruptedException {
String caseName = caseDisplayName;
final List<String> nodeNames = coordinationService.getNodeList(CoordinationService.CategoryNode.MANIFESTS);
for (String nodeName : nodeNames) {
try {
byte[] nodeBytes = coordinationService.getNodeData(CoordinationService.CategoryNode.CASES, nodeName);
if (nodeBytes == null || nodeBytes.length <= 0) {
logger.log(Level.WARNING, String.format("Missing auto ingest job node data for manifest file lock node %s, deleting node", nodeName));
try {
coordinationService.deleteNode(CategoryNode.MANIFESTS, nodeName);
} catch (CoordinationServiceException ex) {
logger.log(Level.WARNING, String.format("Failed to delete empty manifest file lock node %s", nodeName));
}
continue;
}
AutoIngestJobNodeData nodeData = new AutoIngestJobNodeData(nodeBytes);
if (caseName.equals(nodeData.getCaseName())) {
nodeDataForAutoIngestJobs.add(nodeData);
}
} catch (CoordinationService.CoordinationServiceException | InvalidDataException ex) {
logger.log(Level.WARNING, String.format("Failed to get auto ingest job node data for %s", nodeName), ex);
}
}
}
/**
* Acquires either all or none of the manifest file locks for a case.
*/ */
@NbBundle.Messages({ @NbBundle.Messages({
"# {0} - manifest file name", "DeleteCaseTask.progress.lockingManifestFile=Acquiring exclusive lock on manifest {0}..." "# {0} - manifest file path", "DeleteCaseTask.progress.releasingManifestLock=Releasing the exclusive coordination service lock on the manifest file {0}..."
})
    private void getManifestFileLocks() {
        // All-or-nothing acquisition: on the first lock that cannot be
        // obtained, every lock acquired so far is released, the map is
        // cleared, and the loop stops, leaving manifestFileLocks empty as the
        // caller's failure signal.
        for (AutoIngestJobNodeData autoIngestJobNodeData : nodeDataForAutoIngestJobs) {
            String manifestPath = autoIngestJobNodeData.getManifestFilePath().toString();
            try {
                progress.progress(Bundle.DeleteCaseTask_progress_lockingManifestFile(manifestPath));
                logger.log(Level.INFO, String.format("Exclusively locking the manifest %s for %s (%s) in %s", manifestPath, caseDisplayName, caseUniqueName, caseDirectoryPath));
                CoordinationService.Lock inputDirLock = coordinationService.tryGetExclusiveLock(CoordinationService.CategoryNode.MANIFESTS, manifestPath);
                if (null != inputDirLock) {
                    manifestFileLocks.put(manifestPath, inputDirLock);
                } else {
                    // Another host holds the lock; abandon the acquisition.
                    logger.log(Level.INFO, String.format("Failed to exclusively lock the manifest %s for %s (%s) in %s", manifestPath, caseDisplayName, caseUniqueName, caseDirectoryPath));
                    releaseManifestFileLocks();
                    manifestFileLocks.clear();
                    break;
                }
            } catch (CoordinationService.CoordinationServiceException ex) {
                // Coordination service error; abandon the acquisition.
                logger.log(Level.SEVERE, String.format("Error exclusively locking the manifest %s for %s (%s) in %s", manifestPath, caseDisplayName, caseUniqueName, caseDirectoryPath), ex);
                releaseManifestFileLocks();
                manifestFileLocks.clear();
                break;
            }
        }
    }
/**
* Releases any manifest file coordination service locks that were acquired
* for the case.
*/
@NbBundle.Messages({
"# {0} - manifest file path", "DeleteCaseTask.progress.releasingManifestLock=Releasing the exclusive lock on manifest file {0}..."
}) })
private void releaseManifestFileLocks() { private void releaseManifestFileLocks() {
if (!manifestFileLocks.isEmpty()) { for (Lock manifestFileLock : manifestFileLocks) {
for (Map.Entry<String, CoordinationService.Lock> entry : manifestFileLocks.entrySet()) { String manifestFilePath = manifestFileLock.getNodePath();
String manifestFilePath = entry.getKey(); try {
CoordinationService.Lock manifestFileLock = entry.getValue(); progress.progress(Bundle.DeleteCaseTask_progress_releasingManifestLock(manifestFilePath));
try { logger.log(Level.INFO, String.format("Releasing the exclusive coordination service lock on the manifest file %s for %s", manifestFilePath, caseNodeData.getDisplayName()));
progress.progress(Bundle.DeleteCaseTask_progress_releasingManifestLock(manifestFilePath)); manifestFileLock.release();
logger.log(Level.INFO, String.format("Releasing the exclusive lock on the manifest file %s for %s (%s) in %s", manifestFilePath, caseDisplayName, caseUniqueName, caseDirectoryPath)); } catch (CoordinationServiceException ex) {
manifestFileLock.release(); logger.log(Level.WARNING, String.format("Error releasing the exclusive coordination service lock on the manifest file %s for %s", manifestFilePath, caseNodeData.getDisplayName()), ex);
} catch (CoordinationServiceException ex) {
logger.log(Level.SEVERE, String.format("Error releasing exclusive lock on the manifest file %s for %s (%s) in %s", manifestFilePath, caseDisplayName, caseUniqueName, caseDirectoryPath), ex);
}
} }
} }
manifestFileLocks.clear();
} }
/** /**
* Sets a deleted item flag for the case. * Releases all of the manifest file locks that have been acquired by this
* task and attempts to delete the corresponding coordination service nodes.
*
* @return True if all of the manifest file coordianiton service nodes have
* been deleted, false otherwise.
*
* @throws InterruptedException If the thread in which this task is running
* is interrupted while blocked waiting for a
* coordination service operation to complete.
*/
@Messages({
"# {0} - manifest file path", "DeleteCaseTask.progress.deletingManifestFileNode=Deleting the manifest file coordination service node for {0}..."
})
private boolean deleteManifestFileNodes() throws InterruptedException {
boolean allINodesDeleted = true;
for (Lock manifestFileLock : manifestFileLocks) {
String manifestFilePath = manifestFileLock.getNodePath();
try {
progress.progress(Bundle.DeleteCaseTask_progress_releasingManifestLock(manifestFilePath));
logger.log(Level.INFO, String.format("Releasing the exclusive coordination service lock on the manifest file %s for %s", manifestFilePath, caseNodeData.getDisplayName()));
manifestFileLock.release();
progress.progress(Bundle.DeleteCaseTask_progress_deletingManifestFileNode(manifestFilePath));
logger.log(Level.INFO, String.format("Deleting the manifest file coordination service node for %s for %s", manifestFilePath, caseNodeData.getDisplayName()));
coordinationService.deleteNode(CoordinationService.CategoryNode.MANIFESTS, manifestFilePath);
} catch (CoordinationServiceException ex) {
allINodesDeleted = false;
logger.log(Level.WARNING, String.format("Error deleting the manifest file coordination service node for %s for %s", manifestFilePath, caseNodeData.getDisplayName()), ex);
}
}
manifestFileLocks.clear();
return allINodesDeleted;
}
/**
* Sets a deleted item flag in the coordination service node data for the
* case.
* *
* @param flag The flag to set. * @param flag The flag to set.
*/ */
private void setDeletedItemFlag(CaseNodeData.DeletedFlags flag) { private void setDeletedItemFlag(CaseNodeData.DeletedFlags flag) {
try { try {
caseNodeData.setDeletedFlag(flag); caseNodeData.setDeletedFlag(flag);
coordinationService.setNodeData(CategoryNode.CASES, caseDirectoryPath.toString(), caseNodeData.toArray()); coordinationService.setNodeData(CategoryNode.CASES, caseNodeData.getDirectory().toString(), caseNodeData.toArray());
} catch (IOException | CoordinationServiceException | InterruptedException ex) { } catch (IOException | CoordinationServiceException | InterruptedException ex) {
logger.log(Level.SEVERE, String.format("Error updating deleted item flag %s for %s (%s) in %s", flag.name(), caseDisplayName, caseUniqueName, caseDirectoryPath), ex); logger.log(Level.SEVERE, String.format("Error updating deleted item flag %s for %s", flag.name(), caseNodeData.getDisplayName()), ex);
} }
} }