mirror of
https://github.com/overcuriousity/autopsy-flatpak.git
synced 2025-07-12 16:06:15 +00:00
Interim commit of improved case deletion
This commit is contained in:
parent
8b014494da
commit
95c3cf2d74
@ -32,11 +32,11 @@ import java.nio.file.Path;
|
||||
import java.nio.file.Paths;
|
||||
import java.sql.Connection;
|
||||
import java.sql.DriverManager;
|
||||
import java.sql.ResultSet;
|
||||
import java.sql.SQLException;
|
||||
import java.sql.Statement;
|
||||
import java.text.ParseException;
|
||||
import java.text.SimpleDateFormat;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.Date;
|
||||
import java.util.HashMap;
|
||||
@ -60,7 +60,6 @@ import javax.annotation.concurrent.GuardedBy;
|
||||
import javax.annotation.concurrent.ThreadSafe;
|
||||
import javax.swing.JOptionPane;
|
||||
import javax.swing.SwingUtilities;
|
||||
import org.openide.util.Exceptions;
|
||||
import org.openide.util.Lookup;
|
||||
import org.openide.util.NbBundle;
|
||||
import org.openide.util.NbBundle.Messages;
|
||||
@ -100,7 +99,6 @@ import org.sleuthkit.autopsy.coreutils.MessageNotifyUtil;
|
||||
import org.sleuthkit.autopsy.coreutils.NetworkUtils;
|
||||
import org.sleuthkit.autopsy.coreutils.PlatformUtil;
|
||||
import org.sleuthkit.autopsy.coreutils.ThreadUtils;
|
||||
import org.sleuthkit.autopsy.coreutils.TimeStampUtils;
|
||||
import org.sleuthkit.autopsy.coreutils.TimeZoneUtils;
|
||||
import org.sleuthkit.autopsy.coreutils.Version;
|
||||
import org.sleuthkit.autopsy.events.AutopsyEvent;
|
||||
@ -717,7 +715,8 @@ public class Case {
|
||||
* lower-level exception.
|
||||
*/
|
||||
@Messages({
|
||||
"Case.exceptionMessage.cannotDeleteCurrentCase=Cannot delete current case, it must be closed first."
|
||||
"Case.exceptionMessage.cannotDeleteCurrentCase=Cannot delete current case, it must be closed first.",
|
||||
"# {0} - case display name", "Case.exceptionMessage.deletionInterrupted=Deletion of the case {0} was cancelled."
|
||||
})
|
||||
public static void deleteCase(CaseMetadata metadata) throws CaseActionException {
|
||||
synchronized (caseActionSerializationLock) {
|
||||
@ -737,7 +736,16 @@ public class Case {
|
||||
if (CaseType.SINGLE_USER_CASE == metadata.getCaseType()) {
|
||||
deleteSingleUserCase(metadata, progressIndicator);
|
||||
} else {
|
||||
deleteMultiUserCase(metadata, progressIndicator);
|
||||
try {
|
||||
deleteMultiUserCase(metadata, progressIndicator);
|
||||
} catch (InterruptedException ex) {
|
||||
/*
|
||||
* Task cancellation is not currently supported for this
|
||||
* code path, so this catch block is not expected to be
|
||||
* executed.
|
||||
*/
|
||||
throw new CaseActionException(Bundle.Case_exceptionMessage_deletionInterrupted(metadata.getCaseDisplayName()), ex);
|
||||
}
|
||||
}
|
||||
} finally {
|
||||
progressIndicator.finish();
|
||||
@ -978,7 +986,7 @@ public class Case {
|
||||
"Case.exceptionMessage.cannotGetLockToDeleteCase=Cannot delete case because it is open for another user or host.",
|
||||
"Case.progressMessage.fetchingCoordSvcNodeData=Fetching coordination service node data for the case..."
|
||||
})
|
||||
private static void deleteMultiUserCase(CaseMetadata metadata, ProgressIndicator progressIndicator) throws CaseActionException {
|
||||
private static void deleteMultiUserCase(CaseMetadata metadata, ProgressIndicator progressIndicator) throws CaseActionException, InterruptedException {
|
||||
progressIndicator.progress(Bundle.Case_progressMessage_connectingToCoordSvc());
|
||||
CoordinationService coordinationService;
|
||||
try {
|
||||
@ -1010,8 +1018,17 @@ public class Case {
|
||||
throw new CaseActionException(Bundle.Case_exceptionMessage_errorsDeletingCase());
|
||||
}
|
||||
|
||||
errorsOccurred = deleteMultiUserCase(caseNodeData, metadata, progressIndicator);
|
||||
errorsOccurred = deleteMultiUserCase(caseNodeData, metadata, progressIndicator, logger);
|
||||
|
||||
try {
|
||||
deleteCaseResourcesLockNode(caseNodeData, progressIndicator);
|
||||
} catch (CoordinationServiceException | InterruptedException ex) {
|
||||
errorsOccurred = true;
|
||||
logger.log(Level.WARNING, String.format("Error deleting the case resources lock coordination service node for the case at %s (%s) in %s", metadata.getCaseDisplayName(), metadata.getCaseName(), metadata.getCaseDirectory()), ex);
|
||||
}
|
||||
|
||||
// RJCTODO: Is this behavior implemented correctly?
|
||||
|
||||
} catch (CoordinationServiceException ex) {
|
||||
logger.log(Level.SEVERE, String.format("Error exclusively locking the case directory for %s (%s) in %s", metadata.getCaseDisplayName(), metadata.getCaseName(), metadata.getCaseDirectory()), ex);
|
||||
throw new CaseActionException(Bundle.Case_exceptionMessage_errorsDeletingCase());
|
||||
@ -1034,15 +1051,16 @@ public class Case {
|
||||
/**
|
||||
* IMPORTANT: This is a "beta" method and is subject to change or removal
|
||||
* without notice!
|
||||
*
|
||||
*
|
||||
* Attempts to delete the case database, the text index, the case directory,
|
||||
* and the case resources coordination service lock code for a case and
|
||||
* removes the case from the recent cases menu of the mian application
|
||||
* removes the case from the recent cases menu of the main application
|
||||
* window.
|
||||
*
|
||||
* @param caseNodeData The coordination service node data for the case.
|
||||
* @param metadata The case metadata.
|
||||
* @param progressIndicator A progress indicator.
|
||||
* @param logger A logger.
|
||||
*
|
||||
* @return True if one or more errors occurred (see log for details), false
|
||||
* otherwise.
|
||||
@ -1053,10 +1071,10 @@ public class Case {
|
||||
* during a wait.
|
||||
*/
|
||||
@Beta
|
||||
public static boolean deleteMultiUserCase(CaseNodeData caseNodeData, CaseMetadata metadata, ProgressIndicator progressIndicator) throws InterruptedException {
|
||||
public static boolean deleteMultiUserCase(CaseNodeData caseNodeData, CaseMetadata metadata, ProgressIndicator progressIndicator, Logger logger) throws InterruptedException {
|
||||
boolean errorsOccurred = false;
|
||||
try {
|
||||
deleteCaseDatabase(caseNodeData, metadata, progressIndicator);
|
||||
deleteCaseDatabase(caseNodeData, metadata, progressIndicator, logger);
|
||||
} catch (UserPreferencesException | ClassNotFoundException | SQLException ex) {
|
||||
errorsOccurred = true;
|
||||
logger.log(Level.WARNING, String.format("Failed to delete the case database for %s (%s) in %s", metadata.getCaseDisplayName(), metadata.getCaseName(), metadata.getCaseDirectory()), ex);
|
||||
@ -1066,7 +1084,7 @@ public class Case {
|
||||
}
|
||||
|
||||
try {
|
||||
deleteTextIndex(caseNodeData, metadata, progressIndicator);
|
||||
deleteTextIndex(caseNodeData, metadata, progressIndicator, logger);
|
||||
} catch (KeywordSearchServiceException ex) {
|
||||
errorsOccurred = true;
|
||||
logger.log(Level.WARNING, String.format("Failed to delete the text index for %s (%s) in %s", metadata.getCaseDisplayName(), metadata.getCaseName(), metadata.getCaseDirectory()), ex);
|
||||
@ -1076,7 +1094,7 @@ public class Case {
|
||||
}
|
||||
|
||||
try {
|
||||
deleteCaseDirectory(caseNodeData, metadata, progressIndicator);
|
||||
deleteCaseDirectory(caseNodeData, metadata, progressIndicator, logger);
|
||||
} catch (CaseActionException ex) {
|
||||
errorsOccurred = true;
|
||||
logger.log(Level.WARNING, String.format("Failed to delete the case directory for %s (%s) in %s", metadata.getCaseDisplayName(), metadata.getCaseName(), metadata.getCaseDirectory()), ex);
|
||||
@ -1085,23 +1103,6 @@ public class Case {
|
||||
return errorsOccurred;
|
||||
}
|
||||
|
||||
deleteFromRecentCases(metadata, progressIndicator);
|
||||
|
||||
if (Thread.currentThread().isInterrupted()) {
|
||||
Thread.currentThread().interrupt();
|
||||
return errorsOccurred;
|
||||
}
|
||||
|
||||
try {
|
||||
deleteCaseResourcesLockNode(caseNodeData, progressIndicator);
|
||||
} catch (CoordinationServiceException ex) {
|
||||
errorsOccurred = true;
|
||||
logger.log(Level.WARNING, String.format("Error deleting the case resources lock coordination service node for the case at %s (%s) in %s", metadata.getCaseDisplayName(), metadata.getCaseName(), metadata.getCaseDirectory()), ex);
|
||||
} catch (InterruptedException ex) {
|
||||
Thread.currentThread().interrupt();
|
||||
return errorsOccurred;
|
||||
}
|
||||
|
||||
return errorsOccurred;
|
||||
}
|
||||
|
||||
@ -1127,15 +1128,20 @@ public class Case {
|
||||
@Messages({
|
||||
"Case.progressMessage.deletingCaseDatabase=Deleting case database..."
|
||||
})
|
||||
private static void deleteCaseDatabase(CaseNodeData caseNodeData, CaseMetadata metadata, ProgressIndicator progressIndicator) throws UserPreferencesException, ClassNotFoundException, SQLException, InterruptedException {
|
||||
private static void deleteCaseDatabase(CaseNodeData caseNodeData, CaseMetadata metadata, ProgressIndicator progressIndicator, Logger logger) throws UserPreferencesException, ClassNotFoundException, SQLException, InterruptedException {
|
||||
if (!caseNodeData.isDeletedFlagSet(CaseNodeData.DeletedFlags.CASE_DB)) {
|
||||
progressIndicator.progress(Bundle.Case_progressMessage_deletingCaseDatabase());
|
||||
logger.log(Level.INFO, String.format("Deleting case database for %s (%s) in %s", caseNodeData.getDisplayName(), caseNodeData.getName(), caseNodeData.getDirectory()));
|
||||
CaseDbConnectionInfo info = UserPreferences.getDatabaseConnectionInfo();
|
||||
String url = "jdbc:postgresql://" + info.getHost() + ":" + info.getPort() + "/postgres"; //NON-NLS
|
||||
Class.forName("org.postgresql.Driver"); //NON-NLS
|
||||
try (Connection connection = DriverManager.getConnection(url, info.getUserName(), info.getPassword()); Statement statement = connection.createStatement()) {
|
||||
String deleteCommand = "DROP DATABASE \"" + metadata.getCaseDatabaseName() + "\""; //NON-NLS
|
||||
statement.execute(deleteCommand);
|
||||
String dbExistsQuery = "SELECT 1 from pg_database WHERE datname = '" + metadata.getCaseDatabaseName() + "'";
|
||||
ResultSet queryResult = statement.executeQuery(dbExistsQuery);
|
||||
if (queryResult.next()) {
|
||||
String deleteCommand = "DROP DATABASE \"" + metadata.getCaseDatabaseName() + "\""; //NON-NLS
|
||||
statement.execute(deleteCommand);
|
||||
}
|
||||
}
|
||||
setDeletedItemFlag(caseNodeData, CaseNodeData.DeletedFlags.CASE_DB);
|
||||
}
|
||||
@ -1174,8 +1180,9 @@ public class Case {
|
||||
* data to be written to the
|
||||
* coordination service node database.
|
||||
*/
|
||||
private static void deleteTextIndex(CaseNodeData caseNodeData, CaseMetadata metadata, ProgressIndicator progressIndicator) throws KeywordSearchServiceException, InterruptedException {
|
||||
private static void deleteTextIndex(CaseNodeData caseNodeData, CaseMetadata metadata, ProgressIndicator progressIndicator, Logger logger) throws KeywordSearchServiceException, InterruptedException {
|
||||
if (!caseNodeData.isDeletedFlagSet(CaseNodeData.DeletedFlags.TEXT_INDEX)) {
|
||||
logger.log(Level.INFO, String.format("Deleting text index for %s (%s) in %s", caseNodeData.getDisplayName(), caseNodeData.getName(), caseNodeData.getDirectory()));
|
||||
deleteTextIndex(metadata, progressIndicator);
|
||||
setDeletedItemFlag(caseNodeData, CaseNodeData.DeletedFlags.TEXT_INDEX);
|
||||
}
|
||||
@ -1198,6 +1205,7 @@ public class Case {
|
||||
// when the path is >= 255 chars. Actually, deprecate this method and
|
||||
// replace it with one that throws instead of returning a boolean value.
|
||||
progressIndicator.progress(Bundle.Case_progressMessage_deletingCaseDirectory());
|
||||
logger.log(Level.INFO, String.format("Deleting case directory for %s (%s) in %s", metadata.getCaseDisplayName(), metadata.getCaseName(), metadata.getCaseDirectory()));
|
||||
if (!FileUtil.deleteDir(new File(metadata.getCaseDirectory()))) {
|
||||
throw new CaseActionException(String.format("Failed to delete %s", metadata.getCaseDirectory()));
|
||||
}
|
||||
@ -1216,7 +1224,7 @@ public class Case {
|
||||
* coordination service data to be written to
|
||||
* the coordination service node database.
|
||||
*/
|
||||
private static void deleteCaseDirectory(CaseNodeData caseNodeData, CaseMetadata metadata, ProgressIndicator progressIndicator) throws CaseActionException, InterruptedException {
|
||||
private static void deleteCaseDirectory(CaseNodeData caseNodeData, CaseMetadata metadata, ProgressIndicator progressIndicator, Logger logger) throws CaseActionException, InterruptedException {
|
||||
if (!caseNodeData.isDeletedFlagSet(CaseNodeData.DeletedFlags.CASE_DIR)) {
|
||||
deleteCaseDirectory(metadata, progressIndicator);
|
||||
setDeletedItemFlag(caseNodeData, CaseNodeData.DeletedFlags.CASE_DIR);
|
||||
@ -1242,7 +1250,6 @@ public class Case {
|
||||
}
|
||||
}
|
||||
|
||||
// RJCTODO: Copy-paste instead
|
||||
/**
|
||||
* IMPORTANT: This is a "beta" method and is subject to change or removal
|
||||
* without notice!
|
||||
@ -1264,14 +1271,13 @@ public class Case {
|
||||
"Case.progressMessage.deletingResourcesLockNode=Deleting case resources lock node..."
|
||||
})
|
||||
@Beta
|
||||
private static void deleteCaseResourcesLockNode(CaseNodeData caseNodeData, ProgressIndicator progressIndicator) throws CoordinationServiceException, InterruptedException {
|
||||
public static void deleteCaseResourcesLockNode(CaseNodeData caseNodeData, ProgressIndicator progressIndicator) throws CoordinationServiceException, InterruptedException {
|
||||
progressIndicator.progress(Bundle.Case_progressMessage_deletingResourcesLockNode());
|
||||
String resourcesLockNodePath = caseNodeData.getDirectory().toString() + RESOURCES_LOCK_SUFFIX;//RJCTODO: Use utility
|
||||
CoordinationService coordinationService = CoordinationService.getInstance();
|
||||
coordinationService.deleteNode(CategoryNode.CASES, resourcesLockNodePath);
|
||||
}
|
||||
|
||||
// RJCTODO: Copy-paste instead
|
||||
/**
|
||||
* IMPORTANT: This is a "beta" method and is subject to change or removal
|
||||
* without notice!
|
||||
|
@ -42,7 +42,7 @@ public class CaseCoordinationServiceUtils {
|
||||
return caseDirectoryPath.toString();
|
||||
}
|
||||
|
||||
public static String getCaseLockName(Path caseDirectoryPath) {
|
||||
public static String getCaseNameLockName(Path caseDirectoryPath) {
|
||||
String caseName = caseDirectoryPath.getFileName().toString();
|
||||
if (TimeStampUtils.endsWithTimeStamp(caseName)) {
|
||||
caseName = TimeStampUtils.removeTimeStamp(caseName);
|
||||
|
@ -161,7 +161,7 @@ final public class MultiUserCaseNodeDataCollector { // RJCTODO: Shorten name aft
|
||||
deleteCoordinationServiceNode(coordinationService, CaseCoordinationServiceUtils.getCaseResourcesLockName(caseDirectoryPath));
|
||||
deleteCoordinationServiceNode(coordinationService, CaseCoordinationServiceUtils.getCaseAutoIngestLogLockName(caseDirectoryPath));
|
||||
deleteCoordinationServiceNode(coordinationService, CaseCoordinationServiceUtils.getCaseDirectoryLockName(caseDirectoryPath));
|
||||
deleteCoordinationServiceNode(coordinationService, CaseCoordinationServiceUtils.getCaseLockName(caseDirectoryPath));
|
||||
deleteCoordinationServiceNode(coordinationService, CaseCoordinationServiceUtils.getCaseNameLockName(caseDirectoryPath));
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -142,22 +142,22 @@ public interface MultiUserCaseBrowserCustomizer {
|
||||
"MultiUserCaseBrowserCustomizer.column.createTime=Create Time",
|
||||
"MultiUserCaseBrowserCustomizer.column.directory=Directory",
|
||||
"MultiUserCaseBrowserCustomizer.column.lastAccessTime=Last Access Time",
|
||||
"MultiUserCaseBrowserCustomizer.column.manifestFileZNodesDeleteStatus=Manifest Znodes Deleted",
|
||||
"MultiUserCaseBrowserCustomizer.column.dataSourcesDeleteStatus=Data Sources Deleted",
|
||||
"MultiUserCaseBrowserCustomizer.column.textIndexDeleteStatus=Text Index Deleted",
|
||||
"MultiUserCaseBrowserCustomizer.column.caseDbDeleteStatus=Case Database Deleted",
|
||||
"MultiUserCaseBrowserCustomizer.column.caseDirDeleteStatus=Case Directory Deleted",
|
||||
"MultiUserCaseBrowserCustomizer.column.dataSourcesDeleteStatus=Data Sources Deleted",
|
||||
"MultiUserCaseBrowserCustomizer.column.manifestCoordSvcNodesDeleteStatus=Manifest ZooKeeper Node Deleted"
|
||||
"MultiUserCaseBrowserCustomizer.column.caseDirDeleteStatus=Case Directory Deleted"
|
||||
})
|
||||
public enum Column {
|
||||
DISPLAY_NAME(Bundle.MultiUserCaseBrowserCustomizer_column_displayName()),
|
||||
CREATE_DATE(Bundle.MultiUserCaseBrowserCustomizer_column_createTime()),
|
||||
DIRECTORY(Bundle.MultiUserCaseBrowserCustomizer_column_directory()),
|
||||
LAST_ACCESS_DATE(Bundle.MultiUserCaseBrowserCustomizer_column_lastAccessTime()),
|
||||
TEXT_INDEX_DELETION_STATUS(Bundle.MultiUserCaseBrowserCustomizer_column_textIndexDeleteStatus()),
|
||||
CASE_DB_DELETION_STATUS(Bundle.MultiUserCaseBrowserCustomizer_column_caseDbDeleteStatus()),
|
||||
CASE_DIR_DELETION_STATUS(Bundle.MultiUserCaseBrowserCustomizer_column_caseDirDeleteStatus()),
|
||||
DATA_SOURCES_DELETION_STATUS(Bundle.MultiUserCaseBrowserCustomizer_column_dataSourcesDeleteStatus()),
|
||||
MANIFEST_FILE_LOCK_NODES_DELETION_STATUS(Bundle.MultiUserCaseBrowserCustomizer_column_manifestCoordSvcNodesDeleteStatus());
|
||||
MANIFEST_FILE_ZNODES_DELETE_STATUS(Bundle.MultiUserCaseBrowserCustomizer_column_manifestFileZNodesDeleteStatus()),
|
||||
DATA_SOURCES_DELETE_STATUS(Bundle.MultiUserCaseBrowserCustomizer_column_dataSourcesDeleteStatus()),
|
||||
TEXT_INDEX_DELETE_STATUS(Bundle.MultiUserCaseBrowserCustomizer_column_textIndexDeleteStatus()),
|
||||
CASE_DB_DELETE_STATUS(Bundle.MultiUserCaseBrowserCustomizer_column_caseDbDeleteStatus()),
|
||||
CASE_DIR_DELETE_STATUS(Bundle.MultiUserCaseBrowserCustomizer_column_caseDirDeleteStatus());
|
||||
|
||||
private final String displayName;
|
||||
|
||||
|
@ -25,8 +25,10 @@ import javax.swing.Action;
|
||||
import org.openide.nodes.AbstractNode;
|
||||
import org.openide.nodes.Children;
|
||||
import org.openide.nodes.Sheet;
|
||||
import org.openide.util.NbBundle;
|
||||
import org.openide.util.lookup.Lookups;
|
||||
import org.sleuthkit.autopsy.casemodule.multiusercases.CaseNodeData;
|
||||
import org.sleuthkit.autopsy.casemodule.multiusercases.CaseNodeData.DeletedFlags;
|
||||
import org.sleuthkit.autopsy.casemodule.multiusercasesbrowser.MultiUserCaseBrowserCustomizer.Column;
|
||||
import org.sleuthkit.autopsy.datamodel.NodeProperty;
|
||||
|
||||
@ -75,6 +77,21 @@ final class MultiUserCaseNode extends AbstractNode {
|
||||
case LAST_ACCESS_DATE:
|
||||
sheetSet.put(new NodeProperty<>(propName, propName, propName, caseNodeData.getLastAccessDate()));
|
||||
break;
|
||||
case MANIFEST_FILE_ZNODES_DELETE_STATUS:
|
||||
sheetSet.put(new NodeProperty<>(propName, propName, propName, isDeleted(DeletedFlags.MANIFEST_FILE_LOCK_NODES)));
|
||||
break;
|
||||
case DATA_SOURCES_DELETE_STATUS:
|
||||
sheetSet.put(new NodeProperty<>(propName, propName, propName, isDeleted(DeletedFlags.DATA_SOURCES)));
|
||||
break;
|
||||
case TEXT_INDEX_DELETE_STATUS:
|
||||
sheetSet.put(new NodeProperty<>(propName, propName, propName, isDeleted(DeletedFlags.TEXT_INDEX)));
|
||||
break;
|
||||
case CASE_DB_DELETE_STATUS:
|
||||
sheetSet.put(new NodeProperty<>(propName, propName, propName, isDeleted(DeletedFlags.CASE_DB)));
|
||||
break;
|
||||
case CASE_DIR_DELETE_STATUS:
|
||||
sheetSet.put(new NodeProperty<>(propName, propName, propName, isDeleted(DeletedFlags.CASE_DIR)));
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
@ -95,4 +112,20 @@ final class MultiUserCaseNode extends AbstractNode {
|
||||
return customizer.getPreferredAction(caseNodeData);
|
||||
}
|
||||
|
||||
/**
|
||||
* Interprets the deletion status of part of a case.
|
||||
*
|
||||
* @param flag The coordination service node data deleted items flag
|
||||
* to interpret.
|
||||
*
|
||||
* @return A string stating "True" or "False."
|
||||
*/
|
||||
@NbBundle.Messages({
|
||||
"MultiUserCaseNode.columnValue.true=True",
|
||||
"MultiUserCaseNode.column.createTime=False",
|
||||
})
|
||||
private String isDeleted(CaseNodeData.DeletedFlags flag) {
|
||||
return caseNodeData.isDeletedFlagSet(flag) ? "True" : "False";
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -33,7 +33,6 @@ import javax.annotation.concurrent.Immutable;
|
||||
import javax.annotation.concurrent.ThreadSafe;
|
||||
import org.sleuthkit.autopsy.corecomponentinterfaces.DataSourceProcessor;
|
||||
import org.sleuthkit.autopsy.coreutils.NetworkUtils;
|
||||
import org.sleuthkit.autopsy.experimental.autoingest.Manifest;
|
||||
import org.sleuthkit.autopsy.ingest.DataSourceIngestJob.Snapshot;
|
||||
import org.sleuthkit.autopsy.ingest.IngestJob;
|
||||
import org.sleuthkit.autopsy.ingest.IngestManager.IngestThreadActivitySnapshot;
|
||||
@ -194,6 +193,7 @@ final class AutoIngestJob implements Comparable<AutoIngestJob>, IngestProgressSn
|
||||
this.ingestThreadsSnapshot = Collections.emptyList();
|
||||
this.ingestJobsSnapshot = Collections.emptyList();
|
||||
this.moduleRunTimesSnapshot = Collections.emptyMap();
|
||||
|
||||
} catch (Exception ex) {
|
||||
throw new AutoIngestJobException(String.format("Error creating automated ingest job"), ex);
|
||||
}
|
||||
|
@ -1,7 +1,7 @@
|
||||
/*
|
||||
* Autopsy Forensic Browser
|
||||
*
|
||||
* Copyright 2011-2018 Basis Technology Corp.
|
||||
* Copyright 2011-2019 Basis Technology Corp.
|
||||
* Contact: carrier <at> sleuthkit <dot> org
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
@ -22,6 +22,7 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
|
||||
import java.beans.PropertyChangeEvent;
|
||||
import java.beans.PropertyChangeListener;
|
||||
import java.io.File;
|
||||
import java.io.FileWriter;
|
||||
import java.io.IOException;
|
||||
import static java.nio.file.FileVisitOption.FOLLOW_LINKS;
|
||||
import java.nio.file.FileVisitResult;
|
||||
@ -32,7 +33,6 @@ import java.nio.file.Files;
|
||||
import java.nio.file.Path;
|
||||
import java.nio.file.Paths;
|
||||
import java.nio.file.attribute.BasicFileAttributes;
|
||||
import java.sql.SQLException;
|
||||
import java.time.Duration;
|
||||
import java.time.Instant;
|
||||
import java.util.ArrayList;
|
||||
@ -40,11 +40,9 @@ import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.Date;
|
||||
import java.util.EnumSet;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Observable;
|
||||
import java.util.Set;
|
||||
import java.util.UUID;
|
||||
@ -70,7 +68,6 @@ import org.sleuthkit.autopsy.coordinationservice.CoordinationService.Lock;
|
||||
import org.sleuthkit.autopsy.core.RuntimeProperties;
|
||||
import org.sleuthkit.autopsy.core.ServicesMonitor;
|
||||
import org.sleuthkit.autopsy.core.ServicesMonitor.ServicesMonitorException;
|
||||
import org.sleuthkit.autopsy.core.UserPreferencesException;
|
||||
import org.sleuthkit.autopsy.corecomponentinterfaces.DataSourceProcessorCallback;
|
||||
import org.sleuthkit.autopsy.corecomponentinterfaces.DataSourceProcessorCallback.DataSourceProcessorResult;
|
||||
import static org.sleuthkit.autopsy.corecomponentinterfaces.DataSourceProcessorCallback.DataSourceProcessorResult.CRITICAL_ERRORS;
|
||||
@ -126,6 +123,7 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
|
||||
private static final int NUM_INPUT_SCAN_SCHEDULING_THREADS = 1;
|
||||
private static final String INPUT_SCAN_SCHEDULER_THREAD_NAME = "AIM-input-scan-scheduler-%d";
|
||||
private static final String INPUT_SCAN_THREAD_NAME = "AIM-input-scan-%d";
|
||||
private static final int INPUT_SCAN_LOCKING_TIMEOUT_MINS = 5;
|
||||
private static final String AUTO_INGEST_THREAD_NAME = "AIM-job-processing-%d";
|
||||
private static final String LOCAL_HOST_NAME = NetworkUtils.getLocalHostName();
|
||||
private static final String EVENT_CHANNEL_NAME = "Auto-Ingest-Manager-Events";
|
||||
@ -145,6 +143,7 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
|
||||
private static final String JOB_STATUS_PUBLISHING_THREAD_NAME = "AIM-job-status-event-publisher-%d";
|
||||
private static final long MAX_MISSED_JOB_STATUS_UPDATES = 10;
|
||||
private static final int DEFAULT_PRIORITY = 0;
|
||||
private static String CASE_MANIFESTS_LIST_FILE_NAME = "auto-ingest-job-manifests.txt";
|
||||
private static final Logger sysLogger = AutoIngestSystemLogger.getLogger();
|
||||
private static AutoIngestManager instance;
|
||||
private final AutopsyEventPublisher eventPublisher;
|
||||
@ -157,8 +156,6 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
|
||||
private final ConcurrentHashMap<String, AutoIngestJob> hostNamesToRunningJobs;
|
||||
private final Object jobsLock;
|
||||
@GuardedBy("jobsLock")
|
||||
private final Map<String, Set<Path>> casesToManifests;
|
||||
@GuardedBy("jobsLock")
|
||||
private List<AutoIngestJob> pendingJobs;
|
||||
@GuardedBy("jobsLock")
|
||||
private AutoIngestJob currentJob;
|
||||
@ -174,6 +171,10 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
|
||||
|
||||
private volatile AutoIngestNodeStateEvent lastPublishedStateEvent;
|
||||
|
||||
static String getCaseManifestsListFileName() {
|
||||
return CASE_MANIFESTS_LIST_FILE_NAME;
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets a singleton auto ingest manager responsible for processing auto
|
||||
* ingest jobs defined by manifest files that can be added to any level of a
|
||||
@ -205,7 +206,6 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
|
||||
hostNamesToRunningJobs = new ConcurrentHashMap<>();
|
||||
hostNamesToLastMsgTime = new ConcurrentHashMap<>();
|
||||
jobsLock = new Object();
|
||||
casesToManifests = new HashMap<>();
|
||||
pendingJobs = new ArrayList<>();
|
||||
completedJobs = new ArrayList<>();
|
||||
try {
|
||||
@ -694,7 +694,7 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
|
||||
int oldPriority = job.getPriority();
|
||||
job.setPriority(DEFAULT_PRIORITY);
|
||||
try {
|
||||
this.updateCoordinationServiceManifestNode(job);
|
||||
this.updateAutoIngestJobData(job);
|
||||
} catch (CoordinationServiceException | InterruptedException ex) {
|
||||
job.setPriority(oldPriority);
|
||||
throw new AutoIngestManagerException("Error updating case priority", ex);
|
||||
@ -744,7 +744,7 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
|
||||
int oldPriority = job.getPriority();
|
||||
job.setPriority(maxPriority);
|
||||
try {
|
||||
this.updateCoordinationServiceManifestNode(job);
|
||||
this.updateAutoIngestJobData(job);
|
||||
} catch (CoordinationServiceException | InterruptedException ex) {
|
||||
job.setPriority(oldPriority);
|
||||
throw new AutoIngestManagerException("Error updating case priority", ex);
|
||||
@ -796,7 +796,7 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
|
||||
int oldPriority = jobToDeprioritize.getPriority();
|
||||
jobToDeprioritize.setPriority(DEFAULT_PRIORITY);
|
||||
try {
|
||||
this.updateCoordinationServiceManifestNode(jobToDeprioritize);
|
||||
this.updateAutoIngestJobData(jobToDeprioritize);
|
||||
} catch (CoordinationServiceException | InterruptedException ex) {
|
||||
jobToDeprioritize.setPriority(oldPriority);
|
||||
throw new AutoIngestManagerException("Error updating job priority", ex);
|
||||
@ -854,7 +854,7 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
|
||||
int oldPriority = jobToPrioritize.getPriority();
|
||||
jobToPrioritize.setPriority(maxPriority);
|
||||
try {
|
||||
this.updateCoordinationServiceManifestNode(jobToPrioritize);
|
||||
this.updateAutoIngestJobData(jobToPrioritize);
|
||||
} catch (CoordinationServiceException | InterruptedException ex) {
|
||||
jobToPrioritize.setPriority(oldPriority);
|
||||
throw new AutoIngestManagerException("Error updating job priority", ex);
|
||||
@ -909,7 +909,7 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
|
||||
completedJob.setCompletedDate(new Date(0));
|
||||
completedJob.setProcessingStatus(PENDING);
|
||||
completedJob.setProcessingStage(AutoIngestJob.Stage.PENDING, Date.from(Instant.now()));
|
||||
updateCoordinationServiceManifestNode(completedJob);
|
||||
updateAutoIngestJobData(completedJob);
|
||||
pendingJobs.add(completedJob);
|
||||
} catch (CoordinationServiceException ex) {
|
||||
sysLogger.log(Level.SEVERE, String.format("Coordination service error while reprocessing %s", manifestPath), ex);
|
||||
@ -996,15 +996,12 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets the coordination service manifest node.
|
||||
*
|
||||
* Note that a new auto ingest job node data object will be created from the
|
||||
* job passed in. Thus, if the data version of the node has changed, the
|
||||
* node will be "upgraded" as well as updated.
|
||||
* Writes the node data for an auto ingest job to the job's manifest file
|
||||
* lock coordination service node.
|
||||
*
|
||||
* @param job The auto ingest job.
|
||||
*/
|
||||
void updateCoordinationServiceManifestNode(AutoIngestJob job) throws CoordinationServiceException, InterruptedException {
|
||||
void updateAutoIngestJobData(AutoIngestJob job) throws CoordinationServiceException, InterruptedException {
|
||||
AutoIngestJobNodeData nodeData = new AutoIngestJobNodeData(job);
|
||||
String manifestNodePath = job.getManifest().getFilePath().toString();
|
||||
byte[] rawData = nodeData.toArray();
|
||||
@ -1016,14 +1013,21 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
|
||||
*
|
||||
* @param caseDirectoryPath The case directory path.
|
||||
*
|
||||
* @throws CoordinationService.CoordinationServiceException
|
||||
* @throws InterruptedException
|
||||
* @throws IOException
|
||||
* @throws CoordinationServiceException If there was an error getting the
|
||||
* node data from the cooordination
|
||||
* service.
|
||||
* @throws IOException If the node data was missing or
|
||||
* there was an error interpreting it.
|
||||
* @throws InterruptedException If the thread running the input
|
||||
* directory scan task is interrupted
|
||||
* while blocked, i.e., if auto ingest
|
||||
* is shutting down.
|
||||
*/
|
||||
private void setCaseNodeDataErrorsOccurred(Path caseDirectoryPath) throws CoordinationServiceException, InterruptedException, IOException {
|
||||
CaseNodeData caseNodeData = new CaseNodeData(coordinationService.getNodeData(CoordinationService.CategoryNode.CASES, caseDirectoryPath.toString()));
|
||||
private void setCaseNodeDataErrorsOccurred(Path caseDirectoryPath) throws IOException, CoordinationServiceException, InterruptedException {
|
||||
byte[] rawData = coordinationService.getNodeData(CoordinationService.CategoryNode.CASES, caseDirectoryPath.toString());
|
||||
CaseNodeData caseNodeData = new CaseNodeData(rawData);
|
||||
caseNodeData.setErrorsOccurred(true);
|
||||
byte[] rawData = caseNodeData.toArray();
|
||||
rawData = caseNodeData.toArray();
|
||||
coordinationService.setNodeData(CoordinationService.CategoryNode.CASES, caseDirectoryPath.toString(), rawData);
|
||||
}
|
||||
|
||||
@ -1088,6 +1092,7 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
|
||||
|
||||
private final List<AutoIngestJob> newPendingJobsList = new ArrayList<>();
|
||||
private final List<AutoIngestJob> newCompletedJobsList = new ArrayList<>();
|
||||
private Lock currentDirLock;
|
||||
|
||||
/**
|
||||
* Searches the input directories for manifest files. The search results
|
||||
@ -1109,9 +1114,9 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
|
||||
|
||||
} catch (Exception ex) {
|
||||
/*
|
||||
* NOTE: Need to catch all exceptions here. Otherwise
|
||||
* uncaught exceptions will propagate up to the calling
|
||||
* thread and may stop it from running.
|
||||
* NOTE: Need to catch all unhandled exceptions here.
|
||||
* Otherwise uncaught exceptions will propagate up to the
|
||||
* calling thread and may stop it from running.
|
||||
*/
|
||||
sysLogger.log(Level.SEVERE, String.format("Error scanning the input directory %s", rootInputDirectory), ex);
|
||||
}
|
||||
@ -1145,20 +1150,15 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
|
||||
}
|
||||
|
||||
/**
|
||||
* Invoked for a file in a directory. If the file is a manifest file,
|
||||
* creates a pending pending or completed auto ingest job for the
|
||||
* manifest, based on the data stored in the coordination service node
|
||||
* for the manifest.
|
||||
* <p>
|
||||
* Note that the mapping of case names to manifest paths that is used
|
||||
* for case deletion is updated as well.
|
||||
* Creates a pending or completed auto ingest job if the file visited is
|
||||
* a manifest file, based on the data stored in the coordination service
|
||||
* node for the manifest.
|
||||
*
|
||||
* @param filePath The path of the file.
|
||||
* @param attrs The file system attributes of the file.
|
||||
*
|
||||
* @return TERMINATE if auto ingest is shutting down, CONTINUE if it has
|
||||
* not.
|
||||
*
|
||||
*/
|
||||
@Override
|
||||
public FileVisitResult visitFile(Path filePath, BasicFileAttributes attrs) {
|
||||
@ -1167,6 +1167,11 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
|
||||
}
|
||||
|
||||
try {
|
||||
/*
|
||||
* Determine whether or not the file is an auto ingest job
|
||||
* manifest file. If it is, then parse it. Otherwise, move on to
|
||||
* the next file in the directory.
|
||||
*/
|
||||
Manifest manifest = null;
|
||||
for (ManifestFileParser parser : Lookup.getDefault().lookupAll(ManifestFileParser.class)) {
|
||||
if (parser.fileIsManifest(filePath)) {
|
||||
@ -1186,76 +1191,83 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
|
||||
return TERMINATE;
|
||||
}
|
||||
|
||||
if (null != manifest) {
|
||||
/*
|
||||
* Update the mapping of case names to manifest paths that
|
||||
* is used for case deletion.
|
||||
*/
|
||||
String caseName = manifest.getCaseName();
|
||||
Path manifestPath = manifest.getFilePath();
|
||||
if (casesToManifests.containsKey(caseName)) {
|
||||
Set<Path> manifestPaths = casesToManifests.get(caseName);
|
||||
manifestPaths.add(manifestPath);
|
||||
} else {
|
||||
Set<Path> manifestPaths = new HashSet<>();
|
||||
manifestPaths.add(manifestPath);
|
||||
casesToManifests.put(caseName, manifestPaths);
|
||||
}
|
||||
if (manifest == null) {
|
||||
return CONTINUE;
|
||||
}
|
||||
|
||||
/*
|
||||
* Add a job to the pending jobs queue, the completed jobs
|
||||
* list, or do crashed job recovery, as required.
|
||||
*/
|
||||
try {
|
||||
byte[] rawData = coordinationService.getNodeData(CoordinationService.CategoryNode.MANIFESTS, manifestPath.toString());
|
||||
/*
|
||||
* If a manifest file has been found, get a manifest file lock,
|
||||
* analyze the job state, and put a job into the appropriate job
|
||||
* list. There is a short wait here in case the input directory
|
||||
* scanner file visitor of another auto ingest node (AIN) has
|
||||
* the lock. If the lock ultmiately can't be obtained, the wait
|
||||
* was not long enough, or another auto ingest node (AIN) is
|
||||
* holding the lock because it is executing the job, or a case
|
||||
* deletion task has aquired the lock. In all of these cases the
|
||||
* manifest can be skipped for this scan.
|
||||
*/
|
||||
try (Lock manifestLock = coordinationService.tryGetExclusiveLock(CoordinationService.CategoryNode.MANIFESTS, manifest.getFilePath().toString(), INPUT_SCAN_LOCKING_TIMEOUT_MINS, TimeUnit.MINUTES)) {
|
||||
if (null != manifestLock) {
|
||||
byte[] rawData = coordinationService.getNodeData(CoordinationService.CategoryNode.MANIFESTS, manifest.getFilePath().toString());
|
||||
if (null != rawData && rawData.length > 0) {
|
||||
try {
|
||||
AutoIngestJobNodeData nodeData = new AutoIngestJobNodeData(rawData);
|
||||
AutoIngestJob.ProcessingStatus processingStatus = nodeData.getProcessingStatus();
|
||||
switch (processingStatus) {
|
||||
case PENDING:
|
||||
addPendingJob(manifest, nodeData);
|
||||
break;
|
||||
case PROCESSING:
|
||||
doRecoveryIfCrashed(manifest, nodeData);
|
||||
break;
|
||||
case COMPLETED:
|
||||
addCompletedJob(manifest, nodeData);
|
||||
break;
|
||||
case DELETED: // No longer used, retained for legacy jobs only.
|
||||
/*
|
||||
* Ignore jobs marked as "deleted."
|
||||
*/
|
||||
break;
|
||||
default:
|
||||
sysLogger.log(Level.SEVERE, "Unknown ManifestNodeData.ProcessingStatus");
|
||||
break;
|
||||
}
|
||||
} catch (AutoIngestJobNodeData.InvalidDataException | AutoIngestJobException ex) {
|
||||
sysLogger.log(Level.SEVERE, String.format("Invalid auto ingest job node data for %s", manifestPath), ex);
|
||||
AutoIngestJobNodeData nodeData = new AutoIngestJobNodeData(rawData);
|
||||
AutoIngestJob.ProcessingStatus processingStatus = nodeData.getProcessingStatus();
|
||||
switch (processingStatus) {
|
||||
case PENDING:
|
||||
addPendingJob(manifest, nodeData);
|
||||
break;
|
||||
case PROCESSING:
|
||||
/*
|
||||
* If an exclusive manifest file lock was
|
||||
* obtained for an auto ingest job in the
|
||||
* processing state, the auto ingest node
|
||||
* (AIN) executing the job crashed and the
|
||||
* lock was released when the coordination
|
||||
* service detected that the AIN was no
|
||||
* longer alive.
|
||||
*/
|
||||
doCrashRecovery(manifest, nodeData);
|
||||
break;
|
||||
case COMPLETED:
|
||||
addCompletedJob(manifest, nodeData);
|
||||
break;
|
||||
case DELETED:
|
||||
/*
|
||||
* Ignore jobs marked as deleted. Note that
|
||||
* this state is no longer used and is
|
||||
* retained for legacy jobs only.
|
||||
*/
|
||||
break;
|
||||
default:
|
||||
sysLogger.log(Level.SEVERE, "Unknown ManifestNodeData.ProcessingStatus");
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
try {
|
||||
addNewPendingJob(manifest);
|
||||
} catch (AutoIngestJobException ex) {
|
||||
sysLogger.log(Level.SEVERE, String.format("Invalid manifest data for %s", manifestPath), ex);
|
||||
sysLogger.log(Level.SEVERE, String.format("Invalid manifest data for %s", manifest.getFilePath()), ex);
|
||||
}
|
||||
}
|
||||
} catch (CoordinationServiceException ex) {
|
||||
sysLogger.log(Level.SEVERE, String.format("Error transmitting node data for %s", manifestPath), ex);
|
||||
return CONTINUE;
|
||||
} catch (InterruptedException ex) {
|
||||
Thread.currentThread().interrupt();
|
||||
return TERMINATE;
|
||||
}
|
||||
} catch (CoordinationServiceException | AutoIngestJobException | AutoIngestJobNodeData.InvalidDataException ex) {
|
||||
sysLogger.log(Level.SEVERE, String.format("Error handling manifest at %s", manifest.getFilePath()), ex);
|
||||
} catch (InterruptedException ex) {
|
||||
/*
|
||||
* The thread running the input directory scan task was
|
||||
* interrupted while blocked, i.e., auto ingest is shutting
|
||||
* down.
|
||||
*/
|
||||
return TERMINATE;
|
||||
}
|
||||
|
||||
} catch (Exception ex) {
|
||||
// Catch all unhandled and unexpected exceptions. Otherwise one bad file
|
||||
// can stop the entire input folder scanning. Given that the exception is unexpected,
|
||||
// I'm hesitant to add logging which requires accessing or de-referencing data.
|
||||
sysLogger.log(Level.SEVERE, "Unexpected exception in file visitor", ex);
|
||||
return CONTINUE;
|
||||
/*
|
||||
* This is an exception firewall so that an unexpected runtime
|
||||
* exception from the handling of a single manifest file does
|
||||
* not take out the input directory scanner.
|
||||
*/
|
||||
sysLogger.log(Level.SEVERE, String.format("Unexpected exception handling %s", filePath), ex);
|
||||
}
|
||||
|
||||
if (!Thread.currentThread().isInterrupted()) {
|
||||
@ -1266,49 +1278,36 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
|
||||
}
|
||||
|
||||
/**
|
||||
* Adds an existing job to the pending jobs queue.
|
||||
* Adds an auto ingest job to the pending jobs queue.
|
||||
*
|
||||
* @param manifest The manifest for the job.
|
||||
* @param nodeData The data stored in the coordination service node for
|
||||
* the job.
|
||||
* @param nodeData The data stored in the manifest file lock
|
||||
* coordination service node for the job.
|
||||
*
|
||||
* @throws InterruptedException if the thread running the input
|
||||
* directory scan task is interrupted while
|
||||
* blocked, i.e., if auto ingest is
|
||||
* shutting down.
|
||||
* @throws AutoIngestJobException If there was an error working
|
||||
* with the node data.
|
||||
* @throws CoordinationServiceException If a lock node data version
|
||||
* update was required and there
|
||||
* was an error writing the node
|
||||
* data by the coordination
|
||||
* service.
|
||||
* @throws InterruptedException If the thread running the input
|
||||
* directory scan task is
|
||||
* interrupted while blocked, i.e.,
|
||||
* if auto ingest is shutting down.
|
||||
*/
|
||||
private void addPendingJob(Manifest manifest, AutoIngestJobNodeData nodeData) throws InterruptedException, AutoIngestJobException {
|
||||
private void addPendingJob(Manifest manifest, AutoIngestJobNodeData nodeData) throws AutoIngestJobException, CoordinationServiceException, InterruptedException {
|
||||
AutoIngestJob job;
|
||||
if (nodeData.getVersion() == AutoIngestJobNodeData.getCurrentVersion()) {
|
||||
job = new AutoIngestJob(nodeData);
|
||||
} else {
|
||||
job = new AutoIngestJob(manifest);
|
||||
job.setPriority(nodeData.getPriority()); // Retain priority, present in all versions of the node data.
|
||||
job.setPriority(nodeData.getPriority());
|
||||
Path caseDirectory = PathUtils.findCaseDirectory(rootOutputDirectory, manifest.getCaseName());
|
||||
if (null != caseDirectory) {
|
||||
job.setCaseDirectoryPath(caseDirectory);
|
||||
}
|
||||
|
||||
/*
|
||||
* Try to upgrade/update the coordination service manifest node
|
||||
* data for the job.
|
||||
*
|
||||
* An exclusive lock is obtained before doing so because another
|
||||
* host may have already found the job, obtained an exclusive
|
||||
* lock, and started processing it. However, this locking does
|
||||
* make it possible that two processing hosts will both try to
|
||||
* obtain the lock to do the upgrade operation at the same time.
|
||||
* If this happens, the host that is holding the lock will
|
||||
* complete the upgrade operation, so there is nothing more for
|
||||
* this host to do.
|
||||
*/
|
||||
try (Lock manifestLock = coordinationService.tryGetExclusiveLock(CoordinationService.CategoryNode.MANIFESTS, manifest.getFilePath().toString())) {
|
||||
if (null != manifestLock) {
|
||||
updateCoordinationServiceManifestNode(job);
|
||||
}
|
||||
} catch (CoordinationServiceException ex) {
|
||||
sysLogger.log(Level.SEVERE, String.format("Error attempting to set node data for %s", manifest.getFilePath()), ex);
|
||||
}
|
||||
updateAutoIngestJobData(job);
|
||||
}
|
||||
newPendingJobsList.add(job);
|
||||
}
|
||||
@ -1318,150 +1317,117 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
|
||||
*
|
||||
* @param manifest The manifest for the job.
|
||||
*
|
||||
* @throws InterruptedException if the thread running the input
|
||||
* directory scan task is interrupted while
|
||||
* blocked, i.e., if auto ingest is
|
||||
* shutting down.
|
||||
* @throws AutoIngestJobException If there was an error creating
|
||||
* the node data.
|
||||
* @throws CoordinationServiceException If there was an error writing
|
||||
* the node data by the
|
||||
* coordination service.
|
||||
* @throws InterruptedException If the thread running the input
|
||||
* directory scan task is
|
||||
* interrupted while blocked, i.e.,
|
||||
* if auto ingest is shutting down.
|
||||
*/
|
||||
private void addNewPendingJob(Manifest manifest) throws InterruptedException, AutoIngestJobException {
|
||||
/*
|
||||
* Create the coordination service manifest node data for the job.
|
||||
* Note that getting the lock will create the node for the job (with
|
||||
* no data) if it does not already exist.
|
||||
*
|
||||
* An exclusive lock is obtained before creating the node data
|
||||
* because another host may have already found the job, obtained an
|
||||
* exclusive lock, and started processing it. However, this locking
|
||||
* does make it possible that two hosts will both try to obtain the
|
||||
* lock to do the create operation at the same time. If this
|
||||
* happens, the host that is locked out will not add the job to its
|
||||
* pending queue for this scan of the input directory, but it will
|
||||
* be picked up on the next scan.
|
||||
*/
|
||||
try (Lock manifestLock = coordinationService.tryGetExclusiveLock(CoordinationService.CategoryNode.MANIFESTS, manifest.getFilePath().toString())) {
|
||||
if (null != manifestLock) {
|
||||
AutoIngestJob job = new AutoIngestJob(manifest);
|
||||
updateCoordinationServiceManifestNode(job);
|
||||
newPendingJobsList.add(job);
|
||||
}
|
||||
} catch (CoordinationServiceException ex) {
|
||||
sysLogger.log(Level.SEVERE, String.format("Error attempting to set node data for %s", manifest.getFilePath()), ex);
|
||||
}
|
||||
private void addNewPendingJob(Manifest manifest) throws AutoIngestJobException, CoordinationServiceException, InterruptedException {
|
||||
AutoIngestJob job = new AutoIngestJob(manifest);
|
||||
updateAutoIngestJobData(job);
|
||||
newPendingJobsList.add(job);
|
||||
}
|
||||
|
||||
/**
|
||||
* Does crash recovery for a manifest, if required. The criterion for
|
||||
* crash recovery is a manifest with coordination service node data
|
||||
* indicating it is being processed for which an exclusive lock on the
|
||||
* node can be acquired. If this condition is true, it is probable that
|
||||
* the node that was processing the job crashed and the processing
|
||||
* status was not updated.
|
||||
* Does recovery for an auto ingest job that was left in the processing
|
||||
* state by an auot ingest node (AIN) that crashed.
|
||||
*
|
||||
* @param manifest The manifest for upgrading the node.
|
||||
* @param jobNodeData The auto ingest job node data.
|
||||
* @param manifest The manifest for the job.
|
||||
* @param nodeData The data stored in the manifest file lock
|
||||
* coordination service node for the job.
|
||||
*
|
||||
* @throws InterruptedException if the thread running the input
|
||||
* directory scan task is interrupted
|
||||
* while blocked, i.e., if auto ingest is
|
||||
* shutting down.
|
||||
* @throws AutoIngestJobException if there is an issue creating a new
|
||||
* AutoIngestJob object.
|
||||
* @throws AutoIngestJobException If there was an error working
|
||||
* with the node data.
|
||||
* @throws CoordinationServiceException If there was an error writing
|
||||
* updated node data by the
|
||||
* coordination service.
|
||||
* @throws InterruptedException If the thread running the input
|
||||
* directory scan task is
|
||||
* interrupted while blocked, i.e.,
|
||||
* if auto ingest is shutting down.
|
||||
*/
|
||||
private void doRecoveryIfCrashed(Manifest manifest, AutoIngestJobNodeData jobNodeData) throws InterruptedException, AutoIngestJobException {
|
||||
/*
|
||||
* Try to get an exclusive lock on the coordination service node for
|
||||
* the job. If the lock cannot be obtained, another host in the auto
|
||||
* ingest cluster is already doing the recovery, so there is nothing
|
||||
* to do.
|
||||
*/
|
||||
private void doCrashRecovery(Manifest manifest, AutoIngestJobNodeData jobNodeData) throws AutoIngestJobException, CoordinationServiceException, InterruptedException {
|
||||
String manifestPath = manifest.getFilePath().toString();
|
||||
try (Lock manifestLock = coordinationService.tryGetExclusiveLock(CoordinationService.CategoryNode.MANIFESTS, manifestPath)) {
|
||||
if (null != manifestLock) {
|
||||
sysLogger.log(Level.SEVERE, "Attempting crash recovery for {0}", manifestPath);
|
||||
Path caseDirectoryPath = PathUtils.findCaseDirectory(rootOutputDirectory, manifest.getCaseName());
|
||||
sysLogger.log(Level.SEVERE, "Attempting crash recovery for {0}", manifestPath);
|
||||
AutoIngestJob job = new AutoIngestJob(jobNodeData);
|
||||
|
||||
/*
|
||||
* Create the recovery job.
|
||||
*/
|
||||
AutoIngestJob job = new AutoIngestJob(jobNodeData);
|
||||
int numberOfCrashes = job.getNumberOfCrashes();
|
||||
if (numberOfCrashes <= AutoIngestUserPreferences.getMaxNumTimesToProcessImage()) {
|
||||
++numberOfCrashes;
|
||||
job.setNumberOfCrashes(numberOfCrashes);
|
||||
if (numberOfCrashes <= AutoIngestUserPreferences.getMaxNumTimesToProcessImage()) {
|
||||
job.setCompletedDate(new Date(0));
|
||||
} else {
|
||||
job.setCompletedDate(Date.from(Instant.now()));
|
||||
}
|
||||
}
|
||||
/*
|
||||
* Try to set the error flags that indicate incomplete or messy data
|
||||
* in displays for the job and the case. Note that if the job
|
||||
* crashed before a case directory was created, the job was a no-op,
|
||||
* so the data quality flags do not need to be set.
|
||||
*/
|
||||
Path caseDirectoryPath = PathUtils.findCaseDirectory(rootOutputDirectory, manifest.getCaseName());
|
||||
if (null != caseDirectoryPath) {
|
||||
job.setCaseDirectoryPath(caseDirectoryPath);
|
||||
job.setErrorsOccurred(true);
|
||||
try {
|
||||
setCaseNodeDataErrorsOccurred(caseDirectoryPath);
|
||||
} catch (IOException ex) {
|
||||
sysLogger.log(Level.WARNING, String.format("Error attempting to set error flag in case node data for %s", caseDirectoryPath), ex);
|
||||
}
|
||||
} else {
|
||||
job.setErrorsOccurred(false);
|
||||
}
|
||||
|
||||
if (null != caseDirectoryPath) {
|
||||
job.setCaseDirectoryPath(caseDirectoryPath);
|
||||
job.setErrorsOccurred(true);
|
||||
try {
|
||||
setCaseNodeDataErrorsOccurred(caseDirectoryPath);
|
||||
} catch (IOException ex) {
|
||||
sysLogger.log(Level.SEVERE, String.format("Error attempting to set error flag in case node data for %s", caseDirectoryPath), ex);
|
||||
}
|
||||
} else {
|
||||
job.setErrorsOccurred(false);
|
||||
}
|
||||
|
||||
if (numberOfCrashes <= AutoIngestUserPreferences.getMaxNumTimesToProcessImage()) {
|
||||
job.setProcessingStatus(AutoIngestJob.ProcessingStatus.PENDING);
|
||||
if (null != caseDirectoryPath) {
|
||||
try {
|
||||
new AutoIngestJobLogger(manifest.getFilePath(), manifest.getDataSourceFileName(), caseDirectoryPath).logCrashRecoveryWithRetry();
|
||||
} catch (AutoIngestJobLoggerException ex) {
|
||||
sysLogger.log(Level.SEVERE, String.format("Error creating case auto ingest log entry for crashed job for %s", manifestPath), ex);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
job.setProcessingStatus(AutoIngestJob.ProcessingStatus.COMPLETED);
|
||||
if (null != caseDirectoryPath) {
|
||||
try {
|
||||
new AutoIngestJobLogger(manifest.getFilePath(), manifest.getDataSourceFileName(), caseDirectoryPath).logCrashRecoveryNoRetry();
|
||||
} catch (AutoIngestJobLoggerException ex) {
|
||||
sysLogger.log(Level.SEVERE, String.format("Error creating case auto ingest log entry for crashed job for %s", manifestPath), ex);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Update the coordination service node for the job. If this
|
||||
* fails, leave the recovery to another host.
|
||||
*/
|
||||
/*
|
||||
* Update the crash count for the job, determine whether or not to
|
||||
* retry processing its data source, and deal with the job
|
||||
* accordingly.
|
||||
*/
|
||||
int numberOfCrashes = job.getNumberOfCrashes();
|
||||
++numberOfCrashes;
|
||||
job.setNumberOfCrashes(numberOfCrashes);
|
||||
if (numberOfCrashes < AutoIngestUserPreferences.getMaxNumTimesToProcessImage()) {
|
||||
job.setProcessingStatus(AutoIngestJob.ProcessingStatus.PENDING);
|
||||
job.setCompletedDate(new Date(0));
|
||||
if (null != caseDirectoryPath) {
|
||||
try {
|
||||
updateCoordinationServiceManifestNode(job);
|
||||
} catch (CoordinationServiceException ex) {
|
||||
sysLogger.log(Level.SEVERE, String.format("Error attempting to set node data for %s", manifestPath), ex);
|
||||
return;
|
||||
}
|
||||
|
||||
jobNodeData = new AutoIngestJobNodeData(job);
|
||||
|
||||
if (numberOfCrashes <= AutoIngestUserPreferences.getMaxNumTimesToProcessImage()) {
|
||||
newPendingJobsList.add(job);
|
||||
} else {
|
||||
newCompletedJobsList.add(new AutoIngestJob(jobNodeData));
|
||||
new AutoIngestJobLogger(manifest.getFilePath(), manifest.getDataSourceFileName(), caseDirectoryPath).logCrashRecoveryWithRetry();
|
||||
} catch (AutoIngestJobLoggerException ex) {
|
||||
sysLogger.log(Level.SEVERE, String.format("Error writing case auto ingest log entry for crashed job for %s", manifestPath), ex);
|
||||
}
|
||||
}
|
||||
} catch (CoordinationServiceException ex) {
|
||||
sysLogger.log(Level.SEVERE, String.format("Error attempting to get exclusive lock for %s", manifestPath), ex);
|
||||
updateAutoIngestJobData(job);
|
||||
newPendingJobsList.add(job);
|
||||
} else {
|
||||
job.setProcessingStatus(AutoIngestJob.ProcessingStatus.COMPLETED);
|
||||
job.setCompletedDate(Date.from(Instant.now()));
|
||||
if (null != caseDirectoryPath) {
|
||||
try {
|
||||
new AutoIngestJobLogger(manifest.getFilePath(), manifest.getDataSourceFileName(), caseDirectoryPath).logCrashRecoveryNoRetry();
|
||||
} catch (AutoIngestJobLoggerException ex) {
|
||||
sysLogger.log(Level.SEVERE, String.format("Error writing case auto ingest log entry for crashed job for %s", manifestPath), ex);
|
||||
}
|
||||
}
|
||||
updateAutoIngestJobData(job);
|
||||
newCompletedJobsList.add(new AutoIngestJob(jobNodeData));
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Adds a job to process a manifest to the completed jobs list.
|
||||
*
|
||||
* @param nodeData The data stored in the coordination service node for
|
||||
* the manifest.
|
||||
* @param manifest The manifest for upgrading the node.
|
||||
* @param manifest The manifest for the job.
|
||||
* @param nodeData The data stored in the manifest file lock
|
||||
* coordination service node for the job.
|
||||
*
|
||||
* @throws CoordinationServiceException
|
||||
* @throws InterruptedException
|
||||
* @throws AutoIngestJobException If there was an error working
|
||||
* with the node data.
|
||||
* @throws CoordinationServiceException If there was an error writing
|
||||
* updated node data by the
|
||||
* coordination service.
|
||||
* @throws InterruptedException If the thread running the input
|
||||
* directory scan task is
|
||||
* interrupted while blocked, i.e.,
|
||||
* if auto ingest is shutting down.
|
||||
*/
|
||||
private void addCompletedJob(Manifest manifest, AutoIngestJobNodeData nodeData) throws CoordinationServiceException, InterruptedException, AutoIngestJobException {
|
||||
private void addCompletedJob(Manifest manifest, AutoIngestJobNodeData nodeData) throws AutoIngestJobException, CoordinationServiceException, InterruptedException {
|
||||
Path caseDirectoryPath = nodeData.getCaseDirectoryPath();
|
||||
if (!caseDirectoryPath.toFile().exists()) {
|
||||
sysLogger.log(Level.WARNING, String.format("Job completed for %s, but cannot find case directory %s, ignoring job", nodeData.getManifestFilePath(), caseDirectoryPath.toString()));
|
||||
@ -1493,21 +1459,9 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
|
||||
job.setProcessingStage(AutoIngestJob.Stage.COMPLETED, nodeData.getCompletedDate());
|
||||
job.setProcessingStatus(AutoIngestJob.ProcessingStatus.COMPLETED);
|
||||
|
||||
/*
|
||||
* Try to upgrade/update the coordination service manifest node
|
||||
* data for the job. It is possible that two hosts will both try
|
||||
* to obtain the lock to do the upgrade operation at the same
|
||||
* time. If this happens, the host that is holding the lock will
|
||||
* complete the upgrade operation.
|
||||
*/
|
||||
try (Lock manifestLock = coordinationService.tryGetExclusiveLock(CoordinationService.CategoryNode.MANIFESTS, manifest.getFilePath().toString())) {
|
||||
if (null != manifestLock) {
|
||||
updateCoordinationServiceManifestNode(job);
|
||||
}
|
||||
} catch (CoordinationServiceException ex) {
|
||||
sysLogger.log(Level.SEVERE, String.format("Error attempting to set node data for %s", manifest.getFilePath()), ex);
|
||||
}
|
||||
updateAutoIngestJobData(job);
|
||||
}
|
||||
|
||||
newCompletedJobsList.add(job);
|
||||
}
|
||||
|
||||
@ -1536,17 +1490,17 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
|
||||
}
|
||||
|
||||
/**
|
||||
* Invoked for an input directory after entries in the directory are
|
||||
* Invoked for an input directory after the files in the directory are
|
||||
* visited. Checks if the task thread has been interrupted because auto
|
||||
* ingest is shutting down and terminates the scan if that is the case.
|
||||
*
|
||||
* @param dirPath The directory about to be visited.
|
||||
* @param unused Unused.
|
||||
*
|
||||
* @return TERMINATE if the task thread has been interrupted, CONTINUE
|
||||
* if it has not.
|
||||
* @return FileVisitResult.TERMINATE if the task thread has been
|
||||
* interrupted, FileVisitResult.CONTINUE if it has not.
|
||||
*
|
||||
* @throws IOException if an I/O error occurs, but this implementation
|
||||
* @throws IOException If an I/O error occurs, but this implementation
|
||||
* does not throw.
|
||||
*/
|
||||
@Override
|
||||
@ -2005,11 +1959,12 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
|
||||
try {
|
||||
AutoIngestJobNodeData nodeData = new AutoIngestJobNodeData(coordinationService.getNodeData(CoordinationService.CategoryNode.MANIFESTS, manifestPath.toString()));
|
||||
if (!nodeData.getProcessingStatus().equals(PENDING)) {
|
||||
/*
|
||||
* Due to a timing issue or a missed event, a
|
||||
* non-pending job has ended up on the pending
|
||||
* queue. Skip the job and remove it from the queue.
|
||||
*/
|
||||
iterator.remove();
|
||||
continue;
|
||||
}
|
||||
|
||||
File manifestFile = nodeData.getManifestFilePath().toFile();
|
||||
if (!manifestFile.exists()) {
|
||||
iterator.remove();
|
||||
continue;
|
||||
}
|
||||
@ -2027,11 +1982,13 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
iterator.remove();
|
||||
currentJob = job;
|
||||
break;
|
||||
|
||||
} catch (AutoIngestJobNodeData.InvalidDataException ex) {
|
||||
sysLogger.log(Level.WARNING, String.format("Unable to use node data for %s", manifestPath), ex); // JCTODO: Is this right?
|
||||
sysLogger.log(Level.WARNING, String.format("Unable to use node data for %s", manifestPath), ex);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -2102,7 +2059,7 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
|
||||
currentJob.setProcessingStatus(AutoIngestJob.ProcessingStatus.PROCESSING);
|
||||
currentJob.setProcessingStage(AutoIngestJob.Stage.STARTING, Date.from(Instant.now()));
|
||||
currentJob.setProcessingHostName(AutoIngestManager.LOCAL_HOST_NAME);
|
||||
updateCoordinationServiceManifestNode(currentJob);
|
||||
updateAutoIngestJobData(currentJob);
|
||||
setChanged();
|
||||
notifyObservers(Event.JOB_STARTED);
|
||||
eventPublisher.publishRemotely(new AutoIngestJobStartedEvent(currentJob));
|
||||
@ -2126,7 +2083,7 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
|
||||
currentJob.setProcessingStatus(AutoIngestJob.ProcessingStatus.PENDING);
|
||||
}
|
||||
currentJob.setProcessingHostName("");
|
||||
updateCoordinationServiceManifestNode(currentJob);
|
||||
updateAutoIngestJobData(currentJob);
|
||||
|
||||
boolean retry = (!currentJob.isCanceled() && !currentJob.isCompleted());
|
||||
sysLogger.log(Level.INFO, "Completed processing of {0}, retry = {1}", new Object[]{manifestPath, retry});
|
||||
@ -2322,13 +2279,16 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
|
||||
Thread.sleep(AutoIngestUserPreferences.getSecondsToSleepBetweenCases() * 1000);
|
||||
}
|
||||
currentJob.setCaseDirectoryPath(caseDirectoryPath);
|
||||
updateCoordinationServiceManifestNode(currentJob); // update case directory path
|
||||
updateAutoIngestJobData(currentJob);
|
||||
recordManifest(caseDirectoryPath, manifest.getFilePath());
|
||||
Case caseForJob = Case.getCurrentCase();
|
||||
sysLogger.log(Level.INFO, "Opened case {0} for {1}", new Object[]{caseForJob.getName(), manifest.getFilePath()});
|
||||
return caseForJob;
|
||||
|
||||
} catch (KeywordSearchModuleException ex) {
|
||||
throw new CaseManagementException(String.format("Error creating solr settings file for case %s for %s", caseName, manifest.getFilePath()), ex);
|
||||
} catch (IOException ex) {
|
||||
throw new CaseManagementException(String.format("Error recording manifest file path for case %s for %s", caseName, manifest.getFilePath()), ex);
|
||||
} catch (CaseActionException ex) {
|
||||
throw new CaseManagementException(String.format("Error creating or opening case %s for %s", caseName, manifest.getFilePath()), ex);
|
||||
}
|
||||
@ -2338,6 +2298,22 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Writes the path of the manifest file for the current job to a list of
|
||||
* manifest file paths for the case in file in the case directory.
|
||||
*
|
||||
* @param caseDirectoryPath The case directory path.
|
||||
*
|
||||
* @throws IOException If the file cannot be created or opened and
|
||||
* updated.
|
||||
*/
|
||||
private void recordManifest(Path caseDirectoryPath, Path manifestFilePath) throws IOException {
|
||||
final Path manifestsListFilePath = Paths.get(caseDirectoryPath.toString(), AutoIngestManager.getCaseManifestsListFileName());
|
||||
try (FileWriter fileWriter = new FileWriter(manifestsListFilePath.toString(), true)) {
|
||||
fileWriter.write(manifestFilePath.toString() + "\n");
|
||||
}
|
||||
}
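/*
 * Illustrative sketch only, not part of this change: the manifests list file
 * written by recordManifest() holds one absolute manifest file path per line,
 * so it can be read back line by line, which is what the new case deletion
 * task does. The readManifestPaths helper below is hypothetical, and
 * java.util.Scanner, java.util.List, and java.util.ArrayList are assumed to
 * be imported.
 */
private List<Path> readManifestPaths(Path caseDirectoryPath) throws IOException {
    final Path manifestsListFilePath = Paths.get(caseDirectoryPath.toString(), AutoIngestManager.getCaseManifestsListFileName());
    final List<Path> manifestPaths = new ArrayList<>();
    try (Scanner scanner = new Scanner(manifestsListFilePath)) {
        while (scanner.hasNextLine()) {
            manifestPaths.add(Paths.get(scanner.nextLine()));
        }
    }
    return manifestPaths;
}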
|
||||
|
||||
/**
|
||||
* Runs the ingest process for the current job.
|
||||
*
|
||||
@ -2978,7 +2954,7 @@ final class AutoIngestManager extends Observable implements PropertyChangeListen
|
||||
currentJob.setModuleRuntimesSnapshot(IngestManager.getInstance().getModuleRunTimes());
|
||||
setChanged();
|
||||
notifyObservers(Event.JOB_STATUS_UPDATED);
|
||||
updateCoordinationServiceManifestNode(currentJob);
|
||||
updateAutoIngestJobData(currentJob);
|
||||
eventPublisher.publishRemotely(new AutoIngestJobStatusEvent(currentJob));
|
||||
}
|
||||
}
|
||||
|
@ -59,6 +59,11 @@ final class CasesDashboardCustomizer implements MultiUserCaseBrowserCustomizer {
|
||||
properties.add(Column.CREATE_DATE);
|
||||
properties.add(Column.LAST_ACCESS_DATE);
|
||||
properties.add(Column.DIRECTORY);
|
||||
properties.add(Column.MANIFEST_FILE_ZNODES_DELETE_STATUS);
|
||||
properties.add(Column.DATA_SOURCES_DELETE_STATUS);
|
||||
properties.add(Column.TEXT_INDEX_DELETE_STATUS);
|
||||
properties.add(Column.CASE_DB_DELETE_STATUS);
|
||||
properties.add(Column.CASE_DIR_DELETE_STATUS);
|
||||
return properties;
|
||||
}
|
||||
|
||||
|
@ -62,8 +62,6 @@ public final class CasesDashboardTopComponent extends TopComponent implements Ex
|
||||
* for multi-user cases. The top component is docked into the "dashboard
|
||||
* mode" defined by the auto ingest jobs top component.
|
||||
*/
|
||||
// RJCTODO: Consider moving all of the dashboard code into its own
|
||||
// autoingest.dashboard package.
|
||||
public static void openTopComponent() {
|
||||
CasesDashboardTopComponent topComponent = (CasesDashboardTopComponent) WindowManager.getDefault().findTopComponent("CasesDashboardTopComponent"); // NON-NLS
|
||||
if (topComponent == null) {
|
||||
|
@ -21,6 +21,7 @@ package org.sleuthkit.autopsy.experimental.autoingest;
|
||||
import java.util.concurrent.ExecutorService;
|
||||
import org.openide.util.NbBundle;
|
||||
import org.sleuthkit.autopsy.casemodule.multiusercases.CaseNodeData;
|
||||
import org.sleuthkit.autopsy.experimental.autoingest.DeleteCaseTask.DeleteOptions;
|
||||
import org.sleuthkit.autopsy.progress.ProgressIndicator;
|
||||
|
||||
/**
|
||||
@ -53,7 +54,7 @@ final class DeleteCaseInputAction extends DeleteCaseAction {
|
||||
|
||||
@Override
|
||||
DeleteCaseTask getTask(CaseNodeData caseNodeData, ProgressIndicator progress) {
|
||||
return new DeleteCaseInputTask(caseNodeData, progress);
|
||||
return new DeleteCaseTask(caseNodeData, DeleteOptions.DELETE_INPUT, progress);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -20,6 +20,7 @@ package org.sleuthkit.autopsy.experimental.autoingest;
|
||||
|
||||
import org.openide.util.NbBundle.Messages;
|
||||
import org.sleuthkit.autopsy.casemodule.multiusercases.CaseNodeData;
|
||||
import org.sleuthkit.autopsy.experimental.autoingest.DeleteCaseTask.DeleteOptions;
|
||||
import org.sleuthkit.autopsy.progress.ProgressIndicator;
|
||||
|
||||
/**
|
||||
@ -47,7 +48,7 @@ final class DeleteCaseInputAndOutputAction extends DeleteCaseAction {
|
||||
|
||||
@Override
|
||||
DeleteCaseTask getTask(CaseNodeData caseNodeData, ProgressIndicator progress) {
|
||||
return new DeleteCaseInputAndOutputTask(caseNodeData, progress);
|
||||
return new DeleteCaseTask(caseNodeData, DeleteOptions.DELETE_OUTPUT, progress);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -20,6 +20,7 @@ package org.sleuthkit.autopsy.experimental.autoingest;
|
||||
|
||||
import org.openide.util.NbBundle;
|
||||
import org.sleuthkit.autopsy.casemodule.multiusercases.CaseNodeData;
|
||||
import org.sleuthkit.autopsy.experimental.autoingest.DeleteCaseTask.DeleteOptions;
|
||||
import org.sleuthkit.autopsy.progress.ProgressIndicator;
|
||||
|
||||
/**
|
||||
@ -51,7 +52,7 @@ final class DeleteCaseOutputAction extends DeleteCaseAction {
|
||||
|
||||
@Override
|
||||
DeleteCaseTask getTask(CaseNodeData caseNodeData, ProgressIndicator progress) {
|
||||
return new DeleteCaseOutputTask(caseNodeData, progress);
|
||||
return new DeleteCaseTask(caseNodeData, DeleteOptions.DELETE_OUTPUT, progress);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -23,10 +23,12 @@ import java.io.IOException;
|
||||
import java.nio.file.Path;
|
||||
import java.nio.file.Paths;
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Scanner;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.logging.Level;
|
||||
import org.openide.util.Exceptions;
|
||||
import org.openide.util.Lookup;
|
||||
import org.openide.util.NbBundle;
|
||||
import org.openide.util.NbBundle.Messages;
|
||||
import org.sleuthkit.autopsy.casemodule.Case;
|
||||
@ -36,73 +38,111 @@ import org.sleuthkit.autopsy.casemodule.multiusercases.CaseCoordinationServiceUt
|
||||
import org.sleuthkit.autopsy.coordinationservice.CoordinationService;
|
||||
import org.sleuthkit.autopsy.coordinationservice.CoordinationService.CategoryNode;
|
||||
import org.sleuthkit.autopsy.coordinationservice.CoordinationService.CoordinationServiceException;
|
||||
import org.sleuthkit.autopsy.coordinationservice.CoordinationService.Lock;
|
||||
import org.sleuthkit.autopsy.coreutils.FileUtil;
|
||||
import org.sleuthkit.autopsy.progress.ProgressIndicator;
|
||||
import org.sleuthkit.autopsy.coreutils.Logger;
|
||||
import org.sleuthkit.autopsy.experimental.autoingest.AutoIngestJobNodeData.InvalidDataException;
|
||||
|
||||
/**
|
||||
* A base class for tasks that delete part or all of a given case.
|
||||
* A task that deletes part or all of a given case. Note that all logging done
|
||||
* by this task is directed to the dedicated auto ingest dashboard log instead
|
||||
* of to the general application log.
|
||||
*/
|
||||
abstract class DeleteCaseTask implements Runnable {
|
||||
// RJCTODO:
|
||||
// 1. Expand case type in case metadata to include auto ingest cases.
|
||||
// Disable the delete menu item in the main app menu for auto ingest cases,
|
||||
// and possibly also use this to delete the add data source capability. Could use
|
||||
// this to limit the display of nodes in the auto ingest cases dashboard.
|
||||
// 2. When an instance of this class finishes, publish an event via event bus
|
||||
// so that the case browser can refresh.
|
||||
// 3. Add code to file deletion utilities such that on Windows, for paths
|
||||
// exceeding 255 chars, robocopy is invoked for the deletion. Make the new file
|
||||
// deletion utility throw exceptions instead of returning a boolean result code.
|
||||
// 4. Make other dashboards use the dashboard logger.
|
||||
// 5. Consider moving all of the dashboard code into its own autoingest.dashboard package.
|
||||
// 6. AutoIngestManager.addCompletedJob node data version updating might be out of date.
|
||||
// 7. Deal with cancellation during lock releases. Look at using
|
||||
// https://google.github.io/guava/releases/19.0/api/docs/com/google/common/util/concurrent/Uninterruptibles.html
|
||||
// getUninterruptibly to do so.
|
||||
// 8. With the removal of the auto ingest control panel, we can eliminate the
|
||||
// completed jobs list and the processing list from AutoIngestManager.
|
||||
final class DeleteCaseTask implements Runnable {
|
||||
|
||||
private static final int MANIFEST_FILE_LOCKING_TIMEOUT_MINS = 5;
|
||||
private static final Logger logger = AutoIngestDashboardLogger.getLogger();
|
||||
private final CaseNodeData caseNodeData;
|
||||
private final String caseDisplayName;
|
||||
private final String caseUniqueName;
|
||||
private final Path caseDirectoryPath;
|
||||
private final DeleteOptions deleteOption;
|
||||
private final ProgressIndicator progress;
|
||||
private final List<AutoIngestJobNodeData> nodeDataForAutoIngestJobs;
|
||||
private final Map<String, CoordinationService.Lock> manifestFileLocks;
|
||||
private final List<Lock> manifestFileLocks;
|
||||
private CoordinationService coordinationService;
|
||||
|
||||
/*
|
||||
* Options to support implementing different case deletion use cases.
|
||||
*/
|
||||
public enum DeleteOptions {
|
||||
/**
|
||||
* Delete the auto ingest job manifests and corresponding data sources,
|
||||
* if any, while leaving the manifest file coordination service nodes
|
||||
* and the rest of the case intact. The use case is freeing auto ingest
|
||||
* input directory space while retaining the option to restore the data
|
||||
* sources, effectively restoring the case.
|
||||
*/
|
||||
DELETE_INPUT,
|
||||
/**
|
||||
* Delete the auto ingest job coordination service nodes, if any, and
|
||||
* the output for a case produced via auto ingest, while leaving the
|
||||
* auto ingest job input directories intact. The use case is auto ingest
|
||||
* reprocessing of a case with a clean slate without having to restore
|
||||
* the input directories.
|
||||
*/
|
||||
DELETE_OUTPUT,
|
||||
/**
|
||||
* Delete everything.
|
||||
*/
|
||||
DELETE_ALL
|
||||
}
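/*
 * Minimal usage sketch for these options, illustrative only and not part of
 * this change: the scheduleDeletion helper is hypothetical, and
 * java.util.concurrent.ExecutorService and java.util.concurrent.Future are
 * assumed to be available to the caller. The three-argument DeleteCaseTask
 * constructor used here is the one defined below.
 */
private static Future<?> scheduleDeletion(ExecutorService executor, CaseNodeData caseNodeData, DeleteOptions option, ProgressIndicator progress) {
    // DELETE_INPUT frees input directory space only, DELETE_OUTPUT clears the
    // case output so it can be reprocessed from the retained input, and
    // DELETE_ALL removes everything associated with the case.
    return executor.submit(new DeleteCaseTask(caseNodeData, option, progress));
}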
|
||||
|
||||
/**
|
||||
* Constructs the base class part of a task that deletes part or all of a
|
||||
* given case.
|
||||
* Constructs a task that deletes part or all of a given case. Note that all
|
||||
* logging is directed to the dedicated auto ingest dashboard log instead of
|
||||
* to the general application log.
|
||||
*
|
||||
* @param caseNodeData The case directory lock coordination service node
|
||||
* data for the case.
|
||||
* @param caseNodeData The case directory coordination service node data for
|
||||
* the case.
|
||||
* @param deleteOption The deletion option for the task.
|
||||
* @param progress A progress indicator.
|
||||
*/
|
||||
DeleteCaseTask(CaseNodeData caseNodeData, ProgressIndicator progress) {
|
||||
DeleteCaseTask(CaseNodeData caseNodeData, DeleteOptions deleteOption, ProgressIndicator progress) {
|
||||
this.caseNodeData = caseNodeData;
|
||||
this.deleteOption = deleteOption;
|
||||
this.progress = progress;
|
||||
/*
|
||||
* Design Decision Note: It was decided to add the following state to
|
||||
* instances of this class to make it easier to access, given that the class
|
||||
* design favors instance methods over static methods.
|
||||
*/
|
||||
this.caseDisplayName = caseNodeData.getDisplayName();
|
||||
this.caseUniqueName = caseNodeData.getName();
|
||||
this.caseDirectoryPath = caseNodeData.getDirectory();
|
||||
this.nodeDataForAutoIngestJobs = new ArrayList<>();
|
||||
this.manifestFileLocks = new HashMap<>();
|
||||
this.manifestFileLocks = new ArrayList<>();
|
||||
}
|
||||
|
||||
@Override
|
||||
@NbBundle.Messages({
|
||||
"DeleteCaseTask.progress.startMessage=Preparing for deletion..."
|
||||
"DeleteCaseTask.progress.startMessage=Starting deletion..."
|
||||
})
|
||||
public void run() {
|
||||
try {
|
||||
progress.start(Bundle.DeleteCaseTask_progress_startMessage());
|
||||
logger.log(Level.INFO, String.format("Beginning deletion of %s (%s) in %s", caseDisplayName, caseUniqueName, caseDirectoryPath));
|
||||
logger.log(Level.INFO, String.format("Starting attempt to delete %s (%s)", caseNodeData.getDisplayName(), deleteOption));
|
||||
deleteCase();
|
||||
logger.log(Level.SEVERE, String.format("Deletion of %s (%s) in %s completed", caseDisplayName, caseUniqueName, caseDirectoryPath));
|
||||
logger.log(Level.INFO, String.format("Finished attempt to delete %s (%s)", caseNodeData.getDisplayName(), deleteOption));
|
||||
|
||||
} catch (Throwable ex) {
|
||||
/*
|
||||
* Unexpected runtime exceptions firewall. This task is designed to
|
||||
* be able to be run in an executor service thread pool without
|
||||
* calling get() on the task's Future<Void>, so this ensures that
|
||||
* such errors do get ignored.
|
||||
* This is an unexpected runtime exceptions firewall. It is here
|
||||
* because this task is designed to be able to be run in scenarios
|
||||
* where there is no call to get() on a Future<Void> associated with
|
||||
* the task, so this ensures that any such errors get logged.
|
||||
*/
|
||||
logger.log(Level.INFO, String.format("Unexpected error deleting %s (%s) in %s", caseDisplayName, caseUniqueName, caseDirectoryPath), ex);
|
||||
logger.log(Level.SEVERE, String.format("Unexpected error deleting %s", caseNodeData.getDisplayName()), ex);
|
||||
|
||||
} finally {
|
||||
progress.finish();
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
@ -110,426 +150,487 @@ abstract class DeleteCaseTask implements Runnable {
|
||||
*/
|
||||
@NbBundle.Messages({
|
||||
"DeleteCaseTask.progress.connectingToCoordSvc=Connecting to the coordination service...",
|
||||
"DeleteCaseTask.progress.acquiringCaseNameLock=Acquiring an exclusive case name lock...",
|
||||
"DeleteCaseTask.progress.acquiringCaseDirLock=Acquiring an exclusive case directory lock...",
|
||||
"DeleteCaseTask.progress.gettingJobNodeData=Getting node data for auto ingest jobs...",
|
||||
"DeleteCaseTask.progress.acquiringManifestLocks=Acquiring exclusive manifest file locks..."
|
||||
"DeleteCaseTask.progress.acquiringCaseNameLock=Acquiring exclusive case name lock...",
|
||||
"DeleteCaseTask.progress.acquiringCaseDirLock=Acquiring exclusive case directory lock...",
|
||||
"DeleteCaseTask.progress.acquiringManifestLocks=Acquiring exclusive manifest file locks...",
|
||||
"DeleteCaseTask.progress.deletingDirLockNode=Deleting case directory lock coordination service node...",
|
||||
"DeleteCaseTask.progress.deletingNameLockNode=Deleting case name lock coordination service node..."
|
||||
})
|
||||
private void deleteCase() {
|
||||
progress.progress(Bundle.DeleteCaseTask_progress_connectingToCoordSvc());
|
||||
logger.log(Level.INFO, String.format("Connecting to coordination service for deletion of %s (%s) in %s", caseDisplayName, caseUniqueName, caseDirectoryPath));
|
||||
logger.log(Level.INFO, String.format("Connecting to the coordination service for deletion of %s", caseNodeData.getDisplayName()));
|
||||
try {
|
||||
coordinationService = CoordinationService.getInstance();
|
||||
} catch (CoordinationService.CoordinationServiceException ex) {
|
||||
logger.log(Level.SEVERE, String.format("Failed to connect to the coordination service, cannot delete %s (%s) in %s", caseDisplayName, caseUniqueName, caseDirectoryPath), ex);
|
||||
logger.log(Level.SEVERE, String.format("Could not delete %s because an error occurred connecting to the coordination service", caseNodeData.getDisplayName()), ex);
|
||||
return;
|
||||
}
|
||||
|
||||
if (Thread.currentThread().isInterrupted()) {
|
||||
logger.log(Level.INFO, String.format("Deletion of %s (%s) in %s cancelled", caseDisplayName, caseUniqueName, caseDirectoryPath));
|
||||
logger.log(Level.INFO, String.format("Deletion of %s cancelled", caseNodeData.getDisplayName()));
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* Acquire an exclusive case name lock. This is the lock that auto
|
||||
* ingest nodes acquire exclusively when creating or opening a case
|
||||
* specified in an auto ingest job manifest file to ensure that only one
|
||||
* auto ingest node at a time can search the auto ingest output
|
||||
* directory for an existing case matching the one in the manifest file.
|
||||
* Acquiring this lock effectively locks auto ingest node job processing
|
||||
* tasks out of the case to be deleted.
|
||||
* Acquire an exclusive case name lock. The case name lock is the lock
|
||||
* that auto ingest node (AIN) job processing tasks acquire exclusively
|
||||
* when creating or opening a case specified in an auto ingest job
|
||||
* manifest file. The reason AINs do this is to ensure that only one of
|
||||
* them at a time can search the auto ingest output directory for an
|
||||
* existing case matching the one in the manifest file. If a matching
|
||||
* case is found, it is opened, otherwise the case is created. Acquiring
|
||||
* this lock effectively disables this AIN job processing task behavior
|
||||
* while the case is being deleted.
|
||||
*/
|
||||
progress.progress(Bundle.DeleteCaseTask_progress_acquiringCaseNameLock());
|
||||
logger.log(Level.INFO, String.format("Acquiring an exclusive case name lock for %s (%s) in %s", caseDisplayName, caseUniqueName, caseDirectoryPath));
|
||||
String caseNameLockNodeName = CaseCoordinationServiceUtils.getCaseLockName(caseDirectoryPath);
|
||||
try (CoordinationService.Lock nameLock = coordinationService.tryGetExclusiveLock(CategoryNode.CASES, caseNameLockNodeName)) {
|
||||
logger.log(Level.INFO, String.format("Acquiring an exclusive case name lock for %s", caseNodeData.getDisplayName()));
|
||||
String caseNameLockName = CaseCoordinationServiceUtils.getCaseNameLockName(caseNodeData.getDirectory());
|
||||
try (CoordinationService.Lock nameLock = coordinationService.tryGetExclusiveLock(CategoryNode.CASES, caseNameLockName)) {
|
||||
if (nameLock == null) {
|
||||
logger.log(Level.INFO, String.format("Could not delete %s (%s) in %s because a case name lock was held by another host", caseDisplayName, caseUniqueName, caseDirectoryPath));
|
||||
logger.log(Level.INFO, String.format("Could not delete %s because a case name lock was already held by another host", caseNodeData.getDisplayName()));
|
||||
return;
|
||||
}
|
||||
|
||||
if (Thread.currentThread().isInterrupted()) {
|
||||
logger.log(Level.INFO, String.format("Deletion of %s (%s) in %s cancelled", caseDisplayName, caseUniqueName, caseDirectoryPath));
|
||||
logger.log(Level.INFO, String.format("Deletion of %s cancelled", caseNodeData.getDisplayName()));
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* Acquire an exclusive case directory lock. A shared case directory
|
||||
* lock is acquired by any node (auto ingest or examiner) when it
|
||||
* opens a case and is held by the node for as long as the case is
|
||||
* open. Acquiring this lock exclusively ensures that no other node
|
||||
* currently has the case to be deleted open and prevents another
|
||||
* node from trying to open the case while it is being deleted.
|
||||
* lock is acquired by each auto ingest node (AIN) and examiner node
|
||||
* (EIN) when it opens a case. The shared locks are held by the AINs
|
||||
* and EINs for as long as they have the case open. Acquiring this
|
||||
* lock exclusively ensures that no AIN or EIN has the case to be
|
||||
* deleted open and prevents another node from trying to open the
|
||||
* case while it is being deleted.
|
||||
*/
|
||||
boolean success = true; // RJCTODO: Instead of having this flag, read the casenodedata instead
|
||||
progress.progress(Bundle.DeleteCaseTask_progress_acquiringCaseDirLock());
|
||||
logger.log(Level.INFO, String.format("Acquiring an exclusive case directory lock for %s (%s) in %s", caseDisplayName, caseUniqueName, caseDirectoryPath));
|
||||
String caseDirLockNodeName = CaseCoordinationServiceUtils.getCaseDirectoryLockName(caseDirectoryPath);
|
||||
try (CoordinationService.Lock caseDirLock = coordinationService.tryGetExclusiveLock(CoordinationService.CategoryNode.CASES, caseDirLockNodeName)) {
|
||||
logger.log(Level.INFO, String.format("Acquiring an exclusive case directory lock for %s", caseNodeData.getDisplayName()));
|
||||
String caseDirLockName = CaseCoordinationServiceUtils.getCaseDirectoryLockName(caseNodeData.getDirectory());
|
||||
try (CoordinationService.Lock caseDirLock = coordinationService.tryGetExclusiveLock(CoordinationService.CategoryNode.CASES, caseDirLockName)) {
|
||||
if (caseDirLock == null) {
|
||||
logger.log(Level.INFO, String.format("Could not delete %s (%s) in %s because a case directory lock was held by another host", caseDisplayName, caseUniqueName, caseDirectoryPath));
|
||||
logger.log(Level.INFO, String.format("Could not delete %s because a case directory lock was already held by another host", caseNodeData.getDisplayName()));
|
||||
return;
|
||||
}
|
||||
|
||||
if (Thread.currentThread().isInterrupted()) {
|
||||
logger.log(Level.INFO, String.format("Deletion of %s (%s) in %s cancelled", caseDisplayName, caseUniqueName, caseDirectoryPath));
|
||||
logger.log(Level.INFO, String.format("Deletion of %s cancelled", caseNodeData.getDisplayName()));
|
||||
return;
|
||||
}
|
||||
|
||||
progress.progress(Bundle.DeleteCaseTask_progress_gettingJobNodeData());
|
||||
logger.log(Level.INFO, String.format("Fetching auto ingest job node data for %s (%s) in %s", caseDisplayName, caseUniqueName, caseDirectoryPath));
|
||||
/*
|
||||
* Acquire exclusive locks for the auto ingest job manifest
|
||||
* files for the case, if any. Manifest file locks are acquired
|
||||
* by the auto ingest node (AIN) input directory scanning tasks
|
||||
* when they look for auto ingest jobs to enqueue, and by the
|
||||
* AIN job processing tasks when they execute a job. Acquiring
|
||||
* these locks here ensures that the scanning tasks and job
|
||||
* processing tasks cannot do anything with the auto ingest jobs
|
||||
* for a case during case deletion.
|
||||
*/
|
||||
progress.progress(Bundle.DeleteCaseTask_progress_acquiringManifestLocks());
|
||||
logger.log(Level.INFO, String.format("Acquiring exclusive manifest file locks for %s", caseNodeData.getDisplayName()));
|
||||
try {
|
||||
getAutoIngestJobNodeData();
|
||||
} catch (CoordinationServiceException ex) {
|
||||
logger.log(Level.SEVERE, String.format("Error fetching auto ingest job node data for %s (%s) in %s, cannot delete case", caseDisplayName, caseUniqueName, caseDirectoryPath), ex);
|
||||
return;
|
||||
} catch (InterruptedException ex) {
|
||||
logger.log(Level.INFO, String.format("Deletion of %s (%s) in %s cancelled", caseDisplayName, caseUniqueName, caseDirectoryPath), ex);
|
||||
return;
|
||||
}
|
||||
|
||||
if (Thread.currentThread().isInterrupted()) {
|
||||
logger.log(Level.INFO, String.format("Deletion of %s (%s) in %s cancelled", caseDisplayName, caseUniqueName, caseDirectoryPath));
|
||||
return;
|
||||
}
|
||||
|
||||
if (!nodeDataForAutoIngestJobs.isEmpty()) {
|
||||
progress.progress(Bundle.DeleteCaseTask_progress_acquiringManifestLocks());
|
||||
logger.log(Level.INFO, String.format("Acquiring exclusive manifest file locks for %s (%s) in %s", caseDisplayName, caseUniqueName, caseDirectoryPath));
|
||||
getManifestFileLocks();
|
||||
if (manifestFileLocks.isEmpty()) {
|
||||
logger.log(Level.INFO, String.format("Could not delete %s (%s) in %s because a case directory lock was held by another host", caseDisplayName, caseUniqueName, caseDirectoryPath));
|
||||
if (!acquireManifestFileLocks()) {
|
||||
logger.log(Level.INFO, String.format("Could not delete %s because a manifest file lock was already held by another host", caseNodeData.getDisplayName()));
|
||||
return;
|
||||
}
|
||||
} else {
|
||||
logger.log(Level.INFO, String.format("No auto ingest job node data found for %s (%s) in %s", caseDisplayName, caseUniqueName, caseDirectoryPath));
|
||||
} catch (CoordinationServiceException ex) {
|
||||
logger.log(Level.WARNING, String.format("Could not delete %s because an error occurred acquiring the manifest file locks", caseNodeData.getDisplayName()), ex);
|
||||
return;
|
||||
} catch (InterruptedException ex) {
|
||||
logger.log(Level.INFO, String.format("Deletion of %s cancelled", caseNodeData.getDisplayName()), ex);
|
||||
return;
|
||||
}
|
||||
|
||||
if (Thread.currentThread().isInterrupted()) {
|
||||
logger.log(Level.INFO, String.format("Deletion of %s (%s) in %s cancelled", caseDisplayName, caseUniqueName, caseDirectoryPath));
|
||||
logger.log(Level.INFO, String.format("Deletion of %s cancelled", caseNodeData.getDisplayName()));
|
||||
releaseManifestFileLocks();
|
||||
return;
|
||||
}
|
||||
|
||||
if (deleteOption == DeleteOptions.DELETE_INPUT || deleteOption == DeleteOptions.DELETE_ALL) {
|
||||
try {
|
||||
logger.log(Level.INFO, String.format("Deleting auto ingest input for %s", caseNodeData.getDisplayName()));
|
||||
deleteAutoIngestInput();
|
||||
} catch (IOException ex) {
|
||||
// RJCTODO:
|
||||
} catch (InterruptedException ex) {
|
||||
logger.log(Level.INFO, String.format("Deletion of %s cancelled", caseNodeData.getDisplayName()), ex);
|
||||
releaseManifestFileLocks();
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
if (Thread.currentThread().isInterrupted()) {
|
||||
logger.log(Level.INFO, String.format("Deletion of %s cancelled", caseNodeData.getDisplayName()));
|
||||
releaseManifestFileLocks();
|
||||
return;
|
||||
}
|
||||
|
||||
if (deleteOption == DeleteOptions.DELETE_OUTPUT || deleteOption == DeleteOptions.DELETE_ALL) {
|
||||
try {
|
||||
success = deleteCaseOutput();
|
||||
} catch (InterruptedException ex) {
|
||||
logger.log(Level.INFO, String.format("Deletion of %s cancelled", caseNodeData.getDisplayName()), ex);
|
||||
releaseManifestFileLocks();
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
if (Thread.currentThread().isInterrupted()) {
|
||||
logger.log(Level.INFO, String.format("Deletion of %s cancelled", caseNodeData.getDisplayName()));
|
||||
releaseManifestFileLocks();
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
deleteWhileHoldingAllLocks();
|
||||
if (deleteOption == DeleteOptions.DELETE_OUTPUT || deleteOption == DeleteOptions.DELETE_ALL) {
|
||||
success = deleteManifestFileNodes();
|
||||
} else {
|
||||
releaseManifestFileLocks();
|
||||
}
|
||||
} catch (InterruptedException ex) {
|
||||
logger.log(Level.INFO, String.format("Deletion of %s (%s) in %s cancelled", caseDisplayName, caseUniqueName, caseDirectoryPath), ex);
|
||||
return;
|
||||
}
|
||||
|
||||
releaseManifestFileLocks();
|
||||
|
||||
try {
|
||||
deleteAfterManifestLocksReleased();
|
||||
} catch (InterruptedException ex) {
|
||||
logger.log(Level.INFO, String.format("Deletion of %s (%s) in %s cancelled", caseDisplayName, caseUniqueName, caseDirectoryPath), ex);
|
||||
logger.log(Level.INFO, String.format("Deletion of %s cancelled", caseNodeData.getDisplayName()), ex);
|
||||
return;
|
||||
}
|
||||
|
||||
} catch (CoordinationServiceException ex) {
|
||||
logger.log(Level.SEVERE, String.format("Error acquiring exclusive case directory lock for %s (%s) in %s", caseDisplayName, caseUniqueName, caseDirectoryPath), ex);
|
||||
logger.log(Level.SEVERE, String.format("Could not delete %s because an error occurred acquiring the case directory lock", caseNodeData.getDisplayName()), ex);
|
||||
return;
|
||||
}
|
||||
|
||||
if (Thread.currentThread().isInterrupted()) {
|
||||
logger.log(Level.INFO, String.format("Deletion of %s (%s) in %s cancelled", caseDisplayName, caseUniqueName, caseDirectoryPath));
|
||||
logger.log(Level.INFO, String.format("Deletion of %s cancelled", caseNodeData.getDisplayName()));
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
deleteAfterCaseDirectoryLockReleased();
|
||||
} catch (InterruptedException ex) {
|
||||
logger.log(Level.INFO, String.format("Deletion of %s (%s) in %s cancelled", caseDisplayName, caseUniqueName, caseDirectoryPath), ex);
|
||||
return;
|
||||
/*
|
||||
* Now that the case directory lock has been released, the
|
||||
* coordination service node for it can be deleted if the use case
|
||||
* requires it. However, if something to be deleted was not deleted,
|
||||
* leave the node so that what was and was not deleted can be
|
||||
* inspected.
|
||||
*/
|
||||
if (success && (deleteOption == DeleteOptions.DELETE_OUTPUT || deleteOption == DeleteOptions.DELETE_ALL)) {
|
||||
progress.progress(Bundle.DeleteCaseTask_progress_deletingDirLockNode());
|
||||
try {
|
||||
Case.deleteCaseDirectoryLockNode(caseNodeData, progress);
|
||||
} catch (CoordinationServiceException ex) {
|
||||
logger.log(Level.WARNING, String.format("Error deleting case directory lock node for %s", caseNodeData.getDisplayName()), ex);
|
||||
} catch (InterruptedException ex) {
|
||||
logger.log(Level.INFO, String.format("Deletion of %s cancelled", caseNodeData.getDisplayName()), ex);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
} catch (CoordinationServiceException ex) {
|
||||
logger.log(Level.SEVERE, String.format("Error acquiring exclusive case name lock for %s (%s) in %s", caseDisplayName, caseUniqueName, caseDirectoryPath), ex);
|
||||
}
|
||||
|
||||
try {
|
||||
deleteAfterCaseNameLockReleased();
|
||||
} catch (InterruptedException ex) {
|
||||
logger.log(Level.INFO, String.format("Deletion of %s (%s) in %s cancelled", caseDisplayName, caseUniqueName, caseDirectoryPath), ex);
|
||||
logger.log(Level.SEVERE, String.format("Could not delete %s because an error occurred acquiring the case name lock", caseNodeData.getDisplayName()), ex);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Deletes the parts of the case that need to be deleted while holding all
|
||||
* of the exclusive locks: the case name lock, the case directory lock, and
|
||||
* the manifest file locks. Note that the locks are acquired in that order
|
||||
* and released in the opposite order (a short sketch of this ordering appears
* below, after these abstract method declarations).
|
||||
*/
|
||||
abstract void deleteWhileHoldingAllLocks() throws InterruptedException;
|
||||
if (Thread.currentThread().isInterrupted()) {
|
||||
logger.log(Level.INFO, String.format("Deletion of %s cancelled", caseNodeData.getDisplayName()));
|
||||
return;
|
||||
}
|
||||
|
||||
/**
|
||||
* Deletes the parts of the case that need to be deleted after the release
|
||||
* of the exclusive manifest file locks, while still holding the exclusive
|
||||
* case name and case directory locks; the manifest file locks are the first
|
||||
* locks released.
|
||||
*/
|
||||
abstract void deleteAfterManifestLocksReleased() throws InterruptedException;
|
||||
|
||||
/**
|
||||
* Deletes the parts of the case that need to be deleted after the release
|
||||
* of the exclusive manifest file locks and case directory lock, while still
|
||||
* holding the exclusive case name lock; the case name lock is the last lock
|
||||
* released.
|
||||
*/
|
||||
abstract void deleteAfterCaseDirectoryLockReleased() throws InterruptedException;
|
||||
|
||||
/**
|
||||
* Deletes the parts of the case that need to be deleted after the release
|
||||
* of all of the exclusive locks; the case name lock is the last lock
|
||||
* released.
|
||||
*/
|
||||
abstract void deleteAfterCaseNameLockReleased() throws InterruptedException;
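/*
 * Illustrative sketch only, not part of this change, of the acquire/release
 * ordering described above: case name lock, then case directory lock, then
 * the manifest file locks, released in the reverse order as the
 * try-with-resources block unwinds. The lockOrderingSketch name and the
 * lock-name parameters are placeholders; the tryGetExclusiveLock,
 * acquireManifestFileLocks, and releaseManifestFileLocks calls are the same
 * ones used elsewhere in this class.
 */
private void lockOrderingSketch(String caseNameLockName, String caseDirLockName) throws CoordinationServiceException, InterruptedException {
    try (CoordinationService.Lock nameLock = coordinationService.tryGetExclusiveLock(CategoryNode.CASES, caseNameLockName);
            CoordinationService.Lock dirLock = coordinationService.tryGetExclusiveLock(CategoryNode.CASES, caseDirLockName)) {
        if (nameLock == null || dirLock == null || !acquireManifestFileLocks()) {
            return; // another host holds one of the locks; delete nothing
        }
        try {
            // ...delete the parts of the case that require every lock here...
        } finally {
            releaseManifestFileLocks(); // manifest file locks are released first...
        }
    } // ...then the case directory lock, and the case name lock last
}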
|
||||
|
||||
/**
|
||||
* Deletes the auto ingest job input directories for the case. Intended to
|
||||
* be called by subclasses, if required, in their customization of the
|
||||
* deleteWhileHoldingAllLocks step of the case deletion algorithm.
|
||||
*/
|
||||
@NbBundle.Messages({
|
||||
"# {0} - input directory name", "DeleteCaseTask.progress.deletingInputDir=Deleting input directory {0}..."
|
||||
})
|
||||
protected void deleteInputDirectories() {
|
||||
boolean allInputDirsDeleted = true;
|
||||
for (AutoIngestJobNodeData jobNodeData : nodeDataForAutoIngestJobs) {
|
||||
Path inputDirPath = jobNodeData.getManifestFilePath().getParent();
|
||||
File inputDir = inputDirPath.toFile();
|
||||
if (inputDir.exists()) {
|
||||
progress.progress(Bundle.DeleteCaseTask_progress_deletingInputDir(inputDirPath));
|
||||
logger.log(Level.INFO, String.format("Deleting input directory %s for %s (%s) in %s", inputDirPath, caseDisplayName, caseUniqueName, caseDirectoryPath));
|
||||
if (!FileUtil.deleteDir(new File(inputDirPath.toString()))) {
|
||||
logger.log(Level.WARNING, String.format("Failed to delete the input directory %s for %s (%s) in %s", inputDirPath, caseDisplayName, caseUniqueName, caseDirectoryPath));
|
||||
allInputDirsDeleted = false;
|
||||
}
|
||||
/*
|
||||
* Now that the case name lock has been released, the coordination
|
||||
* service node for it can be deleted if the use case requires it.
|
||||
*/
|
||||
if (deleteOption == DeleteOptions.DELETE_OUTPUT || deleteOption == DeleteOptions.DELETE_ALL) {
|
||||
progress.progress(Bundle.DeleteCaseTask_progress_deletingNameLockNode());
|
||||
try {
|
||||
String caseNameLockNodeName = CaseCoordinationServiceUtils.getCaseNameLockName(caseNodeData.getDirectory());
|
||||
coordinationService.deleteNode(CategoryNode.CASES, caseNameLockNodeName); // RJCTODO: Should this be a Case method?
|
||||
} catch (CoordinationServiceException ex) {
|
||||
logger.log(Level.WARNING, String.format("Error deleting case name lock node for %s", caseNodeData.getDisplayName()), ex);
|
||||
} catch (InterruptedException ex) {
|
||||
logger.log(Level.INFO, String.format("Deletion of %s cancelled", caseNodeData.getDisplayName()), ex);
|
||||
}
|
||||
}
|
||||
if (allInputDirsDeleted) {
|
||||
setDeletedItemFlag(CaseNodeData.DeletedFlags.DATA_SOURCES);
|
||||
}
|
||||
|
||||
/**
|
||||
* Acquires either all or none of the auto ingest job manifest file locks
|
||||
* for a case.
|
||||
*
|
||||
* @return True if all of the locks were acquired; false otherwise.
|
||||
*
|
||||
* @throws CoordinationServiceException If there is an error completing a
|
||||
* coordination service operation.
|
||||
* @throws InterruptedException If the thread in which this task is
|
||||
* running is interrupted while blocked
|
||||
* waiting for a coordination service
|
||||
* operation to complete.
|
||||
*/
|
||||
@NbBundle.Messages({
|
||||
"# {0} - manifest file path", "DeleteCaseTask.progress.lockingManifest=Locking manifest file {0}..."
|
||||
})
|
||||
private boolean acquireManifestFileLocks() throws CoordinationServiceException, InterruptedException {
|
||||
/*
|
||||
* Get the "original" case name from the case directory. This is
|
||||
* necessary because the case display name can be changed and the case
|
||||
* name may have a time stamp added to make it unique, depending on how
|
||||
* the case was created. An alternative approach would be to strip the
|
||||
* time stamp from the case name in the case node data instead, but the
|
||||
* code for that is already in the utility method called here.
|
||||
*/
|
||||
String caseName = CaseCoordinationServiceUtils.getCaseNameLockName(caseNodeData.getDirectory());
|
||||
try {
|
||||
boolean allLocksAcquired = true;
|
||||
// RJCTODO: Read in the list of manifests for the case instead of
|
||||
// inspecting the nodes this way, once the recording of the
|
||||
// manifests is in place.
|
||||
final List<String> nodeNames = coordinationService.getNodeList(CoordinationService.CategoryNode.MANIFESTS);
|
||||
for (String manifestPath : nodeNames) {
|
||||
if (Thread.currentThread().isInterrupted()) {
|
||||
throw new InterruptedException();
|
||||
}
|
||||
|
||||
byte[] nodeBytes = coordinationService.getNodeData(CoordinationService.CategoryNode.MANIFESTS, manifestPath);
|
||||
if (nodeBytes == null || nodeBytes.length <= 0) {
|
||||
logger.log(Level.WARNING, String.format("Empty coordination service node data found for %s", manifestPath));
|
||||
continue;
|
||||
}
|
||||
|
||||
if (Thread.currentThread().isInterrupted()) {
|
||||
throw new InterruptedException();
|
||||
}
|
||||
|
||||
AutoIngestJobNodeData nodeData;
|
||||
try {
|
||||
nodeData = new AutoIngestJobNodeData(nodeBytes);
|
||||
} catch (InvalidDataException ex) {
|
||||
logger.log(Level.WARNING, String.format("Invalid coordination service node data found for %s", manifestPath), ex);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (Thread.currentThread().isInterrupted()) {
|
||||
throw new InterruptedException();
|
||||
}
|
||||
|
||||
if (caseName.equals(nodeData.getCaseName())) {
|
||||
/*
|
||||
* When acquiring manifest file locks, it is reasonable to
|
||||
* block while acquiring this lock since the auto ingest
|
||||
* node (AIN) input directory scanning tasks do a lot of
|
||||
* short-term acquiring and releasing of manifest file
|
||||
* locks. The assumption here is that the originator of this
|
||||
* case deletion task is not asking for deletion of a case
|
||||
* that has a job that an auto ingest node (AIN) job processing
|
||||
* task is working on and that
|
||||
* MANIFEST_FILE_LOCKING_TIMEOUT_MINS is not very long,
|
||||
* anyway, so we can and should wait a bit.
|
||||
*/
|
||||
logger.log(Level.INFO, String.format("Exclusively locking the manifest %s for %s", manifestPath, caseNodeData.getDisplayName()));
|
||||
progress.progress(Bundle.DeleteCaseTask_progress_lockingManifest(manifestPath));
|
||||
CoordinationService.Lock manifestLock = coordinationService.tryGetExclusiveLock(CoordinationService.CategoryNode.MANIFESTS, manifestPath, MANIFEST_FILE_LOCKING_TIMEOUT_MINS, TimeUnit.MINUTES);
|
||||
if (null != manifestLock) {
|
||||
manifestFileLocks.add(manifestLock);
|
||||
} else {
|
||||
allLocksAcquired = false;
|
||||
logger.log(Level.INFO, String.format("Failed to exclusively lock the manifest %s for %s because the lock was already held by another host", manifestPath, caseNodeData.getDisplayName()));
|
||||
releaseManifestFileLocks();
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
return allLocksAcquired;
|
||||
|
||||
} catch (CoordinationServiceException | InterruptedException ex) {
|
||||
releaseManifestFileLocks();
|
||||
throw ex;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Deletes the case database, the text index, and the case directory for the
|
||||
* case. Intended to be called by subclasses, if required, in their
|
||||
* customization of the deleteWhileHoldingAllLocks step of the case deletion
|
||||
* algorithm.
|
||||
* Deletes the auto ingest job input manifests for the case along with the
|
||||
* corresponding data sources.
|
||||
*
|
||||
* @throws IOException If there is an error opening the case
|
||||
* manifests list file.
|
||||
* @throws InterruptedException If the thread in which this task is running
|
||||
* is interrupted while blocked waiting for a
|
||||
* coordination service operation to complete.
|
||||
*/
|
||||
@NbBundle.Messages({
|
||||
"# {0} - manifest file path", "DeleteCaseTask.progress.deletingManifest=Deleting manifest file {0}..."
|
||||
})
|
||||
private void deleteAutoIngestInput() throws IOException, InterruptedException {
|
||||
boolean allInputDeleted = true;
|
||||
final Path manifestsListFilePath = Paths.get(caseNodeData.getDirectory().toString(), AutoIngestManager.getCaseManifestsListFileName());
|
||||
final Scanner manifestsListFileScanner = new Scanner(manifestsListFilePath);
|
||||
while (manifestsListFileScanner.hasNext()) {
|
||||
if (Thread.currentThread().isInterrupted()) {
|
||||
throw new InterruptedException();
|
||||
}
|
||||
final String manifestFilePath = manifestsListFileScanner.next();
|
||||
final File manifestFile = new File(manifestFilePath);
|
||||
if (manifestFile.exists()) {
|
||||
// RJCTODO: Parse file, open case database, delete data sources
|
||||
// before deleting manifest file
|
||||
progress.progress(Bundle.DeleteCaseTask_progress_deletingManifest(manifestFilePath));
|
||||
logger.log(Level.INFO, String.format("Deleting manifest file %s for %s", manifestFilePath, caseNodeData.getDisplayName()));
|
||||
if (!manifestFile.delete()) {
|
||||
logger.log(Level.WARNING, String.format("Failed to delete manifest file %s for %s", manifestFilePath, caseNodeData.getDisplayName()));
|
||||
allInputDeleted = false;
|
||||
}
|
||||
}
|
||||
}
if (allInputDeleted) {
setDeletedItemFlag(CaseNodeData.DeletedFlags.DATA_SOURCES);
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Deletes the case database, the text index, the case directory, and the
|
||||
* case resources and auto ingest log coordination service lock nodes for
|
||||
* the case.
|
||||
*
|
||||
* @return True if all of the case output that was found was deleted,
|
||||
* false otherwise.
|
||||
*
|
||||
* @throws InterruptedException If the thread in which this task is running
|
||||
* is interrupted while blocked waiting for a
|
||||
* coordination service operation to complete.
|
||||
*/
|
||||
@NbBundle.Messages({
|
||||
"DeleteCaseTask.progress.locatingCaseMetadataFile=Locating case metadata file...",
|
||||
"DeleteCaseTask.progress.deletingCaseOutput=Deleting case database, text index, and directory...",
|
||||
"DeleteCaseTask.progress.deletingJobLogLockNode=Deleting case auto ingest job log lock node..."
|
||||
"DeleteCaseTask.progress.deletingResourcesLockNode=Deleting case resources coordination service node...",
|
||||
"DeleteCaseTask.progress.deletingJobLogLockNode=Deleting case auto ingest job coordination service node..."
|
||||
})
|
||||
protected void deleteCaseOutput() {
|
||||
private boolean deleteCaseOutput() throws InterruptedException {
|
||||
boolean errorsOccurred = false;
|
||||
progress.progress(Bundle.DeleteCaseTask_progress_locatingCaseMetadataFile());
|
||||
logger.log(Level.INFO, String.format("Locating metadata file for %s (%s) in %s", caseDisplayName, caseUniqueName, caseDirectoryPath));
|
||||
logger.log(Level.INFO, String.format("Locating metadata file for %s", caseNodeData.getDisplayName()));
|
||||
CaseMetadata caseMetadata = null;
|
||||
final File caseDirectory = caseDirectoryPath.toFile();
|
||||
final File[] filesInDirectory = caseDirectory.listFiles();
|
||||
if (filesInDirectory != null) {
|
||||
for (File file : filesInDirectory) {
|
||||
if (file.getName().toLowerCase().endsWith(CaseMetadata.getFileExtension()) && file.isFile()) {
|
||||
try {
|
||||
caseMetadata = new CaseMetadata(Paths.get(file.getPath()));
|
||||
} catch (CaseMetadata.CaseMetadataException ex) {
|
||||
logger.log(Level.WARNING, String.format("Error getting opening case metadata file for %s (%s) in %s", caseDisplayName, caseUniqueName, caseDirectoryPath), ex);
|
||||
final File caseDirectory = caseNodeData.getDirectory().toFile();
|
||||
if (caseDirectory.exists()) {
|
||||
final File[] filesInDirectory = caseDirectory.listFiles();
|
||||
if (filesInDirectory != null) {
|
||||
for (File file : filesInDirectory) {
|
||||
if (file.getName().toLowerCase().endsWith(CaseMetadata.getFileExtension()) && file.isFile()) {
|
||||
try {
|
||||
caseMetadata = new CaseMetadata(Paths.get(file.getPath()));
|
||||
} catch (CaseMetadata.CaseMetadataException ex) {
|
||||
logger.log(Level.WARNING, String.format("Error opening the case metadata file for %s", caseNodeData.getDisplayName()), ex);
|
||||
}
|
||||
break;
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (Thread.currentThread().isInterrupted()) {
|
||||
throw new InterruptedException();
|
||||
}
|
||||
|
||||
if (caseMetadata != null) {
|
||||
logger.log(Level.INFO, String.format("Deleting output for %s", caseNodeData.getDisplayName()));
|
||||
errorsOccurred = Case.deleteMultiUserCase(caseNodeData, caseMetadata, progress, logger); // RJCTODO: Check for errors occurred?
|
||||
} else {
|
||||
logger.log(Level.WARNING, String.format("Failed to locate metadata file for %s", caseNodeData.getDisplayName()));
|
||||
}
|
||||
}
|
||||
|
||||
if (caseMetadata != null) {
|
||||
progress.progress(Bundle.DeleteCaseTask_progress_deletingCaseOutput());
|
||||
logger.log(Level.INFO, String.format("Deleting output for %s (%s) in %s", caseDisplayName, caseUniqueName, caseDirectoryPath));
|
||||
Case.deleteMultiUserCase(caseNodeData, caseMetadata, progress); // RJCTODO: Make this method throw the interrupted exception.
|
||||
} else {
|
||||
logger.log(Level.WARNING, String.format("Failed to locate metadata file for %s (%s) in %s", caseDisplayName, caseUniqueName, caseDirectoryPath));
|
||||
if (Thread.currentThread().isInterrupted()) {
|
||||
throw new InterruptedException();
|
||||
}
|
||||
|
||||
progress.progress(Bundle.DeleteCaseTask_progress_deletingResourcesLockNode());
|
||||
try {
|
||||
Case.deleteCaseResourcesLockNode(caseNodeData, progress);
|
||||
} catch (CoordinationServiceException ex) {
|
||||
logger.log(Level.WARNING, String.format("Error deleting case resources coordination service node for %s", caseNodeData.getDisplayName()), ex);
|
||||
}
|
||||
|
||||
if (Thread.currentThread().isInterrupted()) {
|
||||
throw new InterruptedException();
|
||||
}
|
||||
|
||||
// RJCTODO: Check to see if getNodeData returns null if the node does not exist;
|
||||
// if so, make use of it
|
||||
progress.progress(Bundle.DeleteCaseTask_progress_deletingJobLogLockNode());
|
||||
logger.log(Level.INFO, String.format("Deleting case auto ingest job log lock node for %s (%s) in %s", caseDisplayName, caseUniqueName, caseDirectoryPath));
|
||||
Path logFilePath = AutoIngestJobLogger.getLogPath(caseDirectoryPath); //RJCTODO: USe util here
|
||||
logger.log(Level.INFO, String.format("Deleting case auto ingest job log coordination service node for %s", caseNodeData.getDisplayName()));
|
||||
String logFilePath = CaseCoordinationServiceUtils.getCaseAutoIngestLogLockName(caseNodeData.getDirectory());
|
||||
try {
|
||||
coordinationService.deleteNode(CategoryNode.CASES, logFilePath.toString());
|
||||
coordinationService.deleteNode(CategoryNode.CASES, logFilePath);
|
||||
} catch (CoordinationServiceException ex) {
|
||||
logger.log(Level.WARNING, String.format("Error deleting case auto ingest job log lock node for %s (%s) in %s", caseDisplayName, caseUniqueName, caseDirectoryPath), ex);
|
||||
} catch (InterruptedException ex) {
|
||||
logger.log(Level.INFO, String.format("Deletion of %s (%s) in %s cancelled", caseDisplayName, caseUniqueName, caseDirectoryPath), ex);
|
||||
logger.log(Level.WARNING, String.format("Error deleting case auto ingest job log coordination service node for %s", caseNodeData.getDisplayName()), ex);
|
||||
}
|
||||
|
||||
return errorsOccurred;
|
||||
}
|
||||
|
||||
/**
|
||||
* Deletes the manifest file lock coordination service nodes for the case.
|
||||
* Intended to be called by subclasses, if required, in their customization
|
||||
* of the deleteAfterManifestLocksReleased step of the case deletion
|
||||
* algorithm.
|
||||
*/
|
||||
@Messages({
|
||||
"DeleteCaseTask.progress.deletingManifestFileLockNodes=Deleting manifest file lock nodes..."
|
||||
})
|
||||
protected void deleteManifestFileLockNodes() throws InterruptedException {
|
||||
boolean allInputDirsDeleted = true;
|
||||
progress.progress(Bundle.DeleteCaseTask_progress_deletingManifestFileLockNodes());
|
||||
logger.log(Level.INFO, String.format("Deleting manifest file lock nodes for %s (%s) in %s", caseDisplayName, caseUniqueName, caseDirectoryPath));
|
||||
for (AutoIngestJobNodeData jobNodeData : nodeDataForAutoIngestJobs) {
|
||||
try {
|
||||
logger.log(Level.INFO, String.format("Deleting manifest file lock node for %s for %s (%s) in %s", jobNodeData.getManifestFilePath(), caseDisplayName, caseUniqueName, caseDirectoryPath));
|
||||
coordinationService.deleteNode(CoordinationService.CategoryNode.MANIFESTS, jobNodeData.getManifestFilePath().toString());
|
||||
} catch (CoordinationServiceException ex) {
|
||||
logger.log(Level.WARNING, String.format("Error deleting manifest file lock node %s for %s (%s) in %s", jobNodeData.getManifestFilePath(), caseDisplayName, caseUniqueName, caseDirectoryPath), ex);
|
||||
allInputDirsDeleted = false;
|
||||
}
|
||||
}
|
||||
if (allInputDirsDeleted) {
|
||||
setDeletedItemFlag(CaseNodeData.DeletedFlags.MANIFEST_FILE_LOCK_NODES);
|
||||
}
|
||||
// RJCTODO: Expand case type in case metadata to include auto ingest cases.
|
||||
// Disable delete menu item for auto ingest cases, and possibly also add data source
|
||||
// capability.
|
||||
}
|
||||
|
||||
/**
|
||||
* Deletes the case directory coordination service lock node for the case.
|
||||
* Intended to be called by subclasses, if required, in their customization
|
||||
* of the deleteAfterCaseDirectoryLockReleased step of the case deletion
|
||||
* algorithm.
|
||||
*/
|
||||
@Messages({
|
||||
"DeleteCaseTask.progress.deletingDirLockNode=Deleting case directory lock coordination service node..."
|
||||
})
|
||||
protected void deleteCaseDirectoryLockNode() throws InterruptedException {
|
||||
progress.progress(Bundle.DeleteCaseTask_progress_deletingDirLockNode());
|
||||
try {
|
||||
Case.deleteCaseDirectoryLockNode(caseNodeData, progress); // RJCTODO: Case does not need to expose this?
|
||||
} catch (CoordinationServiceException ex) {
|
||||
logger.log(Level.WARNING, String.format("Error deleting case directory lock node for %s (%s) in %s", caseDisplayName, caseUniqueName, caseDirectoryPath), ex);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Deletes the case name coordination service lock node for the case.
|
||||
* Intended to be called by subclasses, if required, in their customization
|
||||
* of the deleteAfterCaseNameLockReleased step of the case deletion
|
||||
* algorithm.
|
||||
*
|
||||
* @throws InterruptedException
|
||||
*/
|
||||
@Messages({
|
||||
"DeleteCaseTask.progress.deletingNameLockNode=Deleting case name lock node..." // RJCTODO: Use consistent terminology
|
||||
})
|
||||
protected void deleteCaseNameLockNode() throws InterruptedException {
|
||||
progress.progress(Bundle.DeleteCaseTask_progress_deletingNameLockNode());
|
||||
try {
|
||||
String caseNameLockNodeName = CaseCoordinationServiceUtils.getCaseLockName(caseDirectoryPath);
|
||||
coordinationService.deleteNode(CategoryNode.CASES, caseNameLockNodeName);
|
||||
} catch (CoordinationServiceException ex) {
|
||||
logger.log(Level.WARNING, String.format("Error deleting case name lock node for %s (%s) in %s", caseDisplayName, caseUniqueName, caseDirectoryPath), ex);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Fetches the auto ingest job data from the manifest file lock coordination
|
||||
* service nodes for a case.
|
||||
*
|
||||
* @throws CoordinationServiceException If there is an error interacting
|
||||
* with the coordination service.
|
||||
* @throws InterruptedException If the current thread is interrupted
|
||||
* while waiting for the coordination
|
||||
* service.
|
||||
*/
|
||||
private void getAutoIngestJobNodeData() throws CoordinationServiceException, InterruptedException {
|
||||
String caseName = caseDisplayName;
|
||||
final List<String> nodeNames = coordinationService.getNodeList(CoordinationService.CategoryNode.MANIFESTS);
|
||||
for (String nodeName : nodeNames) {
|
||||
try {
|
||||
byte[] nodeBytes = coordinationService.getNodeData(CoordinationService.CategoryNode.CASES, nodeName);
|
||||
if (nodeBytes == null || nodeBytes.length <= 0) {
|
||||
logger.log(Level.WARNING, String.format("Missing auto ingest job node data for manifest file lock node %s, deleting node", nodeName));
|
||||
try {
|
||||
coordinationService.deleteNode(CategoryNode.MANIFESTS, nodeName);
|
||||
} catch (CoordinationServiceException ex) {
|
||||
logger.log(Level.WARNING, String.format("Failed to delete empty manifest file lock node %s", nodeName));
|
||||
}
|
||||
continue;
|
||||
}
|
||||
AutoIngestJobNodeData nodeData = new AutoIngestJobNodeData(nodeBytes);
|
||||
if (caseName.equals(nodeData.getCaseName())) {
|
||||
nodeDataForAutoIngestJobs.add(nodeData);
|
||||
}
|
||||
} catch (CoordinationService.CoordinationServiceException | InvalidDataException ex) {
|
||||
logger.log(Level.WARNING, String.format("Failed to get auto ingest job node data for %s", nodeName), ex);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
    /**
     * Acquires either all or none of the manifest file locks for a case.
     */
    @NbBundle.Messages({
        "# {0} - manifest file name", "DeleteCaseTask.progress.lockingManifestFile=Acquiring exclusive lock on manifest {0}..."
    })
    private void getManifestFileLocks() {
        for (AutoIngestJobNodeData autoIngestJobNodeData : nodeDataForAutoIngestJobs) {
            String manifestPath = autoIngestJobNodeData.getManifestFilePath().toString();
            try {
                progress.progress(Bundle.DeleteCaseTask_progress_lockingManifestFile(manifestPath));
                logger.log(Level.INFO, String.format("Exclusively locking the manifest %s for %s (%s) in %s", manifestPath, caseDisplayName, caseUniqueName, caseDirectoryPath));
                CoordinationService.Lock manifestFileLock = coordinationService.tryGetExclusiveLock(CoordinationService.CategoryNode.MANIFESTS, manifestPath);
                if (null != manifestFileLock) {
                    manifestFileLocks.put(manifestPath, manifestFileLock);
                } else {
                    logger.log(Level.INFO, String.format("Failed to exclusively lock the manifest %s for %s (%s) in %s", manifestPath, caseDisplayName, caseUniqueName, caseDirectoryPath));
                    releaseManifestFileLocks();
                    break;
                }
            } catch (CoordinationService.CoordinationServiceException ex) {
                logger.log(Level.SEVERE, String.format("Error exclusively locking the manifest %s for %s (%s) in %s", manifestPath, caseDisplayName, caseUniqueName, caseDirectoryPath), ex);
                releaseManifestFileLocks();
                break;
            }
        }
    }

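    /*
     * Minimal sketch of the all-or-none contract, hypothetical wiring that is
     * not part of this commit: because getManifestFileLocks() releases and
     * clears the lock map whenever a single lock cannot be acquired, success
     * can be detected by comparing the number of locks held with the number of
     * auto ingest jobs found for the case.
     */
    private boolean acquiredAllManifestFileLocks() throws CoordinationServiceException, InterruptedException {
        getAutoIngestJobNodeData();
        getManifestFileLocks();
        return manifestFileLocks.size() == nodeDataForAutoIngestJobs.size();
    }
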
    /**
     * Releases any manifest file coordination service locks that were acquired
     * for the case.
     */
    @NbBundle.Messages({
        "# {0} - manifest file path", "DeleteCaseTask.progress.releasingManifestLock=Releasing the exclusive coordination service lock on the manifest file {0}..."
    })
    private void releaseManifestFileLocks() {
        if (!manifestFileLocks.isEmpty()) {
            for (Map.Entry<String, CoordinationService.Lock> entry : manifestFileLocks.entrySet()) {
                String manifestFilePath = entry.getKey();
                CoordinationService.Lock manifestFileLock = entry.getValue();
                try {
                    progress.progress(Bundle.DeleteCaseTask_progress_releasingManifestLock(manifestFilePath));
                    logger.log(Level.INFO, String.format("Releasing the exclusive coordination service lock on the manifest file %s for %s (%s) in %s", manifestFilePath, caseDisplayName, caseUniqueName, caseDirectoryPath));
                    manifestFileLock.release();
                } catch (CoordinationServiceException ex) {
                    logger.log(Level.WARNING, String.format("Error releasing the exclusive coordination service lock on the manifest file %s for %s (%s) in %s", manifestFilePath, caseDisplayName, caseUniqueName, caseDirectoryPath), ex);
                }
            }
            manifestFileLocks.clear();
        }
    }

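    /*
     * Illustrative pattern, not taken from this commit: a caller that holds
     * the manifest file locks can use try/finally to guarantee a best-effort
     * release even if a later deletion step throws an unchecked exception.
     */
    private void runWithManifestFileLocksHeld(Runnable deletionStep) {
        try {
            deletionStep.run();
        } finally {
            releaseManifestFileLocks();
        }
    }
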
    /**
     * Releases all of the manifest file locks that have been acquired by this
     * task and attempts to delete the corresponding coordination service
     * nodes.
     *
     * @return True if all of the manifest file coordination service nodes have
     *         been deleted, false otherwise.
     *
     * @throws InterruptedException If the thread in which this task is running
     *                              is interrupted while blocked waiting for a
     *                              coordination service operation to complete.
     */
    @Messages({
        "# {0} - manifest file path", "DeleteCaseTask.progress.deletingManifestFileNode=Deleting the manifest file coordination service node for {0}..."
    })
    private boolean deleteManifestFileNodes() throws InterruptedException {
        boolean allNodesDeleted = true;
        for (Map.Entry<String, CoordinationService.Lock> entry : manifestFileLocks.entrySet()) {
            String manifestFilePath = entry.getKey();
            CoordinationService.Lock manifestFileLock = entry.getValue();
            try {
                progress.progress(Bundle.DeleteCaseTask_progress_releasingManifestLock(manifestFilePath));
                logger.log(Level.INFO, String.format("Releasing the exclusive coordination service lock on the manifest file %s for %s", manifestFilePath, caseNodeData.getDisplayName()));
                manifestFileLock.release();
                progress.progress(Bundle.DeleteCaseTask_progress_deletingManifestFileNode(manifestFilePath));
                logger.log(Level.INFO, String.format("Deleting the manifest file coordination service node for %s for case %s", manifestFilePath, caseNodeData.getDisplayName()));
                coordinationService.deleteNode(CoordinationService.CategoryNode.MANIFESTS, manifestFilePath);
            } catch (CoordinationServiceException ex) {
                allNodesDeleted = false;
                logger.log(Level.WARNING, String.format("Error deleting the manifest file coordination service node for %s for case %s", manifestFilePath, caseNodeData.getDisplayName()), ex);
            }
        }
        manifestFileLocks.clear();
        return allNodesDeleted;
    }

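    /*
     * Hypothetical calling pattern, not shown in this commit: the boolean
     * result of deleteManifestFileNodes() could be used to record progress in
     * the case node data, setting a deleted-item flag only when every manifest
     * file node was actually removed. The flag to record is left to the caller
     * because the relevant CaseNodeData.DeletedFlags constant is not shown
     * here.
     */
    private void deleteManifestFileNodesAndRecordResult(CaseNodeData.DeletedFlags manifestNodesFlag) throws InterruptedException {
        if (deleteManifestFileNodes()) {
            setDeletedItemFlag(manifestNodesFlag);
        }
    }
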
    /**
     * Sets a deleted item flag in the coordination service node data for the
     * case.
     *
     * @param flag The flag to set.
     */
    private void setDeletedItemFlag(CaseNodeData.DeletedFlags flag) {
        try {
            caseNodeData.setDeletedFlag(flag);
            coordinationService.setNodeData(CategoryNode.CASES, caseDirectoryPath.toString(), caseNodeData.toArray());
        } catch (IOException | CoordinationServiceException | InterruptedException ex) {
            logger.log(Level.SEVERE, String.format("Error updating deleted item flag %s for %s (%s) in %s", flag.name(), caseDisplayName, caseUniqueName, caseDirectoryPath), ex);
        }
    }