mirror of https://github.com/overcuriousity/autopsy-flatpak.git
synced 2025-07-06 21:00:22 +00:00

Change data source ingest behavior

This commit is contained in:
parent db07da4a0e
commit 99fd68b1c5
@@ -29,7 +29,6 @@
 */
package org.sleuthkit.autopsy.examples;

import java.util.HashMap;
import java.util.List;
import java.util.logging.Level;
import org.sleuthkit.autopsy.casemodule.Case;
@@ -46,42 +45,38 @@ import org.sleuthkit.autopsy.coreutils.Logger;
import org.sleuthkit.autopsy.ingest.DataSourceIngestModule;
import org.sleuthkit.autopsy.ingest.IngestJobContext;
import org.sleuthkit.autopsy.ingest.IngestMessage;
import org.sleuthkit.autopsy.ingest.IngestModuleAdapter;
import org.sleuthkit.autopsy.ingest.IngestServices;
import org.sleuthkit.autopsy.ingest.IngestModuleReferenceCounter;
import org.sleuthkit.datamodel.TskData;

/**
 * Sample data source ingest module that doesn't do much. Demonstrates per
 * ingest job module settings, use of a subset of the available ingest services
 * and thread-safe sharing of per ingest job data.
 * ingest job module settings, checking for job cancellation, updating the
 * DataSourceIngestModuleProgress object, and use of a subset of the available
 * ingest services.
 */
class SampleDataSourceIngestModule extends IngestModuleAdapter implements DataSourceIngestModule {
class SampleDataSourceIngestModule implements DataSourceIngestModule {

    private static final HashMap<Long, Long> fileCountsForIngestJobs = new HashMap<>();
    private final boolean skipKnownFiles;
    private IngestJobContext context = null;
    private static final IngestModuleReferenceCounter refCounter = new IngestModuleReferenceCounter();

    SampleDataSourceIngestModule(SampleModuleIngestJobSettings settings) {
        this.skipKnownFiles = settings.skipKnownFiles();
    }

    @Override
    public void startUp(IngestJobContext context) throws IngestModuleException {
        this.context = context;

        // This method is thread-safe with per ingest job reference counted
        // management of shared data.
        initFileCount(context.getJobId());
    }

    @Override
    public ProcessResult process(Content dataSource, DataSourceIngestModuleProgress progressBar) {
        // There are two tasks to do. Set the the progress bar to determinate
        // and set the remaining number of work units to be completed to two.
        if (context.isJobCancelled()) {
            return IngestModule.ProcessResult.OK;
        }

        // There are two tasks to do.
        progressBar.switchToDeterminate(2);

        Case autopsyCase = Case.getCurrentCase();
        SleuthkitCase sleuthkitCase = autopsyCase.getSleuthkitCase();
        Services services = new Services(sleuthkitCase);
@@ -93,11 +88,14 @@ class SampleDataSourceIngestModule extends IngestModuleAdapter implements DataSo
            for (AbstractFile docFile : docFiles) {
                if (!skipKnownFiles || docFile.getKnown() != TskData.FileKnown.KNOWN) {
                    ++fileCount;
                }
            }
        }

        progressBar.progress(1);

        if (context.isJobCancelled()) {
            return IngestModule.ProcessResult.OK;
        }

        // Get files by creation time.
        long currentTime = System.currentTimeMillis() / 1000;
        long minTime = currentTime - (14 * 24 * 60 * 60); // Go back two weeks.
@@ -105,16 +103,24 @@ class SampleDataSourceIngestModule extends IngestModuleAdapter implements DataSo
            for (FsContent otherFile : otherFiles) {
                if (!skipKnownFiles || otherFile.getKnown() != TskData.FileKnown.KNOWN) {
                    ++fileCount;
                }
            }
        }

        // This method is thread-safe with per ingest job reference counted
        // management of shared data.
        addToFileCount(context.getJobId(), fileCount);

        progressBar.progress(1);
        return IngestModule.ProcessResult.OK;

        if (context.isJobCancelled()) {
            return IngestModule.ProcessResult.OK;
        }

        // Post a message to the ingest messages in box.
        String msgText = String.format("Found %d files", fileCount);
        IngestMessage message = IngestMessage.createMessage(
                IngestMessage.MessageType.DATA,
                SampleIngestModuleFactory.getModuleName(),
                msgText);
        IngestServices.getInstance().postMessage(message);

        return IngestModule.ProcessResult.OK;

        } catch (TskCoreException ex) {
            IngestServices ingestServices = IngestServices.getInstance();
            Logger logger = ingestServices.getLogger(SampleIngestModuleFactory.getModuleName());
@@ -122,38 +128,4 @@ class SampleDataSourceIngestModule extends IngestModuleAdapter implements DataSo
            return IngestModule.ProcessResult.ERROR;
        }
    }

    @Override
    public void shutDown(boolean ingestJobCancelled) {
        // This method is thread-safe with per ingest job reference counted
        // management of shared data.
        postFileCount(context.getJobId());
    }

    synchronized static void initFileCount(long ingestJobId) {
        Long refCount = refCounter.incrementAndGet(ingestJobId);
        if (refCount == 1) {
            fileCountsForIngestJobs.put(ingestJobId, 0L);
        }
    }

    synchronized static void addToFileCount(long ingestJobId, long countToAdd) {
        Long fileCount = fileCountsForIngestJobs.get(ingestJobId);
        fileCount += countToAdd;
        fileCountsForIngestJobs.put(ingestJobId, fileCount);
    }

    synchronized static void postFileCount(long ingestJobId) {
        Long refCount = refCounter.decrementAndGet(ingestJobId);
        if (refCount == 0) {
            Long filesCount = fileCountsForIngestJobs.remove(ingestJobId);
            String msgText = String.format("Found %d files", filesCount);
            IngestMessage message = IngestMessage.createMessage(
                    IngestMessage.MessageType.DATA,
                    SampleIngestModuleFactory.getModuleName(),
                    msgText);
            IngestServices.getInstance().postMessage(message);
        }
    }

}
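The fragment above is the core of the new data source module lifecycle: implement DataSourceIngestModule directly instead of extending IngestModuleAdapter, keep the IngestJobContext handed to startUp(), and poll it for cancellation between units of work. A minimal sketch under those rules follows; the class name and the elided work are hypothetical, while the interface and helper names come straight from the diff.

    import org.sleuthkit.autopsy.ingest.DataSourceIngestModule;
    import org.sleuthkit.autopsy.ingest.DataSourceIngestModuleProgress;
    import org.sleuthkit.autopsy.ingest.IngestJobContext;
    import org.sleuthkit.datamodel.Content;

    class MinimalDataSourceIngestModule implements DataSourceIngestModule {

        private IngestJobContext context;

        @Override
        public void startUp(IngestJobContext context) throws IngestModuleException {
            this.context = context; // keep the context so process() can poll for cancellation
        }

        @Override
        public ProcessResult process(Content dataSource, DataSourceIngestModuleProgress progressBar) {
            progressBar.switchToDeterminate(2); // two units of work planned

            // ... first unit of work ...
            progressBar.progress(1);

            if (context.isJobCancelled()) {
                return ProcessResult.OK; // stop quietly; the job is being torn down
            }

            // ... second unit of work ...
            progressBar.progress(1);
            return ProcessResult.OK;
        }
    }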
@@ -37,7 +37,6 @@ import org.sleuthkit.autopsy.ingest.FileIngestModule;
import org.sleuthkit.autopsy.ingest.IngestModule;
import org.sleuthkit.autopsy.ingest.IngestJobContext;
import org.sleuthkit.autopsy.ingest.IngestMessage;
import org.sleuthkit.autopsy.ingest.IngestModuleAdapter;
import org.sleuthkit.autopsy.ingest.IngestServices;
import org.sleuthkit.autopsy.ingest.ModuleDataEvent;
import org.sleuthkit.autopsy.ingest.IngestModuleReferenceCounter;
@@ -54,7 +53,7 @@ import org.sleuthkit.datamodel.TskData;
 * module settings, use of a subset of the available ingest services and
 * thread-safe sharing of per ingest job data.
 */
class SampleFileIngestModule extends IngestModuleAdapter implements FileIngestModule {
class SampleFileIngestModule implements FileIngestModule {

    private static final HashMap<Long, Long> artifactCountsForIngestJobs = new HashMap<>();
    private static int attrId = -1;
@@ -159,21 +158,23 @@ class SampleFileIngestModule extends IngestModuleAdapter implements FileIngestMo
    }

    @Override
    public void shutDown(boolean ingestJobCancelled) {
        // This method is thread-safe with per ingest job reference counted
        // management of shared data.
        reportBlackboardPostCount(context.getJobId());
    public void shutDown() {
        if (!context.isJobCancelled()) {
            // This method is thread-safe with per ingest job reference counted
            // management of shared data.
            reportBlackboardPostCount(context.getJobId());
        }
    }

    synchronized static void addToBlackboardPostCount(long ingestJobId, long countToAdd) {
        Long fileCount = artifactCountsForIngestJobs.get(ingestJobId);

        // Ensures that this job has an entry
        if (fileCount == null) {
            fileCount = 0L;
            artifactCountsForIngestJobs.put(ingestJobId, fileCount);
        }

        fileCount += countToAdd;
        artifactCountsForIngestJobs.put(ingestJobId, fileCount);
    }
@@ -189,5 +190,5 @@ class SampleFileIngestModule extends IngestModuleAdapter implements FileIngestMo
                msgText);
        IngestServices.getInstance().postMessage(message);
    }
    }
}
}
@@ -16,8 +16,9 @@ IngestMessagePanel.totalMessagesNameLabel.text=Total:
IngestMessagePanel.totalMessagesNameVal.text=-
IngestMessagePanel.totalUniqueMessagesNameLabel.text=Unique:
IngestMessagePanel.totalUniqueMessagesNameVal.text=-
IngestJob.progress.dataSourceIngest.displayName=Data Source Ingest of {0}
IngestJob.progress.fileIngest.displayName=File Ingest of {0}
IngestJob.progress.dataSourceIngest.initialDisplayName=Analyzing {0}
IngestJob.progress.dataSourceIngest.displayName={0} for {1}
IngestJob.progress.fileIngest.displayName=Analyzing files from {0}
IngestJob.progress.cancelling={0} (Cancelling...)
IngestJobConfigurationPanel.processUnallocCheckbox.toolTipText=Processes unallocated space, such as deleted files. Produces more complete results, but it may take longer to process on large images.
IngestJobConfigurationPanel.processUnallocCheckbox.text=Process Unallocated Space
@@ -26,11 +26,9 @@ import org.netbeans.api.progress.ProgressHandle;
public class DataSourceIngestModuleProgress {

    private final ProgressHandle progress;
    private final String moduleDisplayName;

    DataSourceIngestModuleProgress(ProgressHandle progress, String moduleDisplayName) {
    DataSourceIngestModuleProgress(ProgressHandle progress) {
        this.progress = progress;
        this.moduleDisplayName = moduleDisplayName;
    }

    /**
@@ -60,6 +58,6 @@ public class DataSourceIngestModuleProgress {
     * @param workUnits Number of work units performed so far by the module.
     */
    public void progress(int workUnits) {
        progress.progress(this.moduleDisplayName, workUnits);
        progress.progress("", workUnits);
    }
}
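This change is the other half of the renaming scheme introduced in Bundle.properties: the pipeline now stamps the running module's name onto the ProgressHandle's display name ("{0} for {1}"), so progress(int) no longer carries it and passes an empty detail message instead. Roughly, from the pipeline's side, with names as they appear in the diff:

    // Rename the whole bar for the module that is about to run...
    progress.setDisplayName(NbBundle.getMessage(getClass(),
            "IngestJob.progress.dataSourceIngest.displayName",
            module.getDisplayName(), dataSource.getName()));
    // ...then hand the module a progress object that only reports work units.
    module.process(dataSource, new DataSourceIngestModuleProgress(progress));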
@@ -23,6 +23,7 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.netbeans.api.progress.ProgressHandle;
import org.openide.util.NbBundle;
import org.sleuthkit.datamodel.Content;

/**
@@ -79,7 +80,10 @@ final class DataSourceIngestPipeline {
        List<IngestModuleError> errors = new ArrayList<>();
        for (DataSourceIngestModuleDecorator module : this.modules) {
            try {
                module.process(dataSource, new DataSourceIngestModuleProgress(progress, module.getDisplayName()));
                progress.setDisplayName(NbBundle.getMessage(this.getClass(),
                        "IngestJob.progress.dataSourceIngest.displayName",
                        module.getDisplayName(), dataSource.getName()));
                module.process(dataSource, new DataSourceIngestModuleProgress(progress));
            } catch (Exception ex) {
                errors.add(new IngestModuleError(module.getDisplayName(), ex));
            }
@@ -90,18 +94,6 @@ final class DataSourceIngestPipeline {
        return errors;
    }

    List<IngestModuleError> shutDown() {
        List<IngestModuleError> errors = new ArrayList<>();
        for (DataSourceIngestModuleDecorator module : this.modules) {
            try {
                module.shutDown(context.isJobCancelled());
            } catch (Exception ex) {
                errors.add(new IngestModuleError(module.getDisplayName(), ex));
            }
        }
        return errors;
    }

    private static class DataSourceIngestModuleDecorator implements DataSourceIngestModule {

        private final DataSourceIngestModule module;
@@ -129,10 +121,5 @@ final class DataSourceIngestPipeline {
        public IngestModule.ProcessResult process(Content dataSource, DataSourceIngestModuleProgress statusHelper) {
            return module.process(dataSource, statusHelper);
        }

        @Override
        public void shutDown(boolean ingestJobWasCancelled) {
            module.shutDown(ingestJobWasCancelled);
        }
    }
}
@@ -35,6 +35,6 @@ final class DataSourceIngestTask extends IngestTask {

    @Override
    void execute() throws InterruptedException {
        getIngestJob().process(dataSource);
        getIngestJob().process(this);
    }
}
@@ -21,17 +21,27 @@ package org.sleuthkit.autopsy.ingest;
import org.sleuthkit.datamodel.AbstractFile;

/**
 * Interface that must be implemented by all file ingest modules.
 * See description of IngestModule for more details on interface behavior.
 * Interface that must be implemented by all file ingest modules. See
 * description of IngestModule for more details on interface behavior.
 */
public interface FileIngestModule extends IngestModule {

    /**
     * Processes a file. Called between calls to startUp() and shutDown().
     * Will be called for each file in a data source.
     * Processes a file. Called between calls to startUp() and shutDown(). Will
     * be called for each file in a data source.
     *
     * @param file The file to analyze.
     * @return A result code indicating success or failure of the processing.
     */
    ProcessResult process(AbstractFile file);

    /**
     * Invoked by Autopsy when an ingest job is completed (either because the
     * data has been analyzed or because the job was canceled - check
     * IngestJobContext.isJobCancelled()), before the ingest module instance is
     * discarded. The module should respond by doing things like releasing
     * private resources, submitting final results, and posting a final ingest
     * message.
     */
    void shutDown();
}
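Since shutDown() no longer receives a cancellation flag, a module that posts final results is expected to ask the job context instead, exactly as the sample file module above now does. A minimal sketch against the revised interface; the class name and the count it reports are hypothetical, the APIs are those used throughout this commit.

    import org.sleuthkit.autopsy.ingest.FileIngestModule;
    import org.sleuthkit.autopsy.ingest.IngestJobContext;
    import org.sleuthkit.autopsy.ingest.IngestMessage;
    import org.sleuthkit.autopsy.ingest.IngestServices;
    import org.sleuthkit.datamodel.AbstractFile;

    class CountingFileIngestModule implements FileIngestModule {

        private IngestJobContext context;
        private long fileCount;

        @Override
        public void startUp(IngestJobContext context) throws IngestModuleException {
            this.context = context;
            this.fileCount = 0;
        }

        @Override
        public ProcessResult process(AbstractFile file) {
            ++fileCount; // called once per file between startUp() and shutDown()
            return ProcessResult.OK;
        }

        @Override
        public void shutDown() {
            // Cancellation is no longer passed in; query the job context instead.
            if (!context.isJobCancelled()) {
                IngestServices.getInstance().postMessage(IngestMessage.createMessage(
                        IngestMessage.MessageType.DATA, "Counting Module",
                        String.format("Saw %d files", fileCount)));
            }
        }
    }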
@@ -97,7 +97,7 @@ final class FileIngestPipeline {
        List<IngestModuleError> errors = new ArrayList<>();
        for (FileIngestModuleDecorator module : this.modules) {
            try {
                module.shutDown(context.isJobCancelled());
                module.shutDown();
            } catch (Exception ex) {
                errors.add(new IngestModuleError(module.getDisplayName(), ex));
            }
@@ -134,8 +134,8 @@ final class FileIngestPipeline {
        }

        @Override
        public void shutDown(boolean ingestJobWasCancelled) {
            module.shutDown(ingestJobWasCancelled);
        public void shutDown() {
            module.shutDown();
        }
    }
}
@@ -36,7 +36,7 @@ final class FileIngestTask extends IngestTask {

    @Override
    void execute() throws InterruptedException {
        getIngestJob().process(file);
        getIngestJob().process(this);
    }

    @Override
@@ -20,7 +20,9 @@ package org.sleuthkit.autopsy.ingest;

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.atomic.AtomicLong;
import java.util.logging.Level;
import org.netbeans.api.progress.ProgressHandle;
import org.netbeans.api.progress.ProgressHandleFactory;
@@ -33,21 +35,65 @@ import org.sleuthkit.datamodel.Content;
final class IngestJob {

    private static final Logger logger = Logger.getLogger(IngestManager.class.getName());
    private static final AtomicLong nextIngestJobId = new AtomicLong(0L);
    private static final ConcurrentHashMap<Long, IngestJob> ingestJobsById = new ConcurrentHashMap<>();
    private static final IngestScheduler taskScheduler = IngestScheduler.getInstance();
    private final long id;
    private final Content rootDataSource;
    private final Content dataSource;
    private final List<IngestModuleTemplate> ingestModuleTemplates;
    private final boolean processUnallocatedSpace;
    private final LinkedBlockingQueue<DataSourceIngestPipeline> dataSourceIngestPipelines = new LinkedBlockingQueue<>();
    private final LinkedBlockingQueue<FileIngestPipeline> fileIngestPipelines = new LinkedBlockingQueue<>();
    private long estimatedFilesToProcess = 0L; // Guarded by this
    private long processedFiles = 0L; // Guarded by this
    private DataSourceIngestPipeline dataSourceIngestPipeline;
    private ProgressHandle dataSourceTasksProgress;
    private ProgressHandle fileTasksProgress;
    private volatile boolean cancelled = false;

    /**
     * Creates an ingest job for a data source.
     *
     * @param dataSource The data source to ingest.
     * @param ingestModuleTemplates The ingest module templates to use to create
     * the ingest pipelines for the job.
     * @param processUnallocatedSpace Whether or not the job should include
     * processing of unallocated space.
     * @return A collection of ingest module start up errors, empty on success.
     * @throws InterruptedException
     */
    static List<IngestModuleError> startIngestJob(Content dataSource, List<IngestModuleTemplate> ingestModuleTemplates, boolean processUnallocatedSpace) throws InterruptedException {
        long jobId = nextIngestJobId.incrementAndGet();
        IngestJob job = new IngestJob(jobId, dataSource, ingestModuleTemplates, processUnallocatedSpace);
        ingestJobsById.put(jobId, job);
        IngestManager.getInstance().fireIngestJobStarted(jobId);
        List<IngestModuleError> errors = job.start();
        if (errors.isEmpty()) {
            taskScheduler.scheduleTasksForIngestJob(job, dataSource);
        } else {
            ingestJobsById.remove(jobId);
            IngestManager.getInstance().fireIngestJobCancelled(jobId);
        }
        return errors;
    }

    static boolean ingestJobsAreRunning() {
        for (IngestJob job : ingestJobsById.values()) {
            if (!job.isCancelled()) {
                return true;
            }
        }
        return false;
    }

    static void cancelAllIngestJobs() {
        for (IngestJob job : ingestJobsById.values()) {
            job.cancel();
        }
    }

    IngestJob(long id, Content dataSource, List<IngestModuleTemplate> ingestModuleTemplates, boolean processUnallocatedSpace) {
        this.id = id;
        this.rootDataSource = dataSource;
        this.dataSource = dataSource;
        this.ingestModuleTemplates = ingestModuleTemplates;
        this.processUnallocatedSpace = processUnallocatedSpace;
    }
@@ -60,7 +106,7 @@ final class IngestJob {
        return processUnallocatedSpace;
    }

    List<IngestModuleError> startUp() throws InterruptedException {
    List<IngestModuleError> start() throws InterruptedException {
        List<IngestModuleError> errors = startUpIngestPipelines();
        if (errors.isEmpty()) {
            startFileIngestProgressBar();
@@ -71,20 +117,12 @@ final class IngestJob {

    private List<IngestModuleError> startUpIngestPipelines() throws InterruptedException {
        IngestJobContext context = new IngestJobContext(this);

        dataSourceIngestPipeline = new DataSourceIngestPipeline(context, ingestModuleTemplates);
        List<IngestModuleError> errors = new ArrayList<>();
        errors.addAll(dataSourceIngestPipeline.startUp());

        int numberOfPipelines = IngestManager.getInstance().getNumberOfDataSourceIngestThreads();
        for (int i = 0; i < numberOfPipelines; ++i) {
            DataSourceIngestPipeline pipeline = new DataSourceIngestPipeline(context, ingestModuleTemplates);
            errors.addAll(pipeline.startUp());
            dataSourceIngestPipelines.put(pipeline);
            if (!errors.isEmpty()) {
                // No need to accumulate presumably redundant errors.
                break;
            }
        }

        numberOfPipelines = IngestManager.getInstance().getNumberOfFileIngestThreads();
        int numberOfPipelines = IngestManager.getInstance().getNumberOfFileIngestThreads();
        for (int i = 0; i < numberOfPipelines; ++i) {
            FileIngestPipeline pipeline = new FileIngestPipeline(context, ingestModuleTemplates);
            errors.addAll(pipeline.startUp());
@@ -96,13 +134,13 @@ final class IngestJob {
        }

        logIngestModuleErrors(errors);
        return errors; // Returned so UI can report to user.
        return errors;
    }

    private void startDataSourceIngestProgressBar() {
        final String displayName = NbBundle.getMessage(this.getClass(),
                "IngestJob.progress.dataSourceIngest.displayName",
                rootDataSource.getName());
                "IngestJob.progress.dataSourceIngest.initialDisplayName",
                dataSource.getName());
        dataSourceTasksProgress = ProgressHandleFactory.createHandle(displayName, new Cancellable() {
            @Override
            public boolean cancel() {
@@ -123,7 +161,7 @@ final class IngestJob {
    private void startFileIngestProgressBar() {
        final String displayName = NbBundle.getMessage(this.getClass(),
                "IngestJob.progress.fileIngest.displayName",
                rootDataSource.getName());
                dataSource.getName());
        fileTasksProgress = ProgressHandleFactory.createHandle(displayName, new Cancellable() {
            @Override
            public boolean cancel() {
@@ -136,32 +174,32 @@ final class IngestJob {
                return true;
            }
        });
        estimatedFilesToProcess = rootDataSource.accept(new GetFilesCountVisitor());
        estimatedFilesToProcess = dataSource.accept(new GetFilesCountVisitor());
        fileTasksProgress.start();
        fileTasksProgress.switchToDeterminate((int) estimatedFilesToProcess);
    }

    void process(Content dataSource) throws InterruptedException {
    void process(DataSourceIngestTask task) throws InterruptedException {
        // If the job is not cancelled, complete the task, otherwise just flush
        // it. In either case, the task counter needs to be decremented and the
        // shut down check needs to occur.
        // it.
        if (!isCancelled()) {
            List<IngestModuleError> errors = new ArrayList<>();
            DataSourceIngestPipeline pipeline = dataSourceIngestPipelines.take();
            errors.addAll(pipeline.process(dataSource, dataSourceTasksProgress));
            errors.addAll(dataSourceIngestPipeline.process(task.getDataSource(), dataSourceTasksProgress));
            if (!errors.isEmpty()) {
                logIngestModuleErrors(errors);
            }
            dataSourceIngestPipelines.put(pipeline);
            dataSourceTasksProgress.finish();
        }
        if (taskScheduler.isLastTaskForIngestJob(task)) {
            finish();
        }
    }

    void process(AbstractFile file) throws InterruptedException {
    void process(FileIngestTask task) throws InterruptedException {
        // If the job is not cancelled, complete the task, otherwise just flush
        // it. In either case, the task counter needs to be decremented and the
        // shut down check needs to occur.
        // it.
        if (!isCancelled()) {
            List<IngestModuleError> errors = new ArrayList<>();
            AbstractFile file = task.getFile();
            synchronized (this) {
                ++processedFiles;
                if (processedFiles <= estimatedFilesToProcess) {
@@ -171,29 +209,33 @@ final class IngestJob {
                }
            }
            FileIngestPipeline pipeline = fileIngestPipelines.take();
            List<IngestModuleError> errors = new ArrayList<>();
            errors.addAll(pipeline.process(file));
            fileIngestPipelines.put(pipeline);
            if (!errors.isEmpty()) {
                logIngestModuleErrors(errors);
            }
        }
        if (taskScheduler.isLastTaskForIngestJob(task)) {
            finish();
        }
    }

    void shutDown() {
    private void finish() {
        List<IngestModuleError> errors = new ArrayList<>();
        while (!dataSourceIngestPipelines.isEmpty()) {
            DataSourceIngestPipeline pipeline = dataSourceIngestPipelines.poll();
            errors.addAll(pipeline.shutDown());
        }
        while (!fileIngestPipelines.isEmpty()) {
            FileIngestPipeline pipeline = fileIngestPipelines.poll();
            errors.addAll(pipeline.shutDown());
        }
        fileTasksProgress.finish();
        dataSourceTasksProgress.finish();
        if (!errors.isEmpty()) {
            logIngestModuleErrors(errors);
        }

        ingestJobsById.remove(id);
        if (!cancelled) {
            IngestManager.getInstance().fireIngestJobCompleted(id);
        }
    }

    private void logIngestModuleErrors(List<IngestModuleError> errors) {
@@ -208,7 +250,7 @@ final class IngestJob {

    void cancel() {
        cancelled = true;
        fileTasksProgress.finish();
        fileTasksProgress.finish();
        dataSourceTasksProgress.finish();
        IngestManager.getInstance().fireIngestJobCancelled(id);
    }
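The two process(...) methods above share one idea: each ingest thread checks a per-job pipeline copy out of a LinkedBlockingQueue, runs the task through it, and returns it, so no pipeline instance is ever used by two threads at once, and whichever thread completes the job's last task calls finish(). The checkout step, reduced to its core; Pipeline and Task are stand-ins for the real classes, and the diff returns the pipeline after processing rather than in a finally block.

    void processTask(LinkedBlockingQueue<Pipeline> pipelines, Task task)
            throws InterruptedException {
        Pipeline pipeline = pipelines.take(); // blocks until a per-job copy is free
        try {
            pipeline.process(task);           // exclusive use on this thread
        } finally {
            pipelines.put(pipeline);          // hand the copy back for the next task
        }
    }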
@@ -60,7 +60,7 @@ public final class IngestJobContext {
     */
    public void addFiles(List<AbstractFile> files) {
        for (AbstractFile file : files) {
            IngestScheduler.getInstance().addFileToIngestJob(ingestJob, file);
            IngestScheduler.getInstance().addFileTaskToIngestJob(ingestJob, file);
        }
    }
}
@@ -56,7 +56,7 @@ public class IngestManager {
    private final IngestMonitor ingestMonitor = new IngestMonitor();
    private final ExecutorService startIngestJobsThreadPool = Executors.newSingleThreadExecutor();
    private final ExecutorService dataSourceIngestThreadPool = Executors.newSingleThreadExecutor();
    private final ExecutorService fileIngestThreadPool = Executors.newFixedThreadPool(MAX_NUMBER_OF_FILE_INGEST_THREADS);
    private final ExecutorService fileIngestThreadPool;
    private final ExecutorService fireIngestEventsThreadPool = Executors.newSingleThreadExecutor();
    private final AtomicLong nextThreadId = new AtomicLong(0L);
    private final ConcurrentHashMap<Long, Future<Void>> startIngestJobThreads = new ConcurrentHashMap<>(); // Maps thread ids to cancellation handles.
@@ -80,11 +80,13 @@ public class IngestManager {
     */
    private IngestManager() {
        startDataSourceIngestThread();

        numberOfFileIngestThreads = UserPreferences.numberOfFileIngestThreads();
        if ((numberOfFileIngestThreads < MIN_NUMBER_OF_FILE_INGEST_THREADS) || (numberOfFileIngestThreads > MAX_NUMBER_OF_FILE_INGEST_THREADS)) {
            numberOfFileIngestThreads = DEFAULT_NUMBER_OF_FILE_INGEST_THREADS;
            UserPreferences.setNumberOfFileIngestThreads(numberOfFileIngestThreads);
        }
        fileIngestThreadPool = Executors.newFixedThreadPool(numberOfFileIngestThreads);
        for (int i = 0; i < numberOfFileIngestThreads; ++i) {
            startFileIngestThread();
        }
@@ -165,7 +167,7 @@ public class IngestManager {
     * @return True if any ingest jobs are in progress, false otherwise.
     */
    public boolean isIngestRunning() {
        return scheduler.ingestJobsAreRunning();
        return IngestJob.ingestJobsAreRunning();
    }

    public void cancelAllIngestJobs() {
@@ -184,9 +186,8 @@ public class IngestManager {
            }
        }

        // Cancel all the jobs already created. This will make the the ingest
        // threads flush out any lingering ingest tasks without processing them.
        scheduler.cancelAllIngestJobs();
        // Cancel all the jobs already created.
        IngestJob.cancelAllIngestJobs();
    }

    /**
@@ -432,7 +433,7 @@ public class IngestManager {
        }

        // Start an ingest job for the data source.
        List<IngestModuleError> errors = scheduler.startIngestJob(dataSource, moduleTemplates, processUnallocatedSpace);
        List<IngestModuleError> errors = IngestJob.startIngestJob(dataSource, moduleTemplates, processUnallocatedSpace);
        if (!errors.isEmpty()) {
            // Report the errors to the user. They have already been logged.
            StringBuilder moduleStartUpErrors = new StringBuilder();
@@ -489,7 +490,6 @@ public class IngestManager {
                try {
                    IngestTask task = tasks.getNextTask(); // Blocks.
                    task.execute();
                    scheduler.ingestTaskIsCompleted(task);
                } catch (InterruptedException ex) {
                    break;
                }
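The constructor change above sizes the file ingest thread pool from user preferences instead of always allocating the maximum, falling back to the default when the stored value is out of range. The sizing rule as a stand-alone sketch; the constant names mirror the diff, and their values are whatever IngestManager defines.

    static int clampFileIngestThreadCount(int requested) {
        if (requested < MIN_NUMBER_OF_FILE_INGEST_THREADS
                || requested > MAX_NUMBER_OF_FILE_INGEST_THREADS) {
            return DEFAULT_NUMBER_OF_FILE_INGEST_THREADS; // out-of-range preference: fall back
        }
        return requested;
    }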
@@ -20,25 +20,25 @@ package org.sleuthkit.autopsy.ingest;

/**
 * The interface that must be implemented by all ingest modules.
 *
 *
 * Autopsy will generally use several instances of an ingest module for each
 * ingest job it performs (one for each thread that it is using).
 *
 * Autopsy will call startUp() before any data is processed, will pass any
 * data to be analyzed into the process() method (FileIngestModule.process() or DataSourceIngestModule.process()),
 * and call shutDown() after
 * either all data is analyzed or the has has cancelled the job.
 * ingest job it performs (one for each thread that it is using).
 *
 * Autopsy will call startUp() before any data is processed, will pass any data
 * to be analyzed into the process() method (FileIngestModule.process() or
 * DataSourceIngestModule.process()), and call shutDown() after either all data
 * is analyzed or the user has canceled the job.
 *
 * Autopsy may use multiple threads to complete an ingest job, but it is
 * guaranteed that a module instance will always be called from a single thread.
 * Therefore, you can easily have thread-safe code by not using any static
 * guaranteed that a module instance will always be called from a single thread.
 * Therefore, you can easily have thread-safe code by not using any static
 * member variables.
 *
 * If the module instances must share resources, the modules are
 * responsible for synchronizing access to the shared resources and doing
 * reference counting as required to release those resources correctly. Also,
 * more than one ingest job may be in progress at any given time. This must also
 * be taken into consideration when sharing resources between module instances.
 * If the module instances must share resources, the modules are responsible for
 * synchronizing access to the shared resources and doing reference counting as
 * required to release those resources correctly. Also, more than one ingest job
 * may be in progress at any given time. This must also be taken into
 * consideration when sharing resources between module instances.
 *
 * TIP: An ingest module that does not require initialization or clean up may
 * extend the abstract IngestModuleAdapter class to get a default "do nothing"
@@ -71,29 +71,14 @@ public interface IngestModule {
    /**
     * Invoked by Autopsy to allow an ingest module instance to set up any
     * internal data structures and acquire any private resources it will need
     * during an ingest job.
     *
     * If the module depends on loading any resources, it should do so in this
     * method so that it can throw an exception in the case of an error and
     * alert the user. Exceptions that are thrown from process() and shutDown()
     * are logged, but do not stop processing of the data source.
     *
     * On error, throw a IngestModuleException.
     * during an ingest job. If the module depends on loading any resources, it
     * should do so in this method so that it can throw an exception in the case
     * of an error and alert the user. Exceptions that are thrown from process()
     * and shutDown() are logged, but do not stop processing of the data source.
     *
     * @param context Provides data and services specific to the ingest job and
     * the ingest pipeline of which the module is a part.
     * @throws org.sleuthkit.autopsy.ingest.IngestModule.IngestModuleException
     */
    void startUp(IngestJobContext context) throws IngestModuleException;

    /**
     * Invoked by Autopsy when an ingest job is completed (either because the
     * data has been analyzed or because the job was cancelled), before the ingest
     * module instance is discarded. The module should respond by doing things
     * like releasing private resources, submitting final results, and posting a
     * final ingest message.
     * @param ingestJobWasCancelled True if this is being called because the user
     * cancelled the job.
     */
    void shutDown(boolean ingestJobWasCancelled);
}
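The javadoc's warning about shared resources is what the IngestModuleReferenceCounter used throughout this commit exists for: the first module instance to start on a job initializes the shared state, and the last one to shut down publishes and releases it. The idiom, mirroring the sample modules above; the map and method names are illustrative.

    import java.util.HashMap;
    import org.sleuthkit.autopsy.ingest.IngestModuleReferenceCounter;

    private static final HashMap<Long, Long> sharedCountsForJobs = new HashMap<>();
    private static final IngestModuleReferenceCounter refCounter = new IngestModuleReferenceCounter();

    synchronized static void onStartUp(long jobId) {
        if (refCounter.incrementAndGet(jobId) == 1) {
            sharedCountsForJobs.put(jobId, 0L); // first module instance for this job
        }
    }

    synchronized static void onShutDown(long jobId) {
        if (refCounter.decrementAndGet(jobId) == 0) {
            Long total = sharedCountsForJobs.remove(jobId); // last instance for this job
            // ... post the final result for this job using "total" ...
        }
    }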
@@ -1,33 +0,0 @@
/*
 * Autopsy Forensic Browser
 *
 * Copyright 2014 Basis Technology Corp.
 * Contact: carrier <at> sleuthkit <dot> org
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.sleuthkit.autopsy.ingest;

/**
 * An adapter that provides a default implementation of the IngestModule
 * interface.
 */
public abstract class IngestModuleAdapter implements IngestModule {
    @Override
    public void startUp(IngestJobContext context) throws IngestModuleException {
    }

    @Override
    public void shutDown(boolean ingestJobCancelled) {
    }
}
@@ -23,9 +23,7 @@ import java.util.Collection;
import java.util.Comparator;
import java.util.List;
import java.util.TreeSet;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.atomic.AtomicLong;
import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.regex.Matcher;
@@ -42,7 +40,6 @@ final class IngestScheduler {
    private static final IngestScheduler instance = new IngestScheduler();
    private static final Logger logger = Logger.getLogger(IngestScheduler.class.getName());
    private static final int FAT_NTFS_FLAGS = TskData.TSK_FS_TYPE_ENUM.TSK_FS_TYPE_FAT12.getValue() | TskData.TSK_FS_TYPE_ENUM.TSK_FS_TYPE_FAT16.getValue() | TskData.TSK_FS_TYPE_ENUM.TSK_FS_TYPE_FAT32.getValue() | TskData.TSK_FS_TYPE_ENUM.TSK_FS_TYPE_NTFS.getValue();
    private final ConcurrentHashMap<Long, IngestJob> ingestJobsById = new ConcurrentHashMap<>();
    private final LinkedBlockingQueue<DataSourceIngestTask> dataSourceTasks = new LinkedBlockingQueue<>();
    private final TreeSet<FileIngestTask> rootDirectoryTasks = new TreeSet<>(new RootDirectoryTaskComparator()); // Guarded by this
    private final List<FileIngestTask> directoryTasks = new ArrayList<>(); // Guarded by this
@@ -50,7 +47,6 @@ final class IngestScheduler {
    private final List<IngestTask> tasksInProgress = new ArrayList<>(); // Guarded by this
    private final DataSourceIngestTaskQueue dataSourceTaskDispenser = new DataSourceIngestTaskQueue();
    private final FileIngestTaskQueue fileTaskDispenser = new FileIngestTaskQueue();
    private final AtomicLong nextIngestJobId = new AtomicLong(0L);

    static IngestScheduler getInstance() {
        return instance;
@@ -59,42 +55,7 @@ final class IngestScheduler {
    private IngestScheduler() {
    }

    /**
     * Creates an ingest job for a data source.
     *
     * @param rootDataSource The data source to ingest.
     * @param ingestModuleTemplates The ingest module templates to use to create
     * the ingest pipelines for the job.
     * @param processUnallocatedSpace Whether or not the job should include
     * processing of unallocated space.
     * @return A collection of ingest module start up errors, empty on success.
     * @throws InterruptedException
     */
    List<IngestModuleError> startIngestJob(Content dataSource, List<IngestModuleTemplate> ingestModuleTemplates, boolean processUnallocatedSpace) throws InterruptedException {
        long jobId = nextIngestJobId.incrementAndGet();
        IngestJob job = new IngestJob(jobId, dataSource, ingestModuleTemplates, processUnallocatedSpace);
        ingestJobsById.put(jobId, job);
        IngestManager.getInstance().fireIngestJobStarted(jobId);
        List<IngestModuleError> errors = job.startUp();
        if (errors.isEmpty()) {
            addDataSourceToIngestJob(job, dataSource);
        } else {
            ingestJobsById.remove(jobId);
            IngestManager.getInstance().fireIngestJobCancelled(jobId);
        }
        return errors;
    }

    boolean ingestJobsAreRunning() {
        for (IngestJob job : ingestJobsById.values()) {
            if (!job.isCancelled()) {
                return true;
            }
        }
        return false;
    }

    synchronized void addDataSourceToIngestJob(IngestJob job, Content dataSource) throws InterruptedException {
    synchronized void scheduleTasksForIngestJob(IngestJob job, Content dataSource) throws InterruptedException {
        // Enqueue a data source ingest task for the data source.
        // If the thread executing this code is interrupted, it is because the
        // the number of ingest threads has been decreased while ingest jobs are
@@ -153,7 +114,7 @@ final class IngestScheduler {
        updateFileTaskQueues(null);
    }

    void addFileToIngestJob(IngestJob job, AbstractFile file) {
    void addFileTaskToIngestJob(IngestJob job, AbstractFile file) {
        FileIngestTask task = new FileIngestTask(job, file);
        if (shouldEnqueueFileTask(task)) {
            addTaskToFileQueue(task);
@@ -271,12 +232,6 @@ final class IngestScheduler {
        return true;
    }

    void cancelAllIngestJobs() {
        for (IngestJob job : ingestJobsById.values()) {
            job.cancel();
        }
    }

    IngestTaskQueue getDataSourceIngestTaskQueue() {
        return dataSourceTaskDispenser;
    }
@@ -285,16 +240,7 @@ final class IngestScheduler {
        return fileTaskDispenser;
    }

    void ingestTaskIsCompleted(IngestTask completedTask) {
        if (ingestJobIsCompleted(completedTask)) {
            IngestJob job = completedTask.getIngestJob();
            job.shutDown();
            ingestJobsById.remove(job.getId());
            IngestManager.getInstance().fireIngestJobCompleted(job.getId());
        }
    }

    private synchronized boolean ingestJobIsCompleted(IngestTask completedTask) {
    synchronized boolean isLastTaskForIngestJob(IngestTask completedTask) {
        tasksInProgress.remove(completedTask);
        IngestJob job = completedTask.getIngestJob();
        long jobId = job.getId();
@@ -36,7 +36,6 @@ import java.util.concurrent.atomic.AtomicInteger;
import java.util.logging.Level;
import org.sleuthkit.autopsy.coreutils.ImageUtils;
import org.sleuthkit.autopsy.coreutils.Logger;
import org.sleuthkit.autopsy.ingest.IngestModuleAdapter;
import org.sleuthkit.autopsy.ingest.FileIngestModule;
import org.sleuthkit.autopsy.ingest.IngestJobContext;
import org.sleuthkit.autopsy.ingest.IngestServices;
@@ -56,7 +55,7 @@ import org.sleuthkit.datamodel.TskData.TSK_DB_FILES_TYPE_ENUM;
 * files. Ingests an image file and, if available, adds it's date, latitude,
 * longitude, altitude, device model, and device make to a blackboard artifact.
 */
public final class ExifParserFileIngestModule extends IngestModuleAdapter implements FileIngestModule {
public final class ExifParserFileIngestModule implements FileIngestModule {

    private static final Logger logger = Logger.getLogger(ExifParserFileIngestModule.class.getName());
    private final IngestServices services = IngestServices.getInstance();
@@ -198,7 +197,7 @@ public final class ExifParserFileIngestModule extends IngestModuleAdapter implem
    }

    @Override
    public void shutDown(boolean ingestJobCancelled) {
    public void shutDown() {
        // We only need to check for this final event on the last module per job
        if (refCounter.decrementAndGet(jobId) == 0) {
            if (filesToFire) {
@@ -26,7 +26,6 @@ import java.util.List;
import java.util.logging.Level;
import org.openide.util.NbBundle;
import org.sleuthkit.autopsy.coreutils.Logger;
import org.sleuthkit.autopsy.ingest.IngestModuleAdapter;
import org.sleuthkit.autopsy.ingest.FileIngestModule;
import org.sleuthkit.autopsy.ingest.IngestJobContext;
import org.sleuthkit.autopsy.ingest.IngestMessage;
@@ -44,7 +43,7 @@ import org.sleuthkit.datamodel.TskException;
/**
 * Flags mismatched filename extensions based on file signature.
 */
public class FileExtMismatchIngestModule extends IngestModuleAdapter implements FileIngestModule {
public class FileExtMismatchIngestModule implements FileIngestModule {

    private static final Logger logger = Logger.getLogger(FileExtMismatchIngestModule.class.getName());
    private final IngestServices services = IngestServices.getInstance();
@@ -173,7 +172,7 @@ public class FileExtMismatchIngestModule extends IngestModuleAdapter implements
    }

    @Override
    public void shutDown(boolean ingestJobCancelled) {
    public void shutDown() {
        // We only need to post the summary msg from the last module per job
        if (refCounter.decrementAndGet(jobId) == 0) {
            IngestJobTotals jobTotals;
@@ -34,14 +34,13 @@ import org.sleuthkit.datamodel.TskData;
import org.sleuthkit.datamodel.TskData.FileKnown;
import org.sleuthkit.datamodel.TskException;
import org.sleuthkit.autopsy.ingest.IngestModule.ProcessResult;
import org.sleuthkit.autopsy.ingest.IngestModuleAdapter;
import org.sleuthkit.autopsy.ingest.IngestModuleReferenceCounter;

/**
 * Detects the type of a file based on signature (magic) values. Posts results
 * to the blackboard.
 */
public class FileTypeIdIngestModule extends IngestModuleAdapter implements FileIngestModule {
public class FileTypeIdIngestModule implements FileIngestModule {

    private static final Logger logger = Logger.getLogger(FileTypeIdIngestModule.class.getName());
    private static final long MIN_FILE_SIZE = 512;
@@ -129,7 +128,7 @@ public class FileTypeIdIngestModule extends IngestModuleAdapter implements FileI
    }

    @Override
    public void shutDown(boolean ingestJobCancelled) {
    public void shutDown() {
        // We only need to post the summary msg from the last module per job
        if (refCounter.decrementAndGet(jobId) == 0) {
            IngestJobTotals jobTotals;
@@ -46,7 +46,6 @@ import org.netbeans.api.progress.ProgressHandleFactory;
import org.sleuthkit.autopsy.casemodule.services.FileManager;
import org.sleuthkit.autopsy.ingest.FileIngestModule;
import org.sleuthkit.autopsy.ingest.IngestMessage;
import org.sleuthkit.autopsy.ingest.IngestModuleAdapter;
import org.sleuthkit.autopsy.ingest.IngestMonitor;
import org.sleuthkit.autopsy.ingest.ModuleContentEvent;
import org.sleuthkit.datamodel.BlackboardArtifact;
@@ -66,7 +65,7 @@ import org.sleuthkit.autopsy.ingest.IngestModuleReferenceCounter;
 * 7Zip ingest module extracts supported archives, adds extracted DerivedFiles,
 * reschedules extracted DerivedFiles for ingest.
 */
public final class SevenZipIngestModule extends IngestModuleAdapter implements FileIngestModule {
public final class SevenZipIngestModule implements FileIngestModule {

    private static final Logger logger = Logger.getLogger(SevenZipIngestModule.class.getName());
    private IngestServices services = IngestServices.getInstance();
@@ -186,7 +185,7 @@ public final class SevenZipIngestModule extends IngestModuleAdapter implements F
    }

    @Override
    public void shutDown(boolean ingestJobCancelled) {
    public void shutDown() {
        // We don't need the value, but for cleanliness and consistency
        refCounter.decrementAndGet(jobId);
    }
@@ -42,12 +42,11 @@ import org.sleuthkit.datamodel.TskCoreException;
import org.sleuthkit.datamodel.TskData;
import org.sleuthkit.datamodel.TskException;
import org.sleuthkit.autopsy.hashdatabase.HashDbManager.HashDb;
import org.sleuthkit.autopsy.ingest.IngestModuleAdapter;
import org.sleuthkit.autopsy.ingest.FileIngestModule;
import org.sleuthkit.autopsy.ingest.IngestModuleReferenceCounter;
import org.sleuthkit.datamodel.HashInfo;

public class HashDbIngestModule extends IngestModuleAdapter implements FileIngestModule {
public class HashDbIngestModule implements FileIngestModule {
    private static final Logger logger = Logger.getLogger(HashDbIngestModule.class.getName());
    private static final int MAX_COMMENT_SIZE = 500;
    private final IngestServices services = IngestServices.getInstance();
@@ -359,7 +358,7 @@ public class HashDbIngestModule extends IngestModuleAdapter implements FileInges
    }

    @Override
    public void shutDown(boolean ingestJobCancelled) {
    public void shutDown() {
        if (refCounter.decrementAndGet(jobId) == 0) {
            postSummary();
        }
@@ -36,7 +36,6 @@ import org.sleuthkit.autopsy.ingest.FileIngestModule;
import org.sleuthkit.autopsy.ingest.IngestServices;
import org.sleuthkit.autopsy.ingest.IngestMessage;
import org.sleuthkit.autopsy.ingest.IngestMessage.MessageType;
import org.sleuthkit.autopsy.ingest.IngestModuleAdapter;
import org.sleuthkit.autopsy.ingest.IngestJobContext;
import org.sleuthkit.autopsy.ingest.IngestModuleReferenceCounter;
import org.sleuthkit.autopsy.keywordsearch.Ingester.IngesterException;
@@ -55,7 +54,7 @@ import org.sleuthkit.datamodel.TskData.FileKnown;
 * on currently configured lists for ingest and writes results to blackboard
 * Reports interesting events to Inbox and to viewers
 */
public final class KeywordSearchIngestModule extends IngestModuleAdapter implements FileIngestModule {
public final class KeywordSearchIngestModule implements FileIngestModule {

    enum UpdateFrequency {

@@ -93,6 +92,7 @@ public final class KeywordSearchIngestModule extends IngestModuleAdapter impleme
    private static AtomicInteger instanceCount = new AtomicInteger(0); //just used for logging
    private int instanceNum = 0;
    private static final IngestModuleReferenceCounter refCounter = new IngestModuleReferenceCounter();
    private IngestJobContext context;

    private enum IngestStatus {

@@ -136,6 +136,7 @@ public final class KeywordSearchIngestModule extends IngestModuleAdapter impleme
        caseHandle = Case.getCurrentCase().getSleuthkitCase();
        tikaFormatDetector = new Tika();
        ingester = Server.getIngester();
        this.context = context;

        // increment the module reference count
        // if first instance of this module for this job then check the server and existence of keywords
@@ -248,14 +249,14 @@ public final class KeywordSearchIngestModule extends IngestModuleAdapter impleme
     * Cleanup resources, threads, timers
     */
    @Override
    public void shutDown(boolean ingestJobCancelled) {
    public void shutDown() {
        logger.log(Level.INFO, "Instance {0}", instanceNum); //NON-NLS

        if (initialized == false) {
            return;
        }

        if (ingestJobCancelled) {
        if (context.isJobCancelled()) {
            logger.log(Level.INFO, "Ingest job cancelled"); //NON-NLS
            stop();
            return;
@@ -36,13 +36,12 @@ import org.sleuthkit.autopsy.ingest.IngestMessage;
import org.sleuthkit.autopsy.ingest.IngestMessage.MessageType;
import org.sleuthkit.datamodel.Content;
import org.sleuthkit.autopsy.ingest.IngestModule.ProcessResult;
import org.sleuthkit.autopsy.ingest.IngestModuleAdapter;
import org.sleuthkit.autopsy.ingest.IngestJobContext;

/**
 * Recent activity image ingest module
 */
public final class RAImageIngestModule extends IngestModuleAdapter implements DataSourceIngestModule {
public final class RAImageIngestModule implements DataSourceIngestModule {

    private static final Logger logger = Logger.getLogger(RAImageIngestModule.class.getName());
    private final List<Extract> extracters = new ArrayList<>();
@@ -160,14 +159,9 @@ public final class RAImageIngestModule extends IngestModuleAdapter implements Da
                historyMsg.toString());
        services.postMessage(inboxMsg);

        return ProcessResult.OK;
    }

    @Override
    public void shutDown(boolean ingestJobCancelled) {
        if (ingestJobCancelled) {
        if (context.isJobCancelled()) {
            stop();
            return;
            return ProcessResult.OK;
        }

        for (int i = 0; i < extracters.size(); i++) {
@@ -180,6 +174,8 @@ public final class RAImageIngestModule extends IngestModuleAdapter implements Da
                            extracter.getName()));
            }
        }

        return ProcessResult.OK;
    }

    private void stop() {
@@ -29,7 +29,6 @@ import org.sleuthkit.autopsy.casemodule.Case;
import org.sleuthkit.autopsy.coreutils.Logger;
import org.sleuthkit.autopsy.coreutils.PlatformUtil;
import org.sleuthkit.autopsy.ingest.FileIngestModule;
import org.sleuthkit.autopsy.ingest.IngestModuleAdapter;
import org.sleuthkit.autopsy.ingest.IngestJobContext;
import org.sleuthkit.autopsy.ingest.IngestServices;
import org.sleuthkit.autopsy.ingest.ModuleContentEvent;
@@ -50,7 +49,7 @@ import org.sleuthkit.datamodel.Volume;
/**
 * Scalpel carving ingest module
 */
class ScalpelCarverIngestModule extends IngestModuleAdapter implements FileIngestModule {
class ScalpelCarverIngestModule implements FileIngestModule {

    private static final Logger logger = Logger.getLogger(ScalpelCarverIngestModule.class.getName());
    private final String MODULE_OUTPUT_DIR_NAME = "ScalpelCarver"; //NON-NLS
@@ -228,4 +227,8 @@ class ScalpelCarverIngestModule extends IngestModuleAdapter implements FileInges

        return ProcessResult.OK;
    }

    @Override
    public void shutDown() {
    }
}
@@ -23,7 +23,6 @@ import java.security.NoSuchAlgorithmException;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.xml.bind.DatatypeConverter;
import org.sleuthkit.autopsy.ingest.IngestModuleAdapter;
import org.sleuthkit.autopsy.ingest.DataSourceIngestModuleProgress;
import org.sleuthkit.autopsy.ingest.IngestMessage;
import org.sleuthkit.autopsy.ingest.IngestMessage.MessageType;
@@ -41,7 +40,7 @@ import org.openide.util.NbBundle;
 * Format (EWF) E01 image file by generating a hash of the file and comparing it
 * to the value stored in the image.
 */
public class EwfVerifyIngestModule extends IngestModuleAdapter implements DataSourceIngestModule {
public class EwfVerifyIngestModule implements DataSourceIngestModule {

    private static final Logger logger = Logger.getLogger(EwfVerifyIngestModule.class.getName());
    private static final long DEFAULT_CHUNK_SIZE = 32 * 1024;
@@ -62,7 +61,6 @@ public class EwfVerifyIngestModule extends IngestModuleAdapter implements DataSo
    public void startUp(IngestJobContext context) throws IngestModuleException {
        this.context = context;
        verified = false;
        skipped = false;
        img = null;
        imgName = "";
        storedHash = "";
@@ -104,7 +102,6 @@ public class EwfVerifyIngestModule extends IngestModuleAdapter implements DataSo
                    NbBundle.getMessage(this.getClass(),
                            "EwfVerifyIngestModule.process.skipNonEwf",
                            imgName)));
            skipped = true;
            return ProcessResult.OK;
        }

@@ -169,26 +166,22 @@ public class EwfVerifyIngestModule extends IngestModuleAdapter implements DataSo
        calculatedHash = DatatypeConverter.printHexBinary(messageDigest.digest()).toLowerCase();
        verified = calculatedHash.equals(storedHash);
        logger.log(Level.INFO, "Hash calculated from {0}: {1}", new Object[]{imgName, calculatedHash}); //NON-NLS

        logger.log(Level.INFO, "complete() {0}", EwfVerifierModuleFactory.getModuleName()); //NON-NLS
        String msg;
        if (verified) {
            msg = NbBundle.getMessage(this.getClass(), "EwfVerifyIngestModule.shutDown.verified");
        } else {
            msg = NbBundle.getMessage(this.getClass(), "EwfVerifyIngestModule.shutDown.notVerified");
        }
        String extra = NbBundle
                .getMessage(this.getClass(), "EwfVerifyIngestModule.shutDown.verifyResultsHeader", imgName);
        extra += NbBundle.getMessage(this.getClass(), "EwfVerifyIngestModule.shutDown.resultLi", msg);
        extra += NbBundle.getMessage(this.getClass(), "EwfVerifyIngestModule.shutDown.calcHashLi", calculatedHash);
        extra += NbBundle.getMessage(this.getClass(), "EwfVerifyIngestModule.shutDown.storedHashLi", storedHash);
        services.postMessage(IngestMessage.createMessage( MessageType.INFO, EwfVerifierModuleFactory.getModuleName(), imgName + msg, extra));
        logger.log(Level.INFO, "{0}{1}", new Object[]{imgName, msg});

        return ProcessResult.OK;
    }

    @Override
    public void shutDown(boolean ingestJobCancelled) {
        logger.log(Level.INFO, "complete() {0}", EwfVerifierModuleFactory.getModuleName()); //NON-NLS
        if (skipped == false) {
            String msg = "";
            if (verified) {
                msg = NbBundle.getMessage(this.getClass(), "EwfVerifyIngestModule.shutDown.verified");
            } else {
                msg = NbBundle.getMessage(this.getClass(), "EwfVerifyIngestModule.shutDown.notVerified");
            }
            String extra = NbBundle
                    .getMessage(this.getClass(), "EwfVerifyIngestModule.shutDown.verifyResultsHeader", imgName);
            extra += NbBundle.getMessage(this.getClass(), "EwfVerifyIngestModule.shutDown.resultLi", msg);
            extra += NbBundle.getMessage(this.getClass(), "EwfVerifyIngestModule.shutDown.calcHashLi", calculatedHash);
            extra += NbBundle.getMessage(this.getClass(), "EwfVerifyIngestModule.shutDown.storedHashLi", storedHash);
            services.postMessage(IngestMessage.createMessage( MessageType.INFO, EwfVerifierModuleFactory.getModuleName(), imgName + msg, extra));
            logger.log(Level.INFO, "{0}{1}", new Object[]{imgName, msg});
        }
    }
}
@@ -31,7 +31,6 @@ import org.sleuthkit.autopsy.datamodel.ContentUtils;
import org.sleuthkit.autopsy.ingest.FileIngestModule;
import org.sleuthkit.autopsy.ingest.IngestMessage;
import org.sleuthkit.autopsy.ingest.IngestModule.ProcessResult;
import org.sleuthkit.autopsy.ingest.IngestModuleAdapter;
import org.sleuthkit.autopsy.ingest.IngestJobContext;
import org.sleuthkit.autopsy.ingest.IngestServices;
import org.sleuthkit.autopsy.ingest.ModuleContentEvent;
@@ -50,7 +49,7 @@ import org.sleuthkit.datamodel.TskException;
 * Understands Thunderbird folder layout to provide additional structure and
 * metadata.
 */
public final class ThunderbirdMboxFileIngestModule extends IngestModuleAdapter implements FileIngestModule {
public final class ThunderbirdMboxFileIngestModule implements FileIngestModule {

    private static final Logger logger = Logger.getLogger(ThunderbirdMboxFileIngestModule.class.getName());
    private IngestServices services = IngestServices.getInstance();
@@ -403,4 +402,8 @@ public final class ThunderbirdMboxFileIngestModule extends IngestModuleAdapter i
    IngestServices getServices() {
        return services;
    }

    @Override
    public void shutDown() {
    }
}