diff --git a/Core/src/org/sleuthkit/autopsy/corecomponents/GeneralPanel.form b/Core/src/org/sleuthkit/autopsy/corecomponents/GeneralPanel.form
index ea8c8fed21..2c0bbd1ee6 100644
--- a/Core/src/org/sleuthkit/autopsy/corecomponents/GeneralPanel.form
+++ b/Core/src/org/sleuthkit/autopsy/corecomponents/GeneralPanel.form
@@ -188,15 +188,12 @@
-
-
-
-
-
-
+
-
+
+
+
diff --git a/Core/src/org/sleuthkit/autopsy/corecomponents/GeneralPanel.java b/Core/src/org/sleuthkit/autopsy/corecomponents/GeneralPanel.java
index ea334dae46..d34a49b16a 100644
--- a/Core/src/org/sleuthkit/autopsy/corecomponents/GeneralPanel.java
+++ b/Core/src/org/sleuthkit/autopsy/corecomponents/GeneralPanel.java
@@ -19,6 +19,7 @@
package org.sleuthkit.autopsy.corecomponents;
import java.util.prefs.Preferences;
+import javax.swing.DefaultComboBoxModel;
import org.openide.util.NbPreferences;
import org.sleuthkit.autopsy.datamodel.ContentUtils;
import org.sleuthkit.autopsy.ingest.IngestManager;
@@ -33,10 +34,35 @@ final class GeneralPanel extends javax.swing.JPanel {
GeneralPanel(GeneralOptionsPanelController controller) {
initComponents();
+ numberOfFileIngestThreadsComboBox.setModel(new DefaultComboBoxModel<>(new Integer[]{1, 2, 4, 8, 16}));
ContentUtils.setDisplayInLocalTime(useLocalTimeRB.isSelected());
// TODO listen to changes in form fields and call controller.changed()
}
+ void load() {
+ boolean keepPreferredViewer = prefs.getBoolean(KEEP_PREFERRED_VIEWER, false);
+ keepCurrentViewerRB.setSelected(keepPreferredViewer);
+ useBestViewerRB.setSelected(!keepPreferredViewer);
+ boolean useLocalTime = prefs.getBoolean(USE_LOCAL_TIME, true);
+ useLocalTimeRB.setSelected(useLocalTime);
+ useGMTTimeRB.setSelected(!useLocalTime);
+ dataSourcesHideKnownCB.setSelected(prefs.getBoolean(DS_HIDE_KNOWN, false));
+ viewsHideKnownCB.setSelected(prefs.getBoolean(VIEWS_HIDE_KNOWN, true));
+ numberOfFileIngestThreadsComboBox.setSelectedItem(IngestManager.getNumberOfFileIngestThreads());
+ }
+
+ void store() {
+ prefs.putBoolean(KEEP_PREFERRED_VIEWER, keepCurrentViewerRB.isSelected());
+ prefs.putBoolean(USE_LOCAL_TIME, useLocalTimeRB.isSelected());
+ prefs.putBoolean(DS_HIDE_KNOWN, dataSourcesHideKnownCB.isSelected());
+ prefs.putBoolean(VIEWS_HIDE_KNOWN, viewsHideKnownCB.isSelected());
+ IngestManager.setNumberOfFileIngestThreads((Integer) numberOfFileIngestThreadsComboBox.getSelectedItem());
+ }
+
+ boolean valid() {
+ return true;
+ }
+
/**
* This method is called from within the constructor to initialize the form.
* WARNING: Do NOT modify this code. The content of this method is always
@@ -57,7 +83,7 @@ final class GeneralPanel extends javax.swing.JPanel {
dataSourcesHideKnownCB = new javax.swing.JCheckBox();
viewsHideKnownCB = new javax.swing.JCheckBox();
jLabel4 = new javax.swing.JLabel();
- numberOfFileIngestThreadsComboBox = new javax.swing.JComboBox();
+ numberOfFileIngestThreadsComboBox = new javax.swing.JComboBox<>();
buttonGroup1.add(useBestViewerRB);
useBestViewerRB.setSelected(true);
@@ -97,9 +123,6 @@ final class GeneralPanel extends javax.swing.JPanel {
org.openide.awt.Mnemonics.setLocalizedText(jLabel4, org.openide.util.NbBundle.getMessage(GeneralPanel.class, "GeneralPanel.jLabel4.text")); // NOI18N
- numberOfFileIngestThreadsComboBox.setModel(new javax.swing.DefaultComboBoxModel(new String[] { "1", "2", "3", "4" }));
- numberOfFileIngestThreadsComboBox.setSelectedIndex(1);
-
javax.swing.GroupLayout layout = new javax.swing.GroupLayout(this);
this.setLayout(layout);
layout.setHorizontalGroup(
@@ -163,33 +186,8 @@ final class GeneralPanel extends javax.swing.JPanel {
}//GEN-LAST:event_useBestViewerRBActionPerformed
private void useGMTTimeRBActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_useGMTTimeRBActionPerformed
- ContentUtils.setDisplayInLocalTime(useLocalTimeRB.isSelected());
+ ContentUtils.setDisplayInLocalTime(useLocalTimeRB.isSelected());
}//GEN-LAST:event_useGMTTimeRBActionPerformed
-
- void load() {
- boolean keepPreferredViewer = prefs.getBoolean(KEEP_PREFERRED_VIEWER, false);
- keepCurrentViewerRB.setSelected(keepPreferredViewer);
- useBestViewerRB.setSelected(!keepPreferredViewer);
- boolean useLocalTime = prefs.getBoolean(USE_LOCAL_TIME, true);
- useLocalTimeRB.setSelected(useLocalTime);
- useGMTTimeRB.setSelected(!useLocalTime);
- dataSourcesHideKnownCB.setSelected(prefs.getBoolean(DS_HIDE_KNOWN, false));
- viewsHideKnownCB.setSelected(prefs.getBoolean(VIEWS_HIDE_KNOWN, true));
- numberOfFileIngestThreadsComboBox.setSelectedItem(IngestManager.getInstance().getNumberOfFileIngestThreads());
- }
-
- void store() {
- prefs.putBoolean(KEEP_PREFERRED_VIEWER, keepCurrentViewerRB.isSelected());
- prefs.putBoolean(USE_LOCAL_TIME, useLocalTimeRB.isSelected());
- prefs.putBoolean(DS_HIDE_KNOWN, dataSourcesHideKnownCB.isSelected());
- prefs.putBoolean(VIEWS_HIDE_KNOWN, viewsHideKnownCB.isSelected());
- IngestManager.getInstance().setNumberOfFileIngestThreads(Integer.valueOf(numberOfFileIngestThreadsComboBox.getSelectedItem().toString()));
- }
-
- boolean valid() {
- // TODO check whether form is consistent and complete
- return true;
- }
// Variables declaration - do not modify//GEN-BEGIN:variables
private javax.swing.ButtonGroup buttonGroup1;
private javax.swing.ButtonGroup buttonGroup3;
@@ -199,7 +197,7 @@ final class GeneralPanel extends javax.swing.JPanel {
private javax.swing.JLabel jLabel3;
private javax.swing.JLabel jLabel4;
private javax.swing.JRadioButton keepCurrentViewerRB;
- private javax.swing.JComboBox numberOfFileIngestThreadsComboBox;
+ private javax.swing.JComboBox<Integer> numberOfFileIngestThreadsComboBox;
private javax.swing.JRadioButton useBestViewerRB;
private javax.swing.JRadioButton useGMTTimeRB;
private javax.swing.JRadioButton useLocalTimeRB;
diff --git a/Core/src/org/sleuthkit/autopsy/ingest/DataSourceIngestTask.java b/Core/src/org/sleuthkit/autopsy/ingest/DataSourceIngestTask.java
index c62b6796a1..5598033397 100755
--- a/Core/src/org/sleuthkit/autopsy/ingest/DataSourceIngestTask.java
+++ b/Core/src/org/sleuthkit/autopsy/ingest/DataSourceIngestTask.java
@@ -1,7 +1,7 @@
/*
* Autopsy Forensic Browser
*
- * Copyright 2014 Basis Technology Corp.
+ * Copyright 2012-2014 Basis Technology Corp.
* Contact: carrier sleuthkit org
*
* Licensed under the Apache License, Version 2.0 (the "License");
@@ -20,26 +20,21 @@ package org.sleuthkit.autopsy.ingest;
import org.sleuthkit.datamodel.Content;
-final class DataSourceIngestTask implements IngestTask {
+final class DataSourceIngestTask extends IngestTask {
- private final IngestJob ingestJob;
private final Content dataSource;
DataSourceIngestTask(IngestJob ingestJob, Content dataSource) {
- this.ingestJob = ingestJob;
+ super(ingestJob);
this.dataSource = dataSource;
}
-
- IngestJob getIngestJob() {
- return ingestJob;
- }
-
+
Content getDataSource() {
return dataSource;
}
@Override
- public void execute() throws InterruptedException {
- ingestJob.process(dataSource);
+ void execute() throws InterruptedException {
+ getIngestJob().process(dataSource);
}
}
diff --git a/Core/src/org/sleuthkit/autopsy/ingest/DataSourceIngestTaskScheduler.java b/Core/src/org/sleuthkit/autopsy/ingest/DataSourceIngestTaskScheduler.java
deleted file mode 100755
index ba89a669a4..0000000000
--- a/Core/src/org/sleuthkit/autopsy/ingest/DataSourceIngestTaskScheduler.java
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Autopsy Forensic Browser
- *
- * Copyright 2012-2014 Basis Technology Corp.
- * Contact: carrier sleuthkit org
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.sleuthkit.autopsy.ingest;
-
-import java.util.concurrent.LinkedBlockingQueue;
-
-//final class DataSourceIngestTaskScheduler implements IngestTaskQueue {
-//
-// private static DataSourceIngestTaskScheduler instance = new DataSourceIngestTaskScheduler();
-// private final LinkedBlockingQueue tasks = new LinkedBlockingQueue<>();
-//
-// static DataSourceIngestTaskScheduler getInstance() {
-// return instance;
-// }
-//
-// private DataSourceIngestTaskScheduler() {
-// }
-//
-// void addTask(DataSourceIngestTask task) {
-// // If the thread executing this code is interrupted, it is because the
-// // number of ingest threads has been decreased while ingest jobs are
-// // running. This thread will exit in an orderly fashion, but the task
-// // still needs to be enqueued rather than lost.
-// while (true) {
-// try {
-// tasks.put(task);
-// break;
-// } catch (InterruptedException ex) {
-// // Reset the interrupted status of the thread so the orderly
-// // exit can occur in the intended place.
-// Thread.currentThread().interrupt();
-// }
-// }
-// }
-//
-// @Override
-// public DataSourceIngestTask getNextTask() throws InterruptedException {
-// return tasks.take();
-// }
-//}
diff --git a/Core/src/org/sleuthkit/autopsy/ingest/FileIngestTask.java b/Core/src/org/sleuthkit/autopsy/ingest/FileIngestTask.java
index 75e9165ded..3134b6deb6 100755
--- a/Core/src/org/sleuthkit/autopsy/ingest/FileIngestTask.java
+++ b/Core/src/org/sleuthkit/autopsy/ingest/FileIngestTask.java
@@ -21,27 +21,22 @@ package org.sleuthkit.autopsy.ingest;
import java.util.Objects;
import org.sleuthkit.datamodel.AbstractFile;
-final class FileIngestTask implements IngestTask {
+final class FileIngestTask extends IngestTask {
- private final IngestJob ingestJob;
private final AbstractFile file;
- FileIngestTask(IngestJob task, AbstractFile file) {
- this.ingestJob = task;
+ FileIngestTask(IngestJob job, AbstractFile file) {
+ super(job);
this.file = file;
}
- public IngestJob getIngestJob() { // RJCTODO: Maybe add to interface
- return ingestJob;
- }
-
- public AbstractFile getFile() {
+ AbstractFile getFile() {
return file;
}
@Override
- public void execute() throws InterruptedException {
- ingestJob.process(file);
+ void execute() throws InterruptedException {
+ getIngestJob().process(file);
}
@Override
@@ -53,7 +48,9 @@ final class FileIngestTask implements IngestTask {
return false;
}
FileIngestTask other = (FileIngestTask) obj;
- if (this.ingestJob != other.ingestJob && (this.ingestJob == null || !this.ingestJob.equals(other.ingestJob))) {
+ IngestJob job = getIngestJob();
+ IngestJob otherJob = other.getIngestJob();
+ if (job != otherJob && (job == null || !job.equals(otherJob))) {
return false;
}
if (this.file != other.file && (this.file == null || !this.file.equals(other.file))) {
@@ -65,7 +62,7 @@ final class FileIngestTask implements IngestTask {
@Override
public int hashCode() {
int hash = 5;
- hash = 47 * hash + Objects.hashCode(this.ingestJob);
+ hash = 47 * hash + Objects.hashCode(getIngestJob());
hash = 47 * hash + Objects.hashCode(this.file);
return hash;
}
diff --git a/Core/src/org/sleuthkit/autopsy/ingest/FileIngestTaskScheduler.java b/Core/src/org/sleuthkit/autopsy/ingest/FileIngestTaskScheduler.java
deleted file mode 100755
index c0a4b34295..0000000000
--- a/Core/src/org/sleuthkit/autopsy/ingest/FileIngestTaskScheduler.java
+++ /dev/null
@@ -1,393 +0,0 @@
-/*
- * Autopsy Forensic Browser
- *
- * Copyright 2012-2014 Basis Technology Corp.
- * Contact: carrier sleuthkit org
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.sleuthkit.autopsy.ingest;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Comparator;
-import java.util.List;
-import java.util.TreeSet;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.logging.Level;
-import java.util.logging.Logger;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-import org.sleuthkit.datamodel.AbstractFile;
-import org.sleuthkit.datamodel.Content;
-import org.sleuthkit.datamodel.DerivedFile;
-import org.sleuthkit.datamodel.Directory;
-import org.sleuthkit.datamodel.File;
-import org.sleuthkit.datamodel.FileSystem;
-import org.sleuthkit.datamodel.LayoutFile;
-import org.sleuthkit.datamodel.LocalFile;
-import org.sleuthkit.datamodel.TskCoreException;
-import org.sleuthkit.datamodel.TskData;
-import org.sleuthkit.datamodel.VirtualDirectory;
-
-//final class FileIngestTaskScheduler implements IngestTaskQueue {
-//
-// private static final Logger logger = Logger.getLogger(FileIngestTaskScheduler.class.getName());
-// private static FileIngestTaskScheduler instance = new FileIngestTaskScheduler();
-// private final TreeSet rootDirectoryTasks = new TreeSet<>(new RootDirectoryTaskComparator());
-// private final List directoryTasks = new ArrayList<>();
-// private final LinkedBlockingQueue fileTasks = new LinkedBlockingQueue<>();
-// private static final int FAT_NTFS_FLAGS = TskData.TSK_FS_TYPE_ENUM.TSK_FS_TYPE_FAT12.getValue() | TskData.TSK_FS_TYPE_ENUM.TSK_FS_TYPE_FAT16.getValue() | TskData.TSK_FS_TYPE_ENUM.TSK_FS_TYPE_FAT32.getValue() | TskData.TSK_FS_TYPE_ENUM.TSK_FS_TYPE_NTFS.getValue();
-//
-// static FileIngestTaskScheduler getInstance() {
-// return instance;
-// }
-//
-// private FileIngestTaskScheduler() {
-// }
-//
-// synchronized void addTasks(IngestJob job, Content dataSource) throws InterruptedException {
-// Collection rootObjects = dataSource.accept(new GetRootDirectoryVisitor());
-// List firstLevelFiles = new ArrayList<>();
-// if (rootObjects.isEmpty() && dataSource instanceof AbstractFile) {
-// // The data source is file.
-// firstLevelFiles.add((AbstractFile) dataSource);
-// } else {
-// for (AbstractFile root : rootObjects) {
-// List children;
-// try {
-// children = root.getChildren();
-// if (children.isEmpty()) {
-// //add the root itself, could be unalloc file, child of volume or image
-// firstLevelFiles.add(root);
-// } else {
-// //root for fs root dir, schedule children dirs/files
-// for (Content child : children) {
-// if (child instanceof AbstractFile) {
-// firstLevelFiles.add((AbstractFile) child);
-// }
-// }
-// }
-// } catch (TskCoreException ex) {
-// logger.log(Level.WARNING, "Could not get children of root to enqueue: " + root.getId() + ": " + root.getName(), ex); //NON-NLS
-// }
-// }
-// }
-// for (AbstractFile firstLevelFile : firstLevelFiles) {
-// FileIngestTask fileTask = new FileIngestTask(job, firstLevelFile);
-// if (shouldEnqueueTask(fileTask)) {
-// rootDirectoryTasks.add(fileTask);
-// fileTask.getIngestJob().notifyTaskAdded();
-// }
-// }
-//
-// // Reshuffle/update the dir and file level queues if needed
-// updateQueues();
-// }
-//
-// synchronized void addTask(FileIngestTask task) {
-// if (shouldEnqueueTask(task)) {
-// addTaskToFileQueue(task, true);
-// }
-// }
-//
-// @Override
-// public FileIngestTask getNextTask() throws InterruptedException {
-// FileIngestTask task = fileTasks.take();
-// updateQueues();
-// return task;
-// }
-//
-// private void updateQueues() throws InterruptedException {
-// // we loop because we could have a directory that has all files
-// // that do not get enqueued
-// while (true) {
-// // There are files in the queue, we're done
-// if (fileTasks.isEmpty() == false) {
-// return;
-// }
-// // fill in the directory queue if it is empty.
-// if (this.directoryTasks.isEmpty()) {
-// // bail out if root is also empty -- we are done
-// if (rootDirectoryTasks.isEmpty()) {
-// return;
-// }
-// FileIngestTask rootTask = rootDirectoryTasks.pollFirst();
-// directoryTasks.add(rootTask);
-// }
-// //pop and push AbstractFile directory children if any
-// //add the popped and its leaf children onto cur file list
-// FileIngestTask parentTask = directoryTasks.remove(directoryTasks.size() - 1);
-// final AbstractFile parentFile = parentTask.getFile();
-// // add itself to the file list
-// if (shouldEnqueueTask(parentTask)) {
-// addTaskToFileQueue(parentTask, false);
-// }
-// // add its children to the file and directory lists
-// try {
-// List children = parentFile.getChildren();
-// for (Content c : children) {
-// if (c instanceof AbstractFile) {
-// AbstractFile childFile = (AbstractFile) c;
-// FileIngestTask childTask = new FileIngestTask(parentTask.getIngestJob(), childFile);
-// if (childFile.hasChildren()) {
-// directoryTasks.add(childTask);
-// childTask.getIngestJob().notifyTaskAdded();
-// } else if (shouldEnqueueTask(childTask)) {
-// addTaskToFileQueue(childTask, true);
-// }
-// }
-// }
-// } catch (TskCoreException ex) {
-// logger.log(Level.SEVERE, "Could not get children of file and update file queues: " + parentFile.getName(), ex);
-// }
-// }
-// }
-//
-// private void addTaskToFileQueue(FileIngestTask task, boolean isNewTask) {
-// if (isNewTask) {
-// // The capacity of the file tasks queue is not bounded, so the call
-// // to put() should not block except for normal synchronized access.
-// // Still, notify the job that the task has been added first so that
-// // the take() of the task cannot occur before the notification.
-// task.getIngestJob().notifyTaskAdded();
-// }
-// // If the thread executing this code is ever interrupted, it is
-// // because the number of ingest threads has been decreased while
-// // ingest jobs are running. This thread will exit in an orderly fashion,
-// // but the task still needs to be enqueued rather than lost.
-// while (true) {
-// try {
-// fileTasks.put(task);
-// break;
-// } catch (InterruptedException ex) {
-// // Reset the interrupted status of the thread so the orderly
-// // exit can occur in the intended place.
-// Thread.currentThread().interrupt();
-// }
-// }
-// }
-//
-// /**
-// * Check if the file is a special file that we should skip
-// *
-// * @param processTask a task whose file to check if should be queued of
-// * skipped
-// * @return true if should be enqueued, false otherwise
-// */
-// private static boolean shouldEnqueueTask(final FileIngestTask processTask) {
-// final AbstractFile aFile = processTask.getFile();
-// //if it's unalloc file, skip if so scheduled
-// if (processTask.getIngestJob().shouldProcessUnallocatedSpace() == false && aFile.getType().equals(TskData.TSK_DB_FILES_TYPE_ENUM.UNALLOC_BLOCKS)) {
-// return false;
-// }
-// String fileName = aFile.getName();
-// if (fileName.equals(".") || fileName.equals("..")) {
-// return false;
-// } else if (aFile instanceof org.sleuthkit.datamodel.File) {
-// final org.sleuthkit.datamodel.File f = (File) aFile;
-// //skip files in root dir, starting with $, containing : (not default attributes)
-// //with meta address < 32, i.e. some special large NTFS and FAT files
-// FileSystem fs = null;
-// try {
-// fs = f.getFileSystem();
-// } catch (TskCoreException ex) {
-// logger.log(Level.SEVERE, "Could not get FileSystem for " + f, ex); //NON-NLS
-// }
-// TskData.TSK_FS_TYPE_ENUM fsType = TskData.TSK_FS_TYPE_ENUM.TSK_FS_TYPE_UNSUPP;
-// if (fs != null) {
-// fsType = fs.getFsType();
-// }
-// if ((fsType.getValue() & FAT_NTFS_FLAGS) == 0) {
-// //not fat or ntfs, accept all files
-// return true;
-// }
-// boolean isInRootDir = false;
-// try {
-// isInRootDir = f.getParentDirectory().isRoot();
-// } catch (TskCoreException ex) {
-// logger.log(Level.WARNING, "Could not check if should enqueue the file: " + f.getName(), ex); //NON-NLS
-// }
-// if (isInRootDir && f.getMetaAddr() < 32) {
-// String name = f.getName();
-// if (name.length() > 0 && name.charAt(0) == '$' && name.contains(":")) {
-// return false;
-// }
-// } else {
-// return true;
-// }
-// }
-// return true;
-// }
-//
-// /**
-// * Visitor that gets a collection of top level objects to be scheduled, such
-// * as root directories (if there is FS) or LayoutFiles and virtual
-// * directories, also if there is no FS.
-// */
-// static class GetRootDirectoryVisitor extends GetFilesContentVisitor {
-//
-// @Override
-// public Collection visit(VirtualDirectory ld) {
-// //case when we hit a layout directoryor local file container, not under a real FS
-// //or when root virt dir is scheduled
-// Collection ret = new ArrayList<>();
-// ret.add(ld);
-// return ret;
-// }
-//
-// @Override
-// public Collection visit(LayoutFile lf) {
-// //case when we hit a layout file, not under a real FS
-// Collection ret = new ArrayList<>();
-// ret.add(lf);
-// return ret;
-// }
-//
-// @Override
-// public Collection visit(Directory drctr) {
-// //we hit a real directory, a child of real FS
-// Collection ret = new ArrayList<>();
-// ret.add(drctr);
-// return ret;
-// }
-//
-// @Override
-// public Collection visit(FileSystem fs) {
-// return getAllFromChildren(fs);
-// }
-//
-// @Override
-// public Collection visit(File file) {
-// //can have derived files
-// return getAllFromChildren(file);
-// }
-//
-// @Override
-// public Collection visit(DerivedFile derivedFile) {
-// //can have derived files
-// //TODO test this and overall scheduler with derived files
-// return getAllFromChildren(derivedFile);
-// }
-//
-// @Override
-// public Collection visit(LocalFile localFile) {
-// //can have local files
-// //TODO test this and overall scheduler with local files
-// return getAllFromChildren(localFile);
-// }
-// }
-//
-// /**
-// * Root directory sorter
-// */
-// private static class RootDirectoryTaskComparator implements Comparator {
-//
-// @Override
-// public int compare(FileIngestTask q1, FileIngestTask q2) {
-// AbstractFilePriority.Priority p1 = AbstractFilePriority.getPriority(q1.getFile());
-// AbstractFilePriority.Priority p2 = AbstractFilePriority.getPriority(q2.getFile());
-// if (p1 == p2) {
-// return (int) (q2.getFile().getId() - q1.getFile().getId());
-// } else {
-// return p2.ordinal() - p1.ordinal();
-// }
-// }
-//
-// /**
-// * Priority determination for sorted AbstractFile, used by
-// * RootDirComparator
-// */
-// private static class AbstractFilePriority {
-//
-// enum Priority {
-//
-// LAST, LOW, MEDIUM, HIGH
-// }
-// static final List LAST_PRI_PATHS = new ArrayList<>();
-// static final List LOW_PRI_PATHS = new ArrayList<>();
-// static final List MEDIUM_PRI_PATHS = new ArrayList<>();
-// static final List HIGH_PRI_PATHS = new ArrayList<>();
-// /* prioritize root directory folders based on the assumption that we are
-// * looking for user content. Other types of investigations may want different
-// * priorities. */
-//
-// static /* prioritize root directory folders based on the assumption that we are
-// * looking for user content. Other types of investigations may want different
-// * priorities. */ {
-// // these files have no structure, so they go last
-// //unalloc files are handled as virtual files in getPriority()
-// //LAST_PRI_PATHS.schedule(Pattern.compile("^\\$Unalloc", Pattern.CASE_INSENSITIVE));
-// //LAST_PRI_PATHS.schedule(Pattern.compile("^\\Unalloc", Pattern.CASE_INSENSITIVE));
-// LAST_PRI_PATHS.add(Pattern.compile("^pagefile", Pattern.CASE_INSENSITIVE));
-// LAST_PRI_PATHS.add(Pattern.compile("^hiberfil", Pattern.CASE_INSENSITIVE));
-// // orphan files are often corrupt and windows does not typically have
-// // user content, so put them towards the bottom
-// LOW_PRI_PATHS.add(Pattern.compile("^\\$OrphanFiles", Pattern.CASE_INSENSITIVE));
-// LOW_PRI_PATHS.add(Pattern.compile("^Windows", Pattern.CASE_INSENSITIVE));
-// // all other files go into the medium category too
-// MEDIUM_PRI_PATHS.add(Pattern.compile("^Program Files", Pattern.CASE_INSENSITIVE));
-// // user content is top priority
-// HIGH_PRI_PATHS.add(Pattern.compile("^Users", Pattern.CASE_INSENSITIVE));
-// HIGH_PRI_PATHS.add(Pattern.compile("^Documents and Settings", Pattern.CASE_INSENSITIVE));
-// HIGH_PRI_PATHS.add(Pattern.compile("^home", Pattern.CASE_INSENSITIVE));
-// HIGH_PRI_PATHS.add(Pattern.compile("^ProgramData", Pattern.CASE_INSENSITIVE));
-// }
-//
-// /**
-// * Get the scheduling priority for a given file.
-// *
-// * @param abstractFile
-// * @return
-// */
-// static AbstractFilePriority.Priority getPriority(final AbstractFile abstractFile) {
-// if (!abstractFile.getType().equals(TskData.TSK_DB_FILES_TYPE_ENUM.FS)) {
-// //quickly filter out unstructured content
-// //non-fs virtual files and dirs, such as representing unalloc space
-// return AbstractFilePriority.Priority.LAST;
-// }
-// //determine the fs files priority by name
-// final String path = abstractFile.getName();
-// if (path == null) {
-// return AbstractFilePriority.Priority.MEDIUM;
-// }
-// for (Pattern p : HIGH_PRI_PATHS) {
-// Matcher m = p.matcher(path);
-// if (m.find()) {
-// return AbstractFilePriority.Priority.HIGH;
-// }
-// }
-// for (Pattern p : MEDIUM_PRI_PATHS) {
-// Matcher m = p.matcher(path);
-// if (m.find()) {
-// return AbstractFilePriority.Priority.MEDIUM;
-// }
-// }
-// for (Pattern p : LOW_PRI_PATHS) {
-// Matcher m = p.matcher(path);
-// if (m.find()) {
-// return AbstractFilePriority.Priority.LOW;
-// }
-// }
-// for (Pattern p : LAST_PRI_PATHS) {
-// Matcher m = p.matcher(path);
-// if (m.find()) {
-// return AbstractFilePriority.Priority.LAST;
-// }
-// }
-// //default is medium
-// return AbstractFilePriority.Priority.MEDIUM;
-// }
-// }
-// }
-//}
diff --git a/Core/src/org/sleuthkit/autopsy/ingest/GetRootDirectoryVisitor.java b/Core/src/org/sleuthkit/autopsy/ingest/GetRootDirectoryVisitor.java
new file mode 100755
index 0000000000..a81ce17f6a
--- /dev/null
+++ b/Core/src/org/sleuthkit/autopsy/ingest/GetRootDirectoryVisitor.java
@@ -0,0 +1,88 @@
+/*
+ * Autopsy Forensic Browser
+ *
+ * Copyright 2012 Basis Technology Corp.
+ * Contact: carrier sleuthkit org
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.sleuthkit.autopsy.ingest;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import org.sleuthkit.datamodel.AbstractFile;
+import org.sleuthkit.datamodel.DerivedFile;
+import org.sleuthkit.datamodel.Directory;
+import org.sleuthkit.datamodel.File;
+import org.sleuthkit.datamodel.FileSystem;
+import org.sleuthkit.datamodel.LayoutFile;
+import org.sleuthkit.datamodel.LocalFile;
+import org.sleuthkit.datamodel.VirtualDirectory;
+
+/**
+ * Finds top level objects such as file system root directories, layout
+ * files and virtual directories.
+ */
+final class GetRootDirectoryVisitor extends GetFilesContentVisitor {
+
+ @Override
+ public Collection<AbstractFile> visit(VirtualDirectory ld) {
+ //case when we hit a layout directory or local file container, not under a real FS
+ //or when root virt dir is scheduled
+ Collection<AbstractFile> ret = new ArrayList<>();
+ ret.add(ld);
+ return ret;
+ }
+
+ @Override
+ public Collection<AbstractFile> visit(LayoutFile lf) {
+ //case when we hit a layout file, not under a real FS
+ Collection<AbstractFile> ret = new ArrayList<>();
+ ret.add(lf);
+ return ret;
+ }
+
+ @Override
+ public Collection<AbstractFile> visit(Directory drctr) {
+ //we hit a real directory, a child of real FS
+ Collection<AbstractFile> ret = new ArrayList<>();
+ ret.add(drctr);
+ return ret;
+ }
+
+ @Override
+ public Collection<AbstractFile> visit(FileSystem fs) {
+ return getAllFromChildren(fs);
+ }
+
+ @Override
+ public Collection<AbstractFile> visit(File file) {
+ //can have derived files
+ return getAllFromChildren(file);
+ }
+
+ @Override
+ public Collection<AbstractFile> visit(DerivedFile derivedFile) {
+ //can have derived files
+ //TODO test this and overall scheduler with derived files
+ return getAllFromChildren(derivedFile);
+ }
+
+ @Override
+ public Collection<AbstractFile> visit(LocalFile localFile) {
+ //can have local files
+ //TODO test this and overall scheduler with local files
+ return getAllFromChildren(localFile);
+ }
+
+}
diff --git a/Core/src/org/sleuthkit/autopsy/ingest/IngestJob.java b/Core/src/org/sleuthkit/autopsy/ingest/IngestJob.java
index 529e0f5d1e..22d32b3638 100644
--- a/Core/src/org/sleuthkit/autopsy/ingest/IngestJob.java
+++ b/Core/src/org/sleuthkit/autopsy/ingest/IngestJob.java
@@ -20,10 +20,7 @@ package org.sleuthkit.autopsy.ingest;
import java.util.ArrayList;
import java.util.List;
-import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicLong;
import java.util.logging.Level;
import org.netbeans.api.progress.ProgressHandle;
import org.netbeans.api.progress.ProgressHandleFactory;
@@ -33,33 +30,26 @@ import org.sleuthkit.autopsy.coreutils.Logger;
import org.sleuthkit.datamodel.AbstractFile;
import org.sleuthkit.datamodel.Content;
-/**
- * Encapsulates a data source and the ingest module pipelines to be used to
- * ingest the data source.
- */
final class IngestJob {
private static final Logger logger = Logger.getLogger(IngestManager.class.getName());
- private static final ConcurrentHashMap<Long, IngestJob> ingestJobsById = new ConcurrentHashMap<>();
private final long id;
private final Content rootDataSource;
private final List<IngestModuleTemplate> ingestModuleTemplates;
private final boolean processUnallocatedSpace;
private final LinkedBlockingQueue<DataSourceIngestPipeline> dataSourceIngestPipelines = new LinkedBlockingQueue<>();
private final LinkedBlockingQueue<FileIngestPipeline> fileIngestPipelines = new LinkedBlockingQueue<>();
- private final AtomicInteger tasksInProgress = new AtomicInteger(0);
- private final AtomicLong processedFiles = new AtomicLong(0L);
- private final AtomicLong filesToIngestEstimate = new AtomicLong(0L);
+ private long estimatedFilesToProcess = 0L; // Guarded by this
+ private long processedFiles = 0L; // Guarded by this
private ProgressHandle dataSourceTasksProgress;
private ProgressHandle fileTasksProgress;
- private volatile boolean cancelled;
+ private volatile boolean cancelled = false;
IngestJob(long id, Content dataSource, List<IngestModuleTemplate> ingestModuleTemplates, boolean processUnallocatedSpace) {
this.id = id;
this.rootDataSource = dataSource;
this.ingestModuleTemplates = ingestModuleTemplates;
this.processUnallocatedSpace = processUnallocatedSpace;
- this.cancelled = false;
}
long getId() {
@@ -73,8 +63,8 @@ final class IngestJob {
List<IngestModuleError> startUp() throws InterruptedException {
List<IngestModuleError> errors = startUpIngestPipelines();
if (errors.isEmpty()) {
- startDataSourceIngestProgressBar();
startFileIngestProgressBar();
+ startDataSourceIngestProgressBar();
}
return errors;
}
@@ -105,7 +95,8 @@ final class IngestJob {
}
}
- return errors;
+ logIngestModuleErrors(errors);
+ return errors; // Returned so UI can report to user.
}
private void startDataSourceIngestProgressBar() {
@@ -145,18 +136,9 @@ final class IngestJob {
return true;
}
});
- long initialFilesCount = rootDataSource.accept(new GetFilesCountVisitor());
- filesToIngestEstimate.getAndAdd(initialFilesCount);
+ estimatedFilesToProcess = rootDataSource.accept(new GetFilesCountVisitor());
fileTasksProgress.start();
- fileTasksProgress.switchToDeterminate((int) initialFilesCount); // RJCTODO: This cast is troublesome, can use intValue
- }
-
- /**
- * Called by the ingest task schedulers when an ingest task is added to this
- * ingest job.
- */
- void notifyTaskAdded() {
- tasksInProgress.incrementAndGet();
+ fileTasksProgress.switchToDeterminate((int) estimatedFilesToProcess);
}
void process(Content dataSource) throws InterruptedException {
@@ -172,7 +154,6 @@ final class IngestJob {
}
dataSourceIngestPipelines.put(pipeline);
}
- shutDownIfAllTasksCompleted();
}
void process(AbstractFile file) throws InterruptedException {
@@ -181,35 +162,37 @@ final class IngestJob {
// shut down check needs to occur.
if (!isCancelled()) {
List<IngestModuleError> errors = new ArrayList<>();
+ synchronized (this) {
+ ++processedFiles;
+ if (processedFiles <= estimatedFilesToProcess) {
+ fileTasksProgress.progress(file.getName(), (int) processedFiles);
+ } else {
+ fileTasksProgress.progress(file.getName(), (int) estimatedFilesToProcess);
+ }
+ }
FileIngestPipeline pipeline = fileIngestPipelines.take();
- fileTasksProgress.progress(file.getName(), (int) processedFiles.incrementAndGet());
errors.addAll(pipeline.process(file));
fileIngestPipelines.put(pipeline);
if (!errors.isEmpty()) {
logIngestModuleErrors(errors);
}
}
- shutDownIfAllTasksCompleted();
}
- private void shutDownIfAllTasksCompleted() {
- if (tasksInProgress.decrementAndGet() == 0) {
- List<IngestModuleError> errors = new ArrayList<>();
- while (!dataSourceIngestPipelines.isEmpty()) {
- DataSourceIngestPipeline pipeline = dataSourceIngestPipelines.poll();
- errors.addAll(pipeline.shutDown());
- }
- while (!fileIngestPipelines.isEmpty()) {
- FileIngestPipeline pipeline = fileIngestPipelines.poll();
- errors.addAll(pipeline.shutDown());
- }
- fileTasksProgress.finish();
- dataSourceTasksProgress.finish();
- ingestJobsById.remove(id);
- if (!errors.isEmpty()) {
- logIngestModuleErrors(errors);
- }
- IngestManager.getInstance().fireIngestJobCompleted(id);
+ void shutDown() {
+ List<IngestModuleError> errors = new ArrayList<>();
+ while (!dataSourceIngestPipelines.isEmpty()) {
+ DataSourceIngestPipeline pipeline = dataSourceIngestPipelines.poll();
+ errors.addAll(pipeline.shutDown());
+ }
+ while (!fileIngestPipelines.isEmpty()) {
+ FileIngestPipeline pipeline = fileIngestPipelines.poll();
+ errors.addAll(pipeline.shutDown());
+ }
+ fileTasksProgress.finish();
+ dataSourceTasksProgress.finish();
+ if (!errors.isEmpty()) {
+ logIngestModuleErrors(errors);
}
}
@@ -225,7 +208,8 @@ final class IngestJob {
void cancel() {
cancelled = true;
- fileTasksProgress.finish(); // RJCTODO: What about the other progress bar?
+ fileTasksProgress.finish();
+ dataSourceTasksProgress.finish();
IngestManager.getInstance().fireIngestJobCancelled(id);
}
}
diff --git a/Core/src/org/sleuthkit/autopsy/ingest/IngestJobContext.java b/Core/src/org/sleuthkit/autopsy/ingest/IngestJobContext.java
index 968f72a7f8..735f39bc28 100755
--- a/Core/src/org/sleuthkit/autopsy/ingest/IngestJobContext.java
+++ b/Core/src/org/sleuthkit/autopsy/ingest/IngestJobContext.java
@@ -60,7 +60,7 @@ public final class IngestJobContext {
*/
public void addFiles(List<AbstractFile> files) {
for (AbstractFile file : files) {
- IngestJobScheduler.getInstance().addFileToIngestJob(ingestJob, file);
+ IngestScheduler.getInstance().addFileToIngestJob(ingestJob, file);
}
}
}
diff --git a/Core/src/org/sleuthkit/autopsy/ingest/IngestManager.java b/Core/src/org/sleuthkit/autopsy/ingest/IngestManager.java
index d09783f0d1..b5674ab698 100644
--- a/Core/src/org/sleuthkit/autopsy/ingest/IngestManager.java
+++ b/Core/src/org/sleuthkit/autopsy/ingest/IngestManager.java
@@ -48,23 +48,23 @@ public class IngestManager {
private static final int MAX_NUMBER_OF_DATA_SOURCE_INGEST_THREADS = 1;
private static final String NUMBER_OF_FILE_INGEST_THREADS_KEY = "NumberOfFileingestThreads"; //NON-NLS
private static final int MIN_NUMBER_OF_FILE_INGEST_THREADS = 1;
- private static final int MAX_NUMBER_OF_FILE_INGEST_THREADS = 4;
+ private static final int MAX_NUMBER_OF_FILE_INGEST_THREADS = 16;
private static final int DEFAULT_NUMBER_OF_FILE_INGEST_THREADS = 2;
private static final Logger logger = Logger.getLogger(IngestManager.class.getName());
private static final Preferences userPreferences = NbPreferences.forModule(IngestManager.class);
private static final IngestManager instance = new IngestManager();
private final PropertyChangeSupport ingestJobEventPublisher = new PropertyChangeSupport(IngestManager.class);
private final PropertyChangeSupport ingestModuleEventPublisher = new PropertyChangeSupport(IngestManager.class);
- private final IngestJobScheduler scheduler = IngestJobScheduler.getInstance();
+ private final IngestScheduler scheduler = IngestScheduler.getInstance();
private final IngestMonitor ingestMonitor = new IngestMonitor();
private final ExecutorService startIngestJobsThreadPool = Executors.newSingleThreadExecutor();
private final ExecutorService dataSourceIngestThreadPool = Executors.newSingleThreadExecutor();
private final ExecutorService fileIngestThreadPool = Executors.newFixedThreadPool(MAX_NUMBER_OF_FILE_INGEST_THREADS);
private final ExecutorService fireIngestEventsThreadPool = Executors.newSingleThreadExecutor();
+ private final AtomicLong nextThreadId = new AtomicLong(0L);
private final ConcurrentHashMap<Long, Future<Void>> startIngestJobThreads = new ConcurrentHashMap<>(); // Maps thread ids to cancellation handles.
private final ConcurrentHashMap<Long, Future<Void>> dataSourceIngestThreads = new ConcurrentHashMap<>(); // Maps thread ids to cancellation handles.
private final ConcurrentHashMap<Long, Future<Void>> fileIngestThreads = new ConcurrentHashMap<>(); // Maps thread ids to cancellation handles.
- private final AtomicLong nextThreadId = new AtomicLong(0L);
private volatile IngestMessageTopComponent ingestMessageBox;
/**
@@ -186,8 +186,8 @@ public class IngestManager {
}
long taskId = nextThreadId.incrementAndGet();
- Future<?> task = startIngestJobsThreadPool.submit(new StartIngestJobsThread(taskId, dataSources, moduleTemplates, processUnallocatedSpace));
- fileIngestThreads.put(taskId, task);
+ Future<Void> task = startIngestJobsThreadPool.submit(new StartIngestJobsThread(taskId, dataSources, moduleTemplates, processUnallocatedSpace));
+ startIngestJobThreads.put(taskId, task);
if (ingestMessageBox != null) {
ingestMessageBox.restoreMessages();
@@ -218,7 +218,6 @@ public class IngestManager {
logger.log(Level.SEVERE, "Unexpected thread interrupt", ex);
}
}
- startIngestJobThreads.clear(); // Make sure.
// Cancel all the jobs already created. This will make the the ingest
// threads flush out any lingering ingest tasks without processing them.
@@ -511,6 +510,7 @@ public class IngestManager {
try {
IngestTask task = tasks.getNextTask(); // Blocks.
task.execute();
+ scheduler.ingestTaskIsCompleted(task);
} catch (InterruptedException ex) {
break;
}
diff --git a/Core/src/org/sleuthkit/autopsy/ingest/IngestJobScheduler.java b/Core/src/org/sleuthkit/autopsy/ingest/IngestScheduler.java
similarity index 79%
rename from Core/src/org/sleuthkit/autopsy/ingest/IngestJobScheduler.java
rename to Core/src/org/sleuthkit/autopsy/ingest/IngestScheduler.java
index 0141f3587f..6bad7b6ab7 100755
--- a/Core/src/org/sleuthkit/autopsy/ingest/IngestJobScheduler.java
+++ b/Core/src/org/sleuthkit/autopsy/ingest/IngestScheduler.java
@@ -32,35 +32,31 @@ import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.sleuthkit.datamodel.AbstractFile;
import org.sleuthkit.datamodel.Content;
-import org.sleuthkit.datamodel.DerivedFile;
-import org.sleuthkit.datamodel.Directory;
import org.sleuthkit.datamodel.File;
import org.sleuthkit.datamodel.FileSystem;
-import org.sleuthkit.datamodel.LayoutFile;
-import org.sleuthkit.datamodel.LocalFile;
import org.sleuthkit.datamodel.TskCoreException;
import org.sleuthkit.datamodel.TskData;
-import org.sleuthkit.datamodel.VirtualDirectory;
-final class IngestJobScheduler {
+final class IngestScheduler {
- private static final Logger logger = Logger.getLogger(IngestJobScheduler.class.getName());
+ private static final IngestScheduler instance = new IngestScheduler();
+ private static final Logger logger = Logger.getLogger(IngestScheduler.class.getName());
private static final int FAT_NTFS_FLAGS = TskData.TSK_FS_TYPE_ENUM.TSK_FS_TYPE_FAT12.getValue() | TskData.TSK_FS_TYPE_ENUM.TSK_FS_TYPE_FAT16.getValue() | TskData.TSK_FS_TYPE_ENUM.TSK_FS_TYPE_FAT32.getValue() | TskData.TSK_FS_TYPE_ENUM.TSK_FS_TYPE_NTFS.getValue();
- private static IngestJobScheduler instance = new IngestJobScheduler();
- private final AtomicLong nextIngestJobId = new AtomicLong(0L);
private final ConcurrentHashMap<Long, IngestJob> ingestJobsById = new ConcurrentHashMap<>();
private final LinkedBlockingQueue<DataSourceIngestTask> dataSourceTasks = new LinkedBlockingQueue<>();
- private final TreeSet<FileIngestTask> rootDirectoryTasks = new TreeSet<>(new RootDirectoryTaskComparator());
- private final List<FileIngestTask> directoryTasks = new ArrayList<>();
- private final LinkedBlockingQueue<FileIngestTask> fileTasks = new LinkedBlockingQueue<>();
- private final DataSourceIngestTaskQueue dataSourceIngestTaskQueue = new DataSourceIngestTaskQueue();
- private final FileIngestTaskQueue fileIngestTaskQueue = new FileIngestTaskQueue();
+ private final TreeSet<FileIngestTask> rootDirectoryTasks = new TreeSet<>(new RootDirectoryTaskComparator()); // Guarded by this
+ private final List<FileIngestTask> directoryTasks = new ArrayList<>(); // Guarded by this
+ private final LinkedBlockingQueue<FileIngestTask> fileTasks = new LinkedBlockingQueue<>(); // Guarded by this
+ private final List<IngestTask> tasksInProgress = new ArrayList<>(); // Guarded by this
+ private final DataSourceIngestTaskQueue dataSourceTaskDispenser = new DataSourceIngestTaskQueue();
+ private final FileIngestTaskQueue fileTaskDispenser = new FileIngestTaskQueue();
+ private final AtomicLong nextIngestJobId = new AtomicLong(0L);
- static IngestJobScheduler getInstance() {
+ static IngestScheduler getInstance() {
return instance;
}
- private IngestJobScheduler() {
+ private IngestScheduler() {
}
/**
@@ -74,7 +70,7 @@ final class IngestJobScheduler {
* @return A collection of ingest module start up errors, empty on success.
* @throws InterruptedException
*/
- synchronized List<IngestModuleError> startIngestJob(Content dataSource, List<IngestModuleTemplate> ingestModuleTemplates, boolean processUnallocatedSpace) throws InterruptedException {
+ List<IngestModuleError> startIngestJob(Content dataSource, List<IngestModuleTemplate> ingestModuleTemplates, boolean processUnallocatedSpace) throws InterruptedException {
long jobId = nextIngestJobId.incrementAndGet();
IngestJob job = new IngestJob(jobId, dataSource, ingestModuleTemplates, processUnallocatedSpace);
ingestJobsById.put(jobId, job);
@@ -99,10 +95,11 @@ final class IngestJobScheduler {
}
synchronized void addDataSourceToIngestJob(IngestJob job, Content dataSource) throws InterruptedException {
+ // Enqueue a data source ingest task for the data source.
// If the thread executing this code is interrupted, it is because the
// number of ingest threads has been decreased while ingest jobs are
// running. The calling thread will exit in an orderly fashion, but the
- // task still needs to be enqueued rather than lost.
+ // task still needs to be enqueued rather than lost, hence the loop.
DataSourceIngestTask task = new DataSourceIngestTask(job, dataSource);
while (true) {
try {
@@ -115,24 +112,27 @@ final class IngestJobScheduler {
}
}
+ // Get the top level files of the data source.
Collection<AbstractFile> rootObjects = dataSource.accept(new GetRootDirectoryVisitor());
- List<AbstractFile> firstLevelFiles = new ArrayList<>();
+ List<AbstractFile> topLevelFiles = new ArrayList<>();
if (rootObjects.isEmpty() && dataSource instanceof AbstractFile) {
- // The data source is file.
- firstLevelFiles.add((AbstractFile) dataSource);
+ // The data source is itself a file.
+ topLevelFiles.add((AbstractFile) dataSource);
} else {
for (AbstractFile root : rootObjects) {
List<Content> children;
try {
children = root.getChildren();
if (children.isEmpty()) {
- //add the root itself, could be unalloc file, child of volume or image
- firstLevelFiles.add(root);
+ // Add the root object itself, it could be an unallocated space
+ // file, or a child of a volume or an image.
+ topLevelFiles.add(root);
} else {
- //root for fs root dir, schedule children dirs/files
+ // The root object is a file system root directory, get
+ // the files within it.
for (Content child : children) {
if (child instanceof AbstractFile) {
- firstLevelFiles.add((AbstractFile) child);
+ topLevelFiles.add((AbstractFile) child);
}
}
}
@@ -141,26 +141,30 @@ final class IngestJobScheduler {
}
}
}
- for (AbstractFile firstLevelFile : firstLevelFiles) {
+
+ // Enqueue file ingest tasks for the top level files.
+ for (AbstractFile firstLevelFile : topLevelFiles) {
FileIngestTask fileTask = new FileIngestTask(job, firstLevelFile);
if (shouldEnqueueFileTask(fileTask)) {
rootDirectoryTasks.add(fileTask);
- fileTask.getIngestJob().notifyTaskAdded();
}
}
- // Reshuffle/update the dir and file level queues if needed
- updateFileTaskQueues();
+ updateFileTaskQueues(null);
}
- synchronized void addFileToIngestJob(IngestJob job, AbstractFile file) { // RJCTODO: Just one at a time?
+ void addFileToIngestJob(IngestJob job, AbstractFile file) {
FileIngestTask task = new FileIngestTask(job, file);
if (shouldEnqueueFileTask(task)) {
addTaskToFileQueue(task);
- }
+ }
}
- private synchronized void updateFileTaskQueues() throws InterruptedException {
+ private synchronized void updateFileTaskQueues(FileIngestTask taskInProgress) throws InterruptedException {
+ if (taskInProgress != null) {
+ tasksInProgress.add(taskInProgress);
+ }
+
// we loop because we could have a directory that has all files
// that do not get enqueued
while (true) {
@@ -194,7 +198,6 @@ final class IngestJobScheduler {
FileIngestTask childTask = new FileIngestTask(parentTask.getIngestJob(), childFile);
if (childFile.hasChildren()) {
directoryTasks.add(childTask);
- childTask.getIngestJob().notifyTaskAdded();
} else if (shouldEnqueueFileTask(childTask)) {
addTaskToFileQueue(childTask);
}
@@ -205,7 +208,7 @@ final class IngestJobScheduler {
}
}
}
-
+
private void addTaskToFileQueue(FileIngestTask task) {
// If the thread executing this code is interrupted, it is because the
// number of ingest threads has been decreased while ingest jobs are
@@ -222,14 +225,7 @@ final class IngestJobScheduler {
}
}
}
-
- /**
- * Check if the file is a special file that we should skip
- *
- * @param processTask a task whose file to check if should be queued of
- * skipped
- * @return true if should be enqueued, false otherwise
- */
+
private static boolean shouldEnqueueFileTask(final FileIngestTask processTask) {
final AbstractFile aFile = processTask.getFile();
//if it's unalloc file, skip if so scheduled
@@ -282,68 +278,52 @@ final class IngestJobScheduler {
}
IngestTaskQueue getDataSourceIngestTaskQueue() {
- return dataSourceIngestTaskQueue;
+ return dataSourceTaskDispenser;
}
IngestTaskQueue getFileIngestTaskQueue() {
- return fileIngestTaskQueue;
+ return fileTaskDispenser;
}
- /**
- * Finds top level objects such as file system root directories, layout
- * files and virtual directories.
- */
- private static class GetRootDirectoryVisitor extends GetFilesContentVisitor {
-
- @Override
- public Collection visit(VirtualDirectory ld) {
- //case when we hit a layout directoryor local file container, not under a real FS
- //or when root virt dir is scheduled
- Collection ret = new ArrayList<>();
- ret.add(ld);
- return ret;
+ void ingestTaskIsCompleted(IngestTask completedTask) {
+ if (ingestJobIsCompleted(completedTask)) {
+ IngestJob job = completedTask.getIngestJob();
+ job.shutDown();
+ ingestJobsById.remove(job.getId());
+ IngestManager.getInstance().fireIngestJobCompleted(job.getId());
}
+ }
- @Override
- public Collection visit(LayoutFile lf) {
- //case when we hit a layout file, not under a real FS
- Collection ret = new ArrayList<>();
- ret.add(lf);
- return ret;
+ private synchronized boolean ingestJobIsCompleted(IngestTask completedTask) {
+ tasksInProgress.remove(completedTask);
+ IngestJob job = completedTask.getIngestJob();
+ long jobId = job.getId();
+ for (IngestTask task : tasksInProgress) {
+ if (task.getIngestJob().getId() == jobId) {
+ return false;
+ }
}
-
- @Override
- public Collection visit(Directory drctr) {
- //we hit a real directory, a child of real FS
- Collection ret = new ArrayList<>();
- ret.add(drctr);
- return ret;
+ for (FileIngestTask task : fileTasks) {
+ if (task.getIngestJob().getId() == jobId) {
+ return false;
+ }
}
-
- @Override
- public Collection visit(FileSystem fs) {
- return getAllFromChildren(fs);
+ for (FileIngestTask task : directoryTasks) {
+ if (task.getIngestJob().getId() == jobId) {
+ return false;
+ }
}
-
- @Override
- public Collection visit(File file) {
- //can have derived files
- return getAllFromChildren(file);
+ for (FileIngestTask task : rootDirectoryTasks) {
+ if (task.getIngestJob().getId() == jobId) {
+ return false;
+ }
}
-
- @Override
- public Collection visit(DerivedFile derivedFile) {
- //can have derived files
- //TODO test this and overall scheduler with derived files
- return getAllFromChildren(derivedFile);
- }
-
- @Override
- public Collection visit(LocalFile localFile) {
- //can have local files
- //TODO test this and overall scheduler with local files
- return getAllFromChildren(localFile);
+ for (DataSourceIngestTask task : dataSourceTasks) {
+ if (task.getIngestJob().getId() == jobId) {
+ return false;
+ }
}
+ return true;
}
private static class RootDirectoryTaskComparator implements Comparator<FileIngestTask> {
@@ -455,7 +435,7 @@ final class IngestJobScheduler {
@Override
public IngestTask getNextTask() throws InterruptedException {
FileIngestTask task = fileTasks.take();
- updateFileTaskQueues();
+ updateFileTaskQueues(task);
return task;
}
}
diff --git a/Core/src/org/sleuthkit/autopsy/ingest/IngestTask.java b/Core/src/org/sleuthkit/autopsy/ingest/IngestTask.java
index 1133be1210..414128fd5f 100755
--- a/Core/src/org/sleuthkit/autopsy/ingest/IngestTask.java
+++ b/Core/src/org/sleuthkit/autopsy/ingest/IngestTask.java
@@ -18,6 +18,17 @@
*/
package org.sleuthkit.autopsy.ingest;
-interface IngestTask {
- void execute() throws InterruptedException;
+abstract class IngestTask {
+
+ private final IngestJob job;
+
+ IngestTask(IngestJob job) {
+ this.job = job;
+ }
+
+ IngestJob getIngestJob() {
+ return job;
+ }
+
+ abstract void execute() throws InterruptedException;
}