mirror of https://github.com/overcuriousity/autopsy-flatpak.git (synced 2025-07-17 18:17:43 +00:00)
Improve IngestJob/IngestScheduler interaction
This commit is contained in:
parent 5130353ef3
commit f8d26589e0
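
What changed, in brief: the scheduler's blocking retry loops around queue insertion (including the addTaskToFileQueue helper) are replaced with single put() calls that restore the thread's interrupt status, log at FINE, and return, and a new removeTasksForIngestJob() lets a job withdraw its still-queued tasks. A minimal, self-contained sketch of the new enqueue pattern; the queue field and task type here are illustrative stand-ins, not the actual Autopsy classes:

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.logging.Level;
import java.util.logging.Logger;

final class EnqueueSketch {

    private static final Logger logger = Logger.getLogger(EnqueueSketch.class.getName());

    // Stand-in for the scheduler's fileTasks/dataSourceTasks blocking queues.
    private final BlockingQueue<Runnable> tasks = new LinkedBlockingQueue<>();

    void addTask(Runnable task) {
        try {
            tasks.put(task);
        } catch (InterruptedException ex) {
            // Restore the interrupt flag so the calling thread's own shutdown
            // check still sees it, then stop scheduling instead of retrying.
            Thread.currentThread().interrupt();
            logger.log(Level.FINE, "Task scheduling interrupted", ex);
        }
    }
}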
@@ -68,7 +68,7 @@ final class IngestJob {
         List<IngestModuleError> errors = job.start();
         if (errors.isEmpty()) {
             IngestManager.getInstance().fireIngestJobStarted(jobId);
-            taskScheduler.scheduleTasksForIngestJob(job, dataSource);
+            taskScheduler.addTasksForIngestJob(job, dataSource);
         } else {
             ingestJobsById.remove(jobId);
         }
@@ -185,14 +185,16 @@ final class IngestJob {
             if (!errors.isEmpty()) {
                 logIngestModuleErrors(errors);
             }
+        } else {
+            taskScheduler.removeTasksForIngestJob(id);
         }

         // Because there is only one data source task per job, it is o.k. to
         // call ProgressHandle.finish() now that the data source ingest modules
-        // are through using it via the DataSourceIngestModuleProgress wrapper.
+        // are through using the progress bar via the DataSourceIngestModuleProgress wrapper.
         // Calling ProgressHandle.finish() again in finish() will be harmless.
         dataSourceTasksProgress.finish();

         if (taskScheduler.isLastTaskForIngestJob(task)) {
             finish();
         }
@@ -216,7 +218,10 @@ final class IngestJob {
             if (!errors.isEmpty()) {
                 logIngestModuleErrors(errors);
             }
+        } else {
+            taskScheduler.removeTasksForIngestJob(id);
         }
+
         if (taskScheduler.isLastTaskForIngestJob(task)) {
             finish();
         }
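
Both IngestJob hunks add the same error-path cleanup (the new else branch pairs with a conditional outside the hunk) and end with the same completion hand-off: whichever ingest thread finishes the job's last task calls finish(). A sketch of that hand-off, using a per-job pending-task counter as an illustrative stand-in for the scheduler's isLastTaskForIngestJob() bookkeeping:

import java.util.concurrent.atomic.AtomicLong;

final class JobCompletionSketch {

    // Hypothetical counter; the real scheduler answers isLastTaskForIngestJob()
    // by inspecting its queues and in-progress set.
    private final AtomicLong pendingTasks = new AtomicLong();

    void taskScheduled() {
        pendingTasks.incrementAndGet();
    }

    // Called by each ingest thread after completing a task; the thread that
    // drops the count to zero finalizes the job, exactly once.
    void taskCompleted() {
        if (pendingTasks.decrementAndGet() == 0) {
            finish();
        }
    }

    private void finish() {
        // Release progress handles, fire a job-completed event, etc.
    }
}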
@@ -56,22 +56,15 @@ final class IngestScheduler {
     private IngestScheduler() {
     }

-    synchronized void scheduleTasksForIngestJob(IngestJob job, Content dataSource) throws InterruptedException {
+    synchronized void addTasksForIngestJob(IngestJob job, Content dataSource) throws InterruptedException {
         // Enqueue a data source ingest task for the data source.
-        // If the thread executing this code is interrupted, tasksInProgressIterator is because the
-        // the number of ingest threads has been decreased while ingest jobs are
-        // running. The calling thread will exit in an orderly fashion, but the
-        // task still needs to be enqueued rather than lost, hence the loop.
         DataSourceIngestTask task = new DataSourceIngestTask(job, dataSource);
-        while (true) {
-            try {
-                dataSourceTasks.put(task);
-                break;
-            } catch (InterruptedException ex) {
-                // Reset the interrupted status of the thread so the orderly
-                // exit can occur in the intended place.
-                Thread.currentThread().interrupt();
-            }
+        try {
+            dataSourceTasks.put(task);
+        } catch (InterruptedException ex) {
+            Thread.currentThread().interrupt();
+            logger.log(Level.FINE, "Task scheduling for ingest job interrupted", ex); //NON-NLS
+            return;
         }

         // Get the top level files of the data source.
@@ -86,12 +79,12 @@ final class IngestScheduler {
             try {
                 children = root.getChildren();
                 if (children.isEmpty()) {
-                    // Add the root object itself, tasksInProgressIterator could be an unallocated space
+                    // Add the root object itself, it could be an unallocated space
                     // file, or a child of a volume or an image.
                     toptLevelFiles.add(root);
                 } else {
                     // The root object is a file system root directory, get
-                    // the files within tasksInProgressIterator.
+                    // the files within it.
                     for (Content child : children) {
                         if (child instanceof AbstractFile) {
                             toptLevelFiles.add((AbstractFile) child);
@@ -118,10 +111,43 @@ final class IngestScheduler {
     void addFileTaskToIngestJob(IngestJob job, AbstractFile file) {
         FileIngestTask task = new FileIngestTask(job, file);
         if (shouldEnqueueFileTask(task)) {
-            addTaskToFileQueue(task);
+            try {
+                fileTasks.put(task);
+            } catch (InterruptedException ex) {
+                Thread.currentThread().interrupt();
+                logger.log(Level.FINE, "Task scheduling for ingest job interrupted", ex); //NON-NLS
+            }
         }
     }

+    synchronized void removeTasksForIngestJob(long ingestJobId) {
+        // Remove all tasks for this ingest job that are not in progress.
+        Iterator<FileIngestTask> fileTasksIterator = fileTasks.iterator();
+        while (fileTasksIterator.hasNext()) {
+            if (fileTasksIterator.next().getIngestJob().getId() == ingestJobId) {
+                fileTasksIterator.remove();
+            }
+        }
+        Iterator<FileIngestTask> directoryTasksIterator = directoryTasks.iterator();
+        while (directoryTasksIterator.hasNext()) {
+            if (directoryTasksIterator.next().getIngestJob().getId() == ingestJobId) {
+                directoryTasksIterator.remove();
+            }
+        }
+        Iterator<FileIngestTask> rootDirectoryTasksIterator = rootDirectoryTasks.iterator();
+        while (rootDirectoryTasksIterator.hasNext()) {
+            if (rootDirectoryTasksIterator.next().getIngestJob().getId() == ingestJobId) {
+                rootDirectoryTasksIterator.remove();
+            }
+        }
+        Iterator<DataSourceIngestTask> dataSourceTasksIterator = dataSourceTasks.iterator();
+        while (dataSourceTasksIterator.hasNext()) {
+            if (dataSourceTasksIterator.next().getIngestJob().getId() == ingestJobId) {
+                dataSourceTasksIterator.remove();
+            }
+        }
+    }
+
     private synchronized void updateFileTaskQueues(FileIngestTask taskInProgress) throws InterruptedException {
         if (taskInProgress != null) {
             tasksInProgress.add(taskInProgress);
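
The new removeTasksForIngestJob() uses explicit Iterators because Iterator.remove() is the only safe way to drop elements mid-traversal; tasks already claimed by ingest threads are unaffected, which is why the comment limits removal to tasks "not in progress". The four loops differ only in the queue they drain; a hypothetical generic helper (not part of this commit) shows the shape:

import java.util.Collection;
import java.util.Iterator;

final class JobTaskRemovalSketch {

    // Assumed accessor shape, mirroring task.getIngestJob().getId() in the diff.
    interface JobTask {
        long getJobId();
    }

    // Works for any pending-task collection, including LinkedBlockingQueue,
    // whose iterator is weakly consistent and supports remove().
    static void removeTasksForJob(Collection<? extends JobTask> queue, long ingestJobId) {
        Iterator<? extends JobTask> tasks = queue.iterator();
        while (tasks.hasNext()) {
            if (tasks.next().getJobId() == ingestJobId) {
                tasks.remove();
            }
        }
    }
}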
@@ -134,7 +160,7 @@ final class IngestScheduler {
         if (fileTasks.isEmpty() == false) {
             return;
         }
-        // fill in the directory queue if tasksInProgressIterator is empty.
+        // fill in the directory queue if it is empty.
         if (this.directoryTasks.isEmpty()) {
             // bail out if root is also empty -- we are done
             if (rootDirectoryTasks.isEmpty()) {
@@ -149,7 +175,13 @@ final class IngestScheduler {
                 final AbstractFile parentFile = parentTask.getFile();
                 // add itself to the file list
                 if (shouldEnqueueFileTask(parentTask)) {
-                    addTaskToFileQueue(parentTask);
+                    try {
+                        fileTasks.put(parentTask);
+                    } catch (InterruptedException ex) {
+                        Thread.currentThread().interrupt();
+                        logger.log(Level.FINE, "Task scheduling for ingest job interrupted", ex); //NON-NLS
+                        return;
+                    }
                 }
                 // add its children to the file and directory lists
                 try {
@@ -161,7 +193,13 @@ final class IngestScheduler {
                         if (childFile.hasChildren()) {
                             directoryTasks.add(childTask);
                         } else if (shouldEnqueueFileTask(childTask)) {
-                            addTaskToFileQueue(childTask);
+                            try {
+                                fileTasks.put(childTask);
+                            } catch (InterruptedException ex) {
+                                Thread.currentThread().interrupt();
+                                logger.log(Level.FINE, "Task scheduling for ingest job interrupted", ex); //NON-NLS
+                                return;
+                            }
                         }
                     }
                 }
@@ -171,26 +209,9 @@ final class IngestScheduler {
             }
         }
     }

-    private void addTaskToFileQueue(FileIngestTask task) {
-        // If the thread executing this code is interrupted, tasksInProgressIterator is because the
-        // the number of ingest threads has been decreased while ingest jobs are
-        // running. The calling thread will exit in an orderly fashion, but the
-        // task still needs to be enqueued rather than lost.
-        while (true) {
-            try {
-                fileTasks.put(task);
-                break;
-            } catch (InterruptedException ex) {
-                // Reset the interrupted status of the thread so the orderly
-                // exit can occur in the intended place.
-                Thread.currentThread().interrupt();
-            }
-        }
-    }
-
     private static boolean shouldEnqueueFileTask(final FileIngestTask processTask) {
         final AbstractFile aFile = processTask.getFile();
-        //if tasksInProgressIterator's unalloc file, skip if so scheduled
+        //if it's unalloc file, skip if so scheduled
         if (processTask.getIngestJob().shouldProcessUnallocatedSpace() == false && aFile.getType().equals(TskData.TSK_DB_FILES_TYPE_ENUM.UNALLOC_BLOCKS)) {
             return false;
         }