Merge branch 'develop' into 7837-tagName-okAction

Kelly Kelly 2021-08-04 13:06:00 -04:00
commit 28bd998711
85 changed files with 2944 additions and 1776 deletions

View File

@@ -247,10 +247,15 @@ AddImageWizardIngestConfigPanel.dsProcDone.errs.text=*Errors encountered in addi
AddImageWizardIngestConfigVisual.getName.text=Configure Ingest
AddImageWizardIterator.stepXofN=Step {0} of {1}
AddLocalFilesTask.localFileAdd.progress.text=Adding: {0}/{1}
-Case.getCurCase.exception.noneOpen=Cannot get the current case; there is no case open!
+Case.getCurCase.exception.noneOpen=Cannot get the current case; there is no case open\!
Case.open.msgDlg.updated.msg=Updated case database schema.\nA backup copy of the database with the following path has been made:\n {0}
Case.open.msgDlg.updated.title=Case Database Schema Update
-Case.checkImgExist.confDlg.doesntExist.msg=One of the images associated with \nthis case are missing. Would you like to search for them now?\nPreviously, the image was located at:\n{0}\nPlease note that you will still be able to browse directories and generate reports\nif you choose No, but you will not be able to view file content or run the ingest process.
+Case.checkImgExist.confDlg.doesntExist.msg=One of the images associated with \n\
+this case are missing. Would you like to search for them now?\n\
+Previously, the image was located at:\n\
+{0}\n\
+Please note that you will still be able to browse directories and generate reports\n\
+if you choose No, but you will not be able to view file content or run the ingest process.
Case.checkImgExist.confDlg.doesntExist.title=Missing Image
Case.addImg.exception.msg=Error adding image to the case
Case.updateCaseName.exception.msg=Error while trying to update the case name.
@@ -269,9 +274,12 @@ Case.GetCaseTypeGivenPath.Failure=Unable to get case type
Case.metaDataFileCorrupt.exception.msg=The case metadata file (.aut) is corrupted.
Case.deleteReports.deleteFromDiskException.log.msg=Unable to delete the report from the disk.
Case.deleteReports.deleteFromDiskException.msg=Unable to delete the report {0} from the disk.\nYou may manually delete it from {1}
-CaseDeleteAction.closeConfMsg.text=Are you sure want to close and delete this case? \nCase Name: {0}\nCase Directory: {1}
+CaseDeleteAction.closeConfMsg.text=Are you sure want to close and delete this case? \n\
+Case Name: {0}\n\
+Case Directory: {1}
CaseDeleteAction.closeConfMsg.title=Warning: Closing the Current Case
-CaseDeleteAction.msgDlg.fileInUse.msg=The delete action cannot be fully completed because the folder or file in it is open by another program.\n\nClose the folder and file and try again or you can delete the case manually.
+CaseDeleteAction.msgDlg.fileInUse.msg=The delete action cannot be fully completed because the folder or file in it is open by another program.\n\n\
+Close the folder and file and try again or you can delete the case manually.
CaseDeleteAction.msgDlg.fileInUse.title=Error: Folder In Use
CaseDeleteAction.msgDlg.caseDelete.msg=Case {0} has been deleted.
CaseOpenAction.autFilter.title={0} Case File ( {1})
@@ -303,7 +311,8 @@ NewCaseWizardAction.databaseProblem1.text=Cannot open database. Cancelling case
NewCaseWizardAction.databaseProblem2.text=Error
NewCaseWizardPanel1.validate.errMsg.invalidSymbols=The Case Name cannot contain any of the following symbols: \\ / : * ? " < > |
NewCaseWizardPanel1.validate.errMsg.dirExists=Case directory ''{0}'' already exists.
-NewCaseWizardPanel1.validate.confMsg.createDir.msg=The base directory "{0}" does not exist. \n\nDo you want to create that directory?
+NewCaseWizardPanel1.validate.confMsg.createDir.msg=The base directory "{0}" does not exist. \n\n\
+Do you want to create that directory?
NewCaseWizardPanel1.validate.confMsg.createDir.title=Create directory
NewCaseWizardPanel1.validate.errMsg.cantCreateParDir.msg=Error: Could not create case parent directory {0}
NewCaseWizardPanel1.validate.errMsg.prevCreateBaseDir.msg=Prevented from creating base directory {0}
@@ -332,6 +341,7 @@ OptionalCasePropertiesPanel.lbPointOfContactPhoneLabel.text=Phone:
OptionalCasePropertiesPanel.orgainizationPanel.border.title=Organization
RecentCases.exception.caseIdxOutOfRange.msg=Recent case index {0} is out of range.
RecentCases.getName.text=Clear Recent Cases
+# {0} - case name
RecentItems.openRecentCase.msgDlg.text=Case {0} no longer exists.
SelectDataSourceProcessorPanel.name.text=Select Data Source Type
StartupWindow.title.text=Welcome
@@ -344,6 +354,7 @@ StartupWindowProvider.openCase.noFile=Unable to open previously open case becaus
UnpackagePortableCaseDialog.title.text=Unpackage Portable Case
UnpackagePortableCaseDialog.UnpackagePortableCaseDialog.extensions=Portable case package (.zip, .zip.001)
UnpackagePortableCaseDialog.validatePaths.badExtension=File extension must be .zip or .zip.001
+# {0} - case folder
UnpackagePortableCaseDialog.validatePaths.caseFolderExists=Folder {0} already exists
UnpackagePortableCaseDialog.validatePaths.caseIsNotFile=Selected path is not a file
UnpackagePortableCaseDialog.validatePaths.caseNotFound=File does not exist
@@ -358,8 +369,8 @@ UnpackageWorker.doInBackground.previouslySeenCase=Case has been previously opene
UpdateRecentCases.menuItem.clearRecentCases.text=Clear Recent Cases
UpdateRecentCases.menuItem.empty=-Empty-
AddImageWizardIngestConfigPanel.CANCEL_BUTTON.text=Cancel
-NewCaseVisualPanel1.CaseFolderOnCDriveError.text=Warning: Path to multi-user case folder is on "C:" drive
-NewCaseVisualPanel1.CaseFolderOnInternalDriveWindowsError.text=Warning: Path to case folder is on "C:" drive. Case folder is created on the target system
+NewCaseVisualPanel1.CaseFolderOnCDriveError.text=Warning: Path to multi-user case folder is on \"C:\" drive
+NewCaseVisualPanel1.CaseFolderOnInternalDriveWindowsError.text=Warning: Path to case folder is on \"C:\" drive. Case folder is created on the target system
NewCaseVisualPanel1.CaseFolderOnInternalDriveLinuxError.text=Warning: Path to case folder is on the target system. Create case folder in mounted drive.
NewCaseVisualPanel1.uncPath.error=Error: UNC paths are not allowed for Single-User cases
CollaborationMonitor.addingDataSourceStatus.msg={0} adding data source
@@ -367,7 +378,7 @@ CollaborationMonitor.analyzingDataSourceStatus.msg={0} analyzing {1}
MissingImageDialog.lbWarning.text=
MissingImageDialog.lbWarning.toolTipText=
NewCaseVisualPanel1.caseParentDirWarningLabel.text=
-NewCaseVisualPanel1.multiUserCaseRadioButton.text=Multi-User\t\t
+NewCaseVisualPanel1.multiUserCaseRadioButton.text=Multi-User
NewCaseVisualPanel1.singleUserCaseRadioButton.text=Single-User
NewCaseVisualPanel1.caseTypeLabel.text=Case Type:
SingleUserCaseConverter.BadDatabaseFileName=Database file does not exist!

View File

@@ -530,25 +530,6 @@ public class Case {
            eventPublisher.publish(new TimelineEventAddedEvent(event));
        }

-        @SuppressWarnings("deprecation")
-        @Subscribe
-        public void publishArtifactsPostedEvent(Blackboard.ArtifactsPostedEvent event) {
-            for (BlackboardArtifact.Type artifactType : event.getArtifactTypes()) {
-                /*
-                 * IngestServices.fireModuleDataEvent is deprecated to
-                 * discourage ingest module writers from using it (they should
-                 * use org.sleuthkit.datamodel.Blackboard.postArtifact(s)
-                 * instead), but a way to publish
-                 * Blackboard.ArtifactsPostedEvents from the SleuthKit layer as
-                 * Autopsy ModuleDataEvents is still needed.
-                 */
-                IngestServices.getInstance().fireModuleDataEvent(new ModuleDataEvent(
-                        event.getModuleName(),
-                        artifactType,
-                        event.getArtifacts(artifactType)));
-            }
-        }
-
        @Subscribe
        public void publishOsAccountsAddedEvent(TskEvent.OsAccountsAddedTskEvent event) {
            hasData = true;
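
The block removed above was the back-compatibility relay its own comment describes: ingest modules are expected to post artifacts through the SleuthKit blackboard rather than call the deprecated IngestServices.fireModuleDataEvent directly. A minimal sketch of that preferred path, assuming hypothetical module code that already holds a created artifact ('artifact', 'logger', and the module name are placeholders):

    Blackboard blackboard = Case.getCurrentCaseThrows().getSleuthkitCase().getBlackboard();
    try {
        // Posting fires Blackboard.ArtifactsPostedEvent, which the relay above
        // used to republish to legacy listeners as a ModuleDataEvent.
        blackboard.postArtifact(artifact, "MyIngestModule");
    } catch (Blackboard.BlackboardException ex) {
        logger.log(Level.SEVERE, "Failed to post artifact " + artifact.getArtifactID(), ex);
    }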

View File

@@ -0,0 +1,54 @@
/*
 * Autopsy Forensic Browser
 *
 * Copyright 2021 Basis Technology Corp.
 * Contact: carrier <at> sleuthkit <dot> org
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.sleuthkit.autopsy.centralrepository.ingestmodule;

import java.util.concurrent.atomic.AtomicLong;
import org.sleuthkit.autopsy.ingest.DataArtifactIngestModule;
import org.sleuthkit.autopsy.ingest.IngestMessage;
import org.sleuthkit.autopsy.ingest.IngestServices;
import org.sleuthkit.datamodel.DataArtifact;

/**
 * RJCTODO
 *
 * NOTE TO REVIEWER:
 *
 * This is a placeholder data artifact ingest module that counts the number of
 * data artifacts it processes and posts the final count to the ingest inbox.
 * The guts of the module will be supplied by a later PR.
 */
public class CentralRepoDataArtifactIngestModule implements DataArtifactIngestModule {

    private final AtomicLong artifactCounter = new AtomicLong();

    @Override
    public ProcessResult process(DataArtifact artifact) {
        artifactCounter.incrementAndGet();
        return ProcessResult.OK;
    }

    @Override
    public void shutDown() {
        IngestServices.getInstance().postMessage(IngestMessage.createMessage(
                IngestMessage.MessageType.INFO,
                CentralRepoIngestModuleFactory.getModuleName(),
                String.format("%d data artifacts processed", artifactCounter.get()))); //NON-NLS
    }
}

View File

@@ -1,7 +1,7 @@
/*
 * Central Repository
 *
- * Copyright 2015-2018 Basis Technology Corp.
+ * Copyright 2015-2021 Basis Technology Corp.
 * Contact: carrier <at> sleuthkit <dot> org
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
@@ -26,15 +26,18 @@ import org.sleuthkit.autopsy.ingest.IngestModuleGlobalSettingsPanel;
import org.sleuthkit.autopsy.ingest.IngestModuleIngestJobSettings;
import org.sleuthkit.autopsy.centralrepository.optionspanel.GlobalSettingsPanel;
import org.sleuthkit.autopsy.coreutils.Version;
+import org.sleuthkit.autopsy.ingest.DataArtifactIngestModule;
import org.sleuthkit.autopsy.ingest.IngestModuleIngestJobSettingsPanel;
import org.sleuthkit.autopsy.ingest.NoIngestModuleIngestJobSettings;

/**
- * Factory for Central Repository ingest modules
+ * Factory for Central Repository ingest modules.
 */
@ServiceProvider(service = org.sleuthkit.autopsy.ingest.IngestModuleFactory.class)
-@NbBundle.Messages({"CentralRepoIngestModuleFactory.ingestmodule.name=Central Repository",
-        "CentralRepoIngestModuleFactory.ingestmodule.desc=Saves properties to the central repository for later correlation"})
+@NbBundle.Messages({
+    "CentralRepoIngestModuleFactory.ingestmodule.name=Central Repository",
+    "CentralRepoIngestModuleFactory.ingestmodule.desc=Saves properties to the central repository for later correlation"
+})
public class CentralRepoIngestModuleFactory extends IngestModuleFactoryAdapter {

    /**
@@ -72,12 +75,12 @@ public class CentralRepoIngestModuleFactory extends IngestModuleFactoryAdapter {
            return new CentralRepoIngestModule((IngestSettings) settings);
        }
        /*
-         * Compatibility check for older versions.
+         * Earlier versions of the modules had no ingest job settings. Create a
+         * module with the default settings.
         */
        if (settings instanceof NoIngestModuleIngestJobSettings) {
-            return new CentralRepoIngestModule(new IngestSettings());
+            return new CentralRepoIngestModule((IngestSettings) getDefaultIngestJobSettings());
        }
        throw new IllegalArgumentException("Expected settings argument to be an instance of IngestSettings");
    }
@@ -109,13 +112,23 @@ public class CentralRepoIngestModuleFactory extends IngestModuleFactoryAdapter {
            return new IngestSettingsPanel((IngestSettings) settings);
        }
        /*
-         * Compatibility check for older versions.
+         * Earlier versions of the modules had no ingest job settings. Create a
+         * panel with the default settings.
         */
        if (settings instanceof NoIngestModuleIngestJobSettings) {
-            return new IngestSettingsPanel(new IngestSettings());
+            return new IngestSettingsPanel((IngestSettings) getDefaultIngestJobSettings());
        }
        throw new IllegalArgumentException("Expected settings argument to be an instance of IngestSettings");
    }

+    @Override
+    public boolean isDataArtifactIngestModuleFactory() {
+        return true;
+    }
+
+    @Override
+    public DataArtifactIngestModule createDataArtifactIngestModule(IngestModuleIngestJobSettings settings) {
+        return new CentralRepoDataArtifactIngestModule();
+    }
}
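
For orientation, these two overrides are how the ingest framework discovers the new module type: the DataArtifactIngestPipeline introduced later in this commit checks the corresponding flag on each IngestModuleTemplate and, when it is set, asks the template for a module instance. A condensed sketch of that call site (the real code appears in the DataArtifactIngestPipeline section below):

    // From the pipeline's acceptModuleTemplate(); 'template' wraps a factory
    // such as CentralRepoIngestModuleFactory.
    if (template.isDataArtifactIngestModuleTemplate()) {
        DataArtifactIngestModule ingestModule = template.createDataArtifactIngestModule();
        // ... the module is then wrapped in a pipeline decorator ...
    }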

View File

@@ -2,7 +2,7 @@ CommandLineIngestSettingPanel_empty_report_name_mgs=Report profile name was empt
CommandLineIngestSettingPanel_existing_report_name_mgs=Report profile name was already exists, no profile created.
CommandListIngestSettingsPanel_Default_Report_DisplayName=Default
CommandListIngestSettingsPanel_Make_Config=Make new profile...
-CommandListIngestSettingsPanel_Report_Name_Msg=Please supply a report profile name:
+CommandListIngestSettingsPanel_Report_Name_Msg=Please supply a report profile name (commas not allowed):
OpenIDE-Module-Name=CommandLineAutopsy
OptionsCategory_Keywords_Command_Line_Ingest_Settings=Command Line Ingest Settings
OptionsCategory_Keywords_General=Options

View File

@ -1,7 +1,7 @@
/* /*
* Autopsy Forensic Browser * Autopsy Forensic Browser
* *
* Copyright 2019-2020 Basis Technology Corp. * Copyright 2019-2021 Basis Technology Corp.
* Contact: carrier <at> sleuthkit <dot> org * Contact: carrier <at> sleuthkit <dot> org
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
@ -62,7 +62,6 @@ import org.sleuthkit.autopsy.ingest.IngestModuleError;
import org.sleuthkit.autopsy.ingest.IngestProfiles; import org.sleuthkit.autopsy.ingest.IngestProfiles;
import org.sleuthkit.autopsy.modules.interestingitems.FilesSet; import org.sleuthkit.autopsy.modules.interestingitems.FilesSet;
import org.sleuthkit.autopsy.modules.interestingitems.FilesSetsManager; import org.sleuthkit.autopsy.modules.interestingitems.FilesSetsManager;
import org.sleuthkit.autopsy.progress.LoggingProgressIndicator;
import org.sleuthkit.autopsy.report.infrastructure.ReportGenerator; import org.sleuthkit.autopsy.report.infrastructure.ReportGenerator;
import org.sleuthkit.autopsy.report.infrastructure.ReportProgressIndicator; import org.sleuthkit.autopsy.report.infrastructure.ReportProgressIndicator;
import org.sleuthkit.datamodel.Content; import org.sleuthkit.datamodel.Content;
@ -383,8 +382,11 @@ public class CommandLineIngestManager extends CommandLineManager{
* @throws AutoIngestDataSourceProcessorException if there was a DSP * @throws AutoIngestDataSourceProcessorException if there was a DSP
* processing error. * processing error.
* *
* @throws InterruptedException running the job processing task while * @throws InterruptedException running the job
* blocking, i.e., if auto ingest is shutting down. * processing task while
* blocking, i.e., if
* auto ingest is
* shutting down.
*/ */
private void runDataSourceProcessor(Case caseForJob, AutoIngestDataSource dataSource) throws InterruptedException, AutoIngestDataSourceProcessor.AutoIngestDataSourceProcessorException { private void runDataSourceProcessor(Case caseForJob, AutoIngestDataSource dataSource) throws InterruptedException, AutoIngestDataSourceProcessor.AutoIngestDataSourceProcessorException {

View File

@ -1,7 +1,7 @@
/* /*
* Autopsy Forensic Browser * Autopsy Forensic Browser
* *
* Copyright 2019-2020 Basis Technology Corp. * Copyright 2019-2021 Basis Technology Corp.
* Contact: carrier <at> sleuthkit <dot> org * Contact: carrier <at> sleuthkit <dot> org
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
@ -280,7 +280,7 @@ public class CommandLineIngestSettingsPanel extends javax.swing.JPanel {
add(nodePanel, java.awt.BorderLayout.CENTER); add(nodePanel, java.awt.BorderLayout.CENTER);
}// </editor-fold>//GEN-END:initComponents }// </editor-fold>//GEN-END:initComponents
@Messages({ @Messages({
"CommandListIngestSettingsPanel_Report_Name_Msg=Please supply a report profile name:", "CommandListIngestSettingsPanel_Report_Name_Msg=Please supply a report profile name (commas not allowed):",
"CommandLineIngestSettingPanel_empty_report_name_mgs=Report profile name was empty, no profile created.", "CommandLineIngestSettingPanel_empty_report_name_mgs=Report profile name was empty, no profile created.",
"CommandLineIngestSettingPanel_existing_report_name_mgs=Report profile name was already exists, no profile created." "CommandLineIngestSettingPanel_existing_report_name_mgs=Report profile name was already exists, no profile created."
}) })
@ -289,6 +289,10 @@ public class CommandLineIngestSettingsPanel extends javax.swing.JPanel {
if (reportName.equals(Bundle.CommandListIngestSettingsPanel_Make_Config())) { if (reportName.equals(Bundle.CommandListIngestSettingsPanel_Make_Config())) {
reportName = JOptionPane.showInputDialog(this, Bundle.CommandListIngestSettingsPanel_Report_Name_Msg()); reportName = JOptionPane.showInputDialog(this, Bundle.CommandListIngestSettingsPanel_Report_Name_Msg());
// sanitize report name. Remove all commas because in CommandLineOptionProcessor we use commas
// to separate multiple report names
reportName = reportName.replaceAll(",", "");
// User hit cancel // User hit cancel
if (reportName == null) { if (reportName == null) {
return; return;

View File

@ -1,7 +1,7 @@
/* /*
* Autopsy Forensic Browser * Autopsy Forensic Browser
* *
* Copyright 2019-2020 Basis Technology Corp. * Copyright 2019-2021 Basis Technology Corp.
* Contact: carrier <at> sleuthkit <dot> org * Contact: carrier <at> sleuthkit <dot> org
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
@ -20,12 +20,15 @@ package org.sleuthkit.autopsy.commandlineingest;
import java.io.File; import java.io.File;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections; import java.util.Collections;
import java.util.HashSet; import java.util.HashSet;
import java.util.List; import java.util.List;
import java.util.Map; import java.util.Map;
import java.util.Set; import java.util.Set;
import java.util.logging.Level; import java.util.logging.Level;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.sleuthkit.autopsy.coreutils.Logger; import org.sleuthkit.autopsy.coreutils.Logger;
import org.netbeans.api.sendopts.CommandException; import org.netbeans.api.sendopts.CommandException;
import org.netbeans.spi.sendopts.Env; import org.netbeans.spi.sendopts.Env;
@ -291,7 +294,6 @@ public class CommandLineOptionProcessor extends OptionProcessor {
} }
// Add "GENERATE_REPORTS" command, if present // Add "GENERATE_REPORTS" command, if present
String reportProfile = null;
if (values.containsKey(generateReportsOption)) { if (values.containsKey(generateReportsOption)) {
// 'caseDir' must only be specified if the case is not being created during the current run // 'caseDir' must only be specified if the case is not being created during the current run
@ -300,24 +302,34 @@ public class CommandLineOptionProcessor extends OptionProcessor {
handleError("'caseDir' argument is empty"); handleError("'caseDir' argument is empty");
} }
List<String> reportProfiles;
argDirs = values.get(generateReportsOption); argDirs = values.get(generateReportsOption);
if (argDirs.length > 0) { if (argDirs.length > 0) {
reportProfile = argDirs[0]; // use custom report configuration(s)
} reportProfiles = Stream.of(argDirs[0].split(","))
.map(String::trim)
.collect(Collectors.toList());
// If the user doesn't supply an options for generateReports the if (reportProfiles == null || reportProfiles.isEmpty()) {
// argsDirs length will be 0, so if reportProfile is empty
// something is not right.
if (reportProfile != null && reportProfile.isEmpty()) {
handleError("'generateReports' argument is empty"); handleError("'generateReports' argument is empty");
} }
for (String reportProfile : reportProfiles) {
if (reportProfile.isEmpty()) {
handleError("Empty report profile name");
}
CommandLineCommand newCommand = new CommandLineCommand(CommandLineCommand.CommandType.GENERATE_REPORTS); CommandLineCommand newCommand = new CommandLineCommand(CommandLineCommand.CommandType.GENERATE_REPORTS);
newCommand.addInputValue(CommandLineCommand.InputType.CASE_FOLDER_PATH.name(), caseDir); newCommand.addInputValue(CommandLineCommand.InputType.CASE_FOLDER_PATH.name(), caseDir);
if (reportProfile != null) {
newCommand.addInputValue(CommandLineCommand.InputType.REPORT_PROFILE_NAME.name(), reportProfile); newCommand.addInputValue(CommandLineCommand.InputType.REPORT_PROFILE_NAME.name(), reportProfile);
}
commands.add(newCommand); commands.add(newCommand);
}
} else {
// use default report configuration
CommandLineCommand newCommand = new CommandLineCommand(CommandLineCommand.CommandType.GENERATE_REPORTS);
newCommand.addInputValue(CommandLineCommand.InputType.CASE_FOLDER_PATH.name(), caseDir);
commands.add(newCommand);
}
runFromCommandLine = true; runFromCommandLine = true;
} }
} }
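
With this change, the generateReports argument accepts a comma-separated list of report profile names and queues one GENERATE_REPORTS command per profile; with no argument, a single command using the default report configuration is queued. A hypothetical invocation (executable name, flag spelling, and profile names are illustrative; quoting varies by shell):

    autopsy64.exe --caseDir="C:\Cases\case1" --generateReports="profileA,profileB"

Note that the settings panel change earlier in this commit strips commas from new profile names, so the split on ',' here cannot break a legitimately created profile.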

View File

@@ -141,7 +141,7 @@ public class OsAccountViewer extends javax.swing.JPanel implements DataContentVi
    @Override
    public int isPreferred(Node node) {
-        return 5;
+        return 1;
    }

    /**

View File

@@ -3,7 +3,13 @@ Installer.closing.confirmationDialog.title=Ingest is Running
# {0} - exception message
Installer.closing.messageBox.caseCloseExceptionMessage=Error closing case: {0}
OpenIDE-Module-Display-Category=Infrastructure
-OpenIDE-Module-Long-Description=This is the core Autopsy module.\n\nThe module contains the core components needed for the bare application to run; the RCP platform, windowing GUI, sleuthkit bindings, datamodel / storage, explorer, result viewers, content viewers, ingest framework, reporting, and core tools, such as the file search.\n\nThe framework included in the module contains APIs for developing modules for ingest, viewers and reporting. The modules can be deployed as Plugins using the Autopsy plugin installer.\nThis module should not be uninstalled - without it, Autopsy will not run.\n\nFor more information, see http://www.sleuthkit.org/autopsy/
+OpenIDE-Module-Long-Description=\
+    This is the core Autopsy module.\n\n\
+    The module contains the core components needed for the bare application to run; the RCP platform, windowing GUI, sleuthkit bindings, datamodel / storage, explorer, result viewers, content viewers, ingest framework, reporting, and core tools, such as the file search.\n\n\
+    The framework included in the module contains APIs for developing modules for ingest, viewers and reporting. \
+    The modules can be deployed as Plugins using the Autopsy plugin installer.\n\
+    This module should not be uninstalled - without it, Autopsy will not run.\n\n\
+    For more information, see http://www.sleuthkit.org/autopsy/
OpenIDE-Module-Name=Autopsy-Core
OpenIDE-Module-Short-Description=Autopsy Core Module
org_sleuthkit_autopsy_core_update_center=http://sleuthkit.org/autopsy/updates.xml

View File

@@ -75,9 +75,9 @@ DataContentViewerHex.totalPageLabel.text_1=100
DataContentViewerHex.pageLabel2.text=Page
# Product Information panel
-LBL_Description=<div style="font-size: 12pt; font-family: Verdana, 'Verdana CE', Arial, 'Arial CE', 'Lucida Grande CE', lucida, 'Helvetica CE', sans-serif;">\n <b>Product Version:</b> {0} ({9}) <br><b>Sleuth Kit Version:</b> {7} <br><b>Netbeans RCP Build:</b> {8} <br> <b>Java:</b> {1}; {2}<br> <b>System:</b> {3}; {4}; {5}<br><b>Userdir:</b> {6}</div>
+LBL_Description=<div style=\"font-size: 12pt; font-family: Verdana, 'Verdana CE', Arial, 'Arial CE', 'Lucida Grande CE', lucida, 'Helvetica CE', sans-serif;\">\n <b>Product Version:</b> {0} ({9}) <br><b>Sleuth Kit Version:</b> {7} <br><b>Netbeans RCP Build:</b> {8} <br> <b>Java:</b> {1}; {2}<br> <b>System:</b> {3}; {4}; {5}<br><b>Userdir:</b> {6}</div>
Format_OperatingSystem_Value={0} version {1} running on {2}
-LBL_Copyright=<div style="font-size: 12pt; font-family: Verdana, 'Verdana CE', Arial, 'Arial CE', 'Lucida Grande CE', lucida, 'Helvetica CE', sans-serif; ">Autopsy&trade; is a digital forensics platform based on The Sleuth Kit&trade; and other tools. <br><ul><li>General Information: <a style="color: #1E2A60;" href="http://www.sleuthkit.org">http://www.sleuthkit.org</a>.</li><li>Training: <a style="color: #1E2A60;" href="https://www.autopsy.com/support/training/">https://www.autopsy.com/support/training/</a></li><li>Support: <a style="color: #1E2A60;" href="https://www.sleuthkit.org/support.php">https://www.sleuthkit.org/support.php</a></li></ul>Copyright &copy; 2003-2020. </div>
+LBL_Copyright=<div style\="font-size: 12pt; font-family: Verdana, 'Verdana CE', Arial, 'Arial CE', 'Lucida Grande CE', lucida, 'Helvetica CE', sans-serif; ">Autopsy&trade; is a digital forensics platform based on The Sleuth Kit&trade; and other tools. <br><ul><li>General Information: <a style\="color: \#1E2A60;" href\="http://www.sleuthkit.org">http://www.sleuthkit.org</a>.</li><li>Training: <a style\="color: \#1E2A60;" href\="https://www.autopsy.com/support/training/">https://www.autopsy.com/support/training/</a></li><li>Support: <a style\="color: \#1E2A60;" href\="https://www.sleuthkit.org/support.php">https://www.sleuthkit.org/support.php</a></li></ul>Copyright &copy; 2003-2020. </div>
SortChooser.dialogTitle=Choose Sort Criteria
ThumbnailViewChildren.progress.cancelling=(Cancelling)
# {0} - file name
@@ -105,7 +105,7 @@ DataResultViewerThumbnail.pageNextButton.text=
DataResultViewerThumbnail.imagesLabel.text=Images:
DataResultViewerThumbnail.imagesRangeLabel.text=-
DataResultViewerThumbnail.pageNumLabel.text=-
-DataResultViewerThumbnail.filePathLabel.text=\ 
+DataResultViewerThumbnail.filePathLabel.text=\ \ \ 
DataResultViewerThumbnail.goToPageLabel.text=Go to Page:
DataResultViewerThumbnail.goToPageField.text=
AdvancedConfigurationDialog.cancelButton.text=Cancel

View File

@@ -385,7 +385,6 @@ public class DataArtifactContentViewer extends javax.swing.JPanel implements Dat
            case DATA_ARTIFACT:
                return MORE_PREFERRED;
            // everything else is less preferred
-            case ANALYSIS_RESULT:
            default:
                return LESS_PREFERRED;
        }

View File

@@ -30,7 +30,9 @@ PlatformUtil.getProcVmUsed.sigarNotInit.msg=Cannot get virt mem used, sigar not 
PlatformUtil.getProcVmUsed.gen.msg=Cannot get virt mem used, {0}
PlatformUtil.getJvmMemInfo.usageText=JVM heap usage: {0}, JVM non-heap usage: {1}
PlatformUtil.getPhysicalMemInfo.usageText=Physical memory usage (max, total, free): {0}, {1}, {2}
-PlatformUtil.getAllMemUsageInfo.usageText={0}\n{1}\nProcess Virtual Memory: {2}
+PlatformUtil.getAllMemUsageInfo.usageText={0}\n\
+{1}\n\
+Process Virtual Memory: {2}
# {0} - file name
ReadImageTask.mesageText=Reading image: {0}
StringExtract.illegalStateException.cannotInit.msg=Unicode table not properly initialized, cannot instantiate StringExtract

View File

@@ -12,7 +12,7 @@ DateSearchPanel.changedCheckBox.text=Changed
DateSearchPanel.modifiedCheckBox.text=Modified
DateSearchPanel.jLabel1.text=to
NameSearchPanel.nameCheckBox.text=Name:
-NameSearchPanel.noteNameLabel.text=<html>*Note: Name match is case insensitive and matches any part of the file name. Regular expressions are not currently supported.</html>
+NameSearchPanel.noteNameLabel.text=<html>*Note: Name match is case insensitive and matches any part of the file name (not including parent path). Regular expressions are not currently supported.</html>
NameSearchPanel.searchTextField.text=
SizeSearchPanel.sizeCheckBox.text=Size:
NameSearchPanel.cutMenuItem.text=Cut

View File

@@ -1,7 +1,10 @@
DataSourceFilter.errorMessage.emptyDataSource=At least one data source must be selected.
DateSearchFilter.errorMessage.endDateBeforeStartDate=The end date should be after the start date.
DateSearchFilter.errorMessage.noCheckboxSelected=At least one date type checkbox must be selected.
+FileSearchPanel.cancelledSearch.text=Search Was Cancelled
FileSearchPanel.emptyNode.display.text=No results found.
+FileSearchPanel.searchingNode.display.text=Performing file search by attributes. Please wait.
+FileSearchPanel.searchingPath.text=File Search In Progress
HashSearchFilter.errorMessage.emptyHash=Hash data is empty.
HashSearchFilter.errorMessage.wrongCharacter=MD5 contains invalid hex characters.
# {0} - hash data length
@@ -16,7 +19,7 @@ KnownStatusSearchPanel.knownCheckBox.text=Known Status:
KnownStatusSearchPanel.knownBadOptionCheckBox.text=Notable
KnownStatusSearchPanel.knownOptionCheckBox.text=Known (NSRL or other)
KnownStatusSearchPanel.unknownOptionCheckBox.text=Unknown
-DateSearchFilter.noneSelectedMsg.text=At least one date type must be selected!
+DateSearchFilter.noneSelectedMsg.text=At least one date type must be selected\!
DateSearchPanel.dateCheckBox.text=Date:
DateSearchPanel.jLabel4.text=Timezone:
DateSearchPanel.createdCheckBox.text=Created
@@ -25,7 +28,7 @@ DateSearchPanel.changedCheckBox.text=Changed
DateSearchPanel.modifiedCheckBox.text=Modified
DateSearchPanel.jLabel1.text=to
NameSearchPanel.nameCheckBox.text=Name:
-NameSearchPanel.noteNameLabel.text=<html>*Note: Name match is case insensitive and matches any part of the file name. Regular expressions are not currently supported.</html>
+NameSearchPanel.noteNameLabel.text=<html>*Note: Name match is case insensitive and matches any part of the file name (not including parent path). Regular expressions are not currently supported.</html>
NameSearchPanel.searchTextField.text=
SearchNode.getName.text=Search Result
SizeSearchFilter.errorMessage.nonNegativeNumber=Input size data is a negative number.
@@ -57,7 +60,7 @@ FileSearchPanel.search.results.details=Large number of matches may impact perfor
FileSearchPanel.search.exception.noFilterSelected.msg=At least one filter must be selected.
FileSearchPanel.search.validationErr.msg=Validation Error: {0}
FileSearchPanel.emptyWhereClause.text=Invalid options, nothing to show.
-KnownStatusSearchFilter.noneSelectedMsg.text=At least one known status must be selected!
+KnownStatusSearchFilter.noneSelectedMsg.text=At least one known status must be selected\!
NameSearchFilter.emptyNameMsg.text=Must enter something for name search.
SizeSearchPanel.sizeCompareComboBox.equalTo=equal to
SizeSearchPanel.sizeCompareComboBox.greaterThan=greater than

View File

@@ -29,15 +29,19 @@ import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
+import java.util.concurrent.CancellationException;
+import java.util.concurrent.ExecutionException;
import java.util.logging.Level;
import javax.swing.JLabel;
import javax.swing.JPanel;
+import javax.swing.SwingWorker;
import javax.swing.border.EmptyBorder;
import org.openide.DialogDisplayer;
import org.openide.NotifyDescriptor;
import org.openide.nodes.Node;
import org.openide.util.NbBundle;
import org.openide.windows.TopComponent;
+import org.openide.windows.WindowManager;
import org.sleuthkit.autopsy.casemodule.Case;
import org.sleuthkit.autopsy.casemodule.NoCurrentCaseException;
import org.sleuthkit.autopsy.corecomponents.DataResultTopComponent;
@@ -56,9 +60,12 @@ import org.sleuthkit.datamodel.TskCoreException;
@SuppressWarnings("PMD.SingularField") // UI widgets cause lots of false positives
class FileSearchPanel extends javax.swing.JPanel {

+    private static final Logger logger = Logger.getLogger(FileSearchPanel.class.getName());
+    private static final long serialVersionUID = 1L;
    private final List<FileSearchFilter> filters = new ArrayList<>();
    private static int resultWindowCount = 0; //keep track of result windows so they get unique names
    private static final String EMPTY_WHERE_CLAUSE = NbBundle.getMessage(DateSearchFilter.class, "FileSearchPanel.emptyWhereClause.text");
+    private static SwingWorker<TableFilterNode, Void> searchWorker = null;

    enum EVENT {
        CHECKED
@@ -168,56 +175,102 @@ class FileSearchPanel extends javax.swing.JPanel {
     * Action when the "Search" button is pressed.
     *
     */
-    @NbBundle.Messages("FileSearchPanel.emptyNode.display.text=No results found.")
+    @NbBundle.Messages({"FileSearchPanel.emptyNode.display.text=No results found.",
+        "FileSearchPanel.searchingNode.display.text=Performing file search by attributes. Please wait.",
+        "FileSearchPanel.searchingPath.text=File Search In Progress",
+        "FileSearchPanel.cancelledSearch.text=Search Was Cancelled"})
    private void search() {
-        // change the cursor to "waiting cursor" for this operation
-        this.setCursor(Cursor.getPredefinedCursor(Cursor.WAIT_CURSOR));
+        if (searchWorker != null && searchWorker.isDone()) {
+            searchWorker.cancel(true);
+        }
        try {
            if (this.isValidSearch()) {
-                String title = NbBundle.getMessage(this.getClass(), "FileSearchPanel.search.results.title", ++resultWindowCount);
-                String pathText = NbBundle.getMessage(this.getClass(), "FileSearchPanel.search.results.pathText");
-
                // try to get the number of matches first
                Case currentCase = Case.getCurrentCaseThrows(); // get the most updated case
-                long totalMatches = 0;
-                List<AbstractFile> contentList = null;
-                try {
-                    SleuthkitCase tskDb = currentCase.getSleuthkitCase();
-                    contentList = tskDb.findAllFilesWhere(this.getQuery());
-                } catch (TskCoreException ex) {
-                    Logger logger = Logger.getLogger(this.getClass().getName());
-                    logger.log(Level.WARNING, "Error while trying to get the number of matches.", ex); //NON-NLS
-                }
-
-                if (contentList == null) {
-                    contentList = Collections.<AbstractFile>emptyList();
-                }
-
-                SearchNode sn = new SearchNode(contentList);
-                TableFilterNode tableFilterNode = new TableFilterNode(sn, true, sn.getName());
-                final TopComponent searchResultWin;
-                if (contentList.isEmpty()) {
-                    Node emptyNode = new TableFilterNode(new EmptyNode(Bundle.FileSearchPanel_emptyNode_display_text()), true);
-                    searchResultWin = DataResultTopComponent.createInstance(title, pathText,
-                            emptyNode, 0);
-                } else {
-                    searchResultWin = DataResultTopComponent.createInstance(title, pathText,
-                            tableFilterNode, contentList.size());
-                }
-                searchResultWin.requestActive(); // make it the active top component
-
-                /**
-                 * If total matches more than 1000, pop up a dialog box that say
-                 * the performance maybe be slow and to increase the
-                 * performance, tell the users to refine their search.
-                 */
-                if (totalMatches > 10000) {
-                    // show info
-                    String msg = NbBundle.getMessage(this.getClass(), "FileSearchPanel.search.results.msg", totalMatches);
-                    String details = NbBundle.getMessage(this.getClass(), "FileSearchPanel.search.results.details");
-                    MessageNotifyUtil.Notify.info(msg, details);
-                }
+                Node emptyNode = new TableFilterNode(new EmptyNode(Bundle.FileSearchPanel_searchingNode_display_text()), true);
+                String title = NbBundle.getMessage(this.getClass(), "FileSearchPanel.search.results.title", ++resultWindowCount);
+                String pathText = Bundle.FileSearchPanel_searchingPath_text();
+                final DataResultTopComponent searchResultWin = DataResultTopComponent.createInstance(title, pathText,
+                        emptyNode, 0);
+                searchResultWin.requestActive(); // make it the active top component
+                searchResultWin.setCursor(Cursor.getPredefinedCursor(Cursor.WAIT_CURSOR));
+
+                searchWorker = new SwingWorker<TableFilterNode, Void>() {
+                    List<AbstractFile> contentList = null;
+
+                    @Override
+                    protected TableFilterNode doInBackground() throws Exception {
+                        try {
+                            SleuthkitCase tskDb = currentCase.getSleuthkitCase();
+                            contentList = tskDb.findAllFilesWhere(getQuery());
+                        } catch (TskCoreException ex) {
+                            Logger logger = Logger.getLogger(this.getClass().getName());
+                            logger.log(Level.WARNING, "Error while trying to get the number of matches.", ex); //NON-NLS
+                        }
+
+                        if (contentList == null) {
+                            contentList = Collections.<AbstractFile>emptyList();
+                        }
+
+                        if (contentList.isEmpty()) {
+                            return new TableFilterNode(new EmptyNode(Bundle.FileSearchPanel_emptyNode_display_text()), true);
+                        }
+
+                        SearchNode sn = new SearchNode(contentList);
+                        return new TableFilterNode(sn, true, sn.getName());
+                    }
+
+                    @Override
+                    protected void done() {
+                        String pathText = NbBundle.getMessage(this.getClass(), "FileSearchPanel.search.results.pathText");
+                        try {
+                            TableFilterNode tableFilterNode = get();
+                            if (tableFilterNode == null) { //just incase this get() gets modified to return null or somehow can return null
+                                tableFilterNode = new TableFilterNode(new EmptyNode(Bundle.FileSearchPanel_emptyNode_display_text()), true);
+                            }
+                            if (searchResultWin != null && searchResultWin.isOpened()) {
+                                searchResultWin.setNode(tableFilterNode);
+                                searchResultWin.requestActive(); // make it the active top component
+                            }
+                            /**
+                             * If total matches more than 1000, pop up a dialog
+                             * box that say the performance maybe be slow and to
+                             * increase the performance, tell the users to
+                             * refine their search.
+                             */
+                            if (contentList.size() > 10000) {
+                                // show info
+                                String msg = NbBundle.getMessage(this.getClass(), "FileSearchPanel.search.results.msg", contentList.size());
+                                String details = NbBundle.getMessage(this.getClass(), "FileSearchPanel.search.results.details");
+                                MessageNotifyUtil.Notify.info(msg, details);
+                            }
+                        } catch (InterruptedException | ExecutionException ex) {
+                            logger.log(Level.SEVERE, "Error while performing file search by attributes", ex);
+                        } catch (CancellationException ex) {
+                            if (searchResultWin != null && searchResultWin.isOpened()) {
+                                Node emptyNode = new TableFilterNode(new EmptyNode(Bundle.FileSearchPanel_cancelledSearch_text()), true);
+                                searchResultWin.setNode(emptyNode);
+                                pathText = Bundle.FileSearchPanel_cancelledSearch_text();
+                            }
+                            logger.log(Level.INFO, "File search by attributes was cancelled", ex);
+                        } finally {
+                            if (searchResultWin != null && searchResultWin.isOpened()) {
+                                searchResultWin.setPath(pathText);
+                                searchResultWin.requestActive(); // make it the active top component
+                                searchResultWin.setCursor(Cursor.getPredefinedCursor(Cursor.DEFAULT_CURSOR));
+                            }
+                        }
+                    }
+                };
+
+                if (searchResultWin != null && searchResultWin.isOpened()) {
+                    searchResultWin.addPropertyChangeListener(new PropertyChangeListener() {
+                        @Override
+                        public void propertyChange(PropertyChangeEvent evt) {
+                            if (evt.getPropertyName().equals("tcClosed") && !searchWorker.isDone() && evt.getOldValue() == null) {
+                                searchWorker.cancel(true);
+                                logger.log(Level.INFO, "User has closed the results window while search was in progress, search will be cancelled");
+                            }
+                        }
+                    });
+                }
+                searchWorker.execute();
            } else {
                throw new FilterValidationException(
                        NbBundle.getMessage(this.getClass(), "FileSearchPanel.search.exception.noFilterSelected.msg"));
@@ -226,8 +279,6 @@ class FileSearchPanel extends javax.swing.JPanel {
            NotifyDescriptor d = new NotifyDescriptor.Message(
                    NbBundle.getMessage(this.getClass(), "FileSearchPanel.search.validationErr.msg", ex.getMessage()));
            DialogDisplayer.getDefault().notify(d);
-        } finally {
-            this.setCursor(null);
        }
    }

View File

@@ -219,7 +219,7 @@ public final class MapWaypoint extends KdTree.XYZPoint implements org.jxmapviewe
    JMenuItem[] getMenuItems() throws TskCoreException {
        List<JMenuItem> menuItems = new ArrayList<>();
        BlackboardArtifact artifact = dataModelWaypoint.getArtifact();
-        Content content = artifact.getSleuthkitCase().getContentById(artifact.getObjectID());
+        Content content = dataModelWaypoint.getContent();

        menuItems.addAll(getTimelineMenuItems(dataModelWaypoint.getArtifact()));
        menuItems.addAll(getDataModelActionFactoryMenuItems(artifact, content));

View File

@@ -28,6 +28,7 @@ import java.util.Set;
import org.sleuthkit.datamodel.AbstractFile;
import org.sleuthkit.datamodel.BlackboardArtifact;
import org.sleuthkit.datamodel.BlackboardAttribute;
+import org.sleuthkit.datamodel.Content;
import org.sleuthkit.datamodel.TskCoreException;

/**
@@ -44,6 +45,7 @@ public class Waypoint {
    final private AbstractFile image;
    final private BlackboardArtifact artifact;
    final private GeoPath parentGeoPath;
+    final private Content content;

    final private List<Waypoint.Property> propertiesList;

@@ -93,6 +95,11 @@
        this.parentGeoPath = parentGeoPath;

        propertiesList = createGeolocationProperties(attributeMap);
+
+        try {
+            content = artifact.getSleuthkitCase().getContentById(artifact.getObjectID());
+        } catch (TskCoreException ex) {
+            throw new GeoLocationDataException(String.format("Failed to get content for artifact id (%d)", artifact.getId()), ex);
+        }
    }

    /**
@@ -249,6 +256,10 @@ public class Waypoint {
        return list;
    }

+    public Content getContent() {
+        return content;
+    }
+
    /**
     * Simple property class for waypoint properties that a purely
     * informational.

View File

@@ -20,6 +20,7 @@ IngestMessagePanel.totalUniqueMessagesNameVal.text=-
IngestJob.progress.dataSourceIngest.initialDisplayName=Analyzing {0}
IngestJob.progress.dataSourceIngest.displayName={0} for {1}
IngestJob.progress.fileIngest.displayName=Analyzing files from {0}
+IngestJob.progress.dataArtifactIngest.displayName=Analyzing data artifacts from {0}
IngestJob.progress.cancelling=Cancelling...
IngestJob.cancellationDialog.title=Cancel Ingest
IngestDialog.startButton.title=Start
@@ -83,14 +84,15 @@ IngestProgressSnapshotPanel.SnapshotsTableModel.colNames.jobID=Job ID
IngestJobTableModel.colName.jobID=Job ID
IngestJobTableModel.colName.dataSource=Data Source
IngestJobTableModel.colName.start=Start
-IngestJobTableModel.colName.numProcessed=Num Processed
+IngestJobTableModel.colName.numProcessed=Files Processed
IngestJobTableModel.colName.filesPerSec=Files/Sec
IngestJobTableModel.colName.inProgress=In Progress
IngestJobTableModel.colName.filesQueued=Files Queued
-IngestJobTableModel.colName.dirQueued=Dir Queued
-IngestJobTableModel.colName.rootQueued=Root Queued
-IngestJobTableModel.colName.streamingQueued=Streaming Queued
+IngestJobTableModel.colName.dirQueued=Dirs Queued
+IngestJobTableModel.colName.rootQueued=Roots Queued
+IngestJobTableModel.colName.streamingQueued=Streamed Files Queued
IngestJobTableModel.colName.dsQueued=DS Queued
+IngestJobTableModel.colName.artifactsQueued=Artifacts Queued
ModuleTableModel.colName.module=Module
ModuleTableModel.colName.duration=Duration
IngestJobSettingsPanel.jButtonSelectAll.text=Select All

View File

@@ -36,6 +36,7 @@ IngestMessagePanel.totalUniqueMessagesNameVal.text=-
IngestJob.progress.dataSourceIngest.initialDisplayName=Analyzing {0}
IngestJob.progress.dataSourceIngest.displayName={0} for {1}
IngestJob.progress.fileIngest.displayName=Analyzing files from {0}
+IngestJob.progress.dataArtifactIngest.displayName=Analyzing data artifacts from {0}
IngestJob.progress.cancelling=Cancelling...
IngestJob.cancellationDialog.title=Cancel Ingest
IngestDialog.startButton.title=Start
@@ -99,14 +100,15 @@ IngestProgressSnapshotPanel.SnapshotsTableModel.colNames.jobID=Job ID
IngestJobTableModel.colName.jobID=Job ID
IngestJobTableModel.colName.dataSource=Data Source
IngestJobTableModel.colName.start=Start
-IngestJobTableModel.colName.numProcessed=Num Processed
+IngestJobTableModel.colName.numProcessed=Files Processed
IngestJobTableModel.colName.filesPerSec=Files/Sec
IngestJobTableModel.colName.inProgress=In Progress
IngestJobTableModel.colName.filesQueued=Files Queued
-IngestJobTableModel.colName.dirQueued=Dir Queued
-IngestJobTableModel.colName.rootQueued=Root Queued
-IngestJobTableModel.colName.streamingQueued=Streaming Queued
+IngestJobTableModel.colName.dirQueued=Dirs Queued
+IngestJobTableModel.colName.rootQueued=Roots Queued
+IngestJobTableModel.colName.streamingQueued=Streamed Files Queued
IngestJobTableModel.colName.dsQueued=DS Queued
+IngestJobTableModel.colName.artifactsQueued=Artifacts Queued
ModuleTableModel.colName.module=Module
ModuleTableModel.colName.duration=Duration
IngestJobSettingsPanel.jButtonSelectAll.text=Select All
@@ -142,7 +144,7 @@ IngestJob.cancelReason.outOfDiskSpace.text=Out of disk space
IngestJob.cancelReason.servicesDown.text=Services Down
IngestJob.cancelReason.caseClosed.text=Case closed
IngestJobSettingsPanel.globalSettingsButton.text=Global Settings
-gest=
+gest
IngestJobSettingsPanel.globalSettingsButton.actionCommand=Advanced
IngestJobSettingsPanel.globalSettingsButton.text=Global Settings
IngestJobSettingsPanel.pastJobsButton.text=History

View File

@ -0,0 +1,46 @@
/*
* Autopsy Forensic Browser
*
* Copyright 2021 Basis Technology Corp.
* Contact: carrier <at> sleuthkit <dot> org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.sleuthkit.autopsy.ingest;
import org.sleuthkit.datamodel.DataArtifact;
/**
* Interface that must be implemented by all ingest modules that process data
* artifacts.
*/
public interface DataArtifactIngestModule extends IngestModule {
/**
* Processes a data artifact.
*
* IMPORTANT: In addition to returning ProcessResult.OK or
* ProcessResult.ERROR, modules should log all errors using methods provided
* by the org.sleuthkit.autopsy.coreutils.Logger class. Log messages should
* include the name and object ID of the data being processed. If an
* exception has been caught by the module, the exception should be sent to
* the Logger along with the log message so that a stack trace will appear
* in the application log.
*
* @param artifact The artifact to process.
*
* @return A result code indicating success or failure of the processing.
*/
ProcessResult process(DataArtifact artifact);
}
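
For reference, a minimal sketch of what an implementation of this new interface could look like, following the logging guidance above. The class is hypothetical, and the getDisplayName() and getId() accessors are assumptions based on the BlackboardArtifact/Content APIs that DataArtifact inherits.

package org.sleuthkit.autopsy.examples;

import java.util.logging.Level;
import org.sleuthkit.autopsy.coreutils.Logger;
import org.sleuthkit.autopsy.ingest.DataArtifactIngestModule;
import org.sleuthkit.datamodel.DataArtifact;

/*
 * A hypothetical data artifact ingest module that simply logs each artifact,
 * including the name and object ID of the data being processed.
 */
class SampleDataArtifactIngestModule implements DataArtifactIngestModule {

    private static final Logger logger = Logger.getLogger(SampleDataArtifactIngestModule.class.getName());

    @Override
    public ProcessResult process(DataArtifact artifact) {
        try {
            logger.log(Level.INFO, String.format("Processing %s (objId = %d)",
                    artifact.getDisplayName(), artifact.getId()));
            return ProcessResult.OK;
        } catch (Exception ex) {
            // Pass the exception to the Logger so a stack trace appears in
            // the application log, per the interface Javadoc.
            logger.log(Level.SEVERE, "Error processing data artifact", ex);
            return ProcessResult.ERROR;
        }
    }
}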

View File

@ -0,0 +1,90 @@
/*
* Autopsy Forensic Browser
*
* Copyright 2021 Basis Technology Corp.
* Contact: carrier <at> sleuthkit <dot> org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.sleuthkit.autopsy.ingest;
import java.util.List;
import java.util.Optional;
import org.sleuthkit.datamodel.DataArtifact;
/**
* A pipeline of data artifact ingest modules used to execute data artifact
* ingest tasks for an ingest job.
*/
final class DataArtifactIngestPipeline extends IngestTaskPipeline<DataArtifactIngestTask> {
/**
* Constructs a pipeline of data artifact ingest modules used to execute
* data artifact ingest tasks for an ingest job.
*
* @param ingestJobPipeline The ingest job pipeline that owns this ingest
* task pipeline.
* @param moduleTemplates The ingest module templates that define this
* pipeline. May be an empty list.
*/
DataArtifactIngestPipeline(IngestJobPipeline ingestJobPipeline, List<IngestModuleTemplate> moduleTemplates) {
super(ingestJobPipeline, moduleTemplates);
}
@Override
Optional<PipelineModule<DataArtifactIngestTask>> acceptModuleTemplate(IngestModuleTemplate template) {
Optional<IngestTaskPipeline.PipelineModule<DataArtifactIngestTask>> module = Optional.empty();
if (template.isDataArtifactIngestModuleTemplate()) {
DataArtifactIngestModule ingestModule = template.createDataArtifactIngestModule();
module = Optional.of(new DataArtifactIngestPipelineModule(ingestModule, template.getModuleName()));
}
return module;
}
@Override
void prepareForTask(DataArtifactIngestTask task) throws IngestTaskPipelineException {
}
@Override
void cleanUpAfterTask(DataArtifactIngestTask task) throws IngestTaskPipelineException {
}
/**
* A decorator that adds ingest infrastructure operations to a data artifact
* ingest module.
*/
static final class DataArtifactIngestPipelineModule extends IngestTaskPipeline.PipelineModule<DataArtifactIngestTask> {
private final DataArtifactIngestModule module;
/**
* Constructs a decorator that adds ingest infrastructure operations to
* a data artifact ingest module.
*
* @param module The module.
* @param displayName The display name of the module.
*/
DataArtifactIngestPipelineModule(DataArtifactIngestModule module, String displayName) {
super(module, displayName);
this.module = module;
}
@Override
void executeTask(IngestJobPipeline ingestJobPipeline, DataArtifactIngestTask task) throws IngestModuleException {
DataArtifact artifact = task.getDataArtifact();
module.process(artifact);
}
}
}

View File

@ -0,0 +1,59 @@
/*
* Autopsy Forensic Browser
*
* Copyright 2021 Basis Technology Corp.
* Contact: carrier <at> sleuthkit <dot> org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.sleuthkit.autopsy.ingest;
import org.sleuthkit.datamodel.DataArtifact;
/**
* A data artifact ingest task that will be executed by an ingest thread using a
* given ingest job pipeline.
*/
final class DataArtifactIngestTask extends IngestTask {
private final DataArtifact artifact;
/**
* Constructs a data artifact ingest task that will be executed by an ingest
* thread using a given ingest job pipeline.
*
* @param ingestJobPipeline The ingest job pipeline to use to execute the
* task.
* @param artifact The data artifact to be processed.
*/
DataArtifactIngestTask(IngestJobPipeline ingestJobPipeline, DataArtifact artifact) {
super(ingestJobPipeline);
this.artifact = artifact;
}
/**
* Gets the data artifact for this task.
*
* @return The data artifact.
*/
DataArtifact getDataArtifact() {
return artifact;
}
@Override
void execute(long threadId) {
super.setThreadId(threadId);
getIngestJobPipeline().execute(this);
}
}

View File

@ -1,7 +1,7 @@
/* /*
* Autopsy Forensic Browser * Autopsy Forensic Browser
* *
* Copyright 2014 Basis Technology Corp. * Copyright 2014-2021 Basis Technology Corp.
* Contact: carrier <at> sleuthkit <dot> org * Contact: carrier <at> sleuthkit <dot> org
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
@ -30,10 +30,19 @@ public interface DataSourceIngestModule extends IngestModule {
* Processes a data source. Called once between calls to startUp() and * Processes a data source. Called once between calls to startUp() and
* shutDown(). * shutDown().
* *
* IMPORTANT: In addition to returning ProcessResult.OK or
* ProcessResult.ERROR, modules should log all errors using methods provided
* by the org.sleuthkit.autopsy.coreutils.Logger class. Log messages should
* include the name and object ID of the data being processed. If an
* exception has been caught by the module, the exception should be sent to
* the Logger along with the log message so that a stack trace will appear
* in the application log.
*
* @param dataSource The data source to process. * @param dataSource The data source to process.
* @param progressBar A progress bar to be used to report progress. * @param progressBar A progress bar to be used to report progress.
* *
* @return A result code indicating success or failure of the processing. * @return A result code indicating success or failure of the processing.
*/ */
ProcessResult process(Content dataSource, DataSourceIngestModuleProgress progressBar); ProcessResult process(Content dataSource, DataSourceIngestModuleProgress progressBar);
} }
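
A sketch of a data source ingest module honoring this contract; the class and the elided analysis step are hypothetical, and the module relies on the default no-op startUp()/shutDown() that the deprecation notes elsewhere in this diff describe.

package org.sleuthkit.autopsy.examples;

import java.util.logging.Level;
import org.sleuthkit.autopsy.coreutils.Logger;
import org.sleuthkit.autopsy.ingest.DataSourceIngestModule;
import org.sleuthkit.autopsy.ingest.DataSourceIngestModuleProgress;
import org.sleuthkit.datamodel.Content;

class SampleDataSourceIngestModule implements DataSourceIngestModule {

    private static final Logger logger = Logger.getLogger(SampleDataSourceIngestModule.class.getName());

    @Override
    public ProcessResult process(Content dataSource, DataSourceIngestModuleProgress progressBar) {
        progressBar.switchToIndeterminate();
        try {
            // ... analyze the data source here ...
            return ProcessResult.OK;
        } catch (Exception ex) {
            // Log the name and object ID of the data being processed and pass
            // the exception along so a stack trace appears in the log.
            logger.log(Level.SEVERE, String.format("Error analyzing %s (objId = %d)",
                    dataSource.getName(), dataSource.getId()), ex);
            return ProcessResult.ERROR;
        }
    }
}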

View File

@ -1,7 +1,7 @@
/* /*
* Autopsy Forensic Browser * Autopsy Forensic Browser
* *
* Copyright 2014 Basis Technology Corp. * Copyright 2014-2021 Basis Technology Corp.
* Contact: carrier <at> sleuthkit <dot> org * Contact: carrier <at> sleuthkit <dot> org
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
@ -21,20 +21,15 @@ package org.sleuthkit.autopsy.ingest;
import org.sleuthkit.datamodel.Content; import org.sleuthkit.datamodel.Content;
/** /**
* DO NOT USE: As of Java 8, interfaces can have default methods. IngestModule
* now provides default no-op versions of startUp() and shutDown(). This class
* is no longer needed and can be DEPRECATED when convenient.
*
* An adapter that provides a no-op implementation of the startUp() method for * An adapter that provides a no-op implementation of the startUp() method for
* data source ingest modules. * data source ingest modules.
*
* NOTE: As of Java 8, interfaces can have default methods.
* DataSourceIngestModule now provides default no-op versions of startUp() and
* shutDown(). This class is no longer needed and can be deprecated when
* convenient.
*/ */
public abstract class DataSourceIngestModuleAdapter implements DataSourceIngestModule { public abstract class DataSourceIngestModuleAdapter implements DataSourceIngestModule {
@Override
public void startUp(IngestJobContext context) throws IngestModuleException {
}
@Override @Override
public abstract ProcessResult process(Content dataSource, DataSourceIngestModuleProgress progressBar); public abstract ProcessResult process(Content dataSource, DataSourceIngestModuleProgress progressBar);

View File

@ -26,7 +26,7 @@ import org.sleuthkit.autopsy.coreutils.Logger;
import org.sleuthkit.datamodel.Content; import org.sleuthkit.datamodel.Content;
/** /**
* A pipeline of data source level ingest modules for performing data source * A pipeline of data source level ingest modules for executing data source
* level ingest tasks for an ingest job. * level ingest tasks for an ingest job.
*/ */
final class DataSourceIngestPipeline extends IngestTaskPipeline<DataSourceIngestTask> { final class DataSourceIngestPipeline extends IngestTaskPipeline<DataSourceIngestTask> {
@ -57,11 +57,11 @@ final class DataSourceIngestPipeline extends IngestTaskPipeline<DataSourceIngest
} }
@Override @Override
void prepareTask(DataSourceIngestTask task) { void prepareForTask(DataSourceIngestTask task) {
} }
@Override @Override
void completeTask(DataSourceIngestTask task) { void cleanUpAfterTask(DataSourceIngestTask task) {
ingestManager.setIngestTaskProgressCompleted(task); ingestManager.setIngestTaskProgressCompleted(task);
} }
@ -83,22 +83,18 @@ final class DataSourceIngestPipeline extends IngestTaskPipeline<DataSourceIngest
} }
@Override @Override
void performTask(IngestJobPipeline ingestJobPipeline, DataSourceIngestTask task) throws IngestModuleException { void executeTask(IngestJobPipeline ingestJobPipeline, DataSourceIngestTask task) throws IngestModuleException {
Content dataSource = task.getDataSource(); Content dataSource = task.getDataSource();
String progressBarDisplayName = NbBundle.getMessage(this.getClass(), "IngestJob.progress.dataSourceIngest.displayName", getDisplayName(), dataSource.getName()); String progressBarDisplayName = NbBundle.getMessage(this.getClass(), "IngestJob.progress.dataSourceIngest.displayName", getDisplayName(), dataSource.getName());
ingestJobPipeline.updateDataSourceIngestProgressBarDisplayName(progressBarDisplayName); ingestJobPipeline.updateDataSourceIngestProgressBarDisplayName(progressBarDisplayName);
ingestJobPipeline.switchDataSourceIngestProgressBarToIndeterminate(); ingestJobPipeline.switchDataSourceIngestProgressBarToIndeterminate();
ingestManager.setIngestTaskProgress(task, getDisplayName()); ingestManager.setIngestTaskProgress(task, getDisplayName());
logger.log(Level.INFO, "{0} analysis of {1} starting", new Object[]{getDisplayName(), dataSource.getName()}); //NON-NLS logger.log(Level.INFO, "{0} analysis of {1} starting", new Object[]{getDisplayName(), dataSource.getName()}); //NON-NLS
ProcessResult result = module.process(dataSource, new DataSourceIngestModuleProgress(ingestJobPipeline)); module.process(dataSource, new DataSourceIngestModuleProgress(ingestJobPipeline));
logger.log(Level.INFO, "{0} analysis of {1} finished", new Object[]{getDisplayName(), dataSource.getName()}); //NON-NLS logger.log(Level.INFO, "{0} analysis of {1} finished", new Object[]{getDisplayName(), dataSource.getName()}); //NON-NLS
if (!ingestJobPipeline.isCancelled() && ingestJobPipeline.currentDataSourceIngestModuleIsCancelled()) { if (!ingestJobPipeline.isCancelled() && ingestJobPipeline.currentDataSourceIngestModuleIsCancelled()) {
ingestJobPipeline.currentDataSourceIngestModuleCancellationCompleted(getDisplayName()); ingestJobPipeline.currentDataSourceIngestModuleCancellationCompleted(getDisplayName());
} }
// See JIRA-7449
// if (result == ProcessResult.ERROR) {
// throw new IngestModuleException(String.format("%s experienced an error analyzing %s (data source objId = %d)", getDisplayName(), dataSource.getName(), dataSource.getId())); //NON-NLS
// }
} }
} }

View File

@ -1,7 +1,7 @@
/* /*
* Autopsy Forensic Browser * Autopsy Forensic Browser
* *
* Copyright 2012-2014 Basis Technology Corp. * Copyright 2014-2021 Basis Technology Corp.
* Contact: carrier <at> sleuthkit <dot> org * Contact: carrier <at> sleuthkit <dot> org
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
@ -18,15 +18,27 @@
*/ */
package org.sleuthkit.autopsy.ingest; package org.sleuthkit.autopsy.ingest;
/**
* A data source level ingest task that will be executed by an ingest thread
* using a given ingest job pipeline.
*/
final class DataSourceIngestTask extends IngestTask { final class DataSourceIngestTask extends IngestTask {
/**
* Constructs a data source level ingest task that will be executed by an
* ingest thread using a given ingest job pipeline.
*
* @param ingestJobPipeline The ingest job pipeline to use to execute the
* task.
*/
DataSourceIngestTask(IngestJobPipeline ingestJobPipeline) { DataSourceIngestTask(IngestJobPipeline ingestJobPipeline) {
super(ingestJobPipeline); super(ingestJobPipeline);
} }
@Override @Override
void execute(long threadId) throws InterruptedException { void execute(long threadId) {
super.setThreadId(threadId); super.setThreadId(threadId);
getIngestJobPipeline().process(this); getIngestJobPipeline().execute(this);
} }
} }

View File

@ -30,6 +30,14 @@ public interface FileIngestModule extends IngestModule {
* Processes a file. Called between calls to startUp() and shutDown(). Will * Processes a file. Called between calls to startUp() and shutDown(). Will
* be called for each file in a data source. * be called for each file in a data source.
* *
* IMPORTANT: In addition to returning ProcessResult.OK or
* ProcessResult.ERROR, modules should log all errors using methods provided
* by the org.sleuthkit.autopsy.coreutils.Logger class. Log messages should
* include the name and object ID of the data being processed. If an
* exception has been caught by the module, the exception should be sent to
* the Logger along with the log message so that a stack trace will appear
* in the application log.
*
* @param file The file to analyze. * @param file The file to analyze.
* *
* @return A result code indicating success or failure of the processing. * @return A result code indicating success or failure of the processing.
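
A comparable sketch for a file ingest module, also illustrating the per-file cancellation check via the IngestJobContext captured in startUp() (hypothetical class; see the IngestJobContext changes later in this diff):

package org.sleuthkit.autopsy.examples;

import java.util.logging.Level;
import org.sleuthkit.autopsy.coreutils.Logger;
import org.sleuthkit.autopsy.ingest.FileIngestModule;
import org.sleuthkit.autopsy.ingest.IngestJobContext;
import org.sleuthkit.datamodel.AbstractFile;

class SampleFileIngestModule implements FileIngestModule {

    private static final Logger logger = Logger.getLogger(SampleFileIngestModule.class.getName());
    private IngestJobContext context;

    @Override
    public void startUp(IngestJobContext context) throws IngestModuleException {
        this.context = context;
    }

    @Override
    public ProcessResult process(AbstractFile file) {
        // Break off processing if cancellation has been requested.
        if (context.fileIngestIsCancelled()) {
            return ProcessResult.OK;
        }
        try {
            // ... analyze the file here ...
            return ProcessResult.OK;
        } catch (Exception ex) {
            // Include the name and object ID of the file plus the exception.
            logger.log(Level.SEVERE, String.format("Error analyzing %s (objId = %d)",
                    file.getName(), file.getId()), ex);
            return ProcessResult.ERROR;
        }
    }
}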

View File

@ -1,7 +1,7 @@
/* /*
* Autopsy Forensic Browser * Autopsy Forensic Browser
* *
* Copyright 2014 Basis Technology Corp. * Copyright 2014-2021 Basis Technology Corp.
* Contact: carrier <at> sleuthkit <dot> org * Contact: carrier <at> sleuthkit <dot> org
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
@ -21,23 +21,17 @@ package org.sleuthkit.autopsy.ingest;
import org.sleuthkit.datamodel.AbstractFile; import org.sleuthkit.datamodel.AbstractFile;
/** /**
* DO NOT USE: As of Java 8, interfaces can have default methods. IngestModule
* now provides default no-op versions of startUp() and shutDown(). This class
* is no longer needed and can be DEPRECATED when convenient.
* *
* An adapter that provides no-op implementations of the startUp() and * An adapter that provides no-op implementations of the startUp() and
* shutDown() methods for file ingest modules. * shutDown() methods for file ingest modules.
* *
* NOTE: As of Java 8, interfaces can have default methods. FileIngestModule now
* provides default no-op versions of startUp() and shutDown(). This class is no
* longer needed and can be deprecated when convenient.
*/ */
public abstract class FileIngestModuleAdapter implements FileIngestModule { public abstract class FileIngestModuleAdapter implements FileIngestModule {
@Override
public void startUp(IngestJobContext context) throws IngestModuleException {
}
@Override @Override
public abstract ProcessResult process(AbstractFile file); public abstract ProcessResult process(AbstractFile file);
@Override
public void shutDown() {
}
} }

View File

@ -33,7 +33,7 @@ import org.sleuthkit.datamodel.SleuthkitCase.CaseDbTransaction;
import org.sleuthkit.datamodel.TskCoreException; import org.sleuthkit.datamodel.TskCoreException;
/** /**
* A pipeline of file ingest modules for performing file ingest tasks for an * A pipeline of file ingest modules for executing file ingest tasks for an
* ingest job. * ingest job.
*/ */
@NbBundle.Messages({ @NbBundle.Messages({
@ -49,7 +49,7 @@ final class FileIngestPipeline extends IngestTaskPipeline<FileIngestTask> {
private final List<AbstractFile> fileBatch; private final List<AbstractFile> fileBatch;
/** /**
* Constructs a pipeline of file ingest modules for performing file ingest * Constructs a pipeline of file ingest modules for executing file ingest
* tasks for an ingest job. * tasks for an ingest job.
* *
* @param ingestJobPipeline The ingest job pipeline that owns this pipeline. * @param ingestJobPipeline The ingest job pipeline that owns this pipeline.
@ -73,11 +73,11 @@ final class FileIngestPipeline extends IngestTaskPipeline<FileIngestTask> {
} }
@Override @Override
void prepareTask(FileIngestTask task) throws IngestTaskPipelineException { void prepareForTask(FileIngestTask task) throws IngestTaskPipelineException {
} }
@Override @Override
void completeTask(FileIngestTask task) throws IngestTaskPipelineException { void cleanUpAfterTask(FileIngestTask task) throws IngestTaskPipelineException {
try { try {
ingestManager.setIngestTaskProgress(task, SAVE_RESULTS_ACTIVITY); ingestManager.setIngestTaskProgress(task, SAVE_RESULTS_ACTIVITY);
AbstractFile file = task.getFile(); AbstractFile file = task.getFile();
@ -106,7 +106,7 @@ final class FileIngestPipeline extends IngestTaskPipeline<FileIngestTask> {
} }
/** /**
* Adds a file to a file cache used to update the case database with new * Adds a file to a file cache used to update the case database with any new
* properties added to the files in the cache by the ingest modules that * properties added to the files in the cache by the ingest modules that
* processed them. If adding the file to the cache fills the cache, a batch * processed them. If adding the file to the cache fills the cache, a batch
* update is done immediately. * update is done immediately.
@ -195,7 +195,7 @@ final class FileIngestPipeline extends IngestTaskPipeline<FileIngestTask> {
} }
@Override @Override
void performTask(IngestJobPipeline ingestJobPipeline, FileIngestTask task) throws IngestModuleException { void executeTask(IngestJobPipeline ingestJobPipeline, FileIngestTask task) throws IngestModuleException {
AbstractFile file = null; AbstractFile file = null;
try { try {
file = task.getFile(); file = task.getFile();
@ -203,12 +203,7 @@ final class FileIngestPipeline extends IngestTaskPipeline<FileIngestTask> {
throw new IngestModuleException(String.format("Failed to get file (file objId = %d)", task.getFileId()), ex); //NON-NLS throw new IngestModuleException(String.format("Failed to get file (file objId = %d)", task.getFileId()), ex); //NON-NLS
} }
ingestManager.setIngestTaskProgress(task, getDisplayName()); ingestManager.setIngestTaskProgress(task, getDisplayName());
ingestJobPipeline.setCurrentFileIngestModule(getDisplayName(), file.getName()); module.process(file);
ProcessResult result = module.process(file);
// See JIRA-7449
// if (result == ProcessResult.ERROR) {
// throw new IngestModuleException(String.format("%s experienced an error analyzing %s (file objId = %d)", getDisplayName(), file.getName(), file.getId())); //NON-NLS
// }
} }
} }

View File

@ -1,7 +1,7 @@
/* /*
* Autopsy Forensic Browser * Autopsy Forensic Browser
* *
* Copyright 2012-2015 Basis Technology Corp. * Copyright 2014-2021 Basis Technology Corp.
* Contact: carrier <at> sleuthkit <dot> org * Contact: carrier <at> sleuthkit <dot> org
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
@ -24,29 +24,60 @@ import org.sleuthkit.datamodel.AbstractFile;
import org.sleuthkit.datamodel.TskCoreException; import org.sleuthkit.datamodel.TskCoreException;
/** /**
* Represents a single file analysis task, which is defined by a file to analyze * A file ingest task that will be executed by an ingest thread using a given
* and the IngestJob/Pipeline to run it on. * ingest job pipeline.
*/ */
final class FileIngestTask extends IngestTask { final class FileIngestTask extends IngestTask {
private final long fileId; private final long fileId;
private AbstractFile file = null; private AbstractFile file;
/**
* Constructs a file ingest task that will be executed by an ingest thread
* using a given ingest job pipeline.
*
* @param ingestJobPipeline The ingest job pipeline to use to execute the
* task.
* @param file The file to be processed.
*/
FileIngestTask(IngestJobPipeline ingestJobPipeline, AbstractFile file) { FileIngestTask(IngestJobPipeline ingestJobPipeline, AbstractFile file) {
super(ingestJobPipeline); super(ingestJobPipeline);
this.file = file; this.file = file;
fileId = file.getId(); fileId = file.getId();
} }
/**
* Constructs a file ingest task that will be executed by an ingest thread
* using a given ingest job pipeline. This constructor supports streaming
* ingest by deferring the construction of the AbstractFile object for this
* task to conserve heap memory.
*
* @param ingestJobPipeline The ingest job pipeline to use to execute the
* task.
* @param fileId The object ID of the file to be processed.
*/
FileIngestTask(IngestJobPipeline ingestJobPipeline, long fileId) { FileIngestTask(IngestJobPipeline ingestJobPipeline, long fileId) {
super(ingestJobPipeline); super(ingestJobPipeline);
this.fileId = fileId; this.fileId = fileId;
} }
/**
* Gets the object ID of the file for this task.
*
* @return The object ID.
*/
long getFileId() { long getFileId() {
return fileId; return fileId;
} }
/**
* Gets the file for this task.
*
* @return The file.
*
* @throws TskCoreException The exception is thrown if there is an error
* retrieving the file from the case database.
*/
synchronized AbstractFile getFile() throws TskCoreException { synchronized AbstractFile getFile() throws TskCoreException {
if (file == null) { if (file == null) {
file = Case.getCurrentCase().getSleuthkitCase().getAbstractFileById(fileId); file = Case.getCurrentCase().getSleuthkitCase().getAbstractFileById(fileId);
@ -55,9 +86,9 @@ final class FileIngestTask extends IngestTask {
} }
@Override @Override
void execute(long threadId) throws InterruptedException { void execute(long threadId) {
super.setThreadId(threadId); super.setThreadId(threadId);
getIngestJobPipeline().process(this); getIngestJobPipeline().execute(this);
} }
@Override @Override
@ -84,4 +115,5 @@ final class FileIngestTask extends IngestTask {
hash = 47 * hash + Objects.hashCode(this.fileId); hash = 47 * hash + Objects.hashCode(this.fileId);
return hash; return hash;
} }
} }

View File

@ -33,7 +33,6 @@ import org.openide.util.NbBundle;
import org.sleuthkit.autopsy.coreutils.Logger; import org.sleuthkit.autopsy.coreutils.Logger;
import org.sleuthkit.datamodel.AbstractFile; import org.sleuthkit.datamodel.AbstractFile;
import org.sleuthkit.datamodel.Content; import org.sleuthkit.datamodel.Content;
import org.sleuthkit.datamodel.DataSource;
/** /**
* Analyzes one or more data sources using a set of ingest modules specified via * Analyzes one or more data sources using a set of ingest modules specified via
@ -120,7 +119,7 @@ public final class IngestJob {
* *
* @param settings The ingest job settings. * @param settings The ingest job settings.
*/ */
IngestJob(DataSource dataSource, Mode ingestMode, IngestJobSettings settings) { IngestJob(Content dataSource, Mode ingestMode, IngestJobSettings settings) {
this.id = IngestJob.nextId.getAndIncrement(); this.id = IngestJob.nextId.getAndIncrement();
this.ingestJobPipelines = new ConcurrentHashMap<>(); this.ingestJobPipelines = new ConcurrentHashMap<>();
this.dataSources.add(dataSource); this.dataSources.add(dataSource);
@ -162,7 +161,7 @@ public final class IngestJob {
} }
// Streaming ingest jobs will only have one data source // Streaming ingest jobs will only have one data source
IngestJobPipeline streamingIngestPipeline = ingestJobPipelines.values().iterator().next(); IngestJobPipeline streamingIngestPipeline = ingestJobPipelines.values().iterator().next();
streamingIngestPipeline.addStreamingIngestFiles(fileObjIds); streamingIngestPipeline.addStreamedFiles(fileObjIds);
} }
/** /**
@ -175,7 +174,7 @@ public final class IngestJob {
} }
// Streaming ingest jobs will only have one data source // Streaming ingest jobs will only have one data source
IngestJobPipeline streamingIngestPipeline = ingestJobPipelines.values().iterator().next(); IngestJobPipeline streamingIngestPipeline = ingestJobPipelines.values().iterator().next();
streamingIngestPipeline.processStreamingIngestDataSource(); streamingIngestPipeline.addStreamedDataSource();
} }
/** /**
@ -184,33 +183,28 @@ public final class IngestJob {
* *
* @return A collection of ingest module start up errors, empty on success. * @return A collection of ingest module start up errors, empty on success.
*/ */
List<IngestModuleError> start() { List<IngestModuleError> start() throws InterruptedException {
/* /*
* Set up the pipeline(s) * Set up the ingest job pipelines, one for each data source to be
* ingested by this job.
*/ */
if (files.isEmpty()) { if (files.isEmpty()) {
for (Content dataSource : dataSources) { for (Content dataSource : dataSources) {
IngestJobPipeline ingestJobPipeline = new IngestJobPipeline(this, dataSource, settings); IngestJobPipeline ingestJobPipeline = new IngestJobPipeline(this, dataSource, settings);
this.ingestJobPipelines.put(ingestJobPipeline.getId(), ingestJobPipeline); ingestJobPipelines.put(ingestJobPipeline.getId(), ingestJobPipeline);
} }
} else { } else {
IngestJobPipeline ingestJobPipeline = new IngestJobPipeline(this, dataSources.get(0), files, settings); IngestJobPipeline ingestJobPipeline = new IngestJobPipeline(this, dataSources.get(0), files, settings);
this.ingestJobPipelines.put(ingestJobPipeline.getId(), ingestJobPipeline); ingestJobPipelines.put(ingestJobPipeline.getId(), ingestJobPipeline);
} }
incompleteJobsCount.set(ingestJobPipelines.size()); incompleteJobsCount.set(ingestJobPipelines.size());
/* /*
* Try to start each data source ingest job. Note that there is an * Try to start up each ingest job pipeline. Stop at the first failure.
* assumption here that if there is going to be a module startup
* failure, it will be for the first ingest job pipeline.
*
* TODO (RC): Consider separating module start up from pipeline startup
* so that no processing is done if this assumption is false.
*/ */
List<IngestModuleError> errors = new ArrayList<>(); List<IngestModuleError> errors = new ArrayList<>();
for (IngestJobPipeline ingestJobPipeline : this.ingestJobPipelines.values()) { for (IngestJobPipeline ingestJobPipeline : ingestJobPipelines.values()) {
errors.addAll(ingestJobPipeline.start()); errors.addAll(ingestJobPipeline.startUp());
if (errors.isEmpty() == false) { if (errors.isEmpty() == false) {
break; break;
} }
@ -220,8 +214,8 @@ public final class IngestJob {
* Handle start up success or failure. * Handle start up success or failure.
*/ */
if (errors.isEmpty()) { if (errors.isEmpty()) {
for (IngestJobPipeline dataSourceJob : this.ingestJobPipelines.values()) { for (IngestJobPipeline ingestJobPipeline : ingestJobPipelines.values()) {
IngestManager.getInstance().fireDataSourceAnalysisStarted(id, dataSourceJob.getId(), dataSourceJob.getDataSource()); IngestManager.getInstance().fireDataSourceAnalysisStarted(id, ingestJobPipeline.getId(), ingestJobPipeline.getDataSource());
} }
} else { } else {
cancel(CancellationReason.INGEST_MODULES_STARTUP_FAILED); cancel(CancellationReason.INGEST_MODULES_STARTUP_FAILED);
@ -337,7 +331,7 @@ public final class IngestJob {
* *
* @param ingestJobPipeline A completed ingestJobPipeline. * @param ingestJobPipeline A completed ingestJobPipeline.
*/ */
void ingestJobPipelineFinished(IngestJobPipeline ingestJobPipeline) { void notifyIngestPipelineShutDown(IngestJobPipeline ingestJobPipeline) {
IngestManager ingestManager = IngestManager.getInstance(); IngestManager ingestManager = IngestManager.getInstance();
if (!ingestJobPipeline.isCancelled()) { if (!ingestJobPipeline.isCancelled()) {
ingestManager.fireDataSourceAnalysisCompleted(id, ingestJobPipeline.getId(), ingestJobPipeline.getDataSource()); ingestManager.fireDataSourceAnalysisCompleted(id, ingestJobPipeline.getId(), ingestJobPipeline.getDataSource());

View File

@ -1,7 +1,7 @@
/* /*
* Autopsy Forensic Browser * Autopsy Forensic Browser
* *
* Copyright 2014 Basis Technology Corp. * Copyright 2014-2021 Basis Technology Corp.
* Contact: carrier <at> sleuthkit <dot> org * Contact: carrier <at> sleuthkit <dot> org
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
@ -21,6 +21,7 @@ package org.sleuthkit.autopsy.ingest;
import java.util.List; import java.util.List;
import org.sleuthkit.datamodel.AbstractFile; import org.sleuthkit.datamodel.AbstractFile;
import org.sleuthkit.datamodel.Content; import org.sleuthkit.datamodel.Content;
import org.sleuthkit.datamodel.DataArtifact;
/** /**
* Provides an ingest module with services specific to the ingest job of which * Provides an ingest module with services specific to the ingest job of which
@ -30,81 +31,108 @@ public final class IngestJobContext {
private final IngestJobPipeline ingestJobPipeline; private final IngestJobPipeline ingestJobPipeline;
/**
* Constructs an ingest job context object that provides an ingest module
* with services specific to the ingest job of which the module is a part.
*
* @param ingestJobPipeline The ingest pipeline for the job.
*/
IngestJobContext(IngestJobPipeline ingestJobPipeline) { IngestJobContext(IngestJobPipeline ingestJobPipeline) {
this.ingestJobPipeline = ingestJobPipeline; this.ingestJobPipeline = ingestJobPipeline;
} }
/** /**
* Gets the ingest job execution context identifier. * Gets the execution context identifier of the ingest job.
* *
* @return The context string. * @return The context string.
*/ */
public String getExecutionContext() { public String getExecutionContext() {
return this.ingestJobPipeline.getExecutionContext(); return ingestJobPipeline.getExecutionContext();
} }
/** /**
* Gets the data source associated with this context. * Gets the data source for the ingest job.
* *
* @return The data source. * @return The data source.
*/ */
public Content getDataSource() { public Content getDataSource() {
return this.ingestJobPipeline.getDataSource(); return ingestJobPipeline.getDataSource();
} }
/** /**
* Gets the identifier of the ingest job associated with this context. * Gets the unique identifier for the ingest job.
* *
* @return The ingest job identifier. * @return The ID.
*/ */
public long getJobId() { public long getJobId() {
return this.ingestJobPipeline.getId(); return ingestJobPipeline.getId();
} }
/** /**
* Queries whether or not cancellation of the data source ingest part of the * Indicates whether or not cancellation of the ingest job has been
* ingest job associated with this context has been requested. * requested.
* *
* @return True or false. * @return True or false.
* *
* @deprecated Use dataSourceIngestIsCancelled() or fileIngestIsCancelled() * @deprecated Modules should call a type-specific cancellation check method
* instead. * instead.
*/ */
@Deprecated @Deprecated
public boolean isJobCancelled() { public boolean isJobCancelled() {
return this.dataSourceIngestIsCancelled(); return ingestJobPipeline.isCancelled();
} }
/** /**
* Allows a data source ingest module to determine whether or not * Indicates whether or not cancellation of the currently running data
* cancellation of the data source ingest part of the ingest job associated * source level ingest module has been requested. Data source level ingest
* with this context has been requested. * modules should check this periodically and break off processing if the
* method returns true.
* *
* @return True or false. * @return True or false.
*/ */
public boolean dataSourceIngestIsCancelled() { public boolean dataSourceIngestIsCancelled() {
return this.ingestJobPipeline.currentDataSourceIngestModuleIsCancelled() || this.ingestJobPipeline.isCancelled(); return ingestJobPipeline.currentDataSourceIngestModuleIsCancelled() || ingestJobPipeline.isCancelled();
} }
/** /**
* Allows a file ingest module to determine whether or not cancellation of * Indicates whether or not cancellation of the currently running file level
* the file ingest part of the ingest job associated with this context has * ingest module has been requested. File level ingest modules should check
* been requested. * this periodically and break off processing if the method returns true.
* *
* @return True or false. * @return True or false.
*/ */
public boolean fileIngestIsCancelled() { public boolean fileIngestIsCancelled() {
return this.ingestJobPipeline.isCancelled(); /*
* It is not currently possible to cancel individual file ingest
* modules.
*/
return ingestJobPipeline.isCancelled();
}
/**
* Checks whether or not cancellation of the currently running data artifact
* ingest module for the ingest job has been requested. Data artifact ingest
* modules should check this periodically and break off processing if the
* method returns true.
*
* @return True or false.
*/
public boolean dataArtifactIngestIsCancelled() {
/*
* It is not currently possible to cancel individual data artifact
* ingest modules.
*/
return ingestJobPipeline.isCancelled();
} }
/** /**
* Queries whether or not unallocated space should be processed for the * Queries whether or not unallocated space should be processed for the
* ingest job associated with this context. * ingest job.
* *
* @return True or false. * @return True or false.
*/ */
public boolean processingUnallocatedSpace() { public boolean processingUnallocatedSpace() {
return this.ingestJobPipeline.shouldProcessUnallocatedSpace(); return ingestJobPipeline.shouldProcessUnallocatedSpace();
} }
/** /**
@ -113,21 +141,31 @@ public final class IngestJobContext {
* *
* @param files The files to be added. * @param files The files to be added.
* *
* @deprecated use addFilesToJob() instead * @deprecated use addFilesToJob() instead.
*/ */
@Deprecated @Deprecated
public void scheduleFiles(List<AbstractFile> files) { public void scheduleFiles(List<AbstractFile> files) {
this.addFilesToJob(files); addFilesToJob(files);
} }
/** /**
* Adds one or more files, i.e., extracted or carved files, to the ingest * Adds one or more files, e.g., extracted or carved files, to the ingest
* job associated with this context. * job for processing by its file ingest modules.
* *
* @param files The files to be added. * @param files The files.
*/ */
public void addFilesToJob(List<AbstractFile> files) { public void addFilesToJob(List<AbstractFile> files) {
this.ingestJobPipeline.addFiles(files); ingestJobPipeline.addFiles(files);
}
/**
* Adds one or more data artifacts to the ingest job for processing by its
* data artifact ingest modules.
*
* @param artifacts The artifacts.
*/
public void addDataArtifactsToJob(List<DataArtifact> artifacts) {
ingestJobPipeline.addDataArtifacts(artifacts);
} }
} }
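
A short sketch of how a module might use the two new methods together: check for cancellation periodically, then hand any artifacts it creates back to the job so the data artifact ingest modules can process them. The helper and the 'context' field are illustrative; a module would typically save its IngestJobContext during startUp().

import java.util.List;
import org.sleuthkit.datamodel.DataArtifact;

/*
 * A hypothetical helper in an ingest module that has saved its
 * IngestJobContext as 'context'.
 */
private void queueArtifacts(List<DataArtifact> artifacts) {
    // Break off processing if cancellation has been requested.
    if (context.dataArtifactIngestIsCancelled()) {
        return;
    }
    // Queue the artifacts for processing by the job's data artifact
    // ingest modules.
    context.addDataArtifactsToJob(artifacts);
}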

File diff suppressed because it is too large

View File

@ -1,7 +1,7 @@
/* /*
* Autopsy Forensic Browser * Autopsy Forensic Browser
* *
* Copyright 2014-2018 Basis Technology Corp. * Copyright 2014-2021 Basis Technology Corp.
* Contact: carrier <at> sleuthkit <dot> org * Contact: carrier <at> sleuthkit <dot> org
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
@ -318,7 +318,7 @@ public final class IngestJobSettings {
// Add modules that are going to be used for this ingest depending on type. // Add modules that are going to be used for this ingest depending on type.
for (IngestModuleFactory moduleFactory : allModuleFactories) { for (IngestModuleFactory moduleFactory : allModuleFactories) {
if (this.ingestType.equals(IngestType.ALL_MODULES)) { if (moduleFactory.isDataArtifactIngestModuleFactory() || ingestType.equals(IngestType.ALL_MODULES)) {
moduleFactories.add(moduleFactory); moduleFactories.add(moduleFactory);
} else if (this.ingestType.equals(IngestType.DATA_SOURCE_ONLY) && moduleFactory.isDataSourceIngestModuleFactory()) { } else if (this.ingestType.equals(IngestType.DATA_SOURCE_ONLY) && moduleFactory.isDataSourceIngestModuleFactory()) {
moduleFactories.add(moduleFactory); moduleFactories.add(moduleFactory);

View File

@ -18,6 +18,7 @@
*/ */
package org.sleuthkit.autopsy.ingest; package org.sleuthkit.autopsy.ingest;
import com.google.common.eventbus.Subscribe;
import com.google.common.util.concurrent.ThreadFactoryBuilder; import com.google.common.util.concurrent.ThreadFactoryBuilder;
import java.awt.EventQueue; import java.awt.EventQueue;
import java.beans.PropertyChangeEvent; import java.beans.PropertyChangeEvent;
@ -68,8 +69,11 @@ import org.sleuthkit.autopsy.ingest.events.DataSourceAnalysisCompletedEvent;
import org.sleuthkit.autopsy.ingest.events.DataSourceAnalysisStartedEvent; import org.sleuthkit.autopsy.ingest.events.DataSourceAnalysisStartedEvent;
import org.sleuthkit.autopsy.ingest.events.FileAnalyzedEvent; import org.sleuthkit.autopsy.ingest.events.FileAnalyzedEvent;
import org.sleuthkit.datamodel.AbstractFile; import org.sleuthkit.datamodel.AbstractFile;
import org.sleuthkit.datamodel.Blackboard;
import org.sleuthkit.datamodel.BlackboardArtifact;
import org.sleuthkit.datamodel.Content; import org.sleuthkit.datamodel.Content;
import org.sleuthkit.datamodel.DataSource; import org.sleuthkit.datamodel.DataSource;
import org.sleuthkit.datamodel.SleuthkitCase;
import org.sleuthkit.datamodel.TskCoreException; import org.sleuthkit.datamodel.TskCoreException;
/** /**
@ -129,8 +133,9 @@ public class IngestManager implements IngestProgressSnapshotProvider {
private final Map<Long, Future<Void>> startIngestJobFutures = new ConcurrentHashMap<>(); private final Map<Long, Future<Void>> startIngestJobFutures = new ConcurrentHashMap<>();
@GuardedBy("ingestJobsById") @GuardedBy("ingestJobsById")
private final Map<Long, IngestJob> ingestJobsById = new HashMap<>(); private final Map<Long, IngestJob> ingestJobsById = new HashMap<>();
private final ExecutorService dataSourceLevelIngestJobTasksExecutor = Executors.newSingleThreadExecutor(new ThreadFactoryBuilder().setNameFormat("IM-data-source-ingest-%d").build()); //NON-NLS; private final ExecutorService dataSourceLevelIngestJobTasksExecutor;
private final ExecutorService fileLevelIngestJobTasksExecutor; private final ExecutorService fileLevelIngestJobTasksExecutor;
private final ExecutorService resultIngestTasksExecutor;
private final ExecutorService eventPublishingExecutor = Executors.newSingleThreadExecutor(new ThreadFactoryBuilder().setNameFormat("IM-ingest-events-%d").build()); //NON-NLS; private final ExecutorService eventPublishingExecutor = Executors.newSingleThreadExecutor(new ThreadFactoryBuilder().setNameFormat("IM-ingest-events-%d").build()); //NON-NLS;
private final IngestMonitor ingestMonitor = new IngestMonitor(); private final IngestMonitor ingestMonitor = new IngestMonitor();
private final ServicesMonitor servicesMonitor = ServicesMonitor.getInstance(); private final ServicesMonitor servicesMonitor = ServicesMonitor.getInstance();
@ -168,6 +173,7 @@ public class IngestManager implements IngestProgressSnapshotProvider {
* source level ingest job tasks to the data source level ingest job * source level ingest job tasks to the data source level ingest job
* tasks executor. * tasks executor.
*/ */
dataSourceLevelIngestJobTasksExecutor = Executors.newSingleThreadExecutor(new ThreadFactoryBuilder().setNameFormat("IM-data-source-ingest-%d").build()); //NON-NLS;
long threadId = nextIngestManagerTaskId.incrementAndGet(); long threadId = nextIngestManagerTaskId.incrementAndGet();
dataSourceLevelIngestJobTasksExecutor.submit(new ExecuteIngestJobTasksTask(threadId, IngestTasksScheduler.getInstance().getDataSourceIngestTaskQueue())); dataSourceLevelIngestJobTasksExecutor.submit(new ExecuteIngestJobTasksTask(threadId, IngestTasksScheduler.getInstance().getDataSourceIngestTaskQueue()));
ingestThreadActivitySnapshots.put(threadId, new IngestThreadActivitySnapshot(threadId)); ingestThreadActivitySnapshots.put(threadId, new IngestThreadActivitySnapshot(threadId));
@ -184,6 +190,13 @@ public class IngestManager implements IngestProgressSnapshotProvider {
fileLevelIngestJobTasksExecutor.submit(new ExecuteIngestJobTasksTask(threadId, IngestTasksScheduler.getInstance().getFileIngestTaskQueue())); fileLevelIngestJobTasksExecutor.submit(new ExecuteIngestJobTasksTask(threadId, IngestTasksScheduler.getInstance().getFileIngestTaskQueue()));
ingestThreadActivitySnapshots.put(threadId, new IngestThreadActivitySnapshot(threadId)); ingestThreadActivitySnapshots.put(threadId, new IngestThreadActivitySnapshot(threadId));
} }
resultIngestTasksExecutor = Executors.newSingleThreadExecutor(new ThreadFactoryBuilder().setNameFormat("IM-results-ingest-%d").build()); //NON-NLS;
threadId = nextIngestManagerTaskId.incrementAndGet();
resultIngestTasksExecutor.submit(new ExecuteIngestJobTasksTask(threadId, IngestTasksScheduler.getInstance().getResultIngestTaskQueue()));
// RJCTODO
// ingestThreadActivitySnapshots.put(threadId, new IngestThreadActivitySnapshot(threadId));
// RJCTODO: Where is the shut down code?
} }
/** /**
@ -248,9 +261,10 @@ public class IngestManager implements IngestProgressSnapshotProvider {
}); });
} }
/* /**
* Handles a current case opened event by clearing the ingest messages inbox * Handles a current case opened event by clearing the ingest messages
* and opening a remote event channel for the current case. * inbox, opening a remote event channel for the current case, and
* registering to receive events from the event bus for the case database.
* *
* Note that current case change events are published in a strictly * Note that current case change events are published in a strictly
* serialized manner, i.e., one event at a time, synchronously. * serialized manner, i.e., one event at a time, synchronously.
@ -265,6 +279,7 @@ public class IngestManager implements IngestProgressSnapshotProvider {
jobEventPublisher.openRemoteEventChannel(String.format(INGEST_JOB_EVENT_CHANNEL_NAME, channelPrefix)); jobEventPublisher.openRemoteEventChannel(String.format(INGEST_JOB_EVENT_CHANNEL_NAME, channelPrefix));
moduleEventPublisher.openRemoteEventChannel(String.format(INGEST_MODULE_EVENT_CHANNEL_NAME, channelPrefix)); moduleEventPublisher.openRemoteEventChannel(String.format(INGEST_MODULE_EVENT_CHANNEL_NAME, channelPrefix));
} }
openedCase.getSleuthkitCase().registerForEvents(this);
} catch (NoCurrentCaseException | AutopsyEventException ex) { } catch (NoCurrentCaseException | AutopsyEventException ex) {
logger.log(Level.SEVERE, "Failed to open remote events channel", ex); //NON-NLS logger.log(Level.SEVERE, "Failed to open remote events channel", ex); //NON-NLS
MessageNotifyUtil.Notify.error(NbBundle.getMessage(IngestManager.class, "IngestManager.OpenEventChannel.Fail.Title"), MessageNotifyUtil.Notify.error(NbBundle.getMessage(IngestManager.class, "IngestManager.OpenEventChannel.Fail.Title"),
@ -272,10 +287,27 @@ public class IngestManager implements IngestProgressSnapshotProvider {
} }
} }
/* /**
* Handles artifacts posted events published by the Sleuth Kit layer
* blackboard via the event bus for the case database.
*
* @param tskEvent A Sleuth Kit data model ArtifactsPostedEvent from the
* case database event bus.
*/
@Subscribe
void handleArtifactsPosted(Blackboard.ArtifactsPostedEvent tskEvent) {
for (BlackboardArtifact.Type artifactType : tskEvent.getArtifactTypes()) {
ModuleDataEvent legacyEvent = new ModuleDataEvent(tskEvent.getModuleName(), artifactType, tskEvent.getArtifacts(artifactType));
AutopsyEvent autopsyEvent = new BlackboardPostEvent(legacyEvent);
eventPublishingExecutor.submit(new PublishEventTask(autopsyEvent, moduleEventPublisher));
}
}
/**
* Handles a current case closed event by cancelling all ingest jobs for the * Handles a current case closed event by cancelling all ingest jobs for the
* case, closing the remote event channel for the case, and clearing the * case, unregistering from receiving events from the case database, closing
* ingest messages inbox. * the remote event channel for the case, and clearing the ingest messages
* inbox.
* *
* Note that current case change events are published in a strictly * Note that current case change events are published in a strictly
* serialized manner, i.e., one event at a time, synchronously. * serialized manner, i.e., one event at a time, synchronously.
@ -285,7 +317,8 @@ public class IngestManager implements IngestProgressSnapshotProvider {
* TODO (JIRA-2227): IngestManager should wait for cancelled ingest jobs * TODO (JIRA-2227): IngestManager should wait for cancelled ingest jobs
* to complete when a case is closed. * to complete when a case is closed.
*/ */
this.cancelAllIngestJobs(IngestJob.CancellationReason.CASE_CLOSED); cancelAllIngestJobs(IngestJob.CancellationReason.CASE_CLOSED);
Case.getCurrentCase().getSleuthkitCase().unregisterForEvents(this);
jobEventPublisher.closeRemoteEventChannel(); jobEventPublisher.closeRemoteEventChannel();
moduleEventPublisher.closeRemoteEventChannel(); moduleEventPublisher.closeRemoteEventChannel();
caseIsOpen = false; caseIsOpen = false;
@ -455,7 +488,11 @@ public class IngestManager implements IngestProgressSnapshotProvider {
ingestJobsById.put(job.getId(), job); ingestJobsById.put(job.getId(), job);
} }
IngestManager.logger.log(Level.INFO, "Starting ingest job {0}", job.getId()); //NON-NLS IngestManager.logger.log(Level.INFO, "Starting ingest job {0}", job.getId()); //NON-NLS
try {
errors = job.start(); errors = job.start();
} catch (InterruptedException ex) {
return new IngestJobStartResult(null, new IngestManagerException("Interrupted while starting ingest", ex), errors); //NON-NLS
}
if (errors.isEmpty()) { if (errors.isEmpty()) {
this.fireIngestJobStarted(job.getId()); this.fireIngestJobStarted(job.getId());
} else { } else {
@ -492,7 +529,8 @@ public class IngestManager implements IngestProgressSnapshotProvider {
* *
* @param job The completed job. * @param job The completed job.
*/ */
void finishIngestJob(IngestJob job) { void finishIngestJob(IngestJob job) {
long jobId = job.getId(); long jobId = job.getId();
synchronized (ingestJobsById) { synchronized (ingestJobsById) {
ingestJobsById.remove(jobId); ingestJobsById.remove(jobId);
@ -700,18 +738,6 @@ public class IngestManager implements IngestProgressSnapshotProvider {
eventPublishingExecutor.submit(new PublishEventTask(event, moduleEventPublisher)); eventPublishingExecutor.submit(new PublishEventTask(event, moduleEventPublisher));
} }
/**
* Publishes an ingest module event signifying a blackboard post by an
* ingest module.
*
* @param moduleDataEvent A ModuleDataEvent with the details of the
* blackboard post.
*/
void fireIngestModuleDataEvent(ModuleDataEvent moduleDataEvent) {
AutopsyEvent event = new BlackboardPostEvent(moduleDataEvent);
eventPublishingExecutor.submit(new PublishEventTask(event, moduleEventPublisher));
}
/** /**
* Publishes an ingest module event signifying discovery of additional * Publishes an ingest module event signifying discovery of additional
* content by an ingest module. * content by an ingest module.
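
The removal of fireIngestModuleDataEvent() above pairs with the new handleArtifactsPosted() subscriber: modules now publish by posting artifacts to the case blackboard, and the notification reaches listeners through the case database event bus. A sketch of the posting side, assuming the Sleuth Kit Blackboard.postArtifact() API; the module name, 'logger' field, and error handling are illustrative.

import java.util.logging.Level;
import org.sleuthkit.autopsy.casemodule.Case;
import org.sleuthkit.autopsy.casemodule.NoCurrentCaseException;
import org.sleuthkit.datamodel.Blackboard;
import org.sleuthkit.datamodel.BlackboardArtifact;

private void postArtifact(BlackboardArtifact artifact) {
    try {
        Blackboard blackboard = Case.getCurrentCaseThrows().getSleuthkitCase().getBlackboard();
        // Posting publishes a Blackboard.ArtifactsPostedEvent on the case
        // database event bus; handleArtifactsPosted() above converts it into
        // a legacy ModuleDataEvent for existing listeners.
        blackboard.postArtifact(artifact, "SampleModule");
    } catch (NoCurrentCaseException | Blackboard.BlackboardException ex) {
        logger.log(Level.SEVERE, "Failed to post artifact", ex);
    }
}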

View File

@ -87,7 +87,8 @@ public interface IngestModule {
} }
/** /**
* An exception for the use of ingest modules. * An exception for ingest modules to throw if they experience a start up
* error.
*/ */
public class IngestModuleException extends Exception { public class IngestModuleException extends Exception {
@ -108,7 +109,7 @@ public interface IngestModule {
} }
/** /**
* A return code for subclass process() methods. * A return code for process() method implementations.
*/ */
public enum ProcessResult { public enum ProcessResult {

View File

@ -1,7 +1,7 @@
/* /*
* Autopsy Forensic Browser * Autopsy Forensic Browser
* *
* Copyright 2014 Basis Technology Corp. * Copyright 2014-2021 Basis Technology Corp.
* Contact: carrier <at> sleuthkit <dot> org * Contact: carrier <at> sleuthkit <dot> org
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");

View File

@ -1,7 +1,7 @@
/* /*
* Autopsy Forensic Browser * Autopsy Forensic Browser
* *
* Copyright 2011-2016 Basis Technology Corp. * Copyright 2014-2021 Basis Technology Corp.
* Contact: carrier <at> sleuthkit <dot> org * Contact: carrier <at> sleuthkit <dot> org
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
@ -91,14 +91,13 @@ public interface IngestModuleFactory {
* family of ingest modules the factory creates. For example, the Autopsy * family of ingest modules the factory creates. For example, the Autopsy
* core hash lookup ingest module factory provides a global settings panel * core hash lookup ingest module factory provides a global settings panel
* to import and create hash databases. The hash databases are then enabled * to import and create hash databases. The hash databases are then enabled
* or disabled per ingest job using an ingest job settings panel. If the * or disabled per ingest job using an ingest job settings panel.
* module family does not have global settings, the factory may extend
* IngestModuleFactoryAdapter to get an implementation of this method that
* returns false.
* *
* @return True if the factory provides a global settings panel. * @return True if the factory provides a global settings panel.
*/ */
boolean hasGlobalSettingsPanel(); default boolean hasGlobalSettingsPanel() {
return false;
}
/** /**
* Gets a user interface panel that allows a user to change settings that * Gets a user interface panel that allows a user to change settings that
@ -106,68 +105,64 @@ public interface IngestModuleFactory {
* creates. For example, the Autopsy core hash lookup ingest module factory * creates. For example, the Autopsy core hash lookup ingest module factory
* provides a global settings panel to import and create hash databases. The * provides a global settings panel to import and create hash databases. The
* imported hash databases are then enabled or disabled per ingest job using * imported hash databases are then enabled or disabled per ingest job using
* an ingest job settings panel. If the module family does not have a * an ingest job settings panel.
* global settings, the factory may extend IngestModuleFactoryAdapter to get
* an implementation of this method that throws an
* UnsupportedOperationException.
* *
* @return A global settings panel. * @return A global settings panel.
*/ */
IngestModuleGlobalSettingsPanel getGlobalSettingsPanel(); default IngestModuleGlobalSettingsPanel getGlobalSettingsPanel() {
throw new UnsupportedOperationException();
}
/** /**
* Gets the default per ingest job settings for instances of the family of * Gets the default per ingest job settings for instances of the family of
* ingest modules the factory creates. For example, the Autopsy core hash * ingest modules the factory creates. For example, the Autopsy core hash
* lookup ingest modules family uses hash databases imported or created * lookup ingest modules family uses hash databases imported or created
* using its global settings panel. All of the hash databases are enabled by * using its global settings panel. All of the hash databases are enabled by
* default for an ingest job. If the module family does not have per ingest * default for an ingest job.
* job settings, the factory may extend IngestModuleFactoryAdapter to get an
* implementation of this method that returns an instance of the
* NoIngestModuleJobSettings class.
* *
* @return The default ingest job settings. * @return The default ingest job settings.
*/ */
IngestModuleIngestJobSettings getDefaultIngestJobSettings(); default IngestModuleIngestJobSettings getDefaultIngestJobSettings() {
return new NoIngestModuleIngestJobSettings();
}
/** /**
* Queries the factory to determine if it provides a user interface panel to * Queries the factory to determine if it provides a user interface panel to
* allow a user to make per ingest job settings for instances of the family * allow a user to make per ingest job settings for instances of the family
* of ingest modules the factory creates. For example, the Autopsy core hash * of ingest modules the factory creates. For example, the Autopsy core hash
* lookup ingest module factory provides an ingest job settings panel to * lookup ingest module factory provides an ingest job settings panel to
* enable or disable hash databases per ingest job. If the module family * enable or disable hash databases per ingest job.
* does not have per ingest job settings, the factory may extend
* IngestModuleFactoryAdapter to get an implementation of this method that
* returns false.
* *
* @return True if the factory provides ingest job settings panels. * @return True if the factory provides ingest job settings panels.
*/ */
boolean hasIngestJobSettingsPanel(); default boolean hasIngestJobSettingsPanel() {
return false;
}
/** /**
* Gets a user interface panel that can be used to set per ingest job * Gets a user interface panel that can be used to set per ingest job
* settings for instances of the family of ingest modules the factory * settings for instances of the family of ingest modules the factory
* creates. For example, the core hash lookup ingest module factory provides * creates. For example, the core hash lookup ingest module factory provides
* an ingest job settings panel to enable or disable hash databases per * an ingest job settings panel to enable or disable hash databases per
* ingest job. If the module family does not have per ingest job settings, * ingest job.
* the factory may extend IngestModuleFactoryAdapter to get an
* implementation of this method that throws an
* UnsupportedOperationException.
* *
* @param settings Per ingest job settings to initialize the panel. * @param settings Per ingest job settings to initialize the panel.
* *
* @return An ingest job settings panel. * @return An ingest job settings panel.
*/ */
IngestModuleIngestJobSettingsPanel getIngestJobSettingsPanel(IngestModuleIngestJobSettings settings); default IngestModuleIngestJobSettingsPanel getIngestJobSettingsPanel(IngestModuleIngestJobSettings settings) {
throw new UnsupportedOperationException();
}
/** /**
* Queries the factory to determine if it is capable of creating data source * Queries the factory to determine if it is capable of creating data source
* ingest modules. If the module family does not include data source ingest * ingest modules.
* modules, the factory may extend IngestModuleFactoryAdapter to get an
* implementation of this method that returns false.
* *
* @return True if the factory can create data source ingest modules. * @return True if the factory can create data source ingest modules.
*/ */
boolean isDataSourceIngestModuleFactory(); default boolean isDataSourceIngestModuleFactory() {
return false;
}
/** /**
* Creates a data source ingest module instance. * Creates a data source ingest module instance.
@ -189,26 +184,24 @@ public interface IngestModuleFactory {
* correctly. Also, more than one ingest job may be in progress at any given * correctly. Also, more than one ingest job may be in progress at any given
* time. This must also be taken into consideration when sharing resources * time. This must also be taken into consideration when sharing resources
* between module instances. * between module instances.
* <p>
* If the module family does not include data source ingest modules, the
* factory may extend IngestModuleFactoryAdapter to get an implementation of
* this method that throws an UnsupportedOperationException.
* *
* @param settings The settings for the ingest job. * @param ingestOptions The settings for the ingest job.
* *
* @return A data source ingest module instance. * @return A data source ingest module instance.
*/ */
DataSourceIngestModule createDataSourceIngestModule(IngestModuleIngestJobSettings settings); default DataSourceIngestModule createDataSourceIngestModule(IngestModuleIngestJobSettings ingestOptions) {
throw new UnsupportedOperationException();
}
/** /**
* Queries the factory to determine if it is capable of creating file ingest * Queries the factory to determine if it is capable of creating file ingest
* modules. If the module family does not include file ingest modules, the * modules.
* factory may extend IngestModuleFactoryAdapter to get an implementation of
* this method that returns false.
* *
* @return True if the factory can create file ingest modules. * @return True if the factory can create file ingest modules.
*/ */
boolean isFileIngestModuleFactory(); default boolean isFileIngestModuleFactory() {
return false;
}
/** /**
* Creates a file ingest module instance. * Creates a file ingest module instance.
@ -230,14 +223,52 @@ public interface IngestModuleFactory {
* correctly. Also, more than one ingest job may be in progress at any given * correctly. Also, more than one ingest job may be in progress at any given
* time. This must also be taken into consideration when sharing resources * time. This must also be taken into consideration when sharing resources
* between module instances. modules. * between module instances. modules.
* <p>
* If the module family does not include file ingest modules, the factory
* may extend IngestModuleFactoryAdapter to get an implementation of this
* method that throws an UnsupportedOperationException.
* *
* @param settings The settings for the ingest job. * @param settings The settings for the ingest job.
* *
* @return A file ingest module instance. * @return A file ingest module instance.
*/ */
FileIngestModule createFileIngestModule(IngestModuleIngestJobSettings settings); default FileIngestModule createFileIngestModule(IngestModuleIngestJobSettings ingestOptions) {
throw new UnsupportedOperationException();
}
/**
* Queries the factory to determine if it is capable of creating data
* artifact ingest modules.
*
* @return True or false.
*/
default boolean isDataArtifactIngestModuleFactory() {
return false;
}
/**
* Creates a data artifact ingest module instance.
* <p>
     * Autopsy will generally use the factory to create several instances of each type
* of module for each ingest job it performs. Completing an ingest job
* entails processing a single data source (e.g., a disk image) and all of
* the files from the data source, including files extracted from archives
* and any unallocated space (made to look like a series of files). The data
* source is passed through one or more pipelines of data source ingest
* modules. The files are passed through one or more pipelines of file
* ingest modules.
* <p>
* The ingest framework may use multiple threads to complete an ingest job,
* but it is guaranteed that there will be no more than one module instance
* per thread. However, if the module instances must share resources, the
* modules are responsible for synchronizing access to the shared resources
* and doing reference counting as required to release those resources
* correctly. Also, more than one ingest job may be in progress at any given
* time. This must also be taken into consideration when sharing resources
     * between module instances.
*
* @param settings The settings for the ingest job.
*
     * @return A data artifact ingest module instance.
*/
default DataArtifactIngestModule createDataArtifactIngestModule(IngestModuleIngestJobSettings settings) {
throw new UnsupportedOperationException();
}
} }
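With these default no-op implementations in place, a factory can implement IngestModuleFactory directly and override only the optional methods that apply to it. A minimal sketch, assuming only the interface's required metadata methods; SampleIngestModuleFactory and SampleFileIngestModule are hypothetical names, not part of this commit:

public final class SampleIngestModuleFactory implements IngestModuleFactory {

    @Override
    public String getModuleDisplayName() {
        return "Sample Module"; // hypothetical display name, for illustration only
    }

    @Override
    public String getModuleDescription() {
        return "Illustrates relying on the new default methods";
    }

    @Override
    public String getModuleVersionNumber() {
        return "1.0";
    }

    // The only optional method this factory overrides; all of the other
    // optional methods keep their no-op defaults.
    @Override
    public boolean isFileIngestModuleFactory() {
        return true;
    }

    @Override
    public FileIngestModule createFileIngestModule(IngestModuleIngestJobSettings settings) {
        return new SampleFileIngestModule(); // hypothetical module class
    }
}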
View File
@ -1,7 +1,7 @@
/* /*
* Autopsy Forensic Browser * Autopsy Forensic Browser
* *
* Copyright 2011-2016 Basis Technology Corp. * Copyright 2014-2021 Basis Technology Corp.
* Contact: carrier <at> sleuthkit <dot> org * Contact: carrier <at> sleuthkit <dot> org
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
@ -21,6 +21,10 @@ package org.sleuthkit.autopsy.ingest;
/** /**
* An adapter that provides no-op implementations of various IngestModuleFactory * An adapter that provides no-op implementations of various IngestModuleFactory
* methods. * methods.
*
* NOTE: As of Java 8, interfaces can have default methods. IngestModuleFactory
* now provides default no-op versions of all of its optional methods. This
* class is no longer needed and can be DEPRECATED when convenient.
*/ */
public abstract class IngestModuleFactoryAdapter implements IngestModuleFactory { public abstract class IngestModuleFactoryAdapter implements IngestModuleFactory {
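In practical terms, a factory that previously extended the adapter can now implement the interface directly; MyFactory is a hypothetical name used only for illustration:

// Before: inherit the no-op implementations from the adapter class.
public class MyFactory extends IngestModuleFactoryAdapter { /* ... */ }

// After: the interface's default methods supply the same no-ops.
public class MyFactory implements IngestModuleFactory { /* ... */ }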
View File
@ -1,7 +1,7 @@
/* /*
* Autopsy Forensic Browser * Autopsy Forensic Browser
* *
* Copyright 2014-2018 Basis Technology Corp. * Copyright 2014-2021 Basis Technology Corp.
* Contact: carrier <at> sleuthkit <dot> org * Contact: carrier <at> sleuthkit <dot> org
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
@ -85,6 +85,14 @@ public final class IngestModuleTemplate {
return moduleFactory.createFileIngestModule(settings); return moduleFactory.createFileIngestModule(settings);
} }
public boolean isDataArtifactIngestModuleTemplate() {
return moduleFactory.isDataArtifactIngestModuleFactory();
}
public DataArtifactIngestModule createDataArtifactIngestModule() {
return moduleFactory.createDataArtifactIngestModule(settings);
}
public void setEnabled(boolean enabled) { public void setEnabled(boolean enabled) {
this.enabled = enabled; this.enabled = enabled;
} }
View File
@ -165,26 +165,22 @@ class IngestProgressSnapshotPanel extends javax.swing.JPanel {
private class IngestJobTableModel extends AbstractTableModel { private class IngestJobTableModel extends AbstractTableModel {
        private final String[] columnNames = {NbBundle.getMessage(this.getClass(), "IngestJobTableModel.colName.jobID"),
            NbBundle.getMessage(this.getClass(), "IngestJobTableModel.colName.dataSource"),
            NbBundle.getMessage(this.getClass(), "IngestJobTableModel.colName.start"),
            NbBundle.getMessage(this.getClass(), "IngestJobTableModel.colName.numProcessed"),
            NbBundle.getMessage(this.getClass(), "IngestJobTableModel.colName.filesPerSec"),
            NbBundle.getMessage(this.getClass(), "IngestJobTableModel.colName.inProgress"),
            NbBundle.getMessage(this.getClass(), "IngestJobTableModel.colName.filesQueued"),
            NbBundle.getMessage(this.getClass(), "IngestJobTableModel.colName.dirQueued"),
            NbBundle.getMessage(this.getClass(), "IngestJobTableModel.colName.rootQueued"),
            NbBundle.getMessage(this.getClass(), "IngestJobTableModel.colName.streamingQueued"),
            NbBundle.getMessage(this.getClass(), "IngestJobTableModel.colName.dsQueued")};
        private static final long serialVersionUID = 1L;

        private final String[] columnNames = {
            NbBundle.getMessage(this.getClass(), "IngestJobTableModel.colName.jobID"),
            NbBundle.getMessage(this.getClass(), "IngestJobTableModel.colName.dataSource"),
            NbBundle.getMessage(this.getClass(), "IngestJobTableModel.colName.start"),
            NbBundle.getMessage(this.getClass(), "IngestJobTableModel.colName.numProcessed"),
            NbBundle.getMessage(this.getClass(), "IngestJobTableModel.colName.filesPerSec"),
            NbBundle.getMessage(this.getClass(), "IngestJobTableModel.colName.inProgress"),
            NbBundle.getMessage(this.getClass(), "IngestJobTableModel.colName.filesQueued"),
            NbBundle.getMessage(this.getClass(), "IngestJobTableModel.colName.dirQueued"),
            NbBundle.getMessage(this.getClass(), "IngestJobTableModel.colName.rootQueued"),
            NbBundle.getMessage(this.getClass(), "IngestJobTableModel.colName.streamingQueued"),
            NbBundle.getMessage(this.getClass(), "IngestJobTableModel.colName.dsQueued"),
            NbBundle.getMessage(this.getClass(), "IngestJobTableModel.colName.artifactsQueued")};
private List<Snapshot> jobSnapshots; private List<Snapshot> jobSnapshots;
private IngestJobTableModel() { private IngestJobTableModel() {
@ -250,6 +246,9 @@ class IngestProgressSnapshotPanel extends javax.swing.JPanel {
case 10: case 10:
cellValue = snapShot.getDsQueueSize(); cellValue = snapShot.getDsQueueSize();
break; break;
case 11:
cellValue = snapShot.getArtifactTasksQueueSize();
break;
default: default:
cellValue = null; cellValue = null;
break; break;
@ -260,6 +259,8 @@ class IngestProgressSnapshotPanel extends javax.swing.JPanel {
private class ModuleTableModel extends AbstractTableModel { private class ModuleTableModel extends AbstractTableModel {
private static final long serialVersionUID = 1L;
private class ModuleStats implements Comparable<ModuleStats> { private class ModuleStats implements Comparable<ModuleStats> {
private final String name; private final String name;
View File
@ -1,7 +1,7 @@
/* /*
* Autopsy Forensic Browser * Autopsy Forensic Browser
* *
* Copyright 2012-2018 Basis Technology Corp. * Copyright 2012-2021 Basis Technology Corp.
* Contact: carrier <at> sleuthkit <dot> org * Contact: carrier <at> sleuthkit <dot> org
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
@ -19,10 +19,12 @@
package org.sleuthkit.autopsy.ingest; package org.sleuthkit.autopsy.ingest;
import java.util.Map; import java.util.Map;
import java.util.logging.Level;
import org.sleuthkit.autopsy.casemodule.Case; import org.sleuthkit.autopsy.casemodule.Case;
import org.sleuthkit.autopsy.casemodule.NoCurrentCaseException; import org.sleuthkit.autopsy.casemodule.NoCurrentCaseException;
import org.sleuthkit.autopsy.coreutils.Logger; import org.sleuthkit.autopsy.coreutils.Logger;
import org.sleuthkit.autopsy.coreutils.ModuleSettings; import org.sleuthkit.autopsy.coreutils.ModuleSettings;
import org.sleuthkit.datamodel.Blackboard;
import org.sleuthkit.datamodel.SleuthkitCase; import org.sleuthkit.datamodel.SleuthkitCase;
/** /**
@ -31,6 +33,7 @@ import org.sleuthkit.datamodel.SleuthkitCase;
*/ */
public final class IngestServices { public final class IngestServices {
private static Logger logger = Logger.getLogger(IngestServices.class.getName());
private static IngestServices instance = null; private static IngestServices instance = null;
/** /**
@ -105,11 +108,17 @@ public final class IngestServices {
* @param moduleDataEvent A module data event, i.e., an event that * @param moduleDataEvent A module data event, i.e., an event that
* encapsulates artifact data. * encapsulates artifact data.
* *
* @deprecated use org.sleuthkit.datamodel.Blackboard.postArtifact instead. * @deprecated Use org.sleuthkit.datamodel.Blackboard.postArtifact or
* org.sleuthkit.datamodel.Blackboard.postArtifacts instead.
*/ */
@Deprecated @Deprecated
public void fireModuleDataEvent(ModuleDataEvent moduleDataEvent) { public void fireModuleDataEvent(ModuleDataEvent moduleDataEvent) {
IngestManager.getInstance().fireIngestModuleDataEvent(moduleDataEvent); try {
Blackboard blackboard = Case.getCurrentCaseThrows().getSleuthkitCase().getBlackboard();
blackboard.postArtifacts(moduleDataEvent.getArtifacts(), moduleDataEvent.getModuleName());
} catch (NoCurrentCaseException | Blackboard.BlackboardException ex) {
logger.log(Level.SEVERE, "Failed to post artifacts", ex);
}
} }
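Module code migrating off the deprecated method can post artifacts to the blackboard directly, exactly as the shim above now does. A sketch, where artifacts, moduleName, and logger stand in for a module's own values:

try {
    Blackboard blackboard = Case.getCurrentCaseThrows().getSleuthkitCase().getBlackboard();
    blackboard.postArtifacts(artifacts, moduleName); // also fires the equivalent event
} catch (NoCurrentCaseException | Blackboard.BlackboardException ex) {
    logger.log(Level.SEVERE, "Failed to post artifacts", ex);
}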
/** /**
View File
@ -1,7 +1,7 @@
/* /*
* Autopsy Forensic Browser * Autopsy Forensic Browser
* *
* Copyright 2014 Basis Technology Corp. * Copyright 2014-2021 Basis Technology Corp.
* Contact: carrier <at> sleuthkit <dot> org * Contact: carrier <at> sleuthkit <dot> org
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
@ -20,32 +20,76 @@ package org.sleuthkit.autopsy.ingest;
import org.sleuthkit.datamodel.Content; import org.sleuthkit.datamodel.Content;
/**
* An ingest task that will be executed by an ingest thread using a given ingest
* job pipeline. Three examples of concrete types of ingest tasks are tasks to
* analyze a data source, tasks to analyze the files in a data source, and tasks
* that analyze data artifacts.
*/
abstract class IngestTask { abstract class IngestTask {
private final static long NOT_SET = Long.MIN_VALUE; private final static long NOT_SET = Long.MIN_VALUE;
private final IngestJobPipeline ingestJobPipeline; private final IngestJobPipeline ingestJobPipeline;
private long threadId; private long threadId;
/**
* Constructs an ingest task that will be executed by an ingest thread using
* a given ingest job pipeline. Three examples of concrete types of ingest
* tasks are tasks to analyze a data source, tasks to analyze the files in a
* data source, and tasks that analyze data artifacts.
*
* @param ingestJobPipeline The ingest job pipeline to use to execute the
* task.
*/
IngestTask(IngestJobPipeline ingestJobPipeline) { IngestTask(IngestJobPipeline ingestJobPipeline) {
this.ingestJobPipeline = ingestJobPipeline; this.ingestJobPipeline = ingestJobPipeline;
threadId = NOT_SET; threadId = NOT_SET;
} }
/**
* Gets the ingest job pipeline used to complete this task.
*
* @return The ingest job pipeline.
*/
IngestJobPipeline getIngestJobPipeline() { IngestJobPipeline getIngestJobPipeline() {
return ingestJobPipeline; return ingestJobPipeline;
} }
/**
* Gets the data source for the ingest job of which this task is a part.
*
* @return The data source.
*/
Content getDataSource() { Content getDataSource() {
return getIngestJobPipeline().getDataSource(); return getIngestJobPipeline().getDataSource();
} }
/**
* Gets the thread ID of the ingest thread executing this task.
*
* @return The thread ID.
*/
long getThreadId() { long getThreadId() {
return threadId; return threadId;
} }
/**
* Sets the thread ID of the ingest thread executing this task.
*
* @param threadId The thread ID.
*/
void setThreadId(long threadId) { void setThreadId(long threadId) {
this.threadId = threadId; this.threadId = threadId;
} }
abstract void execute(long threadId) throws InterruptedException; /**
* Records the ingest thread ID of the calling thread and executes this task
* using the ingest job pipeline specified when the task was created. The
     * implementation of the method should simply call
* super.setThreadId(threadId) and getIngestJobPipeline().process(this).
*
* @param threadId The numeric ID of the ingest thread executing this task.
*/
abstract void execute(long threadId);
} }
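Under that contract, a concrete task type reduces to a few lines. A sketch; FooIngestTask is hypothetical, and it assumes IngestJobPipeline exposes a process() overload for the task type:

final class FooIngestTask extends IngestTask {

    FooIngestTask(IngestJobPipeline ingestJobPipeline) {
        super(ingestJobPipeline);
    }

    @Override
    void execute(long threadId) {
        // Record the calling ingest thread, then hand the task to the pipeline.
        super.setThreadId(threadId);
        getIngestJobPipeline().process(this); // assumed overload for this task type
    }
}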
View File
@ -26,6 +26,8 @@ import java.util.ArrayList;
import java.util.Date; import java.util.Date;
import java.util.List; import java.util.List;
import java.util.Optional; import java.util.Optional;
import javax.annotation.concurrent.GuardedBy;
import javax.annotation.concurrent.ThreadSafe;
import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeUnit;
import java.util.logging.Level; import java.util.logging.Level;
import org.openide.util.NbBundle; import org.openide.util.NbBundle;
@ -33,54 +35,65 @@ import org.sleuthkit.autopsy.coreutils.Logger;
import org.sleuthkit.autopsy.coreutils.MessageNotifyUtil; import org.sleuthkit.autopsy.coreutils.MessageNotifyUtil;
/** /**
* An abstract superclass for pipelines of ingest modules for a given ingest * An abstract superclass for pipelines of ingest modules that execute ingest
* task type. Some examples of ingest task types: data source level ingest * tasks for an ingest job.
* tasks, file ingest tasks, data artifact ingest tasks, etc. Subclasses need to *
* implement a specialization of the inner PipelineModule abstract superclass * Conceptually, an ingest job pipeline is divided into one or more "sub
* for the type of ingest modules that make up the pipeline. * pipelines" that are actually ingest task pipelines of varying types. Thus,
* the type parameter of this generic is an ingest task type.
*
 * IMPORTANT: Subclasses need to extend this class and implement a
* specialization of the inner PipelineModule abstract superclass.
* *
* @param <T> The ingest task type. * @param <T> The ingest task type.
*/ */
@ThreadSafe
abstract class IngestTaskPipeline<T extends IngestTask> { abstract class IngestTaskPipeline<T extends IngestTask> {
private static final Logger logger = Logger.getLogger(IngestTaskPipeline.class.getName()); private static final Logger logger = Logger.getLogger(IngestTaskPipeline.class.getName());
private final IngestJobPipeline ingestJobPipeline; private final IngestJobPipeline ingestJobPipeline;
@GuardedBy("this")
private final List<IngestModuleTemplate> moduleTemplates; private final List<IngestModuleTemplate> moduleTemplates;
@GuardedBy("this")
private final List<PipelineModule<T>> modules; private final List<PipelineModule<T>> modules;
private volatile Date startTime; private volatile Date startTime;
private volatile boolean running; private volatile boolean running;
private volatile PipelineModule<T> currentModule; private volatile PipelineModule<T> currentModule;
/** /**
* Constructs an instance of an abstract superclass for pipelines of ingest * Constructs the superclass part of a pipeline of ingest modules that
* modules for a given ingest task type. Some examples of ingest task types: * executes ingest tasks for an ingest job.
* data source level ingest tasks, file ingest tasks, data artifact ingest
* tasks, etc. Subclasses need to implement a specialization of the inner
* PipelineModule abstract superclass for the type of ingest modules that
* make up the pipeline.
* *
* @param ingestJobPipeline The ingest job pipeline that owns this pipeline. * @param ingestPipeline The parent ingest job pipeline for this ingest
* task pipeline.
* @param moduleTemplates The ingest module templates that define this * @param moduleTemplates The ingest module templates that define this
* pipeline. * ingest task pipeline. May be an empty list.
*/
IngestTaskPipeline(IngestJobPipeline ingestPipeline, List<IngestModuleTemplate> moduleTemplates) {
this.ingestJobPipeline = ingestPipeline;
/*
* The creation of ingest modules from the ingest module templates has
* been deliberately deferred to the startUp() method so that any and
* all errors in module construction or start up can be reported to the
* client code.
*/ */
IngestTaskPipeline(IngestJobPipeline ingestJobPipeline, List<IngestModuleTemplate> moduleTemplates) {
this.ingestJobPipeline = ingestJobPipeline;
this.moduleTemplates = moduleTemplates; this.moduleTemplates = moduleTemplates;
modules = new ArrayList<>(); modules = new ArrayList<>();
} }
/** /**
* Indicates whether or not there are any ingest modules in this pipeline. * Indicates whether or not there are any ingest modules in this ingest task
* pipeline.
* *
* @return True or false. * @return True or false.
*/ */
boolean isEmpty() { synchronized boolean isEmpty() {
return modules.isEmpty(); return modules.isEmpty();
} }
/** /**
* Queries whether or not this pipeline is running, i.e., started and not * Queries whether or not this ingest task pipeline is running, i.e., the
* shut down. * startUp() method has been called and the shutDown() has not been called.
* *
* @return True or false. * @return True or false.
*/ */
@ -89,22 +102,36 @@ abstract class IngestTaskPipeline<T extends IngestTask> {
} }
/** /**
* Starts up the ingest modules in this pipeline. * Starts up this ingest task pipeline by calling the startUp() methods of
* the ingest modules in the pipeline.
* *
* @return A list of ingest module start up errors, possibly empty. * @return A list of ingest module start up errors, possibly empty.
*/ */
List<IngestModuleError> startUp() { synchronized List<IngestModuleError> startUp() {
List<IngestModuleError> errors = new ArrayList<>();
if (!running) {
/*
* The creation of ingest modules from the ingest module templates
* has been deliberately deferred to the startUp() method so that
* any and all errors in module construction or start up can be
* reported to the client code.
*/
createIngestModules(moduleTemplates); createIngestModules(moduleTemplates);
return startUpIngestModules(); errors.addAll(startUpIngestModules());
} else {
errors.add(new IngestModuleError("Ingest Task Pipeline", new IngestTaskPipelineException("Pipeline already started"))); //NON-NLS
}
return errors;
} }
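Callers are expected to treat the returned error list as the success signal. A usage sketch, assuming IngestModuleError exposes the module display name and the underlying throwable, and that pipeline and logger are in scope:

List<IngestModuleError> errors = pipeline.startUp();
if (!errors.isEmpty()) {
    for (IngestModuleError error : errors) {
        logger.log(Level.SEVERE, String.format("%s failed to start up", error.getModuleDisplayName()), error.getThrowable());
    }
    // Do not execute tasks on a pipeline that failed to start.
}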
/** /**
* Creates the ingest modules for this pipeline. * Creates the ingest modules for this ingest task pipeline from the given
* ingest module templates.
* *
* @param moduleTemplates The ingest module templates avaialble to this * @param moduleTemplates The ingest module templates.
* pipeline.
*/ */
private void createIngestModules(List<IngestModuleTemplate> moduleTemplates) { private void createIngestModules(List<IngestModuleTemplate> moduleTemplates) {
if (modules.isEmpty()) {
for (IngestModuleTemplate template : moduleTemplates) { for (IngestModuleTemplate template : moduleTemplates) {
Optional<PipelineModule<T>> module = acceptModuleTemplate(template); Optional<PipelineModule<T>> module = acceptModuleTemplate(template);
if (module.isPresent()) { if (module.isPresent()) {
@ -112,33 +139,40 @@ abstract class IngestTaskPipeline<T extends IngestTask> {
} }
} }
} }
}
/** /**
* Determines if the type of ingest module that can be created from a given * Determines if one of the types of ingest modules that can be created from
* ingest module template should be added to this pipeline. If so, the * a given ingest module template should be added to this ingest task
* ingest module is created and returned. * pipeline. If so, the ingest module is created and returned.
* *
* @param ingestModuleTemplate The ingest module template to be used or * @param template The ingest module template to be used or ignored, as
* ignored, as appropriate to the pipeline type. * appropriate to the pipeline type.
* *
* @return An Optional that is either empty or contains a newly created and * @return An Optional that is either empty or contains a newly created
* wrapped ingest module. * ingest module of type T, wrapped in a PipelineModule decorator.
*/ */
abstract Optional<PipelineModule<T>> acceptModuleTemplate(IngestModuleTemplate ingestModuleTemplate); abstract Optional<PipelineModule<T>> acceptModuleTemplate(IngestModuleTemplate template);
/** /**
* Starts up the ingest modules in the pipeline. * Starts up the ingest modules in this ingest task pipeline.
* *
* @return A list of ingest module start up errors, possibly empty. * @return A list of ingest module start up errors, possibly empty.
*/ */
private List<IngestModuleError> startUpIngestModules() { private List<IngestModuleError> startUpIngestModules() {
List<IngestModuleError> errors = new ArrayList<>();
startTime = new Date(); startTime = new Date();
running = true; running = true;
List<IngestModuleError> errors = new ArrayList<>();
for (PipelineModule<T> module : modules) { for (PipelineModule<T> module : modules) {
try { try {
module.startUp(new IngestJobContext(ingestJobPipeline)); module.startUp(new IngestJobContext(ingestJobPipeline));
} catch (Throwable ex) { // Catch-all exception firewall } catch (Throwable ex) {
/*
* A catch-all exception firewall. Start up errors for all of
* the ingest modules, whether checked exceptions or runtime
* exceptions, are reported to allow correction of all of the
* error conditions in one go.
*/
errors.add(new IngestModuleError(module.getDisplayName(), ex)); errors.add(new IngestModuleError(module.getDisplayName(), ex));
} }
} }
@ -146,7 +180,7 @@ abstract class IngestTaskPipeline<T extends IngestTask> {
} }
/** /**
* Returns the start up time of this pipeline. * Returns the start up time of this ingest task pipeline.
* *
* @return The file processing start time, may be null if this pipeline has * @return The file processing start time, may be null if this pipeline has
* not been started yet. * not been started yet.
@ -160,31 +194,23 @@ abstract class IngestTaskPipeline<T extends IngestTask> {
} }
/** /**
* Does any preparation required before performing a task. * Executes an ingest task by calling the process() methods of the ingest
* * modules in this ingest task pipeline.
* @param task The task.
*
* @throws IngestTaskPipelineException Thrown if there is an error preparing
* to perform the task.
*/
abstract void prepareTask(T task) throws IngestTaskPipelineException;
/**
* Performs an ingest task using the ingest modules in this pipeline.
* *
* @param task The task. * @param task The task.
* *
* @return A list of ingest module task processing errors, possibly empty. * @return A list of ingest module task processing errors, possibly empty.
*/ */
List<IngestModuleError> performTask(T task) { synchronized List<IngestModuleError> executeTask(T task) {
List<IngestModuleError> errors = new ArrayList<>(); List<IngestModuleError> errors = new ArrayList<>();
if (!this.ingestJobPipeline.isCancelled()) { if (running) {
if (!ingestJobPipeline.isCancelled()) {
pauseIfScheduled(); pauseIfScheduled();
if (ingestJobPipeline.isCancelled()) { if (ingestJobPipeline.isCancelled()) {
return errors; return errors;
} }
try { try {
prepareTask(task); prepareForTask(task);
} catch (IngestTaskPipelineException ex) { } catch (IngestTaskPipelineException ex) {
errors.add(new IngestModuleError("Ingest Task Pipeline", ex)); //NON-NLS errors.add(new IngestModuleError("Ingest Task Pipeline", ex)); //NON-NLS
return errors; return errors;
@ -197,8 +223,14 @@ abstract class IngestTaskPipeline<T extends IngestTask> {
try { try {
currentModule = module; currentModule = module;
currentModule.setProcessingStartTime(); currentModule.setProcessingStartTime();
module.performTask(ingestJobPipeline, task); module.executeTask(ingestJobPipeline, task);
} catch (Throwable ex) { // Catch-all exception firewall } catch (Throwable ex) {
/*
* A catch-all exception firewall. Note that a runtime
* exception from a single module does not stop
* processing of the task by the other modules in the
* pipeline.
*/
errors.add(new IngestModuleError(module.getDisplayName(), ex)); errors.add(new IngestModuleError(module.getDisplayName(), ex));
} }
if (ingestJobPipeline.isCancelled()) { if (ingestJobPipeline.isCancelled()) {
@ -207,10 +239,13 @@ abstract class IngestTaskPipeline<T extends IngestTask> {
} }
} }
try { try {
completeTask(task); cleanUpAfterTask(task);
} catch (IngestTaskPipelineException ex) { } catch (IngestTaskPipelineException ex) {
errors.add(new IngestModuleError("Ingest Task Pipeline", ex)); //NON-NLS errors.add(new IngestModuleError("Ingest Task Pipeline", ex)); //NON-NLS
} }
} else {
errors.add(new IngestModuleError("Ingest Task Pipeline", new IngestTaskPipelineException("Pipeline not started or shut down"))); //NON-NLS
}
currentModule = null; currentModule = null;
return errors; return errors;
} }
@ -265,7 +300,18 @@ abstract class IngestTaskPipeline<T extends IngestTask> {
} }
/** /**
* Gets the currently running module. * Does any task type specific preparation required before executing an
* ingest task.
*
* @param task The task.
*
* @throws IngestTaskPipelineException Thrown if there is an error preparing
* to execute the task.
*/
abstract void prepareForTask(T task) throws IngestTaskPipelineException;
/**
* Gets the currently running ingest module.
* *
* @return The module, possibly null if no module is currently running. * @return The module, possibly null if no module is currently running.
*/ */
@ -274,21 +320,11 @@ abstract class IngestTaskPipeline<T extends IngestTask> {
} }
/** /**
* Does any clean up required after performing a task. * Shuts down all of the ingest modules in this pipeline.
*
* @param task The task.
*
* @throws IngestTaskPipelineException Thrown if there is an error cleaning
* up after performing the task.
*/
abstract void completeTask(T task) throws IngestTaskPipelineException;
/**
* Shuts down all of the modules in the pipeline.
* *
* @return A list of shut down errors, possibly empty. * @return A list of shut down errors, possibly empty.
*/ */
List<IngestModuleError> shutDown() { synchronized List<IngestModuleError> shutDown() {
List<IngestModuleError> errors = new ArrayList<>(); List<IngestModuleError> errors = new ArrayList<>();
if (running == true) { if (running == true) {
for (PipelineModule<T> module : modules) { for (PipelineModule<T> module : modules) {
@ -315,8 +351,22 @@ abstract class IngestTaskPipeline<T extends IngestTask> {
} }
/** /**
* An abstract superclass for a wrapper that adds ingest infrastructure * Does any task type specific clean up required after executing an ingest
* task.
*
* @param task The task.
*
* @throws IngestTaskPipelineException Thrown if there is an error cleaning
* up after performing the task.
*/
abstract void cleanUpAfterTask(T task) throws IngestTaskPipelineException;
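Putting the task-type-specific hooks together with acceptModuleTemplate(), a concrete pipeline might look like the sketch below; FooTaskPipeline, FooIngestTask, and FooPipelineModule are hypothetical (a decorator sketch for FooPipelineModule follows at the end of this file's diff), and a file-oriented pipeline is assumed for the template check:

final class FooTaskPipeline extends IngestTaskPipeline<FooIngestTask> {

    FooTaskPipeline(IngestJobPipeline ingestPipeline, List<IngestModuleTemplate> moduleTemplates) {
        super(ingestPipeline, moduleTemplates);
    }

    @Override
    Optional<PipelineModule<FooIngestTask>> acceptModuleTemplate(IngestModuleTemplate template) {
        if (template.isFileIngestModuleTemplate()) {
            // Wrap the ingest module in this pipeline's decorator type.
            return Optional.of(new FooPipelineModule(template.createFileIngestModule(), template.getModuleName()));
        }
        return Optional.empty();
    }

    @Override
    void prepareForTask(FooIngestTask task) throws IngestTaskPipelineException {
        // No preparation needed for this hypothetical task type.
    }

    @Override
    void cleanUpAfterTask(FooIngestTask task) throws IngestTaskPipelineException {
        // No clean up needed for this hypothetical task type.
    }
}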
/**
* An abstract superclass for a decorator that adds ingest infrastructure
* operations to an ingest module. * operations to an ingest module.
*
* IMPORTANT: Subclasses of IngestTaskPipeline need to implement a
     * specialization of this class.
*/ */
static abstract class PipelineModule<T extends IngestTask> implements IngestModule { static abstract class PipelineModule<T extends IngestTask> implements IngestModule {
@ -325,7 +375,7 @@ abstract class IngestTaskPipeline<T extends IngestTask> {
private volatile Date processingStartTime; private volatile Date processingStartTime;
/** /**
* Constructs an instance of an abstract superclass for a wrapper that * Constructs an instance of an abstract superclass for a decorator that
* adds ingest infrastructure operations to an ingest module. * adds ingest infrastructure operations to an ingest module.
* *
* @param module The ingest module to be wrapped. * @param module The ingest module to be wrapped.
@ -338,7 +388,7 @@ abstract class IngestTaskPipeline<T extends IngestTask> {
} }
/** /**
* Gets the class name of the wrapped ingest module. * Gets the class name of the decorated ingest module.
* *
* @return The class name. * @return The class name.
*/ */
@ -347,7 +397,7 @@ abstract class IngestTaskPipeline<T extends IngestTask> {
} }
/** /**
* Gets the display name of the wrapped ingest module. * Gets the display name of the decorated ingest module.
* *
* @return The display name. * @return The display name.
*/ */
@ -356,7 +406,7 @@ abstract class IngestTaskPipeline<T extends IngestTask> {
} }
/** /**
* Sets the processing start time for the wrapped module to the system * Sets the processing start time for the decorated module to the system
* time when this method is called. * time when this method is called.
*/ */
void setProcessingStartTime() { void setProcessingStartTime() {
@ -364,7 +414,7 @@ abstract class IngestTaskPipeline<T extends IngestTask> {
} }
/** /**
         * Gets the the processing start time for the wrapped module. * Gets the processing start time for the decorated module.
* *
* @return The start time, will be null if the module has not started * @return The start time, will be null if the module has not started
* processing the data source yet. * processing the data source yet.
@ -379,16 +429,17 @@ abstract class IngestTaskPipeline<T extends IngestTask> {
} }
/** /**
* Performs an ingest task. * Executes an ingest task using the process() method of the decorated
* module.
* *
* @param ingestJobPipeline The ingest job pipeline that owns the ingest * @param ingestJobPipeline The ingest job pipeline that owns the ingest
* module pipeline this module belongs to. * task pipeline this module belongs to.
* @param task The task to process. * @param task The task to execute.
* *
* @throws IngestModuleException Excepton thrown if there is an error * @throws IngestModuleException Exception thrown if there is an error
* performing the task. * performing the task.
*/ */
abstract void performTask(IngestJobPipeline ingestJobPipeline, T task) throws IngestModuleException; abstract void executeTask(IngestJobPipeline ingestJobPipeline, T task) throws IngestModuleException;
@Override @Override
public void shutDown() { public void shutDown() {
@ -398,19 +449,31 @@ abstract class IngestTaskPipeline<T extends IngestTask> {
} }
/** /**
* An exception for the use of ingest task pipelines. * An exception thrown by an ingest task pipeline.
*/ */
public static class IngestTaskPipelineException extends Exception { public static class IngestTaskPipelineException extends Exception {
private static final long serialVersionUID = 1L; private static final long serialVersionUID = 1L;
/**
* Constructs an exception to be thrown by an ingest task pipeline.
*
* @param message The exception message.
*/
public IngestTaskPipelineException(String message) { public IngestTaskPipelineException(String message) {
super(message); super(message);
} }
/**
* Constructs an exception to be thrown by an ingest task pipeline.
*
* @param message The exception message.
* @param cause The exception cause.
*/
public IngestTaskPipelineException(String message, Throwable cause) { public IngestTaskPipelineException(String message, Throwable cause) {
super(message, cause); super(message, cause);
} }
} }
} }
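A matching decorator specialization adapts the wrapped module's process() method to the task type, completing the hypothetical FooPipelineModule used in the earlier pipeline sketch. It assumes the PipelineModule constructor takes the wrapped module and its display name, and that the task type exposes a getFile() accessor:

static final class FooPipelineModule extends IngestTaskPipeline.PipelineModule<FooIngestTask> {

    private final FileIngestModule module;

    FooPipelineModule(FileIngestModule module, String displayName) {
        super(module, displayName); // assumed constructor signature
        this.module = module;
    }

    @Override
    void executeTask(IngestJobPipeline ingestJobPipeline, FooIngestTask task) throws IngestModuleException {
        // Delegate to the decorated module's process() method.
        module.process(task.getFile());
    }
}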
View File
@ -1,7 +1,7 @@
/* /*
* Autopsy Forensic Browser * Autopsy Forensic Browser
* *
* Copyright 2012-2018 Basis Technology Corp. * Copyright 2012-2021 Basis Technology Corp.
* Contact: carrier <at> sleuthkit <dot> org * Contact: carrier <at> sleuthkit <dot> org
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
@ -36,16 +36,20 @@ import java.util.regex.Matcher;
import java.util.regex.Pattern; import java.util.regex.Pattern;
import javax.annotation.concurrent.GuardedBy; import javax.annotation.concurrent.GuardedBy;
import javax.annotation.concurrent.ThreadSafe; import javax.annotation.concurrent.ThreadSafe;
import org.sleuthkit.autopsy.casemodule.Case;
import org.sleuthkit.autopsy.coreutils.Logger; import org.sleuthkit.autopsy.coreutils.Logger;
import org.sleuthkit.datamodel.AbstractFile; import org.sleuthkit.datamodel.AbstractFile;
import org.sleuthkit.datamodel.Blackboard;
import org.sleuthkit.datamodel.Content; import org.sleuthkit.datamodel.Content;
import org.sleuthkit.datamodel.DataArtifact;
import org.sleuthkit.datamodel.DataSource;
import org.sleuthkit.datamodel.FileSystem; import org.sleuthkit.datamodel.FileSystem;
import org.sleuthkit.datamodel.TskCoreException; import org.sleuthkit.datamodel.TskCoreException;
import org.sleuthkit.datamodel.TskData; import org.sleuthkit.datamodel.TskData;
/** /**
* Creates ingest tasks for data source ingest jobs, queueing the tasks in * Creates ingest tasks for ingest jobs, queueing the tasks in priority order
* priority order for execution by the ingest manager's ingest threads. * for execution by the ingest manager's ingest threads.
*/ */
@ThreadSafe @ThreadSafe
final class IngestTasksScheduler { final class IngestTasksScheduler {
@ -54,19 +58,20 @@ final class IngestTasksScheduler {
private static final Logger logger = Logger.getLogger(IngestTasksScheduler.class.getName()); private static final Logger logger = Logger.getLogger(IngestTasksScheduler.class.getName());
@GuardedBy("IngestTasksScheduler.this") @GuardedBy("IngestTasksScheduler.this")
private static IngestTasksScheduler instance; private static IngestTasksScheduler instance;
private final IngestTaskTrackingQueue dataSourceIngestThreadQueue; private final IngestTaskTrackingQueue dataSourceIngestTasksQueue;
@GuardedBy("this") @GuardedBy("this")
private final TreeSet<FileIngestTask> rootFileTaskQueue; private final TreeSet<FileIngestTask> topLevelFileIngestTasksQueue;
@GuardedBy("this") @GuardedBy("this")
private final Deque<FileIngestTask> pendingFileTaskQueue; private final Deque<FileIngestTask> batchedFileIngestTasksQueue;
@GuardedBy("this") @GuardedBy("this")
private final Queue<FileIngestTask> streamedTasksQueue; private final Queue<FileIngestTask> streamedFileIngestTasksQueue;
private final IngestTaskTrackingQueue fileIngestThreadsQueue; private final IngestTaskTrackingQueue fileIngestTasksQueue;
private final IngestTaskTrackingQueue artifactIngestTasksQueue;
/** /**
* Gets the ingest tasks scheduler singleton that creates ingest tasks for * Gets the ingest tasks scheduler singleton that creates ingest tasks for
* data source ingest jobs, queueing the tasks in priority order for * ingest jobs, queueing the tasks in priority order for execution by the
* execution by the ingest manager's ingest threads. * ingest manager's ingest threads.
*/ */
synchronized static IngestTasksScheduler getInstance() { synchronized static IngestTasksScheduler getInstance() {
if (IngestTasksScheduler.instance == null) { if (IngestTasksScheduler.instance == null) {
@ -76,146 +81,195 @@ final class IngestTasksScheduler {
} }
/** /**
* Constructs an ingest tasks scheduler that creates ingest tasks for data * Constructs an ingest tasks scheduler that creates ingest tasks for ingest
* source ingest jobs, queueing the tasks in priority order for execution by * jobs, queueing the tasks in priority order for execution by the ingest
* the ingest manager's ingest threads. * manager's ingest threads.
*/ */
private IngestTasksScheduler() { private IngestTasksScheduler() {
this.dataSourceIngestThreadQueue = new IngestTaskTrackingQueue(); dataSourceIngestTasksQueue = new IngestTaskTrackingQueue();
this.rootFileTaskQueue = new TreeSet<>(new RootDirectoryTaskComparator()); topLevelFileIngestTasksQueue = new TreeSet<>(new RootDirectoryTaskComparator());
this.pendingFileTaskQueue = new LinkedList<>(); batchedFileIngestTasksQueue = new LinkedList<>();
this.fileIngestThreadsQueue = new IngestTaskTrackingQueue(); fileIngestTasksQueue = new IngestTaskTrackingQueue();
this.streamedTasksQueue = new LinkedList<>(); streamedFileIngestTasksQueue = new LinkedList<>();
artifactIngestTasksQueue = new IngestTaskTrackingQueue();
} }
/** /**
* Gets the data source level ingest tasks queue. This queue is a blocking * Gets the data source level ingest tasks queue. This queue is a blocking
* queue used by the ingest manager's data source level ingest thread. * queue consumed by the ingest manager's data source level ingest thread.
* *
* @return The queue. * @return The queue.
*/ */
BlockingIngestTaskQueue getDataSourceIngestTaskQueue() { BlockingIngestTaskQueue getDataSourceIngestTaskQueue() {
return this.dataSourceIngestThreadQueue; return dataSourceIngestTasksQueue;
} }
/** /**
* Gets the file level ingest tasks queue. This queue is a blocking queue * Gets the file level ingest tasks queue. This queue is a blocking queue
* used by the ingest manager's file level ingest threads. * consumed by the ingest manager's file level ingest threads.
* *
* @return The queue. * @return The queue.
*/ */
BlockingIngestTaskQueue getFileIngestTaskQueue() { BlockingIngestTaskQueue getFileIngestTaskQueue() {
return this.fileIngestThreadsQueue; return fileIngestTasksQueue;
} }
/** /**
* Schedules a data source level ingest task and zero to many file level * Gets the data artifact ingest tasks queue. This queue is a blocking queue
* ingest tasks for an ingest job pipeline. * consumed by the ingest manager's data artifact ingest thread.
* *
* @param ingestJobPipeline The ingest job pipeline. * @return The queue.
*/ */
synchronized void scheduleIngestTasks(IngestJobPipeline ingestJobPipeline) { BlockingIngestTaskQueue getResultIngestTaskQueue() {
if (!ingestJobPipeline.isCancelled()) { return artifactIngestTasksQueue;
/* }
* Scheduling of both the data source ingest task and the initial
* file ingest tasks for an ingestJobPipeline must be an atomic operation. /**
* Otherwise, the data source task might be completed before the * Schedules ingest tasks based on the types of ingest modules that the
     * file tasks are scheduled, resulting in a potential false positive * ingest pipeline that will execute tasks has. Scheduling these tasks
     * when another thread checks whether or not all the tasks for the * atomically means that it is valid to call currentTasksAreCompleted()
     * ingestJobPipeline are completed. * immediately after calling this method. Note that this may cause some or
     * even all of the file tasks to be discarded.
*
* @param ingestPipeline The ingest pipeline that will execute the scheduled
* tasks. A reference to the pipeline is added to each
* task so that when the task is dequeued by an ingest
* thread the task can pass the target Content of the
* task to the pipeline for processing by the
* pipeline's ingest modules.
*/ */
this.scheduleDataSourceIngestTask(ingestJobPipeline); synchronized void scheduleIngestTasks(IngestJobPipeline ingestPipeline) {
this.scheduleFileIngestTasks(ingestJobPipeline, Collections.emptyList()); if (!ingestPipeline.isCancelled()) {
if (ingestPipeline.hasDataSourceIngestModules()) {
scheduleDataSourceIngestTask(ingestPipeline);
}
if (ingestPipeline.hasFileIngestModules()) {
scheduleFileIngestTasks(ingestPipeline, Collections.emptyList());
}
if (ingestPipeline.hasDataArtifactIngestModules()) {
scheduleDataArtifactIngestTasks(ingestPipeline);
}
} }
} }
/** /**
* Schedules a data source level ingest task for an ingest job pipeline. * Schedules a data source level ingest task for an ingest job. The data
* source is obtained from the ingest pipeline passed in.
* *
* @param ingestJobPipeline The ingest job pipeline. * @param ingestPipeline The ingest pipeline that will execute the scheduled
* task. A reference to the pipeline is added to the
* task so that when the task is dequeued by an ingest
* thread the task can pass the target Content of the
* task to the pipeline for processing by the
* pipeline's ingest modules.
*/ */
synchronized void scheduleDataSourceIngestTask(IngestJobPipeline ingestJobPipeline) { synchronized void scheduleDataSourceIngestTask(IngestJobPipeline ingestPipeline) {
if (!ingestJobPipeline.isCancelled()) { if (!ingestPipeline.isCancelled()) {
DataSourceIngestTask task = new DataSourceIngestTask(ingestJobPipeline); DataSourceIngestTask task = new DataSourceIngestTask(ingestPipeline);
try { try {
this.dataSourceIngestThreadQueue.putLast(task); dataSourceIngestTasksQueue.putLast(task);
} catch (InterruptedException ex) { } catch (InterruptedException ex) {
                IngestTasksScheduler.logger.log(Level.INFO, String.format("Ingest tasks scheduler interrupted while blocked adding a task to the data source level ingest task queue (jobId={%d)", ingestJobPipeline.getId()), ex); IngestTasksScheduler.logger.log(Level.INFO, String.format("Ingest tasks scheduler interrupted while blocked adding a task to the data source level ingest task queue (pipelineId=%d)", ingestPipeline.getId()), ex);
Thread.currentThread().interrupt(); Thread.currentThread().interrupt();
} }
} }
} }
/** /**
* Schedules file tasks for either all the files or a given subset of the * Schedules file tasks for either all the files, or a given subset of the
* files for an ingest job pipeline. * files, for a data source. The data source is obtained from the ingest
* pipeline passed in.
* *
* @param ingestJobPipeline The ingest job pipeline. * @param ingestPipeline The ingest pipeline that will execute the scheduled
* @param files A subset of the files for the data source; if empty, then * tasks. A reference to the pipeline is added to each
* file tasks for all files in the data source are scheduled. * task so that when the task is dequeued by an ingest
* thread the task can pass the target Content of the
* task to the pipeline for processing by the
* pipeline's ingest modules.
* @param files A subset of the files from the data source; if
     *                       empty, then all of the files from the data source
* are candidates for scheduling.
*/ */
synchronized void scheduleFileIngestTasks(IngestJobPipeline ingestJobPipeline, Collection<AbstractFile> files) { synchronized void scheduleFileIngestTasks(IngestJobPipeline ingestPipeline, Collection<AbstractFile> files) {
if (!ingestJobPipeline.isCancelled()) { if (!ingestPipeline.isCancelled()) {
Collection<AbstractFile> candidateFiles; Collection<AbstractFile> candidateFiles;
if (files.isEmpty()) { if (files.isEmpty()) {
candidateFiles = getTopLevelFiles(ingestJobPipeline.getDataSource()); candidateFiles = getTopLevelFiles(ingestPipeline.getDataSource());
} else { } else {
candidateFiles = files; candidateFiles = files;
} }
for (AbstractFile file : candidateFiles) { for (AbstractFile file : candidateFiles) {
FileIngestTask task = new FileIngestTask(ingestJobPipeline, file); FileIngestTask task = new FileIngestTask(ingestPipeline, file);
if (IngestTasksScheduler.shouldEnqueueFileTask(task)) { if (IngestTasksScheduler.shouldEnqueueFileTask(task)) {
this.rootFileTaskQueue.add(task); topLevelFileIngestTasksQueue.add(task);
} }
} }
refillIngestThreadQueue(); refillFileIngestTasksQueue();
} }
} }
/** /**
* Schedules file tasks for the given list of file IDs. * Schedules file tasks for a collection of "streamed" files for a streaming
* ingest job.
* *
* @param ingestJobPipeline The ingest job pipeline. * @param ingestPipeline The ingest pipeline for the job. A reference to the
* @param files A subset of the files for the data source; if empty, then * pipeline is added to each task so that when the
* file tasks for all files in the data source are scheduled. * task is dequeued by an ingest thread and the task's
* execute() method is called, execute() can pass the
* target Content of the task to the pipeline for
* processing by the pipeline's ingest modules.
* @param files A list of file object IDs for the streamed files.
*/ */
synchronized void scheduleStreamedFileIngestTasks(IngestJobPipeline ingestJobPipeline, List<Long> fileIds) { synchronized void scheduleStreamedFileIngestTasks(IngestJobPipeline ingestPipeline, List<Long> fileIds) {
if (!ingestJobPipeline.isCancelled()) { if (!ingestPipeline.isCancelled()) {
for (long id : fileIds) { for (long id : fileIds) {
// Create the file ingest task. Note that we do not do the shouldEnqueueFileTask() /*
// check here in order to delay loading the AbstractFile object. * Create the file ingest task. Note that we do not do the
FileIngestTask task = new FileIngestTask(ingestJobPipeline, id); * shouldEnqueueFileTask() check here in order to delay querying
this.streamedTasksQueue.add(task); * the case database to construct the AbstractFile object. The
* file filter will be applied before the file task makes it to
* the task queue consumed by the file ingest threads.
*/
FileIngestTask task = new FileIngestTask(ingestPipeline, id);
streamedFileIngestTasksQueue.add(task);
} }
refillIngestThreadQueue(); refillFileIngestTasksQueue();
} }
} }
/** /**
* Schedules file level ingest tasks for a given set of files for an ingest * Schedules file level ingest tasks for a given set of files for an ingest
* job pipeline by adding them directly to the front of the file tasks * job by adding them directly to the front of the file tasks queue consumed
* queue for the ingest manager's file ingest threads. * by the ingest manager's file ingest threads. This method is intended to
* be used to schedule files that are products of ingest module processing,
* e.g., extracted files and carved files.
* *
* @param ingestJobPipeline The ingestJobPipeline. * @param ingestPipeline The ingest pipeline for the job. A reference to the
* @param files A set of files for the data source. * pipeline is added to each task so that when the
* task is dequeued by an ingest thread and the task's
* execute() method is called, execute() can pass the
* target Content of the task to the pipeline for
* processing by the pipeline's ingest modules.
* @param files The files.
*/ */
synchronized void fastTrackFileIngestTasks(IngestJobPipeline ingestJobPipeline, Collection<AbstractFile> files) { synchronized void fastTrackFileIngestTasks(IngestJobPipeline ingestPipeline, Collection<AbstractFile> files) {
if (!ingestJobPipeline.isCancelled()) { if (!ingestPipeline.isCancelled()) {
/* /*
* Put the files directly into the queue for the file ingest * Put the files directly into the queue for the file ingest
* threads, if they pass the file filter for the job. The files are * threads, if they pass the file filter for the job. The files are
* added to the queue for the ingest threads BEFORE the other queued * added to the queue for the ingest threads BEFORE the other queued
* tasks because the use case for this method is scheduling new * tasks because the use case for this method is scheduling new
* carved or derived files from a higher priority task that is * carved or derived files from a high priority task that is already
* already in progress. * in progress.
*/ */
for (AbstractFile file : files) { for (AbstractFile file : files) {
FileIngestTask fileTask = new FileIngestTask(ingestJobPipeline, file); FileIngestTask fileTask = new FileIngestTask(ingestPipeline, file);
if (shouldEnqueueFileTask(fileTask)) { if (shouldEnqueueFileTask(fileTask)) {
try { try {
this.fileIngestThreadsQueue.putFirst(fileTask); fileIngestTasksQueue.putFirst(fileTask);
} catch (InterruptedException ex) { } catch (InterruptedException ex) {
IngestTasksScheduler.logger.log(Level.INFO, String.format("Ingest tasks scheduler interrupted while scheduling file level ingest tasks (jobId={%d)", ingestJobPipeline.getId()), ex); DataSource dataSource = ingestPipeline.getDataSource();
logger.log(Level.WARNING, String.format("Interrupted while enqueuing file tasks for %s (data source object ID = %d)", dataSource.getName(), dataSource.getId()), ex); //NON-NLS
Thread.currentThread().interrupt(); Thread.currentThread().interrupt();
return; return;
} }
@ -224,6 +278,62 @@ final class IngestTasksScheduler {
} }
} }
/**
* Schedules data artifact ingest tasks for any data artifacts that have
* already been added to the case database for a data source. The data
* source is obtained from the ingest pipeline passed in.
*
* @param ingestPipeline The ingest pipeline for the job. A reference to the
* pipeline is added to each task so that when the
* task is dequeued by an ingest thread and the task's
* execute() method is called, execute() can pass the
* target Content of the task to the pipeline for
* processing by the pipeline's ingest modules.
*/
synchronized void scheduleDataArtifactIngestTasks(IngestJobPipeline ingestPipeline) {
if (!ingestPipeline.isCancelled()) {
Blackboard blackboard = Case.getCurrentCase().getSleuthkitCase().getBlackboard();
try {
List<DataArtifact> artifacts = blackboard.getDataArtifacts(ingestPipeline.getDataSource().getId(), null);
scheduleDataArtifactIngestTasks(ingestPipeline, artifacts);
} catch (TskCoreException ex) {
DataSource dataSource = ingestPipeline.getDataSource();
logger.log(Level.SEVERE, String.format("Failed to retrieve data artifacts for %s (data source object ID = %d)", dataSource.getName(), dataSource.getId()), ex); //NON-NLS
}
}
}
/**
* Schedules data artifact ingest tasks for an ingest job. This method is
* intended to be used to schedule artifacts that are products of ingest
* module processing.
*
* @param ingestPipeline The ingest pipeline for the job. A reference to the
* pipeline is added to each task so that when the
* task is dequeued by an ingest thread and the task's
* execute() method is called, execute() can pass the
* target Content of the task to the pipeline for
* processing by the pipeline's ingest modules.
* @param artifacts A subset of the data artifacts from the data
* source; if empty, then all of the data artifacts
* from the data source will be scheduled.
*/
synchronized void scheduleDataArtifactIngestTasks(IngestJobPipeline ingestPipeline, List<DataArtifact> artifacts) {
if (!ingestPipeline.isCancelled()) {
for (DataArtifact artifact : artifacts) {
DataArtifactIngestTask task = new DataArtifactIngestTask(ingestPipeline, artifact);
try {
this.artifactIngestTasksQueue.putLast(task);
} catch (InterruptedException ex) {
DataSource dataSource = ingestPipeline.getDataSource();
logger.log(Level.WARNING, String.format("Interrupted while enqueuing data artifact tasks for %s (data source object ID = %d)", dataSource.getName(), dataSource.getId()), ex); //NON-NLS
Thread.currentThread().interrupt();
break;
}
}
}
}
/** /**
* Allows an ingest thread to notify this ingest task scheduler that a data * Allows an ingest thread to notify this ingest task scheduler that a data
* source level task has been completed. * source level task has been completed.
@ -231,7 +341,7 @@ final class IngestTasksScheduler {
* @param task The completed task. * @param task The completed task.
*/ */
synchronized void notifyTaskCompleted(DataSourceIngestTask task) { synchronized void notifyTaskCompleted(DataSourceIngestTask task) {
this.dataSourceIngestThreadQueue.taskCompleted(task); dataSourceIngestTasksQueue.taskCompleted(task);
} }
/** /**
@ -241,46 +351,67 @@ final class IngestTasksScheduler {
* @param task The completed task. * @param task The completed task.
*/ */
synchronized void notifyTaskCompleted(FileIngestTask task) { synchronized void notifyTaskCompleted(FileIngestTask task) {
this.fileIngestThreadsQueue.taskCompleted(task); fileIngestTasksQueue.taskCompleted(task);
refillIngestThreadQueue(); refillFileIngestTasksQueue();
}
/**
* Allows an ingest thread to notify this ingest task scheduler that a data
* artifact ingest task has been completed.
*
* @param task The completed task.
*/
synchronized void notifyTaskCompleted(DataArtifactIngestTask task) {
artifactIngestTasksQueue.taskCompleted(task);
} }
/** /**
* Queries the task scheduler to determine whether or not all of the ingest * Queries the task scheduler to determine whether or not all of the ingest
* tasks for an ingest job pipeline have been completed. * tasks for an ingest job have been completed.
* *
* @param ingestJobPipeline The ingestJobPipeline. * @param ingestPipeline The ingest pipeline for the job.
* *
* @return True or false. * @return True or false.
*/ */
synchronized boolean currentTasksAreCompleted(IngestJobPipeline ingestJobPipeline) { synchronized boolean currentTasksAreCompleted(IngestJobPipeline ingestPipeline) {
long jobId = ingestJobPipeline.getId(); long pipelineId = ingestPipeline.getId();
return !(dataSourceIngestTasksQueue.hasTasksForJob(pipelineId)
return !(this.dataSourceIngestThreadQueue.hasTasksForJob(jobId) || hasTasksForJob(topLevelFileIngestTasksQueue, pipelineId)
|| hasTasksForJob(this.rootFileTaskQueue, jobId) || hasTasksForJob(batchedFileIngestTasksQueue, pipelineId)
|| hasTasksForJob(this.pendingFileTaskQueue, jobId) || hasTasksForJob(streamedFileIngestTasksQueue, pipelineId)
|| hasTasksForJob(this.streamedTasksQueue, jobId) || fileIngestTasksQueue.hasTasksForJob(pipelineId)
|| this.fileIngestThreadsQueue.hasTasksForJob(jobId)); || artifactIngestTasksQueue.hasTasksForJob(pipelineId));
} }
/** /**
* Clears the "upstream" task scheduling queues for an ingest pipeline, * Cancels the pending file ingest tasks for an ingest job, where the
* but does nothing about tasks that have already been moved into the * pending tasks are the file ingest tasks that are in the upstream
* queue that is consumed by the file ingest threads. * scheduling queues (batch and streaming) that feed into the queue consumed
* by the ingest manager's file ingest threads.
* *
* @param ingestJobPipeline The ingestJobPipeline. * Note that the "normal" way to cancel an ingest job is to mark the job as
* cancelled, which causes the execute() methods of the ingest tasks for the
* job to return immediately when called, leading to flushing all of the
* tasks for the job out of the ingest task queues by the ingest threads and
* an orderly progression through IngestTaskTrackingQueue bookkeeping and
* the ingest job stages to early job completion. However, this method is a
* cancellation speed booster. For example, it eliminates the creation of
* what could be a large number of child tasks for both the top level files
* in the batch root file tasks queue and any directories in the batch root
* children file tasks queue.
*
* @param ingestJobPipeline The ingest pipeline for the job.
*/ */
synchronized void cancelPendingTasksForIngestJob(IngestJobPipeline ingestJobPipeline) { synchronized void cancelPendingFileTasksForIngestJob(IngestJobPipeline ingestJobPipeline) {
long jobId = ingestJobPipeline.getId(); long jobId = ingestJobPipeline.getId();
IngestTasksScheduler.removeTasksForJob(rootFileTaskQueue, jobId); removeTasksForJob(topLevelFileIngestTasksQueue, jobId);
IngestTasksScheduler.removeTasksForJob(pendingFileTaskQueue, jobId); removeTasksForJob(batchedFileIngestTasksQueue, jobId);
IngestTasksScheduler.removeTasksForJob(streamedTasksQueue, jobId); removeTasksForJob(streamedFileIngestTasksQueue, jobId);
} }
/** /**
* Gets the top level files such as file system root directories, layout * Gets the top level files for a data source, such as file system root
* files and virtual directories for a data source. Used to create file * directories, layout files, and virtual directories.
* tasks to put into the root directories queue.
* *
* @param dataSource The data source. * @param dataSource The data source.
* *
@ -311,7 +442,7 @@ final class IngestTasksScheduler {
} }
} }
} catch (TskCoreException ex) { } catch (TskCoreException ex) {
logger.log(Level.WARNING, "Could not get children of root to enqueue: " + root.getId() + ": " + root.getName(), ex); //NON-NLS logger.log(Level.SEVERE, "Could not get children of root to enqueue: " + root.getId() + ": " + root.getName(), ex); //NON-NLS
} }
} }
} }
@ -319,12 +450,15 @@ final class IngestTasksScheduler {
    }
    /**
     * Refills the file ingest tasks queue consumed by the ingest manager's
     * file ingest threads with tasks from the upstream file task scheduling
     * queues (streamed and batch). Files from the streamed file ingest tasks
     * queue are prioritized. Applies the file filter for the ingest job and
     * attempts to move as many tasks as there are ingest threads.
     */
    synchronized private void refillFileIngestTasksQueue() {
        try {
            takeFromStreamingFileTasksQueue();
            takeFromBatchTasksQueues();
        } catch (InterruptedException ex) {
            IngestTasksScheduler.logger.log(Level.INFO, "Ingest tasks scheduler interrupted while blocked adding a task to the file level ingest task queue", ex);
@ -333,27 +467,21 @@ final class IngestTasksScheduler {
        }
    /**
     * Moves tasks from the upstream streamed file ingest tasks queue into
     * the file ingest tasks queue consumed by the ingest manager's file
     * ingest threads. Applies the file filter for the ingest job and
     * attempts to move as many tasks as there are ingest threads.
     */
    synchronized private void takeFromStreamingFileTasksQueue() throws InterruptedException {
        while (fileIngestTasksQueue.isEmpty()) {
            int taskCount = 0;
            while (taskCount < IngestManager.getInstance().getNumberOfFileIngestThreads()) {
                final FileIngestTask streamingTask = streamedFileIngestTasksQueue.poll();
                if (streamingTask == null) {
                    return; // No streaming tasks are queued right now
                }
                if (shouldEnqueueFileTask(streamingTask)) {
                    fileIngestTasksQueue.putLast(streamingTask);
                    taskCount++;
                }
            }
@ -361,104 +489,91 @@ final class IngestTasksScheduler {
        }
    /**
     * Moves tasks from the upstream batched file ingest task queues into the
     * file ingest tasks queue consumed by the ingest manager's file ingest
     * threads. A sequence of two upstream queues is used to interleave tasks
     * from different ingest jobs based on priority. Applies the file filter
     * for the ingest job and attempts to move as many tasks as there are
     * ingest threads.
     *
     * The upstream batched file task queues are:
     *
     * 1. The top level file tasks queue, which contains file tasks for the
     * root objects of data sources. For example, the top level file tasks
     * for a disk image data source are typically the tasks for the contents
     * of the root directories of the file systems. This queue is a priority
     * queue that attempts to ensure that user content is analyzed before
     * general file system content. It feeds into the batched file ingest
     * tasks queue.
     *
     * 2. The batched file tasks queue, which contains top level file tasks
     * moved in from the top level file tasks queue, plus tasks for child
     * files in the descent from the root tasks to the final leaf tasks in
     * the content trees that are being analyzed for any given data source.
     * This queue is a FIFO queue that attempts to throttle the total number
     * of file tasks by deferring queueing of tasks for the children of files
     * until the queue for the file ingest threads is emptied.
     */
    synchronized private void takeFromBatchTasksQueues() throws InterruptedException {
        while (fileIngestTasksQueue.isEmpty()) {
            /*
             * If the batched file task queue is empty, move the highest
             * priority top level file task into it.
             */
            if (batchedFileIngestTasksQueue.isEmpty()) {
                final FileIngestTask topLevelTask = topLevelFileIngestTasksQueue.pollFirst();
                if (topLevelTask != null) {
                    batchedFileIngestTasksQueue.addLast(topLevelTask);
                }
            }

            /*
             * Try to move the next task from the batched file tasks queue
             * into the queue for the file ingest threads.
             */
            final FileIngestTask nextTask = batchedFileIngestTasksQueue.pollFirst();
            if (nextTask == null) {
                return;
            }
            if (shouldEnqueueFileTask(nextTask)) {
                fileIngestTasksQueue.putLast(nextTask);
            }

            /*
             * If the task that was just queued for the file ingest threads
             * has children, queue tasks for the children as well.
             */
            AbstractFile file = null;
            try {
                file = nextTask.getFile();
                for (Content child : file.getChildren()) {
                    if (child instanceof AbstractFile) {
                        AbstractFile childFile = (AbstractFile) child;
                        FileIngestTask childTask = new FileIngestTask(nextTask.getIngestJobPipeline(), childFile);
                        if (childFile.hasChildren()) {
                            batchedFileIngestTasksQueue.add(childTask);
                        } else if (shouldEnqueueFileTask(childTask)) {
                            fileIngestTasksQueue.putLast(childTask);
                        }
                    }
                }
            } catch (TskCoreException ex) {
                if (file != null) {
                    logger.log(Level.SEVERE, String.format("Error getting the children of %s (object ID = %d)", file.getName(), file.getId()), ex); //NON-NLS
                } else {
                    logger.log(Level.SEVERE, "Error loading file with object ID = {0}", nextTask.getFileId()); //NON-NLS
                }
            }
        }
    }
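As an editorial aside for readers tracing the shuffle above: a minimal sketch of the two-queue interleaving pattern, with hypothetical simplified types rather than the Autopsy API (the real scheduler adds per-job file filters, blocking semantics, and the child-task expansion shown in takeFromBatchTasksQueues):

    import java.util.ArrayDeque;
    import java.util.Deque;
    import java.util.PriorityQueue;

    final class BatchShuffleSketch {

        // Stand-in for FileIngestTask; Comparable so a "user content first"
        // policy can order the root tasks.
        static final class Task implements Comparable<Task> {
            final int priority;

            Task(int priority) {
                this.priority = priority;
            }

            @Override
            public int compareTo(Task other) {
                return Integer.compare(priority, other.priority);
            }
        }

        private final PriorityQueue<Task> topLevelQueue = new PriorityQueue<>(); // priority queue of root tasks
        private final Deque<Task> batchQueue = new ArrayDeque<>();               // FIFO throttling queue
        private final Deque<Task> consumerQueue = new ArrayDeque<>();            // drained by the ingest threads

        // Moves one top level task at a time so tasks from different jobs can
        // interleave by priority, stopping as soon as the consumer queue has work.
        void refill() {
            while (consumerQueue.isEmpty()) {
                if (batchQueue.isEmpty()) {
                    Task topLevel = topLevelQueue.poll();
                    if (topLevel != null) {
                        batchQueue.addLast(topLevel);
                    }
                }
                Task next = batchQueue.pollFirst();
                if (next == null) {
                    return; // both upstream queues are empty
                }
                consumerQueue.addLast(next);
            }
        }
    }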
    /**
     * Evaluates the file for a file ingest task to determine whether or not
     * the file should be processed and therefore whether or not the task
     * should be enqueued. The evaluation includes applying the file filter
     * for the task's parent ingest job.
     *
     * @param task The task.
     *
     * @return True or false.
     */
@ -544,8 +659,7 @@ final class IngestTasksScheduler {
    }
    /**
     * Checks whether or not a file should be carved for an ingest job.
     *
     * @param task The file ingest task for the file.
     *
@ -561,8 +675,8 @@ final class IngestTasksScheduler {
    }
    /**
     * Checks whether or not a file passes (is accepted by) the file filter
     * for an ingest job.
     *
     * @param task The file ingest task for the file.
     *
@ -579,16 +693,16 @@ final class IngestTasksScheduler {
    /**
     * Checks whether or not a collection of ingest tasks includes a task for
     * a given ingest job.
     *
     * @param tasks      The tasks.
     * @param pipelineId The ID of the ingest pipeline for the job.
     *
     * @return True if the collection includes one or more tasks for the job,
     *         false otherwise.
     */
    synchronized private static boolean hasTasksForJob(Collection<? extends IngestTask> tasks, long pipelineId) {
        for (IngestTask task : tasks) {
            if (task.getIngestJobPipeline().getId() == pipelineId) {
                return true;
            }
        }
@ -596,34 +710,35 @@ final class IngestTasksScheduler {
    }
    /**
     * Removes all of the ingest tasks associated with an ingest job from a
     * collection of tasks.
     *
     * @param tasks      The tasks.
     * @param pipelineId The ID of the ingest pipeline for the job.
     */
    private static void removeTasksForJob(Collection<? extends IngestTask> tasks, long pipelineId) {
        Iterator<? extends IngestTask> iterator = tasks.iterator();
        while (iterator.hasNext()) {
            IngestTask task = iterator.next();
            if (task.getIngestJobPipeline().getId() == pipelineId) {
                iterator.remove();
            }
        }
    }
    /**
     * Counts the number of ingest tasks in a collection of tasks for a given
     * ingest job.
     *
     * @param tasks      The tasks.
     * @param pipelineId The ID of the ingest pipeline for the job.
     *
     * @return The count.
     */
    private static int countTasksForJob(Collection<? extends IngestTask> tasks, long pipelineId) {
        int count = 0;
        for (IngestTask task : tasks) {
            if (task.getIngestJobPipeline().getId() == pipelineId) {
                count++;
            }
        }
@ -639,12 +754,13 @@ final class IngestTasksScheduler {
     * @return The snapshot of the ingest tasks data for the job.
     */
    synchronized IngestJobTasksSnapshot getTasksSnapshotForJob(long jobId) {
        return new IngestJobTasksSnapshot(jobId, dataSourceIngestTasksQueue.countQueuedTasksForJob(jobId),
                countTasksForJob(topLevelFileIngestTasksQueue, jobId),
                countTasksForJob(batchedFileIngestTasksQueue, jobId),
                fileIngestTasksQueue.countQueuedTasksForJob(jobId),
                dataSourceIngestTasksQueue.countRunningTasksForJob(jobId) + fileIngestTasksQueue.countRunningTasksForJob(jobId) + artifactIngestTasksQueue.countRunningTasksForJob(jobId),
                countTasksForJob(streamedFileIngestTasksQueue, jobId),
                artifactIngestTasksQueue.countQueuedTasksForJob(jobId));
    }
    /**
@ -883,7 +999,7 @@ final class IngestTasksScheduler {
        /**
         * Handles the completion of an ingest task by removing it from the
         * tasks in progress list.
         *
         * @param task The completed task.
         */
@ -895,43 +1011,41 @@ final class IngestTasksScheduler {
        /**
         * Checks whether there are any ingest tasks queued and/or running
         * for a given ingest job.
         *
         * @param pipelineId The ID of the ingest pipeline for the job.
         *
         * @return True if tasks are queued or running for the job, false
         *         otherwise.
         */
        boolean hasTasksForJob(long pipelineId) {
            synchronized (this) {
                return IngestTasksScheduler.hasTasksForJob(queuedTasks, pipelineId) || IngestTasksScheduler.hasTasksForJob(tasksInProgress, pipelineId);
            }
        }
        /**
         * Gets a count of the queued ingest tasks for a given ingest job.
         *
         * @param pipelineId The ID of the ingest pipeline for the job.
         *
         * @return The count.
         */
        int countQueuedTasksForJob(long pipelineId) {
            synchronized (this) {
                return IngestTasksScheduler.countTasksForJob(queuedTasks, pipelineId);
            }
        }
        /**
         * Gets a count of the running ingest tasks for a given ingest job.
         *
         * @param pipelineId The ID of the ingest pipeline for the job.
         *
         * @return The count.
         */
        int countRunningTasksForJob(long pipelineId) {
            synchronized (this) {
                return IngestTasksScheduler.countTasksForJob(tasksInProgress, pipelineId);
            }
        }
@ -950,14 +1064,24 @@ final class IngestTasksScheduler {
        private final long fileQueueSize;
        private final long runningListSize;
        private final long streamingQueueSize;
        private final long artifactsQueueSize;
        /**
         * Constructs a snapshot of ingest tasks data for an ingest job.
         *
         * @param jobId              The identifier associated with the job.
         * @param dsQueueSize        The size of the data source ingest tasks queue.
         * @param rootQueueSize      The size of the top level file ingest tasks queue.
         * @param dirQueueSize       The size of the batched file ingest tasks queue.
         * @param fileQueueSize      The size of the file ingest tasks queue.
         * @param runningListSize    The number of ingest tasks in progress.
         * @param streamingQueueSize The size of the streamed file ingest tasks queue.
         * @param artifactsQueueSize The size of the artifact ingest tasks queue.
         */
        IngestJobTasksSnapshot(long jobId, long dsQueueSize, long rootQueueSize, long dirQueueSize, long fileQueueSize,
                long runningListSize, long streamingQueueSize, long artifactsQueueSize) {
            this.jobId = jobId;
            this.dsQueueSize = dsQueueSize;
            this.rootQueueSize = rootQueueSize;
@ -965,6 +1089,7 @@ final class IngestTasksScheduler {
            this.fileQueueSize = fileQueueSize;
            this.runningListSize = runningListSize;
            this.streamingQueueSize = streamingQueueSize;
            this.artifactsQueueSize = artifactsQueueSize;
        }
        /**
@ -1013,6 +1138,10 @@ final class IngestTasksScheduler {
            return runningListSize;
        }
        long getArtifactsQueueSize() {
            return artifactsQueueSize;
        }
    }
}
@ -193,6 +193,13 @@ public final class Snapshot implements Serializable {
        return this.tasksSnapshot.getRunningListSize();
    }
    long getArtifactTasksQueueSize() {
        if (tasksSnapshot == null) {
            return 0;
        }
        return tasksSnapshot.getArtifactsQueueSize();
    }
    boolean isCancelled() {
        return this.jobCancelled;
    }
@ -12,7 +12,12 @@ ExtractArchiveWithPasswordAction.progress.text=Unpacking contents of archive: {0
ExtractArchiveWithPasswordAction.prompt.text=Enter Password
ExtractArchiveWithPasswordAction.prompt.title=Enter Password
OpenIDE-Module-Display-Category=Ingest Module
OpenIDE-Module-Long-Description=\
Embedded File Extraction Ingest Module\n\nThe Embedded File Extraction Ingest Module processes document files (such as doc, docx, ppt, pptx, xls, xlsx) and archive files (such as zip and others archive types supported by the 7zip extractor).\n\
Contents of these files are extracted and the derived files are added back to the current ingest to be processed by the configured ingest modules.\n\
If the derived file happens to be an archive file, it will be re-processed by the 7zip extractor - the extractor will process archive files N-levels deep.\n\n\
The extracted files are navigable in the directory tree.\n\n\
The module is supported on Windows, Linux and Mac operating systems.
OpenIDE-Module-Name=Embedded File Extraction
OpenIDE-Module-Short-Description=Embedded File Extraction Ingest Module
EmbeddedFileExtractorIngestModule.SevenZipContentReadStream.seek.exception.invalidOrigin=Invalid seek origin: {0}
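A note on the pattern in these Bundle.properties hunks: in java.util.Properties syntax, a backslash at the end of a physical line continues the logical line (leading whitespace on the continuation line is dropped), \n embeds a literal newline in the loaded value, and characters significant to the parser, such as ! and =, may be escaped with a backslash. A minimal illustration with a hypothetical key:

    # One logical value split across two physical lines; the trailing \ joins
    # them, and \n is a newline in the value seen by the application.
    sample.message=First line of the message.\n\
    Second line of the message\!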
@ -36,27 +36,27 @@ FileExtMismatchSettingsPanel.jLabel1.text=File Types:
FileExtMismatchSettingsPanel.newExtButton.text=New Extension
FileExtMismatchSettingsPanel.newMimePrompt.message=Add a new MIME file type:
FileExtMismatchSettingsPanel.newMimePrompt.title=New MIME
FileExtMismatchSettingsPanel.newMimePrompt.emptyMime.message=MIME type text is empty\!
FileExtMismatchSettingsPanel.newMimePrompt.emptyMime.title=Empty type
FileExtMismatchSettingsPanel.newMimePrompt.mimeTypeNotSupported.message=MIME type not supported\!
FileExtMismatchSettingsPanel.newMimePrompt.mimeTypeNotSupported.title=Type not supported
FileExtMismatchSettingsPanel.newMimePrompt.mimeTypeExists.message=MIME type already exists\!
FileExtMismatchSettingsPanel.newMimePrompt.mimeTypeExists.title=Type already exists
FileExtMismatchSettingsPanel.newMimePrompt.mimeTypeNotDetectable.message=MIME type is not detectable by this module.
FileExtMismatchSettingsPanel.newMimePrompt.mimeTypeNotDetectable.title=Type not detectable
FileExtMismatchSettingsPanel.removeTypeButton.noneSelected.message=No MIME type selected\!
FileExtMismatchSettingsPanel.removeTypeButton.noneSelected.title=No type selected
FileExtMismatchSettingsPanel.newExtPrompt.message=Add an allowed extension:
FileExtMismatchSettingsPanel.newExtPrompt.title=New allowed extension
FileExtMismatchSettingsPanel.newExtPrompt.empty.message=Extension text is empty\!
FileExtMismatchSettingsPanel.newExtPrompt.empty.title=Extension text empty
FileExtMismatchSettingsPanel.newExtPrompt.noMimeType.message=No MIME type selected\!
FileExtMismatchSettingsPanel.newExtPrompt.noMimeType.title=No MIME type selected
FileExtMismatchSettingsPanel.newExtPrompt.extExists.message=Extension already exists\!
FileExtMismatchSettingsPanel.newExtPrompt.extExists.title=Extension already exists
FileExtMismatchSettingsPanel.removeExtButton.noneSelected.message=No extension selected\!
FileExtMismatchSettingsPanel.removeExtButton.noneSelected.title=No extension selected
FileExtMismatchSettingsPanel.removeExtButton.noMimeTypeSelected.message=No MIME type selected\!
FileExtMismatchSettingsPanel.removeExtButton.noMimeTypeSelected.title=No MIME type selected
FileExtMismatchSettingsPanel.removeTypeButton.toolTipText=
FileExtMismatchModuleSettingsPanel.checkAllRadioButton.text=Check all file types
@ -61,7 +61,10 @@ ImportCentralRepoDbProgressDialog.errorParsingFile.message=Error parsing hash se
ImportCentralRepoDbProgressDialog.linesProcessed.message=\ hashes processed
ImportCentralRepoDbProgressDialog.title.text=Central Repository Import Progress
OpenIDE-Module-Display-Category=Ingest Module
OpenIDE-Module-Long-Description=\
Hash Set ingest module. \n\n\
The ingest module analyzes files in the disk image and marks them as "known" (based on NSRL hashset lookup for "known" files) and "bad / interesting" (based on one or more hash sets supplied by the user).\n\n\
The module also contains additional non-ingest tools that are integrated in the GUI, such as file lookup by hash and hash set configuration.
OpenIDE-Module-Name=HashDatabases
OptionsCategory_Name_HashDatabase=Hash Sets
OptionsCategory_Keywords_HashDatabase=Hash Sets
@ -188,7 +191,10 @@ HashDbSearchThread.name.searching=Searching
HashDbSearchThread.noMoreFilesWithMD5Msg=No other files with the same MD5 hash were found.
ModalNoButtons.indexingDbsTitle=Indexing hash sets
ModalNoButtons.indexingDbTitle=Indexing hash set
ModalNoButtons.exitHashDbIndexingMsg=You are about to exit out of indexing your hash sets. \n\
The generated index will be left unusable. If you choose to continue,\n\
please delete the corresponding -md5.idx file in the hash folder.\n\
Exit indexing?
ModalNoButtons.dlgTitle.unfinishedIndexing=Unfinished Indexing
ModalNoButtons.indexThis.currentlyIndexing1Db=Currently indexing 1 hash set
ModalNoButtons.indexThese.currentlyIndexing1OfNDbs=Currently indexing 1 of {0}
@ -2,6 +2,7 @@ FilesIdentifierIngestJobSettingsPanel.getError=Error getting interesting files s
FilesIdentifierIngestJobSettingsPanel.updateError=Error updating interesting files sets settings file.
FilesIdentifierIngestModule.getFilesError=Error getting interesting files sets from file.
FilesIdentifierIngestModule.indexError.message=Failed to index interesting file hit artifact for keyword search.
# {0} - daysIncluded
FilesSet.rule.dateRule.toString=(modified within {0} day(s))
FilesSetDefsPanel.bytes=Bytes
FilesSetDefsPanel.cancelImportMsg=Cancel import
@ -121,8 +122,8 @@ FilesSetRulePanel.nameTextField.text=
FilesSetRulePanel.ruleNameLabel.text=Rule Name (Optional):
FilesSetRulePanel.messages.emptyNameCondition=You must specify a name pattern for this rule.
FilesSetRulePanel.messages.invalidNameRegex=The name regular expression is not valid:\n\n{0}
FilesSetRulePanel.messages.invalidCharInName=The name cannot contain \\, /, :, *, ?, \", <, or > unless it is a regular expression.
FilesSetRulePanel.messages.invalidCharInPath=The path cannot contain \\, :, *, ?, \", <, or > unless it is a regular expression.
FilesSetRulePanel.messages.invalidPathRegex=The path regular expression is not valid:\n\n{0}
FilesSetDefsPanel.doFileSetsDialog.duplicateRuleSet.text=Rule set with name {0} already exists.
FilesSetRulePanel.pathSeparatorInfoLabel.text=Folder must be in parent path. Use '/' to give consecutive names
@ -24,7 +24,7 @@ PhotoRecIngestModule.complete.totalParsetime=Total Parsing Time:
PhotoRecIngestModule.complete.photoRecResults=PhotoRec Results
PhotoRecIngestModule.NotEnoughDiskSpace.detail.msg=PhotoRec error processing {0} with {1} Not enough space on primary disk to save unallocated space.
PhotoRecIngestModule.cancelledByUser=PhotoRec cancelled by user.
PhotoRecIngestModule.error.exitValue=PhotoRec carver returned error exit value \= {0} when scanning {1}
PhotoRecIngestModule.error.msg=Error processing {0} with PhotoRec carver.
PhotoRecIngestModule.complete.numberOfErrors=Number of Errors while Carving:
PhotoRecCarverIngestJobSettingsPanel.detectionSettingsLabel.text=PhotoRec Settings
@ -1,2 +1 @@
ExifProcessor.userContent.description=EXIF metadata data exists for this file.
@ -1,7 +1,7 @@
/*
 * Autopsy Forensic Browser
 *
 * Copyright 2020-2021 Basis Technology Corp.
 * Contact: carrier <at> sleuthkit <dot> org
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
@ -34,6 +34,7 @@ import java.util.Collection;
import java.util.Date;
import java.util.Set;
import java.util.HashSet;
import java.util.List;
import java.util.TimeZone;
import java.util.logging.Level;
import org.apache.commons.lang3.StringUtils;
@ -44,7 +45,6 @@ import org.sleuthkit.autopsy.casemodule.NoCurrentCaseException;
import org.sleuthkit.autopsy.ingest.IngestJobContext;
import org.sleuthkit.datamodel.AbstractFile;
import org.sleuthkit.autopsy.coreutils.Logger;
import org.sleuthkit.autopsy.modules.pictureanalyzer.PictureAnalyzerIngestModuleFactory;
import org.sleuthkit.datamodel.Blackboard;
import org.sleuthkit.datamodel.BlackboardArtifact;
@ -68,10 +68,10 @@ import org.sleuthkit.datamodel.Score;
public class EXIFProcessor implements PictureProcessor {

    private static final Logger logger = Logger.getLogger(EXIFProcessor.class.getName());
    @Override
    @NbBundle.Messages({
        "ExifProcessor.userContent.description=EXIF metadata data exists for this file."
    })
    public void process(IngestJobContext context, AbstractFile file) {
@ -148,37 +148,37 @@ public class EXIFProcessor implements PictureProcessor {
            final Blackboard blackboard = Case.getCurrentCaseThrows().getSleuthkitCase().getBlackboard();
            if (!attributes.isEmpty() && !blackboard.artifactExists(file, TSK_METADATA_EXIF, attributes)) {
                List<BlackboardArtifact> artifacts = new ArrayList<>();
                final BlackboardArtifact exifArtifact = (file.newAnalysisResult(
                        BlackboardArtifact.Type.TSK_METADATA_EXIF,
                        Score.SCORE_NONE,
                        null, null, null,
                        attributes)).getAnalysisResult();
                artifacts.add(exifArtifact);
                final BlackboardArtifact userSuspectedArtifact = file.newAnalysisResult(
                        BlackboardArtifact.Type.TSK_USER_CONTENT_SUSPECTED,
                        Score.SCORE_UNKNOWN,
                        null, null, null,
                        Arrays.asList(new BlackboardAttribute(
                                BlackboardAttribute.ATTRIBUTE_TYPE.TSK_COMMENT,
                                MODULE_NAME,
                                Bundle.ExifProcessor_userContent_description())))
                        .getAnalysisResult();
                artifacts.add(userSuspectedArtifact);
                try {
                    blackboard.postArtifacts(artifacts, MODULE_NAME);
                } catch (Blackboard.BlackboardException ex) {
                    logger.log(Level.SEVERE, String.format("Error posting TSK_METADATA_EXIF and TSK_USER_CONTENT_SUSPECTED artifacts for %s (object ID = %d)", file.getName(), file.getId()), ex); //NON-NLS
                }
            }
        } catch (TskCoreException ex) {
            logger.log(Level.SEVERE, String.format("Error creating TSK_METADATA_EXIF and TSK_USER_CONTENT_SUSPECTED artifacts for %s (object ID = %d)", file.getName(), file.getId()), ex); //NON-NLS
        } catch (IOException | ImageProcessingException ex) {
            logger.log(Level.WARNING, String.format("Error parsing %s (object ID = %d), presumed corrupt", file.getName(), file.getId()), ex); //NON-NLS
        } catch (NoCurrentCaseException ex) {
            logger.log(Level.SEVERE, String.format("Error processing %s (object ID = %d)", file.getName(), file.getId()), ex); //NON-NLS
        }
    }
@ -1,7 +1,7 @@
/*
 * Autopsy Forensic Browser
 *
 * Copyright 2018-2021 Basis Technology Corp.
 * Contact: carrier <at> sleuthkit <dot> org
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
@ -154,7 +154,7 @@ final class TikaTextExtractor implements TextExtractor {
    private static final String TESSERACT_OUTPUT_FILE_NAME = "tess_output"; //NON-NLS

    // documents where OCR is performed
    private static final ImmutableSet<String> OCR_DOCUMENTS = ImmutableSet.of(
            "application/pdf",
            "application/msword",
            "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
@ -5,15 +5,10 @@ ChromeCacheExtract_adding_artifacts_msg=Chrome Cache: Adding %d artifacts for an
ChromeCacheExtract_adding_extracted_files_msg=Chrome Cache: Adding %d extracted files for analysis.
ChromeCacheExtract_loading_files_msg=Chrome Cache: Loading files from %s.
ChromeCacheExtractor.moduleName=ChromeCacheExtractor
ChromeCacheExtractor.progressMsg={0}: Extracting cache entry {1} of {2} entries from {3}
DataSourceUsage_AndroidMedia=Android Media Card
DataSourceUsage_DJU_Drone_DAT=DJI Internal SD Card
DataSourceUsage_FlashDrive=Flash Drive
DataSourceUsageAnalyzer.customVolume.label=OS Drive ({0})
DataSourceUsageAnalyzer.parentModuleName=Recent Activity
DefaultPriorityDomainCategorizer_searchEngineCategory=Search Engine
@ -28,7 +23,6 @@ ExtractEdge_process_errMsg_errGettingWebCacheFiles=Error trying to retrieving Ed
ExtractEdge_process_errMsg_spartanFail=Failure processing Microsoft Edge spartan.edb file
ExtractEdge_process_errMsg_unableFindESEViewer=Unable to find ESEDatabaseViewer
ExtractEdge_process_errMsg_webcacheFail=Failure processing Microsoft Edge WebCacheV01.dat file
ExtractIE_executePasco_errMsg_errorRunningPasco={0}: Error analyzing Internet Explorer web history
ExtractOs.androidOs.label=Android
ExtractOs.androidVolume.label=OS Drive (Android)
@ -61,7 +55,6 @@ ExtractOs.windowsVolume.label=OS Drive (Windows)
ExtractOs.yellowDogLinuxOs.label=Linux (Yellow Dog)
ExtractOs.yellowDogLinuxVolume.label=OS Drive (Linux Yellow Dog)
ExtractOS_progressMessage=Checking for OS
ExtractPrefetch_errMsg_prefetchParsingFailed={0}: Error analyzing prefetch files
ExtractPrefetch_module_name=Windows Prefetch Extractor
ExtractRecycleBin_module_name=Recycle Bin
@ -88,6 +81,8 @@ ExtractZone_process_errMsg_find=A failure occured while searching for :Zone.Inde
ExtractZone_progress_Msg=Extracting :Zone.Identifer files
ExtractZone_Restricted=Restricted Sites Zone
ExtractZone_Trusted=Trusted Sites Zone
Jumplist_adding_extracted_files_msg=Windows Jumplist Extractor: Adding %d extracted files for analysis.
Jumplist_module_name=Windows Jumplist Extractor
OpenIDE-Module-Display-Category=Ingest Module
OpenIDE-Module-Long-Description=Recent Activity ingest module.\n\n The module extracts useful information about the recent user activity on the disk image being ingested, such as:\n\n- Recently open documents,\n- Web activity (sites visited, stored cookies, book marked sites, search engine queries, file downloads),\n- Recently attached devices,\n- Installed programs.\n\nThe module currently supports Windows only disk images.\nThe plugin is also fully functional when deployed on Windows version of Autopsy.
OpenIDE-Module-Name=RecentActivity
@ -157,19 +152,13 @@ Firefox.getDlV24.errMsg.errAnalyzeFile={0}: Error while trying to analyze file:{
Firefox.getDlV24.errMsg.errParsingArtifacts={0}: Error parsing {1} Firefox web download artifacts.
Progress_Message_Analyze_Registry=Analyzing Registry Files
Progress_Message_Analyze_Usage=Data Sources Usage Analysis
Progress_Message_Chrome_AutoFill=Chrome Auto Fill Browser {0}
Progress_Message_Chrome_Bookmarks=Chrome Bookmarks Browser {0}
Progress_Message_Chrome_Cache=Chrome Cache
Progress_Message_Chrome_Cookies=Chrome Cookies Browser {0}
Progress_Message_Chrome_Downloads=Chrome Downloads Browser {0}
Progress_Message_Chrome_FormHistory=Chrome Form History
Progress_Message_Chrome_History=Chrome History Browser {0}
Progress_Message_Chrome_Logins=Chrome Logins Browser {0}
Progress_Message_Edge_Bookmarks=Microsoft Edge Bookmarks
Progress_Message_Edge_Cookies=Microsoft Edge Cookies
@ -224,7 +213,6 @@ Recently_Used_Artifacts_Winrar=Recently opened according to WinRAR MRU
Registry_System_Bam=Recently Executed according to Background Activity Moderator (BAM)
RegRipperFullNotFound=Full version RegRipper executable not found.
RegRipperNotFound=Autopsy RegRipper executable not found.
SearchEngineURLQueryAnalyzer.init.exception.msg=Unable to find {0}.
SearchEngineURLQueryAnalyzer.moduleName.text=Search Engine
SearchEngineURLQueryAnalyzer.engineName.none=NONE
@ -0,0 +1,237 @@
/*
*
* Autopsy Forensic Browser
*
* Copyright 2021 Basis Technology Corp.
*
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.sleuthkit.autopsy.recentactivity;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.List;
import java.util.logging.Level;
import org.apache.poi.poifs.filesystem.DirectoryEntry;
import org.apache.poi.poifs.filesystem.DocumentEntry;
import org.apache.poi.poifs.filesystem.DocumentInputStream;
import org.apache.poi.poifs.filesystem.Entry;
import org.apache.poi.poifs.filesystem.POIFSFileSystem;
import org.openide.util.NbBundle.Messages;
import org.sleuthkit.autopsy.casemodule.Case;
import org.sleuthkit.autopsy.casemodule.services.FileManager;
import org.sleuthkit.autopsy.coreutils.JLNK;
import org.sleuthkit.autopsy.coreutils.JLnkParser;
import org.sleuthkit.autopsy.coreutils.JLnkParserException;
import org.sleuthkit.autopsy.coreutils.Logger;
import org.sleuthkit.autopsy.datamodel.ContentUtils;
import org.sleuthkit.autopsy.ingest.DataSourceIngestModuleProgress;
import org.sleuthkit.autopsy.ingest.IngestJobContext;
import org.sleuthkit.autopsy.ingest.IngestServices;
import org.sleuthkit.autopsy.ingest.ModuleContentEvent;
import org.sleuthkit.datamodel.AbstractFile;
import org.sleuthkit.datamodel.Content;
import org.sleuthkit.datamodel.DerivedFile;
import org.sleuthkit.datamodel.TskCoreException;
import org.sleuthkit.datamodel.TskData;
/**
 * Extracts the LNK files from jumplists, saves them to
 * ModuleOutput\RecentActivity\Jumplists, and then adds them back into the case
 * as derived files.
 */
final class ExtractJumpLists extends Extract {
private static final Logger logger = Logger.getLogger(ExtractJumpLists.class.getName());
private IngestJobContext context;
private static final String JUMPLIST_TSK_COMMENT = "Jumplist File";
private static final String RA_DIR_NAME = "RecentActivity"; //NON-NLS
private static final String MODULE_OUTPUT_DIR = "ModuleOutput"; //NON-NLS
private static final String AUTOMATIC_DESTINATIONS_FILE_DIRECTORY = "%/AppData/Roaming/Microsoft/Windows/Recent/AutomaticDestinations/";
private static final String JUMPLIST_DIR_NAME = "jumplists"; //NON-NLS
private static final String VERSION_NUMBER = "1.0.0"; //NON-NLS
private String moduleName;
private FileManager fileManager;
private final IngestServices services = IngestServices.getInstance();
    @Messages({
        "Jumplist_module_name=Windows Jumplist Extractor",
        "Jumplist_adding_extracted_files_msg=Windows Jumplist Extractor: Adding %d extracted files for analysis."
    })
ExtractJumpLists() {
super(Bundle.Jumplist_module_name());
}
@Override
void process(Content dataSource, IngestJobContext context, DataSourceIngestModuleProgress progressBar) {
this.context = context;
moduleName = Bundle.Jumplist_module_name();
fileManager = currentCase.getServices().getFileManager();
long ingestJobId = context.getJobId();
List<AbstractFile> jumpListFiles = extractJumplistFiles(dataSource, ingestJobId);
if (jumpListFiles.isEmpty()) {
return;
}
if (context.dataSourceIngestIsCancelled()) {
return;
}
List<AbstractFile> derivedFiles = new ArrayList<>();
String derivedPath = null;
String baseRaTempPath = RAImageIngestModule.getRATempPath(Case.getCurrentCase(), JUMPLIST_DIR_NAME + "_" + dataSource.getId(), ingestJobId);
for (AbstractFile jumplistFile : jumpListFiles) {
if (!jumplistFile.getName().toLowerCase().contains("-slack") && !jumplistFile.getName().equals("..") &&
!jumplistFile.getName().equals(".") && jumplistFile.getSize() > 0) {
String jlFile = Paths.get(baseRaTempPath, jumplistFile.getName() + "_" + jumplistFile.getId()).toString();
String moduleOutPath = Case.getCurrentCase().getModuleDirectory() + File.separator + RA_DIR_NAME + File.separator + JUMPLIST_DIR_NAME + "_" + dataSource.getId() + File.separator + jumplistFile.getName() + "_" + jumplistFile.getId();
derivedPath = RA_DIR_NAME + File.separator + JUMPLIST_DIR_NAME + "_" + dataSource.getId() + File.separator + jumplistFile.getName() + "_" + jumplistFile.getId();
File jlDir = new File(moduleOutPath);
if (jlDir.exists() == false) {
boolean dirMade = jlDir.mkdirs();
if (!dirMade) {
                        logger.log(Level.WARNING, "Error creating directory to store Jumplist LNK files {0}", moduleOutPath); //NON-NLS
continue;
}
}
derivedFiles.addAll(extractLnkFiles(jlFile, moduleOutPath, jumplistFile, derivedPath));
}
}
// notify listeners of new files and schedule for analysis
progressBar.progress(String.format(Bundle.Jumplist_adding_extracted_files_msg(), derivedFiles.size()));
derivedFiles.forEach((derived) -> { services.fireModuleContentEvent(new ModuleContentEvent(derived)); });
context.addFilesToJob(derivedFiles);
}
/**
* Find jumplist and extract jumplist files to temp directory
*
* @return - list of jumplist abstractfiles or empty list
*/
private List<AbstractFile> extractJumplistFiles(Content dataSource, Long ingestJobId) {
        List<AbstractFile> jumpListFiles = new ArrayList<>();
        List<AbstractFile> tempJumpListFiles = new ArrayList<>();
FileManager fileManager = Case.getCurrentCase().getServices().getFileManager();
try {
tempJumpListFiles = fileManager.findFiles(dataSource, "%", AUTOMATIC_DESTINATIONS_FILE_DIRECTORY); //NON-NLS
if (!tempJumpListFiles.isEmpty()) {
jumpListFiles.addAll(tempJumpListFiles);
}
} catch (TskCoreException ex) {
logger.log(Level.WARNING, "Unable to find jumplist files.", ex); //NON-NLS
return jumpListFiles; // No need to continue
}
for (AbstractFile jumpListFile : jumpListFiles) {
if (context.dataSourceIngestIsCancelled()) {
return jumpListFiles;
}
if (!jumpListFile.getName().toLowerCase().contains("-slack") && !jumpListFile.getName().equals("..") &&
!jumpListFile.getName().equals(".") && jumpListFile.getSize() > 0) {
String fileName = jumpListFile.getName() + "_" + jumpListFile.getId();
String baseRaTempPath = RAImageIngestModule.getRATempPath(Case.getCurrentCase(), JUMPLIST_DIR_NAME+ "_" + dataSource.getId(), ingestJobId);
String jlFile = Paths.get(baseRaTempPath, fileName).toString();
try {
ContentUtils.writeToFile(jumpListFile, new File(jlFile));
} catch (IOException ex) {
logger.log(Level.WARNING, String.format("Unable to write %s to temp directory. File name: %s", fileName, jlFile), ex); //NON-NLS
}
}
}
return jumpListFiles;
}
/*
* Read each jumplist file and extract the lnk files to moduleoutput
*/
private List<DerivedFile> extractLnkFiles(String jumpListFile, String moduleOutPath, AbstractFile jumpListAbsFile, String derivedPath) {
List<DerivedFile> derivedFiles = new ArrayList<>();
DerivedFile derivedFile;
String lnkFileName = "";
try (POIFSFileSystem fs = new POIFSFileSystem(new File(jumpListFile))) {
DirectoryEntry root = fs.getRoot();
for (Entry entry : root) {
if (entry instanceof DirectoryEntry) {
//If this data structure needed to recurse this is where it would do it but jumplists do not need to at this time
continue;
} else if (entry instanceof DocumentEntry) {
String jmpListFileName = entry.getName();
int fileSize = ((DocumentEntry) entry).getSize();
if (fileSize > 0) {
try (DocumentInputStream stream = fs.createDocumentInputStream(jmpListFileName)) {
byte[] buffer = new byte[stream.available()];
stream.read(buffer);
JLnkParser lnkParser = new JLnkParser(fs.createDocumentInputStream(jmpListFileName), fileSize);
JLNK lnk = lnkParser.parse();
lnkFileName = lnk.getBestName() + ".lnk";
File targetFile = new File(moduleOutPath + File.separator + entry.getName() + "-" + lnkFileName);
String derivedFileName = MODULE_OUTPUT_DIR + File.separator + derivedPath + File.separator + entry.getName() + "-" + lnkFileName;
                            try (OutputStream outStream = new FileOutputStream(targetFile)) {
                                outStream.write(buffer);
                            }
derivedFile = fileManager.addDerivedFile(lnkFileName, derivedFileName,
fileSize,
0,
0,
0,
0, // TBD
true,
jumpListAbsFile,
"",
moduleName,
VERSION_NUMBER,
"",
TskData.EncodingType.NONE);
derivedFiles.add(derivedFile);
} catch (IOException | JLnkParserException e) {
                            logger.log(Level.WARNING, String.format("No such document, or the entry represented by documentName is not a DocumentEntry; jumplist file is %s", jumpListFile), e); //NON-NLS
}
}
} else {
// currently, either an Entry is a DirectoryEntry or a DocumentEntry,
// but in the future, there may be other entry subinterfaces.
// The internal data structure certainly allows for a lot more entry types.
continue;
}
}
} catch (IOException | TskCoreException ex) {
logger.log(Level.WARNING, String.format("Error lnk parsing the file to get recent files $s", jumpListFile), ex); //NON-NLS
}
return derivedFiles;
}
}
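A note for readers: the two helpers above are driven from the module's process() entry point, which is not shown in this diff. The following is a minimal, hypothetical sketch of that flow; moduleOutPath and derivedPath are illustrative locals, and the real method also updates the progress bar:

    // Hypothetical driver, for illustration only (not part of the commit).
    void process(Content dataSource, IngestJobContext context, DataSourceIngestModuleProgress progressBar) {
        long ingestJobId = context.getJobId();
        // Copy each automatic-destinations jumplist out of the image to temp.
        List<AbstractFile> jumpListFiles = extractJumplistFiles(dataSource, ingestJobId);
        for (AbstractFile jumpListAbsFile : jumpListFiles) {
            if (context.dataSourceIngestIsCancelled()) {
                return;
            }
            // Rebuild the temp path that extractJumplistFiles() wrote to.
            String tempJlPath = Paths.get(
                    RAImageIngestModule.getRATempPath(Case.getCurrentCase(),
                            JUMPLIST_DIR_NAME + "_" + dataSource.getId(), ingestJobId),
                    jumpListAbsFile.getName() + "_" + jumpListAbsFile.getId()).toString();
            // Parse the OLE2 container and register each embedded lnk as a derived file,
            // then publish as at the top of this file.
            List<DerivedFile> derivedFiles = extractLnkFiles(tempJlPath, moduleOutPath, jumpListAbsFile, derivedPath);
            derivedFiles.forEach(derived -> services.fireModuleContentEvent(new ModuleContentEvent(derived)));
            context.addFilesToJob(derivedFiles);
        }
    }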

View File

@ -88,6 +88,16 @@ final class ExtractPrefetch extends Extract {
super(Bundle.ExtractPrefetch_module_name());
}
+/**
+ * Get the temp folder name.
+ *
+ * @param dataSource Current data source
+ *
+ * @return The folder name
+ */
+private String getPrefetchTempFolder(Content dataSource) {
+    return dataSource.getId() + "-" + PREFETCH_PARSER_DB_FILE;
+}
@Override
void process(Content dataSource, IngestJobContext context, DataSourceIngestModuleProgress progressBar) {
@ -116,9 +126,9 @@ final class ExtractPrefetch extends Extract {
return;
}
-String modOutFile = modOutPath + File.separator + dataSource.getName() + "-" + PREFETCH_PARSER_DB_FILE;
+String modOutFile = modOutPath + File.separator + getPrefetchTempFolder(dataSource);
try {
-String tempDirPath = RAImageIngestModule.getRATempPath(Case.getCurrentCase(), dataSource.getName() + "-" + PREFETCH_DIR_NAME, ingestJobId);
+String tempDirPath = RAImageIngestModule.getRATempPath(Case.getCurrentCase(), getPrefetchTempFolder(dataSource), ingestJobId);
parsePrefetchFiles(prefetchDumper, tempDirPath, modOutFile, modOutPath);
File prefetchDatabase = new File(modOutFile);
if (prefetchDatabase.exists()) {
@ -159,7 +169,7 @@ final class ExtractPrefetch extends Extract {
String ext = FilenameUtils.getExtension(origFileName);
String baseName = FilenameUtils.getBaseName(origFileName);
String fileName = escapeFileName(String.format("%s_%d.%s", baseName, pFile.getId(), ext));
-String baseRaTempPath = RAImageIngestModule.getRATempPath(Case.getCurrentCase(), dataSource.getName() + "-" + PREFETCH_DIR_NAME, ingestJobId);
+String baseRaTempPath = RAImageIngestModule.getRATempPath(Case.getCurrentCase(), getPrefetchTempFolder(dataSource), ingestJobId);
String prefetchFile = Paths.get(baseRaTempPath, fileName).toString();
try {
ContentUtils.writeToFile(pFile, new File(prefetchFile));
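For context: data source object ids are unique within a case while display names need not be, so keying the temp and output names by dataSource.getId() keeps two identically named images from clobbering each other's prefetch paths. A minimal illustration (values hypothetical):

    // Two data sources both named "image.dd":
    String oldStyle = dataSource.getName() + "-" + PREFETCH_DIR_NAME;  // same string for both: collision
    String newStyle = getPrefetchTempFolder(dataSource);               // id-based, unique per data source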

View File

@ -91,6 +91,7 @@ import static org.sleuthkit.datamodel.BlackboardAttribute.ATTRIBUTE_TYPE.TSK_NAM
import static org.sleuthkit.datamodel.BlackboardAttribute.ATTRIBUTE_TYPE.TSK_PATH;
import static org.sleuthkit.datamodel.BlackboardAttribute.ATTRIBUTE_TYPE.TSK_HOME_DIR;
import org.sleuthkit.datamodel.Content;
+import org.sleuthkit.datamodel.DataArtifact;
import org.sleuthkit.datamodel.DataSource;
import org.sleuthkit.datamodel.Host;
import org.sleuthkit.datamodel.HostManager;
@ -1769,6 +1770,7 @@ class ExtractRegistry extends Extract {
 */
void createShellBagArtifacts(AbstractFile regFile, List<ShellBag> shellbags) throws TskCoreException {
List<BlackboardArtifact> artifacts = new ArrayList<>();
+List<DataArtifact> dataArtifacts = new ArrayList<>();
try {
for (ShellBag bag : shellbags) {
Collection<BlackboardAttribute> attributes = new ArrayList<>();
@ -1796,11 +1798,14 @@ class ExtractRegistry extends Extract {
attributes.add(new BlackboardAttribute(TSK_DATETIME_ACCESSED, getName(), time));
}
-artifacts.add(createArtifactWithAttributes(getShellBagArtifact(), regFile, attributes));
+BlackboardArtifact artifact = createArtifactWithAttributes(getShellBagArtifact(), regFile, attributes);
+artifacts.add(artifact);
+dataArtifacts.add((DataArtifact)artifact);
}
} finally {
if(!context.dataSourceIngestIsCancelled()) {
postArtifacts(artifacts);
+context.addDataArtifactsToJob(dataArtifacts);
}
}
}
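This shape recurs throughout the merge: a module keeps a DataArtifact-typed list alongside its blackboard list so that postArtifacts() can publish to the blackboard and UI while the new IngestJobContext.addDataArtifactsToJob() queues the same artifacts for the data artifact ingest pipeline. A condensed sketch of the pattern; the cast rests on the assumption (true here) that the type returned by getShellBagArtifact() is a data artifact type:

    BlackboardArtifact artifact = createArtifactWithAttributes(getShellBagArtifact(), regFile, attributes);
    artifacts.add(artifact);                    // for postArtifacts() -> blackboard and UI
    dataArtifacts.add((DataArtifact) artifact); // for context.addDataArtifactsToJob()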

View File

@ -84,8 +84,10 @@ public final class RAImageIngestModule implements DataSourceIngestModule {
Extract prefetch = new ExtractPrefetch();
Extract webAccountType = new ExtractWebAccountType();
Extract messageDomainType = new DomainCategoryRunner();
+Extract jumpList = new ExtractJumpLists();
extractors.add(recycleBin);
+extractors.add(jumpList);
extractors.add(recentDocuments);
extractors.add(registry); // needs to run before the DataSourceUsageAnalyzer
extractors.add(osExtract); // this needs to run before the DataSourceUsageAnalyzer

View File

@ -22,6 +22,7 @@
 */
package org.sleuthkit.autopsy.recentactivity;
+import java.io.File;
import java.util.ArrayList;
import java.util.List;
import java.util.logging.Level;
@ -29,6 +30,7 @@ import org.apache.commons.io.FilenameUtils;
import org.openide.util.NbBundle;
import org.sleuthkit.autopsy.coreutils.Logger;
import java.util.Collection;
+import java.util.HashMap;
import org.openide.util.NbBundle.Messages;
import org.sleuthkit.autopsy.coreutils.JLNK;
import org.sleuthkit.autopsy.coreutils.JLnkParser;
@ -87,6 +89,7 @@ class RecentDocumentsByLnk extends Extract {
dataFound = true;
List<BlackboardArtifact> bbartifacts = new ArrayList<>();
+HashMap<String, String> recentFileMap = new HashMap<>();
for (AbstractFile recentFile : recentFiles) {
if (context.dataSourceIngestIsCancelled()) {
break;
@ -111,6 +114,8 @@ class RecentDocumentsByLnk extends Extract {
Collection<BlackboardAttribute> bbattributes = new ArrayList<>();
String path = lnk.getBestPath();
+if (recentFileMap.get(path + File.separator + recentFile.getName()) == null) {
+recentFileMap.put(path + File.separator + recentFile.getName(), recentFile.getName());
bbattributes.add(new BlackboardAttribute(ATTRIBUTE_TYPE.TSK_PATH,
NbBundle.getMessage(this.getClass(),
"RecentDocumentsByLnk.parentModuleName.noSpace"),
@ -136,6 +141,7 @@ class RecentDocumentsByLnk extends Extract {
logger.log(Level.SEVERE, String.format("Failed to create TSK_RECENT_OBJECT artifact for file %d", recentFile.getId()), ex); logger.log(Level.SEVERE, String.format("Failed to create TSK_RECENT_OBJECT artifact for file %d", recentFile.getId()), ex);
} }
} }
}
if (!context.dataSourceIngestIsCancelled()) { if (!context.dataSourceIngestIsCancelled()) {
postArtifacts(bbartifacts); postArtifacts(bbartifacts);

View File

@ -173,7 +173,7 @@
<!-- Update configuration file to include jre -->
<property name="inst.property.file" value="${inst-path}/etc/${app.name}.conf" />
<!-- Sets max heap size to be ${jvm.max.mem} which is set in the run-ai-(32/64) target -->
-<var name="jvm.args" value="&quot;--branding ${app.name} -J-Xms24m -J-Xmx4G -J-Xverify:none -J-XX:+UseG1GC -J-XX:+UseStringDeduplication -J-Dprism.order=sw &quot;" />
+<var name="jvm.args" value="&quot;--branding ${app.name} -J-Xms24m -J-Xmx${jvm.max.mem} -J-XX:MaxPermSize=128M -J-Xverify:none -J-XX:+UseG1GC -J-XX:+UseStringDeduplication -J-Dprism.order=sw &quot;" />
<propertyfile file="${inst.property.file}">
<!-- Note: can be higher on 64 bit systems, should be in sync with project.properties -->
<entry key="default_options" value="@JVM_OPTIONS" />

View File

@ -80,7 +80,7 @@ The results will appear in a separate result viewer for each
\section ad_hoc_kw_lists Keyword Lists
-In addition to being selected during ingest, keyword lists can also be run using the "Keyword Lists" button. For more information on configuring these keyword lists, see the \ref keywordListsTab section of the ingest module documentation.
+In addition to being selected during ingest, keyword lists can also be run using the "Keyword Lists" button. For more information on configuring these keyword lists, see the \ref keyword_keywordListsTab section of the ingest module documentation.
Lists created using the "Keyword Search Configuration" dialog can be searched manually by the user by pressing the "Keyword Lists" button and checking the boxes next to the lists to be searched. The search can be limited to certain data sources only by checking the box near the bottom and then highlighting the data sources to search within. Multiple data sources can be selected using Shift+left click or Ctrl+left click. Once everything is configured, press "Search" to begin the search. The "Save search results" checkbox determines whether the search results will be saved to the case database.

View File

@ -1,4 +1,4 @@
/*! \page central_repo_page Central Repository
[TOC]
@ -111,6 +111,10 @@ Descriptions of the property types:
 - ICCID properties are currently only created by custom Autopsy modules.
- <b>Credit Card</b>
 - Credit card properties are created by the \ref keyword_search_page module.
+- <b>OS Account</b>
+ - OS account properties are created by the disk image data source processor and the \ref recent_activity_page module.
+- <b>Installed Programs</b>
+ - Installed program properties are created primarily by the \ref recent_activity_page module.
- <b> App-specific Accounts (Facebook, Twitter, etc...)</b>
 - These properties come primarily from the \ref android_analyzer_page module.
@ -146,7 +150,7 @@ There are three settings for the "Central Repository" ingest module:
<ul>
<li><b>Save items to the Central Repository</b> - This option should only be deselected in the rare cases where you do not want to add properties from the current data source to the central repository but still want to flag past occurrences.
<li><b>Flag items previously tagged as notable</b> - Enabling this option causes interesting item/file artifacts to be created when properties matching previously flagged ones are found. See the following section \ref cr_tagging for more details.
-<li><b>Flag previously seen devices</b> - When enabled, an "Interesting Item" artifact will be created if a device-related property (USB, MAC address, IMSI, IMEI, ICCID) is detected and is already in the central repository, whether or not it was flagged as notable.
+<li><b>Flag previously seen devices and users</b> - When enabled, an "Interesting Item" artifact will be created if a device-related property (USB, MAC address, IMSI, IMEI, ICCID) or an OS account is detected and is already in the central repository, whether or not they were flagged as notable.
</li>
\subsection cr_tagging Tagging Files and Artifacts

View File

@ -75,9 +75,9 @@ Registry hive files can be displayed in a
\image html content_viewer_registry.png
-\section cv_metadata File Metadata
+\section cv_metadata File Metadata / Source File Metadata
-The "File Metadata" tab displays basic information about the file, such as type, size, and hash. It also displays the output of the Sleuth Kit istat tool.
+The "File Metadata" tab displays basic information about the selected file or the file associated with the result, such as type, size, and hash. It also displays the output of the Sleuth Kit istat tool.
\image html content_viewer_metadata.png
@ -87,14 +87,20 @@ The "OS Accounts" tab displays information about the operating system
\image html content_viewer_os_account.png
-\section cv_results Results
+\section cv_results Data Artifacts
-The "Results" tab is enabled when selecting items with associated results such as keyword hits, call logs, and messages. The exact fields displayed depend on the type of result. The two images below show the "Results" tab for a call log and a web bookmark.
+The "Data Artifacts" tab shows the artifacts associated with the item selected in the result viewer, such as web bookmarks, call logs, and messages. The exact fields displayed depend on the type of data artifact. The two images below show the "Data Artifacts" tab for a call log and a web bookmark.
\image html content_viewer_results_call.png
<br>
\image html content_viewer_results_bookmark.png
+\section cv_analysis_results Analysis Results
+The "Analysis Results" tab shows all analysis results associated with the item selected in the result viewer. If you select an analysis result, the list will automatically scroll to that result. These analysis results come from data such as hash set hits, interesting items, and keyword search hits. The image below shows the analysis results for web categories.
+\image html content_viewer_analysis_result_webcat.png
\section cv_context Context
The "Context" tab displays information about where a file came from and allows you to navigate to the original result. For example, it can display the URL for downloaded files and the email message a file was attached to. In the image below you can see the context for an image that was sent as an email attachment.

View File

@ -1,4 +1,4 @@
/*! \page file_search_page File Search
[TOC]
@ -21,7 +21,7 @@ or select the "Tools", "File Search by Attributes" menu.
There are several categories that you can use to filter and display the directories and files within the images of the currently open case.
These categories are:
\li Name:
-Search for all files and directories whose name contains the given pattern.
+Search for all files and directories whose name contains the given pattern. The search only looks at the file/directory name and does not take the parent path into account.
Note: it does not support regular expression or keyword matching.
\li Size:
Search for all files and directories whose size matches the given pattern. The pattern can be "equal to", "greater than", or "less than". The size unit can be "Byte(s)", "KB", "MB", "GB", or "TB".
@ -32,7 +32,7 @@ Search for all files with the given MD5 hash.
\li Date:
Search for all files and directories whose date property falls within the given date range. The date properties are "Modified Date", "Accessed Date", "Changed Date", and "Created Date". You must also specify the time zone for the given date.
\li Known Status:
-Search for all files and directories whose known status is "Unknown", "Known", or "Notable". For more information on these statuses, see the \ref hash_db_page page.
+Search for all files whose known status is "Unknown", "Known", or "Notable". For more information on these statuses, see the \ref hash_db_page page.
To use any of these filters, check the box next to the category and click the "Search" button to start the search process. The results will appear in the result viewer.
\li Data Source:
Search only within the specified data source instead of the whole case. Note that multiple data sources can be selected by holding SHIFT or CTRL while selecting.

View File

@ -1,4 +1,4 @@
/*! \page host_page Hosts
[TOC]
@ -19,7 +19,7 @@ Hosts are displayed in the \ref tree_viewer_page. Depending on the \ref
\subsection host_os_accounts Operating System Accounts
-OS accounts can be viewed in the "OS Accounts" node under "Results". Each OS account is associated with a host, and the host information is displayed in the "OS Accounts" tab of the content viewer.
+OS accounts can be viewed in the "OS Accounts" node of the tree. Each OS account is associated with a host, and the host information is displayed in the "OS Accounts" tab of the content viewer.
\image html host_os_accounts.png

(Five binary image files changed in this commit; contents not shown.)

View File

@ -1,4 +1,4 @@
/*! \page keyword_search_page Keyword Search
[TOC]
@ -18,11 +18,11 @@ See the \ref ad_hoc_keyword_search_page page for more details
\section keyword_search_configuration_dialog Keyword Search Configuration
The "Keyword Search" configuration option has three tabs, each with its own purpose:
-\li The \ref keywordLists tab is used to add, remove, and edit keyword search lists.
+\li The \ref keyword_keywordListsTab tab is used to add, remove, and edit keyword search lists.
-\li The \ref stringExtraction tab is used to enable language scripts and the extraction type.
+\li The \ref keyword_stringExtractionTab tab is used to enable language scripts and the extraction type.
-\li The \ref generalSettings tab is used to configure the ingest timing and view information.
+\li The \ref keyword_generalSettingsTab tab is used to configure the ingest timing and view information.
-\subsection keywordLists "Lists" Tab
+\subsection keyword_keywordListsTab "Lists" Tab
The "Lists" tab is used to create/import keyword lists and add content to them. To create a list, select the "New List" button and choose a name for the new keyword list. Once the list has been created, keywords can be added to it (see the \ref ad_hoc_kw_types_section section for more information on keyword types). Lists can be added to the keyword search ingest process; searches will run at regular intervals as content is added to the index.
@ -40,7 +40,7 @@ Below the "Keywords" list, you can choose to receive a message in
\image html keyword-search-inbox.PNG
-\subsection stringExtraction "String Extraction" Tab
+\subsection keyword_stringExtractionTab "String Extraction" Tab
The "String Extraction" setting defines how strings are extracted from files whose text cannot be extracted normally because their formats are not supported. This is the case for arbitrary binary files (such as page files) and chunks of unallocated space that represent deleted files.
When extracting strings from binary files, we need to interpret byte sequences as text differently depending on the possible text encoding and the script/language used. In many cases we do not know in advance which specific encoding/language the text is encoded in. However, this can be useful if the investigator is looking for a specific language, because selecting fewer languages improves indexing performance and reduces the number of false positives.
@ -50,20 +50,36 @@ The default setting is to search only for English strings
The user can also use the "String Viewer" first and try different script/language settings to see which settings give satisfactory results for the type of text relevant to the investigation. The same setting that works for the investigation can then be applied to the keyword search ingest module.
-\subsection generalSettings "General" Tab
+\subsection keyword_generalSettingsTab "General" Tab
\image html keyword-search-configuration-dialog-general.PNG
-### NIST NSRL Support
+\subsubsection keyword_nsrl NIST NSRL Support
The "Hash Lookup" ingest module can be configured to use the NIST NSRL hash set of known files. The "General" tab of the advanced keyword search configuration dialog contains an option to skip keyword indexing and searching of files previously marked as "known" and uninteresting. Selecting this option can greatly reduce the size of the index and improve ingest performance. In most cases the user does not need to keyword search "known" files.
-### Result Update Frequency During Ingest
+\subsubsection keyword_update_freq Result Update Frequency During Ingest
To control how frequently searches run during ingest, the user can adjust the timing setting available in the "General" tab of the advanced keyword search configuration dialog. Lowering the number of minutes will result in more frequent index updates and searches, and the user will see results more in real time. However, more frequent updates can affect overall performance, especially on lower-end systems, and can potentially lengthen the total time needed for ingest.
It is also possible to choose not to run periodic searches. This will speed up ingest. Users who choose this option can run their keyword searches once the keyword search index is complete.
-### Optical Character Recognition (OCR)
+\section keyword_usage Using the Module
-There is also a setting to enable Optical Character Recognition (OCR). If enabled, text can be extracted from supported image types. Enabling this feature will make the keyword search module take longer to run, and the results are not perfect. The second checkbox can speed up OCR by processing only large images and images extracted from documents.
+Searches can be executed manually by the user at any time, as long as there are files already indexed and ready to be analyzed. A search performed before indexing is complete will naturally only run against the indexes that have already been compiled.
+See the \ref ingest_page "Ingest Modules" page for more information on ingest modules in general.
+Once there are files in the index, the \ref ad_hoc_keyword_search_page will be available for manual searching at any time.
+\subsection keyword_ingest_settings Ingest Settings
+The keyword search module ingest settings allow the user to enable or disable the specific built-in search expressions: Phone Numbers, IP Addresses, Email Addresses, and URLs. Using the "Global Settings" button (explained below), custom keyword sets can be added.
+\image html keyword-search-ingest-settings.PNG
+\subsubsection keyword_ocr Optical Character Recognition (OCR)
+\anchor keyword_search_ocr_config
+There is also a setting to enable Optical Character Recognition (OCR). If enabled, text can be extracted from supported image types. Enabling this feature will make the keyword search module take longer to run, and the results may not be perfect.
Here is an example of an image containing text:
@ -73,7 +89,12 @@ The "Indexed Text" tab displays the results when running the module
\image html keyword-search-ocr-indexed-text.png
-\anchor keyword_search_ocr_config
+The two OCR-related options are as follows:
+<ul>
+<li>Only index text extracted using OCR. This will only index text detected by OCR and will prevent the indexing of text found in text files, documents, etc.
+<li>Only process PDFs, MS Office docs and images which are over 100KB in size or extracted from another file. With this option selected, OCR will only run on images over 100KB in size and on PDF/Office documents. It will also run on images of any size that were extracted from another file.
+</ul>
By default, OCR is only configured for English text. Its configuration depends on the presence of language files (called "traineddata" files)
that exist in a location Autopsy can reach. To add support for more languages, you will need to download additional "traineddata" files
and move them to the right location. The following steps describe this process:
@ -88,28 +109,7 @@ and move them to the right location. The following steps describe this process:
The language files will now be supported when OCR is enabled in the "Keyword Search" settings.
-<!----------------------------------------->
+\section keyword_results Viewing Results
-<br>
-Using the Module
-======
-Search queries can be executed manually by the user at any time, provided some files are already indexed and ready to be searched. Searching before indexing is complete will naturally only consider the indexes that have already been compiled.
-See the \ref ingest_page "Ingest Modules" page for more information on ingest in general.
-Once there are files in the index, the \ref ad_hoc_keyword_search_page will be available for manual searching at any time.
-<!----------------------------------->
-Ingest Settings
-------
-The "Keyword Search" module ingest settings allow the user to enable or disable the specific built-in search expressions: Phone Numbers, IP Addresses, Email Addresses, and URLs. Using the "Global Settings" button (see below), custom keyword groups can be added.
-\image html keyword-search-ingest-settings.PNG
-Viewing Results
-------
The "Keyword Search" module will save the search results, whether the search was performed by the ingest process or manually by the user. The saved results are available in the directory tree in the left panel.

View File

@ -300,7 +300,7 @@ In short:
<li>"Soft" commits do not flush the newly indexed documents to disk, but they do make them "visible" for searching.
</ul>
By default (when using AutopsyConfig), Solr servers perform a "hard" commit every 5 minutes and also make newly indexed documents "visible" for searching. These operations can be resource-intensive when performed on a large index located on a shared network drive. In this situation it can be very worthwhile to modify the Solr configuration (located in \c "SOLR_INSTALL_DIRECTORY\server\solr\configsets\AutopsyConfig\conf\solrconfig.xml") with the following changes:
<ol>
<li>Change the "hard" commits to systematically flush newly created documents every 5 minutes without making them "visible". This can be done by setting "openSearcher" to "false" in the "autoCommit" section of the Solr configuration file.
<li>Enable "soft" commits to be performed every 30 minutes, thereby making newly indexed documents "visible" for searching every 30 minutes. This can be done by enabling the "autoSoftCommit" section of the Solr configuration file. The downside is that it can take up to 30 minutes for the latest document to become searchable. Keep in mind that this only matters when an analyst searches a case while ingest is still running. Autopsy automatically performs a commit once ingest completes, so all documents are immediately visible at that point.

View File

@ -1,4 +1,4 @@
/*! \page result_viewer_page Result Viewer
[TOC]
@ -25,8 +25,8 @@ These columns display the following information:
<ul>
<li> (S)core - indicates whether the item is interesting or notable.
<ul>
-<li>Displays a red icon if the file matches a notable hash or has been tagged with a notable tag.
+<li>Displays a red icon if at least one child analysis result is notable or if the file has been tagged with a notable tag.
-<li>Displays a yellow icon if the file matches an interesting item or has been tagged with a non-notable tag.
+<li>Displays a yellow icon if at least one child analysis result is likely notable or if the file has been tagged.
</ul>
<li> (C)omment - indicates whether the item has a comment in the central repository or whether a comment is associated with a tag.
<li> (O)ther occurrences - indicates how many data sources in the central repository contain this item. The count includes the selected item.

View File

@ -1,12 +1,14 @@
/*! \page tree_viewer_page Tree Viewer
[TOC]
-The tree on the left side of the main window is where you can browse the files in the case's data sources and find the saved results from the automated analysis (ingest). The tree has five main areas:
+The tree on the left side of the main window is where you can browse the files in the case's data sources and find the saved results from the automated analysis (ingest). The tree has seven main areas:
- <b>Persons / Hosts / Data Sources:</b> This shows the directory tree hierarchy of the data sources. You can navigate to a specific file or directory here. Each data source added to the case is represented as a separate subtree. If you add a data source multiple times, it shows up multiple times.
-- <b>Views:</b> Specific types of files from the data sources are shown here, aggregated by type or other properties. Files here can come from more than one data source.
+- <b>File Views:</b> Specific types of files from the data sources are shown here, aggregated by type or other properties. Files here can come from more than one data source.
-- <b>Results:</b> This is where you can see the results from the background ingest tasks as well as your search results.
+- <b>Data Artifacts:</b> This is one of the main places where results from the \ref ingest_page appear.
+- <b>Analysis Results:</b> This is another of the main places where results from the \ref ingest_page appear.
+- <b>OS Accounts:</b> This is where you can see both the results of the automated analysis running in the background and the results of your search.
- <b>Tags:</b> This is where files and results that have been \ref tagging_page "tagged" are shown.
- <b>Reports:</b> Reports that you have generated or that ingest modules have created are shown here.
@ -43,22 +45,28 @@ Unallocated space represents the parts of a file system
An example of the single file extraction option is shown below.
\image html extracting-unallocated-space.PNG
-\section ui_tree_views Views
+\section ui_tree_views File Views
The "Views" area filters all the files in the case by certain file properties.
- <b>File Types</b> - Sorts files by extension or MIME type and shows them in the appropriate group. For example, files with .mp3 and .wav extensions end up in the "Audio" group.
- <b>Deleted Files</b> - Shows files that have been deleted but whose names have been recovered.
- <b>File Size</b> - Sorts files by size.
+\section ui_tree_results Data Artifacts
-\section ui_tree_results Results
+This section shows the data artifacts created by running ingest modules. In general, data artifacts contain concrete information extracted from the data source, for example call logs and messages from communication logs, or web bookmarks extracted from a browser database.
-- <b>Extracted Content:</b> Many ingest modules will place their results here: EXIF metadata, GPS locations, or web history, for example.
-- <b>Keyword Hits:</b> Keyword search results show up here.
+\section ui_tree_analysis_results Analysis Results
-- <b>Hashset Hits:</b> Hash set search results show up here.
-- <b>E-Mail Messages:</b> E-mail messages show up here.
+This section shows the analysis results created by running ingest modules. In general, analysis results contain information interesting to the user. For example, if the user sets up a list of \ref hash_db_page "notable hashes", any hash set hits will show up here.
-- <b>Interesting Items:</b> Items deemed interesting show up here.
-- <b>Accounts:</b> Credit card accounts show up here.
+\section ui_tree_os_accounts OS Accounts
-- <b>Tags:</b> Any item you tagged shows up here so you can find it easily.
+This section shows the operating system accounts found in the case. See \ref host_os_accounts for an example.
+\section ui_tree_tags Tags
+Any item you tag shows up here so you can find it easily. See the \ref tagging_page page for more information.
\section ui_tree_reports Reports

View File

@ -40,7 +40,7 @@ from java.lang import Class
from java.lang import System
from java.sql import DriverManager, SQLException
from java.util.logging import Level
-from java.util import ArrayList
+from java.util import Arrays
from java.io import File
from org.sleuthkit.datamodel import SleuthkitCase
from org.sleuthkit.datamodel import AbstractFile
@ -113,7 +113,7 @@ class ContactsDbIngestModule(DataSourceIngestModule):
progressBar.switchToIndeterminate()
# Use blackboard class to index blackboard artifacts for keyword search
-blackboard = Case.getCurrentCase().getServices().getBlackboard()
+blackboard = Case.getCurrentCase().getSleuthkitCase().getBlackboard()
# Find files named contacts.db, regardless of parent path
fileManager = Case.getCurrentCase().getServices().getFileManager()
@ -162,30 +162,21 @@ class ContactsDbIngestModule(DataSourceIngestModule):
# Make an artifact on the blackboard, TSK_CONTACT and give it attributes for each of the fields
-art = file.newArtifact(BlackboardArtifact.ARTIFACT_TYPE.TSK_CONTACT)
-attributes = ArrayList()
-attributes.add(BlackboardAttribute(BlackboardAttribute.ATTRIBUTE_TYPE.TSK_NAME_PERSON.getTypeID(),
-    ContactsDbIngestModuleFactory.moduleName, name))
-attributes.add(BlackboardAttribute(BlackboardAttribute.ATTRIBUTE_TYPE.TSK_EMAIL.getTypeID(),
-    ContactsDbIngestModuleFactory.moduleName, email))
-attributes.add(BlackboardAttribute(BlackboardAttribute.ATTRIBUTE_TYPE.TSK_PHONE_NUMBER.getTypeID(),
-    ContactsDbIngestModuleFactory.moduleName, phone))
-art.addAttributes(attributes)
+art = file.newDataArtifact(BlackboardArtifact.Type.TSK_CONTACT, Arrays.asList(
+    BlackboardAttribute(BlackboardAttribute.Type.TSK_NAME_PERSON,
+        ContactsDbIngestModuleFactory.moduleName, name),
+    BlackboardAttribute(BlackboardAttribute.Type.TSK_EMAIL,
+        ContactsDbIngestModuleFactory.moduleName, email),
+    BlackboardAttribute(BlackboardAttribute.Type.TSK_PHONE_NUMBER,
+        ContactsDbIngestModuleFactory.moduleName, phone)
+))
try:
# index the artifact for keyword search
-blackboard.indexArtifact(art)
+blackboard.postArtifact(art, ContactsDbIngestModuleFactory.moduleName)
except Blackboard.BlackboardException as e:
self.log(Level.SEVERE, "Error indexing artifact " + art.getDisplayName())
-# Fire an event to notify the UI and others that there are new artifacts
-IngestServices.getInstance().fireModuleDataEvent(
-    ModuleDataEvent(ContactsDbIngestModuleFactory.moduleName,
-        BlackboardArtifact.ARTIFACT_TYPE.TSK_CONTACT, None))
# Clean up
stmt.close()
dbConn.close()
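A note on the API change above: newDataArtifact() plus Blackboard.postArtifact() replaces the old newArtifact()/indexArtifact()/fireModuleDataEvent() sequence, because postArtifact() both indexes the artifact for keyword search and notifies event listeners. A minimal Java rendering of the same sequence, assuming an AbstractFile file, a Blackboard blackboard, a Logger logger, and a module display name MODULE_NAME (the name, email, and phone locals mirror the sample):

    // Create a TSK_CONTACT data artifact with typed attributes, then post it once.
    // (newDataArtifact throws TskCoreException, handled by the sample's caller.)
    DataArtifact art = file.newDataArtifact(BlackboardArtifact.Type.TSK_CONTACT, Arrays.asList(
            new BlackboardAttribute(BlackboardAttribute.Type.TSK_NAME_PERSON, MODULE_NAME, name),
            new BlackboardAttribute(BlackboardAttribute.Type.TSK_EMAIL, MODULE_NAME, email),
            new BlackboardAttribute(BlackboardAttribute.Type.TSK_PHONE_NUMBER, MODULE_NAME, phone)));
    try {
        blackboard.postArtifact(art, MODULE_NAME);
    } catch (Blackboard.BlackboardException ex) {
        logger.log(Level.SEVERE, "Error posting artifact " + art.getDisplayName(), ex);
    }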

View File

@ -145,7 +145,7 @@ class RunExeIngestModule(DataSourceIngestModule):
# Add each argument in its own line. I.e. "-f foo" would be two calls to .add()
cmd.add(imagePaths[0])
-processBuilder = ProcessBuilder(cmd);
+processBuilder = ProcessBuilder(cmd)
processBuilder.redirectOutput(reportFile)
ExecUtil.execute(processBuilder, DataSourceIngestModuleProcessTerminator(self.context))

View File

@ -57,6 +57,8 @@ from org.sleuthkit.autopsy.casemodule import Case
from org.sleuthkit.autopsy.casemodule.services import Services
from org.sleuthkit.autopsy.casemodule.services import FileManager
from org.sleuthkit.autopsy.casemodule.services import Blackboard
+from org.sleuthkit.datamodel import Score
+from java.util import Arrays
# Factory that defines the name and details of the module and allows Autopsy
# to create instances of the modules that will do the analysis.
@ -107,7 +109,7 @@ class FindBigRoundFilesIngestModule(FileIngestModule):
def process(self, file):
# Use blackboard class to index blackboard artifacts for keyword search
-blackboard = Case.getCurrentCase().getServices().getBlackboard()
+blackboard = Case.getCurrentCase().getSleuthkitCase().getBlackboard()
# Skip non-files
if ((file.getType() == TskData.TSK_DB_FILES_TYPE_ENUM.UNALLOC_BLOCKS) or
@ -120,22 +122,19 @@ class FindBigRoundFilesIngestModule(FileIngestModule):
# Make an artifact on the blackboard. TSK_INTERESTING_FILE_HIT is a generic type of
# artifact. Refer to the developer docs for other examples.
-art = file.newArtifact(BlackboardArtifact.ARTIFACT_TYPE.TSK_INTERESTING_FILE_HIT)
-att = BlackboardAttribute(BlackboardAttribute.ATTRIBUTE_TYPE.TSK_SET_NAME.getTypeID(),
-    FindBigRoundFilesIngestModuleFactory.moduleName, "Big and Round Files")
-art.addAttribute(att)
+art = file.newAnalysisResult(BlackboardArtifact.Type.TSK_INTERESTING_FILE_HIT, Score.SCORE_LIKELY_NOTABLE,
+    None, "Big and Round Files", None,
+    Arrays.asList(
+        BlackboardAttribute(BlackboardAttribute.Type.TSK_SET_NAME,
+            FindBigRoundFilesIngestModuleFactory.moduleName,
+            "Big and Round Files"))).getAnalysisResult()
try:
-# index the artifact for keyword search
-blackboard.indexArtifact(art)
+# post the artifact for listeners of artifact events
+blackboard.postArtifact(art, FindBigRoundFilesIngestModuleFactory.moduleName)
except Blackboard.BlackboardException as e:
self.log(Level.SEVERE, "Error indexing artifact " + art.getDisplayName())
-# Fire an event to notify the UI and others that there is a new artifact
-IngestServices.getInstance().fireModuleDataEvent(
-    ModuleDataEvent(FindBigRoundFilesIngestModuleFactory.moduleName,
-        BlackboardArtifact.ARTIFACT_TYPE.TSK_INTERESTING_FILE_HIT, None))
return IngestModule.ProcessResult.OK
# Where any shutdown code is run and resources are freed.
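The analysis result counterpart, sketched in Java with the same values the hunk uses (file, blackboard, and the factory's module name assumed from context): newAnalysisResult() takes the artifact type, a Score, an optional conclusion, a configuration string (here the interesting files set name), an optional justification, and the attribute list; it returns a wrapper whose getAnalysisResult() yields the artifact to post.

    AnalysisResult art = file.newAnalysisResult(
            BlackboardArtifact.Type.TSK_INTERESTING_FILE_HIT,
            Score.SCORE_LIKELY_NOTABLE,
            null,                   // conclusion: none
            "Big and Round Files",  // configuration: the interesting files set name
            null,                   // justification: none
            Arrays.asList(new BlackboardAttribute(BlackboardAttribute.Type.TSK_SET_NAME,
                    FindBigRoundFilesIngestModuleFactory.moduleName, "Big and Round Files")))
            .getAnalysisResult();
    blackboard.postArtifact(art, FindBigRoundFilesIngestModuleFactory.moduleName); // may throw Blackboard.BlackboardException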

View File

@@ -45,12 +45,13 @@ from java.lang import Class
 from java.lang import System
 from java.sql import DriverManager, SQLException
 from java.util.logging import Level
-from java.util import ArrayList
+from java.util import Arrays
 from org.sleuthkit.datamodel import SleuthkitCase
 from org.sleuthkit.datamodel import AbstractFile
 from org.sleuthkit.datamodel import ReadContentInputStream
 from org.sleuthkit.datamodel import BlackboardArtifact
 from org.sleuthkit.datamodel import BlackboardAttribute
+from org.sleuthkit.datamodel import Blackboard
 from org.sleuthkit.datamodel import TskData
 from org.sleuthkit.autopsy.ingest import IngestModule
 from org.sleuthkit.autopsy.ingest.IngestModule import IngestModuleException
@@ -135,7 +136,8 @@ class RegistryExampleIngestModule(DataSourceIngestModule):
             self.log(Level.INFO, "ExampleRegistry Directory already exists " + tempDir)

         # Set the database to be read to the one created by the prefetch parser program
-        skCase = Case.getCurrentCase().getSleuthkitCase();
+        skCase = Case.getCurrentCase().getSleuthkitCase()
+        blackboard = Case.getCurrentCase().getSleuthkitCase().getBlackboard()
         fileManager = Case.getCurrentCase().getServices().getFileManager()

         # Look for files to process
@@ -170,12 +172,12 @@ class RegistryExampleIngestModule(DataSourceIngestModule):

         # Setup Artifact and Attributes
-        try:
-            artID = skCase.addArtifactType( "TSK_REGISTRY_RUN_KEYS", "Registry Run Keys")
-        except:
-            self.log(Level.INFO, "Artifacts Creation Error, some artifacts may not exist now. ==> ")
-        artId = skCase.getArtifactTypeID("TSK_REGISTRY_RUN_KEYS")
+        artType = skCase.getArtifactType("TSK_REGISTRY_RUN_KEYS")
+        if not artType:
+            try:
+                artType = skCase.addBlackboardArtifactType( "TSK_REGISTRY_RUN_KEYS", "Registry Run Keys")
+            except:
+                self.log(Level.WARNING, "Artifacts Creation Error, some artifacts may not exist now. ==> ")

         try:
             attributeIdRunKeyName = skCase.addArtifactAttributeType("TSK_REG_RUN_KEY_NAME", BlackboardAttribute.TSK_BLACKBOARD_ATTRIBUTE_VALUE_TYPE.STRING, "Run Key Name")
@@ -198,19 +200,18 @@ class RegistryExampleIngestModule(DataSourceIngestModule):
         # registryKeysFound is a list of lists, each with the following records: abstractFile, Registry Key Location, Key Name, Key Value
         for registryKey in self.registryKeysFound:
-            attributes = ArrayList()
-            art = registryKey[0].newArtifact(artId)
-
-            attributes.add(BlackboardAttribute(attributeIdRegKeyLoc, moduleName, registryKey[1]))
-            attributes.add(BlackboardAttribute(attributeIdRunKeyName, moduleName, registryKey[2]))
-            attributes.add(BlackboardAttribute(attributeIdRunKeyValue, moduleName, registryKey[3]))
-            art.addAttributes(attributes)
+            self.log(Level.INFO, "Creating artifact for registry key with path: " + registryKey[1] + " and key: " + registryKey[2])
+            art = registryKey[0].newDataArtifact(artType, Arrays.asList(
+                BlackboardAttribute(attributeIdRegKeyLoc, moduleName, registryKey[1]),
+                BlackboardAttribute(attributeIdRunKeyName, moduleName, registryKey[2]),
+                BlackboardAttribute(attributeIdRunKeyValue, moduleName, registryKey[3])
+            ))

             # index the artifact for keyword search
             try:
-                blackboard.indexArtifact(art)
-            except:
-                self._logger.log(Level.WARNING, "Error indexing artifact " + art.getDisplayName())
+                blackboard.postArtifact(art, moduleName)
+            except Blackboard.BlackboardException as ex:
+                self.log(Level.SEVERE, "Unable to index blackboard artifact " + str(art.getArtifactTypeName()), ex)

         #Clean up registryExample directory and files
         try:
@@ -236,7 +237,7 @@ class RegistryExampleIngestModule(DataSourceIngestModule):
             softwareRegFile = RegistryHiveFile(File(softwareHive))
             for runKey in self.registrySoftwareRunKeys:
                 currentKey = self.findRegistryKey(softwareRegFile, runKey)
-                if len(currentKey.getValueList()) > 0:
+                if currentKey and len(currentKey.getValueList()) > 0:
                     skValues = currentKey.getValueList()
                     for skValue in skValues:
                         regKey = []
@@ -255,7 +256,7 @@ class RegistryExampleIngestModule(DataSourceIngestModule):
             ntuserRegFile = RegistryHiveFile(File(ntuserHive))
             for runKey in self.registryNTUserRunKeys:
                 currentKey = self.findRegistryKey(ntuserRegFile, runKey)
-                if len(currentKey.getValueList()) > 0:
+                if currentKey and len(currentKey.getValueList()) > 0:
                     skValues = currentKey.getValueList()
                     for skValue in skValues:
                         regKey = []
@@ -276,9 +277,10 @@ class RegistryExampleIngestModule(DataSourceIngestModule):
             for key in regKeyList:
                 currentKey = currentKey.getSubkey(key)
             return currentKey
-        except:
+        except Exception as ex:
             # Key not found
-            return null
+            self.log(Level.SEVERE, "registry key parsing issue:", ex)
+            return None
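
Two details of this file are worth calling out. First, the old `return null` would have raised a NameError under Jython (Python has no `null`), which is why the helper now returns `None` and its callers guard with `if currentKey and ...`. Second, the custom-type flow resolves or creates a `BlackboardArtifact.Type` once and feeds it straight to `newDataArtifact`. A condensed sketch, assuming `skCase`, `blackboard`, `moduleName`, and the attribute ids are set up as in the module above; `regFile` and the key values are illustrative:

    # Sketch: resolve or create the custom artifact type, then attach a
    # data artifact built from a list of attributes and post it.
    artType = skCase.getArtifactType("TSK_REGISTRY_RUN_KEYS")
    if not artType:
        # first run for this case: register the custom type
        artType = skCase.addBlackboardArtifactType("TSK_REGISTRY_RUN_KEYS", "Registry Run Keys")

    art = regFile.newDataArtifact(artType, Arrays.asList(
        BlackboardAttribute(attributeIdRegKeyLoc, moduleName, keyLocation),
        BlackboardAttribute(attributeIdRunKeyName, moduleName, keyName),
        BlackboardAttribute(attributeIdRunKeyValue, moduleName, keyValue)))
    blackboard.postArtifact(art, moduleName)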

View File

@@ -37,6 +37,7 @@
 import os
+import codecs
 from java.lang import System
 from java.util.logging import Level
 from org.sleuthkit.datamodel import TskData
@@ -72,11 +73,11 @@ class CSVReportModule(GeneralReportModuleAdapter):
     # The 'baseReportDir' object being passed in is a string with the directory that reports are being stored in. Report should go into baseReportDir + getRelativeFilePath().
     # The 'progressBar' object is of type ReportProgressPanel.
     # See: http://sleuthkit.org/autopsy/docs/api-docs/latest/classorg_1_1sleuthkit_1_1autopsy_1_1report_1_1_report_progress_panel.html
-    def generateReport(self, baseReportDir, progressBar):
+    def generateReport(self, reportSettings, progressBar):

         # Open the output file.
-        fileName = os.path.join(baseReportDir, self.getRelativeFilePath())
-        report = open(fileName, 'w')
+        fileName = os.path.join(reportSettings.getReportDirectoryPath(), self.getRelativeFilePath())
+        report = codecs.open(fileName, "w", "utf-8")

         # Query the database for the files (ignore the directories)
         sleuthkitCase = Case.getCurrentCase().getSleuthkitCase()
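
The switch to `codecs.open` matters because file and path names pulled out of an image are not guaranteed to be ASCII; under Jython 2.7, writing them through a plain `open(..., 'w')` can raise UnicodeEncodeError. A minimal sketch of the updated opening pattern, assuming `reportSettings` arrives via `generateReport` as above; the header row is illustrative:

    import os
    import codecs

    # Sketch: open the report through a UTF-8 codec so non-ASCII file
    # names from the image write cleanly.
    fileName = os.path.join(reportSettings.getReportDirectoryPath(),
                            self.getRelativeFilePath())
    report = codecs.open(fileName, "w", "utf-8")
    try:
        report.write(u"name,size\n")   # illustrative header
    finally:
        report.close()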

View File

@@ -53,9 +53,8 @@ from org.sleuthkit.autopsy.casemodule import Case
 from org.sleuthkit.autopsy.casemodule.services import Services
 from org.sleuthkit.autopsy.casemodule.services import FileManager
 from org.sleuthkit.autopsy.casemodule.services import Blackboard
-from org.sleuthkit.autopsy.casemodule.services import Blackboard
 from org.sleuthkit.datamodel import Score
-from java.util import ArrayList
+from java.util import Arrays

 # Factory that defines the name and details of the module and allows Autopsy
 # to create instances of the modules that will do the analysis.
@@ -86,8 +85,6 @@ class SampleJythonDataSourceIngestModuleFactory(IngestModuleFactoryAdapter):

 # Data Source-level ingest module. One gets created per data source.
 # TODO: Rename this to something more specific. Could just remove "Factory" from above name.
 class SampleJythonDataSourceIngestModule(DataSourceIngestModule):
-
-    LIKELY_NOTABLE_SCORE = Score(Score.Significance.LIKELY_NOTABLE, Score.MethodCategory.AUTO)

     _logger = Logger.getLogger(SampleJythonDataSourceIngestModuleFactory.moduleName)

     def log(self, level, msg):
@@ -118,7 +115,7 @@ class SampleJythonDataSourceIngestModule(DataSourceIngestModule):
         progressBar.switchToIndeterminate()

         # Use blackboard class to index blackboard artifacts for keyword search
-        blackboard = Case.getCurrentCase().getServices().getBlackboard()
+        blackboard = Case.getCurrentCase().getSleuthkitCase().getBlackboard()

         # For our example, we will use FileManager to get all
         # files with the word "test"
@@ -142,13 +139,15 @@ class SampleJythonDataSourceIngestModule(DataSourceIngestModule):
             # Make an artifact on the blackboard. TSK_INTERESTING_FILE_HIT is a generic type of
             # artifact. Refer to the developer docs for other examples.
-            attrs = ArrayList()
-            attrs.add(BlackboardAttribute(BlackboardAttribute.ATTRIBUTE_TYPE.TSK_SET_NAME, SampleJythonDataSourceIngestModuleFactory.moduleName, "Test file"))
-            art = file.newAnalysisResult(BlackboardArtifact.ARTIFACT_TYPE.TSK_INTERESTING_FILE_HIT, self.LIKELY_NOTABLE_SCORE, None, "Test file", None, attrs)
+            attrs = Arrays.asList(BlackboardAttribute(BlackboardAttribute.Type.TSK_SET_NAME,
+                                                      SampleJythonDataSourceIngestModuleFactory.moduleName,
+                                                      "Test file"))
+            art = file.newAnalysisResult(BlackboardArtifact.Type.TSK_INTERESTING_FILE_HIT, Score.SCORE_LIKELY_NOTABLE,
+                                         None, "Test file", None, attrs).getAnalysisResult()

             try:
-                # index the artifact for keyword search
-                blackboard.indexArtifact(art)
+                # post the artifact for listeners of artifact events.
+                blackboard.postArtifact(art, SampleJythonDataSourceIngestModuleFactory.moduleName)
             except Blackboard.BlackboardException as e:
                 self.log(Level.SEVERE, "Error indexing artifact " + art.getDisplayName())
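
The same two-line substitution appears in every sample module in this changeset: the blackboard now comes from the datamodel's `SleuthkitCase` rather than the deprecated casemodule services wrapper, and `postArtifact` subsumes `indexArtifact`. Side by side, with `MyFactory` again as a placeholder:

    # Old (casemodule services wrapper):
    #   blackboard = Case.getCurrentCase().getServices().getBlackboard()
    #   blackboard.indexArtifact(art)
    # New (org.sleuthkit.datamodel.Blackboard):
    blackboard = Case.getCurrentCase().getSleuthkitCase().getBlackboard()
    blackboard.postArtifact(art, MyFactory.moduleName)  # indexes and fires the artifact event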

View File

@@ -55,8 +55,7 @@ from org.sleuthkit.autopsy.casemodule import Case
 from org.sleuthkit.autopsy.casemodule.services import Services
 from org.sleuthkit.autopsy.casemodule.services import FileManager
 from org.sleuthkit.autopsy.casemodule.services import Blackboard
-from org.sleuthkit.datamodel import Score
-from java.util import ArrayList
+from java.util import Arrays

 # Factory that defines the name and details of the module and allows Autopsy
 # to create instances of the modules that will do the analysis.
@@ -89,7 +88,6 @@ class SampleJythonFileIngestModuleFactory(IngestModuleFactoryAdapter):
 # TODO: Rename this to something more specific. Could just remove "Factory" from above name.
 # Looks at the attributes of the passed in file.
 class SampleJythonFileIngestModule(FileIngestModule):

-    LIKELY_NOTABLE_SCORE = Score(Score.Significance.LIKELY_NOTABLE, Score.MethodCategory.AUTO)

     _logger = Logger.getLogger(SampleJythonFileIngestModuleFactory.moduleName)
@@ -119,7 +117,7 @@ class SampleJythonFileIngestModule(FileIngestModule):
             return IngestModule.ProcessResult.OK

         # Use blackboard class to index blackboard artifacts for keyword search
-        blackboard = Case.getCurrentCase().getServices().getBlackboard()
+        blackboard = Case.getCurrentCase().getSleuthkitCase().getBlackboard()

         # For an example, we will flag files with .txt in the name and make a blackboard artifact.
         if file.getName().lower().endswith(".txt"):
@@ -129,23 +127,18 @@ class SampleJythonFileIngestModule(FileIngestModule):
             # Make an artifact on the blackboard. TSK_INTERESTING_FILE_HIT is a generic type of
             # artifact. Refer to the developer docs for other examples.
-            attrs = ArrayList()
-            attrs.add(BlackboardAttribute(BlackboardAttribute.ATTRIBUTE_TYPE.TSK_SET_NAME,
-                                          SampleJythonFileIngestModuleFactory.moduleName, "Text Files"))
-            art = file.newAnalysisResult(BlackboardArtifact.ARTIFACT_TYPE.TSK_INTERESTING_FILE_HIT, self.LIKELY_NOTABLE_SCORE, None, "Text Files", None, attrs)
+            attrs = Arrays.asList(BlackboardAttribute(BlackboardAttribute.Type.TSK_SET_NAME,
+                                                      SampleJythonFileIngestModuleFactory.moduleName, "Text Files"))
+            art = file.newAnalysisResult(BlackboardArtifact.Type.TSK_INTERESTING_FILE_HIT, Score.SCORE_LIKELY_NOTABLE,
+                                         None, "Text Files", None, attrs).getAnalysisResult()

             try:
-                # index the artifact for keyword search
-                blackboard.indexArtifact(art)
+                # post the artifact for listeners of artifact events
+                blackboard.postArtifact(art, SampleJythonFileIngestModuleFactory.moduleName)
             except Blackboard.BlackboardException as e:
                 self.log(Level.SEVERE, "Error indexing artifact " + art.getDisplayName())

-            # Fire an event to notify the UI and others that there is a new artifact
-            IngestServices.getInstance().fireModuleDataEvent(
-                ModuleDataEvent(SampleJythonFileIngestModuleFactory.moduleName,
-                                BlackboardArtifact.ARTIFACT_TYPE.TSK_INTERESTING_FILE_HIT, None))
-
             # For the example (this wouldn't be needed normally), we'll query the blackboard for data that was added
             # by other modules. We then iterate over its attributes. We'll just print them, but you would probably
             # want to do something with them.
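
The retained comment block above refers to reading back artifacts that other modules have attached to the same file. A short sketch of what that query loop typically looks like, assuming the usual `getArtifacts`/`getAttributes` accessors on the datamodel objects; logging stands in for real processing:

    # Sketch: enumerate interesting-file hits already attached to this
    # file and walk their attributes.
    artifactList = file.getArtifacts(BlackboardArtifact.ARTIFACT_TYPE.TSK_INTERESTING_FILE_HIT)
    for artifact in artifactList:
        for attribute in artifact.getAttributes():
            self.log(Level.INFO, attribute.toString())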

View File

@@ -67,10 +67,12 @@ class SampleGeneralReportModule(GeneralReportModuleAdapter):
         return "sampleReport.txt"

     # TODO: Update this method to make a report
-    # The 'baseReportDir' object being passed in is a string with the directory that reports are being stored in. Report should go into baseReportDir + getRelativeFilePath().
+    # The 'reportSettings' object being passed in is an instance of org.sleuthkit.autopsy.report.GeneralReportSettings.
+    # GeneralReportSettings.getReportDirectoryPath() is the directory that reports are being stored in.
+    # Report should go into GeneralReportSettings.getReportDirectoryPath() + getRelativeFilePath().
     # The 'progressBar' object is of type ReportProgressPanel.
     # See: http://sleuthkit.org/autopsy/docs/api-docs/latest/classorg_1_1sleuthkit_1_1autopsy_1_1report_1_1_report_progress_panel.html
-    def generateReport(self, baseReportDir, progressBar):
+    def generateReport(self, reportSettings, progressBar):

         # For an example, we write a file with the number of files created in the past 2 weeks
         # Configure progress bar for 2 tasks
@@ -95,7 +97,7 @@ class SampleGeneralReportModule(GeneralReportModuleAdapter):
         progressBar.increment()

         # Write the count to the report file.
-        fileName = os.path.join(baseReportDir, self.getRelativeFilePath())
+        fileName = os.path.join(reportSettings.getReportDirectoryPath(), self.getRelativeFilePath())
         report = open(fileName, 'w')
         report.write("file count = %d" % fileCount)
         report.close()
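
The sample counts files created in the past two weeks, and the cutoff is plain epoch arithmetic. A sketch of the counting step, assuming a where-clause query helper such as `SleuthkitCase.findAllFilesWhere` (the clause and loop are illustrative, not the module's exact code):

    import time

    # Sketch: count files with a creation time in the past two weeks;
    # crtime in the case database is Unix epoch seconds.
    twoWeeksAgo = int(time.time()) - 14 * 24 * 60 * 60
    fileCount = 0
    for f in sleuthkitCase.findAllFilesWhere("crtime > %d" % twoWeeksAgo):
        fileCount += 1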

View File

@@ -452,7 +452,8 @@ class TskGuidUtils:
         guid_vs_info = TskGuidUtils._get_guid_dict(db_conn, "SELECT obj_id, vs_type, img_offset FROM tsk_vs_info", "_")
         guid_fs_info = TskGuidUtils._get_guid_dict(db_conn, "SELECT obj_id, img_offset, fs_type FROM tsk_fs_info", "_")
         guid_image_names = TskGuidUtils._get_guid_dict(db_conn, "SELECT obj_id, name FROM tsk_image_names "
-                                                                "WHERE sequence=0")
+                                                                "WHERE sequence=0",
+                                                       normalizer=get_filename)
         guid_os_accounts = TskGuidUtils._get_guid_dict(db_conn, "SELECT os_account_obj_id, addr FROM tsk_os_accounts")
         guid_reports = TskGuidUtils._get_guid_dict(db_conn, "SELECT obj_id, path FROM reports",
                                                    normalizer=normalize_file_path)
@@ -625,6 +626,22 @@ def get_path_segs(path: Union[str, None]) -> Union[List[str], None]:
     return None


+def get_filename(path: Union[str, None]) -> Union[str, None]:
+    """
+    Returns the last segment of a file path.
+    Args:
+        path: The path.
+
+    Returns: The last segment of the path.
+    """
+    path_segs = get_path_segs(path)
+    if path_segs is not None and len(path_segs) > 0:
+        return path_segs[-1]
+    else:
+        return None
+
+
 def index_of(lst, search_item) -> int:
     """
     Returns the index of the item in the list or -1.
@@ -827,7 +844,9 @@ def normalize_tsk_event_descriptions(guid_util: TskGuidUtils, row: Dict[str, any
     # replace object ids with information that is deterministic
     row_copy['event_description_id'] = MASKED_ID
     row_copy['content_obj_id'] = guid_util.get_guid_for_file_objid(row['content_obj_id'])
-    row_copy['artifact_id'] = guid_util.get_guid_for_artifactid(row['artifact_id']) if row['artifact_id'] else None
+    row_copy['artifact_id'] = guid_util.get_guid_for_artifactid(row['artifact_id']) \
+        if row['artifact_id'] is not None else None
+    row_copy['data_source_obj_id'] = guid_util.get_guid_for_file_objid(row['data_source_obj_id'])

     if row['full_description'] == row['med_description'] == row['short_description']:
         row_copy['full_description'] = _mask_event_desc(row['full_description'])
@@ -853,8 +872,8 @@ def normalize_ingest_jobs(guid_util: TskGuidUtils, row: Dict[str, any]) -> Dict[
     start_time = row['start_date_time']
     end_time = row['end_date_time']
     if start_time <= end_time:
-        row_copy['start_date_time'] = 0
-        row_copy['end_date_time'] = 0
+        row_copy['start_date_time'] = MASKED_TIME
+        row_copy['end_date_time'] = MASKED_TIME

     return row_copy
@@ -916,6 +935,7 @@ def normalize_tsk_files(guid_util: TskGuidUtils, row: Dict[str, any]) -> Dict[st
     row_copy['md5'] = "MD5_IGNORED"
     row_copy['sha256'] = "SHA256_IGNORED"
+    row_copy['data_source_obj_id'] = guid_util.get_guid_for_file_objid(row['data_source_obj_id'])
     row_copy['obj_id'] = MASKED_OBJ_ID
     row_copy['os_account_obj_id'] = 'MASKED_OS_ACCOUNT_OBJ_ID'
     row_copy['parent_path'] = normalize_file_path(row['parent_path'])
@@ -1009,6 +1029,7 @@ def normalize_tsk_objects(guid_util: TskGuidUtils, row: Dict[str, any]) -> Dict[
     return row_copy


+MASKED_TIME = "MASKED_TIME"
 MASKED_OBJ_ID = "MASKED_OBJ_ID"
 MASKED_ID = "MASKED_ID"
@@ -1027,14 +1048,15 @@ TABLE_NORMALIZATIONS: Dict[str, TableNormalization] = {
         "added_date_time": "{dateTime}"
     }),
     "image_gallery_groups": NormalizeColumns({
-        "group_id": MASKED_ID
+        "group_id": MASKED_ID,
+        "data_source_obj_id": lambda guid_util, col: guid_util.get_guid_for_objid(col, omitted_value=None),
     }),
     "image_gallery_groups_seen": IGNORE_TABLE,
     "ingest_jobs": NormalizeRow(normalize_ingest_jobs),
     "reports": NormalizeColumns({
         "obj_id": MASKED_OBJ_ID,
         "path": "AutopsyTestCase",
-        "crtime": 0
+        "crtime": MASKED_TIME
     }),
     "tsk_aggregate_score": NormalizeColumns({
         "obj_id": lambda guid_util, col: guid_util.get_guid_for_objid(col, omitted_value="Object ID Omitted"),
@@ -1053,8 +1075,7 @@ TABLE_NORMALIZATIONS: Dict[str, TableNormalization] = {
     "tsk_event_descriptions": NormalizeRow(normalize_tsk_event_descriptions),
     "tsk_events": NormalizeColumns({
         "event_id": "MASKED_EVENT_ID",
-        "event_description_id": None,
-        "time": None,
+        "event_description_id": 'ID OMITTED'
     }),
     "tsk_examiners": NormalizeColumns({
         "login_name": "{examiner_name}"
@@ -1064,6 +1085,9 @@ TABLE_NORMALIZATIONS: Dict[str, TableNormalization] = {
         "obj_id": lambda guid_util, col: guid_util.get_guid_for_file_objid(col)
     }),
     "tsk_files_path": NormalizeRow(normalize_tsk_files_path),
+    "tsk_image_names": NormalizeColumns({
+        "name": lambda guid_util, col: get_filename(col)
+    }),
     "tsk_objects": NormalizeRow(normalize_tsk_objects),
     "tsk_os_account_attributes": NormalizeColumns({
         "id": MASKED_ID,
@@ -1121,7 +1145,8 @@ def write_normalized(guid_utils: TskGuidUtils, output_file, db_conn, table: str,
             # show row as json-like value
             entries = []
             for column in column_names:
-                value = get_sql_insert_value(row_dict[column] if column in row_dict and row_dict[column] else None)
+                dict_value = row_dict[column] if column in row_dict and row_dict[column] is not None else None
+                value = get_sql_insert_value(dict_value)
                 if value is not None:
                     entries.append((column, value))
             insert_values = ", ".join([f"{pr[0]}: {pr[1]}" for pr in entries])