Merge branch 'Update-iLeapp-aLeapp' into my_ileapp_aleapp_updates

This commit is contained in:
Greg DiCristofaro 2023-06-10 20:20:44 -04:00
commit 050ea72781
21 changed files with 695 additions and 39 deletions

View File

@ -48,6 +48,10 @@ public interface AutopsyItemVisitor<T> {
T visit(DeletedContent dc);
T visit(DeletedContent.DeletedContentFilter dcf);
T visit(ScoreContent sc);
T visit(ScoreContent.ScoreContentFilter scf);
T visit(FileSize fs);
@ -124,6 +128,16 @@ public interface AutopsyItemVisitor<T> {
public T visit(DeletedContent.DeletedContentFilter dcf) {
return defaultVisit(dcf);
}
@Override
public T visit(ScoreContent sc) {
return defaultVisit(sc);
}
@Override
public T visit(ScoreContent.ScoreContentFilter scf) {
return defaultVisit(scf);
}
@Override
public T visit(FileSize fs) {

View File

@ -412,6 +412,13 @@ ReportNode.reportNameProperty.name=Report Name
ReportNode.reportNameProperty.displayName=Report Name
ReportNode.reportNameProperty.desc=Name of the report
ReportsListNode.displayName=Reports
ScoreContent_badFilter_text=Bad Items
ScoreContent_createSheet_filterType_desc=no description
ScoreContent_createSheet_filterType_displayName=Type
ScoreContent_createSheet_name_desc=no description
ScoreContent_createSheet_name_displayName=Name
ScoreContent_ScoreContentNode_name=Score
ScoreContent_susFilter_text=Suspicious Items
SlackFileNode.getActions.viewInNewWin.text=View in New Window
SlackFileNode.getActions.viewFileInDir.text=View File in Directory
SpecialDirectoryNode.getActions.viewInNewWin.text=View in New Window

View File

@ -32,6 +32,8 @@ import org.sleuthkit.autopsy.datamodel.FileSize.FileSizeRootNode;
import org.sleuthkit.autopsy.datamodel.FileTypes.FileTypesNode;
import org.sleuthkit.autopsy.datamodel.accounts.Accounts;
import org.sleuthkit.autopsy.allcasessearch.CorrelationAttributeInstanceNode;
import org.sleuthkit.autopsy.datamodel.ScoreContent.ScoreContentsNode;
import org.sleuthkit.autopsy.datamodel.ScoreContent.ScoreContentsChildren.ScoreContentNode;
/**
* Visitor pattern that goes over all nodes in the directory tree. This includes
@ -78,6 +80,10 @@ public interface DisplayableItemNodeVisitor<T> {
T visit(DeletedContentsNode dcn);
T visit(ScoreContentNode scn);
T visit(ScoreContentsNode scn);
T visit(FileSizeRootNode fsrn);
T visit(FileSizeNode fsn);
@ -335,6 +341,16 @@ public interface DisplayableItemNodeVisitor<T> {
return defaultVisit(dcn);
}
@Override
public T visit(ScoreContentNode scn) {
return defaultVisit(scn);
}
@Override
public T visit(ScoreContentsNode scn) {
return defaultVisit(scn);
}
@Override
public T visit(DeletedContentsNode dcn) {
return defaultVisit(dcn);

View File

@ -24,6 +24,8 @@ import org.openide.nodes.AbstractNode;
import org.openide.nodes.Children;
import org.openide.nodes.Node;
import org.openide.util.NbBundle;
import org.sleuthkit.autopsy.datamodel.ScoreContent.ScoreContentsChildren;
import org.sleuthkit.autopsy.datamodel.ScoreContent.ScoreContentsChildren.ScoreContentNode;
import org.sleuthkit.autopsy.datamodel.accounts.Accounts;
import org.sleuthkit.datamodel.SleuthkitVisitableItem;
@ -98,6 +100,11 @@ public class RootContentChildren extends Children.Keys<Object> {
return new DeletedContent.DeletedContentsNode(dc.getSleuthkitCase(), dc.filteringDataSourceObjId());
}
@Override
public AbstractNode visit(ScoreContent sc) {
return new ScoreContent.ScoreContentsNode(sc.getSleuthkitCase(), sc.filteringDataSourceObjId());
}
@Override
public AbstractNode visit(FileSize dc) {
return new FileSize.FileSizeRootNode(dc.getSleuthkitCase(), dc.filteringDataSourceObjId());

View File

@ -0,0 +1,596 @@
/*
* Autopsy Forensic Browser
*
* Copyright 2023 Basis Technology Corp.
* Contact: carrier <at> sleuthkit <dot> org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.sleuthkit.autopsy.datamodel;
import java.beans.PropertyChangeEvent;
import java.beans.PropertyChangeListener;
import java.text.MessageFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.logging.Level;
import org.apache.commons.lang3.StringUtils;
import org.openide.nodes.AbstractNode;
import org.openide.nodes.ChildFactory;
import org.openide.nodes.Children;
import org.openide.nodes.Node;
import org.openide.nodes.Sheet;
import org.openide.util.NbBundle;
import org.openide.util.WeakListeners;
import org.openide.util.lookup.Lookups;
import org.sleuthkit.autopsy.casemodule.Case;
import org.sleuthkit.autopsy.casemodule.NoCurrentCaseException;
import org.sleuthkit.autopsy.coreutils.Logger;
import org.sleuthkit.autopsy.guiutils.RefreshThrottler;
import org.sleuthkit.autopsy.ingest.IngestManager;
import org.sleuthkit.autopsy.ingest.IngestManager.IngestModuleEvent;
import org.sleuthkit.autopsy.ingest.ModuleDataEvent;
import org.sleuthkit.datamodel.AbstractFile;
import org.sleuthkit.datamodel.BlackboardArtifact.Category;
import org.sleuthkit.datamodel.Content;
import org.sleuthkit.datamodel.ContentVisitor;
import org.sleuthkit.datamodel.DerivedFile;
import org.sleuthkit.datamodel.Directory;
import org.sleuthkit.datamodel.File;
import org.sleuthkit.datamodel.FsContent;
import org.sleuthkit.datamodel.LayoutFile;
import org.sleuthkit.datamodel.LocalFile;
import org.sleuthkit.datamodel.Score.Priority;
import org.sleuthkit.datamodel.Score.Significance;
import org.sleuthkit.datamodel.SlackFile;
import org.sleuthkit.datamodel.SleuthkitCase;
import org.sleuthkit.datamodel.TskCoreException;
import org.sleuthkit.datamodel.VirtualDirectory;
/**
* Score content view nodes.
*/
public class ScoreContent implements AutopsyVisitableItem {
private SleuthkitCase skCase;
private final long filteringDSObjId; // 0 if not filtering/grouping by data source
@NbBundle.Messages({"ScoreContent_badFilter_text=Bad Items",
"ScoreContent_susFilter_text=Suspicious Items"})
public enum ScoreContentFilter implements AutopsyVisitableItem {
BAD_ITEM_FILTER(0, "BAD_ITEM_FILTER",
Bundle.ScoreContent_badFilter_text()),
SUS_ITEM_FILTER(1, "SUS_ITEM_FILTER",
Bundle.ScoreContent_susFilter_text());
private int id;
private String name;
private String displayName;
private ScoreContentFilter(int id, String name, String displayName) {
this.id = id;
this.name = name;
this.displayName = displayName;
}
public String getName() {
return this.name;
}
public int getId() {
return this.id;
}
public String getDisplayName() {
return this.displayName;
}
@Override
public <T> T accept(AutopsyItemVisitor<T> visitor) {
return visitor.visit(this);
}
}
/**
* Constructor assuming no data source filtering.
* @param skCase The sleuthkit case.
*/
public ScoreContent(SleuthkitCase skCase) {
this(skCase, 0);
}
/**
* Constructor.
* @param skCase The sleuthkit case.
* @param dsObjId The data source object id to filter on if > 0.
*/
public ScoreContent(SleuthkitCase skCase, long dsObjId) {
this.skCase = skCase;
this.filteringDSObjId = dsObjId;
}
/**
* @return The data source object id to filter on if > 0.
*/
long filteringDataSourceObjId() {
return this.filteringDSObjId;
}
@Override
public <T> T accept(AutopsyItemVisitor<T> visitor) {
return visitor.visit(this);
}
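// Usage note (editor's sketch, mirroring the RootContentChildren change elsewhere
// in this commit): the directory tree's node-creation visitor dispatches through
// the accept() above to build the Views-section node for this item:
//
//   @Override
//   public AbstractNode visit(ScoreContent sc) {
//       return new ScoreContent.ScoreContentsNode(sc.getSleuthkitCase(), sc.filteringDataSourceObjId());
//   }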
/**
* @return The sleuthkit case used.
*/
public SleuthkitCase getSleuthkitCase() {
return this.skCase;
}
private static final Set<Case.Events> CASE_EVENTS_OF_INTEREST = EnumSet.of(
Case.Events.DATA_SOURCE_ADDED,
Case.Events.CURRENT_CASE,
Case.Events.CONTENT_TAG_ADDED,
Case.Events.CONTENT_TAG_DELETED,
Case.Events.BLACKBOARD_ARTIFACT_TAG_ADDED,
Case.Events.BLACKBOARD_ARTIFACT_TAG_DELETED
);
private static final Set<IngestManager.IngestJobEvent> INGEST_JOB_EVENTS_OF_INTEREST = EnumSet.of(IngestManager.IngestJobEvent.COMPLETED, IngestManager.IngestJobEvent.CANCELLED);
private static final Set<IngestManager.IngestModuleEvent> INGEST_MODULE_EVENTS_OF_INTEREST = EnumSet.of(IngestModuleEvent.CONTENT_CHANGED);
/**
* Returns a property change listener that listens for events that may change the aggregate scores of files.
* @param onRefresh Action to run on refresh.
* @param onRemove Action to run to remove the listener (i.e. case close).
* @return The property change listener.
*/
private static PropertyChangeListener getPcl(final Runnable onRefresh, final Runnable onRemove) {
return (PropertyChangeEvent evt) -> {
String eventType = evt.getPropertyName();
if (eventType.equals(IngestManager.IngestModuleEvent.CONTENT_CHANGED.toString())) {
// only refresh if there is a current case.
try {
Case.getCurrentCaseThrows();
if (onRefresh != null) {
onRefresh.run();
}
} catch (NoCurrentCaseException notUsed) {
/**
* Case is closed, do nothing.
*/
}
} else if (eventType.equals(Case.Events.CURRENT_CASE.toString())) {
// case was closed. Remove listeners so that we don't get called with a stale case handle
if (evt.getNewValue() == null && onRemove != null) {
onRemove.run();
}
} else if (CASE_EVENTS_OF_INTEREST.stream().anyMatch(caseEvt -> caseEvt.toString().equals(eventType))) {
// only refresh if there is a current case.
try {
Case.getCurrentCaseThrows();
if (onRefresh != null) {
onRefresh.run();
}
} catch (NoCurrentCaseException notUsed) {
/**
* Case is closed, do nothing.
*/
}
}
};
}
/**
* The SQL WHERE statement for the files.
* @param filter The filter type.
* @param filteringDSObjId The data source object id to filter on if > 0.
* @return The SQL WHERE statement.
* @throws IllegalArgumentException If the filter type is not supported.
*/
private static String getFileFilter(ScoreContent.ScoreContentFilter filter, long filteringDSObjId) throws IllegalArgumentException {
String aggregateScoreFilter = "";
switch (filter) {
case SUS_ITEM_FILTER:
aggregateScoreFilter = " tsk_aggregate_score.significance = " + Significance.LIKELY_NOTABLE.getId() + " AND (tsk_aggregate_score.priority = " + Priority.NORMAL.getId() + " OR tsk_aggregate_score.priority = " + Priority.OVERRIDE.getId() + " )";
break;
case BAD_ITEM_FILTER:
aggregateScoreFilter = " tsk_aggregate_score.significance = " + Significance.NOTABLE.getId() + " AND (tsk_aggregate_score.priority = " + Priority.NORMAL.getId() + " OR tsk_aggregate_score.priority = " + Priority.OVERRIDE.getId() + " )";
break;
default:
throw new IllegalArgumentException(MessageFormat.format("Unsupported filter type to get suspect content: {0}", filter));
}
String query = " obj_id IN (SELECT tsk_aggregate_score.obj_id FROM tsk_aggregate_score WHERE " + aggregateScoreFilter + ") ";
if (filteringDSObjId > 0) {
query += " AND data_source_obj_id = " + filteringDSObjId;
}
return query;
}
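// Illustration (editor's sketch, not part of the commit; the numeric ids come from
// the Score enums at runtime): for BAD_ITEM_FILTER with a data source filter, the
// returned WHERE fragment has this shape:
//
//   obj_id IN (SELECT tsk_aggregate_score.obj_id FROM tsk_aggregate_score
//              WHERE tsk_aggregate_score.significance = <NOTABLE id>
//                AND (tsk_aggregate_score.priority = <NORMAL id>
//                     OR tsk_aggregate_score.priority = <OVERRIDE id>))
//   AND data_source_obj_id = <dsObjId>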
/**
* Checks for analysis results added to the case that could affect the
* aggregate score of the file.
*
* @param evt The event.
* @return True if the event indicates that an analysis result was added.
*/
private static boolean isRefreshRequired(PropertyChangeEvent evt) {
String eventType = evt.getPropertyName();
if (eventType.equals(IngestManager.IngestModuleEvent.DATA_ADDED.toString())) {
// check if current case is active before updating
try {
Case.getCurrentCaseThrows();
final ModuleDataEvent event = (ModuleDataEvent) evt.getOldValue();
if (null != event && Category.ANALYSIS_RESULT.equals(event.getBlackboardArtifactType().getCategory())) {
return true;
}
} catch (NoCurrentCaseException notUsed) {
/**
* Case is closed, do nothing.
*/
}
}
return false;
}
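// Example (editor's note, hedged): a DATA_ADDED event carrying an analysis result,
// such as a hash set hit (Category.ANALYSIS_RESULT), requires a refresh because it
// can change a file's aggregate score; an event for a data artifact, such as a web
// bookmark (Category.DATA_ARTIFACT), does not.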
/**
* Parent node in views section for content with score.
*/
public static class ScoreContentsNode extends DisplayableItemNode {
@NbBundle.Messages("ScoreContent_ScoreContentNode_name=Score")
private static final String NAME = Bundle.ScoreContent_ScoreContentNode_name();
ScoreContentsNode(SleuthkitCase skCase, long datasourceObjId) {
super(Children.create(new ScoreContentsChildren(skCase, datasourceObjId), true), Lookups.singleton(NAME));
super.setName(NAME);
super.setDisplayName(NAME);
this.setIconBaseWithExtension("org/sleuthkit/autopsy/images/red-circle-exclamation.png"); //NON-NLS
}
@Override
public boolean isLeafTypeNode() {
return false;
}
@Override
public <T> T accept(DisplayableItemNodeVisitor<T> visitor) {
return visitor.visit(this);
}
@Override
@NbBundle.Messages({
"ScoreContent_createSheet_name_displayName=Name",
"ScoreContent_createSheet_name_desc=no description"})
protected Sheet createSheet() {
Sheet sheet = super.createSheet();
Sheet.Set sheetSet = sheet.get(Sheet.PROPERTIES);
if (sheetSet == null) {
sheetSet = Sheet.createPropertiesSet();
sheet.put(sheetSet);
}
sheetSet.put(new NodeProperty<>("Name", //NON-NLS
Bundle.ScoreContent_createSheet_name_displayName(),
Bundle.ScoreContent_createSheet_name_desc(),
NAME));
return sheet;
}
@Override
public String getItemType() {
return getClass().getName();
}
}
/**
* Children that display a node for Bad Items and Suspicious Items.
*/
public static class ScoreContentsChildren extends ChildFactory.Detachable<ScoreContent.ScoreContentFilter> implements RefreshThrottler.Refresher {
private SleuthkitCase skCase;
private final long datasourceObjId;
private final RefreshThrottler refreshThrottler = new RefreshThrottler(this);
private final PropertyChangeListener pcl = getPcl(
() -> ScoreContentsChildren.this.refresh(false),
() -> ScoreContentsChildren.this.removeNotify());
private final PropertyChangeListener weakPcl = WeakListeners.propertyChange(pcl, null);
private final Map<ScoreContentFilter, ScoreContentsChildren.ScoreContentNode> typeNodeMap = new HashMap<>();
public ScoreContentsChildren(SleuthkitCase skCase, long dsObjId) {
this.skCase = skCase;
this.datasourceObjId = dsObjId;
}
@Override
protected void addNotify() {
super.addNotify();
refreshThrottler.registerForIngestModuleEvents();
IngestManager.getInstance().addIngestJobEventListener(INGEST_JOB_EVENTS_OF_INTEREST, weakPcl);
IngestManager.getInstance().addIngestModuleEventListener(INGEST_MODULE_EVENTS_OF_INTEREST, weakPcl);
Case.addEventTypeSubscriber(CASE_EVENTS_OF_INTEREST, weakPcl);
}
@Override
protected void removeNotify() {
refreshThrottler.unregisterEventListener();
IngestManager.getInstance().removeIngestJobEventListener(INGEST_JOB_EVENTS_OF_INTEREST, weakPcl);
IngestManager.getInstance().removeIngestModuleEventListener(INGEST_MODULE_EVENTS_OF_INTEREST, weakPcl);
Case.removeEventTypeSubscriber(CASE_EVENTS_OF_INTEREST, weakPcl);
typeNodeMap.clear();
}
@Override
public void refresh() {
refresh(false);
}
@Override
public boolean isRefreshRequired(PropertyChangeEvent evt) {
return ScoreContent.isRefreshRequired(evt);
}
@Override
protected boolean createKeys(List<ScoreContent.ScoreContentFilter> list) {
list.addAll(Arrays.asList(ScoreContent.ScoreContentFilter.values()));
typeNodeMap.values().forEach(nd -> nd.updateDisplayName());
return true;
}
@Override
protected Node createNodeForKey(ScoreContent.ScoreContentFilter key) {
ScoreContentsChildren.ScoreContentNode nd = new ScoreContentsChildren.ScoreContentNode(skCase, key, datasourceObjId);
typeNodeMap.put(key, nd);
return nd;
}
/**
* Parent node showing files matching a score filter.
*/
public class ScoreContentNode extends DisplayableItemNode {
private static final Logger logger = Logger.getLogger(ScoreContentNode.class.getName());
private final ScoreContent.ScoreContentFilter filter;
private final long datasourceObjId;
ScoreContentNode(SleuthkitCase skCase, ScoreContent.ScoreContentFilter filter, long dsObjId) {
super(Children.create(new ScoreContentChildren(filter, skCase, dsObjId), true), Lookups.singleton(filter.getDisplayName()));
this.filter = filter;
this.datasourceObjId = dsObjId;
init();
}
private void init() {
super.setName(filter.getName());
String tooltip = filter.getDisplayName();
this.setShortDescription(tooltip);
switch (this.filter) {
case SUS_ITEM_FILTER:
this.setIconBaseWithExtension("org/sleuthkit/autopsy/images/yellow-circle-yield.png"); //NON-NLS
break;
default:
case BAD_ITEM_FILTER:
this.setIconBaseWithExtension("org/sleuthkit/autopsy/images/red-circle-exclamation.png"); //NON-NLS
break;
}
updateDisplayName();
}
void updateDisplayName() {
//get count of children without preloading all child nodes
long count = 0;
try {
count = calculateItems(skCase, filter, datasourceObjId);
} catch (TskCoreException ex) {
logger.log(Level.WARNING, "An error occurred while fetching file counts", ex);
}
super.setDisplayName(filter.getDisplayName() + " (" + count + ")");
}
/**
* Get the children count without actually loading all nodes.
*
* @param sleuthkitCase   The case database.
* @param filter          The score filter.
* @param datasourceObjId The data source object id to filter on if > 0.
*
* @return The number of matching files.
*/
private static long calculateItems(SleuthkitCase sleuthkitCase, ScoreContent.ScoreContentFilter filter, long datasourceObjId) throws TskCoreException {
return sleuthkitCase.countFilesWhere(getFileFilter(filter, datasourceObjId));
}
@Override
public <T> T accept(DisplayableItemNodeVisitor<T> visitor) {
return visitor.visit(this);
}
@Override
@NbBundle.Messages({
"ScoreContent_createSheet_filterType_displayName=Type",
"ScoreContent_createSheet_filterType_desc=no description"})
protected Sheet createSheet() {
Sheet sheet = super.createSheet();
Sheet.Set sheetSet = sheet.get(Sheet.PROPERTIES);
if (sheetSet == null) {
sheetSet = Sheet.createPropertiesSet();
sheet.put(sheetSet);
}
sheetSet.put(new NodeProperty<>("Type", //NON_NLS
Bundle.ScoreContent_createSheet_filterType_displayName(),
Bundle.ScoreContent_createSheet_filterType_desc(),
filter.getDisplayName()));
return sheet;
}
@Override
public boolean isLeafTypeNode() {
return true;
}
@Override
public String getItemType() {
return DisplayableItemNode.FILE_PARENT_NODE_KEY;
}
}
/**
* Children showing files for a score filter.
*/
static class ScoreContentChildren extends BaseChildFactory<AbstractFile> implements RefreshThrottler.Refresher {
private final RefreshThrottler refreshThrottler = new RefreshThrottler(this);
private final PropertyChangeListener pcl = getPcl(
() -> ScoreContentChildren.this.refresh(false),
() -> ScoreContentChildren.this.removeNotify());
private final PropertyChangeListener weakPcl = WeakListeners.propertyChange(pcl, null);
private final SleuthkitCase skCase;
private final ScoreContent.ScoreContentFilter filter;
private static final Logger logger = Logger.getLogger(ScoreContentChildren.class.getName());
private final long datasourceObjId;
ScoreContentChildren(ScoreContent.ScoreContentFilter filter, SleuthkitCase skCase, long datasourceObjId) {
super(filter.getName(), new ViewsKnownAndSlackFilter<>());
this.skCase = skCase;
this.filter = filter;
this.datasourceObjId = datasourceObjId;
}
@Override
protected void onAdd() {
refreshThrottler.registerForIngestModuleEvents();
IngestManager.getInstance().addIngestJobEventListener(INGEST_JOB_EVENTS_OF_INTEREST, weakPcl);
IngestManager.getInstance().addIngestModuleEventListener(INGEST_MODULE_EVENTS_OF_INTEREST, weakPcl);
Case.addEventTypeSubscriber(CASE_EVENTS_OF_INTEREST, weakPcl);
}
@Override
protected void onRemove() {
refreshThrottler.unregisterEventListener();
IngestManager.getInstance().removeIngestJobEventListener(INGEST_JOB_EVENTS_OF_INTEREST, weakPcl);
IngestManager.getInstance().removeIngestModuleEventListener(INGEST_MODULE_EVENTS_OF_INTEREST, weakPcl);
Case.removeEventTypeSubscriber(CASE_EVENTS_OF_INTEREST, weakPcl);
}
@Override
public void refresh() {
refresh(false);
}
@Override
public boolean isRefreshRequired(PropertyChangeEvent evt) {
return ScoreContent.isRefreshRequired(evt);
}
private List<AbstractFile> runFsQuery() {
List<AbstractFile> ret = new ArrayList<>();
String query = null;
try {
query = getFileFilter(filter, datasourceObjId);
ret = skCase.findAllFilesWhere(query);
} catch (TskCoreException | IllegalArgumentException e) {
logger.log(Level.SEVERE, "Error getting files for the deleted content view using: " + StringUtils.defaultString(query, "<null>"), e); //NON-NLS
}
return ret;
}
@Override
protected List<AbstractFile> makeKeys() {
return runFsQuery();
}
@Override
protected Node createNodeForKey(AbstractFile key) {
return key.accept(new ContentVisitor.Default<AbstractNode>() {
public FileNode visit(AbstractFile f) {
return new FileNode(f, false);
}
public FileNode visit(FsContent f) {
return new FileNode(f, false);
}
@Override
public FileNode visit(LayoutFile f) {
return new FileNode(f, false);
}
@Override
public FileNode visit(File f) {
return new FileNode(f, false);
}
@Override
public FileNode visit(Directory f) {
return new FileNode(f, false);
}
@Override
public FileNode visit(VirtualDirectory f) {
return new FileNode(f, false);
}
@Override
public AbstractNode visit(SlackFile sf) {
return new FileNode(sf, false);
}
@Override
public AbstractNode visit(LocalFile lf) {
return new FileNode(lf, false);
}
@Override
public AbstractNode visit(DerivedFile df) {
return new FileNode(df, false);
}
@Override
protected AbstractNode defaultVisit(Content di) {
if (di instanceof AbstractFile) {
return visit((AbstractFile) di);
} else {
throw new UnsupportedOperationException("Not supported for this type of Displayable Item: " + di.toString());
}
}
});
}
}
}
}

View File

@ -46,7 +46,8 @@ public class ViewsNode extends DisplayableItemNode {
// add it back in if we can filter the results to a more manageable size.
// new RecentFiles(sleuthkitCase),
new DeletedContent(sleuthkitCase, dsObjId),
new FileSize(sleuthkitCase, dsObjId))
new FileSize(sleuthkitCase, dsObjId),
new ScoreContent(sleuthkitCase, dsObjId))
),
Lookups.singleton(NAME)
);

View File

@ -86,7 +86,7 @@ public class ALeappAnalyzerIngestModule implements DataSourceIngestModule {
@NbBundle.Messages({
"ALeappAnalyzerIngestModule.executable.not.found=aLeapp Executable Not Found.",
"ALeappAnalyzerIngestModule.requires.windows=aLeapp module requires windows.",
"ALeappAnalyzerIngestModule.error.ileapp.file.processor.init=Failure to initialize aLeappProcessFile"})
"ALeappAnalyzerIngestModule.error.aleapp.file.processor.init=Failure to initialize aLeappProcessFile"})
@Override
public void startUp(IngestJobContext context) throws IngestModuleException {
this.context = context;
@ -102,7 +102,7 @@ public class ALeappAnalyzerIngestModule implements DataSourceIngestModule {
try {
aLeappFileProcessor = new LeappFileProcessor(XMLFILE, ALeappAnalyzerModuleFactory.getModuleName(), ALEAPP, context);
} catch (IOException | IngestModuleException | NoCurrentCaseException ex) {
throw new IngestModuleException(Bundle.ALeappAnalyzerIngestModule_error_ileapp_file_processor_init(), ex);
throw new IngestModuleException(Bundle.ALeappAnalyzerIngestModule_error_aleapp_file_processor_init(), ex);
}
try {
@ -148,7 +148,7 @@ public class ALeappAnalyzerIngestModule implements DataSourceIngestModule {
writeErrorMsgToIngestInbox();
return ProcessResult.ERROR;
}
aLeappPathsToProcess = loadIleappPathFile(tempOutputPath);
aLeappPathsToProcess = loadAleappPathFile(tempOutputPath);
if (aLeappPathsToProcess.isEmpty()) {
logger.log(Level.SEVERE, String.format("Error getting file paths to search, list is empty"));
writeErrorMsgToIngestInbox();
@ -178,7 +178,7 @@ public class ALeappAnalyzerIngestModule implements DataSourceIngestModule {
}
statusHelper.switchToIndeterminate();
statusHelper.progress(Bundle.ILeappAnalyzerIngestModule_processing_iLeapp_results());
statusHelper.progress(Bundle.ALeappAnalyzerIngestModule_processing_aLeapp_results());
extractFilesFromDataSource(dataSource, aLeappPathsToProcess, tempOutputPath);
processALeappFs(dataSource, currentCase, statusHelper, tempOutputPath.toString());
@ -219,7 +219,7 @@ public class ALeappAnalyzerIngestModule implements DataSourceIngestModule {
return;
}
addILeappReportToReports(moduleOutputPath, currentCase);
addALeappReportToReports(moduleOutputPath, currentCase);
} catch (IOException ex) {
logger.log(Level.SEVERE, String.format("Error when trying to execute aLeapp program against file %s", aLeappFile.getLocalAbsPath()), ex);
@ -227,7 +227,7 @@ public class ALeappAnalyzerIngestModule implements DataSourceIngestModule {
}
if (context.dataSourceIngestIsCancelled()) {
logger.log(Level.INFO, "ILeapp Analyser ingest module run was canceled"); //NON-NLS
logger.log(Level.INFO, "aLeapp Analyser ingest module run was canceled"); //NON-NLS
return;
}
@ -262,7 +262,7 @@ public class ALeappAnalyzerIngestModule implements DataSourceIngestModule {
return;
}
addILeappReportToReports(moduleOutputPath, currentCase);
addALeappReportToReports(moduleOutputPath, currentCase);
} catch (IOException ex) {
logger.log(Level.SEVERE, String.format("Error when trying to execute aLeapp program against file system"), ex);
@ -270,7 +270,7 @@ public class ALeappAnalyzerIngestModule implements DataSourceIngestModule {
}
if (context.dataSourceIngestIsCancelled()) {
logger.log(Level.INFO, "ILeapp Analyser ingest module run was canceled"); //NON-NLS
logger.log(Level.INFO, "aLeapp Analyser ingest module run was canceled"); //NON-NLS
return;
}
@ -335,7 +335,7 @@ public class ALeappAnalyzerIngestModule implements DataSourceIngestModule {
* Find the index.html file in the aLeapp output directory so it can be
* added to reports
*/
private void addILeappReportToReports(Path aLeappOutputDir, Case currentCase) {
private void addALeappReportToReports(Path aLeappOutputDir, Case currentCase) {
List<String> allIndexFiles = new ArrayList<>();
try (Stream<Path> walk = Files.walk(aLeappOutputDir)) {
@ -363,7 +363,7 @@ public class ALeappAnalyzerIngestModule implements DataSourceIngestModule {
* Reads the aLeapp paths file to get the paths that we want to extract
*
*/
private List<String> loadIleappPathFile(Path moduleOutputPath) throws FileNotFoundException, IOException {
private List<String> loadAleappPathFile(Path moduleOutputPath) throws FileNotFoundException, IOException {
List<String> aLeappPathsToProcess = new ArrayList<>();
Path filePath = Paths.get(moduleOutputPath.toString(), ALEAPP_PATHS_FILE);

View File

@ -1,7 +1,7 @@
ALeappAnalyzerIngestModule.aLeapp.cancelled=aLeapp run was canceled
ALeappAnalyzerIngestModule.completed=aLeapp Processing Completed
ALeappAnalyzerIngestModule.error.aleapp.file.processor.init=Failure to initialize aLeappProcessFile
ALeappAnalyzerIngestModule.error.creating.output.dir=Error creating aLeapp module output directory.
ALeappAnalyzerIngestModule.error.ileapp.file.processor.init=Failure to initialize aLeappProcessFile
ALeappAnalyzerIngestModule.error.running.aLeapp=Error running aLeapp, see log file.
ALeappAnalyzerIngestModule.executable.not.found=aLeapp Executable Not Found.
ALeappAnalyzerIngestModule.has.run=aLeapp

View File

@ -1292,7 +1292,7 @@ public final class LeappFileProcessor {
private static final Set<String> ALLOWED_EXTENSIONS = new HashSet<>(Arrays.asList("zip", "tar", "tgz"));
/**
* Find the files that will be processed by the iLeapp program
* Find the files that will be processed by the Leapp program
*
* @param dataSource
*

View File

@ -10,6 +10,20 @@ The ad hoc keyword search feature allows you to run single keyword terms or lists
The \ref keyword_search_page must be selected during ingest before doing an ad hoc keyword search. If you don't want to search for any of the existing keyword lists, you can deselect everything to just index the files for later searching.
\subsection adhoc_limitations Limitations of Ad Hoc Keyword Search
With the release of Autopsy 4.21.0, two types of keyword searching are supported: Solr search with full text indexing and the built-in Autopsy "In-Line" Keyword Search.
Enabling full text indexing with Solr during the ingest process allows for comprehensive ad-hoc manual text searching, encompassing all of the extracted text from files and artifacts.
On the other hand, the In-Line Keyword Search conducts the search during ingest, at the time of text extraction, and indexes only the small sections of files that contain keyword matches (for display purposes). Consequently, unless full text indexing with Solr is enabled, ad hoc searches are restricted to those limited sections of files that had keyword hits, which significantly reduces the amount of searchable text.
Other situations that prevent searching all of the text extracted from files and artifacts include:
<ul>
<li>If file filtering was used during ingest, resulting in only a subset of files getting ingested. See \ref file_filters for information on file filtering.
<li>If the Autopsy case contains multiple data sources and one or more of them was not indexed during its ingest.
</ul>
\section ad_hoc_kw_types_section Creating Keywords
The following sections will give a description of each keyword type, then will show some sample text and how various search terms would work against it.
@ -36,7 +50,7 @@ Substring match should be used where the search term is just part of a word, or
- "UMP", "oX" will match
- "y dog", "ish-brown" will not match
## Regex match
\subsection regex_match Regex match
Regex match can be used to search for a specific pattern. Regular expressions are supported using Lucene Regex Syntax, which is documented here: https://www.elastic.co/guide/en/elasticsearch/reference/1.6/query-dsl-regexp-query.html#regexp-syntax. For example, the expression [0-9]{3}-[0-9]{4} would match phone-number-like fragments such as 555-1234. Wildcards are automatically added to the beginning and end of the regular expressions to ensure all matches are found. Additionally, the resulting hits are split on common token separator boundaries (e.g., space, newline, colon, exclamation point) to make the resulting keyword hit more amenable to highlighting. As of Autopsy 4.9, regex searches are no longer case sensitive. This includes literal characters and character classes.
@ -70,9 +84,12 @@ If you want to override this default behavior:
### Non-Latin text
In general, all three types of keyword searches will work as expected, but the feature has not been thoroughly tested with all character sets. For example, the searches may no longer be case-insensitive. As with regex above, we suggest testing on a sample file.
### Differences between "In-Line" and Solr regular expression search
It's important to be aware that there might be occasional differences in the results of regular expression searches between the "In-Line" Keyword Search and Solr search. This is because the "In-Line" Keyword Search uses Java regular expressions, while Solr search employs Lucene regular expressions.
\section ad_hoc_kw_search Keyword Search
Individual keyword or regular expressions can quickly be searched using the search text box widget. You can select "Exact Match", "Substring Match" and "Regular Expression" match. See the earlier \ref ad_hoc_kw_types_section section for information on each keyword type. The search can be restricted to only certain data sources by selecting the checkbox near the bottom and then highlighting the data sources to search within. Multiple data sources can be selected used shift+left click or control+left click. The "Save search results" checkbox determines whether the search results will be saved to the case database.
Individual keywords or regular expressions can quickly be searched using the search text box widget. You can select "Exact Match", "Substring Match" and "Regular Expression" match. See the earlier \ref ad_hoc_kw_types_section section for information on each keyword type, as well as \ref adhoc_limitations. The search can be restricted to only certain data sources by selecting the checkbox near the bottom and then highlighting the data sources to search within. Multiple data sources can be selected using shift+left click or control+left click. The "Save search results" checkbox determines whether the search results will be saved to the case database.
\image html keyword-search-bar.PNG
@ -92,14 +109,5 @@ If the "Save search results" checkbox was enabled, the results of the keyword li
\image html keyword-search-list-results.PNG
\section ad_hoc_during_ingest Doing ad hoc searches during ingest
Ad hoc searches are intended to be used after ingest completes, but can be used in a limited capacity while ingest is ongoing.
Manual \ref ad_hoc_kw_search for individual keywords or regular expressions can be executed while ingest is ongoing, using the current index. Note, however, that you may miss some results if the entire index has not yet been populated. Autopsy enables you to perform the search on an incomplete index in order to retrieve some preliminary results in real time.
During ingest, the normal manual search using \ref ad_hoc_kw_lists behaves differently than after ingest is complete: a selected list is instead added to the ingest job and searched in the background.
Most keyword management features are disabled during ingest. You cannot edit keyword lists, but you can create new lists (without adding keywords to them) and copy and export existing lists.
*/

Binary file not shown. (Before: 52 KiB, After: 28 KiB)

Binary file not shown. (Before: 118 KiB, After: 91 KiB)

Binary file not shown. (Before: 59 KiB, After: 59 KiB)

Binary file not shown. (Before: 73 KiB, After: 500 KiB)

Binary file not shown. (Before: 16 KiB, After: 23 KiB)

Binary file not shown. (Before: 101 KiB, After: 113 KiB)

View File

@ -5,22 +5,30 @@
\section keyword_module_overview What Does It Do
The Keyword Search module facilitates both the \ref ingest_page "ingest" portion of searching and also supports manual text searching after ingest has completed (see \ref ad_hoc_keyword_search_page). It extracts text from files being ingested, selected reports generated by other modules, and results generated by other modules. This extracted text is then added to a Solr index that can then be searched.
The Keyword Search module facilitates both the \ref ingest_page "ingest" portion of searching and also supports manual text searching after ingest has completed (see \ref ad_hoc_keyword_search_page). It extracts text from files being ingested, selected reports generated by other modules, and results generated by other modules.
Autopsy tries its best to extract the maximum amount of text from the files being indexed. First, the indexing will try to extract text from supported file formats, such as pure text file format, MS Office Documents, PDF files, Email, and many others. If the file is not supported by the standard text extractor, Autopsy will fall back to a string extraction algorithm. String extraction on unknown file formats or arbitrary binary files can often extract a sizeable amount of text from a file, often enough to provide additional clues to reviewers. String extraction will not extract text strings from encrypted files.
Autopsy tries its best to extract the maximum amount of text from the files being indexed. First, it will try to extract text from supported file formats, such as pure text file format, MS Office Documents, PDF files, Email, and many others. If the file is not supported by the standard text extractor, Autopsy will fall back to a string extraction algorithm. String extraction on unknown file formats or arbitrary binary files can often extract a sizeable amount of text from a file, often enough to provide additional clues to reviewers. String extraction will not extract text strings from encrypted files.
Autopsy ships with some built-in lists that define regular expressions and enable the user to search for Phone Numbers, IP addresses, URLs and E-mail addresses. However, enabling some of these very general lists can produce a very large number of hits, many of which can be false positives. Regular expression searches can also take a long time to complete.
Once files are placed in the Solr index, they can be searched quickly for specific keywords, regular expressions, or keyword search lists that can contain a mixture of keywords and regular expressions. Search queries can be executed automatically during the ingest run or at the end of the ingest, depending on the current settings and the time it takes to ingest the image.
Refer to \ref ad_hoc_keyword_search_page for more details on specifying regular expressions and other types of searches.
With the release of Autopsy 4.21.0, two types of keyword searching are supported: Solr search with full text indexing and the built-in Autopsy "In-Line" Keyword Search. For detailed information on configuring the search type, refer to \ref keyword_ingest_settings.
\subsection keyword_SolrSearch Solr Search With Indexing
Full text indexing with Solr provides users with the flexibility to perform ad-hoc manual text searches after the ingest process is complete (see \ref ad_hoc_keyword_search_page). However, it's important to note that full text indexing can significantly slow down the ingest speed for large data sources and cases. Once files are indexed in the Solr index, they can be quickly searched for specific keywords, regular expressions, or a combination of both in keyword search lists.
\subsection keyword_InlineSearch In-Line Keyword Search
On the other hand, the In-Line Keyword Search conducts keyword searches during the ingest process, at the time of text extraction, and indexes only the small sections of files that contain keyword matches (for display purposes). Our profiling runs indicate that, in most cases, this roughly halves data source ingest time compared to ingesting and searching the same data source with Solr indexing. However, a drawback of this method is that all search terms must be specified before ingest begins, and there is no option to perform ad hoc searches on the entire extracted text after ingest completes.
\section keyword_search_configuration_dialog Keyword Search Configuration Dialog
The keyword search configuration dialog has three tabs, each with its own purpose:
\li The \ref keyword_keywordListsTab is used to add, remove, and modify keyword search lists.
\li The \ref keyword_stringExtractionTab is used to enable language scripts and extraction type.
\li The \ref keyword_generalSettingsTab is used to configure the ingest timings and display information.
\li The \ref keyword_generalSettingsTab is used to configure display information.
\subsection keyword_keywordListsTab Lists tab
@ -57,23 +65,20 @@ The user can also use the String Viewer first and try different script/language
\subsubsection keyword_nsrl NIST NSRL Support
The hash lookup ingest service can be configured to use the NIST NSRL hash set of known files. The keyword search advanced configuration dialog "General" tab contains an option to skip keyword indexing and searching for files that have previously been marked as "known" and uninteresting. Selecting this option can greatly reduce the size of the index and improve ingest performance. In most cases, the user does not need to keyword search for "known" files.
\subsubsection keyword_update_freq Result update frequency during ingest
To control how frequently searches are executed during ingest, the user can adjust the timing setting available in the keyword search advanced configuration dialog "General" tab. Setting the number of minutes lower will result in more frequent index updates and searches being executed and the user will be able to see results more in real-time. However, more frequent updates can affect the overall performance, especially on lower-end systems, and can potentially lengthen the overall time needed for the ingest to complete.
One can also choose to have no periodic searches. This will speed up the ingest. Users choosing this option can run their keyword searches once the entire keyword search index is complete.
\section keyword_usage Using the Module
Search queries can be executed manually by the user at any time, as long as there are some files already indexed and ready to be searched. Searching before indexing is complete will naturally only search indexes that are already compiled.
See \ref ingest_page "Ingest" for more information on ingest in general.
Once there are files in the index, \ref ad_hoc_keyword_search_page will be available for use to manually search at any time.
After the ingest has completed, \ref ad_hoc_keyword_search_page will be available for manual search. The files and text available for ad hoc search depend on the Keyword Search module settings at the time of ingest. See section \ref adhoc_limitations for details.
\subsection keyword_ingest_settings Ingest Settings
The Ingest Settings for the Keyword Search module allow the user to enable or disable the specific built-in search expressions (Phone Numbers, IP Addresses, Email Addresses, and URLs). Using the Advanced button (covered below), one can add custom keyword groups.
With the release of Autopsy 4.21.0, two types of keyword searching are supported: Solr search with full text indexing and the built-in Autopsy "In-Line" Keyword Search. See \ref keyword_ingest_settings for details regarding search type configuration. See sections \ref keyword_SolrSearch and \ref keyword_InlineSearch for details of each search type.
To select the keyword search type, you can use the "Add text to Solr Index" checkbox. When this checkbox is unchecked, Autopsy will perform the "In-Line" Keyword Search during ingest. However, most of the extracted text will not be indexed by Solr, effectively disabling the functionality described in \ref ad_hoc_keyword_search_page. On the other hand, if the checkbox is selected, Autopsy will perform the "In-Line" Keyword Search during ingest and also add all of the extracted text to the Solr index. This allows you to search the indexed text later using \ref ad_hoc_keyword_search_page.
Additionally, it's important to be aware that there might be occasional differences in the results of regular expression searches between the "In-Line" Keyword Search and Solr search. This is because the "In-Line" Keyword Search uses Java regular expressions, while Solr search employs Lucene regular expressions. You can find more details about this in \ref regex_match.
\image html keyword-search-ingest-settings.PNG
\subsubsection keyword_ocr Optical Character Recognition

thirdparty/aLeapp/Version.txt (vendored, new file, 1 line added)
View File

@ -0,0 +1 @@
ALEAPP v3.1.6: Android Logs, Events, and Protobuf Parser

Binary file not shown.

thirdparty/iLeapp/Version.txt (vendored, new file, 1 line added)
View File

@ -0,0 +1 @@
iLEAPP v1.18.6: iLEAPP Logs, Events, and Properties Parser

Binary file not shown.