remove TextMarkupLookup interface

millmanorama 2017-02-15 10:52:01 +01:00
parent 87a0e8ad69
commit edd03a66c1
6 changed files with 79 additions and 196 deletions

View File

@@ -21,6 +21,7 @@ package org.sleuthkit.autopsy.corecomponents;
 import org.openide.nodes.FilterNode;
 import org.openide.nodes.Node;
 import org.openide.util.NbBundle;
+import org.openide.util.lookup.Lookups;

 /**
  * A filter node that creates at most one layer of child nodes for the node it
@@ -44,7 +45,7 @@ public class TableFilterNode extends FilterNode {
      * The constructor should include column order key. (See getColumnOrderKey)
      */
     public TableFilterNode(Node wrappedNode, boolean createChildren) {
-        super(wrappedNode, TableFilterChildren.createInstance(wrappedNode, createChildren));
+        super(wrappedNode, TableFilterChildren.createInstance(wrappedNode, createChildren), Lookups.proxy(wrappedNode));
         this.createChildren = createChildren;
     }
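The notable change here is the third super argument: Lookups.proxy(wrappedNode) makes the filter node's lookup delegate to the wrapped node's lookup on every query instead of owning a fixed one, presumably so the content-viewer changes further down can find objects placed in the wrapped node's lookup through the table node. A minimal sketch of that delegation using only the NetBeans Lookup API, with plain stand-in objects rather than Autopsy types:

import org.openide.util.Lookup;
import org.openide.util.lookup.Lookups;

public class ProxyLookupSketch {

    public static void main(String[] args) {
        // Stand-in for the wrapped node's lookup contents.
        Lookup wrapped = Lookups.fixed("some content object", 42L);

        // Lookups.proxy(provider) consults provider.getLookup() on each query,
        // so results always reflect the delegate's current contents.
        Lookup proxy = Lookups.proxy(() -> wrapped);

        System.out.println(proxy.lookup(String.class)); // "some content object"
        System.out.println(proxy.lookup(Long.class));   // 42
    }
}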

View File

@@ -454,12 +454,12 @@ public class BlackboardArtifactNode extends DisplayableItemNode {
             forLookup.add(content);
         }

-        // if there is a text highlighted version, of the content, add it too
-        // currently happens from keyword search module
-        TextMarkupLookup highlight = getHighlightLookup(artifact, content);
-        if (highlight != null) {
-            forLookup.add(highlight);
-        }
+        // // if there is a text highlighted version, of the content, add it too
+        // // currently happens from keyword search module
+        // TextMarkupLookup highlight = getHighlightLookup(artifact, content);
+        // if (highlight != null) {
+        //     forLookup.add(highlight);
+        // }

         return Lookups.fixed(forLookup.toArray(new Object[forLookup.size()]));
     }
@@ -474,35 +474,35 @@ public class BlackboardArtifactNode extends DisplayableItemNode {
                 NbBundle.getMessage(BlackboardArtifactNode.class, "BlackboardArtifactNode.getAssocCont.exception.msg"));
     }

-    private static TextMarkupLookup getHighlightLookup(BlackboardArtifact artifact, Content content) {
-        if (artifact.getArtifactTypeID() != BlackboardArtifact.ARTIFACT_TYPE.TSK_KEYWORD_HIT.getTypeID()) {
-            return null;
-        }
-
-        Lookup lookup = Lookup.getDefault();
-        TextMarkupLookup highlightFactory = lookup.lookup(TextMarkupLookup.class);
-        try {
-            List<BlackboardAttribute> attributes = artifact.getAttributes();
-            String keyword = null;
-            String regexp = null;
-            for (BlackboardAttribute att : attributes) {
-                final int attributeTypeID = att.getAttributeType().getTypeID();
-                if (attributeTypeID == BlackboardAttribute.ATTRIBUTE_TYPE.TSK_KEYWORD.getTypeID()) {
-                    keyword = att.getValueString();
-                } else if (attributeTypeID == BlackboardAttribute.ATTRIBUTE_TYPE.TSK_KEYWORD_REGEXP.getTypeID()) {
-                    regexp = att.getValueString();
-                }
-            }
-            if (keyword != null) {
-                boolean isRegexp = StringUtils.isNotBlank(regexp);
-                String origQuery = isRegexp ? regexp : keyword;
-                return highlightFactory.createInstance(artifact.getArtifactID(), keyword, isRegexp, origQuery);
-            }
-        } catch (TskCoreException ex) {
-            LOGGER.log(Level.WARNING, "Failed to retrieve Blackboard Attributes", ex); //NON-NLS
-        }
-        return null;
-    }
+    // private static TextMarkupLookup getHighlightLookup(BlackboardArtifact artifact, Content content) {
+    //     if (artifact.getArtifactTypeID() != BlackboardArtifact.ARTIFACT_TYPE.TSK_KEYWORD_HIT.getTypeID()) {
+    //         return null;
+    //     }
+    //
+    //     Lookup lookup = Lookup.getDefault();
+    //     TextMarkupLookup highlightFactory = lookup.lookup(TextMarkupLookup.class);
+    //     try {
+    //         List<BlackboardAttribute> attributes = artifact.getAttributes();
+    //         String keyword = null;
+    //         String regexp = null;
+    //         for (BlackboardAttribute att : attributes) {
+    //             final int attributeTypeID = att.getAttributeType().getTypeID();
+    //             if (attributeTypeID == BlackboardAttribute.ATTRIBUTE_TYPE.TSK_KEYWORD.getTypeID()) {
+    //                 keyword = att.getValueString();
+    //             } else if (attributeTypeID == BlackboardAttribute.ATTRIBUTE_TYPE.TSK_KEYWORD_REGEXP.getTypeID()) {
+    //                 regexp = att.getValueString();
+    //             }
+    //         }
+    //         if (keyword != null) {
+    //             boolean isRegexp = StringUtils.isNotBlank(regexp);
+    //             String origQuery = isRegexp ? regexp : keyword;
+    //             return highlightFactory.createInstance(artifact.getArtifactID(), keyword, isRegexp, origQuery);
+    //         }
+    //     } catch (TskCoreException ex) {
+    //         LOGGER.log(Level.WARNING, "Failed to retrieve Blackboard Attributes", ex); //NON-NLS
+    //     }
+    //     return null;
+    // }

     @Override
     public boolean isLeafTypeNode() {

View File

@ -1,55 +0,0 @@
/*
* Autopsy Forensic Browser
*
* Copyright 2011-2015 Basis Technology Corp.
* Contact: carrier <at> sleuthkit <dot> org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.sleuthkit.autopsy.datamodel;
/**
* This interface acts as a sort of bridge between the Autopsy Core NetBeans
* Module (NBM) and the Autopsy KeywordSearch NBM. It is used to get indexed
* text marked up with HTML to highlight search hits for a particular keyword.
*
* Here is an example of how it works. It is used to put highlighted markup into
* the Lookups of the BlackboardArtifactNodes for keyword search hit artifacts.
* The BlackboardArtifactNode code that populates the node's Lookup asks the
* default global Lookup for an instance of TextMarkupLookup. The
* org.sleuthkit.autopsy.keywordsearch.HighlightedText class is the sole
* implementation of the interface, so the BlackboardArtifactNode gets a default
* constructed instance of HighlightedText. This otherwise useless
* instance is then used to call createInstance with parameters that are used to
* employ the Solr highlighting capability to create the markup. The
* TextMarkupLookup object goes in the BlackboardArtifactNode Lookup for later
* use by the ExtractedContentViewer, a DataContentViewer in the KeywordSearch
* NBM.
*/
public interface TextMarkupLookup {
/**
* Factory method for getting an object that encapsulates indexed text
* marked up (HTML) to highlight search hits for a particular keyword.
*
* @param objectId ID of the object (file or artifact) that is the
* source of the indexed text.
* @param keyword The keyword to be highlighted in the text.
* @param isRegex Whether or not the query that follows is a regex.
* @param originalQuery The query that produces the keyword hit.
*
* @return An object that encapsulates indexed text marked up (HTML) to
* highlight search hits for a particular keyword.
*/
public TextMarkupLookup createInstance(long objectId, String keyword, boolean isRegex, String originalQuery);
}
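The deleted javadoc above describes the bridge this commit retires: the core module asked the default global Lookup for the single registered TextMarkupLookup implementation and used that otherwise useless instance purely as a factory. A generic, self-contained illustration of that NetBeans global-Lookup bridge pattern follows; the interface and class names are invented for the example and are not Autopsy APIs (building it requires the org-openide-util-lookup library and its @ServiceProvider annotation processing):

import org.openide.util.Lookup;
import org.openide.util.lookup.ServiceProvider;

// Hypothetical core-module interface: consumers compile against this only.
interface MarkupFactory {
    String createMarkup(long objectId, String keyword);
}

// Hypothetical provider-module implementation, registered into the global
// Lookup so other modules can discover it without a compile-time dependency.
@ServiceProvider(service = MarkupFactory.class)
public class SolrMarkupFactory implements MarkupFactory {

    @Override
    public String createMarkup(long objectId, String keyword) {
        return "<html>highlighted '" + keyword + "' for object " + objectId + "</html>";
    }

    // Consumer side: only the interface is known; the implementation is found
    // through the default (global) Lookup at run time.
    public static void main(String[] args) {
        MarkupFactory factory = Lookup.getDefault().lookup(MarkupFactory.class);
        System.out.println(factory.createMarkup(1L, "keyword"));
    }
}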

View File

@@ -30,6 +30,7 @@ import java.util.Set;
 import java.util.logging.Level;
 import org.apache.commons.lang.StringUtils;
 import org.openide.nodes.Node;
+import org.openide.util.Exceptions;
 import org.openide.util.Lookup;
 import org.openide.util.NbBundle;
 import org.openide.util.lookup.ServiceProvider;
@@ -104,35 +105,30 @@ public class ExtractedContentViewer implements DataContentViewer {
         IndexedText highlightedHitText = null;
         IndexedText rawContentText = null;

-        /*
-         * First add the text marked up with HTML to highlight keyword hits that
-         * will be present in the selected node's lookup if the node is for a
-         * keyword hit artifact or account.
-         */
-        indexedTextSources.addAll(nodeLookup.lookupAll(IndexedText.class));
-
-        if (false == indexedTextSources.isEmpty()) {
-            //JMTODO: how do know the highlighted one is the first one? I think the assumption is really that it is the only one...
-            //if the look up had any sources use them and don't make a new one.
-            highlightedHitText = indexedTextSources.get(0);
-        } else if (null != content && solrHasContent(content.getId())) {
-            /*
-             * if the lookup didn't have any sources, and solr has indexed the
-             * content,get an AccountsText object that will highlight any
-             * account numbers.
-             */
-            highlightedHitText = getAccountsText(content, nodeLookup);
-            if (highlightedHitText != null) {
-                indexedTextSources.add(highlightedHitText);
-            }
-        }
-
-        /*
-         * Next, add the "raw" (not highlighted) text, if any, for any content
-         * associated with the node.
-         */
-        if (null != content && solrHasContent(content.getId())) {
-            rawContentText = new RawText(content, content.getId());
-            indexedTextSources.add(rawContentText);
-        }
+        if (null != content && solrHasContent(content.getId())) {
+            QueryResults hits = nodeLookup.lookup(QueryResults.class);
+            BlackboardArtifact artifact = nodeLookup.lookup(BlackboardArtifact.class);
+            if (hits != null) {
+                highlightedHitText = new HighlightedText(content.getId(), hits);
+            } else {
+                if (artifact != null && artifact.getArtifactTypeID()
+                        == BlackboardArtifact.ARTIFACT_TYPE.TSK_ACCOUNT.getTypeID()) {
+                    // if the artifact is an account artifact, get an account text .
+                    highlightedHitText = getAccountsText(content, nodeLookup);
+                } else if (artifact != null && artifact.getArtifactTypeID()
+                        == BlackboardArtifact.ARTIFACT_TYPE.TSK_KEYWORD_HIT.getTypeID()) {
+                    highlightedHitText = new HighlightedText(artifact);
+                }
+            }
+            if (highlightedHitText != null) {
+                indexedTextSources.add(highlightedHitText);
+            }
+
+            /*
+             * Next, add the "raw" (not highlighted) text, if any, for any
+             * content associated with the node.
+             */
+            rawContentText = new RawText(content, content.getId());
+            indexedTextSources.add(rawContentText);
+        }
@@ -174,7 +170,8 @@ public class ExtractedContentViewer implements DataContentViewer {
          * For keyword hit artifacts, add the text of the artifact that hit,
          * not the hit artifact; otherwise add the text for the artifact.
          */
-        if (artifact.getArtifactTypeID() == TSK_KEYWORD_HIT.getTypeID() || artifact.getArtifactTypeID() == TSK_ACCOUNT.getTypeID()) {
+        if (artifact.getArtifactTypeID() == TSK_KEYWORD_HIT.getTypeID()
+                || artifact.getArtifactTypeID() == TSK_ACCOUNT.getTypeID()) {
            try {
                BlackboardAttribute attribute = artifact.getAttribute(TSK_ASSOCIATED_ARTIFACT_TYPE);
                if (attribute != null) {
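Condensed, the new viewer logic asks the selected node's lookup for the data it needs instead of expecting a ready-made TextMarkupLookup object in it. The sketch below restates the branch added above, using the same classes that appear in this diff and sitting inside the existing solrHasContent guard; it is a readability aid, not a standalone compilation unit:

Lookup nodeLookup = node.getLookup();
QueryResults hits = nodeLookup.lookup(QueryResults.class);
BlackboardArtifact artifact = nodeLookup.lookup(BlackboardArtifact.class);

IndexedText highlightedHitText = null;
if (hits != null) {
    // Ad-hoc search: the result factory put the QueryResults into the node's lookup.
    highlightedHitText = new HighlightedText(content.getId(), hits);
} else if (artifact != null
        && artifact.getArtifactTypeID() == BlackboardArtifact.ARTIFACT_TYPE.TSK_ACCOUNT.getTypeID()) {
    // Account artifact: highlight account numbers instead.
    highlightedHitText = getAccountsText(content, nodeLookup);
} else if (artifact != null
        && artifact.getArtifactTypeID() == BlackboardArtifact.ARTIFACT_TYPE.TSK_KEYWORD_HIT.getTypeID()) {
    // Saved keyword hit: the artifact's attributes are enough to rebuild the highlights.
    highlightedHitText = new HighlightedText(artifact);
}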

View File

@@ -44,7 +44,6 @@ import org.sleuthkit.autopsy.casemodule.Case;
 import org.sleuthkit.autopsy.coreutils.Logger;
 import org.sleuthkit.autopsy.coreutils.MessageNotifyUtil;
 import org.sleuthkit.autopsy.coreutils.Version;
-import org.sleuthkit.autopsy.datamodel.TextMarkupLookup;
 import org.sleuthkit.autopsy.keywordsearch.KeywordQueryFilter.FilterType;
 import org.sleuthkit.datamodel.BlackboardArtifact;
 import org.sleuthkit.datamodel.BlackboardAttribute;
@@ -55,7 +54,7 @@ import org.sleuthkit.datamodel.TskCoreException;
  * Highlights hits for a given document. Knows about pages and such for the
  * content viewer.
  */
-class HighlightedText implements IndexedText, TextMarkupLookup {
+class HighlightedText implements IndexedText {

     private static final Logger logger = Logger.getLogger(HighlightedText.class.getName());
@@ -70,7 +69,6 @@ class HighlightedText implements IndexedText, TextMarkupLookup {
     final private Server solrServer = KeywordSearch.getServer();

     private final long objectId;
-    // private final boolean isRegex;
     private final Set<String> keywords = new HashSet<>();

     private int numberPages = 0;
@@ -90,39 +88,20 @@ class HighlightedText implements IndexedText, TextMarkupLookup {
     private QueryResults hits = null; //original hits that may get passed in
     private boolean isPageInfoLoaded = false;
     private static final boolean DEBUG = (Version.getBuildType() == Version.Type.DEVELOPMENT);
-    // private String keywordHitQuery;

     private BlackboardArtifact artifact;

-    // HighlightedText(long objectId, String keyword, boolean isRegex) {
-    //     // The keyword can be treated as a literal hit at this point so we
-    //     // surround it in quotes.
-    //
-    //     //hits are unknown
-    // }

     /**
      * This constructor is used when keyword hits are accessed from the ad-hoc
      * search results. In that case we have the entire QueryResults object and
      * need to arrange the paging.
      *
      * @param objectId
-     * @param keyword       The keyword that was found previously (e.g. during
-     *                      ingest)
-     * @param isRegex       true if the keyword was found via a regular
-     *                      expression search
      * @param originalQuery The original query string that produced the hit. If
      *                      isRegex is true, this will be the regular expression
      *                      that produced the hit.
      */
-    HighlightedText(long objectId, String keyword, boolean isRegex, QueryResults hits) {
-        /*
-         * JMTODO: is this comment correct??? // The keyword can be treated as a
-         * literal hit at this point so we // surround it in quotes.
-         *
-         */
+    HighlightedText(long objectId, QueryResults hits) {
         this.objectId = objectId;
-        // this.keywords.add(KeywordSearchUtil.quoteQuery(keyword));
-        // this.isRegex = isRegex;
-        // keywordHitQuery = keywords.stream().collect(Collectors.joining(" "));
         this.hits = hits;
     }
@@ -137,16 +116,15 @@ class HighlightedText implements IndexedText, TextMarkupLookup {
      *
      * @throws TskCoreException
      */
-    HighlightedText(BlackboardArtifact artifact) throws TskCoreException {
+    HighlightedText(BlackboardArtifact artifact) {
         this.artifact = artifact;
         this.objectId = artifact.getObjectID();
-        loadPageInfoFromArtifact();
     }

     private void loadPageInfoFromArtifact() throws TskCoreException, NumberFormatException {
         KeywordSearch.QueryType qt = KeywordSearch.QueryType.values()[artifact.getAttribute(TSK_KEYWORD_SEARCH_TYPE).getValueInt()];
-        // this.isRegex = qt == KeywordSearch.QueryType.REGEX;
         this.keywords.add(artifact.getAttribute(TSK_KEYWORD).getValueString());
         String chunkIDsString = artifact.getAttribute(TSK_KEYWORD_HIT_DOCUMENT_IDS).getValueString();
         Set<String> chunkIDs = Arrays.stream(chunkIDsString.split(",")).map(StringUtils::strip).collect(Collectors.toSet());
@@ -164,7 +142,6 @@ class HighlightedText implements IndexedText, TextMarkupLookup {
             numberOfHitsPerPage.put(chunkID, 0);
             currentHitPerPage.put(chunkID, 0);
         }
-        // isPageInfoLoaded = true;
     }

     /**
@@ -183,6 +160,7 @@ class HighlightedText implements IndexedText, TextMarkupLookup {
             return constructEscapedSolrQuery(query.getQueryString());
         } else //construct a Solr query using aggregated terms to get highlighting
         //the query is executed later on demand
+        {
             if (queryResults.getKeywords().size() == 1) {
                 //simple case, no need to process subqueries and do special escaping
                 Keyword keyword = queryResults.getKeywords().iterator().next();
@@ -214,6 +192,7 @@ class HighlightedText implements IndexedText, TextMarkupLookup {
                 return highlightQuery.toString();
             }
         }
+    }

     /**
      * Constructs a complete, escaped Solr query that is ready to be used.
@@ -254,8 +233,7 @@ class HighlightedText implements IndexedText, TextMarkupLookup {
             /*
              * this could go in the constructor but is here to keep it near the
              * functionaly similar code for non regex searches
-             */
-            // loadRegexPageInfoFromArtifact();
+             */ loadPageInfoFromArtifact();
         } else if (hasChunks) {
             // if the file has chunks, get pages with hits, sorted
             if (loadPageInfoFromHits()) {
@@ -318,14 +296,6 @@ class HighlightedText implements IndexedText, TextMarkupLookup {
         return false;
     }

-    /**
-     * Constructor for dummy singleton factory instance for Lookup
-     */
-    private HighlightedText() {
-        objectId = -1; //JMTODO: dummy value, is this a legal objectID?
-        // isRegex = false;
-    }

     @Override
     public int getNumberPages() {
         //return number of pages that have hits
@@ -624,36 +594,4 @@ class HighlightedText implements IndexedText, TextMarkupLookup {
         return buf.toString();
     }

-    //JMTODO: this whole dummy istance stuff should just be separated to a factory class I think
-    /*
-     * dummy instance for Lookup only
-     */
-    private static TextMarkupLookup instance = null;
-
-    //getter of the singleton dummy instance solely for Lookup purpose
-    //this instance does not actually work with Solr
-    public static synchronized TextMarkupLookup getDefault() {
-        if (instance == null) {
-            instance = new HighlightedText();
-        }
-        return instance;
-    }
-
-    @Override
-    // factory method to create an instance of this object
-    public TextMarkupLookup createInstance(long objectId, String keywordHitQuery, boolean isRegex, String originalQuery) {
-        if (objectId < 0) {
-            try {
-                BlackboardArtifact blackboardArtifact = Case.getCurrentCase().getSleuthkitCase().getBlackboardArtifact(objectId);
-                if (blackboardArtifact.getArtifactTypeID() == BlackboardArtifact.ARTIFACT_TYPE.TSK_KEYWORD_HIT.getTypeID()) {
-                    return new HighlightedText(blackboardArtifact);
-                }
-            } catch (TskCoreException ex) {
-                //JMTODO: what to do here?
-                Exceptions.printStackTrace(ex);
-            }
-        }
-        return new HighlightedText(objectId, keywordHitQuery, isRegex, null);
-    }
 }
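With the dummy-factory machinery gone, there are exactly two ways left to build a HighlightedText, both visible in this diff. A short usage sketch; the variables here are placeholders for values the callers already hold:

// From ad-hoc search results (see KeywordSearchResultFactory below): paging is
// arranged from the full QueryResults that produced the hits.
HighlightedText fromHits = new HighlightedText(solrObjectId, queryResults);

// From a saved keyword-hit artifact (see ExtractedContentViewer above): page info
// is loaded later from the artifact's attributes (TSK_KEYWORD, TSK_KEYWORD_SEARCH_TYPE,
// TSK_KEYWORD_HIT_DOCUMENT_IDS) via loadPageInfoFromArtifact().
HighlightedText fromArtifact = new HighlightedText(keywordHitArtifact);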

View File

@@ -147,7 +147,7 @@ class KeywordSearchResultFactory extends ChildFactory<KeyValueQueryContent> {
         int hitNumber = 0;
         List<KeyValueQueryContent> tempList = new ArrayList<>();

-        final SetMultimap<Long, KeywordHit> orgnizeByDocID = orgnizeByDocID(queryResults);
+        // final SetMultimap<Long, KeywordHit> orgnizeByDocID = orgnizeByDocID(queryResults);

         for (KeywordHit hit : getOneHitPerObject(queryResults)) {
             /**
@@ -175,7 +175,6 @@ class KeywordSearchResultFactory extends ChildFactory<KeyValueQueryContent> {
             // BC: @@@ THis is really ineffecient. We should keep track of this when
             // we flattened the list of files to the unique files.
-            // final String highlightQueryEscaped = getHighlightQuery(queryRequest, queryRequest.isLiteral(), queryResults, content);

             String hitName = hit.isArtifactHit()
                     ? hit.getArtifact().getDisplayName() + " Artifact" //NON-NLS
                     : contentName;
@@ -242,7 +241,7 @@ class KeywordSearchResultFactory extends ChildFactory<KeyValueQueryContent> {
         //wrap in KeywordSearchFilterNode for the markup content, might need to override FilterNode for more customization
         // store the data in HighlightedMatchesSource so that it can be looked up (in content viewer)
-        HighlightedText highlights = new HighlightedText(key.solrObjectId, key.getName(), !key.getQuery().isLiteral(), hits);
+        HighlightedText highlights = new HighlightedText(key.getSolrObjectId(), hits);

         return new KeywordSearchFilterNode(highlights, kvNode);
     }
@@ -253,6 +252,7 @@ class KeywordSearchResultFactory extends ChildFactory<KeyValueQueryContent> {
     class KeyValueQueryContent extends KeyValue {

         private long solrObjectId;
         private final Content content;
         private final QueryResults hits;
         private final KeywordSearchQuery query;
@@ -285,7 +285,9 @@ class KeywordSearchResultFactory extends ChildFactory<KeyValueQueryContent> {
             return content;
         }

+        public long getSolrObjectId() {
+            return solrObjectId;
+        }

         QueryResults getHits() {
             return hits;