Preliminary version of large-file string extraction support - no full hit navigation yet.
Also, some refactoring and cleanup of old keyword search code.
parent 463acb26ea
commit c3a2f3a13c
@@ -43,8 +43,13 @@ public class FsContentStringStream extends InputStream {
}
},
};

//args
private FsContent content;
private String encoding;
private boolean preserveOnBuffBoundary;

//internal data
private long contentOffset = 0; //offset in fscontent read into curReadBuf
private static final int READ_BUF_SIZE = 256;
private static final byte[] curReadBuf = new byte[READ_BUF_SIZE];
@@ -64,15 +69,28 @@ public class FsContentStringStream extends InputStream {
private static final Logger logger = Logger.getLogger(FsContentStringStream.class.getName());

/**
 *
 * Construct new string stream from FsContent
 * @param content to extract strings from
 * @param encoding target encoding, current only ASCII supported
 * @param encoding target encoding, currently UTF-8
 * @param preserveOnBuffBoundary whether to preserve or split a string on a buffer boundary. If false, strings are packed into the read buffer up to the maximum possible, potentially splitting a string. If true, the string is preserved for the next read.
 */
public FsContentStringStream(FsContent content, Encoding encoding) {
public FsContentStringStream(FsContent content, Encoding encoding, boolean preserveOnBuffBoundary) {
this.content = content;
this.encoding = encoding.toString();
this.preserveOnBuffBoundary = preserveOnBuffBoundary;
//logger.log(Level.INFO, "FILE: " + content.getParentPath() + "/" + content.getName());
}

/**
 * Construct new string stream from FsContent
 * Do not attempt to fill entire read buffer if that would break a string
 *
 * @param content to extract strings from
 * @param encoding target encoding, currently UTF-8
 */
public FsContentStringStream(FsContent content, Encoding encoding) {
this(content, encoding, false);
}

@Override
public int read(byte[] b, int off, int len) throws IOException {
@@ -190,7 +208,7 @@ public class FsContentStringStream extends InputStream {
//check if temp still has chars to qualify as a string
//we might need to break up temp into 2 parts for next read() call
//consume as many as possible to fill entire user buffer
if (tempStringLen >= MIN_PRINTABLE_CHARS) {
if (!this.preserveOnBuffBoundary && tempStringLen >= MIN_PRINTABLE_CHARS) {
if (newCurLen > len) {
int appendChars = len - curStringLen;
//save part for next user read(), need to break up temp string
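The hunks above add a preserveOnBuffBoundary switch: when false, the stream packs the read buffer as full as possible even if that splits a string across two read() calls; when true, a string that would straddle the boundary is held back for the next call, so chunked indexing never cuts a keyword in half. A minimal usage sketch, assuming the constructors shown above; fsContent, encoding, and index(...) are placeholders, not part of this commit:

```java
// Read extracted strings chunk by chunk without splitting any string
// across a chunk boundary (third constructor argument set to true).
FsContentStringStream stringStream =
        new FsContentStringStream(fsContent, encoding, true);
byte[] chunk = new byte[256 * 1024];
int read;
while ((read = stringStream.read(chunk, 0, chunk.length)) != -1) {
    index(chunk, read); // hypothetical consumer of one chunk
}
```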
@@ -504,7 +504,10 @@
<field name="atime" type="tdate" indexed="true" stored="true"/>
<field name="mtime" type="tdate" indexed="true" stored="true"/>
<field name="crtime" type="tdate" indexed="true" stored="true"/>

<!-- file chunk-specific fields (optional for others) -->
<!-- for a parent file with no content, the number of chunks is specified -->
<field name="num_chunks" type="int" indexed="true" stored="true" required="false" />
|
||||
|
||||
<!-- Common metadata fields, named specifically to match up with
|
||||
SolrCell metadata when parsing rich documents such as Word, PDF.
|
||||
Some fields are multiValued only because Tika currently may return
|
||||
|
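The new num_chunks field records, on the parent file's Solr document, how many chunk documents were indexed for it. A hedged SolrJ sketch of reading it back; the solrServer handle and the id scheme are assumptions, not part of this schema change:

```java
// Query the parent document for its chunk count via plain SolrJ.
SolrQuery q = new SolrQuery();
q.setQuery("id:" + contentId); // contentId: hypothetical parent file id
q.setFields("num_chunks");
SolrDocumentList docs = solrServer.query(q).getResults();
Integer numChunks = docs.isEmpty()
        ? Integer.valueOf(0)
        : (Integer) docs.get(0).getFieldValue("num_chunks");
```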
@@ -39,3 +39,11 @@ KeywordSearchPanel.cutMenuItem.text=Cut
KeywordSearchPanel.copyMenuItem.text=Copy
KeywordSearchPanel.pasteMenuItem.text=Paste
KeywordSearchPanel.selectAllMenuItem.text=Select All
ExtractedContentPanel.pageButtonsLabel.text=Page
ExtractedContentPanel.pageNextButton.text=
ExtractedContentPanel.pagePreviousButton.actionCommand=pagePreviousButton
ExtractedContentPanel.pagePreviousButton.text=
ExtractedContentPanel.pagesLabel.text=Page:
ExtractedContentPanel.pageOfLabel.text=of
ExtractedContentPanel.pageCurLabel.text=-
ExtractedContentPanel.pageTotalLabel.text=-
@@ -0,0 +1,102 @@
/*
 * Autopsy Forensic Browser
 *
 * Copyright 2011 Basis Technology Corp.
 * Contact: carrier <at> sleuthkit <dot> org
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.sleuthkit.autopsy.keywordsearch;

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.Reader;
import java.util.logging.Logger;
import org.apache.solr.common.util.ContentStream;
import org.sleuthkit.autopsy.datamodel.FsContentStringStream.Encoding;
import org.sleuthkit.datamodel.FsContent;

/**
 * Stream of bytes representing a string with the specified encoding,
 * to feed into Solr as a ContentStream
 */
public class ByteContentStream implements ContentStream {
//input
private byte[] content; //extracted subcontent
private long contentSize;
private FsContent fsContent; //origin
private Encoding encoding;

private InputStream stream;

private static Logger logger = Logger.getLogger(ByteContentStream.class.getName());

public ByteContentStream(byte [] content, long contentSize, FsContent fsContent, Encoding encoding) {
this.content = content;
this.fsContent = fsContent;
this.encoding = encoding;
this.contentSize = contentSize;
stream = new ByteArrayInputStream(content, 0, (int)contentSize);
}

public byte[] getByteContent() {
return content;
}

public FsContent getFsContent() {
return fsContent;
}


@Override
public String getContentType() {
return "text/plain;charset=" + encoding.toString();
}

@Override
public String getName() {
return fsContent.getName();
}

@Override
public Reader getReader() throws IOException {
return new InputStreamReader(stream);

}

@Override
public Long getSize() {
return contentSize;
}

@Override
public String getSourceInfo() {
return "File:" + fsContent.getId();
}

@Override
public InputStream getStream() throws IOException {
return stream;
}

@Override
protected void finalize() throws Throwable {
super.finalize();

stream.close();
}



}
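ByteContentStream adapts a byte buffer that has already been extracted into Solr's ContentStream interface, so a chunk can be posted without re-reading the file; note it exposes only the first contentSize bytes of the (possibly reusable) buffer. A sketch of how it might be attached to a SolrJ update request; the request setup and endpoint are assumptions, not code from this commit:

```java
// Wrap one extracted chunk and hand it to a SolrJ update request.
ByteContentStream bcs =
        new ByteContentStream(buf, bytesRead, fsContent, encoding);
ContentStreamUpdateRequest req =
        new ContentStreamUpdateRequest("/update"); // endpoint assumed
req.addContentStream(bcs);
```

Since the backing stream is a ByteArrayInputStream, the close() deferred to finalize() is effectively a no-op, so the finalizer-based cleanup, while unusual, is harmless here.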
@@ -0,0 +1,113 @@
/*
 * Autopsy Forensic Browser
 *
 * Copyright 2011 Basis Technology Corp.
 * Contact: carrier <at> sleuthkit <dot> org
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.sleuthkit.autopsy.keywordsearch;

import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import org.sleuthkit.datamodel.FsContent;

/**
 * Represents result of keyword search query containing the Content it hit
 * and chunk information, if the result hit is a content chunk
 */
public class ContentHit {

private FsContent content;
private int chunkID = 0;

ContentHit(FsContent content) {
this.content = content;
}

ContentHit(FsContent content, int chunkID) {
this.content = content;
this.chunkID = chunkID;
}

FsContent getContent() {
return content;
}

long getId() {
return content.getId();
}

int getChunkId() {
return chunkID;
}

boolean isChunk() {
return chunkID != 0;
}

@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
final ContentHit other = (ContentHit) obj;
if (this.content != other.content && (this.content == null || !this.content.equals(other.content))) {
return false;
}
if (this.chunkID != other.chunkID) {
return false;
}
return true;
}

@Override
public int hashCode() {
int hash = 3;
hash = 41 * hash + (this.content != null ? this.content.hashCode() : 0);
hash = 41 * hash + this.chunkID;
return hash;
}

static Map<FsContent, Integer> flattenResults(List<ContentHit> hits) {
Map<FsContent, Integer> ret = new LinkedHashMap<FsContent, Integer>();
for (ContentHit h : hits) {
FsContent f = h.getContent();
if (!ret.containsKey(f)) {
ret.put(f, h.getChunkId());
}
}

return ret;
}

//flatten results to get unique fscontent per hit, with first chunk id encountered
static LinkedHashMap<FsContent, Integer> flattenResults(Map<String, List<ContentHit>> results) {
LinkedHashMap<FsContent, Integer> flattened = new LinkedHashMap<FsContent, Integer>();

for (String key : results.keySet()) {
for (ContentHit hit : results.get(key)) {
FsContent fsContent = hit.getContent();
//flatten, record first chunk encountered
if (!flattened.containsKey(fsContent)) {
flattened.put(fsContent, hit.getChunkId());
}
}
}
return flattened;
}
}
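Both flattenResults overloads collapse hit lists to one entry per file, keeping the first chunk ID encountered so the UI can jump straight to the first page containing a hit. A small sketch of the intended behavior; the fsContent variable is a placeholder:

```java
// Two hits in chunks 2 and 5 of the same file flatten to a single
// entry that remembers chunk 2, the first one encountered.
List<ContentHit> hits = new ArrayList<ContentHit>();
hits.add(new ContentHit(fsContent, 2));
hits.add(new ContentHit(fsContent, 5));
Map<FsContent, Integer> flat = ContentHit.flattenResults(hits);
// flat.size() == 1 and flat.get(fsContent) == 2
```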
@@ -0,0 +1,147 @@
/*
 * Autopsy Forensic Browser
 *
 * Copyright 2011 Basis Technology Corp.
 * Contact: carrier <at> sleuthkit <dot> org
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.sleuthkit.autopsy.keywordsearch;

import java.util.HashMap;
import java.util.logging.Logger;

/**
 * Paging tracker / find functionality for a given content
 * Supports keeping track of paging for multiple contents.
 */
public class ExtractedContentPaging {

static class PageInfo {

PageInfo(int total) {
this.total = total;
if (this.total == 0) {
//no chunks
this.current = 0;
}
else {
this.current = 1;
}
}
int current;
int total;
}
private static final Logger logger = Logger.getLogger(ExtractedContentPaging.class.getName());

public ExtractedContentPaging() {
sources = new HashMap<MarkupSource, PageInfo>();
}
//maps markup source to page info being tracked
private HashMap<MarkupSource, PageInfo> sources;

/**
 * add pages tracking for the content
 * needs to be called first for each content
 * @param source
 * @param totalPages
 */
void add(MarkupSource source, int totalPages) {
sources.put(source, new PageInfo(totalPages));
}

/**
 * check if the source paging is currently being tracked
 * @param source markup source to check
 * @return true if it is being tracked already
 */
boolean isTracked(MarkupSource source) {
return sources.containsKey(source);
}

/**
 * get total number of pages in the source
 * @param source markup source to check
 * @return number of pages in the source
 */
int getTotalPages(MarkupSource source) {
if (!isTracked(source)) {
throw new IllegalStateException("Source is not being tracked");
}
return sources.get(source).total;
}

/**
 * get current page
 * @param source markup source to check
 * @return current page
 */
int getCurrentPage(MarkupSource source) {
if (!isTracked(source)) {
throw new IllegalStateException("Source is not being tracked");
}
return sources.get(source).current;
}

/**
 * Check if there is a next page
 * @param source markup source to check
 * @return true if the source has a next page
 */
boolean hasNext(MarkupSource source) {
if (!isTracked(source)) {
throw new IllegalStateException("Source is not being tracked");
}
PageInfo info = sources.get(source);
return info.current < info.total;
}

/**
 * Check if there is a previous page
 * @param source markup source to check
 * @return true if the source has a previous page
 */
boolean hasPrevious(MarkupSource source) {
if (!isTracked(source)) {
throw new IllegalStateException("Source is not being tracked");
}
PageInfo info = sources.get(source);
return info.current > 1;
}

/**
 * make step toward next page
 * requires call to hasNext() first
 * @param source markup source to advance
 */
void next(MarkupSource source) {
if (!isTracked(source)) {
throw new IllegalStateException("Source is not being tracked");
}
sources.get(source).current++;
}

/**
 * make step toward previous page
 * requires call to hasPrevious() first
 * @param source
 *
 */
void previous(MarkupSource source) {
if (!isTracked(source)) {
throw new IllegalStateException("Source is not being tracked");
}
sources.get(source).current--;
}
}
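ExtractedContentPaging keeps one PageInfo per MarkupSource: a source with zero chunks sits at page 0 of 0, anything else starts at page 1, and callers are expected to guard next()/previous() with hasNext()/hasPrevious(). A sketch of the expected call sequence; source is a placeholder:

```java
// Register a source once, then walk its pages front to back.
ExtractedContentPaging paging = new ExtractedContentPaging();
if (!paging.isTracked(source)) {
    paging.add(source, source.getNumberPages());
}
while (paging.hasNext(source)) {
    paging.next(source);
    int page = paging.getCurrentPage(source); // 1-based page number
}
```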
@@ -40,7 +40,7 @@
<Layout>
<DimensionLayout dim="0">
<Group type="103" groupAlignment="0" attributes="0">
<Group type="102" alignment="1" attributes="0">
<Group type="102" attributes="0">
<EmptySpace max="-2" attributes="0"/>
<Component id="hitLabel" min="-2" max="-2" attributes="0"/>
<EmptySpace type="separate" max="-2" attributes="0"/>
@@ -49,17 +49,31 @@
<Component id="hitOfLabel" min="-2" max="-2" attributes="0"/>
<EmptySpace type="unrelated" max="-2" attributes="0"/>
<Component id="hitTotalLabel" min="-2" max="-2" attributes="0"/>
<EmptySpace min="-2" pref="50" max="-2" attributes="0"/>
<EmptySpace min="-2" pref="26" max="-2" attributes="0"/>
<Component id="hitButtonsLabel" min="-2" max="-2" attributes="0"/>
<EmptySpace min="-2" max="-2" attributes="0"/>
<EmptySpace max="-2" attributes="0"/>
<Component id="hitPreviousButton" min="-2" pref="23" max="-2" attributes="0"/>
<EmptySpace min="-2" pref="0" max="-2" attributes="0"/>
<Component id="hitNextButton" min="-2" pref="23" max="-2" attributes="0"/>
<EmptySpace pref="78" max="32767" attributes="0"/>
<EmptySpace type="unrelated" max="-2" attributes="0"/>
<Component id="pagesLabel" min="-2" max="-2" attributes="0"/>
<EmptySpace max="-2" attributes="0"/>
<Component id="pageCurLabel" min="-2" pref="12" max="-2" attributes="0"/>
<EmptySpace max="-2" attributes="0"/>
<Component id="pageOfLabel" min="-2" max="-2" attributes="0"/>
<EmptySpace min="-2" pref="14" max="-2" attributes="0"/>
<Component id="pageTotalLabel" min="-2" pref="18" max="-2" attributes="0"/>
<EmptySpace type="unrelated" max="-2" attributes="0"/>
<Component id="pageButtonsLabel" min="-2" max="-2" attributes="0"/>
<EmptySpace type="unrelated" min="-2" max="-2" attributes="0"/>
<Component id="pagePreviousButton" min="-2" max="-2" attributes="0"/>
<EmptySpace min="-2" pref="0" max="-2" attributes="0"/>
<Component id="pageNextButton" min="-2" max="-2" attributes="0"/>
<EmptySpace pref="65" max="32767" attributes="0"/>
<Component id="sourceComboBox" min="-2" max="-2" attributes="0"/>
<EmptySpace max="-2" attributes="0"/>
</Group>
<Component id="jScrollPane1" alignment="0" pref="400" max="32767" attributes="0"/>
<Component id="jScrollPane1" alignment="0" pref="559" max="32767" attributes="0"/>
</Group>
</DimensionLayout>
<DimensionLayout dim="1">
@@ -67,16 +81,26 @@
<Group type="102" alignment="0" attributes="0">
<Group type="103" groupAlignment="0" attributes="0">
<Component id="sourceComboBox" alignment="0" min="-2" max="-2" attributes="0"/>
<Component id="hitPreviousButton" min="-2" pref="23" max="-2" attributes="0"/>
<Component id="hitNextButton" min="-2" pref="23" max="-2" attributes="0"/>
<Group type="103" groupAlignment="3" attributes="0">
<Component id="hitCountLabel" alignment="3" min="-2" max="-2" attributes="0"/>
<Component id="hitOfLabel" alignment="3" min="-2" max="-2" attributes="0"/>
<Component id="hitTotalLabel" alignment="3" min="-2" max="-2" attributes="0"/>
<Component id="hitButtonsLabel" alignment="3" min="-2" max="-2" attributes="0"/>
<Component id="hitLabel" alignment="3" min="-2" max="-2" attributes="0"/>
<Component id="hitButtonsLabel" alignment="3" min="-2" max="-2" attributes="0"/>
</Group>
<Component id="hitPreviousButton" min="-2" pref="23" max="-2" attributes="0"/>
<Component id="hitNextButton" min="-2" pref="23" max="-2" attributes="0"/>
<Group type="103" groupAlignment="3" attributes="0">
<Component id="pageButtonsLabel" alignment="3" min="-2" max="-2" attributes="0"/>
<Component id="pageTotalLabel" alignment="3" min="-2" max="-2" attributes="0"/>
<Component id="pagesLabel" alignment="3" min="-2" max="-2" attributes="0"/>
<Component id="pageCurLabel" alignment="3" min="-2" max="-2" attributes="0"/>
<Component id="pageOfLabel" alignment="3" min="-2" max="-2" attributes="0"/>
</Group>
<Component id="pageNextButton" min="-2" max="-2" attributes="0"/>
<Component id="pagePreviousButton" min="-2" pref="23" max="-2" attributes="0"/>
</Group>
<EmptySpace min="-2" pref="0" max="-2" attributes="0"/>
<Component id="jScrollPane1" pref="293" max="32767" attributes="0"/>
</Group>
</Group>
@@ -173,6 +197,11 @@
<Property name="text" type="java.lang.String" editor="org.netbeans.modules.i18n.form.FormI18nStringEditor">
<ResourceString bundle="org/sleuthkit/autopsy/keywordsearch/Bundle.properties" key="ExtractedContentPanel.hitPreviousButton.text" replaceFormat="org.openide.util.NbBundle.getMessage({sourceFileName}.class, "{key}")"/>
</Property>
<Property name="border" type="javax.swing.border.Border" editor="org.netbeans.modules.form.editors2.BorderEditor">
<Border info="org.netbeans.modules.form.compat2.border.EmptyBorderInfo">
<EmptyBorder/>
</Border>
</Property>
<Property name="borderPainted" type="boolean" value="false"/>
<Property name="contentAreaFilled" type="boolean" value="false"/>
<Property name="disabledIcon" type="javax.swing.Icon" editor="org.netbeans.modules.form.editors2.IconEditor">
@@ -197,6 +226,11 @@
<Property name="text" type="java.lang.String" editor="org.netbeans.modules.i18n.form.FormI18nStringEditor">
<ResourceString bundle="org/sleuthkit/autopsy/keywordsearch/Bundle.properties" key="ExtractedContentPanel.hitNextButton.text" replaceFormat="org.openide.util.NbBundle.getMessage({sourceFileName}.class, "{key}")"/>
</Property>
<Property name="border" type="javax.swing.border.Border" editor="org.netbeans.modules.form.editors2.BorderEditor">
<Border info="org.netbeans.modules.form.compat2.border.EmptyBorderInfo">
<EmptyBorder/>
</Border>
</Property>
<Property name="borderPainted" type="boolean" value="false"/>
<Property name="contentAreaFilled" type="boolean" value="false"/>
<Property name="disabledIcon" type="javax.swing.Icon" editor="org.netbeans.modules.form.editors2.IconEditor">
@@ -213,5 +247,92 @@
</Property>
</Properties>
</Component>
<Component class="javax.swing.JLabel" name="pageButtonsLabel">
<Properties>
<Property name="text" type="java.lang.String" editor="org.netbeans.modules.i18n.form.FormI18nStringEditor">
<ResourceString bundle="org/sleuthkit/autopsy/keywordsearch/Bundle.properties" key="ExtractedContentPanel.pageButtonsLabel.text" replaceFormat="org.openide.util.NbBundle.getMessage({sourceFileName}.class, "{key}")"/>
</Property>
</Properties>
</Component>
<Component class="javax.swing.JButton" name="pagePreviousButton">
<Properties>
<Property name="icon" type="javax.swing.Icon" editor="org.netbeans.modules.form.editors2.IconEditor">
<Image iconType="3" name="/org/sleuthkit/autopsy/keywordsearch/btn_step_back.png"/>
</Property>
<Property name="text" type="java.lang.String" editor="org.netbeans.modules.i18n.form.FormI18nStringEditor">
<ResourceString bundle="org/sleuthkit/autopsy/keywordsearch/Bundle.properties" key="ExtractedContentPanel.pagePreviousButton.text" replaceFormat="org.openide.util.NbBundle.getMessage({sourceFileName}.class, "{key}")"/>
</Property>
<Property name="actionCommand" type="java.lang.String" editor="org.netbeans.modules.i18n.form.FormI18nStringEditor">
<ResourceString bundle="org/sleuthkit/autopsy/keywordsearch/Bundle.properties" key="ExtractedContentPanel.pagePreviousButton.actionCommand" replaceFormat="org.openide.util.NbBundle.getMessage({sourceFileName}.class, "{key}")"/>
</Property>
<Property name="border" type="javax.swing.border.Border" editor="org.netbeans.modules.form.editors2.BorderEditor">
<Border info="org.netbeans.modules.form.compat2.border.EmptyBorderInfo">
<EmptyBorder/>
</Border>
</Property>
<Property name="borderPainted" type="boolean" value="false"/>
<Property name="contentAreaFilled" type="boolean" value="false"/>
<Property name="disabledIcon" type="javax.swing.Icon" editor="org.netbeans.modules.form.editors2.IconEditor">
<Image iconType="3" name="/org/sleuthkit/autopsy/keywordsearch/btn_step_back_disabled.png"/>
</Property>
<Property name="margin" type="java.awt.Insets" editor="org.netbeans.beaninfo.editors.InsetsEditor">
<Insets value="[2, 0, 2, 0]"/>
</Property>
</Properties>
</Component>
<Component class="javax.swing.JButton" name="pageNextButton">
<Properties>
<Property name="icon" type="javax.swing.Icon" editor="org.netbeans.modules.form.editors2.IconEditor">
<Image iconType="3" name="/org/sleuthkit/autopsy/keywordsearch/btn_step_forward.png"/>
</Property>
<Property name="text" type="java.lang.String" editor="org.netbeans.modules.i18n.form.FormI18nStringEditor">
<ResourceString bundle="org/sleuthkit/autopsy/keywordsearch/Bundle.properties" key="ExtractedContentPanel.pageNextButton.text" replaceFormat="org.openide.util.NbBundle.getMessage({sourceFileName}.class, "{key}")"/>
</Property>
<Property name="border" type="javax.swing.border.Border" editor="org.netbeans.modules.form.editors2.BorderEditor">
<Border info="org.netbeans.modules.form.compat2.border.EmptyBorderInfo">
<EmptyBorder/>
</Border>
</Property>
<Property name="borderPainted" type="boolean" value="false"/>
<Property name="contentAreaFilled" type="boolean" value="false"/>
<Property name="disabledIcon" type="javax.swing.Icon" editor="org.netbeans.modules.form.editors2.IconEditor">
<Image iconType="3" name="/org/sleuthkit/autopsy/keywordsearch/btn_step_forward_disabled.png"/>
</Property>
<Property name="margin" type="java.awt.Insets" editor="org.netbeans.beaninfo.editors.InsetsEditor">
<Insets value="[2, 0, 2, 0]"/>
</Property>
<Property name="preferredSize" type="java.awt.Dimension" editor="org.netbeans.beaninfo.editors.DimensionEditor">
<Dimension value="[23, 23]"/>
</Property>
</Properties>
</Component>
<Component class="javax.swing.JLabel" name="pagesLabel">
<Properties>
<Property name="text" type="java.lang.String" editor="org.netbeans.modules.i18n.form.FormI18nStringEditor">
<ResourceString bundle="org/sleuthkit/autopsy/keywordsearch/Bundle.properties" key="ExtractedContentPanel.pagesLabel.text" replaceFormat="org.openide.util.NbBundle.getMessage({sourceFileName}.class, "{key}")"/>
</Property>
</Properties>
</Component>
<Component class="javax.swing.JLabel" name="pageCurLabel">
<Properties>
<Property name="text" type="java.lang.String" editor="org.netbeans.modules.i18n.form.FormI18nStringEditor">
<ResourceString bundle="org/sleuthkit/autopsy/keywordsearch/Bundle.properties" key="ExtractedContentPanel.pageCurLabel.text" replaceFormat="org.openide.util.NbBundle.getMessage({sourceFileName}.class, "{key}")"/>
</Property>
</Properties>
</Component>
<Component class="javax.swing.JLabel" name="pageOfLabel">
<Properties>
<Property name="text" type="java.lang.String" editor="org.netbeans.modules.i18n.form.FormI18nStringEditor">
<ResourceString bundle="org/sleuthkit/autopsy/keywordsearch/Bundle.properties" key="ExtractedContentPanel.pageOfLabel.text" replaceFormat="org.openide.util.NbBundle.getMessage({sourceFileName}.class, "{key}")"/>
</Property>
</Properties>
</Component>
<Component class="javax.swing.JLabel" name="pageTotalLabel">
<Properties>
<Property name="text" type="java.lang.String" editor="org.netbeans.modules.i18n.form.FormI18nStringEditor">
<ResourceString bundle="org/sleuthkit/autopsy/keywordsearch/Bundle.properties" key="ExtractedContentPanel.pageTotalLabel.text" replaceFormat="org.openide.util.NbBundle.getMessage({sourceFileName}.class, "{key}")"/>
</Property>
</Properties>
</Component>
</SubComponents>
</Form>
@@ -44,8 +44,12 @@ import javax.swing.text.html.HTMLEditorKit.HTMLFactory;
class ExtractedContentPanel extends javax.swing.JPanel {

private static Logger logger = Logger.getLogger(ExtractedContentPanel.class.getName());

private ExtractedContentViewer viewer;

ExtractedContentPanel() {
ExtractedContentPanel(ExtractedContentViewer viewer) {
this.viewer = viewer;

initComponents();

initControls();
@@ -85,7 +89,8 @@ class ExtractedContentPanel extends javax.swing.JPanel {
@Override
public void itemStateChanged(ItemEvent e) {
if (e.getStateChange() == ItemEvent.SELECTED) {
setPanelText(((MarkupSource) e.getItem()).getMarkup());
MarkupSource source = (MarkupSource) e.getItem();
setPanelText(viewer.getDisplayText(source));
}
}
});
@@ -130,6 +135,13 @@ class ExtractedContentPanel extends javax.swing.JPanel {
hitButtonsLabel = new javax.swing.JLabel();
hitPreviousButton = new javax.swing.JButton();
hitNextButton = new javax.swing.JButton();
pageButtonsLabel = new javax.swing.JLabel();
pagePreviousButton = new javax.swing.JButton();
pageNextButton = new javax.swing.JButton();
pagesLabel = new javax.swing.JLabel();
pageCurLabel = new javax.swing.JLabel();
pageOfLabel = new javax.swing.JLabel();
pageTotalLabel = new javax.swing.JLabel();

copyMenuItem.setText(org.openide.util.NbBundle.getMessage(ExtractedContentPanel.class, "ExtractedContentPanel.copyMenuItem.text")); // NOI18N
rightClickMenu.add(copyMenuItem);
@@ -163,6 +175,7 @@ class ExtractedContentPanel extends javax.swing.JPanel {

hitPreviousButton.setIcon(new javax.swing.ImageIcon(getClass().getResource("/org/sleuthkit/autopsy/keywordsearch/btn_step_back.png"))); // NOI18N
hitPreviousButton.setText(org.openide.util.NbBundle.getMessage(ExtractedContentPanel.class, "ExtractedContentPanel.hitPreviousButton.text")); // NOI18N
hitPreviousButton.setBorder(javax.swing.BorderFactory.createEmptyBorder(1, 1, 1, 1));
hitPreviousButton.setBorderPainted(false);
hitPreviousButton.setContentAreaFilled(false);
hitPreviousButton.setDisabledIcon(new javax.swing.ImageIcon(getClass().getResource("/org/sleuthkit/autopsy/keywordsearch/btn_step_back_disabled.png"))); // NOI18N
@@ -172,6 +185,7 @@ class ExtractedContentPanel extends javax.swing.JPanel {

hitNextButton.setIcon(new javax.swing.ImageIcon(getClass().getResource("/org/sleuthkit/autopsy/keywordsearch/btn_step_forward.png"))); // NOI18N
hitNextButton.setText(org.openide.util.NbBundle.getMessage(ExtractedContentPanel.class, "ExtractedContentPanel.hitNextButton.text")); // NOI18N
hitNextButton.setBorder(javax.swing.BorderFactory.createEmptyBorder(1, 1, 1, 1));
hitNextButton.setBorderPainted(false);
hitNextButton.setContentAreaFilled(false);
hitNextButton.setDisabledIcon(new javax.swing.ImageIcon(getClass().getResource("/org/sleuthkit/autopsy/keywordsearch/btn_step_forward_disabled.png"))); // NOI18N
@@ -179,11 +193,39 @@ class ExtractedContentPanel extends javax.swing.JPanel {
hitNextButton.setPreferredSize(new java.awt.Dimension(23, 23));
hitNextButton.setRolloverIcon(new javax.swing.ImageIcon(getClass().getResource("/org/sleuthkit/autopsy/keywordsearch/btn_step_forward_hover.png"))); // NOI18N

pageButtonsLabel.setText(org.openide.util.NbBundle.getMessage(ExtractedContentPanel.class, "ExtractedContentPanel.pageButtonsLabel.text")); // NOI18N

pagePreviousButton.setIcon(new javax.swing.ImageIcon(getClass().getResource("/org/sleuthkit/autopsy/keywordsearch/btn_step_back.png"))); // NOI18N
pagePreviousButton.setText(org.openide.util.NbBundle.getMessage(ExtractedContentPanel.class, "ExtractedContentPanel.pagePreviousButton.text")); // NOI18N
pagePreviousButton.setActionCommand(org.openide.util.NbBundle.getMessage(ExtractedContentPanel.class, "ExtractedContentPanel.pagePreviousButton.actionCommand")); // NOI18N
pagePreviousButton.setBorder(javax.swing.BorderFactory.createEmptyBorder(1, 1, 1, 1));
pagePreviousButton.setBorderPainted(false);
pagePreviousButton.setContentAreaFilled(false);
pagePreviousButton.setDisabledIcon(new javax.swing.ImageIcon(getClass().getResource("/org/sleuthkit/autopsy/keywordsearch/btn_step_back_disabled.png"))); // NOI18N
pagePreviousButton.setMargin(new java.awt.Insets(2, 0, 2, 0));

pageNextButton.setIcon(new javax.swing.ImageIcon(getClass().getResource("/org/sleuthkit/autopsy/keywordsearch/btn_step_forward.png"))); // NOI18N
pageNextButton.setText(org.openide.util.NbBundle.getMessage(ExtractedContentPanel.class, "ExtractedContentPanel.pageNextButton.text")); // NOI18N
pageNextButton.setBorder(javax.swing.BorderFactory.createEmptyBorder(1, 1, 1, 1));
pageNextButton.setBorderPainted(false);
pageNextButton.setContentAreaFilled(false);
pageNextButton.setDisabledIcon(new javax.swing.ImageIcon(getClass().getResource("/org/sleuthkit/autopsy/keywordsearch/btn_step_forward_disabled.png"))); // NOI18N
pageNextButton.setMargin(new java.awt.Insets(2, 0, 2, 0));
pageNextButton.setPreferredSize(new java.awt.Dimension(23, 23));

pagesLabel.setText(org.openide.util.NbBundle.getMessage(ExtractedContentPanel.class, "ExtractedContentPanel.pagesLabel.text")); // NOI18N

pageCurLabel.setText(org.openide.util.NbBundle.getMessage(ExtractedContentPanel.class, "ExtractedContentPanel.pageCurLabel.text")); // NOI18N

pageOfLabel.setText(org.openide.util.NbBundle.getMessage(ExtractedContentPanel.class, "ExtractedContentPanel.pageOfLabel.text")); // NOI18N

pageTotalLabel.setText(org.openide.util.NbBundle.getMessage(ExtractedContentPanel.class, "ExtractedContentPanel.pageTotalLabel.text")); // NOI18N

javax.swing.GroupLayout layout = new javax.swing.GroupLayout(this);
this.setLayout(layout);
layout.setHorizontalGroup(
layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(javax.swing.GroupLayout.Alignment.TRAILING, layout.createSequentialGroup()
.addGroup(layout.createSequentialGroup()
.addContainerGap()
.addComponent(hitLabel)
.addGap(18, 18, 18)
@@ -192,30 +234,53 @@ class ExtractedContentPanel extends javax.swing.JPanel {
.addComponent(hitOfLabel)
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.UNRELATED)
.addComponent(hitTotalLabel, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)
.addGap(50, 50, 50)
.addGap(26, 26, 26)
.addComponent(hitButtonsLabel)
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addComponent(hitPreviousButton, javax.swing.GroupLayout.PREFERRED_SIZE, 23, javax.swing.GroupLayout.PREFERRED_SIZE)
.addGap(0, 0, 0)
.addComponent(hitNextButton, javax.swing.GroupLayout.PREFERRED_SIZE, 23, javax.swing.GroupLayout.PREFERRED_SIZE)
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED, 78, Short.MAX_VALUE)
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.UNRELATED)
.addComponent(pagesLabel)
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addComponent(pageCurLabel, javax.swing.GroupLayout.PREFERRED_SIZE, 12, javax.swing.GroupLayout.PREFERRED_SIZE)
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addComponent(pageOfLabel)
.addGap(14, 14, 14)
.addComponent(pageTotalLabel, javax.swing.GroupLayout.PREFERRED_SIZE, 18, javax.swing.GroupLayout.PREFERRED_SIZE)
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.UNRELATED)
.addComponent(pageButtonsLabel)
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.UNRELATED)
.addComponent(pagePreviousButton)
.addGap(0, 0, 0)
.addComponent(pageNextButton, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED, 65, Short.MAX_VALUE)
.addComponent(sourceComboBox, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)
.addContainerGap())
.addComponent(jScrollPane1, javax.swing.GroupLayout.DEFAULT_SIZE, 400, Short.MAX_VALUE)
.addComponent(jScrollPane1, javax.swing.GroupLayout.DEFAULT_SIZE, 559, Short.MAX_VALUE)
);
layout.setVerticalGroup(
layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(layout.createSequentialGroup()
.addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addComponent(sourceComboBox, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)
.addComponent(hitPreviousButton, javax.swing.GroupLayout.PREFERRED_SIZE, 23, javax.swing.GroupLayout.PREFERRED_SIZE)
.addComponent(hitNextButton, javax.swing.GroupLayout.PREFERRED_SIZE, 23, javax.swing.GroupLayout.PREFERRED_SIZE)
.addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
.addComponent(hitCountLabel, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)
.addComponent(hitOfLabel)
.addComponent(hitTotalLabel, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)
.addComponent(hitButtonsLabel)
.addComponent(hitLabel)))
.addComponent(hitLabel)
.addComponent(hitButtonsLabel))
.addComponent(hitPreviousButton, javax.swing.GroupLayout.PREFERRED_SIZE, 23, javax.swing.GroupLayout.PREFERRED_SIZE)
.addComponent(hitNextButton, javax.swing.GroupLayout.PREFERRED_SIZE, 23, javax.swing.GroupLayout.PREFERRED_SIZE)
.addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
.addComponent(pageButtonsLabel)
.addComponent(pageTotalLabel)
.addComponent(pagesLabel)
.addComponent(pageCurLabel)
.addComponent(pageOfLabel))
.addComponent(pageNextButton, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)
.addComponent(pagePreviousButton, javax.swing.GroupLayout.PREFERRED_SIZE, 23, javax.swing.GroupLayout.PREFERRED_SIZE))
.addGap(0, 0, 0)
.addComponent(jScrollPane1, javax.swing.GroupLayout.DEFAULT_SIZE, 293, Short.MAX_VALUE))
);
}// </editor-fold>//GEN-END:initComponents
@@ -231,11 +296,24 @@ class ExtractedContentPanel extends javax.swing.JPanel {
private javax.swing.JButton hitPreviousButton;
private javax.swing.JLabel hitTotalLabel;
private javax.swing.JScrollPane jScrollPane1;
private javax.swing.JLabel pageButtonsLabel;
private javax.swing.JLabel pageCurLabel;
private javax.swing.JButton pageNextButton;
private javax.swing.JLabel pageOfLabel;
private javax.swing.JButton pagePreviousButton;
private javax.swing.JLabel pageTotalLabel;
private javax.swing.JLabel pagesLabel;
private javax.swing.JPopupMenu rightClickMenu;
private javax.swing.JMenuItem selectAllMenuItem;
private javax.swing.JComboBox sourceComboBox;
// End of variables declaration//GEN-END:variables


void refreshCurrentMarkup() {
MarkupSource ms = (MarkupSource)sourceComboBox.getSelectedItem();
setPanelText(viewer.getDisplayText(ms));
}

/**
 * Set the available sources (selects the first source in the list by
 * default)
@@ -285,7 +363,7 @@ class ExtractedContentPanel extends javax.swing.JPanel {
}


public void scrollToAnchor(String anchor) {
void scrollToAnchor(String anchor) {
extractedTextPane.scrollToReference(anchor);
}

@@ -293,7 +371,7 @@ class ExtractedContentPanel extends javax.swing.JPanel {
 *
 * @param current, current hit to update the display with
 */
public void updateCurrentDisplay(int current) {
void updateCurrentMatchDisplay(int current) {
hitCountLabel.setText(Integer.toString(current));
}

@@ -301,23 +379,54 @@ class ExtractedContentPanel extends javax.swing.JPanel {
 *
 * @param total total number of hits to update the display with
 */
public void updateTotalDisplay(int total) {
void updateTotaMatcheslDisplay(int total) {
hitTotalLabel.setText(Integer.toString(total));
}


/**
 * reset the current/total display
 *
 * @param current, current page to update the display with
 */
public void resetHitDisplay() {
void updateCurrentPageDisplay(int current) {
pageCurLabel.setText(Integer.toString(current));
}

/**
 *
 * @param total total number of pages to update the display with
 */
void updateTotalPageslDisplay(int total) {
pageTotalLabel.setText(Integer.toString(total));
}


void resetDisplay() {
resetHitDisplay();
resetPagesDisplay();
}

/**
 * reset the current/total hits display
 */
void resetHitDisplay() {
hitTotalLabel.setText("-");
hitCountLabel.setText("-");
}

/**
 * reset the current/total pages display
 */
void resetPagesDisplay() {
pageCurLabel.setText("-");
pageTotalLabel.setText("-");
}

/**
 * enable previous match control
 * @param enable whether to enable or disable
 */
public void enablePrevControl(boolean enable) {
void enablePrevMatchControl(boolean enable) {
hitPreviousButton.setEnabled(enable);
}

@@ -325,19 +434,44 @@ class ExtractedContentPanel extends javax.swing.JPanel {
 * enable next match control
 * @param enable whether to enable or disable
 */
public void enableNextControl(boolean enable) {
void enableNextMatchControl(boolean enable) {
hitNextButton.setEnabled(enable);
}

public void addPrevControlListener(ActionListener l) {
void addPrevMatchControlListener(ActionListener l) {
hitPreviousButton.addActionListener(l);
}

public void addNextControlListener(ActionListener l) {
void addNextMatchControlListener(ActionListener l) {
hitNextButton.addActionListener(l);
}


/**
 * enable previous page control
 * @param enable whether to enable or disable
 */
void enablePrevPageControl(boolean enable) {
pagePreviousButton.setEnabled(enable);
}

public void addSourceComboControlListener(ActionListener l) {
/**
 * enable next page control
 * @param enable whether to enable or disable
 */
void enableNextPageControl(boolean enable) {
pageNextButton.setEnabled(enable);
}

void addPrevPageControlListener(ActionListener l) {
pagePreviousButton.addActionListener(l);
}

void addNextPageControlListener(ActionListener l) {
pageNextButton.addActionListener(l);
}

void addSourceComboControlListener(ActionListener l) {
sourceComboBox.addActionListener(l);
}
}
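The panel now exposes package-private hooks for match navigation and page navigation instead of public ones, and owners wire listeners rather than subclassing. A wiring sketch mirroring what ExtractedContentViewer.getComponent() does in the next file:

```java
// How an owner wires the panel's new match and page controls.
ExtractedContentPanel panel = new ExtractedContentPanel(viewer);
panel.addPrevMatchControlListener(new PrevFindActionListener());
panel.addNextMatchControlListener(new NextFindActionListener());
panel.addPrevPageControlListener(new PrevPageActionListener());
panel.addNextPageControlListener(new NextPageActionListener());
panel.addSourceComboControlListener(new SourceChangeActionListener());
```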
@@ -19,16 +19,15 @@
package org.sleuthkit.autopsy.keywordsearch;

import java.awt.Component;
import java.awt.Cursor;
import java.awt.EventQueue;
import java.awt.event.ActionEvent;
import java.awt.event.ActionListener;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.logging.Level;
import java.util.logging.Logger;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.SolrServerException;
import org.openide.nodes.Node;
import org.openide.util.lookup.ServiceProvider;
@@ -49,9 +48,13 @@ public class ExtractedContentViewer implements DataContentViewer {
private static final Logger logger = Logger.getLogger(ExtractedContentViewer.class.getName());
private ExtractedContentPanel panel;
private ExtractedContentFind find;
private ExtractedContentPaging paging;
private Node currentNode = null;
private MarkupSource currentSource = null;

public ExtractedContentViewer() {
find = new ExtractedContentFind();
paging = new ExtractedContentPaging();
}

@Override
@@ -59,25 +62,35 @@ public class ExtractedContentViewer implements DataContentViewer {

// to clear the viewer
if (selectedNode == null) {
currentNode = null;
resetComponent();
return;
}

this.currentNode = selectedNode;

// sources are custom markup from the node (if available) and default
// markup is fetched from solr
List<MarkupSource> sources = new ArrayList<MarkupSource>();

//add additional registered sources for this node
sources.addAll(selectedNode.getLookup().lookupAll(MarkupSource.class));


if (solrHasContent(selectedNode)) {
Content content = selectedNode.getLookup().lookup(Content.class);
if (content == null) {
return;
}

sources.add(new MarkupSource() {
//add to page tracking if not there yet
final long contentID = content.getId();

MarkupSource newSource = new MarkupSource() {

@Override
public String getMarkup() {
public String getMarkup(int pageNum) {
try {
String content = StringEscapeUtils.escapeHtml(getSolrContent(selectedNode));
String content = StringEscapeUtils.escapeHtml(getSolrContent(selectedNode, this));
return "<pre>" + content.trim() + "</pre>";
} catch (SolrServerException ex) {
logger.log(Level.WARNING, "Couldn't get extracted content.", ex);
@@ -104,10 +117,42 @@ public class ExtractedContentViewer implements DataContentViewer {
public int getNumberHits() {
return 0;
}
});

@Override
public int getNumberPages() {
final Server solrServer = KeywordSearch.getServer();
int numChunks = 0;
try {
numChunks = solrServer.queryNumFileChunks(contentID);

} catch (SolrServerException ex) {
logger.log(Level.WARNING, "Could not get number of chunks: ", ex);

} catch (NoOpenCoreException ex) {
logger.log(Level.WARNING, "Could not get number of chunks: ", ex);
}
return numChunks;
}
};

currentSource = newSource;
sources.add(newSource);

//init paging for all sources for this node, if not inited
for (MarkupSource source : sources) {
if (!paging.isTracked(source)) {
int numPages = source.getNumberPages();
paging.add(source, numPages);
}
}

final int totalPages = paging.getTotalPages(newSource);
final int currentPage = paging.getCurrentPage(newSource);

updatePageControls(currentPage, totalPages);
}


// first source will be the default displayed
setPanel(sources);
// If node has been selected before, return to the previous position
@@ -142,9 +187,11 @@ public class ExtractedContentViewer implements DataContentViewer {
@Override
public Component getComponent() {
if (panel == null) {
panel = new ExtractedContentPanel();
panel.addPrevControlListener(new PrevFindActionListener());
panel.addNextControlListener(new NextFindActionListener());
panel = new ExtractedContentPanel(this);
panel.addPrevMatchControlListener(new PrevFindActionListener());
panel.addNextMatchControlListener(new NextFindActionListener());
panel.addPrevPageControlListener(new PrevPageActionListener());
panel.addNextPageControlListener(new NextPageActionListener());
panel.addSourceComboControlListener(new SourceChangeActionListener());
}
return panel;
@@ -153,7 +200,7 @@ public class ExtractedContentViewer implements DataContentViewer {
@Override
public void resetComponent() {
setPanel(new ArrayList<MarkupSource>());
panel.resetHitDisplay();
panel.resetDisplay();
}

@Override
@@ -169,7 +216,8 @@ public class ExtractedContentViewer implements DataContentViewer {
}

@Override
public boolean isPreferred(Node node, boolean isSupported) {
public boolean isPreferred(Node node,
boolean isSupported) {
BlackboardArtifact art = node.getLookup().lookup(BlackboardArtifact.class);
return isSupported && (art == null || art.getArtifactTypeID() == BlackboardArtifact.ARTIFACT_TYPE.TSK_KEYWORD_HIT.getTypeID());
}
@@ -197,20 +245,15 @@ public class ExtractedContentViewer implements DataContentViewer {
}

final Server solrServer = KeywordSearch.getServer();

SolrQuery q = new SolrQuery();
q.setQuery("*:*");
q.addFilterQuery("id:" + content.getId());
q.setFields("id");

final long contentID = content.getId();

try {
return !solrServer.query(q).getResults().isEmpty();
}
catch (NoOpenCoreException ex) {
return solrServer.queryIsIndexed(contentID);
} catch (NoOpenCoreException ex) {
logger.log(Level.WARNING, "Couldn't determine whether content is supported.", ex);
return false;
}
catch (SolrServerException ex) {
} catch (SolrServerException ex) {
logger.log(Level.WARNING, "Couldn't determine whether content is supported.", ex);
return false;
}
@@ -223,25 +266,38 @@ public class ExtractedContentViewer implements DataContentViewer {
 * @return the extracted content
 * @throws SolrServerException if something goes wrong
 */
private String getSolrContent(Node node) throws SolrServerException {
Server solrServer = KeywordSearch.getServer();
SolrQuery q = new SolrQuery();
q.setQuery("*:*");
q.addFilterQuery("id:" + node.getLookup().lookup(Content.class).getId());
q.setFields("content");
private String getSolrContent(Node node, MarkupSource source) throws SolrServerException {
Content contentObj = node.getLookup().lookup(Content.class);

final Server solrServer = KeywordSearch.getServer();

String content;
//if no paging, curChunk is 0
int curPage = paging.getCurrentPage(source);

String content = null;
try {
content = (String) solrServer.query(q).getResults().get(0).getFieldValue("content");
}
catch (NoOpenCoreException ex) {
logger.log(Level.WARNING, "Couldn't get Solr content.", ex);
content = (String) solrServer.getSolrContent(contentObj, curPage);
} catch (NoOpenCoreException ex) {
logger.log(Level.WARNING, "Couldn't get text content.", ex);
return "";
}
return content;
}

class NextFindActionListener implements ActionListener {
/**
 * figure out text to show from the page number and source
 *
 * @param source the current source
 * @return text for the source and the current page num
 */

String getDisplayText(MarkupSource source) {
currentSource = source;
int pageNum = paging.getCurrentPage(currentSource);
return source.getMarkup(pageNum);
}

private class NextFindActionListener implements ActionListener {

@Override
public void actionPerformed(ActionEvent e) {
@@ -253,21 +309,21 @@ public class ExtractedContentViewer implements DataContentViewer {
panel.scrollToAnchor(source.getAnchorPrefix() + Long.toString(indexVal));

//update display
panel.updateCurrentDisplay(find.getCurrentIndexI(source) + 1);
panel.updateTotalDisplay(find.getCurrentIndexTotal(source));
panel.updateCurrentMatchDisplay(find.getCurrentIndexI(source) + 1);
panel.updateTotaMatcheslDisplay(find.getCurrentIndexTotal(source));

//update controls if needed
if (!find.hasNext(source)) {
panel.enableNextControl(false);
panel.enableNextMatchControl(false);
}
if (find.hasPrevious(source)) {
panel.enablePrevControl(true);
panel.enablePrevMatchControl(true);
}
}
}
}

class PrevFindActionListener implements ActionListener {
private class PrevFindActionListener implements ActionListener {

@Override
public void actionPerformed(ActionEvent e) {
@@ -279,21 +335,21 @@ public class ExtractedContentViewer implements DataContentViewer {
panel.scrollToAnchor(source.getAnchorPrefix() + Long.toString(indexVal));

//update display
panel.updateCurrentDisplay(find.getCurrentIndexI(source) + 1);
panel.updateTotalDisplay(find.getCurrentIndexTotal(source));
panel.updateCurrentMatchDisplay(find.getCurrentIndexI(source) + 1);
panel.updateTotaMatcheslDisplay(find.getCurrentIndexTotal(source));

//update controls if needed
if (!find.hasPrevious(source)) {
panel.enablePrevControl(false);
panel.enablePrevMatchControl(false);
}
if (find.hasNext(source)) {
panel.enableNextControl(true);
panel.enableNextMatchControl(true);
}
}
}
}

class SourceChangeActionListener implements ActionListener {
private class SourceChangeActionListener implements ActionListener {

@Override
public void actionPerformed(ActionEvent e) {
@@ -302,26 +358,109 @@ public class ExtractedContentViewer implements DataContentViewer {
//setup find controls
if (source != null && source.isSearchable()) {
find.init(source);
panel.updateCurrentDisplay(find.getCurrentIndexI(source) + 1);
panel.updateTotalDisplay(find.getCurrentIndexTotal(source));
panel.updateCurrentMatchDisplay(find.getCurrentIndexI(source) + 1);
panel.updateTotaMatcheslDisplay(find.getCurrentIndexTotal(source));

if (find.hasNext(source)) {
panel.enableNextControl(true);
panel.enableNextMatchControl(true);
} else {
panel.enableNextControl(false);
panel.enableNextMatchControl(false);
}

if (find.hasPrevious(source)) {
panel.enablePrevControl(true);
panel.enablePrevMatchControl(true);
} else {
panel.enablePrevControl(false);
panel.enablePrevMatchControl(false);
}
} else {
panel.enableNextControl(false);
panel.enablePrevControl(false);
panel.enableNextMatchControl(false);
panel.enablePrevMatchControl(false);
}


}
}

private void updatePageControls(int currentPage, int totalPages) {

if (totalPages == 0) {
//no chunks case
panel.updateTotalPageslDisplay(1);
panel.updateCurrentPageDisplay(1);
} else {
panel.updateTotalPageslDisplay(totalPages);
panel.updateCurrentPageDisplay(currentPage);
}

if (totalPages < 2) {
panel.enableNextPageControl(false);
panel.enablePrevPageControl(false);
} else {
if (currentPage < totalPages) {
panel.enableNextPageControl(true);
} else {
panel.enableNextPageControl(false);
}

if (currentPage > 1) {
panel.enablePrevPageControl(true);
} else {
panel.enablePrevPageControl(false);
}
}

}

class NextPageActionListener implements ActionListener {

@Override
public void actionPerformed(ActionEvent e) {

if (paging.hasNext(currentSource)) {
paging.next(currentSource);

//set new text
panel.setCursor(Cursor.getPredefinedCursor(Cursor.WAIT_CURSOR));
panel.refreshCurrentMarkup();
panel.setCursor(null);

//update display
panel.updateCurrentPageDisplay(paging.getCurrentPage(currentSource));

//update controls if needed
if (!paging.hasNext(currentSource)) {
panel.enableNextPageControl(false);
}
if (paging.hasPrevious(currentSource)) {
panel.enablePrevPageControl(true);
}
}
}
}

private class PrevPageActionListener implements ActionListener {

@Override
public void actionPerformed(ActionEvent e) {
if (paging.hasPrevious(currentSource)) {
paging.previous(currentSource);

//set new text
panel.setCursor(Cursor.getPredefinedCursor(Cursor.WAIT_CURSOR));
panel.refreshCurrentMarkup();
panel.setCursor(null);

//update display
panel.updateCurrentPageDisplay(paging.getCurrentPage(currentSource));

//update controls if needed
if (!paging.hasPrevious(currentSource)) {
panel.enablePrevPageControl(false);
}
if (paging.hasNext(currentSource)) {
panel.enableNextPageControl(true);
}
}
}
}
}
@ -0,0 +1,157 @@
/*
* Autopsy Forensic Browser
*
* Copyright 2011 Basis Technology Corp.
* Contact: carrier <at> sleuthkit <dot> org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.sleuthkit.autopsy.keywordsearch;

import java.io.FileOutputStream;
import java.io.IOException;
import java.util.logging.Level;
import java.util.logging.Logger;
import org.openide.util.Exceptions;
import org.sleuthkit.autopsy.datamodel.FsContentStringStream;
import org.sleuthkit.autopsy.keywordsearch.Ingester.IngesterException;
import org.sleuthkit.datamodel.File;

/**
* Utility to extract and index a file as file chunks
*/
public class FileExtract {

private int numChunks;
public static final long MAX_CHUNK_SIZE = 10 * 1024 * 1024L;
private static final Logger logger = Logger.getLogger(FileExtract.class.getName());
private static final long MAX_STRING_CHUNK_SIZE = 1 * 1024 * 1024L;
private File sourceFile;

//single static buffer for all extractions. Safe, indexing can only happen in one thread
private static final byte[] STRING_CHUNK_BUF = new byte[(int) MAX_STRING_CHUNK_SIZE];

public FileExtract(File sourceFile) {
this.sourceFile = sourceFile;
numChunks = 0; //unknown until indexing is done
}

public int getNumChunks() {
return this.numChunks;
}

public File getSourceFile() {
return sourceFile;
}

public boolean index(Ingester ingester) throws IngesterException {
boolean success = false;

FsContentStringStream stringStream = null;
try {
success = true;
//break string into chunks
//Note: could use DataConversion.toString() since we are operating on fixed chunks
//but FsContentStringStream handles string boundary case better
stringStream = new FsContentStringStream(sourceFile, FsContentStringStream.Encoding.UTF8, true);
long readSize = 0;

while ((readSize = stringStream.read(STRING_CHUNK_BUF, 0, (int) MAX_STRING_CHUNK_SIZE)) != -1) {
//FileOutputStream debug = new FileOutputStream("c:\\temp\\" + sourceFile.getName() + Integer.toString(this.numChunks+1));
//debug.write(STRING_CHUNK_BUF, 0, (int)readSize);

FileExtractedChild chunk = new FileExtractedChild(this, this.numChunks + 1);

try {
chunk.index(ingester, STRING_CHUNK_BUF, readSize);
++this.numChunks;
} catch (IngesterException ingEx) {
success = false;
logger.log(Level.WARNING, "Ingester had a problem with extracted strings from file '" + sourceFile.getName() + "' (id: " + sourceFile.getId() + ").", ingEx);
}
//debug.close();
}

//after all chunks, ingest the parent file without content itself, and store numChunks
ingester.ingest(this);

} catch (IOException ex) {
logger.log(Level.WARNING, "Unable to read string stream and send to Solr, file: " + sourceFile.getName(), ex);
success = false;
} finally {
if (stringStream != null) {
try {
stringStream.close();
} catch (IOException ex) {
Exceptions.printStackTrace(ex);
}
}
}

return success;
}
}
/**
* Represents each string chunk, a child of the extracted file
*/
class FileExtractedChild {

private int chunkID;
private FileExtract parent;

FileExtractedChild(FileExtract parent, int chunkID) {
this.parent = parent;
this.chunkID = chunkID;
}

public FileExtract getParentFile() {
return parent;
}

public int getChunkId() {
return chunkID;
}

/**
* return String representation of the absolute id (parent and child)
* @return
*/
public String getIdString() {
return getFileExtractChildId(this.parent.getSourceFile().getId(), this.chunkID);
}

public boolean index(Ingester ingester, byte[] content, long contentSize) throws IngesterException {
boolean success = true;
ByteContentStream bcs = new ByteContentStream(content, contentSize, parent.getSourceFile(), FsContentStringStream.Encoding.UTF8);
try {
ingester.ingest(this, bcs);
//logger.log(Level.INFO, "Ingesting string chunk: " + this.getName() + ": " + chunkID);

} catch (Exception ingEx) {
success = false;
throw new IngesterException("Problem ingesting file string chunk: " + parent.getSourceFile().getId() + ", chunk: " + chunkID, ingEx);
}
return success;
}

public static String getFileExtractChildId(long parentID, int childID) {
return Long.toString(parentID) + "_" + Integer.toString(childID);
}
}
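
A quick sketch of the chunk ID scheme implemented by getFileExtractChildId() above: each chunk is indexed under the parent file's object ID joined to its 1-based chunk index with an underscore, while the parent file itself is indexed under the plain ID (with num_chunks set, as the Ingester changes below show). The parentId value here is a made-up example, not taken from the commit:

long parentId = 1234L; //hypothetical object id, for illustration only
for (int chunk = 1; chunk <= 3; ++chunk) {
//prints "1234_1", "1234_2", "1234_3" - the Solr document ids of the chunks
System.out.println(FileExtractedChild.getFileExtractChildId(parentId, chunk));
}

This flat naming lets a later query split a hit ID on the underscore to recover both the file and the chunk, which is exactly what the LuceneQuery changes further down do.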
@ -33,14 +33,13 @@ import org.sleuthkit.datamodel.FsContent;
* Then, an adapter back to Solr's ContentStream (which is a specific InputStream),
* using the same encoding
*/
public class FsContentStringContentStream implements ContentStream {
//input

private FsContent content;
private Encoding encoding;

//converted
private FsContentStringStream stream;

private static Logger logger = Logger.getLogger(FsContentStringContentStream.class.getName());

public FsContentStringContentStream(FsContent content, Encoding encoding) {
@ -53,7 +52,6 @@ public class FsContentStringContentStream implements ContentStream {
return content;
}

@Override
public String getContentType() {
return "text/plain;charset=" + encoding.toString();
@ -86,4 +84,10 @@ public class FsContentStringContentStream implements ContentStream {
return stream;
}

@Override
protected void finalize() throws Throwable {
super.finalize();

stream.close();
}
}

@ -26,8 +26,8 @@ import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.SolrRequest.METHOD;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.openide.util.Exceptions;
import org.sleuthkit.autopsy.datamodel.HighlightLookup;
import org.sleuthkit.autopsy.keywordsearch.Server.Core;
import org.sleuthkit.datamodel.Content;

/**
@ -45,6 +45,7 @@ class HighlightedMatchesSource implements MarkupSource, HighlightLookup {
private String solrQuery;
private Server solrServer;
private int numberHits;
private int numberPages;
private boolean isRegex = false;
private boolean group = true;

@ -56,6 +57,15 @@ class HighlightedMatchesSource implements MarkupSource, HighlightLookup {

this.solrServer = KeywordSearch.getServer();

this.numberPages = 0;
try {
this.numberPages = solrServer.queryNumFileChunks(content.getId());
} catch (SolrServerException ex) {
logger.log(Level.WARNING, "Could not get number pages for content: " + content.getId());
} catch (NoOpenCoreException ex) {
logger.log(Level.WARNING, "Could not get number pages for content: " + content.getId());
}

}

HighlightedMatchesSource(Content content, String solrQuery, boolean isRegex, boolean group) {
@ -68,7 +78,13 @@ class HighlightedMatchesSource implements MarkupSource, HighlightLookup {
}

@Override
public String getMarkup() {
public int getNumberPages() {
return this.numberPages;
}

@Override
public String getMarkup(int pageNum) {
String highLightField = null;

String highlightQuery = solrQuery;
@ -110,17 +126,25 @@ class HighlightedMatchesSource implements MarkupSource, HighlightLookup {
// q.setQuery(highLightField + ":" + highlightQuery);
//else q.setQuery(highlightQuery); //use default field, simplifies query

q.addFilterQuery("id:" + content.getId());
final long contentId = content.getId();

String contentIdStr = Long.toString(contentId);
if (pageNum > 0)
contentIdStr += "_" + Integer.toString(pageNum);

final String filterQuery = Server.Schema.ID.toString() + ":" + contentIdStr;
q.addFilterQuery(filterQuery);
q.addHighlightField(highLightField); //for exact highlighting, try content_ws field (with stored="true" in Solr schema)
q.setHighlightSimplePre(HIGHLIGHT_PRE);
q.setHighlightSimplePost(HIGHLIGHT_POST);
q.setHighlightFragsize(0); // don't fragment the highlight
q.setParam("hl.maxAnalyzedChars", Server.HL_ANALYZE_CHARS_UNLIMITED); //analyze all content

try {
QueryResponse response = solrServer.query(q, METHOD.POST);
Map<String, Map<String, List<String>>> responseHighlight = response.getHighlighting();
long contentID = content.getId();
Map<String, List<String>> responseHighlightID = responseHighlight.get(Long.toString(contentID));

Map<String, List<String>> responseHighlightID = responseHighlight.get(contentIdStr);
if (responseHighlightID == null) {
return NO_MATCHES;
}
@ -135,11 +159,11 @@ class HighlightedMatchesSource implements MarkupSource, HighlightLookup {
}
}
catch (NoOpenCoreException ex) {
logger.log(Level.WARNING, "Couldn't query markup.", ex);
logger.log(Level.WARNING, "Couldn't query markup for page: " + pageNum, ex);
return "";
}
catch (SolrServerException ex) {
logger.log(Level.INFO, "Could not query markup. ", ex);
logger.log(Level.WARNING, "Could not query markup for page: " + pageNum, ex);
return "";
}
}

@ -18,6 +18,7 @@
*/
package org.sleuthkit.autopsy.keywordsearch;

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.Reader;
@ -44,7 +45,7 @@ import org.sleuthkit.datamodel.ReadContentInputStream;
/**
* Handles indexing files on a Solr core.
*/
class Ingester {
public class Ingester {

private static final Logger logger = Logger.getLogger(Ingester.class.getName());
private boolean uncommitedIngests = false;
@ -57,6 +58,7 @@ class Ingester {
"bmp", "gif", "png", "jpeg", "tiff", "mp3", "aiff", "au", "midi", "wav",
"pst", "xml", "class", "dwg", "eml", "emlx", "mbox", "mht"};

Ingester() {
}

@ -79,9 +81,29 @@
* @throws IngesterException if there was an error processing a specific
* file, but the Solr server is probably fine.
*/
public void ingest(FsContentStringContentStream fcs) throws IngesterException {
void ingest(FsContentStringContentStream fcs) throws IngesterException {
Map<String, String> params = getFsContentFields(fcs.getFsContent());
ingest(fcs, params, fcs.getFsContent());
ingest(fcs, params, fcs.getFsContent().getSize());
}

void ingest(FileExtract fe) throws IngesterException {
Map<String, String> params = getFsContentFields(fe.getSourceFile());

params.put(Server.Schema.NUM_CHUNKS.toString(), Integer.toString(fe.getNumChunks()));

ingest(new NullContentStream(fe.getSourceFile()), params, 0);
}

//chunk stream
void ingest(FileExtractedChild fec, ByteContentStream bcs) throws IngesterException {
FsContent sourceFsContent = bcs.getFsContent();
Map<String, String> params = getFsContentFields(sourceFsContent);

//overwrite, TODO set separately
params.put(Server.Schema.ID.toString(),
FileExtractedChild.getFileExtractChildId(sourceFsContent.getId(), fec.getChunkId()));

ingest(bcs, params, FileExtract.MAX_CHUNK_SIZE);
}

/**
@ -92,8 +114,8 @@
* @throws IngesterException if there was an error processing a specific
* file, but the Solr server is probably fine.
*/
public void ingest(FsContent f) throws IngesterException {
ingest(new FscContentStream(f), getFsContentFields(f), f);
void ingest(FsContent f) throws IngesterException {
ingest(new FscContentStream(f), getFsContentFields(f), f.getSize());
}

/**
@ -103,12 +125,12 @@
*/
private Map<String, String> getFsContentFields(FsContent fsc) {
Map<String, String> fields = new HashMap<String, String>();
fields.put("id", Long.toString(fsc.getId()));
fields.put("file_name", fsc.getName());
fields.put("ctime", fsc.getCtimeAsDate());
fields.put("atime", fsc.getAtimeAsDate());
fields.put("mtime", fsc.getMtimeAsDate());
fields.put("crtime", fsc.getMtimeAsDate());
fields.put(Server.Schema.ID.toString(), Long.toString(fsc.getId()));
fields.put(Server.Schema.FILE_NAME.toString(), fsc.getName());
fields.put(Server.Schema.CTIME.toString(), fsc.getCtimeAsDate());
fields.put(Server.Schema.ATIME.toString(), fsc.getAtimeAsDate());
fields.put(Server.Schema.MTIME.toString(), fsc.getMtimeAsDate());
fields.put(Server.Schema.CRTIME.toString(), fsc.getMtimeAsDate());
return fields;
}

@ -117,11 +139,11 @@
*
* @param ContentStream to ingest
* @param fields content specific fields
* @param sourceContent fsContent from which the cs content stream originated from
* @param size size of the content
* @throws IngesterException if there was an error processing a specific
* content, but the Solr server is probably fine.
*/
private void ingest(ContentStream cs, Map<String, String> fields, final FsContent sourceContent) throws IngesterException {
private void ingest(ContentStream cs, Map<String, String> fields, final long size) throws IngesterException {
final ContentStreamUpdateRequest up = new ContentStreamUpdateRequest("/update/extract");
up.addContentStream(cs);
setFields(up, fields);
@ -138,7 +160,7 @@
final Future<?> f = upRequestExecutor.submit(new UpRequestTask(up));

try {
f.get(getTimeout(sourceContent), TimeUnit.SECONDS);
f.get(getTimeout(size), TimeUnit.SECONDS);
} catch (TimeoutException te) {
logger.log(Level.WARNING, "Solr timeout encountered, trying to restart Solr");
//restart may be needed to recover from some error conditions
@ -162,13 +184,11 @@
}

/**
* return timeout that should be use to index the content
* TODO adjust them more as needed, and handle file chunks
* @param f the source FsContent
* return timeout that should be used to index the content
* @param size size of the content
* @return time in seconds to use a timeout
*/
private static int getTimeout(FsContent f) {
final long size = f.getSize();
private static int getTimeout(long size) {
if (size < 1024 * 1024L) //1MB
{
return 60;
@ -291,6 +311,48 @@
}
}

/**
* ContentStream associated with FsContent, but forced with no content
*/
private static class NullContentStream implements ContentStream {

FsContent f;

NullContentStream(FsContent f) {
this.f = f;
}

@Override
public String getName() {
return f.getName();
}

@Override
public String getSourceInfo() {
return "File:" + f.getId();
}

@Override
public String getContentType() {
return null;
}

@Override
public Long getSize() {
return 0L;
}

@Override
public InputStream getStream() throws IOException {
return new ByteArrayInputStream(new byte[0]);
}

@Override
public Reader getReader() throws IOException {
throw new UnsupportedOperationException("Not supported yet.");
}
}

/**
* Indicates that there was an error with the specific ingest operation,
* but it's still okay to continue ingesting files.
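
The getTimeout(long size) change above keys the Solr request timeout to the size of the content being indexed; the hunk only shows the first tier (under 1MB returns 60 seconds). A sketch of the tiering idea follows, where every threshold and return value past the first tier is an illustrative assumption, not taken from this commit:

private static int getTimeoutSketch(long size) {
    if (size < 1024 * 1024L) {
        return 60; //<1MB: the tier visible in the hunk above
    } else if (size < 10 * 1024 * 1024L) {
        return 1200; //assumed intermediate tier for mid-sized content
    } else {
        return 3600; //assumed upper bound for very large content
    }
}

Switching the parameter from FsContent to a plain size also lets chunk ingests pass FileExtract.MAX_CHUNK_SIZE instead of the whole file's size, so a single slow chunk no longer inherits a timeout sized for the entire file.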
@ -45,17 +45,19 @@ import org.sleuthkit.datamodel.File;
class KeywordSearchFilterNode extends FilterNode {

String solrQuery;
int previewChunk;

KeywordSearchFilterNode(HighlightedMatchesSource highlights, Node original, String solrQuery) {
KeywordSearchFilterNode(HighlightedMatchesSource highlights, Node original, String solrQuery, int previewChunk) {
super(original, null, new ProxyLookup(Lookups.singleton(highlights), original.getLookup()));
this.solrQuery = solrQuery;
this.previewChunk = previewChunk;
}

String getSnippet() {
final Content content = this.getOriginal().getLookup().lookup(Content.class);
String snippet;
try {
snippet = LuceneQuery.querySnippet(solrQuery, content.getId(), false, true);
snippet = LuceneQuery.querySnippet(solrQuery, content.getId(), previewChunk, false, true);
} catch (NoOpenCoreException ex) {
//logger.log(Level.WARNING, "Could not perform the snippet query. ", ex);
return "";

@ -18,6 +18,8 @@
*/
package org.sleuthkit.autopsy.keywordsearch;

import java.io.IOException;
import org.openide.util.Exceptions;
import org.sleuthkit.autopsy.datamodel.FsContentStringStream;
import java.awt.event.ActionEvent;
import java.awt.event.ActionListener;
@ -28,7 +30,6 @@ import java.util.List;
import java.util.Map;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.swing.SwingUtilities;
import javax.swing.SwingWorker;
import javax.swing.Timer;
import org.apache.commons.lang.StringEscapeUtils;
@ -47,6 +48,7 @@ import org.sleuthkit.autopsy.keywordsearch.Ingester.IngesterException;
import org.sleuthkit.datamodel.BlackboardArtifact;
import org.sleuthkit.datamodel.BlackboardArtifact.ARTIFACT_TYPE;
import org.sleuthkit.datamodel.BlackboardAttribute;
import org.sleuthkit.datamodel.File;
import org.sleuthkit.datamodel.FsContent;
import org.sleuthkit.datamodel.SleuthkitCase;
import org.sleuthkit.datamodel.TskData;
@ -59,7 +61,7 @@ public final class KeywordSearchIngestService implements IngestServiceFsContent
public static final String MODULE_DESCRIPTION = "Performs file indexing and periodic search using keywords and regular expressions in lists.";
private static KeywordSearchIngestService instance = null;
private IngestManagerProxy managerProxy;
private static final long MAX_STRING_EXTRACT_SIZE = 1 * (1 << 10) * (1 << 10);
private static final long MAX_STRING_CHUNK_SIZE = 1 * (1 << 10) * (1 << 10);
private static final long MAX_INDEX_SIZE = 100 * (1 << 10) * (1 << 10);
private Ingester ingester = null;
private volatile boolean commitIndex = false; //whether to commit index next time
@ -71,7 +73,7 @@ public final class KeywordSearchIngestService implements IngestServiceFsContent
private Indexer indexer;
private Searcher searcher;
private volatile boolean searcherDone = true;
private Map<Keyword, List<FsContent>> currentResults;
private Map<Keyword, List<ContentHit>> currentResults;
private volatile int messageID = 0;
private boolean processedFiles;
private volatile boolean finalRun = false;
@ -79,13 +81,12 @@ public final class KeywordSearchIngestService implements IngestServiceFsContent
private final String hashDBServiceName = "Hash Lookup";
private SleuthkitCase caseHandle = null;
boolean initialized = false;

private final byte[] STRING_CHUNK_BUF = new byte[(int) MAX_STRING_CHUNK_SIZE];

public enum IngestStatus {

INGESTED, EXTRACTED_INGESTED, SKIPPED,};
private Map<Long, IngestStatus> ingestStatus;
private Map<String, List<FsContent>> reportedHits; //already reported hits

public static synchronized KeywordSearchIngestService getDefault() {
if (instance == null) {
@ -220,8 +221,6 @@ public final class KeywordSearchIngestService implements IngestServiceFsContent

ingestStatus = new HashMap<Long, IngestStatus>();

reportedHits = new HashMap<String, List<FsContent>>();

keywords = new ArrayList<Keyword>();
keywordLists = new ArrayList<String>();
keywordToList = new HashMap<String, String>();
@ -237,7 +236,7 @@ public final class KeywordSearchIngestService implements IngestServiceFsContent
finalRunComplete = false;
searcherDone = true; //make sure to start the initial searcher
//keeps track of all results per run not to repeat reporting the same hits
currentResults = new HashMap<Keyword, List<FsContent>>();
currentResults = new HashMap<Keyword, List<ContentHit>>();

indexer = new Indexer();

@ -416,18 +415,18 @@ public final class KeywordSearchIngestService implements IngestServiceFsContent
private final Logger logger = Logger.getLogger(Indexer.class.getName());
private static final String DELETED_MSG = "The file is an unallocated or orphan file (deleted) and entire content is no longer recoverable. ";

private boolean extractAndIngest(FsContent f) {
boolean success = false;
FsContentStringContentStream fscs = new FsContentStringContentStream(f, FsContentStringStream.Encoding.UTF8);
private boolean extractAndIngest(File file) {
boolean indexed = false;
FileExtract fe = new FileExtract(file);
try {
ingester.ingest(fscs);
success = true;
} catch (IngesterException ingEx) {
logger.log(Level.WARNING, "Ingester had a problem with extracted strings from file '" + f.getName() + "' (id: " + f.getId() + ").", ingEx);
} catch (Exception ingEx) {
logger.log(Level.WARNING, "Ingester had a problem with extracted strings from file '" + f.getName() + "' (id: " + f.getId() + ").", ingEx);
indexed = fe.index(ingester);
} catch (IngesterException ex) {
logger.log(Level.WARNING, "Error extracting strings and indexing file: " + file.getName(), ex);
indexed = false;
}
return success;

return indexed;

}

private void indexFile(FsContent fsContent) {
@ -436,17 +435,21 @@ public final class KeywordSearchIngestService implements IngestServiceFsContent
if (!fsContent.isFile()) {
return;
}
File file = (File) fsContent;

if (size == 0 || size > MAX_INDEX_SIZE) {
boolean ingestible = Ingester.isIngestible(file);

//limit size of entire file, do not limit strings
if (size == 0 || (ingestible && size > MAX_INDEX_SIZE)) {
ingestStatus.put(fsContent.getId(), IngestStatus.SKIPPED);
return;
}

boolean ingestible = Ingester.isIngestible(fsContent);
final String fileName = fsContent.getName();

final String fileName = file.getName();

String deletedMessage = "";
if ((fsContent.getMeta_flags() & (TskData.TSK_FS_META_FLAG_ENUM.ORPHAN.getMetaFlag() | TskData.TSK_FS_META_FLAG_ENUM.UNALLOC.getMetaFlag())) != 0) {
if ((file.getMeta_flags() & (TskData.TSK_FS_META_FLAG_ENUM.ORPHAN.getMetaFlag() | TskData.TSK_FS_META_FLAG_ENUM.UNALLOC.getMetaFlag())) != 0) {
deletedMessage = DELETED_MSG;
}

@ -454,37 +457,33 @@ public final class KeywordSearchIngestService implements IngestServiceFsContent

try {
//logger.log(Level.INFO, "indexing: " + fsContent.getName());
ingester.ingest(fsContent);
ingestStatus.put(fsContent.getId(), IngestStatus.INGESTED);
ingester.ingest(file);
ingestStatus.put(file.getId(), IngestStatus.INGESTED);
} catch (IngesterException e) {
ingestStatus.put(fsContent.getId(), IngestStatus.SKIPPED);
ingestStatus.put(file.getId(), IngestStatus.SKIPPED);
//try to extract strings
boolean processed = processNonIngestible(fsContent);
boolean processed = processNonIngestible(file);
//postIngestibleErrorMessage(processed, fileName, deletedMessage);

} catch (Exception e) {
ingestStatus.put(fsContent.getId(), IngestStatus.SKIPPED);
ingestStatus.put(file.getId(), IngestStatus.SKIPPED);
//try to extract strings
boolean processed = processNonIngestible(fsContent);
boolean processed = processNonIngestible(file);

//postIngestibleErrorMessage(processed, fileName, deletedMessage);

}
} else {
boolean processed = processNonIngestible(fsContent);
boolean processed = processNonIngestible(file);
//postNonIngestibleErrorMessage(processed, fsContent, deletedMessage);

}
}

private void postNonIngestibleErrorMessage(boolean stringsExtracted, FsContent fsContent, String deletedMessage) {
String fileName = fsContent.getName();
private void postNonIngestibleErrorMessage(boolean stringsExtracted, File file, String deletedMessage) {
String fileName = file.getName();
if (!stringsExtracted) {
if (fsContent.getSize() < MAX_STRING_EXTRACT_SIZE) {
managerProxy.postMessage(IngestMessage.createErrorMessage(++messageID, KeywordSearchIngestService.instance, "Error indexing strings: " + fileName, "Error encountered extracting string content from this file (of unsupported format). " + deletedMessage + "The file will not be included in the search results.<br />File: " + fileName));
} else {
managerProxy.postMessage(IngestMessage.createMessage(++messageID, IngestMessage.MessageType.INFO, KeywordSearchIngestService.instance, "Skipped indexing strings: " + fileName, "Skipped extracting string content from this file (of unsupported format) due to the file size. The file will not be included in the search results.<br />File: " + fileName));
}
managerProxy.postMessage(IngestMessage.createMessage(++messageID, IngestMessage.MessageType.INFO, KeywordSearchIngestService.instance, "Skipped indexing strings: " + fileName, "Skipped extracting string content from this file (of unsupported format) due to the file size. The file will not be included in the search results.<br />File: " + fileName));
}

}
@ -497,20 +496,16 @@ public final class KeywordSearchIngestService implements IngestServiceFsContent
}
}

private boolean processNonIngestible(FsContent fsContent) {
if (fsContent.getSize() < MAX_STRING_EXTRACT_SIZE) {
if (!extractAndIngest(fsContent)) {
logger.log(Level.WARNING, "Failed to extract strings and ingest, file '" + fsContent.getName() + "' (id: " + fsContent.getId() + ").");
ingestStatus.put(fsContent.getId(), IngestStatus.SKIPPED);
return false;
} else {
ingestStatus.put(fsContent.getId(), IngestStatus.EXTRACTED_INGESTED);
return true;
}
} else {
ingestStatus.put(fsContent.getId(), IngestStatus.SKIPPED);
private boolean processNonIngestible(File file) {
if (!extractAndIngest(file)) {
logger.log(Level.WARNING, "Failed to extract strings and ingest, file '" + file.getName() + "' (id: " + file.getId() + ").");
ingestStatus.put(file.getId(), IngestStatus.SKIPPED);
return false;
} else {
ingestStatus.put(file.getId(), IngestStatus.EXTRACTED_INGESTED);
return true;
}

}
}

@ -565,7 +560,7 @@ public final class KeywordSearchIngestService implements IngestServiceFsContent
del = new TermComponentQuery(keywordQuery);
}

Map<String, List<FsContent>> queryResult = null;
Map<String, List<ContentHit>> queryResult = null;

try {
queryResult = del.performQuery();
@ -581,23 +576,23 @@ public final class KeywordSearchIngestService implements IngestServiceFsContent
}

//calculate new results by subtracting results already obtained in this run
Map<Keyword, List<FsContent>> newResults = new HashMap<Keyword, List<FsContent>>();
Map<Keyword, List<ContentHit>> newResults = new HashMap<Keyword, List<ContentHit>>();

for (String termResult : queryResult.keySet()) {
List<FsContent> queryTermResults = queryResult.get(termResult);
List<ContentHit> queryTermResults = queryResult.get(termResult);
Keyword termResultK = new Keyword(termResult, !isRegex);
List<FsContent> curTermResults = currentResults.get(termResultK);
List<ContentHit> curTermResults = currentResults.get(termResultK);
if (curTermResults == null) {
currentResults.put(termResultK, queryTermResults);
newResults.put(termResultK, queryTermResults);
} else {
//some fscontent hits already exist for this keyword
for (FsContent res : queryTermResults) {
if (!curTermResults.contains(res)) {
for (ContentHit res : queryTermResults) {
if (!previouslyHit(curTermResults, res)) {
//add to new results
List<FsContent> newResultsFs = newResults.get(termResultK);
List<ContentHit> newResultsFs = newResults.get(termResultK);
if (newResultsFs == null) {
newResultsFs = new ArrayList<FsContent>();
newResultsFs = new ArrayList<ContentHit>();
newResults.put(termResultK, newResultsFs);
}
newResultsFs.add(res);
@ -610,16 +605,32 @@ public final class KeywordSearchIngestService implements IngestServiceFsContent

if (!newResults.isEmpty()) {

//write results to BB
Collection<BlackboardArtifact> newArtifacts = new ArrayList<BlackboardArtifact>(); //new artifacts to report
for (final Keyword hitTerm : newResults.keySet()) {
List<FsContent> fsContentHits = newResults.get(hitTerm);
for (final FsContent hitFile : fsContentHits) {
List<ContentHit> contentHitsAll = newResults.get(hitTerm);
Map<FsContent, Integer> contentHitsFlattened = ContentHit.flattenResults(contentHitsAll);
for (final FsContent hitFile : contentHitsFlattened.keySet()) {
if (this.isCancelled()) {
return null;
}
KeywordWriteResult written = del.writeToBlackBoard(hitTerm.getQuery(), hitFile, listName);

String snippet = null;
final String snippetQuery = KeywordSearchUtil.escapeLuceneQuery(hitTerm.getQuery(), true, false);
int chunkId = contentHitsFlattened.get(hitFile);
try {
snippet = LuceneQuery.querySnippet(snippetQuery, hitFile.getId(), chunkId, isRegex, true);
} catch (NoOpenCoreException e) {
logger.log(Level.WARNING, "Error querying snippet: " + snippetQuery, e);
//no reason to continue
return null;
} catch (Exception e) {
logger.log(Level.WARNING, "Error querying snippet: " + snippetQuery, e);
continue;
}

KeywordWriteResult written = del.writeToBlackBoard(hitTerm.getQuery(), hitFile, snippet, listName);
if (written == null) {
//logger.log(Level.INFO, "BB artifact for keyword not written: " + hitTerm.toString());
continue;
@ -726,4 +737,17 @@ public final class KeywordSearchIngestService implements IngestServiceFsContent
}
}
}

//check if fscontent already hit, ignore chunks
private static boolean previouslyHit(List<ContentHit> contents, ContentHit hit) {
boolean ret = false;
long hitId = hit.getId();
for (ContentHit c : contents) {
if (c.getId() == hitId) {
ret = true;
break;
}
}
return ret;
}
}
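
ContentHit.flattenResults(), called throughout the searcher above, is not included in this diff. Judging from the call sites (it is invoked both on a List of hits and on the full Map of term results, and its value is used as a preview chunk id), a plausible sketch of the List-overload contract is: collapse per-chunk hits down to one entry per unique file, remembering one representative chunk id (0 for unchunked hits) to drive the snippet preview. The isChunk() and getChunkId() accessors below are assumed, not confirmed by this commit:

static Map<FsContent, Integer> flattenResultsSketch(List<ContentHit> hits) {
    //preserve hit order, one entry per unique file
    Map<FsContent, Integer> flattened = new LinkedHashMap<FsContent, Integer>();
    for (ContentHit hit : hits) {
        FsContent f = hit.getContent(); //getContent() is shown in this diff
        if (!flattened.containsKey(f)) {
            //keep the first chunk seen per file; 0 means the hit was not chunked
            flattened.put(f, hit.isChunk() ? hit.getChunkId() : 0);
        }
    }
    return flattened;
}

This pairs with previouslyHit() above, which also deliberately compares hits by file id alone so that additional chunks of an already-reported file do not produce duplicate artifacts.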
@ -18,7 +18,8 @@
*/
package org.sleuthkit.autopsy.keywordsearch;

import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import org.openide.nodes.AbstractNode;
import org.openide.nodes.Node;
import org.sleuthkit.autopsy.datamodel.RootContentChildren;
@ -30,8 +31,9 @@ import org.sleuthkit.datamodel.FsContent;
*/
class KeywordSearchNode extends AbstractNode {

KeywordSearchNode(List<FsContent> keys, final String solrQuery) {
super(new RootContentChildren(keys) {
KeywordSearchNode(final Map<FsContent,Integer> keys, final String solrQuery) {

super(new RootContentChildren(new ArrayList(keys.keySet())) {

@Override
protected Node[] createNodes(Object key) {
@ -43,7 +45,8 @@ class KeywordSearchNode extends AbstractNode {
int i = 0;
for (Node original : originalNodes) {
HighlightedMatchesSource markup = new HighlightedMatchesSource((Content)key, solrQuery, false);
filterNodes[i++] = new KeywordSearchFilterNode(markup, original, solrQuery);
int previewChunk = keys.get((FsContent)key);
filterNodes[i++] = new KeywordSearchFilterNode(markup, original, solrQuery, previewChunk);
}

return filterNodes;

@ -39,7 +39,7 @@ public interface KeywordSearchQuery {
* @throws NoOpenCoreException if query failed due to server error, this could be a notification to stop processing
* @return
*/
public Map<String,List<FsContent>> performQuery() throws NoOpenCoreException;
public Map<String,List<ContentHit>> performQuery() throws NoOpenCoreException;

@ -59,6 +59,12 @@ public interface KeywordSearchQuery {
*/
public boolean isEscaped();

/**
*
* @return true if query is a literal query (non regex)
*/
public boolean isLiteral();

/**
* return original query string
* @return the query String supplied originally
@ -82,11 +88,12 @@ public interface KeywordSearchQuery {
* this method is useful if something else should keep track of partial results to write
* @param termHit the term for which to write results
* @param newFsHit fscontent for which to write results for this hit
* @param snippet snippet preview with hit context, or null if there is no snippet
* @param listName listname
* @return collection of results (with cached bb artifacts/attributes) created and written
* @throws NoOpenCoreException if could not write to bb because required query failed due to server error, this could be a notification to stop processing
*/
public KeywordWriteResult writeToBlackBoard(String termHit, FsContent newFsHit, String listName) throws NoOpenCoreException;
public KeywordWriteResult writeToBlackBoard(String termHit, FsContent newFsHit, String snippet, String listName);

}

@ -143,7 +143,7 @@ public class KeywordSearchQueryManager implements KeywordSearchQuery {
}

@Override
public Map<String, List<FsContent>> performQuery() {
public Map<String, List<ContentHit>> performQuery() {
throw new UnsupportedOperationException("performQuery() unsupported");
}

@ -191,10 +191,17 @@ public class KeywordSearchQueryManager implements KeywordSearchQuery {
public Collection<Term> getTerms() {
return null;
}

@Override
public boolean isLiteral() {
throw new UnsupportedOperationException("Not supported yet.");
}

@Override
public KeywordWriteResult writeToBlackBoard(String termHit, FsContent newFsHit, String listName) {
public KeywordWriteResult writeToBlackBoard(String termHit, FsContent newFsHit, String snippet, String listName) {
throw new UnsupportedOperationException("writeToBlackBoard() unsupported by manager");
}

@ -225,17 +225,14 @@ public class KeywordSearchResultFactory extends ChildFactory<KeyValueQuery> {
}

//execute the query and get fscontents matching
Map<String, List<FsContent>> tcqRes;
Map<String, List<ContentHit>> tcqRes;
try {
tcqRes = tcq.performQuery();
} catch (NoOpenCoreException ex) {
logger.log(Level.WARNING, "Could not perform the query. ", ex);
return false;
}
final Set<FsContent> fsContents = new HashSet<FsContent>();
for (String key : tcqRes.keySet()) {
fsContents.addAll(tcqRes.get(key));
}
final Map<FsContent, Integer> hitContents = ContentHit.flattenResults(tcqRes);

//get listname
String listName = "";
@ -247,23 +244,27 @@ public class KeywordSearchResultFactory extends ChildFactory<KeyValueQuery> {
final boolean literal_query = tcq.isEscaped();

int resID = 0;
for (final FsContent f : fsContents) {
for (final FsContent f : hitContents.keySet()) {
final int previewChunk = hitContents.get(f);
//get unique match result files
Map<String, Object> resMap = new LinkedHashMap<String, Object>();
AbstractFsContentNode.fillPropertyMap(resMap, f);
setCommonProperty(resMap, CommonPropertyTypes.MATCH, f.getName());
if (literal_query) {

if (true) {
try {
String snippet;
snippet = LuceneQuery.querySnippet(tcq.getEscapedQueryString(), f.getId(), false, true);
//TODO reuse snippet in ResultWriter
snippet = LuceneQuery.querySnippet(tcq.getEscapedQueryString(), f.getId(), previewChunk, !literal_query, true);
setCommonProperty(resMap, CommonPropertyTypes.CONTEXT, snippet);
} catch (NoOpenCoreException ex) {
logger.log(Level.WARNING, "Could not perform the query. ", ex);
logger.log(Level.WARNING, "Could not perform the snippet query. ", ex);
return false;
}
}

final String highlightQueryEscaped = getHighlightQuery(tcq, literal_query, tcqRes, f);
toPopulate.add(new KeyValueQueryContent(f.getName(), resMap, ++resID, f, highlightQueryEscaped, tcq));
toPopulate.add(new KeyValueQueryContent(f.getName(), resMap, ++resID, f, highlightQueryEscaped, tcq, previewChunk));
}
//write to bb
new ResultWriter(tcqRes, tcq, listName).execute();
@ -272,7 +273,7 @@ public class KeywordSearchResultFactory extends ChildFactory<KeyValueQuery> {
return true;
}

private String getHighlightQuery(KeywordSearchQuery tcq, boolean literal_query, Map<String, List<FsContent>> tcqRes, FsContent f) {
private String getHighlightQuery(KeywordSearchQuery tcq, boolean literal_query, Map<String, List<ContentHit>> tcqRes, FsContent f) {
String highlightQueryEscaped = null;
if (literal_query) {
//literal, treat as non-regex, non-term component query
@ -290,8 +291,13 @@ public class KeywordSearchResultFactory extends ChildFactory<KeyValueQuery> {
//find terms for this file hit
List<String> hitTerms = new ArrayList<String>();
for (String term : tcqRes.keySet()) {
if (tcqRes.get(term).contains(f)) {
hitTerms.add(term);
List<ContentHit> hitList = tcqRes.get(term);

for (ContentHit h : hitList) {
if (h.getContent().equals(f)) {
hitTerms.add(term);
break; //go to next term
}
}
}

@ -305,8 +311,8 @@ public class KeywordSearchResultFactory extends ChildFactory<KeyValueQuery> {
highlightQuery.append("\"");
if (lastTerm != curTerm) {
highlightQuery.append(" "); //acts as OR ||
//force white-space separated index and stored content
//in each term after first. First term taken case by HighlightedMatchesSource
//force HIGHLIGHT_FIELD_REGEX index and stored content
//in each term after first. First term taken care of by HighlightedMatchesSource
highlightQuery.append(LuceneQuery.HIGHLIGHT_FIELD_REGEX).append(":");
}

@ -327,11 +333,12 @@ public class KeywordSearchResultFactory extends ChildFactory<KeyValueQuery> {
final KeyValueQueryContent thingContent = (KeyValueQueryContent) thing;
final Content content = thingContent.getContent();
final String queryStr = thingContent.getQueryStr();
final int previewChunk = thingContent.getPreviewChunk();

Node kvNode = new KeyValueNode(thingContent, Children.LEAF, Lookups.singleton(content));
//wrap in KeywordSearchFilterNode for the markup content, might need to override FilterNode for more customization
HighlightedMatchesSource highlights = new HighlightedMatchesSource(content, queryStr, !thingContent.getQuery().isEscaped(), false);
return new KeywordSearchFilterNode(highlights, kvNode, queryStr);
return new KeywordSearchFilterNode(highlights, kvNode, queryStr, previewChunk);

}
}
@ -379,7 +386,7 @@ public class KeywordSearchResultFactory extends ChildFactory<KeyValueQuery> {
LuceneQuery filesQuery = new LuceneQuery(keywordQuery);
filesQuery.escape();

Map<String, List<FsContent>> matchesRes;
Map<String, List<ContentHit>> matchesRes;
try {
matchesRes = filesQuery.performQuery();
} catch (NoOpenCoreException ex) {
@ -387,23 +394,18 @@ public class KeywordSearchResultFactory extends ChildFactory<KeyValueQuery> {
return false;
}

Set<FsContent> matches = new HashSet<FsContent>();
for (String key : matchesRes.keySet()) {
matches.addAll(matchesRes.get(key));
}

//get unique match result files
final Set<FsContent> uniqueMatches = new LinkedHashSet<FsContent>();
uniqueMatches.addAll(matches);
final Map<FsContent, Integer> uniqueMatches = ContentHit.flattenResults(matchesRes);

int resID = 0;

final KeywordSearchQuery origQuery = thing.getQuery();

for (final FsContent f : uniqueMatches) {
for (final FsContent f : uniqueMatches.keySet()) {
final int previewChunkId = uniqueMatches.get(f);
Map<String, Object> resMap = new LinkedHashMap<String, Object>();
AbstractFsContentNode.fillPropertyMap(resMap, (File) f);
toPopulate.add(new KeyValueQueryContent(f.getName(), resMap, ++resID, f, keywordQuery, thing.getQuery()));
toPopulate.add(new KeyValueQueryContent(f.getName(), resMap, ++resID, f, keywordQuery, thing.getQuery(), previewChunkId));

}
//write to bb
@ -417,11 +419,12 @@ public class KeywordSearchResultFactory extends ChildFactory<KeyValueQuery> {
final KeyValueQueryContent thingContent = (KeyValueQueryContent) thing;
final Content content = thingContent.getContent();
final String query = thingContent.getQueryStr();
final int previewChunk = thingContent.getPreviewChunk();

Node kvNode = new KeyValueNode(thingContent, Children.LEAF, Lookups.singleton(content));
//wrap in KeywordSearchFilterNode for the markup content
HighlightedMatchesSource highlights = new HighlightedMatchesSource(content, query, !thingContent.getQuery().isEscaped());
return new KeywordSearchFilterNode(highlights, kvNode, query);
return new KeywordSearchFilterNode(highlights, kvNode, query, previewChunk);
}
}
}
@ -434,6 +437,7 @@ public class KeywordSearchResultFactory extends ChildFactory<KeyValueQuery> {
private Content content;
private String queryStr;
private KeywordSearchQuery query;
private int previewChunk;

Content getContent() {
return content;
@ -443,10 +447,15 @@ public class KeywordSearchResultFactory extends ChildFactory<KeyValueQuery> {
return queryStr;
}

public KeyValueQueryContent(String name, Map<String, Object> map, int id, Content content, String queryStr, KeywordSearchQuery query) {
int getPreviewChunk() {
return previewChunk;
}

public KeyValueQueryContent(String name, Map<String, Object> map, int id, Content content, String queryStr, KeywordSearchQuery query, int previewChunk) {
super(name, map, id, query);
this.content = content;
this.queryStr = queryStr;
this.previewChunk = previewChunk;
}
}

@ -460,13 +469,15 @@ public class KeywordSearchResultFactory extends ChildFactory<KeyValueQuery> {
private ProgressHandle progress;
private KeywordSearchQuery query;
private String listName;
private Map<String, List<FsContent>> hits;
private Map<String, List<ContentHit>> hits;
final Collection<BlackboardArtifact> na = new ArrayList<BlackboardArtifact>();
private static final int QUERY_DISPLAY_LEN = 40;

ResultWriter(Map<String, List<FsContent>> hits, KeywordSearchQuery query, String listName) {
ResultWriter(Map<String, List<ContentHit>> hits, KeywordSearchQuery query, String listName) {
this.hits = hits;
this.query = query;
this.listName = listName;

}

@Override
@ -484,7 +495,7 @@ public class KeywordSearchResultFactory extends ChildFactory<KeyValueQuery> {
protected Object doInBackground() throws Exception {
registerWriter(this);
final String queryStr = query.getQueryString();
final String queryDisp = queryStr.length() > 40 ? queryStr.substring(0, 39) + " ..." : queryStr;
final String queryDisp = queryStr.length() > QUERY_DISPLAY_LEN ? queryStr.substring(0, QUERY_DISPLAY_LEN - 1) + " ..." : queryStr;
progress = ProgressHandleFactory.createHandle("Saving results: " + queryDisp, new Cancellable() {

@Override
@ -500,10 +511,26 @@ public class KeywordSearchResultFactory extends ChildFactory<KeyValueQuery> {
if (this.isCancelled()) {
break;
}
for (FsContent f : hits.get(hit)) {
KeywordWriteResult written = query.writeToBlackBoard(hit, f, listName);
if (written != null) {
na.add(written.getArtifact());
Map<FsContent, Integer> flattened = ContentHit.flattenResults(hits.get(hit));
for (FsContent f : flattened.keySet()) {
int chunkId = flattened.get(f);
final String snippetQuery = KeywordSearchUtil.escapeLuceneQuery(hit, true, false);
String snippet = null;
try {
snippet = LuceneQuery.querySnippet(snippetQuery, f.getId(), chunkId, !query.isLiteral(), true);
} catch (NoOpenCoreException e) {
logger.log(Level.WARNING, "Error querying snippet: " + snippetQuery, e);
//no reason to continue
return null;
} catch (Exception e) {
logger.log(Level.WARNING, "Error querying snippet: " + snippetQuery, e);
continue;
}
if (snippet != null) {
KeywordWriteResult written = query.writeToBlackBoard(hit, f, snippet, listName);
if (written != null) {
na.add(written.getArtifact());
}
}
}

@ -18,16 +18,12 @@
|
||||
*/
|
||||
package org.sleuthkit.autopsy.keywordsearch;
|
||||
|
||||
import java.sql.ResultSet;
|
||||
import java.sql.SQLException;
|
||||
import java.sql.Statement;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.LinkedHashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.logging.Level;
|
||||
import java.util.logging.Logger;
|
||||
import org.apache.commons.lang.StringEscapeUtils;
|
||||
@ -39,7 +35,6 @@ import org.apache.solr.client.solrj.response.TermsResponse.Term;
|
||||
import org.apache.solr.common.SolrDocument;
|
||||
import org.apache.solr.common.SolrDocumentList;
|
||||
import org.openide.nodes.Node;
|
||||
import org.openide.util.Exceptions;
|
||||
import org.openide.windows.TopComponent;
|
||||
import org.sleuthkit.autopsy.casemodule.Case;
|
||||
import org.sleuthkit.autopsy.corecomponents.DataResultTopComponent;
|
||||
@ -61,10 +56,10 @@ public class LuceneQuery implements KeywordSearchQuery {
|
||||
private boolean isEscaped;
|
||||
private Keyword keywordQuery = null;
|
||||
//use different highlight Solr fields for regex and literal search
|
||||
static final String HIGHLIGHT_FIELD_LITERAL = "content";
|
||||
static final String HIGHLIGHT_FIELD_REGEX = "content";
|
||||
static final String HIGHLIGHT_FIELD_LITERAL = Server.Schema.CONTENT.toString();
|
||||
static final String HIGHLIGHT_FIELD_REGEX = Server.Schema.CONTENT.toString();
|
||||
//TODO use content_ws stored="true" in solr schema for perfect highlight hits
|
||||
//static final String HIGHLIGHT_FIELD_REGEX = "content_ws";
|
||||
//static final String HIGHLIGHT_FIELD_REGEX = Server.Schema.CONTENT_WS.toString()
|
||||
|
||||
public LuceneQuery(Keyword keywordQuery) {
|
||||
this(keywordQuery.getQuery());
|
||||
@ -88,6 +83,13 @@ public class LuceneQuery implements KeywordSearchQuery {
|
||||
return isEscaped;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isLiteral() {
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
|
||||
@Override
|
||||
public String getEscapedQueryString() {
|
||||
return this.queryEscaped;
|
||||
@ -104,8 +106,8 @@ public class LuceneQuery implements KeywordSearchQuery {
|
||||
}
|
||||
|
||||
@Override
|
||||
public Map<String, List<FsContent>> performQuery() throws NoOpenCoreException {
|
||||
Map<String, List<FsContent>> results = new HashMap<String, List<FsContent>>();
|
||||
public Map<String, List<ContentHit>> performQuery() throws NoOpenCoreException {
|
||||
Map<String, List<ContentHit>> results = new HashMap<String, List<ContentHit>>();
|
||||
//in case of single term literal query there is only 1 term
|
||||
results.put(query, performLuceneQuery());
|
||||
|
||||
@ -115,18 +117,14 @@ public class LuceneQuery implements KeywordSearchQuery {
|
||||
@Override
|
||||
public void execute() {
|
||||
escape();
|
||||
Set<FsContent> fsMatches = new HashSet<FsContent>();
|
||||
final Map<String, List<FsContent>> matches;
|
||||
|
||||
|
||||
final Map<String, List<ContentHit>> matches;
|
||||
|
||||
try {
|
||||
matches = performQuery();
|
||||
} catch (NoOpenCoreException ex) {
|
||||
return;
|
||||
}
|
||||
|
||||
for (String key : matches.keySet()) {
|
||||
fsMatches.addAll(matches.get(key));
|
||||
}
|
||||
|
||||
String pathText = "Keyword query: " + query;
|
||||
|
||||
@ -134,11 +132,14 @@ public class LuceneQuery implements KeywordSearchQuery {
|
||||
KeywordSearchUtil.displayDialog("Keyword Search", "No results for keyword: " + query, KeywordSearchUtil.DIALOG_MESSAGE_TYPE.INFO);
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
//map of unique fs hit and chunk id or 0
|
||||
LinkedHashMap<FsContent,Integer> fsMatches = ContentHit.flattenResults(matches);
|
||||
|
||||
//get listname
|
||||
String listName = "";
|
||||
|
||||
Node rootNode = new KeywordSearchNode(new ArrayList<FsContent>(fsMatches), queryEscaped);
|
||||
Node rootNode = new KeywordSearchNode(fsMatches, queryEscaped);
|
||||
Node filteredRootNode = new TableFilterNode(rootNode, true);
|
||||
|
||||
TopComponent searchResultWin = DataResultTopComponent.createInstance("Keyword search", pathText, filteredRootNode, matches.size());
|
||||
@ -154,9 +155,8 @@ public class LuceneQuery implements KeywordSearchQuery {
|
||||
return query != null && !query.equals("");
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public KeywordWriteResult writeToBlackBoard(String termHit, FsContent newFsHit, String listName) throws NoOpenCoreException {
|
||||
public KeywordWriteResult writeToBlackBoard(String termHit, FsContent newFsHit, String snippet, String listName) {
|
||||
final String MODULE_NAME = KeywordSearchIngestService.MODULE_NAME;
|
||||
|
||||
KeywordWriteResult writeResult = null;
|
||||
@ -170,18 +170,6 @@ public class LuceneQuery implements KeywordSearchQuery {
|
||||
return null;
|
||||
}
|
||||
|
||||
String snippet = null;
|
||||
try {
|
||||
snippet = LuceneQuery.querySnippet(queryEscaped, newFsHit.getId(), false, true);
|
||||
}
|
||||
catch (NoOpenCoreException e) {
|
||||
logger.log(Level.WARNING, "Error querying snippet: " + query, e);
|
||||
throw e;
|
||||
}
|
||||
catch (Exception e) {
|
||||
logger.log(Level.WARNING, "Error querying snippet: " + query, e);
|
||||
return null;
|
||||
}
|
||||
if (snippet != null) {
|
||||
attributes.add(new BlackboardAttribute(ATTRIBUTE_TYPE.TSK_KEYWORD_PREVIEW.getTypeID(), MODULE_NAME, "", snippet));
|
||||
}
|
||||
@ -219,9 +207,9 @@ public class LuceneQuery implements KeywordSearchQuery {
     * @param query
     * @return matches List
     */
    private List<FsContent> performLuceneQuery() throws NoOpenCoreException {
    private List<ContentHit> performLuceneQuery() throws NoOpenCoreException {

        List<FsContent> matches = new ArrayList<FsContent>();
        List<ContentHit> matches = new ArrayList<ContentHit>();

        boolean allMatchesFetched = false;
        final int ROWS_PER_FETCH = 10000;

@ -232,21 +220,18 @@ public class LuceneQuery implements KeywordSearchQuery {
        q.setQuery(queryEscaped);
        q.setRows(ROWS_PER_FETCH);
        q.setFields("id");
        q.setFields(Server.Schema.ID.toString());

        for (int start = 0; !allMatchesFetched; start = start + ROWS_PER_FETCH) {

            q.setStart(start);

            try {
                QueryResponse response = solrServer.query(q, METHOD.POST);
                SolrDocumentList resultList = response.getResults();
                long results = resultList.getNumFound();

                allMatchesFetched = start + ROWS_PER_FETCH >= results;

                SleuthkitCase sc;
                SleuthkitCase sc = null;
                try {
                    sc = Case.getCurrentCase().getSleuthkitCase();
                } catch (IllegalStateException ex) {

@ -255,17 +240,39 @@ public class LuceneQuery implements KeywordSearchQuery {
                }

                for (SolrDocument resultDoc : resultList) {
                    long id = Long.parseLong((String) resultDoc.getFieldValue("id"));
                    final String resultID = (String) resultDoc.getFieldValue(Server.Schema.ID.toString());

                    // TODO: has to be a better way to get files. Also, need to
                    // check that we actually get 1 hit for each id
                    ResultSet rs = sc.runQuery("select * from tsk_files where obj_id=" + id);
                    matches.addAll(sc.resultSetToFsContents(rs));
                    final Statement s = rs.getStatement();
                    rs.close();
                    if (s != null) {
                        s.close();
                    final int sepIndex = resultID.indexOf('_');

                    if (sepIndex != -1) {
                        //file chunk result
                        final long fileID = Long.parseLong(resultID.substring(0, sepIndex));
                        final int chunkId = Integer.parseInt(resultID.substring(sepIndex + 1));
                        logger.log(Level.INFO, "file id: " + fileID + ", chunkID: " + chunkId);

                        try {
                            FsContent resultFsContent = sc.getFsContentById(fileID);
                            matches.add(new ContentHit(resultFsContent, chunkId));
                        } catch (TskException ex) {
                            logger.log(Level.WARNING, "Could not get the fscontent for keyword hit, ", ex);
                            //something wrong with case/db
                            return matches;
                        }

                    } else {
                        final long fileID = Long.parseLong(resultID);

                        try {
                            FsContent resultFsContent = sc.getFsContentById(fileID);
                            matches.add(new ContentHit(resultFsContent));
                        } catch (TskException ex) {
                            logger.log(Level.WARNING, "Could not get the fscontent for keyword hit, ", ex);
                            //something wrong with case/db
                            return matches;
                        }
                    }

                }
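The branch above encodes the chunking scheme introduced by this commit: a whole file is indexed under its object id, while each chunk gets a compound Solr document id of the form fileID_chunkID. The parsing rule in isolation, as a self-contained sketch (the helper name is hypothetical):

    // Sketch: split a Solr document id into {fileID, chunkID}; chunkID 0 = whole file.
    static long[] parseDocumentId(String resultID) {
        final int sepIndex = resultID.indexOf('_');
        if (sepIndex == -1) {
            return new long[]{Long.parseLong(resultID), 0};
        }
        return new long[]{Long.parseLong(resultID.substring(0, sepIndex)),
                          Long.parseLong(resultID.substring(sepIndex + 1))};
    }

For example, "42" parses to {42, 0} and "42_3" to {42, 3}.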
@ -275,9 +282,6 @@ public class LuceneQuery implements KeywordSearchQuery {
            } catch (SolrServerException ex) {
                logger.log(Level.WARNING, "Error executing Lucene Solr Query: " + query, ex);
                // TODO: handle bad query strings, among other issues
            } catch (SQLException ex) {
                logger.log(Level.WARNING, "Error interpreting results from Lucene Solr Query: " + query, ex);
                return matches;
            }

        }

@ -293,6 +297,20 @@ public class LuceneQuery implements KeywordSearchQuery {
     * @return
     */
    public static String querySnippet(String query, long contentID, boolean isRegex, boolean group) throws NoOpenCoreException {
        return querySnippet(query, contentID, 0, isRegex, group);
    }

    /**
     * return snippet preview context
     * @param query the keyword query for text to highlight. Lucene special chars should already be escaped.
     * @param contentID content id associated with the hit
     * @param chunkID chunk id associated with the content hit, or 0 if no chunks
     * @param isRegex whether the query is a regular expression (different Solr fields are then used to generate the preview)
     * @param group whether the query should look for all terms grouped together in the query order, or not
     * @return
     */
    public static String querySnippet(String query, long contentID, int chunkID, boolean isRegex, boolean group) throws NoOpenCoreException {
        final int SNIPPET_LENGTH = 45;

        Server solrServer = KeywordSearch.getServer();

@ -323,17 +341,27 @@ public class LuceneQuery implements KeywordSearchQuery {
            //quote only if user supplies quotes
            q.setQuery(query);
        }

        q.addFilterQuery("id:" + contentID);

        String contentIDStr = null;

        if (chunkID == 0)
            contentIDStr = Long.toString(contentID);
        else
            contentIDStr = FileExtractedChild.getFileExtractChildId(contentID, chunkID);

        String idQuery = Server.Schema.ID.toString() + ":" + contentIDStr;
        q.addFilterQuery(idQuery);
        q.addHighlightField(highlightField);
        q.setHighlightSimplePre("«");
        q.setHighlightSimplePost("»");
        q.setHighlightSnippets(1);
        q.setHighlightFragsize(SNIPPET_LENGTH);
        q.setParam("hl.maxAnalyzedChars", Server.HL_ANALYZE_CHARS_UNLIMITED); //analyze all content

        try {
            QueryResponse response = solrServer.query(q);
            Map<String, Map<String, List<String>>> responseHighlight = response.getHighlighting();
            Map<String, List<String>> responseHighlightID = responseHighlight.get(Long.toString(contentID));
            Map<String, List<String>> responseHighlightID = responseHighlight.get(contentIDStr);
            if (responseHighlightID == null) {
                return "";
            }
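Note that the highlighting response is keyed by document id, so the same chunk-aware contentIDStr must be used both in the filter query and when reading results back. A hedged sketch of pulling the single requested fragment out of SolrJ's highlighting map (response, contentIDStr, and highlightField as in the surrounding code):

    // Sketch: getHighlighting() maps docID -> field -> fragment list.
    Map<String, Map<String, List<String>>> highlighting = response.getHighlighting();
    Map<String, List<String>> perField = highlighting.get(contentIDStr); // e.g. "42" or "42_3"
    String snippet = "";
    if (perField != null) {
        List<String> fragments = perField.get(highlightField);
        if (fragments != null && !fragments.isEmpty()) {
            snippet = fragments.get(0); // one snippet requested via setHighlightSnippets(1)
        }
    }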
@ -25,10 +25,12 @@ package org.sleuthkit.autopsy.keywordsearch;
public interface MarkupSource {

    /**
     * @param pageNum page number to get markup for
     * @return text optionally marked up with the subset of HTML that Swing
     * components can handle in their setText() method.
     *
     */
    String getMarkup();
    String getMarkup(int pageNum);

    /**
     *

@ -53,4 +55,10 @@ public interface MarkupSource {
     */
    @Override
    String toString();

    /**
     * get number of pages/chunks
     * @return number of pages
     */
    int getNumberPages();
}
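getMarkup() now takes a page number, turning MarkupSource into a paged, chunk-aware interface. A minimal sketch of an implementation backed by pre-extracted chunk text (the class and its field are illustrative; MarkupSource methods not visible in this diff are elided):

    // Sketch: serve each indexed chunk as one "page" of markup.
    class ChunkedMarkupSource implements MarkupSource {

        private final List<String> chunkTexts; // one entry per indexed chunk

        ChunkedMarkupSource(List<String> chunkTexts) {
            this.chunkTexts = chunkTexts;
        }

        @Override
        public String getMarkup(int pageNum) {
            // pages are 1-based, matching chunk ids that start at 1
            return "<pre>" + chunkTexts.get(pageNum - 1) + "</pre>";
        }

        @Override
        public int getNumberPages() {
            return chunkTexts.size();
        }

        @Override
        public String toString() {
            return "Chunked markup source (" + getNumberPages() + " pages)";
        }

        // remaining MarkupSource methods omitted -- not shown in this diff
    }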
@ -49,6 +49,7 @@ import org.openide.modules.InstalledFileLocator;
import org.openide.util.Exceptions;
import org.sleuthkit.autopsy.casemodule.Case;
import org.sleuthkit.autopsy.coreutils.PlatformUtil;
import org.sleuthkit.autopsy.coreutils.Version;
import org.sleuthkit.datamodel.Content;

/**
@ -56,6 +57,77 @@ import org.sleuthkit.datamodel.Content;
 */
class Server {

    public static enum Schema {

        ID {
            @Override
            public String toString() {
                return "id";
            }
        },
        CONTENT {
            @Override
            public String toString() {
                return "content";
            }
        },
        CONTENT_WS {
            @Override
            public String toString() {
                return "content_ws";
            }
        },
        FILE_NAME {
            @Override
            public String toString() {
                return "file_name";
            }
        },
        CTIME {
            @Override
            public String toString() {
                return "ctime";
            }
        },
        ATIME {
            @Override
            public String toString() {
                return "atime";
            }
        },
        MTIME {
            @Override
            public String toString() {
                return "mtime";
            }
        },
        CRTIME {
            @Override
            public String toString() {
                return "crtime";
            }
        },
        NUM_CHUNKS {
            @Override
            public String toString() {
                return "num_chunks";
            }
        },};

    public static final String HL_ANALYZE_CHARS_UNLIMITED = "-1";

    //max content size we can send to Solr
    public static final long MAX_CONTENT_SIZE = 1L * 1024 * 1024 * 1024;

    private static final Logger logger = Logger.getLogger(Server.class.getName());
    private static final String DEFAULT_CORE_NAME = "coreCase";
    // TODO: DEFAULT_CORE_NAME needs to be replaced with unique names to support multiple open cases
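The Schema enum gives every Solr field used by the module a single definition point, so query-building code stops hard-coding field names. A small usage sketch (fileID is an illustrative variable):

    // Sketch: build a query against schema fields without string literals.
    SolrQuery q = new SolrQuery();
    q.setQuery("*:*");
    q.addFilterQuery(Server.Schema.ID.toString() + ":" + fileID);  // instead of "id:" + fileID
    q.setFields(Server.Schema.CONTENT.toString(), Server.Schema.NUM_CHUNKS.toString());

NUM_CHUNKS backs the chunk bookkeeping used elsewhere in this commit: a parent file indexed without content records how many chunk documents belong to it.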
@ -144,6 +216,10 @@ class Server {
            while ((line = br.readLine()) != null) {
                bw.write(line);
                bw.newLine();
                if (Version.getBuildType() == Version.Type.DEVELOPMENT) {
                    //flush buffers if dev version for debugging
                    bw.flush();
                }
            }
        } catch (IOException ex) {
            Exceptions.printStackTrace(ex);

@ -294,6 +370,34 @@ class Server {
        return currentCore.queryNumIndexedFiles();
    }

    /**
     * Return true if the file is indexed (either as a whole or as a chunk)
     * @param contentID
     * @return true if it is indexed
     * @throws SolrServerException, NoOpenCoreException
     */
    public boolean queryIsIndexed(long contentID) throws SolrServerException, NoOpenCoreException {
        if (currentCore == null) {
            throw new NoOpenCoreException();
        }

        return currentCore.queryIsIndexed(contentID);
    }

    /**
     * Execute query that gets number of indexed file chunks for a file
     * @param fileID file id of the original file broken into chunks and indexed
     * @return int representing number of indexed file chunks, 0 if there are no chunks
     * @throws SolrServerException
     */
    public int queryNumFileChunks(long fileID) throws SolrServerException, NoOpenCoreException {
        if (currentCore == null) {
            throw new NoOpenCoreException();
        }

        return currentCore.queryNumFileChunks(fileID);
    }
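Together these wrappers let viewer code size a file's chunk navigation before fetching any text. A hedged usage sketch (hitFile is an illustrative Content; handling of the checked SolrServerException/NoOpenCoreException is omitted):

    // Sketch: decide how many "pages" a hit viewer should offer for one file.
    Server server = KeywordSearch.getServer();
    int numChunks = server.queryNumFileChunks(hitFile.getId()); // 0 = indexed as one document
    int numPages = (numChunks == 0) ? 1 : numChunks;

    for (int page = 1; page <= numPages; ++page) {
        // chunk ids start at 1; pass 0 to read an unchunked file
        String text = server.getSolrContent(hitFile, (numChunks == 0) ? 0 : page);
        // ...hand text to the paged MarkupSource
    }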

    /**
     * Execute solr query
     * @param sq query
@ -348,7 +452,22 @@ class Server {
        if (currentCore == null) {
            throw new NoOpenCoreException();
        }
        return currentCore.getSolrContent(content);
        return currentCore.getSolrContent(content.getId(), 0);
    }

    /**
     * Execute Solr query to get content text from content chunk
     * @param content to get the text for
     * @param chunkID chunk number to query (starting at 1), or 0 if there are no chunks for that content
     * @return content text string
     * @throws SolrServerException
     * @throws NoOpenCoreException
     */
    public String getSolrContent(final Content content, int chunkID) throws SolrServerException, NoOpenCoreException {
        if (currentCore == null) {
            throw new NoOpenCoreException();
        }
        return currentCore.getSolrContent(content.getId(), chunkID);
    }

    /**
@ -436,15 +555,19 @@ class Server {
        }
    }

    private String getSolrContent(final Content content) {

    private String getSolrContent(long contentID, int chunkID) {
        final SolrQuery q = new SolrQuery();
        q.setQuery("*:*");
        q.addFilterQuery("id:" + content.getId());
        q.setFields("content");
        String filterQuery = Schema.ID.toString() + ":" + contentID;
        if (chunkID != 0)
            filterQuery = filterQuery + "_" + chunkID;
        q.addFilterQuery(filterQuery);
        q.setFields(Schema.CONTENT.toString());
        try {
            return (String) solrCore.query(q).getResults().get(0).getFieldValue("content");
            return (String) solrCore.query(q).getResults().get(0).getFieldValue(Schema.CONTENT.toString());
        } catch (SolrServerException ex) {
            logger.log(Level.WARNING, "Error getting content from Solr and validating regex match", ex);
            logger.log(Level.WARNING, "Error getting content from Solr", ex);
            return null;
        }
    }
@ -470,6 +593,32 @@ class Server {
        q.setRows(0);
        return (int) query(q).getResults().getNumFound();
    }

    /**
     * Return true if the file is indexed (either as a whole or as a chunk)
     * @param contentID
     * @return true if it is indexed
     * @throws SolrServerException
     */
    private boolean queryIsIndexed(long contentID) throws SolrServerException {
        SolrQuery q = new SolrQuery("*:*");
        q.addFilterQuery(Server.Schema.ID.toString() + ":" + Long.toString(contentID));
        //q.setFields(Server.Schema.ID.toString());
        q.setRows(0);
        return (int) query(q).getResults().getNumFound() != 0;
    }

    /**
     * Execute query that gets number of indexed file chunks for a file
     * @param contentID file id of the original file broken into chunks and indexed
     * @return int representing number of indexed file chunks, 0 if there are no chunks
     * @throws SolrServerException
     */
    private int queryNumFileChunks(long contentID) throws SolrServerException {
        SolrQuery q = new SolrQuery("id:" + Long.toString(contentID) + "_*");
        q.setRows(0);
        return (int) query(q).getResults().getNumFound();
    }
}
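queryNumFileChunks() leans on two Solr idioms: a trailing-wildcard id query that matches every chunk document belonging to one file, and rows=0 so Solr returns only the match count without any document bodies. The same pattern in isolation, against the SolrJ API of this era (method and variable names here are illustrative):

    // Sketch: count documents matching "id:<fileID>_*" without fetching them.
    static long countChunks(SolrServer solr, long fileID) throws SolrServerException {
        SolrQuery q = new SolrQuery("id:" + fileID + "_*");
        q.setRows(0); // header-only response: numFound, no docs
        return solr.query(q).getResults().getNumFound();
    }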
class ServerAction extends AbstractAction {

@ -56,7 +56,7 @@ public class TermComponentQuery implements KeywordSearchQuery {

    private static final int TERMS_UNLIMITED = -1;
    //corresponds to field in Solr schema, analyzed with white-space tokenizer only
    private static final String TERMS_SEARCH_FIELD = "content_ws";
    private static final String TERMS_SEARCH_FIELD = Server.Schema.CONTENT_WS.toString();
    private static final String TERMS_HANDLER = "/terms";
    private static final int TERMS_TIMEOUT = 90 * 1000; //in ms
    private static Logger logger = Logger.getLogger(TermComponentQuery.class.getName());

@ -101,6 +101,11 @@ public class TermComponentQuery implements KeywordSearchQuery {
    public boolean isEscaped() {
        return isEscaped;
    }

    @Override
    public boolean isLiteral() {
        return false;
    }

    /*
     * helper method to create a Solr terms component query

@ -154,23 +159,9 @@ public class TermComponentQuery implements KeywordSearchQuery {
    }

    @Override
    public KeywordWriteResult writeToBlackBoard(String termHit, FsContent newFsHit, String listName) throws NoOpenCoreException {
    public KeywordWriteResult writeToBlackBoard(String termHit, FsContent newFsHit, String snippet, String listName) {
        final String MODULE_NAME = KeywordSearchIngestService.MODULE_NAME;

        //snippet
        String snippet = null;
        try {
            snippet = LuceneQuery.querySnippet(KeywordSearchUtil.escapeLuceneQuery(termHit, true, false), newFsHit.getId(), true, true);
        }
        catch (NoOpenCoreException e) {
            logger.log(Level.WARNING, "Error querying snippet: " + termHit, e);
            throw e;
        }
        catch (Exception e) {
            logger.log(Level.WARNING, "Error querying snippet: " + termHit, e);
            return null;
        }

        if (snippet == null || snippet.equals("")) {
            return null;
        }

@ -225,8 +216,8 @@ public class TermComponentQuery implements KeywordSearchQuery {
    }

    @Override
    public Map<String, List<FsContent>> performQuery() throws NoOpenCoreException {
    Map<String, List<FsContent>> results = new HashMap<String, List<FsContent>>();
    public Map<String, List<ContentHit>> performQuery() throws NoOpenCoreException {
        Map<String, List<ContentHit>> results = new HashMap<String, List<ContentHit>>();

        final SolrQuery q = createQuery();
        terms = executeQuery(q);

@ -241,12 +232,12 @@ public class TermComponentQuery implements KeywordSearchQuery {

        LuceneQuery filesQuery = new LuceneQuery(queryStr);
        try {
            Map<String, List<FsContent>> subResults = filesQuery.performQuery();
            Set<FsContent> filesResults = new HashSet<FsContent>();
            Map<String, List<ContentHit>> subResults = filesQuery.performQuery();
            Set<ContentHit> filesResults = new HashSet<ContentHit>();
            for (String key : subResults.keySet()) {
                filesResults.addAll(subResults.get(key));
            }
            results.put(term.getTerm(), new ArrayList<FsContent>(filesResults));
            results.put(term.getTerm(), new ArrayList<ContentHit>(filesResults));
        }
        catch (NoOpenCoreException e) {
            logger.log(Level.WARNING, "Error executing Solr query,", e);
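Deduplicating through HashSet&lt;ContentHit&gt; only works if ContentHit defines value equality over its file and chunk id. A hedged sketch of the contract this code appears to rely on (the content and chunkId fields are assumptions, not shown in this diff):

    // Sketch: value semantics a HashSet<ContentHit> dedupe would need.
    @Override
    public boolean equals(Object other) {
        if (!(other instanceof ContentHit)) {
            return false;
        }
        ContentHit o = (ContentHit) other;
        // same file object id and same chunk id (0 = whole file) -- assumed fields
        return this.content.getId() == o.content.getId() && this.chunkId == o.chunkId;
    }

    @Override
    public int hashCode() {
        return 41 * (int) (this.content.getId() % Integer.MAX_VALUE) + this.chunkId;
    }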