Merge in latest from develop branch

This commit is contained in:
Richard Cordovano 2015-05-13 17:30:18 -04:00
commit b18fd8d9f9
8 changed files with 58 additions and 8 deletions

View File

@@ -1,3 +1,19 @@
<<<<<<< HEAD
=======
file.reference.jdom-2.0.5-contrib.jar=release/modules/ext/jdom-2.0.5-contrib.jar
file.reference.jdom-2.0.5.jar=release/modules/ext/jdom-2.0.5.jar
file.reference.jython-standalone-2.7.0.jar=release/modules/ext/jython-standalone-2.7.0.jar
file.reference.jython.jar-1=release/modules/ext/jython.jar
file.reference.metadata-extractor-2.6.2.jar=release/modules/ext/metadata-extractor-2.6.2.jar
file.reference.Rejistry-1.0-SNAPSHOT.jar=release/modules/ext/Rejistry-1.0-SNAPSHOT.jar
file.reference.sevenzipjbinding-AllPlatforms.jar=release/modules/ext/sevenzipjbinding-AllPlatforms.jar
file.reference.sevenzipjbinding.jar=release/modules/ext/sevenzipjbinding.jar
file.reference.sqlite-jdbc-3.7.15-M1.jar=release/modules/ext/sqlite-jdbc-3.7.15-M1.jar
file.reference.StixLib.jar=release/modules/ext/StixLib.jar
file.reference.tika-core-1.2.jar=release/modules/ext/tika-core-1.2.jar
file.reference.Tsk_DataModel.jar=release/modules/ext/Tsk_DataModel.jar
file.reference.xmpcore.jar=release/modules/ext/xmpcore.jar
>>>>>>> upstream/develop
javac.source=1.8
javac.compilerargs=-Xlint -Xlint:-serial
license.file=../LICENSE-2.0.txt

View File

@@ -210,6 +210,10 @@
<runtime-relative-path>ext/postgresql-9.4-1201-jdbc41.jar</runtime-relative-path>
<binary-origin>release/modules/ext/postgresql-9.4-1201-jdbc41.jar</binary-origin>
</class-path-extension>
<class-path-extension>
<runtime-relative-path>ext/jython-standalone-2.7.0.jar</runtime-relative-path>
<binary-origin>release/modules/ext/jython-standalone-2.7.0.jar</binary-origin>
</class-path-extension>
<class-path-extension>
<runtime-relative-path>ext/StixLib.jar</runtime-relative-path>
<binary-origin>release/modules/ext/StixLib.jar</binary-origin>
@@ -247,8 +251,13 @@
<binary-origin>release/modules/ext/xmpcore.jar</binary-origin>
</class-path-extension>
<class-path-extension>
<<<<<<< HEAD
<runtime-relative-path>ext/jython.jar</runtime-relative-path>
<binary-origin>release/modules/ext/jython.jar</binary-origin>
=======
<runtime-relative-path>ext/tika-core-1.2.jar</runtime-relative-path>
<binary-origin>release/modules/ext/tika-core-1.2.jar</binary-origin>
>>>>>>> upstream/develop
</class-path-extension>
<class-path-extension>
<runtime-relative-path>ext/jdom-2.0.5-contrib.jar</runtime-relative-path>

Binary file not shown.

Binary file not shown.

View File

@@ -55,7 +55,7 @@ import org.sleuthkit.datamodel.ReadContentInputStream;
private boolean fxInited = false;
private final List<String> supportedExtensions;
static private final List<String> supportedMimes = Arrays.asList("image/jpeg", "image/png", "image/gif", "image/bmp"); //NON-NLS
static private final List<String> supportedMimes = Arrays.asList("image/jpeg", "image/png", "image/gif", "image/bmp", "image/x-ms-bmp"); //NON-NLS
/**
* Creates new form MediaViewImagePanel

View File

@@ -20,6 +20,7 @@ package org.sleuthkit.autopsy.keywordsearch;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
@@ -214,7 +215,7 @@ class LuceneQuery implements KeywordSearchQuery {
Map<String, Map<String, List<String>>> highlightResponse = response.getHighlighting();
// get the unique set of files with hits
Set<SolrDocument> uniqueSolrDocumentsWithHits = filterDuplicateSolrDocuments(resultList);
Set<SolrDocument> uniqueSolrDocumentsWithHits = filterOneHitPerDocument(resultList);
allMatchesFetched = start + MAX_RESULTS >= resultList.getNumFound();
@@ -305,7 +306,24 @@
* @param resultList
* @return
*/
private Set<SolrDocument> filterDuplicateSolrDocuments(SolrDocumentList resultList) {
private Set<SolrDocument> filterOneHitPerDocument(SolrDocumentList resultList) {
// sort the list so that we consistently pick the same chunk each time.
// note this sort is doing a string comparison and not an integer comparison, so
// chunk 10 will be smaller than chunk 9.
Collections.sort(resultList, new Comparator<SolrDocument>() {
@Override
public int compare(SolrDocument left, SolrDocument right) {
// ID is in the form of ObjectId_Chunk
String leftID = left.getFieldValue(Server.Schema.ID.toString()).toString();
String rightID = right.getFieldValue(Server.Schema.ID.toString()).toString();
return leftID.compareTo(rightID);
}
});
// NOTE: We could probably just iterate through the list and compare each ID with the
// previous ID to get the unique documents faster than using this set now that the list
// is sorted.
Set<SolrDocument> solrDocumentsWithMatches = new TreeSet<>(new SolrDocumentComparatorIgnoresChunkId());
solrDocumentsWithMatches.addAll(resultList);
return solrDocumentsWithMatches;
@@ -464,20 +482,26 @@ class LuceneQuery implements KeywordSearchQuery {
public int compare(SolrDocument left, SolrDocument right) {
// ID is in the form of ObjectId_Chunk
String idName = Server.Schema.ID.toString();
final String idName = Server.Schema.ID.toString();
// get object id of left doc
String leftID = left.getFieldValue(idName).toString();
int index = leftID.indexOf(Server.ID_CHUNK_SEP);
if (index != -1) {
leftID = leftID.substring(0, index);
}
// get object id of right doc
String rightID = right.getFieldValue(idName).toString();
index = rightID.indexOf(Server.ID_CHUNK_SEP);
if (index != -1) {
rightID = rightID.substring(0, index);
}
return leftID.compareTo(rightID);
Integer leftInt = new Integer(leftID);
Integer rightInt = new Integer(rightID);
return leftInt.compareTo(rightInt);
}
}
}

View File

@@ -1127,7 +1127,8 @@ public class Server {
filterQuery = filterQuery + Server.ID_CHUNK_SEP + chunkID;
}
q.addFilterQuery(filterQuery);
q.setFields(Schema.TEXT.toString());
// sort the TEXT field
q.setSortField(Schema.TEXT.toString(), SolrQuery.ORDER.asc);
try {
// Get the first result.
SolrDocument solrDocument = solrCore.query(q).getResults().get(0);

View File

@@ -2058,7 +2058,7 @@ SKIP_FUNCTION_MACROS = YES
# the path). If a tag file is not located in the directory in which doxygen is
# run, you must also specify the path to the tagfile here.
TAGFILES = $(TSK_HOME)/bindings/java/doxygen/tskjni_doxygen.tag=http://www.sleuthkit.org/sleuthkit/docs/jni-docs/
TAGFILES = $(TSK_HOME)/tskjni_doxygen.tag=http://www.sleuthkit.org/sleuthkit/docs/jni-docs/
# When a file name is specified after GENERATE_TAGFILE, doxygen will create a
# tag file that is based on the input files it reads. See section "Linking to