Merge remote-tracking branch 'upstream/search_improvements' into open_case_resources

This commit is contained in:
Richard Cordovano 2017-01-19 09:08:04 -05:00
commit a95ce4db51
16 changed files with 406 additions and 119 deletions

View File

@ -313,3 +313,5 @@ GlobalEditListPanel.keywordDupesSkipped.text={0} keyword was already in the list
GlobalEditListPanel.keywordDupesSkippedPlural.text={0} keywords were already in the list.
GlobalEditListPanel.keywordErrors.text={0} keyword could not be parsed. Please review and try again.
GlobalEditListPanel.keywordErrorsPlural.text={0} keywords could not be parsed. Please review and try again.
SolrSearchService.IndexUpgradeDialog.title=Index Upgrade Required In Order To Open Case
SolrSearchService.IndexUpgradeDialog.msg=<html>Index upgrade can be a very lengthy operation that involves copying the existing index and calling third-party tools to upgrade it. <br />Upon upgrade you will be able to see existing keyword search results and perform literal keyword searches on the existing index.<br />However, you will not be able to add new text to the index or perform regex searches.<br /> You must create a new case and re-run the Keyword Search Ingest Module if you want to index new text or perform regex searches.<br /> Do you wish to proceed with the index upgrade?</html>

View File

@ -24,26 +24,37 @@ import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.logging.Level;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.commons.io.FileUtils;
import org.openide.util.NbBundle;
import org.sleuthkit.autopsy.casemodule.Case;
import org.sleuthkit.autopsy.corecomponentinterfaces.AutopsyService;
import org.sleuthkit.autopsy.coreutils.Logger;
import org.sleuthkit.autopsy.coreutils.UNCPathUtilities;
import org.sleuthkit.autopsy.coreutils.PlatformUtil;
/**
* This class handles the task of finding KWS index folders and upgrading old
* indexes to the latest supported Solr version.
* This class handles the task of finding and identifying KWS index folders.
*/
class IndexHandling {
class IndexFinder {
private UNCPathUtilities uncPathUtilities = new UNCPathUtilities();
private static final String MODULE_OUTPUT = "ModuleOutput"; // ELTODO get "ModuleOutput" somehow...
private static final Logger logger = Logger.getLogger(IndexFinder.class.getName());
private UNCPathUtilities uncPathUtilities;
private static final String KWS_OUTPUT_FOLDER_NAME = "keywordsearch";
private static final String KWS_DATA_FOLDER_NAME = "data";
private static final String INDEX_FOLDER_NAME = "index";
private static final String CURRENT_SOLR_VERSION = "6";
private static final String CURRENT_SOLR_SCHEMA_VERSION = "2.0";
private static final Pattern INDEX_FOLDER_NAME_PATTERN = Pattern.compile("^solr\\d{1,2}_schema_\\d{1,2}.\\d{1,2}$");
// If SOLR_HOME environment variable doesn't exist, try these relative paths to find Solr config sets:
private static final String RELATIVE_PATH_TO_CONFIG_SET = "autopsy/solr/solr/configsets/";
private static final String RELATIVE_PATH_TO_CONFIG_SET_2 = "release/solr/solr/configsets/";
IndexFinder() {
uncPathUtilities = new UNCPathUtilities();
}
static String getCurrentSolrVersion() {
return CURRENT_SOLR_VERSION;
@ -63,6 +74,82 @@ class IndexHandling {
return "";
}
String copyIndexAndConfigSet(Case theCase, String oldIndexDir) throws AutopsyService.AutopsyServiceException {
// Copy the "old" index into ModuleOutput/keywordsearch/data/solrX_schema_Y/index
String newIndexDir = createReferenceIndexCopy(theCase, oldIndexDir);
// Make a reference copy of the configset and place it in ModuleOutput/keywordsearch/data/solrX_schema_Y/configset
createReferenceConfigSetCopy(new File(newIndexDir).getParent());
return newIndexDir;
}
private String createReferenceIndexCopy(Case theCase, String indexPath) throws AutopsyService.AutopsyServiceException {
logger.log(Level.INFO, "Creating a reference copy of KWS index in {0} ", indexPath); //NON-NLS
String indexFolderName = "solr" + CURRENT_SOLR_VERSION + "_schema_" + CURRENT_SOLR_SCHEMA_VERSION;
try {
// new index should be stored in "\ModuleOutput\keywordsearch\data\solrX_schema_Y\index"
File targetDirPath = Paths.get(theCase.getModuleDirectory(), KWS_OUTPUT_FOLDER_NAME, KWS_DATA_FOLDER_NAME, indexFolderName, INDEX_FOLDER_NAME).toFile(); //NON-NLS
if (targetDirPath.exists()) {
// targetDirPath should not exist, at least the target directory should be empty
List<File> contents = getAllContentsInFolder(targetDirPath.getAbsolutePath());
if (!contents.isEmpty()) {
// target directory is not empty
logger.log(Level.SEVERE, "Creating a reference copy of KWS index in {0} ", indexPath); //NON-NLS
throw new AutopsyService.AutopsyServiceException("Directory to store the upgraded index must be empty " + targetDirPath.getAbsolutePath());
}
}
targetDirPath.mkdirs();
FileUtils.copyDirectory(new File(indexPath), targetDirPath);
return targetDirPath.getAbsolutePath();
} catch (Exception ex) {
logger.log(Level.SEVERE, "Error occurred while creating a reference copy of keyword search index {0}", ex); //NON-NLS
throw new AutopsyService.AutopsyServiceException("Error occurred while creating a copy of keyword search index", ex);
}
}
// ELTODO This functionality is NTH:
private void createReferenceConfigSetCopy(String indexPath) {
logger.log(Level.INFO, "Creating a reference copy of config set in {0} ", indexPath); //NON-NLS
File pathToConfigSet = new File("");
try {
// See if there is SOLR_HOME environment variable first
String solrHome = System.getenv("SOLR_HOME");
if (solrHome != null && !solrHome.isEmpty()) {
// ELTODO pathToConfigSet =
return; // ELTODO remove
} else {
// if there is no SOLR_HOME:
// this will only work for Windows OS
if (!PlatformUtil.isWindowsOS()) {
throw new AutopsyService.AutopsyServiceException("ELTODO");
}
// config set should be located in "C:/some/directory/AutopsyXYZ/autopsy/solr/solr/configsets/"
pathToConfigSet = Paths.get(System.getProperty("user.dir"), RELATIVE_PATH_TO_CONFIG_SET).toFile();
if (!pathToConfigSet.exists() || !pathToConfigSet.isDirectory()) {
// try the "release/solr/solr/configsets/" folder instead
pathToConfigSet = Paths.get(System.getProperty("user.dir"), RELATIVE_PATH_TO_CONFIG_SET_2).toFile();
if (!pathToConfigSet.exists() || !pathToConfigSet.isDirectory()) {
logger.log(Level.WARNING, "Unable to locate KWS config set in order to create a reference copy"); //NON-NLS
return;
// ELTODO This is NTH: throw new AutopsyService.AutopsyServiceException("ELTODO");
}
}
}
File targetDirPath = new File(indexPath); //NON-NLS
if (!targetDirPath.exists()) {
targetDirPath.mkdirs();
}
// copy config set
if (!pathToConfigSet.getAbsolutePath().isEmpty() && pathToConfigSet.exists()) {
FileUtils.copyDirectory(pathToConfigSet, new File(indexPath));
}
} catch (Exception ex) {
// This feature is a NTH so don't re-throw
logger.log(Level.WARNING, "Error while copying KWS config set to {0}", indexPath); //NON-NLS
}
}
/**
* Find index directory location for the case. This is done via subdirectory
* search of all existing "ModuleOutput/node_name/keywordsearch/data/"
@ -70,9 +157,9 @@ class IndexHandling {
*
* @param theCase the case to get index dir for
*
* @return absolute path to index dir
* @return List of absolute paths to all found index directories
*/
static List<String> findAllIndexDirs(Case theCase) {
List<String> findAllIndexDirs(Case theCase) {
ArrayList<String> candidateIndexDirs = new ArrayList<>();
// first find all existing "/ModuleOutput/keywordsearch/data/" folders
if (theCase.getCaseType() == Case.CaseType.MULTI_USER_CASE) {
@ -88,16 +175,20 @@ class IndexHandling {
// create a list of all sub-directories
List<File> contents = getAllContentsInFolder(theCase.getCaseDirectory());
// ELTODO decipher "ModuleOutput" from path
if (!contents.isEmpty()) {
// decipher "ModuleOutput" directory name from module output path
// (e.g. X:\Case\ingest4\ModuleOutput\) because there is no other way to get it...
String moduleOutDirName = new File(theCase.getModuleDirectory()).getName();
// scan all topLevelOutputDir subfolders for presence of non-empty "/ModuleOutput/keywordsearch/data/" folder
for (File item : contents) {
File path = Paths.get(item.getAbsolutePath(), MODULE_OUTPUT, KWS_OUTPUT_FOLDER_NAME, KWS_DATA_FOLDER_NAME).toFile(); //NON-NLS
File path = Paths.get(item.getAbsolutePath(), moduleOutDirName, KWS_OUTPUT_FOLDER_NAME, KWS_DATA_FOLDER_NAME).toFile(); //NON-NLS
// must be a non-empty directory
if (path.exists() && path.isDirectory()) {
candidateIndexDirs.add(path.toString());
}
}
}
} else {
// single user case
/* NOTE: All of the following paths are valid single user index paths:
@ -118,8 +209,7 @@ class IndexHandling {
for (String path : candidateIndexDirs) {
List<String> validIndexPaths = containsValidIndexFolders(path);
for (String validPath : validIndexPaths) {
indexDirs.add(validPath);
// ELTODO indexDirs.add(convertPathToUNC(validPath));
indexDirs.add(convertPathToUNC(validPath));
// there can be multiple index folders (e.g. current version and "old" version) so keep looking
}
}
@ -146,7 +236,7 @@ class IndexHandling {
/**
* Returns a list of all contents in the folder of interest.
*
* @param path Absolute path of the folder of interest
* @param path Absolute path of the folder of interest
*
* @return List of all contents in the folder of interest
*/
@ -156,12 +246,10 @@ class IndexHandling {
if (contents == null) {
// the directory file is not really a directory..
return Collections.emptyList();
}
else if (contents.length == 0) {
} else if (contents.length == 0) {
// Folder is empty
return Collections.emptyList();
}
else {
} else {
// Folder has contents
return new ArrayList<>(Arrays.asList(contents));
}
@ -217,9 +305,8 @@ class IndexHandling {
*
* @return True or false.
*/
public static boolean matchesIndexFolderNameStandard(String inputString) {
private static boolean matchesIndexFolderNameStandard(String inputString) {
Matcher m = INDEX_FOLDER_NAME_PATTERN.matcher(inputString);
return m.find();
}
}

View File

@ -0,0 +1,173 @@
/*
* Autopsy Forensic Browser
*
* Copyright 2011-2016 Basis Technology Corp.
* Contact: carrier <at> sleuthkit <dot> org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.sleuthkit.autopsy.keywordsearch;
import java.io.File;
import java.io.IOException;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.List;
import java.util.logging.Level;
import org.openide.modules.InstalledFileLocator;
import org.sleuthkit.autopsy.corecomponentinterfaces.AutopsyService;
import org.sleuthkit.autopsy.coreutils.ExecUtil;
import org.sleuthkit.autopsy.coreutils.Logger;
import org.sleuthkit.autopsy.coreutils.PlatformUtil;
/**
* This class handles the task of upgrading old indexes to the latest supported
* Solr version.
*/
class IndexUpgrader {

    // Logger must be keyed to this class; the original registered it under
    // IndexFinder.class (copy/paste bug), which misattributed all log records.
    private static final Logger logger = Logger.getLogger(IndexUpgrader.class.getName());
    private final String JAVA_PATH;

    IndexUpgrader() {
        JAVA_PATH = PlatformUtil.getJavaPath();
    }

    /**
     * Upgrades the Solr index located in newIndexDir, in place, by running the
     * bundled upgrade tools: first Solr 4 to Solr 5, then Solr 5 to Solr 6.
     *
     * @param newIndexDir    Full path to the index directory to upgrade
     * @param tempResultsDir Path to directory where to store tool log output
     *
     * @throws AutopsyService.AutopsyServiceException If the upgrade fails; the
     *                                                index directory is deleted
     *                                                so a partial upgrade is
     *                                                not mistaken for a valid
     *                                                index
     */
    void performIndexUpgrade(String newIndexDir, String tempResultsDir) throws AutopsyService.AutopsyServiceException {
        // ELTODO Check for cancellation at whatever points are feasible

        // Run the upgrade tools on the contents (core) in ModuleOutput/keywordsearch/data/solrX_schema_Y/index
        File tmpDir = Paths.get(tempResultsDir, "IndexUpgrade").toFile(); //NON-NLS
        tmpDir.mkdirs();

        boolean success = true;
        try {
            // upgrade from Solr 4 to 5. If index is newer than Solr 4 then the upgrade script will throw exception right away.
            upgradeSolrIndexVersion4to5(newIndexDir, tempResultsDir);
        } catch (Exception ex) {
            // catch-all firewall for exceptions thrown by the Solr 4 to 5 upgrade tool itself
            logger.log(Level.SEVERE, "Exception while running Solr 4 to Solr 5 upgrade tool " + newIndexDir, ex); //NON-NLS
            success = false;
        }

        if (success) {
            try {
                // upgrade from Solr 5 to 6. This one must complete successfully in order to produce a valid Solr 6 index.
                upgradeSolrIndexVersion5to6(newIndexDir, tempResultsDir);
            } catch (Exception ex) {
                // catch-all firewall for exceptions thrown by Solr 5 to 6 upgrade tool itself
                logger.log(Level.SEVERE, "Exception while running Solr 5 to Solr 6 upgrade tool " + newIndexDir, ex); //NON-NLS
                success = false;
            }
        }

        if (!success) {
            // Delete the new index directory. File.delete() fails silently on a
            // non-empty directory, so the contents must be removed recursively.
            deleteRecursively(new File(newIndexDir));
            throw new AutopsyService.AutopsyServiceException("Failed to upgrade existing keyword search index");
        }
    }

    /**
     * Upgrades Solr index from version 4 to 5.
     *
     * @param solr4IndexPath Full path to Solr v4 index directory
     * @param tempResultsDir Path to directory where to store log output
     */
    private void upgradeSolrIndexVersion4to5(String solr4IndexPath, String tempResultsDir) throws AutopsyService.AutopsyServiceException, SecurityException, IOException {
        logger.log(Level.INFO, "Upgrading KWS index {0} from Solr 4 to Solr 5", solr4IndexPath); //NON-NLS
        runIndexUpgradeTool("Solr4to5IndexUpgrade", "Solr4IndexUpgrade.jar", "Solr 4 to Solr 5", solr4IndexPath, tempResultsDir); //NON-NLS
        // alternatively can execute lucene upgrade command from the folder where lucene jars are located
        // java -cp ".;lucene-core-5.5.1.jar;lucene-backward-codecs-5.5.1.jar;lucene-codecs-5.5.1.jar;lucene-analyzers-common-5.5.1.jar" org.apache.lucene.index.IndexUpgrader \path\to\index
    }

    /**
     * Upgrades Solr index from version 5 to 6.
     *
     * @param solr5IndexPath Full path to Solr v5 index directory
     * @param tempResultsDir Path to directory where to store log output
     */
    private void upgradeSolrIndexVersion5to6(String solr5IndexPath, String tempResultsDir) throws AutopsyService.AutopsyServiceException, SecurityException, IOException {
        logger.log(Level.INFO, "Upgrading KWS index {0} from Solr 5 to Solr 6", solr5IndexPath); //NON-NLS
        runIndexUpgradeTool("Solr5to6IndexUpgrade", "Solr5IndexUpgrade.jar", "Solr 5 to Solr 6", solr5IndexPath, tempResultsDir); //NON-NLS
        // alternatively can execute lucene upgrade command from the folder where lucene jars are located
        // java -cp ".;lucene-core-6.2.1.jar;lucene-backward-codecs-6.2.1.jar;lucene-codecs-6.2.1.jar;lucene-analyzers-common-6.2.1.jar" org.apache.lucene.index.IndexUpgrader \path\to\index
    }

    /**
     * Locates one of the bundled index upgrade tools and runs its JAR against
     * the given index directory, redirecting tool output to files in
     * tempResultsDir. Shared implementation for both upgrade steps.
     *
     * @param toolFolderName  Name of the installed tool folder to locate
     * @param jarFileName     Name of the upgrade JAR inside that folder
     * @param toolDescription Human-readable tool name used in log/error text
     * @param indexPath       Full path to the index directory to upgrade
     * @param tempResultsDir  Path to directory where to store log output
     *
     * @throws AutopsyService.AutopsyServiceException If the tool or its JAR
     *                                                cannot be located
     */
    private void runIndexUpgradeTool(String toolFolderName, String jarFileName, String toolDescription, String indexPath, String tempResultsDir) throws AutopsyService.AutopsyServiceException, SecurityException, IOException {
        String outputFileName = "output.txt"; //NON-NLS
        // find the index upgrade tool
        final File upgradeToolFolder = InstalledFileLocator.getDefault().locate(toolFolderName, IndexUpgrader.class.getPackage().getName(), false); //NON-NLS
        if (upgradeToolFolder == null) {
            logger.log(Level.SEVERE, "Unable to locate {0} upgrade tool", toolDescription); //NON-NLS
            throw new AutopsyService.AutopsyServiceException("Unable to locate " + toolDescription + " upgrade tool");
        }

        // full path to index upgrade jar file
        File upgradeJarPath = Paths.get(upgradeToolFolder.getAbsolutePath(), jarFileName).toFile();
        if (!upgradeJarPath.exists() || !upgradeJarPath.isFile()) {
            logger.log(Level.SEVERE, "Unable to locate {0} upgrade tool''s JAR file at {1}", new Object[]{toolDescription, upgradeJarPath}); //NON-NLS
            throw new AutopsyService.AutopsyServiceException("Unable to locate " + toolDescription + " upgrade tool's JAR file");
        }

        // create log output directory if it doesn't exist
        new File(tempResultsDir).mkdirs();

        final String outputFileFullPath = Paths.get(tempResultsDir, outputFileName).toString();
        final String errFileFullPath = Paths.get(tempResultsDir, outputFileName + ".err").toString(); //NON-NLS
        List<String> commandLine = new ArrayList<>();
        commandLine.add(JAVA_PATH);
        commandLine.add("-jar"); //NON-NLS
        commandLine.add(upgradeJarPath.getAbsolutePath());
        commandLine.add(indexPath);
        ProcessBuilder processBuilder = new ProcessBuilder(commandLine);
        processBuilder.redirectOutput(new File(outputFileFullPath));
        processBuilder.redirectError(new File(errFileFullPath));
        ExecUtil.execute(processBuilder);
    }

    /**
     * Recursively deletes a file or directory tree. Needed because
     * File.delete() returns false (without throwing) on a non-empty directory.
     *
     * @param file File or directory to delete
     */
    private static void deleteRecursively(File file) {
        File[] contents = file.listFiles();
        if (contents != null) {
            for (File child : contents) {
                deleteRecursively(child);
            }
        }
        file.delete();
    }
}

View File

@ -21,9 +21,11 @@ package org.sleuthkit.autopsy.keywordsearch;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.Timer;
import java.util.TimerTask;
import java.util.concurrent.CancellationException;
@ -262,12 +264,14 @@ public final class SearchRunner {
// mutable state:
private volatile boolean workerRunning;
private List<String> keywordListNames; //guarded by SearchJobInfo.this
private Map<Keyword, List<Long>> currentResults; //guarded by SearchJobInfo.this
// Map of keyword to the object ids that contain a hit
private Map<Keyword, Set<Long>> currentResults; //guarded by SearchJobInfo.this
private SearchRunner.Searcher currentSearcher;
private AtomicLong moduleReferenceCount = new AtomicLong(0);
private final Object finalSearchLock = new Object(); //used for a condition wait
public SearchJobInfo(long jobId, long dataSourceId, List<String> keywordListNames) {
private SearchJobInfo(long jobId, long dataSourceId, List<String> keywordListNames) {
this.jobId = jobId;
this.dataSourceId = dataSourceId;
this.keywordListNames = new ArrayList<>(keywordListNames);
@ -276,53 +280,53 @@ public final class SearchRunner {
currentSearcher = null;
}
public long getJobId() {
private long getJobId() {
return jobId;
}
public long getDataSourceId() {
private long getDataSourceId() {
return dataSourceId;
}
public synchronized List<String> getKeywordListNames() {
private synchronized List<String> getKeywordListNames() {
return new ArrayList<>(keywordListNames);
}
public synchronized void addKeywordListName(String keywordListName) {
private synchronized void addKeywordListName(String keywordListName) {
if (!keywordListNames.contains(keywordListName)) {
keywordListNames.add(keywordListName);
}
}
public synchronized List<Long> currentKeywordResults(Keyword k) {
private synchronized Set<Long> currentKeywordResults(Keyword k) {
return currentResults.get(k);
}
public synchronized void addKeywordResults(Keyword k, List<Long> resultsIDs) {
private synchronized void addKeywordResults(Keyword k, Set<Long> resultsIDs) {
currentResults.put(k, resultsIDs);
}
public boolean isWorkerRunning() {
private boolean isWorkerRunning() {
return workerRunning;
}
public void setWorkerRunning(boolean flag) {
private void setWorkerRunning(boolean flag) {
workerRunning = flag;
}
public synchronized SearchRunner.Searcher getCurrentSearcher() {
private synchronized SearchRunner.Searcher getCurrentSearcher() {
return currentSearcher;
}
public synchronized void setCurrentSearcher(SearchRunner.Searcher searchRunner) {
private synchronized void setCurrentSearcher(SearchRunner.Searcher searchRunner) {
currentSearcher = searchRunner;
}
public void incrementModuleReferenceCount() {
private void incrementModuleReferenceCount() {
moduleReferenceCount.incrementAndGet();
}
public long decrementModuleReferenceCount() {
private long decrementModuleReferenceCount() {
return moduleReferenceCount.decrementAndGet();
}
@ -331,7 +335,7 @@ public final class SearchRunner {
*
* @throws InterruptedException
*/
public void waitForCurrentWorker() throws InterruptedException {
private void waitForCurrentWorker() throws InterruptedException {
synchronized (finalSearchLock) {
while (workerRunning) {
finalSearchLock.wait(); //wait() releases the lock
@ -342,7 +346,7 @@ public final class SearchRunner {
/**
* Unset workerRunning and wake up thread(s) waiting on finalSearchLock
*/
public void searchNotify() {
private void searchNotify() {
synchronized (finalSearchLock) {
workerRunning = false;
finalSearchLock.notify();
@ -468,8 +472,8 @@ public final class SearchRunner {
return null;
}
// calculate new results by substracting results already obtained in this ingest
// this creates a map of each keyword to the list of unique files that have that hit.
// Reduce the results of the query to only those hits we
// have not already seen.
QueryResults newResults = filterResults(queryResults);
if (!newResults.getKeywords().isEmpty()) {
@ -567,40 +571,68 @@ public final class SearchRunner {
});
}
//calculate new results but substracting results already obtained in this ingest
//update currentResults map with the new results
/**
* This method filters out all of the hits found in earlier
* periodic searches and returns only the results found by the most
* recent search.
*
* This method will only return hits for objects for which we haven't
* previously seen a hit for the keyword.
*
* @param queryResult The results returned by a keyword search.
* @return The set of hits found by the most recent search for objects
* that have not previously had a hit.
*
*/
private QueryResults filterResults(QueryResults queryResult) {
// Create a new (empty) QueryResults object to hold the most recently
// found hits.
QueryResults newResults = new QueryResults(queryResult.getQuery(), queryResult.getKeywordList());
// For each keyword represented in the results.
for (Keyword keyword : queryResult.getKeywords()) {
// These are all of the hits across all objects for the most recent search.
// This may well include duplicates of hits we've seen in earlier periodic searches.
List<KeywordHit> queryTermResults = queryResult.getResults(keyword);
//translate to list of IDs that we keep track of
List<Long> queryTermResultsIDs = new ArrayList<>();
for (KeywordHit ch : queryTermResults) {
queryTermResultsIDs.add(ch.getSolrObjectId());
// This will be used to build up the hits we haven't seen before
// for this keyword.
List<KeywordHit> newUniqueHits = new ArrayList<>();
// Get the set of object ids seen in the past by this searcher
// for the given keyword.
Set<Long> curTermResults = job.currentKeywordResults(keyword);
if (curTermResults == null) {
// We create a new empty set if we haven't seen results for
// this keyword before.
curTermResults = new HashSet<>();
}
List<Long> curTermResults = job.currentKeywordResults(keyword);
if (curTermResults == null) {
job.addKeywordResults(keyword, queryTermResultsIDs);
newResults.addResult(keyword, queryTermResults);
} else {
//some AbstractFile hits already exist for this keyword
for (KeywordHit res : queryTermResults) {
if (!curTermResults.contains(res.getSolrObjectId())) {
//add to new results
List<KeywordHit> newResultsFs = newResults.getResults(keyword);
if (newResultsFs == null) {
newResultsFs = new ArrayList<>();
newResults.addResult(keyword, newResultsFs);
}
newResultsFs.add(res);
curTermResults.add(res.getSolrObjectId());
}
// For each hit for this keyword.
for (KeywordHit hit : queryTermResults) {
if (curTermResults.contains(hit.getSolrObjectId())) {
// Skip the hit if we've already seen a hit for
// this keyword in the object.
continue;
}
// We haven't seen the hit before so add it to list of new
// unique hits.
newUniqueHits.add(hit);
// Add the object id to the results we've seen for this
// keyword.
curTermResults.add(hit.getSolrObjectId());
}
// Update the job with the list of objects for which we have
// seen hits for the current keyword.
job.addKeywordResults(keyword, curTermResults);
// Add the new hits for the current keyword into the results
// to be returned.
newResults.addResult(keyword, newUniqueHits);
}
return newResults;

View File

@ -18,18 +18,21 @@
*/
package org.sleuthkit.autopsy.keywordsearch;
import java.io.File;
import java.io.IOException;
import java.net.InetAddress;
import java.nio.file.Paths;
import java.util.List;
import java.util.MissingResourceException;
import java.util.logging.Level;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.openide.util.NbBundle;
import org.openide.util.lookup.ServiceProvider;
import org.openide.util.lookup.ServiceProviders;
import org.sleuthkit.autopsy.casemodule.Case;
import org.sleuthkit.autopsy.core.RuntimeProperties;
import org.sleuthkit.autopsy.corecomponentinterfaces.AutopsyService;
import org.sleuthkit.autopsy.coreutils.Logger;
import org.sleuthkit.autopsy.keywordsearchservice.KeywordSearchService;
import org.sleuthkit.autopsy.keywordsearchservice.KeywordSearchServiceException;
import org.sleuthkit.datamodel.BlackboardArtifact;
@ -45,6 +48,7 @@ import org.sleuthkit.datamodel.TskCoreException;
)
public class SolrSearchService implements KeywordSearchService, AutopsyService {
private static final Logger logger = Logger.getLogger(IndexFinder.class.getName());
private static final String BAD_IP_ADDRESS_FORMAT = "ioexception occurred when talking to server"; //NON-NLS
private static final String SERVER_REFUSED_CONNECTION = "server refused connection"; //NON-NLS
private static final int IS_REACHABLE_TIMEOUT_MS = 1000;
@ -144,7 +148,7 @@ public class SolrSearchService implements KeywordSearchService, AutopsyService
* @param context
*
* @throws
* org.sleuthkit.autopsy.corecomponentinterfaces.AutopsyServiceProvider.AutopsyServiceProviderException
* org.sleuthkit.autopsy.corecomponentinterfaces.AutopsyService.AutopsyServiceException
*/
@Override
public void openCaseResources(CaseContext context) throws AutopsyServiceException {
@ -153,10 +157,11 @@ public class SolrSearchService implements KeywordSearchService, AutopsyService
*/
// do a case subdirectory search to check for the existence and upgrade status of KWS indexes
List<String> indexDirs = IndexHandling.findAllIndexDirs(Case.getCurrentCase());
IndexFinder indexFinder = new IndexFinder();
List<String> indexDirs = indexFinder.findAllIndexDirs(context.getCase());
// check if index needs upgrade
String currentVersionIndexDir = IndexHandling.findLatestVersionIndexDir(indexDirs);
String currentVersionIndexDir = IndexFinder.findLatestVersionIndexDir(indexDirs);
if (currentVersionIndexDir.isEmpty()) {
// ELTODO not sure what to do when there are multiple old indexes. grab the first one?
@ -165,37 +170,31 @@ public class SolrSearchService implements KeywordSearchService, AutopsyService
if (RuntimeProperties.coreComponentsAreActive()) {
//pop up a message box to indicate the restrictions on adding additional
//text and performing regex searches and give the user the option to decline the upgrade
boolean upgradeDeclined = false;
if (upgradeDeclined) {
throw new AutopsyServiceException("ELTODO");
if (!KeywordSearchUtil.displayConfirmDialog(NbBundle.getMessage(this.getClass(), "SolrSearchService.IndexUpgradeDialog.title"),
NbBundle.getMessage(this.getClass(), "SolrSearchService.IndexUpgradeDialog.msg"),
KeywordSearchUtil.DIALOG_MESSAGE_TYPE.WARN)) {
// upgrade declined - throw exception
throw new AutopsyServiceException("Index upgrade was declined by user");
}
}
// ELTODO Check for cancellation at whatever points are feasible
// Copy the contents (core) of ModuleOutput/keywordsearch/data/index into ModuleOutput/keywordsearch/data/solr6_schema_2.0/index
// Copy the "old" index and config set into ModuleOutput/keywordsearch/data/solrX_schema_Y/
String newIndexDir = indexFinder.copyIndexAndConfigSet(context.getCase(), oldIndexDir);
// Make a reference copy of the configset and place it in ModuleOutput/keywordsearch/data/solr6_schema_2.0/configset
// upgrade the "old" index to the latest supported Solr version
IndexUpgrader indexUpgrader = new IndexUpgrader();
indexUpgrader.performIndexUpgrade(newIndexDir, context.getCase().getTempDirectory());
// convert path to UNC path
// set the upgraded reference index as the index to be used for this case
currentVersionIndexDir = newIndexDir;
}
// Run the upgrade tools on the contents (core) in ModuleOutput/keywordsearch/data/solr6_schema_2.0/index
// Open the upgraded index
// open currentVersionIndexDir index
// execute a test query
boolean success = true;
if (!success) {
// delete the new directories
// close the upgraded index?
throw new AutopsyServiceException("ELTODO");
}
// currentVersionIndexDir = upgraded index dir
}
// if failed, close the upgraded index?
}
/**
@ -203,7 +202,7 @@ public class SolrSearchService implements KeywordSearchService, AutopsyService
* @param context
*
* @throws
* org.sleuthkit.autopsy.corecomponentinterfaces.AutopsyServiceProvider.AutopsyServiceProviderException
* org.sleuthkit.autopsy.corecomponentinterfaces.AutopsyService.AutopsyServiceException
*/
@Override
public void closeCaseResources(CaseContext context) throws AutopsyServiceException {

View File

@ -1,23 +1,17 @@
---------------- VERSION 4.3.0 --------------
Improvements:
- Creation and analysis (e.g., keyword search) of virtual files for slack
space.
- A preloader in an Android device image does not prevent adding the image as
a data source (reading of secondary GPT tables supported).
- User can add data sources with no file systems or unsupported file systems
as "unallocated space image files" for carving, keyword search, etc.
- File extension mismatch analysis can be configured to check all file types,
all file types except text files, or only multimedia and executable files.
- Column order changes in table views are "sticky" for each type of tree view
item.
- Tree view has new file types by MIME type sub tree.
- User can bulk add list of keywords to a keyword list.
- Support for slack space on files (as separate virtual files) to enable keyword searching and other analysis.
- Simple mode for the file extension mismatch module that focuses on only multimedia and executable files to reduce false positives.
- New view in tree that shows the MIME types.
- Tagged items are highlighted in table views.
- Toolbar button for Image/Video Gallery
- New "Experimental" module (activate via Tools, Plugins) with auto ingest
feature.
- Ordering of columns is saved when user changes them.
- Support for Android devices with preloaders (uses backup GPT)
- Support for images with no file systems (all data is added as unallocated space)
- User can bulk add list of keywords to a keyword list.
- New "Experimental" module (activate via Tools, Plugins) with auto ingest feature.
- Assorted bug fixes and minor enhancements.
---------------- VERSION 4.2.0 --------------
Improvements:
- Credit card account search.