diff --git a/KeywordSearch/release/Solr4to5IndexUpgrade/Solr4IndexUpgrade.jar b/KeywordSearch/release/Solr4to5IndexUpgrade/Solr4IndexUpgrade.jar
new file mode 100644
index 0000000000..764fc7df8d
Binary files /dev/null and b/KeywordSearch/release/Solr4to5IndexUpgrade/Solr4IndexUpgrade.jar differ
diff --git a/KeywordSearch/release/Solr4to5IndexUpgrade/lib/lucene-analyzers-common-5.5.1.jar b/KeywordSearch/release/Solr4to5IndexUpgrade/lib/lucene-analyzers-common-5.5.1.jar
new file mode 100644
index 0000000000..ec29040eea
Binary files /dev/null and b/KeywordSearch/release/Solr4to5IndexUpgrade/lib/lucene-analyzers-common-5.5.1.jar differ
diff --git a/KeywordSearch/release/Solr4to5IndexUpgrade/lib/lucene-backward-codecs-5.5.1.jar b/KeywordSearch/release/Solr4to5IndexUpgrade/lib/lucene-backward-codecs-5.5.1.jar
new file mode 100644
index 0000000000..b956d2eba5
Binary files /dev/null and b/KeywordSearch/release/Solr4to5IndexUpgrade/lib/lucene-backward-codecs-5.5.1.jar differ
diff --git a/KeywordSearch/release/Solr4to5IndexUpgrade/lib/lucene-codecs-5.5.1.jar b/KeywordSearch/release/Solr4to5IndexUpgrade/lib/lucene-codecs-5.5.1.jar
new file mode 100644
index 0000000000..fdc493651f
Binary files /dev/null and b/KeywordSearch/release/Solr4to5IndexUpgrade/lib/lucene-codecs-5.5.1.jar differ
diff --git a/KeywordSearch/release/Solr4to5IndexUpgrade/lib/lucene-core-5.5.1.jar b/KeywordSearch/release/Solr4to5IndexUpgrade/lib/lucene-core-5.5.1.jar
new file mode 100644
index 0000000000..c52dd9e33f
Binary files /dev/null and b/KeywordSearch/release/Solr4to5IndexUpgrade/lib/lucene-core-5.5.1.jar differ
diff --git a/KeywordSearch/release/Solr5to6IndexUpgrade/Solr5IndexUpgrade.jar b/KeywordSearch/release/Solr5to6IndexUpgrade/Solr5IndexUpgrade.jar
new file mode 100644
index 0000000000..9a18d23a98
Binary files /dev/null and b/KeywordSearch/release/Solr5to6IndexUpgrade/Solr5IndexUpgrade.jar differ
diff --git a/KeywordSearch/release/Solr5to6IndexUpgrade/lib/lucene-analyzers-common-6.2.1.jar b/KeywordSearch/release/Solr5to6IndexUpgrade/lib/lucene-analyzers-common-6.2.1.jar
new file mode 100644
index 0000000000..52df2a773f
Binary files /dev/null and b/KeywordSearch/release/Solr5to6IndexUpgrade/lib/lucene-analyzers-common-6.2.1.jar differ
diff --git a/KeywordSearch/release/Solr5to6IndexUpgrade/lib/lucene-backward-codecs-6.2.1.jar b/KeywordSearch/release/Solr5to6IndexUpgrade/lib/lucene-backward-codecs-6.2.1.jar
new file mode 100644
index 0000000000..30cf3b9576
Binary files /dev/null and b/KeywordSearch/release/Solr5to6IndexUpgrade/lib/lucene-backward-codecs-6.2.1.jar differ
diff --git a/KeywordSearch/release/Solr5to6IndexUpgrade/lib/lucene-codecs-6.2.1.jar b/KeywordSearch/release/Solr5to6IndexUpgrade/lib/lucene-codecs-6.2.1.jar
new file mode 100644
index 0000000000..c61005b61b
Binary files /dev/null and b/KeywordSearch/release/Solr5to6IndexUpgrade/lib/lucene-codecs-6.2.1.jar differ
diff --git a/KeywordSearch/release/Solr5to6IndexUpgrade/lib/lucene-core-6.2.1.jar b/KeywordSearch/release/Solr5to6IndexUpgrade/lib/lucene-core-6.2.1.jar
new file mode 100644
index 0000000000..e557e1b955
Binary files /dev/null and b/KeywordSearch/release/Solr5to6IndexUpgrade/lib/lucene-core-6.2.1.jar differ
diff --git a/KeywordSearch/src/org/sleuthkit/autopsy/keywordsearch/Bundle.properties b/KeywordSearch/src/org/sleuthkit/autopsy/keywordsearch/Bundle.properties
index 576ce90aac..d6fc2eabed 100644
--- a/KeywordSearch/src/org/sleuthkit/autopsy/keywordsearch/Bundle.properties
+++ b/KeywordSearch/src/org/sleuthkit/autopsy/keywordsearch/Bundle.properties
@@ -313,3 +313,5 @@ GlobalEditListPanel.keywordDupesSkipped.text={0} keyword was already in the list
GlobalEditListPanel.keywordDupesSkippedPlural.text={0} keywords were already in the list.
GlobalEditListPanel.keywordErrors.text={0} keyword could not be parsed. Please review and try again.
GlobalEditListPanel.keywordErrorsPlural.text={0} keywords could not be parsed. Please review and try again.
+SolrSearchService.IndexUpgradeDialog.title=Index Upgrade Required In Order To Open Case
+SolrSearchService.IndexUpgradeDialog.msg=Index upgrade can be a very lengthy operation that involves copying the existing index and calling third-party tools to upgrade it.
Upon upgrade you will be able to see existing keyword search results and perform literal keyword searches on the existing index.
However, you will not be able to add new text to the index or perform regex searches.
You must create a new case and re-run the Keyword Search Ingest Module if you want to index new text or perform regex searches.
Do you wish to proceed with the index upgrade?
\ No newline at end of file
diff --git a/KeywordSearch/src/org/sleuthkit/autopsy/keywordsearch/IndexHandling.java b/KeywordSearch/src/org/sleuthkit/autopsy/keywordsearch/IndexFinder.java
similarity index 54%
rename from KeywordSearch/src/org/sleuthkit/autopsy/keywordsearch/IndexHandling.java
rename to KeywordSearch/src/org/sleuthkit/autopsy/keywordsearch/IndexFinder.java
index 3ee8e1f118..affa77607e 100644
--- a/KeywordSearch/src/org/sleuthkit/autopsy/keywordsearch/IndexHandling.java
+++ b/KeywordSearch/src/org/sleuthkit/autopsy/keywordsearch/IndexFinder.java
@@ -24,27 +24,38 @@ import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
+import java.util.logging.Level;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
+import org.apache.commons.io.FileUtils;
+import org.openide.util.NbBundle;
import org.sleuthkit.autopsy.casemodule.Case;
+import org.sleuthkit.autopsy.corecomponentinterfaces.AutopsyService;
+import org.sleuthkit.autopsy.coreutils.Logger;
import org.sleuthkit.autopsy.coreutils.UNCPathUtilities;
+import org.sleuthkit.autopsy.coreutils.PlatformUtil;
/**
- * This class handles the task of finding KWS index folders and upgrading old
- * indexes to the latest supported Solr version.
+ * This class handles the task of finding and identifying KWS index folders.
*/
-class IndexHandling {
-
- private UNCPathUtilities uncPathUtilities = new UNCPathUtilities();
- private static final String MODULE_OUTPUT = "ModuleOutput"; // ELTODO get "ModuleOutput" somehow...
+class IndexFinder {
+
+ private static final Logger logger = Logger.getLogger(IndexFinder.class.getName());
+ private UNCPathUtilities uncPathUtilities;
private static final String KWS_OUTPUT_FOLDER_NAME = "keywordsearch";
private static final String KWS_DATA_FOLDER_NAME = "data";
private static final String INDEX_FOLDER_NAME = "index";
private static final String CURRENT_SOLR_VERSION = "6";
private static final String CURRENT_SOLR_SCHEMA_VERSION = "2.0";
- private static final Pattern INDEX_FOLDER_NAME_PATTERN = Pattern.compile("^solr\\d{1,2}_schema_\\d{1,2}.\\d{1,2}$");
-
+ private static final Pattern INDEX_FOLDER_NAME_PATTERN = Pattern.compile("^solr\\d{1,2}_schema_\\d{1,2}.\\d{1,2}$");
+ // If SOLR_HOME environment variable doesn't exist, try these relative paths to find Solr config sets:
+ private static final String RELATIVE_PATH_TO_CONFIG_SET = "autopsy/solr/solr/configsets/";
+ private static final String RELATIVE_PATH_TO_CONFIG_SET_2 = "release/solr/solr/configsets/";
+ IndexFinder() {
+ uncPathUtilities = new UNCPathUtilities();
+ }
+
static String getCurrentSolrVersion() {
return CURRENT_SOLR_VERSION;
}
@@ -52,7 +63,7 @@ class IndexHandling {
static String getCurrentSchemaVersion() {
return CURRENT_SOLR_SCHEMA_VERSION;
}
-
+
static String findLatestVersionIndexDir(List allIndexes) {
String indexFolderName = "solr" + CURRENT_SOLR_VERSION + "_schema_" + CURRENT_SOLR_SCHEMA_VERSION;
for (String path : allIndexes) {
@@ -63,6 +74,82 @@ class IndexHandling {
return "";
}
+ String copyIndexAndConfigSet(Case theCase, String oldIndexDir) throws AutopsyService.AutopsyServiceException {
+ // Copy the "old" index into ModuleOutput/keywordsearch/data/solrX_schema_Y/index
+ String newIndexDir = createReferenceIndexCopy(theCase, oldIndexDir);
+
+ // Make a “reference copy” of the configset and place it in ModuleOutput/keywordsearch/data/solrX_schema_Y/configset
+ createReferenceConfigSetCopy(new File(newIndexDir).getParent());
+
+ return newIndexDir;
+ }
+
+ private String createReferenceIndexCopy(Case theCase, String indexPath) throws AutopsyService.AutopsyServiceException {
+        logger.log(Level.INFO, "Creating a reference copy of KWS index from {0} ", indexPath); //NON-NLS
+ String indexFolderName = "solr" + CURRENT_SOLR_VERSION + "_schema_" + CURRENT_SOLR_SCHEMA_VERSION;
+ try {
+ // new index should be stored in "\ModuleOutput\keywordsearch\data\solrX_schema_Y\index"
+ File targetDirPath = Paths.get(theCase.getModuleDirectory(), KWS_OUTPUT_FOLDER_NAME, KWS_DATA_FOLDER_NAME, indexFolderName, INDEX_FOLDER_NAME).toFile(); //NON-NLS
+ if (targetDirPath.exists()) {
+ // targetDirPath should not exist, at least the target directory should be empty
+ List contents = getAllContentsInFolder(targetDirPath.getAbsolutePath());
+ if (!contents.isEmpty()) {
+ // target directory is not empty
+                    logger.log(Level.SEVERE, "Directory to store the upgraded index is not empty: {0}", targetDirPath.getAbsolutePath()); //NON-NLS
+ throw new AutopsyService.AutopsyServiceException("Directory to store the upgraded index must be empty " + targetDirPath.getAbsolutePath());
+ }
+ }
+ targetDirPath.mkdirs();
+ FileUtils.copyDirectory(new File(indexPath), targetDirPath);
+ return targetDirPath.getAbsolutePath();
+ } catch (Exception ex) {
+            logger.log(Level.SEVERE, "Error occurred while creating a reference copy of keyword search index", ex); //NON-NLS
+ throw new AutopsyService.AutopsyServiceException("Error occurred while creating a copy of keyword search index", ex);
+ }
+ }
+
+ // ELTODO This functionality is NTH:
+ private void createReferenceConfigSetCopy(String indexPath) {
+ logger.log(Level.INFO, "Creating a reference copy of config set in {0} ", indexPath); //NON-NLS
+ File pathToConfigSet = new File("");
+ try {
+ // See if there is SOLR_HOME environment variable first
+ String solrHome = System.getenv("SOLR_HOME");
+ if (solrHome != null && !solrHome.isEmpty()) {
+ // ELTODO pathToConfigSet =
+ return; // ELTODO remove
+ } else {
+ // if there is no SOLR_HOME:
+ // this will only work for Windows OS
+ if (!PlatformUtil.isWindowsOS()) {
+                    throw new AutopsyService.AutopsyServiceException("Cannot locate Solr config set: SOLR_HOME is not set and platform is not Windows");
+ }
+ // config set should be located in "C:/some/directory/AutopsyXYZ/autopsy/solr/solr/configsets/"
+ pathToConfigSet = Paths.get(System.getProperty("user.dir"), RELATIVE_PATH_TO_CONFIG_SET).toFile();
+ if (!pathToConfigSet.exists() || !pathToConfigSet.isDirectory()) {
+ // try the "release/solr/solr/configsets/" folder instead
+ pathToConfigSet = Paths.get(System.getProperty("user.dir"), RELATIVE_PATH_TO_CONFIG_SET_2).toFile();
+ if (!pathToConfigSet.exists() || !pathToConfigSet.isDirectory()) {
+ logger.log(Level.WARNING, "Unable to locate KWS config set in order to create a reference copy"); //NON-NLS
+ return;
+ // ELTODO This is NTH: throw new AutopsyService.AutopsyServiceException("ELTODO");
+ }
+ }
+ }
+ File targetDirPath = new File(indexPath); //NON-NLS
+ if (!targetDirPath.exists()) {
+ targetDirPath.mkdirs();
+ }
+ // copy config set
+ if (!pathToConfigSet.getAbsolutePath().isEmpty() && pathToConfigSet.exists()) {
+ FileUtils.copyDirectory(pathToConfigSet, new File(indexPath));
+ }
+ } catch (Exception ex) {
+ // This feature is a NTH so don't re-throw
+            logger.log(Level.WARNING, "Error while copying KWS config set to " + indexPath, ex); //NON-NLS
+ }
+ }
+
/**
* Find index directory location for the case. This is done via subdirectory
* search of all existing "ModuleOutput/node_name/keywordsearch/data/"
@@ -70,9 +157,9 @@ class IndexHandling {
*
* @param theCase the case to get index dir for
*
- * @return absolute path to index dir
+ * @return List of absolute paths to all found index directories
*/
- static List findAllIndexDirs(Case theCase) {
+ List findAllIndexDirs(Case theCase) {
ArrayList candidateIndexDirs = new ArrayList<>();
// first find all existing "/ModuleOutput/keywordsearch/data/" folders
if (theCase.getCaseType() == Case.CaseType.MULTI_USER_CASE) {
@@ -87,15 +174,19 @@ class IndexHandling {
// create a list of all sub-directories
List contents = getAllContentsInFolder(theCase.getCaseDirectory());
-
- // ELTODO decipher "ModuleOutput" from path
- // scan all topLevelOutputDir subfolders for presence of non-empty "/ModuleOutput/keywordsearch/data/" folder
- for (File item : contents) {
- File path = Paths.get(item.getAbsolutePath(), MODULE_OUTPUT, KWS_OUTPUT_FOLDER_NAME, KWS_DATA_FOLDER_NAME).toFile(); //NON-NLS
- // must be a non-empty directory
- if (path.exists() && path.isDirectory()) {
- candidateIndexDirs.add(path.toString());
+ if (!contents.isEmpty()) {
+ // decipher "ModuleOutput" directory name from module output path
+ // (e.g. X:\Case\ingest4\ModuleOutput\) because there is no other way to get it...
+ String moduleOutDirName = new File(theCase.getModuleDirectory()).getName();
+
+ // scan all topLevelOutputDir subfolders for presence of non-empty "/ModuleOutput/keywordsearch/data/" folder
+ for (File item : contents) {
+ File path = Paths.get(item.getAbsolutePath(), moduleOutDirName, KWS_OUTPUT_FOLDER_NAME, KWS_DATA_FOLDER_NAME).toFile(); //NON-NLS
+ // must be a non-empty directory
+ if (path.exists() && path.isDirectory()) {
+ candidateIndexDirs.add(path.toString());
+ }
}
}
} else {
@@ -112,20 +203,19 @@ class IndexHandling {
candidateIndexDirs.add(path.toString());
}
}
-
+
// analyze possible index folders
ArrayList indexDirs = new ArrayList<>();
for (String path : candidateIndexDirs) {
List validIndexPaths = containsValidIndexFolders(path);
for (String validPath : validIndexPaths) {
- indexDirs.add(validPath);
- // ELTODO indexDirs.add(convertPathToUNC(validPath));
+ indexDirs.add(convertPathToUNC(validPath));
// there can be multiple index folders (e.g. current version and "old" version) so keep looking
}
}
return indexDirs;
}
-
+
String convertPathToUNC(String indexDir) {
// ELTODO do we need to do this when searching for old index?
if (uncPathUtilities == null) {
@@ -146,7 +236,7 @@ class IndexHandling {
/**
* Returns a list of all contents in the folder of interest.
*
- * @param path Absolute path of the folder of interest
+ * @param path Absolute targetDirPath of the folder of interest
*
* @return List of all contents in the folder of interest
*/
@@ -156,12 +246,10 @@ class IndexHandling {
if (contents == null) {
// the directory file is not really a directory..
return Collections.emptyList();
- }
- else if (contents.length == 0) {
+ } else if (contents.length == 0) {
// Folder is empty
return Collections.emptyList();
- }
- else {
+ } else {
// Folder has contents
return new ArrayList<>(Arrays.asList(contents));
}
@@ -189,7 +277,7 @@ class IndexHandling {
// keep looking as there may be more index folders
continue;
}
-
+
// check if the folder matches "solrX_schema_Y" patern
if (matchesIndexFolderNameStandard(item.getName())) {
File nextLevelIndexFolder = Paths.get(item.getAbsolutePath(), INDEX_FOLDER_NAME).toFile();
@@ -202,24 +290,23 @@ class IndexHandling {
}
return indexFolders;
}
-
+
private static boolean isNonEmptyIndexFolder(File path) {
if (path.exists() && path.isDirectory() && path.getName().equals(INDEX_FOLDER_NAME) && path.listFiles().length > 0) {
return true;
}
return false;
}
-
- /**
+
+ /**
* Checks whether a name matches index folder name standard
*
* @param inputString The string to check.
*
* @return True or false.
*/
- public static boolean matchesIndexFolderNameStandard(String inputString) {
+ private static boolean matchesIndexFolderNameStandard(String inputString) {
Matcher m = INDEX_FOLDER_NAME_PATTERN.matcher(inputString);
return m.find();
- }
-
+ }
}
diff --git a/KeywordSearch/src/org/sleuthkit/autopsy/keywordsearch/IndexUpgrader.java b/KeywordSearch/src/org/sleuthkit/autopsy/keywordsearch/IndexUpgrader.java
new file mode 100644
index 0000000000..28327d9215
--- /dev/null
+++ b/KeywordSearch/src/org/sleuthkit/autopsy/keywordsearch/IndexUpgrader.java
@@ -0,0 +1,173 @@
+/*
+ * Autopsy Forensic Browser
+ *
+ * Copyright 2011-2016 Basis Technology Corp.
+ * Contact: carrier sleuthkit org
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.sleuthkit.autopsy.keywordsearch;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.logging.Level;
+import org.openide.modules.InstalledFileLocator;
+import org.sleuthkit.autopsy.corecomponentinterfaces.AutopsyService;
+import org.sleuthkit.autopsy.coreutils.ExecUtil;
+import org.sleuthkit.autopsy.coreutils.Logger;
+import org.sleuthkit.autopsy.coreutils.PlatformUtil;
+
+/**
+ * This class handles the task of upgrading old indexes to the latest supported
+ * Solr version.
+ */
+class IndexUpgrader {
+
+    private static final Logger logger = Logger.getLogger(IndexUpgrader.class.getName());
+ private final String JAVA_PATH;
+
+ IndexUpgrader() {
+ JAVA_PATH = PlatformUtil.getJavaPath();
+ }
+
+ void performIndexUpgrade(String newIndexDir, String tempResultsDir) throws AutopsyService.AutopsyServiceException {
+ // ELTODO Check for cancellation at whatever points are feasible
+
+ // Run the upgrade tools on the contents (core) in ModuleOutput/keywordsearch/data/solrX_schema_Y/index
+ File tmpDir = Paths.get(tempResultsDir, "IndexUpgrade").toFile(); //NON-NLS
+ tmpDir.mkdirs();
+
+ boolean success = true;
+ try {
+ // upgrade from Solr 4 to 5. If index is newer than Solr 4 then the upgrade script will throw exception right away.
+ upgradeSolrIndexVersion4to5(newIndexDir, tempResultsDir);
+ } catch (Exception ex) {
+ // catch-all firewall for exceptions thrown by the Solr 4 to 5 upgrade tool itself
+            logger.log(Level.SEVERE, "Exception while running Solr 4 to Solr 5 upgrade tool " + newIndexDir, ex); //NON-NLS
+ success = false;
+ }
+
+ if (success) {
+ try {
+ // upgrade from Solr 5 to 6. This one must complete successfully in order to produce a valid Solr 6 index.
+ upgradeSolrIndexVersion5to6(newIndexDir, tempResultsDir);
+ } catch (Exception ex) {
+ // catch-all firewall for exceptions thrown by Solr 5 to 6 upgrade tool itself
+                logger.log(Level.SEVERE, "Exception while running Solr 5 to Solr 6 upgrade tool " + newIndexDir, ex); //NON-NLS
+ success = false;
+ }
+ }
+
+ if (!success) {
+ // delete the new directories
+            new File(newIndexDir).delete(); // NOTE(review): File.delete() fails silently on non-empty directories; consider FileUtils.deleteDirectory
+ throw new AutopsyService.AutopsyServiceException("Failed to upgrade existing keyword search index");
+ }
+ }
+
+ /**
+ * Upgrades Solr index from version 4 to 5.
+ *
+ * @param solr4IndexPath Full path to Solr v4 index directory
+ * @param tempResultsDir Path to directory where to store log output
+ *
+     * @throws AutopsyService.AutopsyServiceException if the upgrade tool cannot be located or fails to run
+ */
+ private void upgradeSolrIndexVersion4to5(String solr4IndexPath, String tempResultsDir) throws AutopsyService.AutopsyServiceException, SecurityException, IOException {
+
+ String outputFileName = "output.txt";
+        logger.log(Level.INFO, "Upgrading KWS index {0} from Solr 4 to Solr 5", solr4IndexPath); //NON-NLS
+
+ // find the index upgrade tool
+ final File upgradeToolFolder = InstalledFileLocator.getDefault().locate("Solr4to5IndexUpgrade", IndexFinder.class.getPackage().getName(), false); //NON-NLS
+ if (upgradeToolFolder == null) {
+            logger.log(Level.SEVERE, "Unable to locate Solr 4 to Solr 5 upgrade tool"); //NON-NLS
+            throw new AutopsyService.AutopsyServiceException("Unable to locate Solr 4 to Solr 5 upgrade tool");
+ }
+
+ // full path to index upgrade jar file
+ File upgradeJarPath = Paths.get(upgradeToolFolder.getAbsolutePath(), "Solr4IndexUpgrade.jar").toFile();
+ if (!upgradeJarPath.exists() || !upgradeJarPath.isFile()) {
+            logger.log(Level.SEVERE, "Unable to locate Solr 4 to Solr 5 upgrade tool's JAR file at {0}", upgradeJarPath); //NON-NLS
+            throw new AutopsyService.AutopsyServiceException("Unable to locate Solr 4 to Solr 5 upgrade tool's JAR file");
+ }
+
+ // create log output directory if it doesn't exist
+ new File(tempResultsDir).mkdirs();
+
+ final String outputFileFullPath = Paths.get(tempResultsDir, outputFileName).toString();
+ final String errFileFullPath = Paths.get(tempResultsDir, outputFileName + ".err").toString(); //NON-NLS
+ List commandLine = new ArrayList<>();
+ commandLine.add(JAVA_PATH);
+ commandLine.add("-jar");
+ commandLine.add(upgradeJarPath.getAbsolutePath());
+ commandLine.add(solr4IndexPath);
+ ProcessBuilder processBuilder = new ProcessBuilder(commandLine);
+ processBuilder.redirectOutput(new File(outputFileFullPath));
+ processBuilder.redirectError(new File(errFileFullPath));
+ ExecUtil.execute(processBuilder);
+
+ // alternatively can execute lucene upgrade command from the folder where lucene jars are located
+ // java -cp ".;lucene-core-5.5.1.jar;lucene-backward-codecs-5.5.1.jar;lucene-codecs-5.5.1.jar;lucene-analyzers-common-5.5.1.jar" org.apache.lucene.index.IndexUpgrader \path\to\index
+ }
+
+ /**
+ * Upgrades Solr index from version 5 to 6.
+ *
+ * @param solr5IndexPath Full path to Solr v5 index directory
+ * @param tempResultsDir Path to directory where to store log output
+ *
+     * @throws AutopsyService.AutopsyServiceException if the upgrade tool cannot be located or fails to run
+ */
+ private void upgradeSolrIndexVersion5to6(String solr5IndexPath, String tempResultsDir) throws AutopsyService.AutopsyServiceException, SecurityException, IOException {
+
+ String outputFileName = "output.txt";
+        logger.log(Level.INFO, "Upgrading KWS index {0} from Solr 5 to Solr 6", solr5IndexPath); //NON-NLS
+
+ // find the index upgrade tool
+ final File upgradeToolFolder = InstalledFileLocator.getDefault().locate("Solr5to6IndexUpgrade", IndexFinder.class.getPackage().getName(), false); //NON-NLS
+ if (upgradeToolFolder == null) {
+            logger.log(Level.SEVERE, "Unable to locate Solr 5 to Solr 6 upgrade tool"); //NON-NLS
+            throw new AutopsyService.AutopsyServiceException("Unable to locate Solr 5 to Solr 6 upgrade tool");
+ }
+
+ // full path to index upgrade jar file
+ File upgradeJarPath = Paths.get(upgradeToolFolder.getAbsolutePath(), "Solr5IndexUpgrade.jar").toFile();
+ if (!upgradeJarPath.exists() || !upgradeJarPath.isFile()) {
+            logger.log(Level.SEVERE, "Unable to locate Solr 5 to Solr 6 upgrade tool's JAR file at {0}", upgradeJarPath); //NON-NLS
+            throw new AutopsyService.AutopsyServiceException("Unable to locate Solr 5 to Solr 6 upgrade tool's JAR file");
+ }
+
+ // create log output directory if it doesn't exist
+ new File(tempResultsDir).mkdirs();
+
+ final String outputFileFullPath = Paths.get(tempResultsDir, outputFileName).toString();
+ final String errFileFullPath = Paths.get(tempResultsDir, outputFileName + ".err").toString(); //NON-NLS
+ List commandLine = new ArrayList<>();
+ commandLine.add(JAVA_PATH);
+ commandLine.add("-jar");
+ commandLine.add(upgradeJarPath.getAbsolutePath());
+ commandLine.add(solr5IndexPath);
+ ProcessBuilder processBuilder = new ProcessBuilder(commandLine);
+ processBuilder.redirectOutput(new File(outputFileFullPath));
+ processBuilder.redirectError(new File(errFileFullPath));
+ ExecUtil.execute(processBuilder);
+
+ // alternatively can execute lucene upgrade command from the folder where lucene jars are located
+ // java -cp ".;lucene-core-6.2.1.jar;lucene-backward-codecs-6.2.1.jar;lucene-codecs-6.2.1.jar;lucene-analyzers-common-6.2.1.jar" org.apache.lucene.index.IndexUpgrader \path\to\index
+ }
+
+}
diff --git a/KeywordSearch/src/org/sleuthkit/autopsy/keywordsearch/SearchRunner.java b/KeywordSearch/src/org/sleuthkit/autopsy/keywordsearch/SearchRunner.java
index d570157945..b4d7e4687c 100644
--- a/KeywordSearch/src/org/sleuthkit/autopsy/keywordsearch/SearchRunner.java
+++ b/KeywordSearch/src/org/sleuthkit/autopsy/keywordsearch/SearchRunner.java
@@ -21,9 +21,11 @@ package org.sleuthkit.autopsy.keywordsearch;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
+import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
+import java.util.Set;
import java.util.Timer;
import java.util.TimerTask;
import java.util.concurrent.CancellationException;
@@ -262,12 +264,14 @@ public final class SearchRunner {
// mutable state:
private volatile boolean workerRunning;
private List keywordListNames; //guarded by SearchJobInfo.this
- private Map> currentResults; //guarded by SearchJobInfo.this
+
+ // Map of keyword to the object ids that contain a hit
+ private Map> currentResults; //guarded by SearchJobInfo.this
private SearchRunner.Searcher currentSearcher;
private AtomicLong moduleReferenceCount = new AtomicLong(0);
private final Object finalSearchLock = new Object(); //used for a condition wait
- public SearchJobInfo(long jobId, long dataSourceId, List keywordListNames) {
+ private SearchJobInfo(long jobId, long dataSourceId, List keywordListNames) {
this.jobId = jobId;
this.dataSourceId = dataSourceId;
this.keywordListNames = new ArrayList<>(keywordListNames);
@@ -276,53 +280,53 @@ public final class SearchRunner {
currentSearcher = null;
}
- public long getJobId() {
+ private long getJobId() {
return jobId;
}
- public long getDataSourceId() {
+ private long getDataSourceId() {
return dataSourceId;
}
- public synchronized List getKeywordListNames() {
+ private synchronized List getKeywordListNames() {
return new ArrayList<>(keywordListNames);
}
- public synchronized void addKeywordListName(String keywordListName) {
+ private synchronized void addKeywordListName(String keywordListName) {
if (!keywordListNames.contains(keywordListName)) {
keywordListNames.add(keywordListName);
}
}
- public synchronized List currentKeywordResults(Keyword k) {
+ private synchronized Set currentKeywordResults(Keyword k) {
return currentResults.get(k);
}
- public synchronized void addKeywordResults(Keyword k, List resultsIDs) {
+ private synchronized void addKeywordResults(Keyword k, Set resultsIDs) {
currentResults.put(k, resultsIDs);
}
- public boolean isWorkerRunning() {
+ private boolean isWorkerRunning() {
return workerRunning;
}
- public void setWorkerRunning(boolean flag) {
+ private void setWorkerRunning(boolean flag) {
workerRunning = flag;
}
- public synchronized SearchRunner.Searcher getCurrentSearcher() {
+ private synchronized SearchRunner.Searcher getCurrentSearcher() {
return currentSearcher;
}
- public synchronized void setCurrentSearcher(SearchRunner.Searcher searchRunner) {
+ private synchronized void setCurrentSearcher(SearchRunner.Searcher searchRunner) {
currentSearcher = searchRunner;
}
- public void incrementModuleReferenceCount() {
+ private void incrementModuleReferenceCount() {
moduleReferenceCount.incrementAndGet();
}
- public long decrementModuleReferenceCount() {
+ private long decrementModuleReferenceCount() {
return moduleReferenceCount.decrementAndGet();
}
@@ -331,7 +335,7 @@ public final class SearchRunner {
*
* @throws InterruptedException
*/
- public void waitForCurrentWorker() throws InterruptedException {
+ private void waitForCurrentWorker() throws InterruptedException {
synchronized (finalSearchLock) {
while (workerRunning) {
finalSearchLock.wait(); //wait() releases the lock
@@ -342,7 +346,7 @@ public final class SearchRunner {
/**
* Unset workerRunning and wake up thread(s) waiting on finalSearchLock
*/
- public void searchNotify() {
+ private void searchNotify() {
synchronized (finalSearchLock) {
workerRunning = false;
finalSearchLock.notify();
@@ -468,8 +472,8 @@ public final class SearchRunner {
return null;
}
- // calculate new results by substracting results already obtained in this ingest
- // this creates a map of each keyword to the list of unique files that have that hit.
+ // Reduce the results of the query to only those hits we
+ // have not already seen.
QueryResults newResults = filterResults(queryResults);
if (!newResults.getKeywords().isEmpty()) {
@@ -567,40 +571,68 @@ public final class SearchRunner {
});
}
- //calculate new results but substracting results already obtained in this ingest
- //update currentResults map with the new results
+ /**
+ * This method filters out all of the hits found in earlier
+ * periodic searches and returns only the results found by the most
+ * recent search.
+ *
+ * This method will only return hits for objects for which we haven't
+ * previously seen a hit for the keyword.
+ *
+ * @param queryResult The results returned by a keyword search.
+ * @return The set of hits found by the most recent search for objects
+ * that have not previously had a hit.
+ *
+ */
private QueryResults filterResults(QueryResults queryResult) {
+ // Create a new (empty) QueryResults object to hold the most recently
+ // found hits.
QueryResults newResults = new QueryResults(queryResult.getQuery(), queryResult.getKeywordList());
+ // For each keyword represented in the results.
for (Keyword keyword : queryResult.getKeywords()) {
+ // These are all of the hits across all objects for the most recent search.
+ // This may well include duplicates of hits we've seen in earlier periodic searches.
List queryTermResults = queryResult.getResults(keyword);
- //translate to list of IDs that we keep track of
- List queryTermResultsIDs = new ArrayList<>();
- for (KeywordHit ch : queryTermResults) {
- queryTermResultsIDs.add(ch.getSolrObjectId());
+ // This will be used to build up the hits we haven't seen before
+ // for this keyword.
+ List newUniqueHits = new ArrayList<>();
+
+ // Get the set of object ids seen in the past by this searcher
+ // for the given keyword.
+ Set curTermResults = job.currentKeywordResults(keyword);
+ if (curTermResults == null) {
+ // We create a new empty set if we haven't seen results for
+ // this keyword before.
+ curTermResults = new HashSet<>();
}
- List curTermResults = job.currentKeywordResults(keyword);
- if (curTermResults == null) {
- job.addKeywordResults(keyword, queryTermResultsIDs);
- newResults.addResult(keyword, queryTermResults);
- } else {
- //some AbstractFile hits already exist for this keyword
- for (KeywordHit res : queryTermResults) {
- if (!curTermResults.contains(res.getSolrObjectId())) {
- //add to new results
- List newResultsFs = newResults.getResults(keyword);
- if (newResultsFs == null) {
- newResultsFs = new ArrayList<>();
- newResults.addResult(keyword, newResultsFs);
- }
- newResultsFs.add(res);
- curTermResults.add(res.getSolrObjectId());
- }
+ // For each hit for this keyword.
+ for (KeywordHit hit : queryTermResults) {
+ if (curTermResults.contains(hit.getSolrObjectId())) {
+ // Skip the hit if we've already seen a hit for
+ // this keyword in the object.
+ continue;
}
+
+ // We haven't seen the hit before so add it to list of new
+ // unique hits.
+ newUniqueHits.add(hit);
+
+ // Add the object id to the results we've seen for this
+ // keyword.
+ curTermResults.add(hit.getSolrObjectId());
}
+
+ // Update the job with the list of objects for which we have
+ // seen hits for the current keyword.
+ job.addKeywordResults(keyword, curTermResults);
+
+ // Add the new hits for the current keyword into the results
+ // to be returned.
+ newResults.addResult(keyword, newUniqueHits);
}
return newResults;
diff --git a/KeywordSearch/src/org/sleuthkit/autopsy/keywordsearch/SolrSearchService.java b/KeywordSearch/src/org/sleuthkit/autopsy/keywordsearch/SolrSearchService.java
index 7e2c40f1c4..d2f4df78d0 100644
--- a/KeywordSearch/src/org/sleuthkit/autopsy/keywordsearch/SolrSearchService.java
+++ b/KeywordSearch/src/org/sleuthkit/autopsy/keywordsearch/SolrSearchService.java
@@ -18,18 +18,21 @@
*/
package org.sleuthkit.autopsy.keywordsearch;
+import java.io.File;
import java.io.IOException;
import java.net.InetAddress;
+import java.nio.file.Paths;
import java.util.List;
import java.util.MissingResourceException;
+import java.util.logging.Level;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.openide.util.NbBundle;
import org.openide.util.lookup.ServiceProvider;
import org.openide.util.lookup.ServiceProviders;
-import org.sleuthkit.autopsy.casemodule.Case;
import org.sleuthkit.autopsy.core.RuntimeProperties;
import org.sleuthkit.autopsy.corecomponentinterfaces.AutopsyService;
+import org.sleuthkit.autopsy.coreutils.Logger;
import org.sleuthkit.autopsy.keywordsearchservice.KeywordSearchService;
import org.sleuthkit.autopsy.keywordsearchservice.KeywordSearchServiceException;
import org.sleuthkit.datamodel.BlackboardArtifact;
@@ -45,6 +48,7 @@ import org.sleuthkit.datamodel.TskCoreException;
)
public class SolrSearchService implements KeywordSearchService, AutopsyService {
+ private static final Logger logger = Logger.getLogger(SolrSearchService.class.getName());
private static final String BAD_IP_ADDRESS_FORMAT = "ioexception occurred when talking to server"; //NON-NLS
private static final String SERVER_REFUSED_CONNECTION = "server refused connection"; //NON-NLS
private static final int IS_REACHABLE_TIMEOUT_MS = 1000;
@@ -144,7 +148,7 @@ public class SolrSearchService implements KeywordSearchService, AutopsyService
* @param context
*
* @throws
- * org.sleuthkit.autopsy.corecomponentinterfaces.AutopsyServiceProvider.AutopsyServiceProviderException
+ * org.sleuthkit.autopsy.corecomponentinterfaces.AutopsyService.AutopsyServiceException
*/
@Override
public void openCaseResources(CaseContext context) throws AutopsyServiceException {
@@ -153,10 +157,11 @@ public class SolrSearchService implements KeywordSearchService, AutopsyService
*/
// do a case subdirectory search to check for the existence and upgrade status of KWS indexes
- List indexDirs = IndexHandling.findAllIndexDirs(Case.getCurrentCase());
+ IndexFinder indexFinder = new IndexFinder();
+ List indexDirs = indexFinder.findAllIndexDirs(context.getCase());
// check if index needs upgrade
- String currentVersionIndexDir = IndexHandling.findLatestVersionIndexDir(indexDirs);
+ String currentVersionIndexDir = IndexFinder.findLatestVersionIndexDir(indexDirs);
if (currentVersionIndexDir.isEmpty()) {
// ELTODO not sure what to do when there are multiple old indexes. grab the first one?
@@ -165,37 +170,31 @@ public class SolrSearchService implements KeywordSearchService, AutopsyService
if (RuntimeProperties.coreComponentsAreActive()) {
//pop up a message box to indicate the restrictions on adding additional
//text and performing regex searches and give the user the option to decline the upgrade
- boolean upgradeDeclined = false;
- if (upgradeDeclined) {
- throw new AutopsyServiceException("ELTODO");
+ if (!KeywordSearchUtil.displayConfirmDialog(NbBundle.getMessage(this.getClass(), "SolrSearchService.IndexUpgradeDialog.title"),
+ NbBundle.getMessage(this.getClass(), "SolrSearchService.IndexUpgradeDialog.msg"),
+ KeywordSearchUtil.DIALOG_MESSAGE_TYPE.WARN)) {
+ // upgrade declined - throw exception
+ throw new AutopsyServiceException("Index upgrade was declined by user");
}
}
// ELTODO Check for cancellation at whatever points are feasible
- // Copy the contents (core) of ModuleOutput/keywordsearch/data/index into ModuleOutput/keywordsearch/data/solr6_schema_2.0/index
+ // Copy the "old" index and config set into ModuleOutput/keywordsearch/data/solrX_schema_Y/
+ String newIndexDir = indexFinder.copyIndexAndConfigSet(context.getCase(), oldIndexDir);
- // Make a “reference copy” of the configset and place it in ModuleOutput/keywordsearch/data/solr6_schema_2.0/configset
-
- // convert path to UNC path
-
- // Run the upgrade tools on the contents (core) in ModuleOutput/keywordsearch/data/solr6_schema_2.0/index
-
- // Open the upgraded index
-
- // execute a test query
-
- boolean success = true;
+ // upgrade the "old" index to the latest supported Solr version
+ IndexUpgrader indexUpgrader = new IndexUpgrader();
+ indexUpgrader.performIndexUpgrade(newIndexDir, context.getCase().getTempDirectory());
- if (!success) {
- // delete the new directories
-
- // close the upgraded index?
- throw new AutopsyServiceException("ELTODO");
- }
-
- // currentVersionIndexDir = upgraded index dir
+ // set the upgraded reference index as the index to be used for this case
+ currentVersionIndexDir = newIndexDir;
}
+
+ // open currentVersionIndexDir index
+
+ // execute a test query
+ // if failed, close the upgraded index?
}
/**
@@ -203,7 +202,7 @@ public class SolrSearchService implements KeywordSearchService, AutopsyService
* @param context
*
* @throws
- * org.sleuthkit.autopsy.corecomponentinterfaces.AutopsyServiceProvider.AutopsyServiceProviderException
+ * org.sleuthkit.autopsy.corecomponentinterfaces.AutopsyService.AutopsyServiceException
*/
@Override
public void closeCaseResources(CaseContext context) throws AutopsyServiceException {
diff --git a/NEWS.txt b/NEWS.txt
index 7a0c8cb200..daff4c1a7a 100644
--- a/NEWS.txt
+++ b/NEWS.txt
@@ -1,23 +1,17 @@
---------------- VERSION 4.3.0 --------------
Improvements:
-- Creation and analysis (e.g., keyword search) of virtual files for slack
-space.
-- A preloader in an Android device image does not prevent adding the image as
-a data source (reading of secondary GPT tables supported).
-- User can add data sources with no file systems or unsupported file systems
-as "unallocated space image files" for carving, keyword search, etc.
-- File extension mismatch analysis can be configured to check all file types,
-all file types except text files, or only multimedia and executable files.
-- Column order changes in table views are "sticky" for each type of tree view
-item.
-- Tree view has new file types by MIME type sub tree.
-- User can bulk add list of keywords to a keyword list.
+- Support for slack space on files (as separate virtual files) to enable keyword searching and other analysis.
+- Simple mode for the file extension mismatch module that focuses on only multimedia and executable files to reduce false positives.
+- New view in tree that shows the MIME types.
- Tagged items are highlighted in table views.
-- Toolbar button for Image/Video Gallery
-- New "Experimental" module (activate via Tools, Plugins) with auto ingest
-feature.
+- Ordering of columns is saved when user changes them.
+- Support for Android devices with preloaders (uses backup GPT).
+- Support for images with no file systems (all data is added as unallocated space).
+- User can bulk add list of keywords to a keyword list.
+- New "Experimental" module (activate via Tools, Plugins) with auto ingest feature.
- Assorted bug fixes and minor enhancements.
+
---------------- VERSION 4.2.0 --------------
Improvements:
- Credit card account search.