From acb5d34aea8aa7a27d0a75366f527dacf8e4cefd Mon Sep 17 00:00:00 2001
From: Eugene Livis
Date: Tue, 13 Apr 2021 15:59:23 -0400
Subject: [PATCH 1/4] No longer attempting to index new data after 5 failed
 consecutive attempts

---
 .../autopsy/keywordsearch/Server.java         | 40 ++++++++++++++++++-
 1 file changed, 38 insertions(+), 2 deletions(-)

diff --git a/KeywordSearch/src/org/sleuthkit/autopsy/keywordsearch/Server.java b/KeywordSearch/src/org/sleuthkit/autopsy/keywordsearch/Server.java
index 36ad8c6a29..0f9effc7ca 100644
--- a/KeywordSearch/src/org/sleuthkit/autopsy/keywordsearch/Server.java
+++ b/KeywordSearch/src/org/sleuthkit/autopsy/keywordsearch/Server.java
@@ -1,7 +1,7 @@
 /*
  * Autopsy Forensic Browser
  *
- * Copyright 2011-2020 Basis Technology Corp.
+ * Copyright 2011-2021 Basis Technology Corp.
  * Contact: carrier <at> sleuthkit <dot> org
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -95,7 +95,6 @@
 import org.sleuthkit.autopsy.coreutils.ThreadUtils;
 import org.sleuthkit.autopsy.healthmonitor.HealthMonitor;
 import org.sleuthkit.autopsy.healthmonitor.TimingMetric;
 import org.sleuthkit.autopsy.keywordsearchservice.KeywordSearchServiceException;
-import org.sleuthkit.autopsy.report.GeneralReportSettings;
 import org.sleuthkit.autopsy.report.ReportProgressPanel;
 import org.sleuthkit.datamodel.Content;
@@ -2030,6 +2029,13 @@ public class Server {
 
         private final List<SolrInputDocument> buffer;
         private final Object bufferLock;
+        /* (JIRA-7521) Sometimes we get into a situation where Solr server is no longer able to index new data.
+         * Typically main reason for this is Solr unning out of memory. In this case we will stop trying to send new
+         * data to Solr (for this collection) after certain number of consecutive batches have failed. */
+        private static final int MAX_NUM_CONSECUTIVE_FAILURES = 5;
+        private int numConsecutiveFailures = 0;
+        private boolean skipIndexing = false;
+
         private final ScheduledThreadPoolExecutor periodicTasksExecutor;
         private static final long PERIODIC_BATCH_SEND_INTERVAL_MINUTES = 10;
         private static final int NUM_BATCH_UPDATE_RETRIES = 10;
@@ -2076,6 +2082,11 @@
 
             @Override
             public void run() {
+
+                if (skipIndexing) {
+                    return;
+                }
+
                 List<SolrInputDocument> clone;
                 synchronized (bufferLock) {
 
@@ -2242,6 +2253,10 @@
          * @throws KeywordSearchModuleException
          */
         void addDocument(SolrInputDocument doc) throws KeywordSearchModuleException {
+
+            if (skipIndexing) {
+                return;
+            }
 
             List<SolrInputDocument> clone;
             synchronized (bufferLock) {
@@ -2268,6 +2283,10 @@
          *
          * @throws KeywordSearchModuleException
          */
+        @NbBundle.Messages({
+            "Collection.unableToIndexData.error=Unable to add data to text index. All future text indexing for the current case will be skipped.",
+
+        })
         private void sendBufferedDocs(List<SolrInputDocument> docBuffer) throws KeywordSearchModuleException {
 
             if (docBuffer.isEmpty()) {
@@ -2293,6 +2312,7 @@
                     }
                 }
                 if (success) {
+                    numConsecutiveFailures = 0;
                     if (reTryAttempt > 0) {
                         logger.log(Level.INFO, "Batch update suceeded after {0} re-try", reTryAttempt); //NON-NLS
                     }
@@ -2304,10 +2324,26 @@
                 throw new KeywordSearchModuleException(NbBundle.getMessage(this.getClass(), "Server.addDocBatch.exception.msg")); //NON-NLS
             } catch (Exception ex) {
                 // Solr throws a lot of unexpected exception types
+                numConsecutiveFailures++;
                 logger.log(Level.SEVERE, "Could not add batched documents to index", ex); //NON-NLS
+
+                // display message to user that that a document batch is missing from the index
+                MessageNotifyUtil.Notify.error(
+                        NbBundle.getMessage(this.getClass(), "Server.addDocBatch.exception.msg"),
+                        NbBundle.getMessage(this.getClass(), "Server.addDocBatch.exception.msg"));
                 throw new KeywordSearchModuleException(
                         NbBundle.getMessage(this.getClass(), "Server.addDocBatch.exception.msg"), ex); //NON-NLS
             } finally {
+                if (numConsecutiveFailures > MAX_NUM_CONSECUTIVE_FAILURES) {
+                    // skip all future indexing
+                    skipIndexing = true;
+
+                    // display message to user that no more data will be added to the index
+                    MessageNotifyUtil.Notify.error(
+                            NbBundle.getMessage(this.getClass(), "Server.addDocBatch.exception.msg"),
+                            Bundle.Collection_unableToIndexData_error());
+                    MessageNotifyUtil.Message.error(Bundle.Collection_unableToIndexData_error());
+                }
                 docBuffer.clear();
             }
         }

From 23c4f7a525c4d2048e02e843b4b3291059f3b2cc Mon Sep 17 00:00:00 2001
From: Eugene Livis
Date: Tue, 13 Apr 2021 16:03:01 -0400
Subject: [PATCH 2/4] Minor

---
 .../src/org/sleuthkit/autopsy/keywordsearch/Server.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/KeywordSearch/src/org/sleuthkit/autopsy/keywordsearch/Server.java b/KeywordSearch/src/org/sleuthkit/autopsy/keywordsearch/Server.java
index 0f9effc7ca..ac61dfdcf1 100644
--- a/KeywordSearch/src/org/sleuthkit/autopsy/keywordsearch/Server.java
+++ b/KeywordSearch/src/org/sleuthkit/autopsy/keywordsearch/Server.java
@@ -2030,7 +2030,7 @@
         private final Object bufferLock;
         /* (JIRA-7521) Sometimes we get into a situation where Solr server is no longer able to index new data.
-         * Typically main reason for this is Solr unning out of memory. In this case we will stop trying to send new
+         * Typically main reason for this is Solr running out of memory. In this case we will stop trying to send new
          * data to Solr (for this collection) after certain number of consecutive batches have failed. */
         private static final int MAX_NUM_CONSECUTIVE_FAILURES = 5;
         private int numConsecutiveFailures = 0;
         private boolean skipIndexing = false;
@@ -2334,7 +2334,7 @@
                 throw new KeywordSearchModuleException(
                         NbBundle.getMessage(this.getClass(), "Server.addDocBatch.exception.msg"), ex); //NON-NLS
             } finally {
-                if (numConsecutiveFailures > MAX_NUM_CONSECUTIVE_FAILURES) {
+                if (numConsecutiveFailures >= MAX_NUM_CONSECUTIVE_FAILURES) {
                     // skip all future indexing
                     skipIndexing = true;

From ed2d995650a45d6754f1c7be737715b98ed06912 Mon Sep 17 00:00:00 2001
From: Eugene Livis
Date: Wed, 14 Apr 2021 11:01:08 -0400
Subject: [PATCH 3/4] Not showing the error message in headless mode

---
 .../org/sleuthkit/autopsy/keywordsearch/Server.java | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

diff --git a/KeywordSearch/src/org/sleuthkit/autopsy/keywordsearch/Server.java b/KeywordSearch/src/org/sleuthkit/autopsy/keywordsearch/Server.java
index ac61dfdcf1..2b0303f696 100644
--- a/KeywordSearch/src/org/sleuthkit/autopsy/keywordsearch/Server.java
+++ b/KeywordSearch/src/org/sleuthkit/autopsy/keywordsearch/Server.java
@@ -85,6 +85,7 @@ import org.sleuthkit.autopsy.casemodule.Case;
 import org.sleuthkit.autopsy.casemodule.Case.CaseType;
 import org.sleuthkit.autopsy.casemodule.CaseMetadata;
 import org.sleuthkit.autopsy.casemodule.NoCurrentCaseException;
+import org.sleuthkit.autopsy.core.RuntimeProperties;
 import org.sleuthkit.autopsy.core.UserPreferences;
 import org.sleuthkit.autopsy.coreutils.FileUtil;
 import org.sleuthkit.autopsy.coreutils.Logger;
@@ -2312,11 +2313,12 @@
                     }
                 }
                 if (success) {
-                    numConsecutiveFailures = 0;
+                    throw new KeywordSearchModuleException(NbBundle.getMessage(this.getClass(), "Server.addDocBatch.exception.msg")); //NON-NLS
+                    /*numConsecutiveFailures = 0;
                     if (reTryAttempt > 0) {
                         logger.log(Level.INFO, "Batch update suceeded after {0} re-try", reTryAttempt); //NON-NLS
                     }
-                    return;
+                    return;*/
                 }
             }
             // if we are here, it means all re-try attempts failed
@@ -2337,12 +2339,15 @@
                 if (numConsecutiveFailures >= MAX_NUM_CONSECUTIVE_FAILURES) {
                     // skip all future indexing
                     skipIndexing = true;
+                    logger.log(Level.SEVERE, "Unable to add data to text index. All future text indexing for the current case will be skipped!"); //NON-NLS
 
                     // display message to user that no more data will be added to the index
                     MessageNotifyUtil.Notify.error(
                             NbBundle.getMessage(this.getClass(), "Server.addDocBatch.exception.msg"),
                             Bundle.Collection_unableToIndexData_error());
-                    MessageNotifyUtil.Message.error(Bundle.Collection_unableToIndexData_error());
+                    if (RuntimeProperties.runningWithGUI()) {
+                        MessageNotifyUtil.Message.error(Bundle.Collection_unableToIndexData_error());
+                    }
                 }
                 docBuffer.clear();
             }

From 77c1db48062154a1f5810fbc879114afe5e5a255 Mon Sep 17 00:00:00 2001
From: Eugene Livis
Date: Thu, 15 Apr 2021 12:42:50 -0400
Subject: [PATCH 4/4] Fixes

---
 .../autopsy/keywordsearch/Server.java         | 21 ++++++++++---------
 1 file changed, 11 insertions(+), 10 deletions(-)

diff --git a/KeywordSearch/src/org/sleuthkit/autopsy/keywordsearch/Server.java b/KeywordSearch/src/org/sleuthkit/autopsy/keywordsearch/Server.java
index 2b0303f696..bc63f4dddf 100644
--- a/KeywordSearch/src/org/sleuthkit/autopsy/keywordsearch/Server.java
+++ b/KeywordSearch/src/org/sleuthkit/autopsy/keywordsearch/Server.java
@@ -53,6 +53,8 @@ import java.util.logging.Level;
 import javax.swing.AbstractAction;
 import org.apache.commons.io.FileUtils;
 import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
 import java.util.stream.Collectors;
 import static java.util.stream.Collectors.toList;
 import org.apache.solr.client.solrj.SolrQuery;
@@ -2034,8 +2036,8 @@
          * Typically main reason for this is Solr running out of memory. In this case we will stop trying to send new
          * data to Solr (for this collection) after certain number of consecutive batches have failed. */
         private static final int MAX_NUM_CONSECUTIVE_FAILURES = 5;
-        private int numConsecutiveFailures = 0;
-        private boolean skipIndexing = false;
+        private AtomicInteger numConsecutiveFailures = new AtomicInteger(0);
+        private AtomicBoolean skipIndexing = new AtomicBoolean(false);
 
         private final ScheduledThreadPoolExecutor periodicTasksExecutor;
         private static final long PERIODIC_BATCH_SEND_INTERVAL_MINUTES = 10;
@@ -2084,7 +2086,7 @@
 
             @Override
             public void run() {
 
-                if (skipIndexing) {
+                if (skipIndexing.get()) {
                     return;
                 }
@@ -2255,7 +2257,7 @@
          */
         void addDocument(SolrInputDocument doc) throws KeywordSearchModuleException {
 
-            if (skipIndexing) {
+            if (skipIndexing.get()) {
                 return;
             }
 
@@ -2313,12 +2315,11 @@
                     }
                 }
                 if (success) {
-                    throw new KeywordSearchModuleException(NbBundle.getMessage(this.getClass(), "Server.addDocBatch.exception.msg")); //NON-NLS
-                    /*numConsecutiveFailures = 0;
+                    numConsecutiveFailures.set(0);
                     if (reTryAttempt > 0) {
                         logger.log(Level.INFO, "Batch update suceeded after {0} re-try", reTryAttempt); //NON-NLS
                     }
-                    return;*/
+                    return;
                 }
             }
             // if we are here, it means all re-try attempts failed
@@ -2326,7 +2327,7 @@
                 throw new KeywordSearchModuleException(NbBundle.getMessage(this.getClass(), "Server.addDocBatch.exception.msg")); //NON-NLS
             } catch (Exception ex) {
                 // Solr throws a lot of unexpected exception types
-                numConsecutiveFailures++;
+                numConsecutiveFailures.incrementAndGet();
                 logger.log(Level.SEVERE, "Could not add batched documents to index", ex); //NON-NLS
 
                 // display message to user that that a document batch is missing from the index
@@ -2336,9 +2337,9 @@
                 throw new KeywordSearchModuleException(
                         NbBundle.getMessage(this.getClass(), "Server.addDocBatch.exception.msg"), ex); //NON-NLS
             } finally {
-                if (numConsecutiveFailures >= MAX_NUM_CONSECUTIVE_FAILURES) {
+                if (numConsecutiveFailures.get() >= MAX_NUM_CONSECUTIVE_FAILURES) {
                     // skip all future indexing
-                    skipIndexing = true;
+                    skipIndexing.set(true);
                     logger.log(Level.SEVERE, "Unable to add data to text index. All future text indexing for the current case will be skipped!"); //NON-NLS
 
                     // display message to user that no more data will be added to the index
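
Note (not part of the patch set): the logic these four patches converge on is a small
consecutive-failure cutoff. The sketch below distills it into a self-contained class
under stated assumptions: the class name IndexingCutoff, the method trySubmit, and the
Runnable-based batch callback are invented for illustration; only the constant value 5,
the reset-on-success behavior, and the AtomicInteger/AtomicBoolean types come from the
patches themselves.

    import java.util.concurrent.atomic.AtomicBoolean;
    import java.util.concurrent.atomic.AtomicInteger;

    public class IndexingCutoff {

        // Mirrors MAX_NUM_CONSECUTIVE_FAILURES = 5 from PATCH 1/4.
        private static final int MAX_CONSECUTIVE_FAILURES = 5;

        // Atomics, as in PATCH 4/4: addDocument() callers and the periodic
        // batch-send task may touch this state from different threads.
        private final AtomicInteger consecutiveFailures = new AtomicInteger(0);
        private final AtomicBoolean skipIndexing = new AtomicBoolean(false);

        /**
         * Runs one batch submission. Returns false without running anything
         * once the cutoff has tripped; otherwise propagates the failure to
         * the caller after updating the failure streak.
         */
        public boolean trySubmit(Runnable sendBatch) {
            if (skipIndexing.get()) {
                return false; // cutoff tripped: drop all future batches
            }
            try {
                sendBatch.run();
                consecutiveFailures.set(0); // any success resets the streak
                return true;
            } catch (RuntimeException ex) {
                if (consecutiveFailures.incrementAndGet() >= MAX_CONSECUTIVE_FAILURES) {
                    skipIndexing.set(true); // give up for the rest of the case
                }
                throw ex; // caller still sees the failure, as in sendBufferedDocs()
            }
        }
    }

Resetting the counter on every success means only an unbroken run of five failed
batches disables indexing, which matches the intent stated in the JIRA-7521 comment:
a persistently out-of-memory Solr stops receiving data, while intermittent failures
followed by a successful retry do not trip the cutoff.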