diff --git a/KeywordSearch/src/org/sleuthkit/autopsy/keywordsearch/Server.java b/KeywordSearch/src/org/sleuthkit/autopsy/keywordsearch/Server.java
index 36ad8c6a29..0f9effc7ca 100644
--- a/KeywordSearch/src/org/sleuthkit/autopsy/keywordsearch/Server.java
+++ b/KeywordSearch/src/org/sleuthkit/autopsy/keywordsearch/Server.java
@@ -1,7 +1,7 @@
 /*
  * Autopsy Forensic Browser
  *
- * Copyright 2011-2020 Basis Technology Corp.
+ * Copyright 2011-2021 Basis Technology Corp.
  * Contact: carrier <at> sleuthkit <dot> org
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -95,7 +95,6 @@ import org.sleuthkit.autopsy.coreutils.ThreadUtils;
 import org.sleuthkit.autopsy.healthmonitor.HealthMonitor;
 import org.sleuthkit.autopsy.healthmonitor.TimingMetric;
 import org.sleuthkit.autopsy.keywordsearchservice.KeywordSearchServiceException;
-import org.sleuthkit.autopsy.report.GeneralReportSettings;
 import org.sleuthkit.autopsy.report.ReportProgressPanel;
 import org.sleuthkit.datamodel.Content;
 
@@ -2030,6 +2029,13 @@ public class Server {
         private final List<SolrInputDocument> buffer;
         private final Object bufferLock;
 
+        /* (JIRA-7521) Sometimes we get into a situation where the Solr server is no longer able to index new data.
+         * Typically the main reason for this is Solr running out of memory. In this case we will stop trying to send
+         * new data to Solr (for this collection) after a certain number of consecutive batches have failed. */
+        private static final int MAX_NUM_CONSECUTIVE_FAILURES = 5;
+        private int numConsecutiveFailures = 0;
+        private boolean skipIndexing = false;
+
         private final ScheduledThreadPoolExecutor periodicTasksExecutor;
         private static final long PERIODIC_BATCH_SEND_INTERVAL_MINUTES = 10;
         private static final int NUM_BATCH_UPDATE_RETRIES = 10;
@@ -2076,6 +2082,11 @@ public class Server {
 
             @Override
             public void run() {
+
+                if (skipIndexing) {
+                    return;
+                }
+
                 List<SolrInputDocument> clone;
                 synchronized (bufferLock) {
@@ -2242,6 +2253,10 @@ public class Server {
          * @throws KeywordSearchModuleException
          */
         void addDocument(SolrInputDocument doc) throws KeywordSearchModuleException {
+
+            if (skipIndexing) {
+                return;
+            }
 
             List<SolrInputDocument> clone;
             synchronized (bufferLock) {
@@ -2268,6 +2283,10 @@ public class Server {
          *
          * @throws KeywordSearchModuleException
          */
+        @NbBundle.Messages({
+            "Collection.unableToIndexData.error=Unable to add data to text index. All future text indexing for the current case will be skipped.",
+
+        })
         private void sendBufferedDocs(List<SolrInputDocument> docBuffer) throws KeywordSearchModuleException {
 
             if (docBuffer.isEmpty()) {
@@ -2293,6 +2312,7 @@ public class Server {
                         }
                     }
                     if (success) {
+                        numConsecutiveFailures = 0;
                         if (reTryAttempt > 0) {
                             logger.log(Level.INFO, "Batch update suceeded after {0} re-try", reTryAttempt); //NON-NLS
                         }
@@ -2304,10 +2324,26 @@ public class Server {
                     throw new KeywordSearchModuleException(NbBundle.getMessage(this.getClass(), "Server.addDocBatch.exception.msg")); //NON-NLS
                 } catch (Exception ex) {
                     // Solr throws a lot of unexpected exception types
+                    numConsecutiveFailures++;
                     logger.log(Level.SEVERE, "Could not add batched documents to index", ex); //NON-NLS
+
+                    // display message to user that a document batch is missing from the index
+                    MessageNotifyUtil.Notify.error(
+                            NbBundle.getMessage(this.getClass(), "Server.addDocBatch.exception.msg"),
+                            NbBundle.getMessage(this.getClass(), "Server.addDocBatch.exception.msg"));
                     throw new KeywordSearchModuleException(
                             NbBundle.getMessage(this.getClass(), "Server.addDocBatch.exception.msg"), ex); //NON-NLS
                 } finally {
+                    if (numConsecutiveFailures > MAX_NUM_CONSECUTIVE_FAILURES) {
+                        // skip all future indexing
+                        skipIndexing = true;
+
+                        // display message to user that no more data will be added to the index
+                        MessageNotifyUtil.Notify.error(
+                                NbBundle.getMessage(this.getClass(), "Server.addDocBatch.exception.msg"),
+                                Bundle.Collection_unableToIndexData_error());
+                        MessageNotifyUtil.Message.error(Bundle.Collection_unableToIndexData_error());
+                    }
                     docBuffer.clear();
                 }
             }
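Review note: the change amounts to a one-way circuit breaker around batch indexing. A successful batch resets numConsecutiveFailures, each failed batch increments it, and once the count exceeds MAX_NUM_CONSECUTIVE_FAILURES the collection permanently stops accepting documents. Below is a minimal, standalone sketch of that pattern, not the Autopsy implementation: the names ConsecutiveFailureGate, recordSuccess, recordFailure, and shouldSkip are hypothetical. The sketch uses java.util.concurrent atomics because the patched fields are a plain int and boolean shared between the periodic batch-send task and the threads that call addDocument().

import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;

/* Hypothetical sketch: trips permanently once more than
 * maxConsecutiveFailures batches fail in a row. */
final class ConsecutiveFailureGate {

    private final int maxConsecutiveFailures;
    private final AtomicInteger consecutiveFailures = new AtomicInteger(0);
    private final AtomicBoolean tripped = new AtomicBoolean(false);

    ConsecutiveFailureGate(int maxConsecutiveFailures) {
        this.maxConsecutiveFailures = maxConsecutiveFailures;
    }

    /* A successful batch resets the failure streak, mirroring
     * numConsecutiveFailures = 0 in the success path of the patch. */
    void recordSuccess() {
        consecutiveFailures.set(0);
    }

    /* A failed batch extends the streak; once the limit is exceeded the
     * gate trips for good, mirroring skipIndexing = true. */
    void recordFailure() {
        if (consecutiveFailures.incrementAndGet() > maxConsecutiveFailures) {
            tripped.set(true);
        }
    }

    /* Checked before queueing or sending documents, like the early
     * returns the patch adds to run() and addDocument(). */
    boolean shouldSkip() {
        return tripped.get();
    }
}

Usage would mirror sendBufferedDocs(): call recordSuccess() when a batch commits, recordFailure() in the catch block, and consult shouldSkip() at every entry point. Once tripped, the gate never resets, matching the patch's "all future text indexing for the current case will be skipped" behavior.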