From 23c4f7a525c4d2048e02e843b4b3291059f3b2cc Mon Sep 17 00:00:00 2001
From: Eugene Livis
Date: Tue, 13 Apr 2021 16:03:01 -0400
Subject: [PATCH] Minor

---
 .../src/org/sleuthkit/autopsy/keywordsearch/Server.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/KeywordSearch/src/org/sleuthkit/autopsy/keywordsearch/Server.java b/KeywordSearch/src/org/sleuthkit/autopsy/keywordsearch/Server.java
index 0f9effc7ca..ac61dfdcf1 100644
--- a/KeywordSearch/src/org/sleuthkit/autopsy/keywordsearch/Server.java
+++ b/KeywordSearch/src/org/sleuthkit/autopsy/keywordsearch/Server.java
@@ -2030,7 +2030,7 @@ public class Server {
         private final Object bufferLock;
 
         /* (JIRA-7521) Sometimes we get into a situation where Solr server is no longer able to index new data.
-         * Typically main reason for this is Solr unning out of memory. In this case we will stop trying to send new
+         * Typically main reason for this is Solr running out of memory. In this case we will stop trying to send new
          * data to Solr (for this collection) after certain number of consecutive batches have failed. */
         private static final int MAX_NUM_CONSECUTIVE_FAILURES = 5;
         private int numConsecutiveFailures = 0;
@@ -2334,7 +2334,7 @@ public class Server {
                 throw new KeywordSearchModuleException(
                         NbBundle.getMessage(this.getClass(), "Server.addDocBatch.exception.msg"), ex); //NON-NLS
             } finally {
-                if (numConsecutiveFailures > MAX_NUM_CONSECUTIVE_FAILURES) {
+                if (numConsecutiveFailures >= MAX_NUM_CONSECUTIVE_FAILURES) {
                     // skip all future indexing
                     skipIndexing = true;
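
The patch comment describes a cutoff: after a certain number of consecutive failed batches, indexing for the collection is skipped. The ">" to ">=" change matters because with ">" the counter must exceed the limit, so the cutoff only trips on the sixth consecutive failure instead of the fifth. Below is a minimal standalone Java sketch, not the actual Autopsy code, that illustrates this consecutive-failure cutoff; the class name ConsecutiveFailureCutoffSketch, the indexBatch method, and the sendBatch parameter are hypothetical stand-ins for the real batching logic in Server.java.

// Minimal sketch (assumed names, not the Autopsy implementation) of the
// consecutive-failure cutoff that the ">=" fix implements.
public class ConsecutiveFailureCutoffSketch {

    private static final int MAX_NUM_CONSECUTIVE_FAILURES = 5;

    private int numConsecutiveFailures = 0;
    private boolean skipIndexing = false;

    // sendBatch is a hypothetical stand-in for sending one batch of documents to Solr.
    void indexBatch(Runnable sendBatch) {
        if (skipIndexing) {
            return; // a previous run of failures has disabled indexing for this collection
        }
        try {
            sendBatch.run();
            numConsecutiveFailures = 0; // any success resets the streak
        } catch (RuntimeException ex) {
            numConsecutiveFailures++;
        } finally {
            // With ">" the 5th consecutive failure would not yet trip the cutoff;
            // ">=" trips it exactly when the configured limit is reached.
            if (numConsecutiveFailures >= MAX_NUM_CONSECUTIVE_FAILURES) {
                skipIndexing = true;
            }
        }
    }

    public static void main(String[] args) {
        ConsecutiveFailureCutoffSketch s = new ConsecutiveFailureCutoffSketch();
        for (int i = 0; i < 6; i++) {
            s.indexBatch(() -> { throw new RuntimeException("simulated Solr failure"); });
            System.out.println("after batch " + (i + 1) + ": skipIndexing=" + s.skipIndexing);
        }
    }
}

Running the sketch shows skipIndexing flipping to true after the fifth simulated failure; with the original ">" comparison it would flip only after a sixth.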