mirror of https://github.com/overcuriousity/autopsy-flatpak.git
synced 2025-07-06 21:00:22 +00:00

commit 760f8e027e (parent 40861c22ab)
Fixed build issue
@@ -19,7 +19,7 @@
<dependency conf="solr-libs->default" name="solr-cell" rev="8.11.2" org="org.apache.solr"/>
<!-- https://mvnrepository.com/artifact/org.apache.lucene/lucene-core -->
<!-- <dependency org="org.apache.lucene" name="lucene-core" rev="8.11.2"/> -->
<dependency conf="autopsy->default" org="org.apache.lucene" name="lucene-core" rev="8.11.2"/>
<!-- Autopsy -->
<dependency conf="autopsy->default" org="org.apache.solr" name="solr-solrj" rev="8.11.2"/>
<dependency conf="autopsy->default" org="com.optimaize.languagedetector" name="language-detector" rev="0.6"/>
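The lucene-core dependency added to the autopsy->default conf sits alongside solr-solrj, the SolrJ client that the keyword search module uses to talk to Solr. As a rough, self-contained sketch of what the solr-solrj artifact provides (not code from this commit; the core URL is a hypothetical placeholder, since Autopsy manages its own Solr servers and collections):

import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.response.QueryResponse;

public class SolrJSmokeTest {
    public static void main(String[] args) throws Exception {
        // Hypothetical core URL for illustration only.
        String coreUrl = "http://localhost:8983/solr/example_core";
        try (SolrClient client = new HttpSolrClient.Builder(coreUrl).build()) {
            // Match-all query just to confirm the client and core are reachable.
            QueryResponse response = client.query(new SolrQuery("*:*"));
            System.out.println("Indexed documents: " + response.getResults().getNumFound());
        }
    }
}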
@@ -15,13 +15,16 @@ ExtractAllTermsReport.error.noOpenCase=No currently open case.
ExtractAllTermsReport.export.error=Error During Unique Word Extraction
ExtractAllTermsReport.exportComplete=Unique Word Extraction Complete
ExtractAllTermsReport.getName.text=Extract Unique Words
# {0} - Number of extracted terms
ExtractAllTermsReport.numberExtractedTerms=Extracted {0} terms...
ExtractAllTermsReport.search.ingestInProgressBody=<html>Keyword Search Ingest is currently running.<br />Not all files have been indexed and unique word extraction might yield incomplete results.<br />Do you want to proceed with unique word extraction anyway?</html>
# {0} - Keyword search commit frequency
ExtractAllTermsReport.search.noFilesInIdxMsg=No files are in index yet. Try again later. Index is updated every {0} minutes.
ExtractAllTermsReport.search.noFilesInIdxMsg2=No files are in index yet. Try again later
ExtractAllTermsReport.search.searchIngestInProgressTitle=Keyword Search Ingest in Progress
ExtractAllTermsReport.startExport=Starting Unique Word Extraction
ExtractedContentPanel.setMarkup.panelTxt=<span style='font-style:italic'>Loading text... Please wait</span>
# {0} - Content name
ExtractedContentPanel.SetMarkup.progress.loading=Loading text for {0}
GlobalEditListPanel.editKeyword.title=Edit Keyword
GlobalEditListPanel.warning.text=Boundary characters ^ and $ do not match word boundaries. Consider\nreplacing with an explicit list of boundary characters, such as [ \\.,]
@@ -225,6 +228,7 @@ KeywordSearchSettings.propertiesNSRL.text={0}_NSRL
KeywordSearchSettings.propertiesScripts.text={0}_Scripts
NoOpenCoreException.err.noOpenSorlCore.msg=No currently open Solr core.
SearchRunner.query.exception.msg=Error performing query:
# {0} - collection name
Server.deleteCore.exception.msg=Failed to delete Solr collection {0}
Server.exceptionMessage.unableToBackupCollection=Unable to backup Solr collection
Server.exceptionMessage.unableToCreateCollection=Unable to create Solr collection
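These keys follow the NetBeans resource-bundle conventions used throughout Autopsy, with the # {0} comments documenting each MessageFormat placeholder. A minimal sketch of how such a key would be looked up, assuming a Bundle.properties containing the key sits in the same package as the calling class (the class name here is illustrative, not taken from the diff):

import org.openide.util.NbBundle;

public class BundleLookupExample {
    public static void main(String[] args) {
        // Resolves the key from the Bundle.properties next to this class and
        // fills the {0} placeholder via MessageFormat.
        String msg = NbBundle.getMessage(BundleLookupExample.class,
                "ExtractAllTermsReport.numberExtractedTerms", 1250);
        System.out.println(msg);
    }
}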
@@ -36,8 +36,6 @@ import org.sleuthkit.autopsy.healthmonitor.HealthMonitor;
import org.sleuthkit.autopsy.healthmonitor.TimingMetric;
import org.sleuthkit.autopsy.ingest.IngestJobContext;
import org.sleuthkit.autopsy.keywordsearch.Chunker.Chunk;
import org.sleuthkit.autopsy.textextractors.TextExtractor;
import org.sleuthkit.autopsy.textextractors.TextExtractorFactory;
import org.sleuthkit.datamodel.AbstractFile;
import org.sleuthkit.datamodel.BlackboardArtifact;
import org.sleuthkit.datamodel.Content;

@@ -212,7 +210,7 @@ class Ingester {
    //Get a reader for the content of the given source
    try (BufferedReader reader = new BufferedReader(sourceReader)) {
        Chunker chunker = new Chunker(reader);
        searcher.searchChunk(sourceName, sourceID);
        searcher.searchString(sourceName, sourceID);

        while (chunker.hasNext()) {
            if (context != null && context.fileIngestIsCancelled()) {
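The loop in the second hunk drives Autopsy's internal Chunker, which slices the extracted text of a source into pieces so each can be handed to the indexer separately. A simplified, self-contained stand-in for that iterator pattern (not the actual Chunker class) could look like this:

import java.io.BufferedReader;
import java.io.IOException;
import java.io.Reader;
import java.io.StringReader;
import java.util.Iterator;
import java.util.NoSuchElementException;

// Pulls fixed-size text chunks from a Reader so callers can process them one at a time.
class SimpleChunker implements Iterator<String> {
    private static final int CHUNK_SIZE = 1024;
    private final BufferedReader reader;
    private String next;

    SimpleChunker(Reader in) throws IOException {
        this.reader = new BufferedReader(in);
        advance();
    }

    private void advance() throws IOException {
        char[] buf = new char[CHUNK_SIZE];
        int read = reader.read(buf, 0, CHUNK_SIZE);
        next = (read == -1) ? null : new String(buf, 0, read);
    }

    @Override
    public boolean hasNext() {
        return next != null;
    }

    @Override
    public String next() {
        if (next == null) {
            throw new NoSuchElementException();
        }
        String chunk = next;
        try {
            advance();
        } catch (IOException ex) {
            throw new RuntimeException(ex);
        }
        return chunk;
    }

    public static void main(String[] args) throws IOException {
        SimpleChunker chunker = new SimpleChunker(new StringReader("some extracted document text..."));
        while (chunker.hasNext()) {
            System.out.println("chunk: " + chunker.next());
        }
    }
}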
@@ -33,11 +33,6 @@ import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.queryparser.classic.ParseException;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.Query;
import org.openide.util.Exceptions;
import org.sleuthkit.autopsy.casemodule.Case;
import org.sleuthkit.autopsy.casemodule.NoCurrentCaseException;
import org.sleuthkit.autopsy.coreutils.Logger;
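The Lucene analysis classes imported above come from lucene-core, which the Ivy change above pulls into the autopsy conf. As a rough sketch of the analyzer/token-stream usage these imports support, assuming Lucene 8.x (illustrative only, not code from the commit):

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;

public class TokenizeExample {
    public static void main(String[] args) throws Exception {
        try (StandardAnalyzer analyzer = new StandardAnalyzer();
             TokenStream stream = analyzer.tokenStream("content", "The quick brown fox")) {
            // Attributes expose the current token's text and character offsets.
            CharTermAttribute term = stream.addAttribute(CharTermAttribute.class);
            OffsetAttribute offset = stream.addAttribute(OffsetAttribute.class);
            stream.reset();
            while (stream.incrementToken()) {
                System.out.println(term.toString() + " [" + offset.startOffset() + "," + offset.endOffset() + "]");
            }
            stream.end();
        }
    }
}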