Mirror of https://github.com/overcuriousity/autopsy-flatpak.git (synced 2025-07-15 01:07:42 +00:00)

remove TextMarkupLookup interface

This commit is contained in:
parent 87a0e8ad69
commit edd03a66c1
@@ -21,6 +21,7 @@ package org.sleuthkit.autopsy.corecomponents;

import org.openide.nodes.FilterNode;
import org.openide.nodes.Node;
import org.openide.util.NbBundle;
import org.openide.util.lookup.Lookups;

/**
 * A filter node that creates at most one layer of child nodes for the node it

@@ -44,7 +45,7 @@ public class TableFilterNode extends FilterNode {
     * The constructor should include column order key. (See getColumnOrderKey)
     */
    public TableFilterNode(Node wrappedNode, boolean createChildren) {
        super(wrappedNode, TableFilterChildren.createInstance(wrappedNode, createChildren));
        super(wrappedNode, TableFilterChildren.createInstance(wrappedNode, createChildren) , Lookups.proxy(wrappedNode));
        this.createChildren = createChildren;
    }
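
The constructor change above routes the wrapped node's own lookup through the filter node via Lookups.proxy. As a rough illustration only (not part of this commit; the class name and the LEAF children are placeholders), a FilterNode built this way exposes whatever its delegate puts in its lookup:

    import org.openide.nodes.Children;
    import org.openide.nodes.FilterNode;
    import org.openide.nodes.Node;
    import org.openide.util.lookup.Lookups;

    class ProxyLookupFilterNodeSketch extends FilterNode {

        ProxyLookupFilterNodeSketch(Node wrappedNode) {
            // Node implements Lookup.Provider, so Lookups.proxy(wrappedNode)
            // delegates every lookup query to wrappedNode.getLookup() at call time.
            super(wrappedNode, Children.LEAF, Lookups.proxy(wrappedNode));
        }
    }

Anything the wrapped result node places in its own lookup (for example the QueryResults and BlackboardArtifact consumed later in this diff by ExtractedContentViewer) then remains reachable through the filter node.
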
@@ -454,12 +454,12 @@ public class BlackboardArtifactNode extends DisplayableItemNode {
            forLookup.add(content);
        }

        // if there is a text highlighted version, of the content, add it too
        // currently happens from keyword search module
        TextMarkupLookup highlight = getHighlightLookup(artifact, content);
        if (highlight != null) {
            forLookup.add(highlight);
        }
//        // if there is a text highlighted version, of the content, add it too
//        // currently happens from keyword search module
//        TextMarkupLookup highlight = getHighlightLookup(artifact, content);
//        if (highlight != null) {
//            forLookup.add(highlight);
//        }

        return Lookups.fixed(forLookup.toArray(new Object[forLookup.size()]));
    }

@@ -474,35 +474,35 @@ public class BlackboardArtifactNode extends DisplayableItemNode {
                NbBundle.getMessage(BlackboardArtifactNode.class, "BlackboardArtifactNode.getAssocCont.exception.msg"));
    }

    private static TextMarkupLookup getHighlightLookup(BlackboardArtifact artifact, Content content) {
        if (artifact.getArtifactTypeID() != BlackboardArtifact.ARTIFACT_TYPE.TSK_KEYWORD_HIT.getTypeID()) {
            return null;
        }

        Lookup lookup = Lookup.getDefault();
        TextMarkupLookup highlightFactory = lookup.lookup(TextMarkupLookup.class);
        try {
            List<BlackboardAttribute> attributes = artifact.getAttributes();
            String keyword = null;
            String regexp = null;
            for (BlackboardAttribute att : attributes) {
                final int attributeTypeID = att.getAttributeType().getTypeID();
                if (attributeTypeID == BlackboardAttribute.ATTRIBUTE_TYPE.TSK_KEYWORD.getTypeID()) {
                    keyword = att.getValueString();
                } else if (attributeTypeID == BlackboardAttribute.ATTRIBUTE_TYPE.TSK_KEYWORD_REGEXP.getTypeID()) {
                    regexp = att.getValueString();
                }
            }
            if (keyword != null) {
                boolean isRegexp = StringUtils.isNotBlank(regexp);
                String origQuery = isRegexp ? regexp : keyword;
                return highlightFactory.createInstance(artifact.getArtifactID(), keyword, isRegexp, origQuery);
            }
        } catch (TskCoreException ex) {
            LOGGER.log(Level.WARNING, "Failed to retrieve Blackboard Attributes", ex); //NON-NLS
        }
        return null;
    }
//    private static TextMarkupLookup getHighlightLookup(BlackboardArtifact artifact, Content content) {
//        if (artifact.getArtifactTypeID() != BlackboardArtifact.ARTIFACT_TYPE.TSK_KEYWORD_HIT.getTypeID()) {
//            return null;
//        }
//
//        Lookup lookup = Lookup.getDefault();
//        TextMarkupLookup highlightFactory = lookup.lookup(TextMarkupLookup.class);
//        try {
//            List<BlackboardAttribute> attributes = artifact.getAttributes();
//            String keyword = null;
//            String regexp = null;
//            for (BlackboardAttribute att : attributes) {
//                final int attributeTypeID = att.getAttributeType().getTypeID();
//                if (attributeTypeID == BlackboardAttribute.ATTRIBUTE_TYPE.TSK_KEYWORD.getTypeID()) {
//                    keyword = att.getValueString();
//                } else if (attributeTypeID == BlackboardAttribute.ATTRIBUTE_TYPE.TSK_KEYWORD_REGEXP.getTypeID()) {
//                    regexp = att.getValueString();
//                }
//            }
//            if (keyword != null) {
//                boolean isRegexp = StringUtils.isNotBlank(regexp);
//                String origQuery = isRegexp ? regexp : keyword;
//                return highlightFactory.createInstance(artifact.getArtifactID(), keyword, isRegexp, origQuery);
//            }
//        } catch (TskCoreException ex) {
//            LOGGER.log(Level.WARNING, "Failed to retrieve Blackboard Attributes", ex); //NON-NLS
//        }
//        return null;
//    }

    @Override
    public boolean isLeafTypeNode() {
@@ -1,55 +0,0 @@
/*
 * Autopsy Forensic Browser
 *
 * Copyright 2011-2015 Basis Technology Corp.
 * Contact: carrier <at> sleuthkit <dot> org
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.sleuthkit.autopsy.datamodel;

/**
 * This interface acts as a sort of bridge between the Autopsy Core NetBeans
 * Module (NBM) and the Autopsy KeywordSearch NBM. It is used to get indexed
 * text marked up with HTML to highlight search hits for a particular keyword.
 *
 * Here is an example of how it works. It is used to put highlighted markup into
 * the Lookups of the BlackboardArtifactNodes for keyword search hit artifacts.
 * The BlackboardArtifactNode code that populates the node's Lookup asks the
 * default global Lookup for an instance of TextMarkupLookup. The
 * org.sleuthkit.autopsy.keywordsearch.HighlightedText class is the sole
 * implementation of the interface, so the BlackboardArtifactNode gets a default
 * constructed instance of HighlightedText. This otherwise useless
 * instance is then used to call createInstance with parameters that are used to
 * employ the Solr highlighting capability to create the markup. The
 * TextMarkupLookup object goes in the BlackboardArtifactNode Lookup for later
 * use by the ExtractedContentViewer, a DataContentViewer in the KeywordSearch
 * NBM.
 */
public interface TextMarkupLookup {

    /**
     * Factory method for getting an object that encapsulates indexed text
     * marked up (HTML) to highlight search hits for a particular keyword.
     *
     * @param objectId      ID of the object (file or artifact) that is the
     *                      source of the indexed text.
     * @param keyword       The keyword to be highlighted in the text.
     * @param isRegex       Whether or not the query that follows is a regex.
     * @param originalQuery The query that produces the keyword hit.
     *
     * @return An object that encapsulates indexed text marked up (HTML) to
     *         highlight search hits for a particular keyword.
     */
    public TextMarkupLookup createInstance(long objectId, String keyword, boolean isRegex, String originalQuery);
}
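
For context, the interface deleted above was the global-Lookup bridge between the Core and KeywordSearch modules that its Javadoc describes. The following is a minimal sketch, not part of this commit, of how that bridge was consumed; it mirrors the getHighlightLookup code removed from BlackboardArtifactNode earlier in this diff. The class and method names are illustrative only, and how HighlightedText was registered in the default Lookup is not shown in this diff.

    import org.openide.util.Lookup;

    final class TextMarkupBridgeSketch {

        /**
         * Asks the default global Lookup for whatever TextMarkupLookup
         * implementation another module registered, then uses that instance
         * purely as a factory for the real, Solr-backed markup object.
         */
        static TextMarkupLookup markupFor(long objectId, String keyword,
                boolean isRegex, String originalQuery) {
            TextMarkupLookup factory = Lookup.getDefault().lookup(TextMarkupLookup.class);
            if (factory == null) {
                // The keyword-search module is not installed or not registered.
                return null;
            }
            return factory.createInstance(objectId, keyword, isRegex, originalQuery);
        }
    }

The rest of the diff replaces this indirection: the keyword-search code now puts QueryResults or the artifact itself into the node lookup and builds HighlightedText from them directly.
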
@@ -30,6 +30,7 @@ import java.util.Set;
import java.util.logging.Level;
import org.apache.commons.lang.StringUtils;
import org.openide.nodes.Node;
import org.openide.util.Exceptions;
import org.openide.util.Lookup;
import org.openide.util.NbBundle;
import org.openide.util.lookup.ServiceProvider;

@@ -104,35 +105,30 @@ public class ExtractedContentViewer implements DataContentViewer {
        IndexedText highlightedHitText = null;
        IndexedText rawContentText = null;

        /*
         * First add the text marked up with HTML to highlight keyword hits that
         * will be present in the selected node's lookup if the node is for a
         * keyword hit artifact or account.
         */
        indexedTextSources.addAll(nodeLookup.lookupAll(IndexedText.class));

        if (false == indexedTextSources.isEmpty()) {
            //JMTODO: how do know the highlighted one is the first one? I think the assumption is really that it is the only one...
            //if the look up had any sources use them and don't make a new one.
            highlightedHitText = indexedTextSources.get(0);
        } else if (null != content && solrHasContent(content.getId())) {
            /*
             * if the lookup didn't have any sources, and solr has indexed the
             * content,get an AccountsText object that will highlight any
             * account numbers.
             */
            if (null != content && solrHasContent(content.getId())) {
                QueryResults hits = nodeLookup.lookup(QueryResults.class);
                BlackboardArtifact artifact = nodeLookup.lookup(BlackboardArtifact.class);
                if (hits != null) {
                    highlightedHitText = new HighlightedText(content.getId(), hits);
                } else {
                    if (artifact != null && artifact.getArtifactTypeID()
                            == BlackboardArtifact.ARTIFACT_TYPE.TSK_ACCOUNT.getTypeID()) {
                        // if the artifact is an account artifact, get an account text .
                        highlightedHitText = getAccountsText(content, nodeLookup);
                    } else if (artifact != null && artifact.getArtifactTypeID()
                            == BlackboardArtifact.ARTIFACT_TYPE.TSK_KEYWORD_HIT.getTypeID()) {
                        highlightedHitText = new HighlightedText(artifact);
                    }
                }
                if (highlightedHitText != null) {
                    indexedTextSources.add(highlightedHitText);
                }
            }

        /*
         * Next, add the "raw" (not highlighted) text, if any, for any content
         * associated with the node.
         * Next, add the "raw" (not highlighted) text, if any, for any
         * content associated with the node.
         */
        if (null != content && solrHasContent(content.getId())) {
            rawContentText = new RawText(content, content.getId());
            indexedTextSources.add(rawContentText);
        }

@@ -174,7 +170,8 @@ public class ExtractedContentViewer implements DataContentViewer {
         * For keyword hit artifacts, add the text of the artifact that hit,
         * not the hit artifact; otherwise add the text for the artifact.
         */
        if (artifact.getArtifactTypeID() == TSK_KEYWORD_HIT.getTypeID() || artifact.getArtifactTypeID() == TSK_ACCOUNT.getTypeID()) {
        if (artifact.getArtifactTypeID() == TSK_KEYWORD_HIT.getTypeID()
                || artifact.getArtifactTypeID() == TSK_ACCOUNT.getTypeID()) {
            try {
                BlackboardAttribute attribute = artifact.getAttribute(TSK_ASSOCIATED_ARTIFACT_TYPE);
                if (attribute != null) {
@@ -44,7 +44,6 @@ import org.sleuthkit.autopsy.casemodule.Case;
import org.sleuthkit.autopsy.coreutils.Logger;
import org.sleuthkit.autopsy.coreutils.MessageNotifyUtil;
import org.sleuthkit.autopsy.coreutils.Version;
import org.sleuthkit.autopsy.datamodel.TextMarkupLookup;
import org.sleuthkit.autopsy.keywordsearch.KeywordQueryFilter.FilterType;
import org.sleuthkit.datamodel.BlackboardArtifact;
import org.sleuthkit.datamodel.BlackboardAttribute;

@@ -55,7 +54,7 @@ import org.sleuthkit.datamodel.TskCoreException;
 * Highlights hits for a given document. Knows about pages and such for the
 * content viewer.
 */
class HighlightedText implements IndexedText, TextMarkupLookup {
class HighlightedText implements IndexedText {

    private static final Logger logger = Logger.getLogger(HighlightedText.class.getName());

@@ -70,7 +69,6 @@ class HighlightedText implements IndexedText, TextMarkupLookup {
    final private Server solrServer = KeywordSearch.getServer();

    private final long objectId;
//    private final boolean isRegex;
    private final Set<String> keywords = new HashSet<>();

    private int numberPages = 0;

@@ -90,39 +88,20 @@ class HighlightedText implements IndexedText, TextMarkupLookup {
    private QueryResults hits = null; //original hits that may get passed in
    private boolean isPageInfoLoaded = false;
    private static final boolean DEBUG = (Version.getBuildType() == Version.Type.DEVELOPMENT);
//    private String keywordHitQuery;
    private BlackboardArtifact artifact;

//    HighlightedText(long objectId, String keyword, boolean isRegex) {
//        // The keyword can be treated as a literal hit at this point so we
//        // surround it in quotes.
//
//        //hits are unknown
//    }
    /**
     * This constructor is used when keyword hits are accessed from the ad-hoc
     * search results. In that case we have the entire QueryResults object and
     * need to arrange the paging.
     *
     * @param objectId
     * @param keyword       The keyword that was found previously (e.g. during
     *                      ingest)
     * @param isRegex       true if the keyword was found via a regular
     *                      expression search
     * @param originalQuery The original query string that produced the hit. If
     *                      isRegex is true, this will be the regular expression
     *                      that produced the hit.
     */
    HighlightedText(long objectId, String keyword, boolean isRegex, QueryResults hits) {
    /*
     * JMTODO: is this comment correct??? // The keyword can be treated as a
     * literal hit at this point so we // surround it in quotes.
     *
     */
    HighlightedText(long objectId, QueryResults hits) {
        this.objectId = objectId;
//        this.keywords.add(KeywordSearchUtil.quoteQuery(keyword));
//        this.isRegex = isRegex;
//        keywordHitQuery = keywords.stream().collect(Collectors.joining(" "));
        this.hits = hits;
    }

@@ -137,16 +116,15 @@ class HighlightedText implements IndexedText, TextMarkupLookup {
     *
     * @throws TskCoreException
     */
    HighlightedText(BlackboardArtifact artifact) throws TskCoreException {
    HighlightedText(BlackboardArtifact artifact) {
        this.artifact = artifact;
        this.objectId = artifact.getObjectID();
        loadPageInfoFromArtifact();

    }

    private void loadPageInfoFromArtifact() throws TskCoreException, NumberFormatException {

        KeywordSearch.QueryType qt = KeywordSearch.QueryType.values()[artifact.getAttribute(TSK_KEYWORD_SEARCH_TYPE).getValueInt()];
//        this.isRegex = qt == KeywordSearch.QueryType.REGEX;
        this.keywords.add(artifact.getAttribute(TSK_KEYWORD).getValueString());
        String chunkIDsString = artifact.getAttribute(TSK_KEYWORD_HIT_DOCUMENT_IDS).getValueString();
        Set<String> chunkIDs = Arrays.stream(chunkIDsString.split(",")).map(StringUtils::strip).collect(Collectors.toSet());

@@ -164,7 +142,6 @@ class HighlightedText implements IndexedText, TextMarkupLookup {
            numberOfHitsPerPage.put(chunkID, 0);
            currentHitPerPage.put(chunkID, 0);
        }
//        isPageInfoLoaded = true;
    }

    /**

@@ -183,6 +160,7 @@ class HighlightedText implements IndexedText, TextMarkupLookup {
            return constructEscapedSolrQuery(query.getQueryString());
        } else //construct a Solr query using aggregated terms to get highlighting
        //the query is executed later on demand
        {
            if (queryResults.getKeywords().size() == 1) {
                //simple case, no need to process subqueries and do special escaping
                Keyword keyword = queryResults.getKeywords().iterator().next();

@@ -214,6 +192,7 @@ class HighlightedText implements IndexedText, TextMarkupLookup {
                return highlightQuery.toString();
            }
        }
    }

    /**
     * Constructs a complete, escaped Solr query that is ready to be used.

@@ -254,8 +233,7 @@ class HighlightedText implements IndexedText, TextMarkupLookup {
            /*
             * this could go in the constructor but is here to keep it near the
             * functionaly similar code for non regex searches
             */
//            loadRegexPageInfoFromArtifact();
             */ loadPageInfoFromArtifact();
        } else if (hasChunks) {
            // if the file has chunks, get pages with hits, sorted
            if (loadPageInfoFromHits()) {

@@ -318,14 +296,6 @@ class HighlightedText implements IndexedText, TextMarkupLookup {
        return false;
    }

    /**
     * Constructor for dummy singleton factory instance for Lookup
     */
    private HighlightedText() {
        objectId = -1; //JMTODO: dummy value, is this a legal objectID?
//        isRegex = false;
    }

    @Override
    public int getNumberPages() {
        //return number of pages that have hits

@@ -435,7 +405,7 @@ class HighlightedText implements IndexedText, TextMarkupLookup {

        String contentIdStr = Long.toString(this.objectId);
        if (hasChunks) {
            final String chunkID = Integer.toString(this.currentPage );
            final String chunkID = Integer.toString(this.currentPage);
            contentIdStr += "0".equals(chunkID) ? "" : "_" + chunkID;
        }
        final String filterQuery = Server.Schema.ID.toString() + ":" + KeywordSearchUtil.escapeLuceneQuery(contentIdStr);

@@ -624,36 +594,4 @@ class HighlightedText implements IndexedText, TextMarkupLookup {
        return buf.toString();
    }

    //JMTODO: this whole dummy istance stuff should just be separated to a factory class I think
    /*
     * dummy instance for Lookup only
     */
    private static TextMarkupLookup instance = null;

    //getter of the singleton dummy instance solely for Lookup purpose
    //this instance does not actually work with Solr
    public static synchronized TextMarkupLookup getDefault() {
        if (instance == null) {
            instance = new HighlightedText();
        }
        return instance;
    }

    @Override
    // factory method to create an instance of this object
    public TextMarkupLookup createInstance(long objectId, String keywordHitQuery, boolean isRegex, String originalQuery) {
        if (objectId < 0) {
            try {
                BlackboardArtifact blackboardArtifact = Case.getCurrentCase().getSleuthkitCase().getBlackboardArtifact(objectId);
                if (blackboardArtifact.getArtifactTypeID() == BlackboardArtifact.ARTIFACT_TYPE.TSK_KEYWORD_HIT.getTypeID()) {
                    return new HighlightedText(blackboardArtifact);
                }
            } catch (TskCoreException ex) {
                //JMTODO: what to do here?
                Exceptions.printStackTrace(ex);
            }
        }

        return new HighlightedText(objectId, keywordHitQuery, isRegex, null);
    }
}
@@ -147,7 +147,7 @@ class KeywordSearchResultFactory extends ChildFactory<KeyValueQueryContent> {

        int hitNumber = 0;
        List<KeyValueQueryContent> tempList = new ArrayList<>();
        final SetMultimap<Long, KeywordHit> orgnizeByDocID = orgnizeByDocID(queryResults);
//        final SetMultimap<Long, KeywordHit> orgnizeByDocID = orgnizeByDocID(queryResults);
        for (KeywordHit hit : getOneHitPerObject(queryResults)) {

            /**

@@ -175,7 +175,6 @@ class KeywordSearchResultFactory extends ChildFactory<KeyValueQueryContent> {
            // BC: @@@ THis is really ineffecient. We should keep track of this when
            // we flattened the list of files to the unique files.
//            final String highlightQueryEscaped = getHighlightQuery(queryRequest, queryRequest.isLiteral(), queryResults, content);

            String hitName = hit.isArtifactHit()
                    ? hit.getArtifact().getDisplayName() + " Artifact" //NON-NLS
                    : contentName;

@@ -242,7 +241,7 @@ class KeywordSearchResultFactory extends ChildFactory<KeyValueQueryContent> {

        //wrap in KeywordSearchFilterNode for the markup content, might need to override FilterNode for more customization
        // store the data in HighlightedMatchesSource so that it can be looked up (in content viewer)
        HighlightedText highlights = new HighlightedText(key.solrObjectId, key.getName(), !key.getQuery().isLiteral(), hits);
        HighlightedText highlights = new HighlightedText(key.getSolrObjectId(), hits);
        return new KeywordSearchFilterNode(highlights, kvNode);
    }

@@ -253,6 +252,7 @@ class KeywordSearchResultFactory extends ChildFactory<KeyValueQueryContent> {
class KeyValueQueryContent extends KeyValue {

    private long solrObjectId;

    private final Content content;
    private final QueryResults hits;
    private final KeywordSearchQuery query;

@@ -285,7 +285,9 @@ class KeywordSearchResultFactory extends ChildFactory<KeyValueQueryContent> {
        return content;
    }

    public long getSolrObjectId() {
        return solrObjectId;
    }

    QueryResults getHits() {
        return hits;