Merge branch 'develop' of github.com:sleuthkit/autopsy into 6896-testing

This commit is contained in:
Greg DiCristofaro 2021-01-25 15:38:24 -05:00
commit dc9ab38e7f
68 changed files with 1550 additions and 414 deletions

View File

@ -28,6 +28,8 @@
<dependency conf="core->default" org="org.jsoup" name="jsoup" rev="1.10.3"/>
<dependency conf="core->default" org="com.fasterxml.jackson.core" name="jackson-databind" rev="2.9.7"/>
<dependency org="com.fasterxml.jackson.dataformat" name="jackson-dataformat-csv" rev="2.9.7"/>
<dependency conf="core->default" org="com.drewnoakes" name="metadata-extractor" rev="2.11.0"/>
<dependency conf="core->default" org="com.google.cloud" name="google-cloud-translate" rev="1.70.0"/>

View File

@ -69,6 +69,7 @@ file.reference.jackcess-encrypt-2.1.4.jar=release\\modules\\ext\\jackcess-encryp
file.reference.jackson-annotations-2.9.0.jar=release\\modules\\ext\\jackson-annotations-2.9.0.jar
file.reference.jackson-core-2.9.7.jar=release\\modules\\ext\\jackson-core-2.9.7.jar
file.reference.jackson-databind-2.9.7.jar=release\\modules\\ext\\jackson-databind-2.9.7.jar
file.reference.jackson-dataformat-csv-2.9.7.jar=release\\modules\\ext\\jackson-dataformat-csv-2.9.7.jar
file.reference.jai_core-1.1.3.jar=release\\modules\\ext\\jai_core-1.1.3.jar
file.reference.jai_imageio-1.1.jar=release\\modules\\ext\\jai_imageio-1.1.jar
file.reference.javax.annotation-api-1.3.2.jar=release\\modules\\ext\\javax.annotation-api-1.3.2.jar

View File

@ -393,6 +393,10 @@
<runtime-relative-path>ext/jackson-databind-2.9.7.jar</runtime-relative-path>
<binary-origin>release\modules\ext\jackson-databind-2.9.7.jar</binary-origin>
</class-path-extension>
<class-path-extension>
<runtime-relative-path>ext/jackson-dataformat-csv-2.9.7.jar</runtime-relative-path>
<binary-origin>release\modules\ext\jackson-dataformat-csv-2.9.7.jar</binary-origin>
</class-path-extension>
<class-path-extension>
<runtime-relative-path>ext/okhttp-2.7.5.jar</runtime-relative-path>
<binary-origin>release\modules\ext\okhttp-2.7.5.jar</binary-origin>

View File

@ -59,7 +59,7 @@ SummaryViewer_Device_Account_Description=This account was referenced by a device
SummaryViewer_Fetching_References=<Fetching File References>
SummaryViewer_FileRef_Message=<Select a single account to see File References>
SummaryViewer_FileRefNameColumn_Title=Path
SummaryViewer_Persona_Message=<Enable Central Repository to view Personas>
SummaryViewer_Persona_CR_Message=<Enable Central Repository to view Personas>
SummaryViewer_Select_account_for_persona=<Select a single account to see Persona(s)>
SummaryViewer_TabTitle=Summary
ThreadRootMessagePanel.showAllCheckBox.text=Show All Messages

View File

@ -205,7 +205,7 @@ public class SummaryViewer extends javax.swing.JPanel implements RelationshipsVi
@Messages({
"SummaryViewer_Fetching_References=<Fetching File References>",
"SummaryViewer_Persona_Message=<Enable Central Repository to view Personas>"
"SummaryViewer_Persona_CR_Message=<Enable Central Repository to view Personas>"
})
private void updateOtherAccountInfo(final Account account) {
SummaryPanelWorker worker = new SummaryPanelWorker(account) {
@ -228,7 +228,7 @@ public class SummaryViewer extends javax.swing.JPanel implements RelationshipsVi
if (CentralRepository.isEnabled()) {
((SummaryPersonaPane) personaPanel).updatePersonaList(account, results.getCRAccount(), personaList);
} else {
((SummaryPersonaPane) personaPanel).setMessage("Bundle.SummaryViewer_Persona_Message()");
((SummaryPersonaPane) personaPanel).setMessage(Bundle.SummaryViewer_Persona_CR_Message());
((SummaryPersonaPane) personaPanel).showMessagePanel();
}

View File

@ -82,7 +82,7 @@ final class AutopsyOptionsPanel extends javax.swing.JPanel {
private static final String CONFIG_FILE_EXTENSION = ".conf";
private static final long ONE_BILLION = 1000000000L; //used to roughly convert system memory from bytes to gigabytes
private static final int MEGA_IN_GIGA = 1024; //used to convert memory settings saved as megabytes to gigabytes
private static final int DEFAULT_SOLR_HEAP_SIZE_MB = 2048;
private static final int JVM_MEMORY_STEP_SIZE_MB = 512;
private static final int MIN_MEMORY_IN_GB = 2; //the enforced minimum memory in gigabytes
private static final Logger logger = Logger.getLogger(AutopsyOptionsPanel.class.getName());
private String initialMemValue = Long.toString(Runtime.getRuntime().maxMemory() / ONE_BILLION);
@ -114,7 +114,7 @@ final class AutopsyOptionsPanel extends javax.swing.JPanel {
// The cast to int in the following is to ensure that the correct SpinnerNumberModel
// constructor is called.
solrMaxHeapSpinner.setModel(new javax.swing.SpinnerNumberModel(UserPreferences.getMaxSolrVMSize(),
DEFAULT_SOLR_HEAP_SIZE_MB, ((int) getSystemMemoryInGB()) * MEGA_IN_GIGA, DEFAULT_SOLR_HEAP_SIZE_MB));
JVM_MEMORY_STEP_SIZE_MB, ((int) getSystemMemoryInGB()) * MEGA_IN_GIGA, JVM_MEMORY_STEP_SIZE_MB));
textFieldListener = new TextFieldListener();
agencyLogoPathField.getDocument().addDocumentListener(textFieldListener);

View File

@ -404,15 +404,22 @@ final public class Accounts implements AutopsyVisitableItem {
}
@Override
protected Node[] createNodesForKey(String acountTypeName) {
protected Node[] createNodesForKey(String accountTypeName) {
if (Account.Type.CREDIT_CARD.getTypeName().equals(acountTypeName)) {
if (Account.Type.CREDIT_CARD.getTypeName().equals(accountTypeName)) {
return getNodeArr(new CreditCardNumberAccountTypeNode());
} else {
try {
Account.Type accountType = skCase.getCommunicationsManager().getAccountType(acountTypeName);
return getNodeArr(new DefaultAccountTypeNode(accountType));
Account.Type accountType = skCase.getCommunicationsManager().getAccountType(accountTypeName);
if (accountType != null) {
return getNodeArr(new DefaultAccountTypeNode(accountType));
} else {
// This can only happen if a TSK_ACCOUNT artifact was created without using the CommunicationsManager
LOGGER.log(Level.SEVERE, "Unknown account type '" + accountTypeName + "' found - account will not be displayed.\n"
+ "Account type names must match an entry in the display_name column of the account_types table.\n"
+ "Accounts should be created using the CommunicationManager API.");
}
} catch (TskCoreException ex) {
LOGGER.log(Level.SEVERE, "Error getting display name for account type. ", ex);
}

View File

@ -1,3 +1,2 @@
DataSourceUserActivitySummary_getRecentAccounts_calllogMessage=Call Log
DataSourceUserActivitySummary_getRecentAccounts_emailMessage=Email Message
IngestModuleCheckUtil_recentActivityModuleName=Recent Activity

View File

@ -31,7 +31,6 @@ import java.util.concurrent.BlockingQueue;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.apache.commons.lang3.tuple.Pair;
import org.sleuthkit.autopsy.coreutils.Logger;
import org.sleuthkit.autopsy.datasourcesummary.datamodel.SleuthkitCaseProvider.SleuthkitCaseProviderException;
import org.sleuthkit.autopsy.datasourcesummary.uiutils.DefaultArtifactUpdateGovernor;
import org.sleuthkit.autopsy.geolocation.AbstractWaypointFetcher;
@ -47,6 +46,7 @@ import org.sleuthkit.datamodel.DataSource;
* Gathers summary data about Geolocation information for a data source.
*/
public class GeolocationSummary implements DefaultArtifactUpdateGovernor {
/**
* A count of hits for a particular city.
*/
@ -169,6 +169,53 @@ public class GeolocationSummary implements DefaultArtifactUpdateGovernor {
}
}
/**
* Carries data retrieved from the Geolocation API to be processed for
* closest cities.
*/
private static class GeoResult {
private final Set<MapWaypoint> mapWaypoints;
private final List<Set<MapWaypoint>> tracks;
private final List<Set<MapWaypoint>> areas;
/**
* Main constructor.
*
* @param mapWaypoints The way points found for the data source.
* @param tracks A list of sets where each set is a track in the data
* source.
* @param areas A list of areas where each set is an area in the data
* source.
*/
private GeoResult(Set<MapWaypoint> mapWaypoints, List<Set<MapWaypoint>> tracks, List<Set<MapWaypoint>> areas) {
this.mapWaypoints = mapWaypoints;
this.tracks = tracks;
this.areas = areas;
}
/**
* @return The way points found for the data source.
*/
private Set<MapWaypoint> getMapWaypoints() {
return mapWaypoints;
}
/**
* @return A list of sets where each set is a track in the data source.
*/
private List<Set<MapWaypoint>> getTracks() {
return tracks;
}
/**
* @return A list of areas where each set is an area in the data source.
*/
private List<Set<MapWaypoint>> getAreas() {
return areas;
}
}
// taken from GeoFilterPanel: all of the GPS artifact types.
@SuppressWarnings("deprecation")
private static final List<ARTIFACT_TYPE> GPS_ARTIFACT_TYPES = Arrays.asList(
@ -178,7 +225,8 @@ public class GeolocationSummary implements DefaultArtifactUpdateGovernor {
BlackboardArtifact.ARTIFACT_TYPE.TSK_GPS_SEARCH,
BlackboardArtifact.ARTIFACT_TYPE.TSK_GPS_TRACK,
BlackboardArtifact.ARTIFACT_TYPE.TSK_GPS_TRACKPOINT,
BlackboardArtifact.ARTIFACT_TYPE.TSK_METADATA_EXIF
BlackboardArtifact.ARTIFACT_TYPE.TSK_METADATA_EXIF,
BlackboardArtifact.ARTIFACT_TYPE.TSK_GPS_AREA
);
// all GPS types
@ -186,8 +234,11 @@ public class GeolocationSummary implements DefaultArtifactUpdateGovernor {
.map(artifactType -> artifactType.getTypeID())
.collect(Collectors.toSet());
private static final Pair<Integer, Integer> EMPTY_COUNT = Pair.of(0, 0);
private static final long DAY_SECS = 24 * 60 * 60;
private final SleuthkitCaseProvider provider;
private final java.util.logging.Logger logger;
private final SupplierWithException<ClosestCityMapper, IOException> cityMapper;
/**
@ -208,7 +259,7 @@ public class GeolocationSummary implements DefaultArtifactUpdateGovernor {
* Default constructor.
*/
public GeolocationSummary() {
this(() -> ClosestCityMapper.getInstance(), SleuthkitCaseProvider.DEFAULT, Logger.getLogger(GeolocationSummary.class.getName()));
this(() -> ClosestCityMapper.getInstance(), SleuthkitCaseProvider.DEFAULT);
}
/**
@ -217,12 +268,10 @@ public class GeolocationSummary implements DefaultArtifactUpdateGovernor {
* @param cityMapper A means of acquiring a ClosestCityMapper that can throw
* an IOException.
* @param provider A means of acquiring a SleuthkitCaseProvider.
* @param logger The logger.
*/
public GeolocationSummary(SupplierWithException<ClosestCityMapper, IOException> cityMapper, SleuthkitCaseProvider provider, java.util.logging.Logger logger) {
public GeolocationSummary(SupplierWithException<ClosestCityMapper, IOException> cityMapper, SleuthkitCaseProvider provider) {
this.cityMapper = cityMapper;
this.provider = provider;
this.logger = logger;
}
/**
@ -256,8 +305,6 @@ public class GeolocationSummary implements DefaultArtifactUpdateGovernor {
}
}
private static final Pair<Integer, Integer> EMPTY_COUNT = Pair.of(0, 0);
/**
* Based on a set of waypoints, determines the count of total waypoints and
* a total of waypoints whose time stamp is greater than or equal to
@ -268,28 +315,116 @@ public class GeolocationSummary implements DefaultArtifactUpdateGovernor {
* @return A pair where the left value is the total count of way points and
* the right is the total list of way points that are >= minTime.
*/
private Pair<Integer, Integer> getCounts(List<MapWaypoint> points, Long minTime) {
private Pair<Integer, Integer> getCounts(List<Long> points, Long minTime) {
if (points == null) {
return EMPTY_COUNT;
}
return points.stream().reduce(
EMPTY_COUNT,
(total, w) -> Pair.of(total.getLeft() + 1, total.getRight() + (greaterThanOrEqual(minTime, w.getTimestamp()) ? 1 : 0)),
(total, time) -> Pair.of(total.getLeft() + 1, total.getRight() + (greaterThanOrEqual(minTime, time) ? 1 : 0)),
(pair1, pair2) -> Pair.of(pair1.getLeft() + pair2.getLeft(), pair1.getRight() + pair2.getRight()));
}
/**
* Retrieves a tuple of the closest city (or null if a closest city cannot
* be determined) and the time stamp of the point in seconds from epoch. If
* the point is null, null is returned.
*
* @param cityMapper The means of mapping a point to the closest city.
* @param pt The geolocation point.
* @return A tuple of the closest city and timestamp in seconds from epoch.
*/
private Pair<CityRecord, Long> getClosestWithTime(ClosestCityMapper cityMapper, MapWaypoint pt) {
if (pt == null) {
return null;
}
private static final long DAY_SECS = 24 * 60 * 60;
CityRecord city = cityMapper.findClosest(new CityRecord(null, null, null, pt.getX(), pt.getY()));
Long time = pt.getTimestamp();
return Pair.of(city, time);
}
/**
* Converts a set of waypoints representing a grouping (i.e. track, area)
* into a stream of the unique cities identified in this grouping and the
* latest time stamp for each grouping.
*
* @param points The points in the grouping.
* @param cityMapper The means of mapping a point to the closest city.
* @return A stream of tuples where each tuple will be a unique city (or
* null if a closest is not determined) and the latest timestamp for each.
*/
private Stream<Pair<CityRecord, Long>> reduceGrouping(Set<MapWaypoint> points, ClosestCityMapper cityMapper) {
if (points == null) {
return Stream.empty();
}
Map<CityRecord, Long> timeMapping = new HashMap<>();
for (MapWaypoint pt : points) {
Pair<CityRecord, Long> pair = getClosestWithTime(cityMapper, pt);
if (pair == null) {
continue;
}
CityRecord city = pair.getLeft();
Long prevTime = timeMapping.get(city);
Long curTime = pair.getRight();
if (prevTime == null || (curTime != null && curTime > prevTime)) {
timeMapping.put(city, curTime);
}
}
return timeMapping.entrySet().stream()
.map(e -> Pair.of(e.getKey(), e.getValue()));
}
/**
* Converts a geo result taken from the Geolocation API into a stream
* of tuples where each tuple represents a point with the closest city and
* the time stamp in seconds from epoch.
*
* @param geoResult The result from the Geolocation API.
* @param cityMapper The means of mapping a point to the closest city.
* @return A list of tuples where each tuple represents a point to be
* counted with a combination of the closest city and the timestamp.
* @throws IOException
*/
private Stream<Pair<CityRecord, Long>> processGeoResult(GeoResult geoResult, ClosestCityMapper cityMapper) {
if (geoResult == null) {
return Stream.empty();
}
List<Set<MapWaypoint>> areas = (geoResult.getAreas() == null) ? Collections.emptyList() : geoResult.getAreas();
List<Set<MapWaypoint>> tracks = (geoResult.getTracks() == null) ? Collections.emptyList() : geoResult.getTracks();
Stream<Pair<CityRecord, Long>> reducedGroupings = Stream.of(areas, tracks)
.flatMap((groupingList) -> groupingList.stream())
.flatMap((grouping) -> reduceGrouping(grouping, cityMapper));
final Set<MapWaypoint> allTracksAndAreas = Stream.of(areas, tracks)
.flatMap((groupingList) -> groupingList.stream())
.flatMap((group) -> group.stream())
.collect(Collectors.toSet());
Set<MapWaypoint> pointSet = geoResult.getMapWaypoints() == null ? Collections.emptySet() : geoResult.getMapWaypoints();
Stream<Pair<CityRecord, Long>> citiesForPoints = pointSet.stream()
// it appears that AbstractWaypointFetcher.handleFilteredWaypointSet returns all points
// (including track and area points) in the set of MapWaypoints. This filters those points out of the remaining.
.filter(pt -> !allTracksAndAreas.contains(pt))
.map(pt -> getClosestWithTime(cityMapper, pt));
return Stream.concat(reducedGroupings, citiesForPoints);
}
/**
* Get the list of hits per city where the list is sorted descending by
* number of found hits (i.e. most hits is first index).
*
* @param dataSource The data source.
* @param daysCount Number of days to go back.
* @param maxCount Maximum number of results.
* @param daysCount Number of days to go back.
* @param maxCount Maximum number of results.
*
* @return The sorted list.
*
@ -300,31 +435,36 @@ public class GeolocationSummary implements DefaultArtifactUpdateGovernor {
public CityData getCityCounts(DataSource dataSource, int daysCount, int maxCount)
throws SleuthkitCaseProviderException, GeoLocationDataException, InterruptedException, IOException {
ClosestCityMapper closestCityMapper = ClosestCityMapper.getInstance();
ClosestCityMapper closestCityMapper = this.cityMapper.get();
GeoResult geoResult = getGeoResult(dataSource);
List<Pair<CityRecord, Long>> dataSourcePoints = processGeoResult(geoResult, closestCityMapper)
.collect(Collectors.toList());
List<MapWaypoint> dataSourcePoints = getPoints(dataSource);
Map<CityRecord, List<MapWaypoint>> allCityPoints = new HashMap<>();
List<MapWaypoint> others = new ArrayList<>();
Map<CityRecord, List<Long>> allCityPoints = new HashMap<>();
List<Long> others = new ArrayList<>();
Long mostRecent = null;
for (MapWaypoint pt : dataSourcePoints) {
CityRecord city = closestCityMapper.findClosest(new CityRecord(null, null, null, pt.getX(), pt.getY()));
Long curTime = pt.getTimestamp();
for (Pair<CityRecord, Long> pt : dataSourcePoints) {
if (pt == null) {
continue;
}
Long curTime = pt.getRight();
if (curTime != null && (mostRecent == null || curTime > mostRecent)) {
mostRecent = curTime;
}
CityRecord city = pt.getLeft();
if (city == null) {
others.add(pt);
others.add(curTime);
} else {
List<MapWaypoint> cityPoints = allCityPoints.get(city);
List<Long> cityPoints = allCityPoints.get(city);
if (cityPoints == null) {
cityPoints = new ArrayList<>();
allCityPoints.put(city, cityPoints);
}
cityPoints.add(pt);
cityPoints.add(curTime);
}
}
@ -361,7 +501,7 @@ public class GeolocationSummary implements DefaultArtifactUpdateGovernor {
*/
private static class PointFetcher extends AbstractWaypointFetcher {
private final BlockingQueue<List<MapWaypoint>> asyncResult;
private final BlockingQueue<GeoResult> asyncResult;
/**
* Main constructor.
@ -371,26 +511,16 @@ public class GeolocationSummary implements DefaultArtifactUpdateGovernor {
* BlockingQueue blocks until a result is received from geolocation.
* @param filters The applicable filters for geolocation.
*/
public PointFetcher(BlockingQueue<List<MapWaypoint>> asyncResult, GeoFilter filters) {
public PointFetcher(BlockingQueue<GeoResult> asyncResult, GeoFilter filters) {
super(filters);
this.asyncResult = asyncResult;
}
@Override
public void handleFilteredWaypointSet(Set<MapWaypoint> mapWaypoints, List<Set<MapWaypoint>> tracks, List<Set<MapWaypoint>> areas, boolean wasEntirelySuccessful) {
Stream<List<Set<MapWaypoint>>> stream = Stream.of(
Arrays.asList(mapWaypoints),
tracks == null ? Collections.emptyList() : tracks,
areas == null ? Collections.emptyList() : areas);
List<MapWaypoint> wayPoints = stream
.flatMap((List<Set<MapWaypoint>> list) -> list.stream())
.flatMap((Set<MapWaypoint> set) -> set.stream())
.collect(Collectors.toList());
// push to blocking queue to continue
try {
asyncResult.put(wayPoints);
asyncResult.put(new GeoResult(mapWaypoints, tracks, areas));
} catch (InterruptedException ignored) {
// ignored cancellations
}
@ -406,10 +536,12 @@ public class GeolocationSummary implements DefaultArtifactUpdateGovernor {
* @throws GeoLocationDataException
* @throws InterruptedException
*/
private List<MapWaypoint> getPoints(DataSource dataSource) throws SleuthkitCaseProviderException, GeoLocationDataException, InterruptedException {
private GeoResult getGeoResult(DataSource dataSource)
throws SleuthkitCaseProviderException, GeoLocationDataException, InterruptedException {
// make asynchronous callback synchronous (the callback nature will be handled in a different level)
// see the following: https://stackoverflow.com/questions/20659961/java-synchronous-callback
final BlockingQueue<List<MapWaypoint>> asyncResult = new ArrayBlockingQueue<>(1);
final BlockingQueue<GeoResult> asyncResult = new ArrayBlockingQueue<>(1);
GeoFilter geoFilter = new GeoFilter(true, false, 0, Arrays.asList(dataSource), GPS_ARTIFACT_TYPES);

View File

@ -44,9 +44,9 @@ PastCasesPanel.sameIdLabel.text=Past Cases with the Same Device IDs
DataSourceSummaryTabbedPane.noDataSourceLabel.text=No data source has been selected.
TimelinePanel.activityRangeLabel.text=Activity Range
GeolocationPanel.withinDistanceLabel.text=Locations further than 150km from a city will be listed as 'Unknown'
GeolocationPanel.mostRecentLabel.text=Recent Cities from Geolocation Artifacts
GeolocationPanel.mostRecentLabel.text=Recent Cities from Geolocation Results
GeolocationPanel.withinDistanceLabel1.text=Locations further than 150km from a city will be listed as 'Unknown'
GeolocationPanel.mostCommonLabel.text=Most Common Cities from Geolocation Artifacts
GeolocationPanel.mostCommonLabel.text=Most Common Cities from Geolocation Results
GeolocationPanel.recentViewInGeolocationBtn.text=View in Map
GeolocationPanel.commonViewInGeolocationBtn.text=View in Map
RecentFilesPanel.rightClickForMoreOptions1.text=Right click on row for more options

View File

@ -1,8 +1,6 @@
AnalysisPanel_countColumn_title=Count
AnalysisPanel_keyColumn_title=Name
AnalysisPanel_keywordSearchModuleName=Keyword Search
# {0} - module name
BaseDataSourceSummaryPanel_defaultNotIngestMessage=The {0} ingest module has not been run on this data source.
BaseDataSourceSummaryPanel_goToArtifact=View Source Result
BaseDataSourceSummaryPanel_goToFile=View Source File in Directory
ContainerPanel_setFieldsForNonImageDataSource_na=N/A
@ -111,9 +109,9 @@ PastCasesPanel.sameIdLabel.text=Past Cases with the Same Device IDs
DataSourceSummaryTabbedPane.noDataSourceLabel.text=No data source has been selected.
TimelinePanel.activityRangeLabel.text=Activity Range
GeolocationPanel.withinDistanceLabel.text=Locations further than 150km from a city will be listed as 'Unknown'
GeolocationPanel.mostRecentLabel.text=Recent Cities from Geolocation Artifacts
GeolocationPanel.mostRecentLabel.text=Recent Cities from Geolocation Results
GeolocationPanel.withinDistanceLabel1.text=Locations further than 150km from a city will be listed as 'Unknown'
GeolocationPanel.mostCommonLabel.text=Most Common Cities from Geolocation Artifacts
GeolocationPanel.mostCommonLabel.text=Most Common Cities from Geolocation Results
GeolocationPanel.recentViewInGeolocationBtn.text=View in Map
GeolocationPanel.commonViewInGeolocationBtn.text=View in Map
RecentFilesPanel.rightClickForMoreOptions1.text=Right click on row for more options

View File

@ -18,14 +18,19 @@
*/
package org.sleuthkit.autopsy.modules.leappanalyzers;
import com.fasterxml.jackson.databind.MappingIterator;
import com.fasterxml.jackson.dataformat.csv.CsvMapper;
import com.fasterxml.jackson.dataformat.csv.CsvParser;
import com.fasterxml.jackson.dataformat.csv.CsvSchema;
import com.google.common.collect.ImmutableMap;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.text.DateFormat;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.List;
@ -33,7 +38,6 @@ import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
import static java.util.Locale.US;
@ -46,8 +50,10 @@ import java.util.stream.Stream;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.ParserConfigurationException;
import org.apache.commons.collections4.CollectionUtils;
import org.apache.commons.collections4.MapUtils;
import org.apache.commons.io.FilenameUtils;
import org.apache.commons.lang3.StringUtils;
import org.openide.util.NbBundle;
import org.sleuthkit.autopsy.casemodule.Case;
import static org.sleuthkit.autopsy.casemodule.Case.getCurrentCase;
@ -80,30 +86,30 @@ public final class LeappFileProcessor {
*/
private static class TsvColumn {
private final String attributeName;
private final BlackboardAttribute.Type attributeType;
private final String columnName;
private final boolean required;
/**
* Main constructor.
*
* @param attributeName The BlackboardAttribute name or null if not
* used.
* @param attributeType The BlackboardAttribute type or null if not
* used.
* @param columnName The name of the column in the tsv file.
* @param required Whether or not this attribute is required to be
* present.
*/
TsvColumn(String attributeName, String columnName, boolean required) {
this.attributeName = attributeName;
TsvColumn(BlackboardAttribute.Type attributeType, String columnName, boolean required) {
this.attributeType = attributeType;
this.columnName = columnName;
this.required = required;
}
/**
* @return The BlackboardAttribute name or null if not used.
* @return The BlackboardAttribute type or null if not used.
*/
String getAttributeName() {
return attributeName;
BlackboardAttribute.Type getAttributeType() {
return attributeType;
}
/**
@ -126,10 +132,14 @@ public final class LeappFileProcessor {
private final String moduleName;
private final Map<String, String> tsvFiles;
private final Map<String, String> tsvFileArtifacts;
private final Map<String, BlackboardArtifact.Type> tsvFileArtifacts;
private final Map<String, String> tsvFileArtifactComments;
private final Map<String, List<TsvColumn>> tsvFileAttributes;
private static final Map<String, String> CUSTOM_ARTIFACT_MAP = ImmutableMap.<String, String>builder()
.put("TSK_IP_DHCP", "DHCP Information")
.build();
Blackboard blkBoard;
public LeappFileProcessor(String xmlFile, String moduleName) throws IOException, IngestModuleException, NoCurrentCaseException {
@ -142,6 +152,7 @@ public final class LeappFileProcessor {
blkBoard = Case.getCurrentCaseThrows().getSleuthkitCase().getBlackboard();
createCustomArtifacts(blkBoard);
configExtractor();
loadConfigFile();
@ -224,17 +235,15 @@ public final class LeappFileProcessor {
String fileName = FilenameUtils.getName(LeappFileName);
File LeappFile = new File(LeappFileName);
if (tsvFileAttributes.containsKey(fileName)) {
List<TsvColumn> attrList = tsvFileAttributes.get(fileName);
BlackboardArtifact.Type artifactType = null;
try {
BlackboardArtifact.Type artifactType = Case.getCurrentCase().getSleuthkitCase().getArtifactType(tsvFileArtifacts.get(fileName));
List<TsvColumn> attrList = tsvFileAttributes.get(fileName);
artifactType = tsvFileArtifacts.get(fileName);
processFile(LeappFile, attrList, fileName, artifactType, bbartifacts, LeappImageFile);
} catch (TskCoreException ex) {
throw new IngestModuleException(String.format("Error getting Blackboard Artifact Type for %s", tsvFileArtifacts.get(fileName)), ex);
throw new IngestModuleException(String.format("Error getting Blackboard Artifact Type for %s", artifactType == null ? "<null>" : artifactType.toString()), ex);
}
}
}
if (!bbartifacts.isEmpty()) {
@ -260,16 +269,7 @@ public final class LeappFileProcessor {
File LeappFile = new File(LeappFileName);
if (tsvFileAttributes.containsKey(fileName)) {
List<TsvColumn> attrList = tsvFileAttributes.get(fileName);
BlackboardArtifact.Type artifactType = null;
try {
artifactType = Case.getCurrentCase().getSleuthkitCase().getArtifactType(tsvFileArtifacts.get(fileName));
} catch (TskCoreException ex) {
logger.log(Level.SEVERE, String.format("Error getting Blackboard Artifact Type for %s", tsvFileArtifacts.get(fileName)), ex);
}
if (artifactType == null) {
continue;
}
BlackboardArtifact.Type artifactType = tsvFileArtifacts.get(fileName);
try {
processFile(LeappFile, attrList, fileName, artifactType, bbartifacts, dataSource);
@ -298,193 +298,201 @@ public final class LeappFileProcessor {
return;
}
try (BufferedReader reader = new BufferedReader(new FileReader(LeappFile))) {
String header = reader.readLine();
// Check first line, if it is null then no heading so nothing to match to, close and go to next file.
if (header != null) {
Map<Integer, String> columnNumberToProcess = findColumnsToProcess(fileName, header, attrList);
String line = reader.readLine();
while (line != null) {
Collection<BlackboardAttribute> bbattributes = processReadLine(line, columnNumberToProcess, fileName);
// based on https://stackoverflow.com/questions/56921465/jackson-csv-schema-for-array
try (MappingIterator<List<String>> iterator = new CsvMapper()
.enable(CsvParser.Feature.WRAP_AS_ARRAY)
.readerFor(List.class)
.with(CsvSchema.emptySchema().withColumnSeparator('\t'))
.readValues(LeappFile)) {
if (!bbattributes.isEmpty() && !blkBoard.artifactExists(dataSource, BlackboardArtifact.ARTIFACT_TYPE.fromID(artifactType.getTypeID()), bbattributes)) {
if (iterator.hasNext()) {
List<String> headerItems = iterator.next();
Map<String, Integer> columnIndexes = IntStream.range(0, headerItems.size())
.mapToObj(idx -> idx)
.collect(Collectors.toMap(
idx -> headerItems.get(idx) == null ? null : headerItems.get(idx).trim().toLowerCase(),
idx -> idx,
(val1, val2) -> val1));
int lineNum = 2;
while (iterator.hasNext()) {
List<String> columnItems = iterator.next();
Collection<BlackboardAttribute> bbattributes = processReadLine(columnItems, columnIndexes, attrList, fileName, lineNum);
if (!bbattributes.isEmpty()) {
BlackboardArtifact bbartifact = createArtifactWithAttributes(artifactType.getTypeID(), dataSource, bbattributes);
if (bbartifact != null) {
bbartifacts.add(bbartifact);
}
}
line = reader.readLine();
lineNum++;
}
}
}
}
/**
* Process the line read and create the necessary attributes for it
* Process the line read and create the necessary attributes for it.
*
* @param line a tsv line to process that was read
* @param columnNumberToProcess Which columns to process in the tsv line
* @param fileName name of file begin processed
*
* @return
* @param lineValues List of column values.
* @param columnIndexes Mapping of column headers (trimmed; to lower case)
* to column index. All header columns and only all header columns should be
* present.
* @param attrList The list of attributes as specified for the schema of
* this file.
* @param fileName The name of the file being processed.
* @param lineNum The line number in the file.
* @return The collection of blackboard attributes for the artifact created
* from this line.
* @throws IngestModuleException
*/
private Collection<BlackboardAttribute> processReadLine(String line, Map<Integer, String> columnNumberToProcess, String fileName) throws IngestModuleException {
if (MapUtils.isEmpty(columnNumberToProcess)) {
private Collection<BlackboardAttribute> processReadLine(List<String> lineValues, Map<String, Integer> columnIndexes,
List<TsvColumn> attrList, String fileName, int lineNum) throws IngestModuleException {
if (MapUtils.isEmpty(columnIndexes) || CollectionUtils.isEmpty(lineValues)
|| (lineValues.size() == 1 && StringUtils.isEmpty(lineValues.get(0)))) {
return Collections.emptyList();
} else if (line == null) {
logger.log(Level.WARNING, "Line is null. Returning empty list for attributes.");
} else if (lineValues.size() != columnIndexes.size()) {
logger.log(Level.WARNING, String.format(
"Row at line number %d in file %s has %d columns when %d were expected based on the header row.",
lineNum, fileName, lineValues.size(), columnIndexes.size()));
return Collections.emptyList();
}
String[] columnValues;
// Check to see if the 2 values are equal, they may not be equal if there is no corresponding data in the line.
// or if the size of the line to split is not equal to the column numbers we are looking to process. This
// can happen when the last value of the tsv line has no data in it.
// If this happens then adding an empty value(s) for each columnValue where data does not exist
Integer maxColumnNumber = Collections.max(columnNumberToProcess.keySet());
if ((maxColumnNumber > line.split("\\t").length) || (columnNumberToProcess.size() > line.split("\\t").length)) {
columnValues = Arrays.copyOf(line.split("\\t"), maxColumnNumber + 1);
} else {
columnValues = line.split("\\t");
}
Collection<BlackboardAttribute> bbattributes = new ArrayList<BlackboardAttribute>();
for (Map.Entry<Integer, String> columnToProcess : columnNumberToProcess.entrySet()) {
Integer columnNumber = columnToProcess.getKey();
String attributeName = columnToProcess.getValue();
if (columnValues[columnNumber] != null) {
try {
BlackboardAttribute.Type attributeType = Case.getCurrentCase().getSleuthkitCase().getAttributeType(attributeName.toUpperCase());
if (attributeType == null) {
continue;
}
String attrType = attributeType.getValueType().getLabel().toUpperCase();
checkAttributeType(bbattributes, attrType, columnValues, columnNumber, attributeType, fileName);
} catch (TskCoreException ex) {
throw new IngestModuleException(String.format("Error getting Attribute type for Attribute Name %s", attributeName), ex); //NON-NLS
}
List<BlackboardAttribute> attrsToRet = new ArrayList<>();
for (TsvColumn colAttr : attrList) {
if (colAttr.getAttributeType() == null) {
// this handles columns that are currently ignored.
continue;
}
Integer columnIdx = columnIndexes.get(colAttr.getColumnName());
if (columnIdx == null) {
logger.log(Level.WARNING, String.format("No column mapping found for %s in file %s. Omitting column.", colAttr.getColumnName(), fileName));
continue;
}
String value = (columnIdx >= lineValues.size() || columnIdx < 0) ? null : lineValues.get(columnIdx);
if (value == null) {
logger.log(Level.WARNING, String.format("No value found for column %s at line %d in file %s. Omitting row.", colAttr.getColumnName(), lineNum, fileName));
return Collections.emptyList();
}
BlackboardAttribute attr = (value == null) ? null : getAttribute(colAttr.getAttributeType(), value, fileName);
if (attr == null) {
logger.log(Level.WARNING, String.format("Blackboard attribute could not be parsed column %s at line %d in file %s. Omitting row.", colAttr.getColumnName(), lineNum, fileName));
return Collections.emptyList();
}
attrsToRet.add(attr);
}
if (tsvFileArtifactComments.containsKey(fileName)) {
bbattributes.add(new BlackboardAttribute(ATTRIBUTE_TYPE.TSK_COMMENT, moduleName, tsvFileArtifactComments.get(fileName)));
}
return bbattributes;
}
/**
 * Converts the value at the given column index to the data type required by
 * the blackboard attribute type and, on success, adds the resulting
 * attribute to the supplied collection. Values that cannot be parsed are
 * logged and skipped; no attribute is added for them.
 *
 * @param bbattributes  Collection that receives the newly created attribute.
 * @param attrType      The label of the attribute value type (e.g. "STRING",
 *                      "INTEGER", "DATETIME").
 * @param columnValues  The values split out of the current tsv line.
 * @param columnNumber  Index into columnValues of the value to convert.
 * @param attributeType The blackboard attribute type to create.
 * @param fileName      The tsv file name (used in log messages only).
 */
private void checkAttributeType(Collection<BlackboardAttribute> bbattributes, String attrType, String[] columnValues, int columnNumber, BlackboardAttribute.Type attributeType,
        String fileName) {
    // Bounds check uses >= : the previous > allowed columnNumber == columnValues.length,
    // which made the columnValues[columnNumber] null check below throw
    // ArrayIndexOutOfBoundsException instead of logging and returning.
    if (columnValues == null || columnNumber < 0 || columnNumber >= columnValues.length || columnValues[columnNumber] == null) {
        logger.log(Level.WARNING, String.format("Unable to determine column value at index %d in columnValues: %s",
                columnNumber,
                columnValues == null ? "<null>" : "[" + String.join(", ", columnValues) + "]"));
        return;
    }

    String columnValue = columnValues[columnNumber];
    // The type labels contain no regex metacharacters, so equals() is equivalent to the
    // previous matches() calls and avoids compiling a regex for each comparison.
    if (attrType.equals("STRING")) {
        bbattributes.add(new BlackboardAttribute(attributeType, moduleName, columnValue));
    } else if (attrType.equals("INTEGER")) {
        try {
            // parse as double to handle values of format like '21.0' and then convert to int
            bbattributes.add(new BlackboardAttribute(attributeType, moduleName, Double.valueOf(columnValue).intValue()));
        } catch (NumberFormatException ex) {
            logger.log(Level.WARNING, String.format("Unable to format %s as an integer.", columnValue), ex);
        }
    } else if (attrType.equals("LONG")) {
        try {
            // parse as double to handle values of format like '21.0' and then convert to long
            bbattributes.add(new BlackboardAttribute(attributeType, moduleName, Double.valueOf(columnValue).longValue()));
        } catch (NumberFormatException ex) {
            logger.log(Level.WARNING, String.format("Unable to format %s as an long.", columnValue), ex);
        }
    } else if (attrType.equals("DOUBLE")) {
        try {
            bbattributes.add(new BlackboardAttribute(attributeType, moduleName, Double.valueOf(columnValue)));
        } catch (NumberFormatException ex) {
            logger.log(Level.WARNING, String.format("Unable to format %s as an double.", columnValue), ex);
        }
    } else if (attrType.equals("BYTE")) {
        try {
            bbattributes.add(new BlackboardAttribute(attributeType, moduleName, Byte.valueOf(columnValue)));
        } catch (NumberFormatException ex) {
            logger.log(Level.WARNING, String.format("Unable to format %s as an byte.", columnValue), ex);
        }
    } else if (attrType.equals("DATETIME")) {
        // format of data should be the same in all the data and the format is 2020-03-28 01:00:17
        // SimpleDateFormat is not thread-safe, so a fresh instance is created per call.
        SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-d HH:mm:ss", US);
        try {
            Date newDate = dateFormat.parse(columnValue);
            long dateLong = newDate.getTime() / 1000;
            bbattributes.add(new BlackboardAttribute(attributeType, moduleName, dateLong));
        } catch (ParseException ex) {
            // catching error and displaying date that could not be parsed;
            // no attribute is added and processing continues
            logger.log(Level.WARNING, String.format("Failed to parse date/time %s for attribute type %s in file %s.", columnValue, attributeType.getDisplayName(), fileName)); //NON-NLS
        }
    } else if (attrType.equals("JSON")) {
        bbattributes.add(new BlackboardAttribute(attributeType, moduleName, columnValue));
    } else {
        // Log this and continue on with processing
        logger.log(Level.WARNING, String.format("Attribute Type %s not defined.", attrType)); //NON-NLS
    }
}
/**
* Process the first line of the tsv file which has the headings. Match the
* headings to the columns in the XML mapping file so we know which columns
* to process.
*
* @param fileName The name of the file in which these column headers exist.
* @param line a tsv heading line of the columns in the file
* @param attrList the list of headings we want to process
*
* @return the numbered column(s) and attribute(s) we want to use for the
* column(s)
* The format of time stamps in tsv.
*/
private Map<Integer, String> findColumnsToProcess(String fileName, String line, List<TsvColumn> attrList) {
String[] columnNames = line.split("\\t");
HashMap<Integer, String> columnsToProcess = new HashMap<>();
private static final DateFormat TIMESTAMP_FORMAT = new SimpleDateFormat("yyyy-MM-d HH:mm:ss", US);
Integer columnPosition = 0;
for (String columnName : columnNames) {
// for some reason the first column of the line has unprintable characters so removing them
String cleanColumnName = columnName.trim().replaceAll("[^\\n\\r\\t\\p{Print}]", "");
for (TsvColumn tsvColumn : attrList) {
if (cleanColumnName.equalsIgnoreCase(tsvColumn.getColumnName())) {
columnsToProcess.put(columnPosition, tsvColumn.getAttributeName());
break;
}
}
columnPosition++;
/**
* Gets an appropriate attribute based on the attribute type and string
* value.
*
* @param attrType The attribute type.
* @param value The string value to be converted to the appropriate data
* type for the attribute type.
* @param fileName The file name that the value comes from.
* @return The generated blackboard attribute.
*/
private BlackboardAttribute getAttribute(BlackboardAttribute.Type attrType, String value, String fileName) {
if (attrType == null || value == null) {
logger.log(Level.WARNING, String.format("Unable to parse attribute type %s for value '%s' in fileName %s",
attrType == null ? "<null>" : attrType.toString(),
value == null ? "<null>" : value,
fileName == null ? "<null>" : fileName));
return null;
}
if (columnsToProcess.size() != attrList.size()) {
String missingColumns = IntStream.range(0, attrList.size())
.filter((idx) -> !columnsToProcess.containsKey(attrList.get(idx).getAttributeName()))
.mapToObj((idx) -> String.format("'%s'", attrList.get(idx).getColumnName() == null ? "<null>" : attrList.get(idx).getColumnName()))
.collect(Collectors.joining(", "));
switch (attrType.getValueType()) {
case JSON:
case STRING:
return parseAttrValue(value, attrType, fileName, false, false,
(v) -> new BlackboardAttribute(attrType, moduleName, v));
case INTEGER:
return parseAttrValue(value.trim(), attrType, fileName, true, false,
(v) -> new BlackboardAttribute(attrType, moduleName, Double.valueOf(v).intValue()));
case LONG:
return parseAttrValue(value.trim(), attrType, fileName, true, false,
(v) -> new BlackboardAttribute(attrType, moduleName, Double.valueOf(v).longValue()));
case DOUBLE:
return parseAttrValue(value.trim(), attrType, fileName, true, false,
(v) -> new BlackboardAttribute(attrType, moduleName, (double) Double.valueOf(v)));
case BYTE:
return parseAttrValue(value.trim(), attrType, fileName, true, false,
(v) -> new BlackboardAttribute(attrType, moduleName, new byte[]{Byte.valueOf(v)}));
case DATETIME:
return parseAttrValue(value.trim(), attrType, fileName, true, true,
(v) -> new BlackboardAttribute(attrType, moduleName, TIMESTAMP_FORMAT.parse(v).getTime() / 1000));
default:
// Log this and continue on with processing
logger.log(Level.WARNING, String.format("Attribute Type %s for file %s not defined.", attrType, fileName)); //NON-NLS
return null;
}
}
logger.log(Level.WARNING, String.format("Columns size expected not found in file %s based on xml from %s. Column Keys Missing = [%s]; Header Line = '%s'.",
this.xmlFile == null ? "<null>" : this.xmlFile,
fileName,
missingColumns,
line));
/**
 * Handles converting a string to a blackboard attribute. Marked
 * {@code @FunctionalInterface} since it has a single abstract method and is
 * implemented via lambdas in getAttribute.
 */
@FunctionalInterface
private interface ParseExceptionFunction {

    /**
     * Handles converting a string value to a blackboard attribute.
     *
     * @param orig The original string value.
     *
     * @return The generated blackboard attribute.
     *
     * @throws ParseException        If the value cannot be parsed as a
     *                               date/time.
     * @throws NumberFormatException If the value cannot be parsed as a
     *                               number.
     */
    BlackboardAttribute apply(String orig) throws ParseException, NumberFormatException;
}
/**
 * Runs parsing function on string value to convert to right data type and
 * generates a blackboard attribute for that converted data type.
 *
 * @param value          The string value.
 * @param attrType       The blackboard attribute type.
 * @param fileName       The name of the file from which the value comes
 *                       (used in log messages only).
 * @param blankIsNull    If string is blank return null attribute.
 * @param zeroIsNull     If string is some version of 0, return null
 *                       attribute.
 * @param valueConverter The means of converting the string value to an
 *                       appropriate blackboard attribute.
 *
 * @return The generated blackboard attribute or null if not determined.
 */
private BlackboardAttribute parseAttrValue(String value, BlackboardAttribute.Type attrType, String fileName, boolean blankIsNull, boolean zeroIsNull, ParseExceptionFunction valueConverter) {
    // remove non-printable characters from tsv input
    // https://stackoverflow.com/a/6199346
    value = value.replaceAll("\\p{C}", "");

    if (blankIsNull && StringUtils.isBlank(value)) {
        return null;
    }

    // matches strings consisting only of zeros and dots (e.g. "0", "0.0", "000")
    if (zeroIsNull && value.matches("^\\s*[0\\.]*\\s*$")) {
        return null;
    }

    try {
        return valueConverter.apply(value);
    } catch (NumberFormatException | ParseException ex) {
        logger.log(Level.WARNING, String.format("Unable to format '%s' as value type %s while converting to attributes from %s.", value, attrType.getValueType().getLabel(), fileName), ex);
        return null;
    }
}
@NbBundle.Messages({
@ -552,10 +560,10 @@ public final class LeappFileProcessor {
if (foundArtifactType == null) {
logger.log(Level.SEVERE, String.format("No known artifact mapping found for [artifact: %s, %s]",
artifactName, getXmlFileIdentifier(parentName)));
} else {
tsvFileArtifacts.put(parentName, foundArtifactType);
}
tsvFileArtifacts.put(parentName, artifactName);
if (!comment.toLowerCase().matches("null")) {
tsvFileArtifactComments.put(parentName, comment);
}
@ -612,8 +620,8 @@ public final class LeappFileProcessor {
}
TsvColumn thisCol = new TsvColumn(
attributeName.toLowerCase(),
columnName.toLowerCase(),
foundAttrType,
columnName.trim().toLowerCase(),
"yes".compareToIgnoreCase(required) == 0);
if (tsvFileAttributes.containsKey(parentName)) {
@ -630,29 +638,6 @@ public final class LeappFileProcessor {
}
}
/**
 * Generic method for creating a blackboard artifact with attributes
 *
 * @param type is a blackboard.artifact_type enum to determine which type
 * the artifact should be
 * @param abstractFile is the AbstractFile object that needs to have the
 * artifact added for it
 * @param bbattributes is the collection of blackboard attributes that need
 * to be added to the artifact after the artifact has been created
 *
 * @return The newly-created artifact, or null on error
 */
private BlackboardArtifact createArtifactWithAttributes(int type, AbstractFile abstractFile, Collection<BlackboardAttribute> bbattributes) {
    BlackboardArtifact artifact = null;
    try {
        artifact = abstractFile.newArtifact(type);
        artifact.addAttributes(bbattributes);
    } catch (TskException ex) {
        // Creation or attribute population failed; log and fall through to return null.
        logger.log(Level.WARNING, Bundle.LeappFileProcessor_error_creating_new_artifacts(), ex); //NON-NLS
        artifact = null;
    }
    return artifact;
}
/**
* Generic method for creating a blackboard artifact with attributes
*
@ -704,7 +689,6 @@ public final class LeappFileProcessor {
xmlFile, true);
}
private static final Set<String> ALLOWED_EXTENSIONS = new HashSet<>(Arrays.asList("zip", "tar", "tgz"));
/**
@ -740,4 +724,23 @@ public final class LeappFileProcessor {
return leappFilesToProcess;
}
/**
 * Create custom artifacts that are defined in the xLeapp xml file(s).
 *
 * @param blkBoard The blackboard in which to register the custom artifact
 *                 types.
 */
private void createCustomArtifacts(Blackboard blkBoard) {
    for (Map.Entry<String, String> customArtifact : CUSTOM_ARTIFACT_MAP.entrySet()) {
        String artifactName = customArtifact.getKey();
        String artifactDescription = customArtifact.getValue();
        try {
            // Called for its side effect of registering the type; the returned
            // type object is not needed here (previously stored in an unused local).
            blkBoard.getOrAddArtifactType(artifactName, artifactDescription);
        } catch (Blackboard.BlackboardException ex) {
            logger.log(Level.WARNING, String.format("Failed to create custom artifact type %s.", artifactName), ex);
        }
    }
}
}

View File

@ -66,7 +66,7 @@
<FileName filename="Browser cookies.tsv" description="Browser Cookies">
<ArtifactName artifactname="TSK_WEB_COOKIE" comment="Browser Cookies">
<AttributeName attributename="TSK_DATETIME_ACCESSED" columnName="Last Access Date" required="yes" />
<AttributeName attributename="TSK_DATETIME_START" columnName="Last Access Date" required="yes" />
<AttributeName attributename="TSK_DOMAIN" columnName="Host" required="yes" />
<AttributeName attributename="TSK_NAME" columnName="Name" required="yes" />
<AttributeName attributename="TSK_VALUE" columnName="Value" required="yes" />
@ -97,7 +97,7 @@
<FileName filename="Browser login data.tsv" description="Browser Login Data">
<ArtifactName artifactname="TSK_SERVICE_ACCOUNT" comment="Browser Login">
<AttributeName attributename="TSK_DATETIME_CREATED" columnName="Created Time" required="yes" />
<AttributeName attributename="TSK_USER_NAME" columnName="Username" required="yes" />
<AttributeName attributename="TSK_USER_ID" columnName="Username" required="yes" />
<AttributeName attributename="TSK_PASSWORD" columnName="Password" required="yes" />
<AttributeName attributename="TSK_URL" columnName="Origin URL" required="no" />
<AttributeName attributename="null" columnName="Blacklisted by User" required="no" />
@ -163,7 +163,7 @@
<FileName filename="Chrome cookies.tsv" description="Chrome Cookies">
<ArtifactName artifactname="TSK_WEB_COOKIE" comment="Chrome Cookies">
<AttributeName attributename="TSK_DATETIME_ACCESSED" columnName="Last Access Date" required="yes" />
<AttributeName attributename="TSK_DATETIME_START" columnName="Last Access Date" required="yes" />
<AttributeName attributename="TSK_DOMAIN" columnName="Host" required="yes" />
<AttributeName attributename="TSK_NAME" columnName="Name" required="yes" />
<AttributeName attributename="TSK_VALUE" columnName="Value" required="yes" />
@ -186,7 +186,7 @@
<FileName filename="Chrome login data.tsv" description="Chrome Login Data">
<ArtifactName artifactname="TSK_SERVICE_ACCOUNT" comment="Chrome Login">
<AttributeName attributename="TSK_DATETIME_CREATED" columnName="Created Time" required="yes" />
<AttributeName attributename="TSK_USER_NAME" columnName="Username" required="yes" />
<AttributeName attributename="TSK_USER_ID" columnName="Username" required="yes" />
<AttributeName attributename="TSK_PASSWORD" columnName="Password" required="yes" />
<AttributeName attributename="TSK_URL" columnName="Origin URL" required="no" />
<AttributeName attributename="null" columnName="Blacklisted by User" required="no" />
@ -236,7 +236,7 @@
<FileName filename="Edge cookies.tsv" description="Edge Cookies">
<ArtifactName artifactname="TSK_WEB_COOKIE" comment="Edge Cookies">
<AttributeName attributename="TSK_DATETIME_ACCESSED" columnName="Last Access Date" required="yes" />
<AttributeName attributename="TSK_DATETIME_START" columnName="Last Access Date" required="yes" />
<AttributeName attributename="TSK_DOMAIN" columnName="Host" required="yes" />
<AttributeName attributename="TSK_NAME" columnName="Name" required="yes" />
<AttributeName attributename="TSK_VALUE" columnName="Value" required="yes" />
@ -259,7 +259,7 @@
<FileName filename="Edge login data.tsv" description="Edge Login Data">
<ArtifactName artifactname="TSK_SERVICE_ACCOUNT" comment="Edge Login">
<AttributeName attributename="TSK_DATETIME_CREATED" columnName="Created Time" required="yes" />
<AttributeName attributename="TSK_USER_NAME" columnName="Username" required="yes" />
<AttributeName attributename="TSK_USER_ID" columnName="Username" required="yes" />
<AttributeName attributename="TSK_PASSWORD" columnName="Password" required="yes" />
<AttributeName attributename="TSK_URL" columnName="Origin URL" required="no" />
<AttributeName attributename="null" columnName="Blacklisted by User" required="no" />

View File

@ -28,16 +28,16 @@
-->
<iLeap_Files_To_Process>
<FileName filename="Account Data.tsv" description="Account Data">
<!-- <FileName filename="Account Data.tsv" description="Account Data">
<ArtifactName artifactname="TSK_ACCOUNT" comment="Account Data">
<AttributeName attributename="TSK_DATETIME" columnName="Timestamp" required="yes" />
<AttributeName attributename="TSK_PROG_NAME" columnName="Account Desc." required="yes" />
<AttributeName attributename="TSK_USER_NAME" columnName="Username" required="yes" />
<AttributeName attributename="TSK_ACCOUNT_TYPE" columnName="Account Desc." required="yes" />
<AttributeName attributename="TSK_ID" columnName="Username" required="yes" />
<AttributeName attributename="null" columnName="Description" required="no" />
<AttributeName attributename="TSK_PATH" columnName="Identifier" required="yes" />
<AttributeName attributename="null" columnName="Bundle ID" required="no" />
</ArtifactName>
</FileName>
</FileName> -->
<FileName filename="Application State.tsv" description="Application State">
<ArtifactName artifactname="TSK_INSTALLED_PROG" comment="Application State">
@ -68,7 +68,7 @@
<FileName filename="Bluetooth paired.tsv" description="Bluetooth Paired">
<ArtifactName artifactname="TSK_BLUETOOTH_PAIRING" comment="Bluetooth Paired">
<AttributeName attributename="TSK_DEVICE_ID" columnName="UUID" required="yes" />
<AttributeName attributename="TSK_NAME" columnName="Name" required="yes" />
<AttributeName attributename="TSK_DEVICE_NAME" columnName="Name" required="yes" />
<AttributeName attributename="null" columnName="Name Origin" required="no" />
<AttributeName attributename="null" columnName="Address" required="no" />
<AttributeName attributename="null" columnName="Resolved Address" required="no" />
@ -143,7 +143,7 @@
<AttributeName attributename="null" columnName="Start" required="no" />
<AttributeName attributename="null" columnName="End" required="no" />
<AttributeName attributename="null" columnName="ZSTREAMNAME" required="no" />
<AttributeName attributename="TSK_PROG_NAME" columnName="ZVALUESTRING" required="no" />
<AttributeName attributename="TSK_PROG_NAME" columnName="Value String" required="no" />
<AttributeName attributename="null" columnName="Activity Type" required="no" />
<AttributeName attributename="null" columnName="Title" required="no" />
<AttributeName attributename="null" columnName="Expiration Date" required="no" />

View File

@ -28,6 +28,7 @@ import java.util.function.Predicate;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.ParserConfigurationException;
import org.sleuthkit.autopsy.core.UserPreferences;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.tidy.Tidy;
@ -74,7 +75,7 @@ public interface ManifestFileParser {
static Path makeTidyManifestFile(Path filePath) throws IOException {
File tempFile = null;
try{
tempFile = File.createTempFile("mani", "tdy", filePath.getParent().toFile());
tempFile = File.createTempFile("mani", "tdy", new File(System.getProperty("java.io.tmpdir")));
try (FileInputStream br = new FileInputStream(filePath.toFile()); FileOutputStream out = new FileOutputStream(tempFile);) {
Tidy tidy = new Tidy();

View File

@ -1184,7 +1184,7 @@ public class Server {
private boolean collectionExists(String collectionName) throws SolrServerException, IOException {
CollectionAdminRequest.List req = new CollectionAdminRequest.List();
CollectionAdminResponse response = req.process(remoteSolrServer);
List<String> existingCollections = (List<String>) response.getResponse().get("collections");
List<?> existingCollections = (List<?>) response.getResponse().get("collections");
if (existingCollections == null) {
existingCollections = new ArrayList<>();
}
@ -1515,7 +1515,7 @@ public class Server {
}
}
NamedList<Object> request(SolrRequest request) throws SolrServerException, RemoteSolrException, NoOpenCoreException {
NamedList<Object> request(SolrRequest<?> request) throws SolrServerException, RemoteSolrException, NoOpenCoreException {
currentCoreLock.readLock().lock();
try {
if (null == currentCollection) {
@ -1931,12 +1931,13 @@ public class Server {
return Collections.emptyList();
}
NamedList error = (NamedList) statusResponse.getResponse().get("error");
NamedList<?> error = (NamedList) statusResponse.getResponse().get("error");
if (error != null) {
return Collections.emptyList();
}
NamedList cluster = (NamedList) statusResponse.getResponse().get("cluster");
NamedList<?> cluster = (NamedList) statusResponse.getResponse().get("cluster");
@SuppressWarnings("unchecked")
ArrayList<String> liveNodes = (ArrayList) cluster.get("live_nodes");
return liveNodes;
} catch (Exception ex) {
@ -2091,7 +2092,7 @@ public class Server {
return queryClient.query(sq);
}
private NamedList<Object> request(SolrRequest request) throws SolrServerException, RemoteSolrException {
private NamedList<Object> request(SolrRequest<?> request) throws SolrServerException, RemoteSolrException {
try {
return queryClient.request(request);
} catch (Exception e) {

View File

@ -1,5 +1,5 @@
#Updated by build script
#Tue, 19 Jan 2021 11:34:51 -0500
#Mon, 25 Jan 2021 12:41:22 -0500
LBL_splash_window_title=Starting Autopsy
SPLASH_HEIGHT=314
SPLASH_WIDTH=538

View File

@ -1,4 +1,4 @@
#Updated by build script
#Tue, 19 Jan 2021 11:34:51 -0500
#Mon, 25 Jan 2021 12:41:22 -0500
CTL_MainWindow_Title=Autopsy 4.18.0
CTL_MainWindow_Title_No_Project=Autopsy 4.18.0

View File

@ -124,6 +124,18 @@ When there are multiple path options in the filter, they will be applied as foll
This allows you to, for example, make rules to include both the "My Documents" and the "My Pictures" folders.
\subsubsection file_disc_prev_notable_filter Previously Notable Filter
The previously notable filter is for domain searches only and is used to restrict results to only those domains that have previously been marked as "Notable" in the \ref central_repo_page.
\image html FileDiscovery/fd_notableFilter.png
\subsubsection file_disc_known_account_filter Known Account Type Filter
The known account type filter is for domain searches only and is used to restrict results to only those domains that have a known account type.
\image html FileDiscovery/fd_knownAccountFilter.png
\subsubsection file_disc_result_filter Result Type Filter
The result type filter is for domain searches only and can be used to restrict which types of web results the domains can come from.
@ -158,7 +170,7 @@ The last grouping and sorting option is choosing how to sort the results within
\subsection file_disc_results_overview Overview
Once you select your options and click "Search", you'll see a new window with the list of groups on the left side. Selecting one of these groups will display the results from that group on the right side. For image, video, and document searches, selecting a result will cause a panel to rise showing more details about each instance of that result. You can manually raise and lower this panel using the large arrows on the right side of the divider. This panel is disabled for domain searches.
Once you select your options and click "Search", you'll see a new window with the list of groups on the left side. Selecting one of these groups will display the results from that group on the right side. Selecting a result will cause a panel to rise showing more details about each instance of that result. You can manually raise and lower this panel using the large arrows on the right side of the divider.
If your results are images, you'll see thumbnails for each image in the top area of the right panel.
@ -182,6 +194,10 @@ For image, video, and document searches, when you select a result from the top o
The bottom section of the panel is identical to the standard \ref content_viewer_page and displays data corresponding to the file instance selected in the middle of the panel.
For domain searches, when you select a domain in the top of the right panel you'll see a details area that is a variation on the \ref content_viewer_page. The first tab on details panel displays a simple timeline - selecting a date will show all the results from that date in the center of the panel, with details for the selected result on the right. The other tabs (Web Bookmarks, Web Cookies, etc.) display results of the selected type with a list of results on the left and more details on the right. You can right-click on results to use most of options available in the normal \ref result_viewer_page.
\image html FileDiscovery/fd_domainDetails.png
\subsection file_disc_dedupe De-duplication
This section only applies to image, video and document searches.

Binary file not shown.

After

Width:  |  Height:  |  Size: 50 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 59 KiB

After

Width:  |  Height:  |  Size: 70 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 2.0 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.8 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 30 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 17 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 83 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 72 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 90 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 13 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 19 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 91 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 105 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 5.2 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 6.4 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 7.1 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 6.4 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 90 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 74 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 16 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 64 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 92 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 73 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 8.0 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 57 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 7.7 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.4 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 100 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 52 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 29 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 59 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 70 KiB

View File

@ -13,6 +13,8 @@ Help Topics
The following topics are available here:
- \subpage installation_page
- Notable Upgrades
- \subpage upgrade_solr8_page
- Configuration
- \subpage config_page
- \subpage performance_page

View File

@ -4,19 +4,18 @@
\section multiuser_install_clients Overview
Once the infrastructure is in place, you can configure Autopsy clients to use them.
- Install Autopsy on each client system. Use the normal installer and pick the defaults.
- Test that the user has access to the shared storage by opening the shared storage folders using Windows Explorer. If a password prompt is given, then enter the password and store the credentials (see \ref multiuser_users_store).
- Start Autopsy and open the multi-user settings panel from "Tools", "Options", "Multi-user". As shown in the screenshot below, you can then enter all of the address and authentication information for the network-based services. Note that in order to create or open Multi-user cases, "Enable Multi-user cases" must be checked and the settings below must be correct.
\image html multiuser_settings.PNG
- For each setting, press the "Test" button to ensure that Autopsy can communicate with each service. If any fail, then refer to the specific setup page for testing options. Also check that a firewall is not blocking the communications.
- NOTE: None of these tests are for permissions on the shared storage because Autopsy does not know about the shared storage. It can't test that until you make a case.
- Make a test case (see \ref creating_multi_user_cases). You can add a single file in as a logical data source. The key concept is to look for errors.
- If you find errors, look for errors in the log file on the Autopsy client.
- If you followed all of the previous steps in all of the previous pages, then a common error at this point is that Solr cannot access the shared storage and it is running as a Service account. When this happens, you'll see an error message about Solr not being able to create or access a "core". If this happens, review what user Solr should be running as (see \ref multiuser_users_solr) and change the shared storage configuration or ensure that credentials are stored.
<ol>
<li>Install Autopsy on each client system. Use the normal installer and pick the defaults.
<li>Test that the user has access to the shared storage by opening the shared storage folders using Windows Explorer. If a password prompt is given, then enter the password and store the credentials (see \ref multiuser_users_store).
<li>Start Autopsy and open the multi-user settings panel from "Tools", "Options", "Multi-user". As shown in the screenshot below, you can then enter all of the address and authentication information for the network-based services. Note that in order to create or open Multi-user cases, "Enable Multi-user cases" must be checked and the settings below must be correct.
\image html solr_autopsy.png
<li>For each setting, press the "Test Connection" button to ensure that Autopsy can communicate with each service. If any fail, then refer to the specific setup page for testing options. Also check that a firewall is not blocking the communications.
<ul><li>NOTE: None of these tests are for permissions on the shared storage because Autopsy does not know about the shared storage. It can't test that until you make a case.</ul>
<li>Make a test case (see \ref creating_multi_user_cases). You can add a single file in as a logical data source. The key concept is to look for errors.
<ul>
<li>If you find errors, look for errors in the log file on the Autopsy client.
<li>If you followed all of the previous steps in all of the previous pages, then a common error at this point is that Solr cannot access the shared storage and it is running as a Service account. When this happens, you'll see an error message about Solr not being able to create or access a "core". If this happens, review what user Solr should be running as (see the \ref multiuser_users_solr section) and change the shared storage configuration or ensure that credentials are stored.</ul>
</ol>
*/

View File

@ -4,7 +4,7 @@
The cluster will need shared storage that can be accessed from:
- Autopsy clients
- Solr server
- Solr server (depending on configuration)
This shared storage will be used for both data sources and case outputs, so you will need lots of space.

View File

@ -1,149 +1,215 @@
/*! \page install_solr_page Install and Configure Solr
[TOC]
\section install_solr_overview Overview
Autopsy uses Apache Solr to store keyword text indexes. A central server is needed in a multi-user cluster to maintain and search the indexes.
Autopsy uses Apache Solr to store keyword text indexes. A central server is needed in a multi-user cluster to maintain and search the indexes.
A new text index is created for each case and is stored in the case folder on shared storage (not on the local drive of the Solr server).
A new text index is created for each case. The index can be stored either on shared storage or on the local drive of the Solr server(s) (large amount of local storage is required).
Solr's embedded ZooKeeper is also used as a coordination service for Autopsy.
If you have already installed Solr 4 with a previous version of Autopsy, please see the \ref upgrade_solr8_page page for information on how to open older cases after the upgrade and migrate data.
NOTE: This document assumes you will be running Solr on Windows as a service. You can run it as a non-service or on another platform, but you'll need to understand the steps in this document to make that happen.
\section install_solr_prereq Prerequisites
We use Bitnami Solr, which packages Solr as a Windows service.
You will need:
- A 64-bit version of the Java 8 Runtime Environment (JRE) from https://github.com/ojdkbuild/ojdkbuild. (<a href="https://github.com/ojdkbuild/ojdkbuild/releases/download/java-1.8.0-openjdk-1.8.0.242-1.b08/java-1.8.0-openjdk-1.8.0.242-1.b08.ojdkbuild.windows.x86_64.msi"> Link to installer</a>)
- The Apache Solr 4.10.3-0 installation package. This is no longer available from its original source, but you can find it on our site: https://sourceforge.net/projects/autopsy/files/CollaborativeServices/Solr.
-- NOTE: We tested Solr 6 at one point, but ran into stability problems when loading and unloading cores. For now, you need to use Solr 4.
- An installed version of Autopsy so that you can copy files from it. You can install Autopsy on one of the planned client systems. You do not need to install it on the Solr server.
- A network-accessible machine to install Solr on. Note that the Solr process will need to write data out to the main shared storage drive, and needs adequate permissions to write to this location, which may be across a network.
<ul>
<li>A 64-bit version of the Java 8 Runtime Environment (JRE) from <a href="https://github.com/ojdkbuild/ojdkbuild">https://github.com/ojdkbuild/ojdkbuild</a>. (<a href="https://github.com/ojdkbuild/ojdkbuild/blob/master/README.md">Download links</a>)
<li>Pre-packaged Autopsy version of Solr from <a href="https://sourceforge.net/projects/autopsy/files/CollaborativeServices/Solr/SOLR_8.6.3_AutopsyService.zip/download">here</a>. This contains Solr, <a href="https://nssm.cc/">NSSM</a> to make it run as a service, and the needed schema config files.
<li>A network-accessible machine to install Solr on. Note that the Solr process may need to write to a shared storage drive (if that is how you configure it) and will therefore need adequate permissions.
</ul>
\section install_solr_install Solr Installation
\section install_solr_install Installation
\subsection install_solr_jre JRE Installation
Solr requires a Java Runtime Environment (JRE), which may already be installed. You can test this by running \c "where java" from the command line. If you see output similar to the results below, you have a JRE.
\subsection install_solr_install_java JRE Installation
1. Install the Java JRE if needed. You can test this by running _where java_ from the command line. If you see output similar to the results below, you have a JRE.
<br><br>
\image html wherejava.PNG
<br><br>
If you need the JRE, install it with the default settings.
\subsection install_solr_install_solr Solr Installation
The following steps will configure Solr to run using an account that will have access to the network storage.
1. Run the Bitnami installer, <i>"bitnami-solr-4.10.3-0-windows-installer.exe"</i>
2. If Windows prompts with User Account Control, click _Yes_
3. Follow the prompts through to completion. You do not need to <i>"Learn more about Bitnami cloud hosting"</i> so you can clear the check box.
4. If you see an error dialog like the following, you may safely ignore it.
<br><br>
\image html apachebadmessage.PNG
<br>
5. When the installation completes, clear the <i>"Launch Bitnami Apache Solr Stack Now?"</i> checkbox and click _Finish_.
If you need the JRE, use the link in the \ref install_solr_prereq section above to download an installer. Accept the default settings during installation.
\subsection install_solr_config Solr Configuration
1. Stop the _solrJetty_ service by pressing _Start_, typing _services.msc_, pressing _Enter_, and locating the _solrJetty_ Windows service. Select the service and press _Stop the service_. If the service is already stopped and there is no _Stop the service_ available, this is okay.
2. <b>Service Configuration</b>: Edit the <i>"C:\Bitnami\solr-4.10.3-0\apache-solr\scripts\serviceinstall.bat"</i> script. You need administrator rights to change this file. The easiest way around this is to save a copy on the Desktop, edit the Desktop version, and copy the new one back over the top of the old. Windows will ask for permission to overwrite the old file; allow it. You should make the following changes to this file:
<br>
<br>
- Add the following options in the line that begins with <i>"C:\Bitnami\solr-4.10.3-0/apache-solr\scripts\prunsrv.exe"</i> :
+ <i>++JvmOptions=-Dcollection.configName=AutopsyConfig</i>
+ <i>++JvmOptions=-Dbootstrap_confdir="C:\Bitnami\solr-4.10.3-0\apache-solr\solr\configsets\AutopsyConfig\conf"</i>
+ <i>++JvmOptions=-DzkRun </i>
<br>
- Replace the path to JavaHome with the path to your 64-bit version of the JRE. If you do not know the path, the correct JavaHome path can be obtained by running the command "where java" from the Windows command line. An example is shown below. The text in yellow is what we are interested in. Do not include the "bin" folder in the path you place into the JavaHome variable. A correct example of the final result will look something like this: <i>-JavaHome="C:\Program Files\ojdkbuild\java-1.8.0-openjdk-1.8.0.222-1"</i>
<br><br>
A portion of an updated _serviceinstall.bat_ is shown below, with the changes marked in yellow.
<br><br>
\image html serviceinstall.PNG
<br><br>
3. <b>Solr Configuration</b>: Edit <i>"C:\Bitnami\solr-4.10.3-0\apache-solr\solr\solr.xml"</i> to set the _transientCacheSize_ to the maximum number of cases expected to be open concurrently. If you expect ten concurrent cases, the text to add is
<i>\<int name="transientCacheSize">10\</int></i>
<br><br>
The added part is highlighted in yellow below. Ensure that it is inside the <i>\<solr></i> tag as follows:
<br>
\image html transientcache.PNG
<br><br>
4. <b>Log Configuration</b>: Edit <i>"C:\Bitnami\solr-4.10.3-0\apache-solr\resources/log4j.properties"</i> to configure Solr log settings:
Follow these steps to configure Solr:
<ol>
<li>Increase the log rotation size threshold (_log4j\.appender\.file\.MaxFileSize_) from 4MB to 100MB.
<li>Remove the _CONSOLE_ appender from the _log4j\.rootLogger_ line.
<li>Add the line "log4j.logger.org.apache.solr.update.processor.LogUpdateProcessor=WARN".
<li>Extract the solr-8.6.3.zip archive from the location given in the \ref install_solr_prereq section into a directory of your choice. The rest of this document assumes that the archive is extracted into \c "C:\solr-8.6.3" directory.
<li>Go to the \c "C:\solr-8.6.3\bin" directory and open the \c "solr.in.cmd" file in a text editor.
\image html solr_config_folder.png
<li>Search for each "TODO" and specify a valid path for each of the required configuration parameters. These parameters will be described in detail \ref install_solr_params "below".
\image html solr_config_todo.png
<br>
\image html solr_config_param.png
</ol>
The log file should end up looking like this (modified lines are highlighted in yellow):
\image html log4j.PNG
\subsubsection install_solr_params Solr Configuration Parameters
Required Solr Configuration Parameters:
<ul>
<li><b>JAVA_HOME</b> path to 64-bit JRE installation. For example \c "JAVA_HOME=C:\Program Files\Java\jre1.8.0_151" or \c "JAVA_HOME=C:\Program Files\ojdkbuild\java-1.8.0-openjdk-1.8.0.222-1"
<li><b>DEFAULT_CONFDIR</b> path to Autopsy configuration directory. If the Solr archive was extracted into \c "C:\solr-8.6.3" directory, then this path will be \c "C:\solr-8.6.3\server\solr\configsets\AutopsyConfig\conf".
<li><b>Dbootstrap_confdir</b> same path as <b>DEFAULT_CONFDIR</b>
<li><b>SOLR_JAVA_MEM</b> - Solr JVM heap size should be somewhere between one third and one half of the total RAM available on the machine. A rule of thumb would be use \c "set SOLR_JAVA_MEM=-Xms2G -Xmx14G" for a machine with 32GB of RAM or more, and \c "set SOLR_JAVA_MEM=-Xms2G -Xmx8G" for a machine with 16GB of RAM.
<li><b>SOLR_DATA_HOME</b> location where Solr indexes will be stored. If this is not configured, the indexes will be stored in the \c "C:\solr-8.6.3\server\solr" directory. NOTE: for Autopsy cases consisting of a large number of data sources, Solr indexes can get very large (hundreds of GBs, or TBs) so they should probably be stored on a larger network share.
</ul>
5. <b>Schema Configuration</b>: From an Autopsy installation, copy the following into <i>"C:\Bitnami\solr-4.10.3-0\apache-solr\solr"</i>:
- The folder <i>"C:\Program Files\Autopsy-XXX(current version)\autopsy\solr\solr\configsets"</i>
- The folder <i>"C:\Program Files\Autopsy-XXX(current version)\autopsy\solr\solr\lib"</i>
- The file <i>"C:\Program Files\Autopsy-XXX(current version)\autopsy\solr\solr\zoo.cfg"</i>
Optional Solr Configuration Parameters:
<ul>
<li><b>SOLR_HOST</b> by default, the Solr node name is "localhost". If multiple Solr nodes are going to be used as part of Solr Cloud, then specify the current computer's host name in the SOLR_HOST variable.
</ul>
\subsubsection install_sorl_index_file_loc Solr Text Index File Location
\subsection install_solr_reinstall Reinstall Service
<b>Important note:</b> previous versions of Autopsy (Autopsy 4.17.0 and earlier) stored the Solr text indexes in the case output directory. As a result, the Solr indexes would get deleted if a user deleted the case output directory. Solr 8 (i.e. Autopsy 4.18.0 and later) no longer stores the Solr text index files in the case output directory but instead stores them in a location defined by the <b>SOLR_DATA_HOME</b> parameter. As a consequence, if a user chooses to manually delete case output directories (for example, to free up disk space), the Solr index directories located in <b>SOLR_DATA_HOME</b> need to be manually deleted as well.
Because we made changes to the service configuration, we need to reinstall it.
Text index for an Autopsy case will follow a naming structure according to following rules: \c "[Autopsy case name] [Case creation time stamp] [Text index creation time stamp] [shardX_replica_nY]". For example, the text index for an Autopsy case "Test Case" will be located in the following directory inside <b>SOLR_DATA_HOME</b>:
1. Start a Windows command prompt as administrator by pressing Start, typing <i>command</i>, right clicking on <i>Command Prompt</i>, and clicking on <i>Run as administrator</i>. Then run the following command to uninstall the solrJetty service:
\image html solr_config_case.png
cmd /c C:\Bitnami\solr-4.10.3-0\apache-solr\scripts\serviceinstall.bat UNINSTALL
\section install_solr_service Solr Windows Service Installation
You will very likely see a result that says "The solrJetty service is not started." This is okay.
2. In the same prompt, run the following command to install the solrJetty service:
At this point Solr has been configured and ready to use. The last step is to configure it as a Windows service so that it starts each time the computer starts.
cmd /c C:\Bitnami\solr-4.10.3-0\apache-solr\scripts\serviceinstall.bat INSTALL
<br> Note the argument "INSTALL" is case sensitive. Your command prompt should look like the screenshot below. Very likely your command prompt will say "The solrJetty service could not be started." This is okay.
<br><br>
\image html solrinstall1.PNG
<br><br>
Open a command line console as Administrator and navigate to the \c "C:\solr-8.6.3\bin" directory. From there, run the following command: \c "nssm install Solr_8.6.3".
\image html solr_install_1.png
At this point you should be able to access the Solr admin panel in a web browser via the URL http://localhost:8983/solr/#/
An NSSM UI window will appear. Click the "Path" navigation button:
\image html solr_install_2.png
Select the \c "C:\solr-8.6.3\bin\solr.cmd" file. NOTE: Make sure you don't select the \c "solr.in.cmd" file by accident. In the "Arguments" parameter, type in \c "start -f -c":
\image html solr_install_3.png
Optionally, configure the service's display name, startup type, and account info:
\image html solr_install_4.png
\subsection install_solr_service_user Configure Service User
Back in \ref install_multiuseruser_page, you should have decided what user to run Solr as. To configure Solr to run as that user, you'll use Windows Service Manager.
In the \ref install_multiuseruser_page section, you should have decided what user to run Solr as. To configure Solr to run as that user, you'll use Windows Service Manager.
Switch to the "Log On" tab to change the logon credentials to the chosen user who will have access to the shared storage.
<ul><li>If you specify a domain account, the account name will be in the form of \c "DOMAINNAME\username" as shown in the example below</ul>
- Press _Start_, type _services.msc_, and press _Enter_.
- Find _solrJetty_. If the service is running, press _Stop the service_,
- Double click the service and switch to the _Log On_ tab to change the logon credentials to the chosen user who will have access to the shared storage.
- If you specify a domain account, the account name will be in the form of _DOMAINNAME\\username_ as shown in the example below
\image html solr_user_1.png
\image html solrinstall2.PNG
Click \c "Install Service". You should see the following UI window appear:
\image html solr_user_2.png
- Start the service again.
\subsection install_solr_start Start Solr Service
\section install_solr_test Testing
At this point the Solr service has been configured and installed. You can verify this by opening Windows "Services" window:
\image html solr_start_1.png
Start the "Solr_8.6.3" service, and verify that the service status changes to "Running".
\image html solr_start_2.png
\section install_solr_testing Testing
There are two tests that you should perform to confirm that the Solr machine is configured correctly.
- <b>Web Interface</b>: You should attempt to access the Solr admin panel in a web browser from another machine on the network. Replace the IP address in the following URL with the IP address or the host name that the Solr service is running on: <i>http://172.16.12.61:8983/solr/#/</i>.
<br><br>
\image html solrinstall3.PNG
<br><br>
<ul>
<li><b>Web Interface:</b> You should attempt to access the Solr admin panel in a web browser. On the Solr machine, navigate to http://localhost:8983/solr/#/ and verify that the Solr admin console gets displayed. You should also attempt to access the Solr admin panel in a web browser from another machine on the network. Replace "localhost" in the previous URL with the IP address or the host name that the Solr service is running on.
\image html solr_testing_1.png
If the service is appropriately started but you are unable to see the screenshot above, then it could be that port 8983 for Solr and port 9983 for ZooKeeper are blocked by your firewall. Contact your network administrator to open these ports.
- <b>Shared Storage</b>: Log into the Solr computer as the user you decided to run the Solr service as and attempt to access the shared storage paths. Ensure that you can access the UNC paths (or drive letters if you have hardware NAS). If everything is configured correctly you should be able to access the storage paths without having to provide credentials.
If you are prompted for a password to access the shared storage, then either enter the password and choose to save the credentials or reconfigure the setup so that the same passwords are used, etc. See \ref multiuser_users_store for steps on storing credentials. If you needed to store the credentials, then you should restart the service or reboot the computer (we have observed that a running service does not get the updated credentials).
<li><b>Shared Storage:</b> Log in to the Solr computer as the user you decided to run the Solr service as and attempt to access the shared storage paths. Ensure that you can access the UNC paths (or drive letters if you have hardware NAS). If everything is configured correctly you should be able to access the storage paths without having to provide credentials. If you are prompted for a password to access the shared storage, then either enter the password and choose to save the credentials or reconfigure the setup so that the same passwords are used. See the \ref multiuser_users_store section for steps on storing credentials. If you needed to store the credentials, then you should restart the service or reboot the computer (we have observed that a running service does not get the updated credentials).
</ul>
NOTE: You can not do a full test of permissions until you make a test case after all of the services are configured.
\section install_solr_autopsy Configuring Autopsy Clients
Once the rest of the services are configured you will \ref install_multiuserclient_page "configure Autopsy to enable multi-user cases". For the Solr 8 server, configure the Solr 8 Service and the ZooKeeper service connection info. ZooKeeper connection info is required. The ZooKeeper port number is 1000 higher than Solr service port number. By default, Solr service port is 8983 making the embedded ZooKeeper port 9983. You may also use a \ref install_solr_standalone_zk "standalone ZooKeeper service".
\section install_sorl_adding_nodes Adding More Solr Nodes (SolrCloud)
Solr 8 has the ability for multiple Solr nodes to work together as a Solr cluster. In this mode (SolrCloud mode) each Solr collection/index is split across all of the available Solr nodes. This is called sharding. For example, if there are 4 Solr nodes in a SolrCloud cluster, then the text index will be split across the 4 Solr nodes, thus greatly reducing the load on each individual Solr server and improving Solr indexing and searching performance.
To create a Solr cluster, the following steps need to be taken:
<ol>
<li>Follow steps in the \ref install_solr_config and \ref install_solr_service sections to create a Solr node (e.g. "Solr1"). Start the Solr service on the Solr1 machine. This machine will host the ZooKeeper service for the SolrCloud cluster.
<li>To add an additional Solr node (e.g. "Solr2") to the SolrCloud cluster, follow the steps in the \ref install_solr_config and \ref install_solr_service sections on the Solr2 machine. Do not start the Solr service on Solr2 yet.
<li>Solr uses ZooKeeper for its internal coordination, so all of the Solr nodes in SolrCloud need to be pointed at the same ZooKeeper service instance. Therefore in order for Solr2 node to be part of SolrCloud, it needs to use the ZooKeeper service that all the other Solr nodes in the SolrCloud cluster are using. In step 1 we have configured Solr1 node to start its embedded ZooKeeper service (this is default Solr behavior). To achieve that, <b>ZK_HOST</b> setting on Solr2 needs to be changed to point at the ZooKeeper service that is running on Solr1 node. The ZooKeeper port number is 1000 higher than <b>SOLR_PORT</b>. By default, <b>SOLR_PORT</b> is 8983 so the embedded ZooKeeper port is 9983. Therefore the <b>ZK_HOST</b> setting in \c "C:\solr-8.6.3\bin\solr.in.cmd" file (assuming that the Solr package ZIP was extracted into \c "C:\solr-8.6.3\" directory) on Solr2 machine needs to be modified to use ZooKeeper service running on Solr1:9983.
\image html solr_adding_nodes_1.png
<li>Start Solr service on Solr2 machine.
<li>When you log into a Solr admin console on either Solr1 or Solr2 (via either going to http://localhost:8983/solr/#/ on the machine, or via http://solr1:8983/solr/#/), and then navigate to "Cloud" -> "Nodes" section of the admin tree, you should see all of the Solr nodes that are part of the SolrCloud:
\image html solr_adding_nodes_2.png
<li>Additional Solr nodes can be added to the SolrCloud by repeating the previous steps.
</ol>
\section install_solr_autopsy_zk Autopsys Use of ZooKeeper Service
Autopsy uses ZooKeeper service for multi-user coordination purposes. Autopsy uses ZooKeeper to obtain locks on case level resources before modifying them. Most importantly, Autopsy stores some of its internal state data in ZooKeeper: which cases have been created, their processing state (pending, processing, or completed), as well as other case and job level state data. This is especially important if you are running Autopsy in Auto Ingest Mode, as auto ingest needs to know which jobs have already been processed.
In the screen shot below, for coordination purposes Autopsy will be using the ZooKeeper server that is running on the Solr 8 server ("Solr1" machine).
\image html solr_autopsy_zk.png
\subsection install_solr_standalone_zk Standalone ZooKeeper Server
In our testing, for Autopsy purposes it is not necessary to have a standalone ZooKeeper server. For the regular Autopsy use case it is sufficient to use the "embedded" ZooKeeper service that is started by the Solr service (on port 9983). However, Apache Solr documentation recommends that a standalone ZooKeeper service (running on a separate machine) is used in production environments. Below are instructions on how to set up a standalone ZooKeeper server and how to configure Solr & Autopsy to use it.
General Solr-related steps for this process are outlined in the Solr user guide below, in section "SolrCloud Configuration and Parameters":
https://lucene.apache.org/solr/guide/8_6/solrcloud-configuration-and-parameters.html
<ol>
<li>Download the appropriate Zookeeper installation from http://zookeeper.apache.org/releases.html . Solr 8.6.3 is integrated with Zookeeper 3.5.7. There are several options for download binaries or source code. The file that you are looking for is \c "apache-zookeeper-3.5.7-bin.tar.gz":
https://archive.apache.org/dist/zookeeper/zookeeper-3.5.7/
<li>Extract the downloaded tar file containing Zookeeper installation
<li>Create/edit the \c "/conf/zoo.cfg" file to have the following:
<ul>
<li>Specify the "dataDir" directory — this is where the ZK database will be stored.
<li>Specify "clientPort". For example, "clientPort=9983".
<li>"Four letter commands" need to be enabled in ZK config file: https://lucene.apache.org/solr/guide/8_6/setting-up-an-external-zookeeper-ensemble.html#configuration-for-a-zookeeper-ensemble
</ul>
<li>There are Windows and Linux Zookeeper startup scripts. For Windows, open a command prompt (admin NOT required), go to the directory where the tar file was extracted (e.g. \c "C:\Bitnami\zookeeper-3.5.7"), and type in \c "bin\zkServer.cmd". We have been using Cygwin in our testing and therefore using Linux commands in our examples. For Linux/CygWin, go to the same directory (e.g. \c "C:\Bitnami\zookeeper-3.5.7"), and type in \c "bin/zkServer.sh start".
<li>To verify that Zookeeper is running, in command prompt one can type in \c "bin/zkServer.sh status" (or equivalent Windows command).
\image html solr_standalone_zk_1.png
<li>To make Solr use the external ZooKeeper, the following needs to be done. Navigate to the directory where Solr startup scripts are located (usually \c "C:\solr-8.6.3\apache-solr\bin"). Open the \c "solr.in.cmd" file in a text editor. If the standalone ZooKeeper service is running on the same machine (not recommended), edit the <b>ZK_HOST</b> variable to be \c "set ZK_HOST=localhost:9983". If Zookeeper is running on a different machine (e.g. "Solr5"), then enter the Zookeeper machine's host name or IP address instead of "localhost" (e.g. \c "set ZK_HOST=Solr5:9983").
\image html solr_standalone_zk_2.png
<li>Re-installation of Solr service is not necessary. Simply stop the Solr service and re-start it.
<li>Once the Solr service has been restarted, you can navigate to Solr admin console (Cloud -> ZK Status) and verify that Solr is using the correct Zookeeper and that the Zookeeper is running.
\image html solr_standalone_zk_3.png
<li>Configure Autopsy Multi-User panel to use the standalone ZooKeeper server. Start Autopsy and open the multi-user settings panel from "Tools", "Options", "Multi-user". Note that in order to create or open Multi-user cases, "Enable Multi-user cases" must be checked and the settings below must be correct.
\image html solr_standalone_zk_4.png
</ol>
\section install_solr_backup Backing Up
Solr creates two types of data that need to be backed up:
- <b>Text Indexes</b>: These are stored in the case folder on the shared storage.
- <b>ZooKeeper Data</b>: Autopsy uses a service called ZooKeeper embedded in Solr that stores data about what cases exist and who has them open. This data needs to be backed up so that you can have a list of all available multi-user cases.
- In a default installation that data is stored in C:\\Bitnami\\solr-4.10.3-0\\apache-solr\\solr\\zoo_data.
<ul>
<li><b>Text Indexes:</b> These are stored in directory that was specified in <b>SOLR_DATA_HOME</b> parameter (see \ref install_solr_params).
<li><b>ZooKeeper Data:</b> Autopsy uses a service called ZooKeeper embedded in Solr that stores data about what cases exist and who has them open. This data needs to be backed up so that you can have a list of all available multi-user cases.
<ol><li>In a default installation that data is stored in \c "C:\solr-8.6.3\server\solr\zoo_data" (assuming that the Solr package ZIP was extracted into \c "C:\solr-8.6.3" directory).</ol>
</ul>
\section install_solr_delayed_start Delayed Start Problems With Large Number Of Solr Collections
In our testing, we have encountered an issue when a very large number (thousands) of Autopsy multi-user cases was created. Each new Autopsy multi-user case creates a Solr "collection" that contains the Solr text index. With 2,000 existing collections, when Solr service is restarted, Solr appears to internally be "loading" roughly 250 collections per minute (in chronological order, starting with oldest collections). After 4 minutes roughly half of the 2,000 collections were loaded. Users are able to search the collections that have been loaded, but they are unable to open or search the collections that have not yet been internally loaded by Solr. After 7-8 minutes all collections were loaded. These numbers will vary depending on the specific cluster configuration, text index file location (network or local storage), network throughput, number of Solr servers, etc.
*/

View File

@ -14,8 +14,7 @@ You can run each of these on their own dedicated VM, but that is not necessary.
Apache Solr uses a lot of memory, so we recommend keeping it by itself. The exception is if you are using Windows File Sharing for shared storage. You can get better Solr performance if it is writing to local storage instead of over the network. So, you can consider using the same computer for both Solr and shared storage.
Also note that because all computers need to access the shared storage at the same path, you cannot mix operating systems. A Linux system running Solr will not be able to access the shared storage at the same path as a Windows Autopsy client.
Also note that because all computers need to access the shared storage at the same path, you cannot mix operating systems.
We recommend:

View File

@ -29,9 +29,13 @@ The user account that Autopsy runs as will need access to the shared storage. Th
\subsection multiuser_users_solr Solr Service
Solr will run as a Windows service and will need access to the shared storage. The default user, which is "LocalService", will not have access to network-based storage.
Solr will run as a Windows service and may need access to shared storage if it does not have enough local storage. Solr performs best when it has fast access to storage, so keeping the indexes on local SSD drives is best. But, some clusters will need to store the indexes on the same shared storage that are used for images and other case outputs.
So, if you have network-based shared storage, you have three options:
NOTE: Autopsy 4.17.0 and prior required that indexes were stored on the shared storage drives. Autopsy 4.18.0 and beyond (which now use Solr 8) can use either local or shared storage.
If you are using local storage for Solr, then you can run the Solr service as "LocalService".
If you are going to use network storage for Solr, then you have three options:
- <b>NetworkService</b>: If you are on a domain, you may be able to run Solr as the "NetworkService" account. This account has access to the network, but the challenge can be granting access for this account to the shared storage.
- If your shared storage is a Windows file share, you'll need to grant access to the computer account running Solr as follows:
<ol> <li> Right click on the shared storage folder choose "Properties" and select the "Security" tab.

View File

@ -0,0 +1,90 @@
/*! \page upgrade_solr8_page Upgrading to Autopsy 4.18.0 (with Solr 8)
[TOC]
Autopsy 4.18.0 and beyond will make new cases with Solr 8 instead of Solr 4. Because Solr 8 is not backward compatible with Solr 4, this will have some impact on you. Notably:
- Cases made with 4.17.0 and earlier will continue to use Solr 4 indexes and can be opened with Autopsy 4.18.0+
- Cases made with 4.18.0 and later will use Solr 8 and cannot be opened by Autopsy 4.17.0 and earlier.
The main takeaway is that you can open older cases, but new cases can't be opened by older software.
If you are using single-user cases, there is nothing else for you to do. Autopsy ships with both Solr 4 and 8 embedded.
If you are using a multi-user cluster, then you'll need to install a new Solr 8 server and may choose to keep Solr 4 around too.
\section solr8_upgrade_cluster Multi-User Cluster Options
There are two considerations when upgrading a multi-user cluster:
- Which versions of Solr servers to run
- Where to store ZooKeeper data
The following sections cover these topics in more detail.
\subsection solr8_upgrade_cluster_solr Multiple Solr Servers
You will most likely want to have both Solr 4 and 8 servers running so that you can open older cases. If so, then:
- Get a new server and install Solr 8 on it using the \ref install_solr_page page instructions.
- Configure each Autopsy client to have the addresses for both servers (see below).
If you want to have only Solr 8, then simply follow the instructions for Solr 8 and get rid of the Solr 4 server. You will not be able to open older cases. It is possible to upgrade Solr indexes, but we have not tested this.
To configure Autopsy to be able to connect to both Solr 8 and Solr 4 multi-user servers, start Autopsy and open the multi-user settings panel from "Tools", "Options", "Multi-user". Note that in order to create or open Multi-user cases, "Enable Multi-user cases" must be checked and the settings below must be correct.
It is recommended that you run Solr 8 and Solr 4 servers on separate machines. In the example below, Solr 8 server is running on a machine with host name "Solr1" and Solr 4 server is running on a machine with host name "Solr6".
\image html solr_running_in_parallel.png
Once both the Solr 8 and Solr 4 multi-user server connection info is entered and saved, Autopsy will be able to open both Solr 8 multi-user cases (Autopsy version 4.18.0 and later), as well as legacy existing Solr 4 multi-user cases (cases created with Autopsy versions 4.17.0 and earlier).
<b>IMPORTANT</b>: The "Test Connection" button does not verify which version of Solr Autopsy is connecting to. It only verifies that Autopsy is connecting to a Solr server and is able to receive a response. Therefore it is important that the user enters correct server connection info in appropriate fields.
If you intend to run Solr 4 and Solr 8 servers on the same machine at the same time, you need to change the port of the Solr 8 service using the setting <b>SOLR_PORT</b> in \c "C:\solr-8.6.3\bin\solr.in.cmd" file (assuming that the Solr package ZIP was extracted into \c "C:\solr-8.6.3\" directory). By default the Solr service starts on port 8983.
\subsection install_solr_zk_migration Migration Of ZooKeeper Data
In addition to text indexing, the Solr service also stores "coordination" data using Apache ZooKeeper. You'll want to move this data if you get rid of your Solr 4 server. This data allows you to:
- Know what multi-user cases you can open
- Know which disk images were already processed for auto-ingest
You can continue to use your Solr 4 instance of ZooKeeper, but we also have a utility that allows you to migrate the data to a new server, such as the one running Solr 8 (or a stand-alone instance).
In our example we will be migrating ZooKeeper data from a ZooKeeper server running on a Solr 4 server (on machine "Solr6") to a brand new ZooKeeper server running on a Solr 8 server (on machine "Solr1").
You can browse the existing ZooKeeper data if you go to Solr6 machine and open the Solr admin console (http://localhost:8983/solr/#/). In the Solr admin console, navigate to "Cloud"-> "Tree", and expand the "autopsy" section of the tree:
\image html solr_zk_migration_1.png
You can follow the same steps to browse the ZooKeeper data on the new Solr 8 server (on "Solr1" machine). If Autopsy has not been used with this server yet, the "autopsy" folder will be missing, as in the example below:
\image html solr_zk_migration_2.png
The ZooKeeper migration utility (ZookeeperNodeMigration.jar) is located in \c "C:\Program Files\(current version of Autopsy)\autopsy\ZookeeperNodeMigration" directory:
\image html solr_zk_migration_3.png
ZookeeperNodeMigration utility requires the following inputs:
<ul>
<li>Input Zookeeper IP Address or Hostname
<li>Input Zookeeper Port Number
<li>Output Zookeeper IP Address or Hostname
<li>Output Zookeeper Port Number
</ul>
For example, if you execute the following command from command line, the Zookeeper nodes will get copied from Zookeeper server on Solr6:9983 to Zookeeper server on Solr1:9983 :
> java -jar ZookeeperNodeMigration.jar Solr6 9983 Solr1 9983
\image html solr_zk_migration_4.png
If you do not have Java installed on the machine, you can use the packaged version of Java that is distributed along with Autopsy. For example:
> \c "C:\Program Files\Autopsy-4.18.0\jre\bin\java.exe" -jar ZookeeperNodeMigration.jar Solr6 9983 Solr1 9983
To verify that the ZooKeeper data has been copied from the Solr6 server to the Solr1 server, refresh the Solr admin console on the Solr1 machine. You should now see the "autopsy" directory, along with its contents, when you go to the "Cloud" -> "Tree" section of the Solr admin console:
\image html solr_zk_migration_5.png
*/

View File

@ -0,0 +1,674 @@
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU General Public License is a free, copyleft license for
software and other kinds of works.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.
Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.
For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.
Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.
Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Use with the GNU Affero General Public License.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:
<program> Copyright (C) <year> <name of author>
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<https://www.gnu.org/licenses/>.
The GNU General Public License does not permit incorporating your program
into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
<https://www.gnu.org/licenses/why-not-lgpl.html>.

View File

@ -0,0 +1,17 @@
vcruntime140.dll is part of the Visual Studio redistributable code.
https://visualstudio.microsoft.com/license-terms/microsoft-visual-studio-community-2015/
DISTRIBUTABLE CODE. The software contains code that you are permitted to distribute in applications you develop if you comply with the terms below. (For this Section the term “distribution” also means deployment of your applications for third parties to access over the Internet.)
1. Distribution Rights. The code and text files listed below are “Distributable Code.”
* REDIST.TXT Files. You may copy and distribute the object code form of code listed on the REDIST list located at http://go.microsoft.com/fwlink/?LinkId=523763&clcid=0x409.
* Sample Code, Templates and Styles. You may copy, modify and distribute the source and object code form of code marked as “sample”, “template”, “Simple Styles” or “Sketch Styles”.
* Image Library. You may copy and distribute images, graphics and animations in the Image Library as described in the software documentation.
* Third Party Distribution. You may permit distributors of your applications to copy and distribute the Distributable Code as part of those applications.
2. Distribution Requirements. For any Distributable Code you distribute, you must:
* add significant primary functionality to it in your applications; and
* require distributors and external end users to agree to terms that protect the Distributable Code at least as much as this agreement.
3. Distribution Restrictions. You may not:
* use Microsoft's trademarks in your applications' names or branding in a way that suggests your applications come from or are endorsed by Microsoft; or
* modify or distribute the source code of any Distributable Code so that any part of it becomes subject to an Excluded License. An “Excluded License” is one that requires, as a condition of use, modification or distribution, that (i) the code be disclosed or distributed in source code form; or (ii) others have the right to modify it.

View File

@ -0,0 +1,25 @@
Copyright (C) 1995-2013 Jean-loup Gailly and Mark Adler
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
Jean-loup Gailly Mark Adler
jloup@gzip.org madler@alumni.caltech.edu
The data format used by the zlib library is described by RFCs (Request for
Comments) 1950 to 1952 in the files http://tools.ietf.org/html/rfc1950
(zlib format), rfc1951 (deflate format) and rfc1952 (gzip format).

34
thirdparty/rr-full/license_p2x.txt vendored Normal file
View File

@ -0,0 +1,34 @@
LICENSE AGREEMENT
You should carefully read the following terms and conditions before using this software. Unless you have a different license agreement signed by IndigoSTAR Software, your use of this software indicates your acceptance of this license agreement and warranty.
Registered Version
Each registered copy of Perl2Exe may be used at a single workstation to create an unlimited number of exe files, subject to the following conditions:
* A separate registered copy of Perl2Exe must be obtained for each workstation on which Perl2Exe will be used even if such use is only temporary. This is not a "concurrent use" license.
* Exe files created by Perl2Exe are shipped with Run-time portions of Perl2Exe. No registered user, nor anyone else, may alter or modify the generated Exe files. You cannot give anyone else permission to modify the Exe files.
* Exe files generated by the registered version of Perl2exe may be freely distributed.
All rights not expressly granted in this license agreement are reserved entirely to IndigoSTAR Software
Governing Law
This agreement shall be governed by the laws of the Province of Ontario, Canada.
Limited Warranty
IndigoSTAR Software represents and warrants that the software and accompanying files will operate and function as documented, and that IndigoSTAR has full and sufficient right, title and authority to assign or grant the rights and/or licenses granted under this License Agreement. IndigoSTAR further warrants that neither the Software nor accompanying files infringe any intellectual property rights or similar rights of any 3rd party and agrees to indemnify you for any loss or damage related to a claim of infringement.
Except for these limited warranties, this software and the accompanying files are sold "as is" and without warranties as to performance of merchantability or any other warranties whether expressed or implied. Because of the various hardware and software environments into which Perl2Exe may be put, NO WARRANTY OF FITNESS FOR A PARTICULAR PURPOSE IS OFFERED. Good data processing procedure dictates that any program be thoroughly tested with non-critical data before relying on it. The user must assume the entire risk of using the program. Except for claims based on breach of the limited warranties or the indemnity provided above, the liability of either party for claims arising under this Agreement will be limited exclusively to the amount of fees paid under this agreement.
Shareware Version
You are hereby licensed to use the shareware evaluation version of Perl2Exe for evaluation purposes without charge for a period of 30 days. This is not free software. If you use this software after the 30 day evaluation period a registration fee is required. Under no circumstances are you licensed to distribute Exe files created by the shareware evaluation version of Perl2Exe. Unregistered use of Perl2Exe after the 30 day evaluation period is in violation of copyright laws.
Distribution of Perl2Exe
You are hereby licensed to make as many copies of the shareware evaluation version of this software and documentation as you wish; give exact copies of the original shareware version to anyone; and distribute the shareware version of the software and documentation in its unmodified form via electronic means. There is no charge for any of the above.
You are specifically prohibited from charging, or requesting donations, for any such copies, however made; and from distributing the software and/or documentation with other products (commercial or otherwise) without prior written permission, with one exception: Disk Vendors approved by the Association of Shareware Professionals are permitted to redistribute Perl2Exe subject to the conditions in this license, without specific written permission.

22
thirdparty/rr/license.txt vendored Normal file
View File

@ -0,0 +1,22 @@
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
associated documentation files (the "Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
This project is licensed under terms of the MIT License -
https://opensource.org/licenses/MIT
See also:
https://en.wikipedia.org/wiki/MIT_License
Questions, comments, etc., can be sent to keydet89 at yahoo dot com.

34
thirdparty/rr/license_p2x.txt vendored Normal file
View File

@ -0,0 +1,34 @@
LICENSE AGREEMENT
You should carefully read the following terms and conditions before using this software. Unless you have a different license agreement signed by IndigoSTAR Software, your use of this software indicates your acceptance of this license agreement and warranty.
Registered Version
Each registered copy of Perl2Exe may be used at a single workstation to create an unlimited number of exe files, subject to the following conditions:
* A separate registered copy of Perl2Exe must be obtained for each workstation on which Perl2Exe will be used even if such use is only temporary. This is not a "concurrent use" license.
* Exe files created by Perl2Exe are shipped with Run-time portions of Perl2Exe. No registered user, nor anyone else, may alter or modify the generated Exe files. You cannot give anyone else permission to modify the Exe files.
* Exe files generated by the registered version of Perl2exe may be freely distributed.
All rights not expressly granted in this license agreement are reserved entirely to IndigoSTAR Software
Governing Law
This agreement shall be governed by the laws of the Province of Ontario, Canada.
Limited Warranty
IndigoSTAR Software represents and warrants that the software and accompanying files will operate and function as documented, and that IndigoSTAR has full and sufficient right, title and authority to assign or grant the rights and/or licenses granted under this License Agreement. IndigoSTAR further warrants that neither the Software nor accompanying files infringe any intellectual property rights or similar rights of any 3rd party and agrees to indemnify you for any loss or damage related to a claim of infringement.
Except for these limited warranties, this software and the accompanying files are sold "as is" and without warranties as to performance of merchantability or any other warranties whether expressed or implied. Because of the various hardware and software environments into which Perl2Exe may be put, NO WARRANTY OF FITNESS FOR A PARTICULAR PURPOSE IS OFFERED. Good data processing procedure dictates that any program be thoroughly tested with non-critical data before relying on it. The user must assume the entire risk of using the program. Except for claims based on breach of the limited warranties or the indemnity provided above, the liability of either party for claims arising under this Agreement will be limited exclusively to the amount of fees paid under this agreement.
Shareware Version
You are hereby licensed to use the shareware evaluation version of Perl2Exe for evaluation purposes without charge for a period of 30 days. This is not free software. If you use this software after the 30 day evaluation period a registration fee is required. Under no circumstances are you licensed to distribute Exe files created by the shareware evaluation version of Perl2Exe. Unregistered use of Perl2Exe after the 30 day evaluation period is in violation of copyright laws.
Distribution of Perl2Exe
You are hereby licensed to make as many copies of the shareware evaluation version of this software and documentation as you wish; give exact copies of the original shareware version to anyone; and distribute the shareware version of the software and documentation in its unmodified form via electronic means. There is no charge for any of the above.
You are specifically prohibited from charging, or requesting donations, for any such copies, however made; and from distributing the software and/or documentation with other products (commercial or otherwise) without prior written permission, with one exception: Disk Vendors approved by the Association of Shareware Professionals are permitted to redistribute Perl2Exe subject to the conditions in this license, without specific written permission.

View File

@ -14,5 +14,6 @@
<dependency conf="autopsy->default" org="com.googlecode.ez-vcard" name="ez-vcard" rev="0.10.5"/>
<dependency conf="autopsy->default" org="com.github.mangstadt" name="vinnie" rev="2.0.2"/>
<dependency org="com.google.guava" name="guava" rev="19.0"/>
<dependency org="commons-validator" name="commons-validator" rev="1.6"/>
</dependencies>
</ivy-module>

View File

@ -160,7 +160,7 @@ class MboxParser extends MimeJ4MessageParser implements Iterator<EmailMessage> {
}
@Override
public void close() throws Exception {
public void close() throws IOException{
if(mboxIterable != null) {
mboxIterable.close();
}

View File

@ -26,6 +26,7 @@ import java.util.ArrayList;
import java.util.List;
import java.util.UUID;
import java.util.logging.Level;
import org.apache.james.mime4j.dom.BinaryBody;
import org.apache.james.mime4j.dom.Body;
import org.apache.james.mime4j.dom.Entity;
import org.apache.james.mime4j.dom.Message;
@ -164,7 +165,11 @@ class MimeJ4MessageParser implements AutoCloseable{
if (msg.isMultipart()) {
handleMultipart(email, (Multipart) msg.getBody(), sourceFileID);
} else {
handleTextBody(email, (TextBody) msg.getBody(), msg.getMimeType(), msg.getHeader().getFields());
if(msg.getBody() instanceof TextBody) {
handleTextBody(email, (TextBody) msg.getBody(), msg.getMimeType(), msg.getHeader().getFields());
} else {
handleAttachment(email, msg, sourceFileID, 1);
}
}
return email;
@ -227,7 +232,7 @@ class MimeJ4MessageParser implements AutoCloseable{
handleAttachment(email, e, fileID, index);
} else if ((e.getMimeType().equals(HTML_TYPE) && (email.getHtmlBody() == null || email.getHtmlBody().isEmpty()))
|| (e.getMimeType().equals(ContentTypeField.TYPE_TEXT_PLAIN) && (email.getTextBody() == null || email.getTextBody().isEmpty()))) {
handleTextBody(email, (TextBody) e.getBody(), e.getMimeType(), e.getHeader().getFields());
handleTextBody(email, (TextBody) e.getBody(), e.getMimeType(), e.getHeader().getFields());
} else {
handleAttachment(email, e, fileID, index);
}
@ -372,7 +377,7 @@ class MimeJ4MessageParser implements AutoCloseable{
}
@Override
public void close() throws Exception {
public void close() throws IOException{
}
}

View File

@ -123,7 +123,7 @@ class PstParser implements AutoCloseable{
}
@Override
public void close() throws Exception{
public void close() throws IOException{
if(pstFile != null) {
RandomAccessFile file = pstFile.getFileHandle();
if(file != null) {