diff --git a/BUILDING.txt b/BUILDING.txt index 570cadbf87..a77d6c8add 100644 --- a/BUILDING.txt +++ b/BUILDING.txt @@ -37,16 +37,16 @@ to the root 64-bit JRE directory. 2) Get Sleuth Kit Setup 2a) Download and build a Release version of Sleuth Kit (TSK) 4.0. See win32\BUILDING.txt in the TSK package for more information. You need to - build the tsk_jni project. Select the Release_PostgreSQL Win32 or x64 target, + build the tsk_jni project. Select the Release Win32 or x64 target, depending upon your target build. You can use a released version or download the latest from github: - git://github.com/sleuthkit/sleuthkit.git -2b) Build the TSK JAR file by typing 'ant dist-PostgreSQL' in +2b) Build the TSK JAR file by typing 'ant dist' in bindings/java in the TSK source code folder from a command line. Note it is case sensitive. You can also add the code to a NetBeans project and build - it from there, selecting the dist-PostgreSQL target. + it from there, selecting the dist target. 2c) Set TSK_HOME environment variable to the root directory of TSK @@ -103,7 +103,7 @@ the build process. - The Sleuth Kit Java datamodel JAR file has native JNI libraries that are copied into it. These JNI libraries have dependencies on -libewf, zlib, libpq, libintl-8, libeay32, and ssleay32 DLL files. On non-Windows +libewf, zlib, libintl-8, libeay32, and ssleay32 DLL files. On non-Windows platforms, the JNI library also has a dependency on libtsk (on Windows, it is compiled into libtsk_jni). diff --git a/Core/build.xml b/Core/build.xml index 8ac5c13d99..52f12a4b0b 100644 --- a/Core/build.xml +++ b/Core/build.xml @@ -59,6 +59,11 @@ + + + + + diff --git a/Core/nbproject/project.properties b/Core/nbproject/project.properties index 1898db811c..8dbd7f8d92 100644 --- a/Core/nbproject/project.properties +++ b/Core/nbproject/project.properties @@ -83,7 +83,7 @@ file.reference.sevenzipjbinding.jar=release/modules/ext/sevenzipjbinding.jar file.reference.sis-metadata-0.8.jar=release\\modules\\ext\\sis-metadata-0.8.jar file.reference.sis-netcdf-0.8.jar=release\\modules\\ext\\sis-netcdf-0.8.jar file.reference.sis-utility-0.8.jar=release\\modules\\ext\\sis-utility-0.8.jar -file.reference.sleuthkit-caseuco-4.9.0.jar=release\\modules\\ext\\sleuthkit-caseuco-4.9.0.jar +file.reference.sleuthkit-caseuco-4.10.0.jar=release/modules/ext/sleuthkit-caseuco-4.10.0.jar file.reference.slf4j-api-1.7.25.jar=release\\modules\\ext\\slf4j-api-1.7.25.jar file.reference.sqlite-jdbc-3.25.2.jar=release/modules/ext/sqlite-jdbc-3.25.2.jar file.reference.StixLib.jar=release/modules/ext/StixLib.jar @@ -91,7 +91,7 @@ file.reference.javax.ws.rs-api-2.0.1.jar=release/modules/ext/javax.ws.rs-api-2.0 file.reference.cxf-core-3.0.16.jar=release/modules/ext/cxf-core-3.0.16.jar file.reference.cxf-rt-frontend-jaxrs-3.0.16.jar=release/modules/ext/cxf-rt-frontend-jaxrs-3.0.16.jar file.reference.cxf-rt-transports-http-3.0.16.jar=release/modules/ext/cxf-rt-transports-http-3.0.16.jar -file.reference.sleuthkit-4.9.0.jar=release/modules/ext/sleuthkit-4.9.0.jar +file.reference.sleuthkit-4.10.0.jar=release/modules/ext/sleuthkit-4.10.0.jar file.reference.curator-client-2.8.0.jar=release/modules/ext/curator-client-2.8.0.jar file.reference.curator-framework-2.8.0.jar=release/modules/ext/curator-framework-2.8.0.jar file.reference.curator-recipes-2.8.0.jar=release/modules/ext/curator-recipes-2.8.0.jar diff --git a/Core/nbproject/project.xml b/Core/nbproject/project.xml index b751ffbf07..61e6a86b04 100644 --- a/Core/nbproject/project.xml +++ b/Core/nbproject/project.xml @@ 
-472,8 +472,8 @@
                 <binary-origin>release/modules/ext/commons-pool2-2.4.2.jar</binary-origin>
             </class-path-extension>
             <class-path-extension>
-                <runtime-relative-path>ext/sleuthkit-4.9.0.jar</runtime-relative-path>
-                <binary-origin>release/modules/ext/sleuthkit-4.9.0.jar</binary-origin>
+                <runtime-relative-path>ext/sleuthkit-4.10.0.jar</runtime-relative-path>
+                <binary-origin>release/modules/ext/sleuthkit-4.10.0.jar</binary-origin>
             </class-path-extension>
             <class-path-extension>
                 <runtime-relative-path>ext/jxmapviewer2-2.4.jar</runtime-relative-path>
@@ -780,8 +780,8 @@
                 <binary-origin>release/modules/ext/curator-client-2.8.0.jar</binary-origin>
             </class-path-extension>
             <class-path-extension>
-                <runtime-relative-path>ext/sleuthkit-caseuco-4.9.0.jar</runtime-relative-path>
-                <binary-origin>release\modules\ext\sleuthkit-caseuco-4.9.0.jar</binary-origin>
+                <runtime-relative-path>ext/sleuthkit-caseuco-4.10.0.jar</runtime-relative-path>
+                <binary-origin>release/modules/ext/sleuthkit-caseuco-4.10.0.jar</binary-origin>
             </class-path-extension>
             <class-path-extension>
                 <runtime-relative-path>ext/fontbox-2.0.13.jar</runtime-relative-path>
diff --git a/Core/src/org/sleuthkit/autopsy/actions/DeleteContentTagAction.java b/Core/src/org/sleuthkit/autopsy/actions/DeleteContentTagAction.java
index b7ff3a73a0..b6c882cd5e 100644
--- a/Core/src/org/sleuthkit/autopsy/actions/DeleteContentTagAction.java
+++ b/Core/src/org/sleuthkit/autopsy/actions/DeleteContentTagAction.java
@@ -29,6 +29,9 @@ import org.openide.util.Utilities;
 import org.openide.windows.WindowManager;
 import org.sleuthkit.autopsy.casemodule.Case;
 import org.sleuthkit.autopsy.casemodule.NoCurrentCaseException;
+import org.sleuthkit.autopsy.casemodule.services.contentviewertags.ContentViewerTagManager;
+import org.sleuthkit.autopsy.casemodule.services.contentviewertags.ContentViewerTagManager.ContentViewerTag;
+import org.sleuthkit.autopsy.contentviewers.imagetagging.ImageTagRegion;
 import org.sleuthkit.autopsy.coreutils.Logger;
 import org.sleuthkit.datamodel.ContentTag;
 import org.sleuthkit.datamodel.TskCoreException;
@@ -72,6 +75,12 @@ public class DeleteContentTagAction extends AbstractAction {
         new Thread(() -> {
             for (ContentTag tag : selectedTags) {
                 try {
+                    // Check if there is an image tag before deleting the content tag.
+                    ContentViewerTag<ImageTagRegion> imageTag = ContentViewerTagManager.getTag(tag, ImageTagRegion.class);
+                    if(imageTag != null) {
+                        ContentViewerTagManager.deleteTag(imageTag);
+                    }
+
                     Case.getCurrentCaseThrows().getServices().getTagsManager().deleteContentTag(tag);
                 } catch (TskCoreException | NoCurrentCaseException ex) {
                     Logger.getLogger(DeleteContentTagAction.class.getName()).log(Level.SEVERE, "Error deleting tag", ex); //NON-NLS
diff --git a/Core/src/org/sleuthkit/autopsy/actions/DeleteFileContentTagAction.java b/Core/src/org/sleuthkit/autopsy/actions/DeleteFileContentTagAction.java
index cb719e2f0a..4141e3d29d 100644
--- a/Core/src/org/sleuthkit/autopsy/actions/DeleteFileContentTagAction.java
+++ b/Core/src/org/sleuthkit/autopsy/actions/DeleteFileContentTagAction.java
@@ -39,6 +39,9 @@ import org.openide.util.actions.Presenter;
 import org.sleuthkit.autopsy.casemodule.Case;
 import org.sleuthkit.autopsy.casemodule.NoCurrentCaseException;
 import org.sleuthkit.autopsy.casemodule.services.TagsManager;
+import org.sleuthkit.autopsy.casemodule.services.contentviewertags.ContentViewerTagManager;
+import org.sleuthkit.autopsy.casemodule.services.contentviewertags.ContentViewerTagManager.ContentViewerTag;
+import org.sleuthkit.autopsy.contentviewers.imagetagging.ImageTagRegion;
 import org.sleuthkit.autopsy.coreutils.Logger;
 import org.sleuthkit.autopsy.tags.TagUtils;
 import org.sleuthkit.datamodel.AbstractFile;
@@ -123,6 +126,13 @@ public class DeleteFileContentTagAction extends AbstractAction implements Presen
             try {
                 logger.log(Level.INFO, "Removing tag {0} from {1}", new Object[]{tagName.getDisplayName(),
                     contentTag.getContent().getName()}); //NON-NLS
+
+                // Check if there is an image tag before deleting the content tag.
+                ContentViewerTag<ImageTagRegion> imageTag = ContentViewerTagManager.getTag(contentTag, ImageTagRegion.class);
+                if(imageTag != null) {
+                    ContentViewerTagManager.deleteTag(imageTag);
+                }
+
                 tagsManager.deleteContentTag(contentTag);
             } catch (TskCoreException tskCoreException) {
                 logger.log(Level.SEVERE, "Error untagging file", tskCoreException); //NON-NLS
diff --git a/Core/src/org/sleuthkit/autopsy/actions/ReplaceContentTagAction.java b/Core/src/org/sleuthkit/autopsy/actions/ReplaceContentTagAction.java
index 51b898eb84..1c69bc133c 100644
--- a/Core/src/org/sleuthkit/autopsy/actions/ReplaceContentTagAction.java
+++ b/Core/src/org/sleuthkit/autopsy/actions/ReplaceContentTagAction.java
@@ -29,6 +29,9 @@ import org.openide.util.Utilities;
 import org.sleuthkit.autopsy.casemodule.Case;
 import org.sleuthkit.autopsy.casemodule.NoCurrentCaseException;
 import org.sleuthkit.autopsy.casemodule.services.TagsManager;
+import org.sleuthkit.autopsy.casemodule.services.contentviewertags.ContentViewerTagManager;
+import org.sleuthkit.autopsy.casemodule.services.contentviewertags.ContentViewerTagManager.ContentViewerTag;
+import org.sleuthkit.autopsy.contentviewers.imagetagging.ImageTagRegion;
 import org.sleuthkit.autopsy.coreutils.Logger;
 import org.sleuthkit.datamodel.ContentTag;
 import org.sleuthkit.datamodel.TagName;
@@ -83,9 +86,19 @@
             try {
                 logger.log(Level.INFO, "Replacing tag {0} with tag {1} for artifact {2}", new Object[]{oldTag.getName().getDisplayName(), newTagName.getDisplayName(), oldTag.getContent().getName()}); //NON-NLS
+                // Check if there is an image tag before deleting the content tag.
+                ContentViewerTag<ImageTagRegion> imageTag = ContentViewerTagManager.getTag(oldTag, ImageTagRegion.class);
+                if(imageTag != null) {
+                    ContentViewerTagManager.deleteTag(imageTag);
+                }
+
                 tagsManager.deleteContentTag(oldTag);
-                tagsManager.addContentTag(oldTag.getContent(), newTagName, newComment);
-
+                ContentTag newTag = tagsManager.addContentTag(oldTag.getContent(), newTagName, newComment);
+
+                // Resave the image tag if present.
+                if(imageTag != null) {
+                    ContentViewerTagManager.saveTag(newTag, imageTag.getDetails());
+                }
             } catch (TskCoreException tskCoreException) {
                 logger.log(Level.SEVERE, "Error replacing artifact tag", tskCoreException); //NON-NLS
                 Platform.runLater(()
diff --git a/Core/src/org/sleuthkit/autopsy/casemodule/datasourceSummary/Bundle.properties-MERGED b/Core/src/org/sleuthkit/autopsy/casemodule/datasourcesummary/Bundle.properties-MERGED
old mode 100755
new mode 100644
similarity index 100%
rename from Core/src/org/sleuthkit/autopsy/casemodule/datasourceSummary/Bundle.properties-MERGED
rename to Core/src/org/sleuthkit/autopsy/casemodule/datasourcesummary/Bundle.properties-MERGED
diff --git a/Core/src/org/sleuthkit/autopsy/casemodule/datasourceSummary/Bundle_ja.properties b/Core/src/org/sleuthkit/autopsy/casemodule/datasourcesummary/Bundle_ja.properties
similarity index 100%
rename from Core/src/org/sleuthkit/autopsy/casemodule/datasourceSummary/Bundle_ja.properties
rename to Core/src/org/sleuthkit/autopsy/casemodule/datasourcesummary/Bundle_ja.properties
diff --git a/Core/src/org/sleuthkit/autopsy/centralrepository/datamodel/CentralRepoAccount.java b/Core/src/org/sleuthkit/autopsy/centralrepository/datamodel/CentralRepoAccount.java
index 47575ad896..88c74a4fe8 100644
--- a/Core/src/org/sleuthkit/autopsy/centralrepository/datamodel/CentralRepoAccount.java
+++ b/Core/src/org/sleuthkit/autopsy/centralrepository/datamodel/CentralRepoAccount.java
@@ -305,8 +305,40 @@ public final class CentralRepoAccount {
                 normalizedAccountIdentifier = accountIdentifier.toLowerCase().trim();
             }
         } catch (CorrelationAttributeNormalizationException ex) {
-            throw new InvalidAccountIDException("Failed to normalize the account idenitier.", ex);
+            throw new InvalidAccountIDException("Failed to normalize the account identifier " + accountIdentifier, ex);
         }
         return normalizedAccountIdentifier;
     }
+
+    /**
+     * Normalizes an account identifier, based on the given account type.
+     *
+     * @param crAccountType Account type.
+     * @param accountIdentifier Account identifier to be normalized.
+     * @return Normalized identifier.
+     *
+     * @throws InvalidAccountIDException If the account identifier is invalid.
+ */ + public static String normalizeAccountIdentifier(CentralRepoAccountType crAccountType, String accountIdentifier) throws InvalidAccountIDException { + + if (StringUtils.isBlank(accountIdentifier)) { + throw new InvalidAccountIDException("Account identifier is null or empty."); + } + + String normalizedAccountIdentifier; + try { + if (crAccountType.getAcctType().equals(Account.Type.PHONE)) { + normalizedAccountIdentifier = CorrelationAttributeNormalizer.normalizePhone(accountIdentifier); + } else if (crAccountType.getAcctType().equals(Account.Type.EMAIL)) { + normalizedAccountIdentifier = CorrelationAttributeNormalizer.normalizeEmail(accountIdentifier); + } else { + // convert to lowercase + normalizedAccountIdentifier = accountIdentifier.toLowerCase(); + } + } catch (CorrelationAttributeNormalizationException ex) { + throw new InvalidAccountIDException("Invalid account identifier", ex); + } + + return normalizedAccountIdentifier; + } } diff --git a/Core/src/org/sleuthkit/autopsy/centralrepository/datamodel/CentralRepoDbUtil.java b/Core/src/org/sleuthkit/autopsy/centralrepository/datamodel/CentralRepoDbUtil.java index 6ba23b65b5..5105aed2e9 100644 --- a/Core/src/org/sleuthkit/autopsy/centralrepository/datamodel/CentralRepoDbUtil.java +++ b/Core/src/org/sleuthkit/autopsy/centralrepository/datamodel/CentralRepoDbUtil.java @@ -262,9 +262,7 @@ public class CentralRepoDbUtil { * used */ public static void setUseCentralRepo(boolean centralRepoCheckBoxIsSelected) { - if (!centralRepoCheckBoxIsSelected) { - closePersonasTopComponent(); - } + closePersonasTopComponent(); ModuleSettings.setConfigSetting(CENTRAL_REPO_NAME, CENTRAL_REPO_USE_KEY, Boolean.toString(centralRepoCheckBoxIsSelected)); } diff --git a/Core/src/org/sleuthkit/autopsy/centralrepository/datamodel/CentralRepository.java b/Core/src/org/sleuthkit/autopsy/centralrepository/datamodel/CentralRepository.java index bdae5a727b..842c8e3f04 100755 --- a/Core/src/org/sleuthkit/autopsy/centralrepository/datamodel/CentralRepository.java +++ b/Core/src/org/sleuthkit/autopsy/centralrepository/datamodel/CentralRepository.java @@ -27,6 +27,7 @@ import org.sleuthkit.autopsy.casemodule.Case; import org.sleuthkit.autopsy.centralrepository.datamodel.CentralRepoAccount.CentralRepoAccountType; import org.sleuthkit.autopsy.coordinationservice.CoordinationService; import org.sleuthkit.datamodel.HashHitInfo; +import org.sleuthkit.datamodel.InvalidAccountIDException; /** * Main interface for interacting with the database @@ -880,9 +881,24 @@ public interface CentralRepository { * @param crAccountType CR account type to look for or create * @param accountUniqueID type specific unique account id * @return CR account - * - * @throws CentralRepoException + * + * @throws CentralRepoException If there is an error accessing Central Repository. + * @throws InvalidAccountIDException If the account identifier is not valid. */ - CentralRepoAccount getOrCreateAccount(CentralRepoAccount.CentralRepoAccountType crAccountType, String accountUniqueID) throws CentralRepoException; + CentralRepoAccount getOrCreateAccount(CentralRepoAccount.CentralRepoAccountType crAccountType, String accountUniqueID) throws InvalidAccountIDException, CentralRepoException; + + /** + * Gets an account from the accounts table matching the given type/ID, if + * one exists. + * + * @param crAccountType CR account type to look for or create + * @param accountUniqueID type specific unique account id + * + * @return CR account, if found, null otherwise. 
+ * + * @throws CentralRepoException If there is an error accessing Central Repository. + * @throws InvalidAccountIDException If the account identifier is not valid. + */ + CentralRepoAccount getAccount(CentralRepoAccount.CentralRepoAccountType crAccountType, String accountUniqueID) throws InvalidAccountIDException, CentralRepoException; } diff --git a/Core/src/org/sleuthkit/autopsy/centralrepository/datamodel/CorrelationAttributeUtil.java b/Core/src/org/sleuthkit/autopsy/centralrepository/datamodel/CorrelationAttributeUtil.java index cb14bf5fa2..9af6fcde3b 100755 --- a/Core/src/org/sleuthkit/autopsy/centralrepository/datamodel/CorrelationAttributeUtil.java +++ b/Core/src/org/sleuthkit/autopsy/centralrepository/datamodel/CorrelationAttributeUtil.java @@ -35,6 +35,7 @@ import org.sleuthkit.datamodel.BlackboardArtifact.ARTIFACT_TYPE; import org.sleuthkit.datamodel.BlackboardAttribute; import org.sleuthkit.datamodel.BlackboardAttribute.ATTRIBUTE_TYPE; import org.sleuthkit.datamodel.HashUtility; +import org.sleuthkit.datamodel.InvalidAccountIDException; import org.sleuthkit.datamodel.TskCoreException; import org.sleuthkit.datamodel.TskData; @@ -184,7 +185,11 @@ public class CorrelationAttributeUtil { } } } catch (CorrelationAttributeNormalizationException ex) { - logger.log(Level.SEVERE, String.format("Error normalizing correlation attribute (%s)", artifact), ex); // NON-NLS + logger.log(Level.WARNING, String.format("Error normalizing correlation attribute (%s)", artifact), ex); // NON-NLS + return correlationAttrs; + } + catch (InvalidAccountIDException ex) { + logger.log(Level.WARNING, String.format("Invalid account identifier (artifactID: %d)", artifact.getId())); // NON-NLS return correlationAttrs; } catch (CentralRepoException ex) { @@ -281,7 +286,7 @@ public class CorrelationAttributeUtil { * * @return The correlation attribute instance. 
     */
-    private static void makeCorrAttrFromAcctArtifact(List<CorrelationAttributeInstance> corrAttrInstances, BlackboardArtifact acctArtifact) throws TskCoreException, CentralRepoException {
+    private static void makeCorrAttrFromAcctArtifact(List<CorrelationAttributeInstance> corrAttrInstances, BlackboardArtifact acctArtifact) throws InvalidAccountIDException, TskCoreException, CentralRepoException {
         // Get the account type from the artifact
         BlackboardAttribute accountTypeAttribute = acctArtifact.getAttribute(new BlackboardAttribute.Type(BlackboardAttribute.ATTRIBUTE_TYPE.TSK_ACCOUNT_TYPE));
diff --git a/Core/src/org/sleuthkit/autopsy/centralrepository/datamodel/RdbmsCentralRepo.java b/Core/src/org/sleuthkit/autopsy/centralrepository/datamodel/RdbmsCentralRepo.java
index d3195a6d3d..b48797e3fc 100644
--- a/Core/src/org/sleuthkit/autopsy/centralrepository/datamodel/RdbmsCentralRepo.java
+++ b/Core/src/org/sleuthkit/autopsy/centralrepository/datamodel/RdbmsCentralRepo.java
@@ -52,6 +52,7 @@ import org.sleuthkit.autopsy.healthmonitor.TimingMetric;
 import org.sleuthkit.datamodel.Account;
 import org.sleuthkit.datamodel.CaseDbSchemaVersionNumber;
 import org.sleuthkit.datamodel.HashHitInfo;
+import org.sleuthkit.datamodel.InvalidAccountIDException;
 import org.sleuthkit.datamodel.SleuthkitCase;
 import org.sleuthkit.datamodel.TskData;
@@ -1080,21 +1081,34 @@ abstract class RdbmsCentralRepo implements CentralRepository {
      * within TSK core
      */
     @Override
-    public CentralRepoAccount getOrCreateAccount(CentralRepoAccountType crAccountType, String accountUniqueID) throws CentralRepoException {
+    public CentralRepoAccount getOrCreateAccount(CentralRepoAccountType crAccountType, String accountUniqueID) throws InvalidAccountIDException, CentralRepoException {
+        // Get the account from the accounts table
+        String normalizedAccountID = CentralRepoAccount.normalizeAccountIdentifier(crAccountType, accountUniqueID);
-        String insertSQL = "INSERT INTO accounts (account_type_id, account_unique_identifier) "
-                + "VALUES (?, ?) " + getConflictClause();
+        // insert the account. If there is a conflict, ignore it.
+        String insertSQL;
+        switch (CentralRepoDbManager.getSavedDbChoice().getDbPlatform()) {
+            case POSTGRESQL:
+                insertSQL = "INSERT INTO accounts (account_type_id, account_unique_identifier) VALUES (?, ?) " + getConflictClause(); //NON-NLS
+                break;
+            case SQLITE:
+                insertSQL = "INSERT OR IGNORE INTO accounts (account_type_id, account_unique_identifier) VALUES (?, ?) "; //NON-NLS
+                break;
+            default:
+                throw new CentralRepoException(String.format("Cannot add account to currently selected CR database platform %s", CentralRepoDbManager.getSavedDbChoice().getDbPlatform())); //NON-NLS
+        }
+
         try (Connection connection = connect();
                 PreparedStatement preparedStatement = connection.prepareStatement(insertSQL);) {
             preparedStatement.setInt(1, crAccountType.getAccountTypeId());
-            preparedStatement.setString(2, accountUniqueID); // TBD: fill in the normalized ID
+            preparedStatement.setString(2, normalizedAccountID);
             preparedStatement.executeUpdate();
             // get the account from the db - should exist now.
-            return getAccount(crAccountType, accountUniqueID);
+            return getAccount(crAccountType, normalizedAccountID);
         } catch (SQLException ex) {
             throw new CentralRepoException("Error adding an account to CR database.", ex);
         }
@@ -1177,15 +1191,17 @@ abstract class RdbmsCentralRepo implements CentralRepository {
      * @return CentralRepoAccount for the given type/id. May return null if not
      *         found.
* - * @throws CentralRepoException + * @throws CentralRepoException If there is an error accessing Central Repository. + * @throws InvalidAccountIDException If the account identifier is not valid. */ - private CentralRepoAccount getAccount(CentralRepoAccountType crAccountType, String accountUniqueID) throws CentralRepoException { - - CentralRepoAccount crAccount = accountsCache.getIfPresent(Pair.of(crAccountType, accountUniqueID)); + @Override + public CentralRepoAccount getAccount(CentralRepoAccountType crAccountType, String accountUniqueID) throws InvalidAccountIDException, CentralRepoException { + String normalizedAccountID = CentralRepoAccount.normalizeAccountIdentifier(crAccountType, accountUniqueID); + CentralRepoAccount crAccount = accountsCache.getIfPresent(Pair.of(crAccountType, normalizedAccountID)); if (crAccount == null) { - crAccount = getCRAccountFromDb(crAccountType, accountUniqueID); + crAccount = getCRAccountFromDb(crAccountType, normalizedAccountID); if (crAccount != null) { - accountsCache.put(Pair.of(crAccountType, accountUniqueID), crAccount); + accountsCache.put(Pair.of(crAccountType, normalizedAccountID), crAccount); } } diff --git a/Core/src/org/sleuthkit/autopsy/centralrepository/optionspanel/EamDbSettingsDialog.java b/Core/src/org/sleuthkit/autopsy/centralrepository/optionspanel/EamDbSettingsDialog.java index 675e7c8807..f30c402513 100644 --- a/Core/src/org/sleuthkit/autopsy/centralrepository/optionspanel/EamDbSettingsDialog.java +++ b/Core/src/org/sleuthkit/autopsy/centralrepository/optionspanel/EamDbSettingsDialog.java @@ -43,6 +43,7 @@ import javax.swing.event.DocumentListener; import javax.swing.filechooser.FileFilter; import org.openide.util.NbBundle; import org.openide.util.NbBundle.Messages; +import org.openide.windows.TopComponent; import org.openide.windows.WindowManager; import org.sleuthkit.autopsy.centralrepository.datamodel.CentralRepoDbChoice; import org.sleuthkit.autopsy.centralrepository.datamodel.CentralRepoDbManager; @@ -660,6 +661,8 @@ public class EamDbSettingsDialog extends JDialog { * found. */ private static boolean testStatusAndCreate(Component parent, CentralRepoDbManager manager, EamDbSettingsDialog dialog) { + closePersonasTopComponent(); + parent.setCursor(Cursor.getPredefinedCursor(Cursor.WAIT_CURSOR)); manager.testStatus(); @@ -690,6 +693,21 @@ public class EamDbSettingsDialog extends JDialog { parent.setCursor(Cursor.getPredefinedCursor(Cursor.DEFAULT_CURSOR)); return true; } + + + + /** + * Closes Personas top component if it exists. + */ + private static void closePersonasTopComponent() { + SwingUtilities.invokeLater(() -> { + TopComponent personasWindow = WindowManager.getDefault().findTopComponent("PersonasTopComponent"); + if (personasWindow != null && personasWindow.isOpened()) { + personasWindow.close(); + } + }); + } + /** * This method returns if changes to the central repository configuration diff --git a/Core/src/org/sleuthkit/autopsy/centralrepository/persona/Bundle.properties-MERGED b/Core/src/org/sleuthkit/autopsy/centralrepository/persona/Bundle.properties-MERGED index fcc302c955..e8a7f9718a 100644 --- a/Core/src/org/sleuthkit/autopsy/centralrepository/persona/Bundle.properties-MERGED +++ b/Core/src/org/sleuthkit/autopsy/centralrepository/persona/Bundle.properties-MERGED @@ -6,6 +6,8 @@ AddMetadataDialog_empty_name_Title=Missing field(s) CreatePersonaAccountDialog.title.text=Create Account CreatePersonaAccountDialog_error_msg=Failed to create account. 
CreatePersonaAccountDialog_error_title=Account failure +CreatePersonaAccountDialog_invalid_account_msg=Account identifier is not valid. +CreatePersonaAccountDialog_invalid_account_Title=Invalid account identifier CreatePersonaAccountDialog_success_msg=Account added. CreatePersonaAccountDialog_success_title=Account added CTL_OpenPersonas=Personas @@ -132,4 +134,4 @@ PersonasTopComponent_delete_exception_Title=Delete failure PersonasTopComponent_Name=Personas PersonasTopComponent_noCR_msg=Central Repository is not enabled. PersonasTopComponent_search_exception_msg=Failed to search personas. -PersonasTopComponent_search_exception_Title=Search failure +PersonasTopComponent_search_exception_Title=There was a failure during the search. Try opening a case to fully initialize the central repository database. diff --git a/Core/src/org/sleuthkit/autopsy/centralrepository/persona/CreatePersonaAccountDialog.java b/Core/src/org/sleuthkit/autopsy/centralrepository/persona/CreatePersonaAccountDialog.java index ecb848da61..cfdf990710 100644 --- a/Core/src/org/sleuthkit/autopsy/centralrepository/persona/CreatePersonaAccountDialog.java +++ b/Core/src/org/sleuthkit/autopsy/centralrepository/persona/CreatePersonaAccountDialog.java @@ -36,6 +36,7 @@ import org.sleuthkit.autopsy.centralrepository.datamodel.CentralRepoAccount.Cent import org.sleuthkit.autopsy.centralrepository.datamodel.CentralRepoException; import org.sleuthkit.autopsy.centralrepository.datamodel.CentralRepository; import org.sleuthkit.autopsy.coreutils.Logger; +import org.sleuthkit.datamodel.InvalidAccountIDException; /** * Configuration dialog for creating an account. @@ -216,7 +217,8 @@ public class CreatePersonaAccountDialog extends JDialog { @Messages({ "CreatePersonaAccountDialog_error_title=Account failure", "CreatePersonaAccountDialog_error_msg=Failed to create account.", - }) + "CreatePersonaAccountDialog_invalid_account_Title=Invalid account identifier", + "CreatePersonaAccountDialog_invalid_account_msg=Account identifier is not valid.",}) private CentralRepoAccount createAccount(CentralRepoAccount.CentralRepoAccountType type, String identifier) { CentralRepoAccount ret = null; try { @@ -227,8 +229,14 @@ public class CreatePersonaAccountDialog extends JDialog { } catch (CentralRepoException e) { logger.log(Level.SEVERE, "Failed to create account", e); JOptionPane.showMessageDialog(this, - Bundle.CreatePersonaAccountDialog_error_title(), Bundle.CreatePersonaAccountDialog_error_msg(), + Bundle.CreatePersonaAccountDialog_error_title(), + JOptionPane.ERROR_MESSAGE); + } catch (InvalidAccountIDException e) { + logger.log(Level.WARNING, "Invalid account identifier", e); + JOptionPane.showMessageDialog(this, + Bundle.CreatePersonaAccountDialog_invalid_account_msg(), + Bundle.CreatePersonaAccountDialog_invalid_account_Title(), JOptionPane.ERROR_MESSAGE); } return ret; diff --git a/Core/src/org/sleuthkit/autopsy/centralrepository/persona/PersonasTopComponent.java b/Core/src/org/sleuthkit/autopsy/centralrepository/persona/PersonasTopComponent.java index d38b379078..e051529f11 100644 --- a/Core/src/org/sleuthkit/autopsy/centralrepository/persona/PersonasTopComponent.java +++ b/Core/src/org/sleuthkit/autopsy/centralrepository/persona/PersonasTopComponent.java @@ -20,6 +20,8 @@ package org.sleuthkit.autopsy.centralrepository.persona; import java.awt.event.ActionEvent; import java.awt.event.ActionListener; +import java.awt.event.ComponentAdapter; +import java.awt.event.ComponentEvent; import java.util.ArrayList; import java.util.Collection; import 
java.util.List; @@ -60,28 +62,6 @@ public final class PersonasTopComponent extends TopComponent { private List currentResults = null; private Persona selectedPersona = null; - /** - * Listens for when this component will be rendered and executes a search to - * update gui when it is displayed. - */ - private final AncestorListener onAddListener = new AncestorListener() { - @Override - public void ancestorAdded(AncestorEvent event) { - resetSearchControls(); - setKeywordSearchEnabled(false, true); - } - - @Override - public void ancestorRemoved(AncestorEvent event) { - //Empty - } - - @Override - public void ancestorMoved(AncestorEvent event) { - //Empty - } - }; - @Messages({ "PersonasTopComponent_Name=Personas", "PersonasTopComponent_delete_exception_Title=Delete failure", @@ -165,7 +145,17 @@ public final class PersonasTopComponent extends TopComponent { } }); - addAncestorListener(onAddListener); + /** + * Listens for when this component will be rendered and executes a + * search to update gui when it is displayed. + */ + addComponentListener(new ComponentAdapter() { + @Override + public void componentShown(ComponentEvent e) { + resetSearchControls(); + setKeywordSearchEnabled(false, true); + } + }); } /** @@ -276,7 +266,7 @@ public final class PersonasTopComponent extends TopComponent { } @Messages({ - "PersonasTopComponent_search_exception_Title=Search failure", + "PersonasTopComponent_search_exception_Title=There was a failure during the search. Try opening a case to fully initialize the central repository database.", "PersonasTopComponent_search_exception_msg=Failed to search personas.", "PersonasTopComponent_noCR_msg=Central Repository is not enabled.",}) private void executeSearch() { diff --git a/Core/src/org/sleuthkit/autopsy/communications/CVTFilterRefresher.java b/Core/src/org/sleuthkit/autopsy/communications/CVTFilterRefresher.java index 1ba9d6c81e..b5560ab825 100755 --- a/Core/src/org/sleuthkit/autopsy/communications/CVTFilterRefresher.java +++ b/Core/src/org/sleuthkit/autopsy/communications/CVTFilterRefresher.java @@ -65,8 +65,10 @@ abstract class CVTFilterRefresher implements RefreshThrottler.Refresher { try (SleuthkitCase.CaseDbQuery dbQuery = skCase.executeQuery("SELECT MAX(date_time) as end, MIN(date_time) as start from account_relationships")) { // ResultSet is closed by CasDBQuery ResultSet rs = dbQuery.getResultSet(); + rs.next(); startTime = rs.getInt("start"); // NON-NLS endTime = rs.getInt("end"); // NON-NLS + } // Get the devices with CVT artifacts List deviceObjIds = new ArrayList<>(); diff --git a/Core/src/org/sleuthkit/autopsy/communications/FiltersPanel.java b/Core/src/org/sleuthkit/autopsy/communications/FiltersPanel.java index 7bdf3a46e3..90c41e467f 100644 --- a/Core/src/org/sleuthkit/autopsy/communications/FiltersPanel.java +++ b/Core/src/org/sleuthkit/autopsy/communications/FiltersPanel.java @@ -269,6 +269,7 @@ final public class FiltersPanel extends JPanel { * Populate the Account Types filter widgets. * * @param accountTypesInUse List of accountTypes currently in use + * @param checkNewOnes * * @return True, if a new accountType was found */ @@ -314,9 +315,8 @@ final public class FiltersPanel extends JPanel { /** * Populate the devices filter widgets. * - * @param selected Sets the initial state of device check box. - * @param sleuthkitCase The sleuthkit case for containing the data source - * information. 
+     * @param dataSourceMap
+     * @param checkNewOnes
+     *
      * @return true if a new device was found
      */
diff --git a/Core/src/org/sleuthkit/autopsy/communications/relationships/Bundle.properties b/Core/src/org/sleuthkit/autopsy/communications/relationships/Bundle.properties
index b14d8a2688..6ecb170c10 100755
--- a/Core/src/org/sleuthkit/autopsy/communications/relationships/Bundle.properties
+++ b/Core/src/org/sleuthkit/autopsy/communications/relationships/Bundle.properties
@@ -9,7 +9,6 @@ SummaryViewer.callLogsLabel.text=Call Logs:
 ThreadRootMessagePanel.showAllCheckBox.text=Show All Messages
 ThreadPane.backButton.text=<---
 SummaryViewer.caseReferencesPanel.border.title=Other Occurrences
-SummaryViewer.fileReferencesPanel.border.title=File References in Current Case
 MessageViewer.threadsLabel.text=Select a Thread to View
 MessageViewer.threadNameLabel.text=
 MessageViewer.showingMessagesLabel.text=Showing Messages for Thread:
@@ -27,3 +26,5 @@ SummaryViewer.referencesLabel.text=Communication References:
 SummaryViewer.referencesDataLabel.text=
 SummaryViewer.contactsLabel.text=Book Entries:
 SummaryViewer.accountCountry.text=
+SummaryViewer.fileRefPane.border.title=File References in Current Case
+SummaryViewer.selectAccountFileRefLabel.text=
diff --git a/Core/src/org/sleuthkit/autopsy/communications/relationships/Bundle.properties-MERGED b/Core/src/org/sleuthkit/autopsy/communications/relationships/Bundle.properties-MERGED
--- a/Core/src/org/sleuthkit/autopsy/communications/relationships/Bundle.properties-MERGED
+++ b/Core/src/org/sleuthkit/autopsy/communications/relationships/Bundle.properties-MERGED
 SummaryViewer_FileRefNameColumn_Title=Path
 SummaryViewer_TabTitle=Summary
 ThreadRootMessagePanel.showAllCheckBox.text=Show All Messages
 ThreadPane.backButton.text=<---
 SummaryViewer.caseReferencesPanel.border.title=Other Occurrences
-SummaryViewer.fileReferencesPanel.border.title=File References in Current Case
 MessageViewer.threadsLabel.text=Select a Thread to View
 MessageViewer.threadNameLabel.text=
 MessageViewer.showingMessagesLabel.text=Showing Messages for Thread:
@@ -73,3 +73,5 @@ SummaryViewer.referencesLabel.text=Communication References:
 SummaryViewer.referencesDataLabel.text=
 SummaryViewer.contactsLabel.text=Book Entries:
 SummaryViewer.accountCountry.text=
+SummaryViewer.fileRefPane.border.title=File Referernce(s) in Current Case
+SummaryViewer.selectAccountFileRefLabel.text=
diff --git a/release_scripts/localization_scripts/README.md b/release_scripts/localization_scripts/README.md
new file mode 100644
--- /dev/null
+++ b/release_scripts/localization_scripts/README.md
+... -l <language>` to update properties files based on the newly generated csv file. The csv file should be formatted such that the columns are bundle relative path, property file key, translated value, and the id of the latest commit that these changes represent. The commit id only needs to be in the header row. The output path should be specified as a relative path with the dot slash notation (i.e. `./outputpath.csv`) or an absolute path.
+
+## Localization Generation for the First Time
+First-time updates should follow a similar procedure except that instead of calling `diffscript.py`, call `python3 allbundlesscript <output path>` to generate a csv file with relative paths of bundle files, property file keys, and property file values. The output path should be specified as a relative path with the dot slash notation (i.e. `./inputpath.csv`) or an absolute path.
+
+## Unit Tests
+Unit tests can be run from this directory using `python3 -m unittest`.
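+
+As a concrete illustration of the generation steps above (the output paths and the language code below are only examples, not required values), a first-time export followed by a language-specific diff export could be run as:
+
+```
+python3 allbundlesscript.py ./allbundles.csv
+python3 diffscript.py ./updates.csv -l ja
+```
+
+With commits included, the header row of the diff csv follows the columns defined in `itemchange.py` plus the two commit ids, e.g. `Relative Path,Key,Change Type,Previous Value,Current Value,<first commit id>,<second commit id>`.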
\ No newline at end of file diff --git a/release_scripts/localization_scripts/__init__.py b/release_scripts/localization_scripts/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/release_scripts/localization_scripts/allbundlesscript.py b/release_scripts/localization_scripts/allbundlesscript.py new file mode 100644 index 0000000000..139dec15a5 --- /dev/null +++ b/release_scripts/localization_scripts/allbundlesscript.py @@ -0,0 +1,73 @@ +"""This script finds all '.properties-MERGED' files and writes relative path, key, and value to a CSV file. +This script requires the python libraries: gitpython and jproperties. As a consequence, it also requires +git >= 1.7.0 and python >= 3.4. This script relies on fetching 'HEAD' from current branch. So make sure +repo is on correct branch (i.e. develop). +""" + +import sys + +from envutil import get_proj_dir +from fileutil import get_filename_addition, OMITTED_ADDITION +from gitutil import get_property_file_entries, get_commit_id, get_git_root +from csvutil import records_to_csv +from typing import Union +import re +import argparse + + +def write_items_to_csv(repo_path: str, output_path: str, show_commit: bool, value_regex: Union[str, None] = None): + """Determines the contents of '.properties-MERGED' files and writes to a csv file. + + Args: + repo_path (str): The local path to the git repo. + output_path (str): The output path for the csv file. + show_commit (bool): Whether or not to include the commit id in the header + value_regex (Union[str, None]): If non-none, only key value pairs where the value is a regex match with this + value will be included. + """ + + row_header = ['Relative path', 'Key', 'Value'] + if show_commit: + row_header.append(get_commit_id(repo_path, 'HEAD')) + + rows = [] + omitted = [] + + for entry in get_property_file_entries(repo_path): + new_entry = [entry.rel_path, entry.key, entry.value] + if value_regex is None or re.match(value_regex, entry.value): + rows.append(new_entry) + else: + omitted.append(new_entry) + + records_to_csv(output_path, [row_header] + rows) + + if len(omitted) > 0: + records_to_csv(get_filename_addition(output_path, OMITTED_ADDITION), [row_header] + omitted) + + +def main(): + # noinspection PyTypeChecker + parser = argparse.ArgumentParser(description='Gathers all key-value pairs within .properties-MERGED files into ' + 'one csv file.', + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument(dest='output_path', type=str, help='The path to the output csv file. The output path should be' + ' specified as a relative path with the dot slash notation ' + '(i.e. \'./outputpath.csv\') or an absolute path.') + parser.add_argument('-r', '--repo', dest='repo_path', type=str, required=False, + help='The path to the repo. 
If not specified, path of script is used.')
+    parser.add_argument('-nc', '--no_commit', dest='no_commit', action='store_true', default=False,
+                        required=False, help="Suppresses adding commits to the generated csv header.")
+
+    args = parser.parse_args()
+    repo_path = args.repo_path if args.repo_path is not None else get_git_root(get_proj_dir())
+    output_path = args.output_path
+    show_commit = not args.no_commit
+
+    write_items_to_csv(repo_path, output_path, show_commit)
+
+    sys.exit(0)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/release_scripts/localization_scripts/csvutil.py b/release_scripts/localization_scripts/csvutil.py
new file mode 100644
index 0000000000..daa66f5396
--- /dev/null
+++ b/release_scripts/localization_scripts/csvutil.py
@@ -0,0 +1,51 @@
+"""Provides tools for parsing and writing to a csv file.
+"""
+from typing import List, Iterable, Tuple
+import csv
+import os
+
+
+def records_to_csv(output_path: str, rows: Iterable[List[str]]):
+    """Writes rows to a csv file at the specified path.
+
+    Args:
+        output_path (str): The path where the csv file will be written.
+        rows (Iterable[List[str]]): The rows to be written. Each row of a
+                                    list of strings will be written according
+                                    to their index (i.e. column 3 will be index 2).
+    """
+
+    parent_dir, file = os.path.split(output_path)
+    if not os.path.exists(parent_dir):
+        os.makedirs(parent_dir)
+
+    with open(output_path, 'w', encoding="utf-8-sig", newline='') as csvfile:
+        writer = csv.writer(csvfile)
+        for row in rows:
+            writer.writerow(row)
+
+
+def csv_to_records(input_path: str, header_row: bool) -> Tuple[List[List[str]], List[str]]:
+    """Reads rows from the csv file at the specified path.
+
+    Args:
+        input_path (str): The path of the csv file to be read.
+        header_row (bool): Whether or not there is a header row to be skipped.
+    """
+
+    with open(input_path, encoding='utf-8-sig') as csv_file:
+        csv_reader = csv.reader(csv_file, delimiter=',')
+
+        header = None
+        results = []
+        try:
+            for row in csv_reader:
+                if header_row:
+                    header = row
+                    header_row = False
+                else:
+                    results.append(row)
+        except Exception as e:
+            raise Exception("There was an error parsing csv {path}".format(path=input_path), e)
+
+    return results, header
diff --git a/release_scripts/localization_scripts/diffscript.py b/release_scripts/localization_scripts/diffscript.py
new file mode 100644
index 0000000000..2713fef518
--- /dev/null
+++ b/release_scripts/localization_scripts/diffscript.py
@@ -0,0 +1,97 @@
+"""This script determines the updated, added, and deleted properties from the '.properties-MERGED' files
+and generates a csv file containing the items changed. This script requires the python libraries:
+gitpython and jproperties. As a consequence, it also requires git >= 1.7.0 and python >= 3.4.
+"""
+import re
+import sys
+from envutil import get_proj_dir
+from fileutil import get_filename_addition, OMITTED_ADDITION
+from gitutil import get_property_files_diff, get_commit_id, get_git_root
+from itemchange import ItemChange, ChangeType
+from csvutil import records_to_csv
+import argparse
+from typing import Union
+from langpropsutil import get_commit_for_language, LANG_FILENAME
+
+
+def write_diff_to_csv(repo_path: str, output_path: str, commit_1_id: str, commit_2_id: str, show_commits: bool,
+                      value_regex: Union[str, None] = None):
+    """Determines the changes made in '.properties-MERGED' files from one commit to another commit.
+
+    Args:
+        repo_path (str): The local path to the git repo.
+        output_path (str): The output path for the csv file.
+ commit_1_id (str): The initial commit for the diff. + commit_2_id (str): The latest commit for the diff. + show_commits (bool): Show commits in the header row. + value_regex (Union[str, None]): If non-none, only key value pairs where the value is a regex match with this + value will be included. + """ + + row_header = ItemChange.get_headers() + if show_commits: + row_header += [get_commit_id(repo_path, commit_1_id), get_commit_id(repo_path, commit_2_id)] + + rows = [] + omitted = [] + + for entry in get_property_files_diff(repo_path, commit_1_id, commit_2_id): + new_entry = entry.get_row() + if value_regex is not None and (entry.type == ChangeType.DELETION or not re.match(value_regex, entry.cur_val)): + omitted.append(new_entry) + else: + rows.append(new_entry) + + records_to_csv(output_path, [row_header] + rows) + + if len(omitted) > 0: + records_to_csv(get_filename_addition(output_path, OMITTED_ADDITION), [row_header] + omitted) + + +def main(): + # noinspection PyTypeChecker + parser = argparse.ArgumentParser(description="Determines the updated, added, and deleted properties from the " + "'.properties-MERGED' files and generates a csv file containing " + "the items changed.", + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument(dest='output_path', type=str, help='The path to the output csv file. The output path should ' + 'be specified as a relative path with the dot slash notation' + ' (i.e. \'./outputpath.csv\') or an absolute path.') + + parser.add_argument('-r', '--repo', dest='repo_path', type=str, required=False, + help='The path to the repo. If not specified, path of script is used.') + parser.add_argument('-fc', '--first-commit', dest='commit_1_id', type=str, required=False, + help='The commit for previous release. This flag or the language flag need to be specified' + ' in order to determine a start point for the difference.') + parser.add_argument('-lc', '--latest-commit', dest='commit_2_id', type=str, default='HEAD', required=False, + help='The commit for current release.') + parser.add_argument('-nc', '--no-commits', dest='no_commits', action='store_true', default=False, + required=False, help="Suppresses adding commits to the generated csv header.") + parser.add_argument('-l', '--language', dest='language', type=str, default=None, required=False, + help='Specify the language in order to determine the first commit to use (i.e. \'ja\' for ' + 'Japanese. This flag overrides the first-commit flag.') + + args = parser.parse_args() + repo_path = args.repo_path if args.repo_path is not None else get_git_root(get_proj_dir()) + output_path = args.output_path + commit_1_id = args.commit_1_id + lang = args.language + if lang is not None: + commit_1_id = get_commit_for_language(lang) + + if commit_1_id is None: + print('Either the first commit or language flag need to be specified. If specified, the language file, ' + + LANG_FILENAME + ', may not have the latest commit for the language.', file=sys.stderr) + parser.print_help(sys.stderr) + sys.exit(1) + + commit_2_id = args.commit_2_id + show_commits = not args.no_commits + + write_diff_to_csv(repo_path, output_path, commit_1_id, commit_2_id, show_commits) + + sys.exit(0) + + +if __name__ == "__main__": + main() diff --git a/release_scripts/localization_scripts/envutil.py b/release_scripts/localization_scripts/envutil.py new file mode 100644 index 0000000000..cec2a00eda --- /dev/null +++ b/release_scripts/localization_scripts/envutil.py @@ -0,0 +1,17 @@ +"""Functions relating to the project environment. 
+""" + +import pathlib +from typing import Union + + +def get_proj_dir(path: Union[pathlib.PurePath, str] = __file__) -> str: + """ + Gets parent directory of this file (and subsequently, the project). + Args: + path: Can be overridden to provide a different file. This will return the parent of that file in that instance. + + Returns: + The project folder or the parent folder of the file provided. + """ + return str(pathlib.Path(path).parent.absolute()) diff --git a/release_scripts/localization_scripts/fileutil.py b/release_scripts/localization_scripts/fileutil.py new file mode 100644 index 0000000000..5139812db2 --- /dev/null +++ b/release_scripts/localization_scripts/fileutil.py @@ -0,0 +1,63 @@ +import os +from typing import Union, Tuple +from pathlib import Path + + +def get_path_pieces(orig_path: str) -> Tuple[str, Union[str, None], Union[str, None]]: + """Retrieves path pieces. This is a naive approach as it determines if a file is present based on the + presence of an extension. + Args: + orig_path: The original path to deconstruct. + + Returns: A tuple of directory, filename and extension. If no extension is present, filename and extension are None. + + """ + + potential_parent_dir, orig_file = os.path.split(str(Path(orig_path))) + filename, file_extension = os.path.splitext(orig_file) + if file_extension.startswith('.'): + file_extension = file_extension[1:] + + if file_extension is None or len(file_extension) < 1: + return str(Path(orig_path)), None, None + else: + return potential_parent_dir, filename, file_extension + + +def get_new_path(orig_path: str, new_filename: str) -> str: + """Obtains a new path. This tries to determine if the provided path is a directory or filename (has an + extension containing '.') then constructs the new path with the old parent directory and the new filename. + + Args: + orig_path (str): The original path. + new_filename (str): The new filename to use. + + Returns: + str: The new path. + """ + + parent_dir, filename, ext = get_path_pieces(orig_path) + return str(Path(parent_dir) / Path(new_filename)) + + +# For use with creating csv filenames for entries that have been omitted. +OMITTED_ADDITION = '-omitted' + + +def get_filename_addition(orig_path: str, filename_addition: str) -> str: + """Gets filename with addition. So if item is '/path/name.ext' and the filename_addition is '-add', the new result + would be '/path/name-add.ext'. + + Args: + orig_path (str): The original path. + filename_addition (str): The new addition. + + Returns: The altered path. + + """ + parent_dir, filename, extension = get_path_pieces(orig_path) + if filename is None: + return str(Path(orig_path + filename_addition)) + else: + ext = '' if extension is None else extension + return str(Path(parent_dir) / Path('{0}{1}.{2}'.format(filename, filename_addition, ext))) diff --git a/release_scripts/localization_scripts/gitutil.py b/release_scripts/localization_scripts/gitutil.py new file mode 100644 index 0000000000..43c20e2ce0 --- /dev/null +++ b/release_scripts/localization_scripts/gitutil.py @@ -0,0 +1,168 @@ +"""Functions relating to using git and GitPython with an existing repo. 
+""" + +from git import Repo, Diff, Blob +from typing import List, Union, Iterator, Tuple, Any +from itemchange import ItemChange, get_changed +from pathlib import Path +from propentry import PropEntry +from propsutil import DEFAULT_PROPS_EXTENSION, get_entry_dict + + +def get_text(blob: Blob) -> str: + return blob.data_stream.read().decode('utf-8') + + +def get_git_root(child_path: str) -> str: + """ + Taken from https://stackoverflow.com/questions/22081209/find-the-root-of-the-git-repository-where-the-file-lives, + this obtains the root path of the git repo in which this file exists. + Args: + child_path: The path of a child within the repo. + + Returns: The repo root path. + + """ + git_repo = Repo(child_path, search_parent_directories=True) + git_root = git_repo.git.rev_parse("--show-toplevel") + return git_root + + +def get_changed_from_diff(rel_path: str, diff: Diff) -> List[ItemChange]: + """Determines changes from a git python diff. + + Args: + rel_path (str): The relative path for the properties file. + diff (Diff): The git python diff. + + Returns: + List[ItemChange]: The changes in properties. + """ + + # an item was added + if diff.change_type == 'A': + changes = get_changed(rel_path, '', get_text(diff.b_blob)) + # an item was deleted + elif diff.change_type == 'D': + changes = get_changed(rel_path, get_text(diff.a_blob), '') + # an item was modified + elif diff.change_type == 'M': + changes = get_changed(rel_path, get_text( + diff.a_blob), get_text(diff.b_blob)) + else: + changes = [] + + return changes + + +def get_rel_path(diff: Diff) -> Union[str, None]: + """Determines the relative path based on the git python. + + Args: + diff: The git python diff. + + Returns: + str: The determined relative path. + """ + if diff.b_path is not None: + return diff.b_path + elif diff.a_path is not None: + return diff.a_path + else: + return None + + +def get_diff(repo_path: str, commit_1_id: str, commit_2_id: str) -> Any: + """Determines the diff between two commits. + + Args: + repo_path (str): The local path to the git repo. + commit_1_id (str): The initial commit for the diff. + commit_2_id (str): The latest commit for the diff. + + Returns: + The determined diff. + """ + repo = Repo(repo_path, search_parent_directories=True) + commit_1 = repo.commit(commit_1_id) + commit_2 = repo.commit(commit_2_id) + return commit_1.diff(commit_2) + + +def get_commit_id(repo_path: str, commit_id: str) -> str: + """Determines the hash for head commit. This does things like fetch the id of head if 'HEAD' is provided. + + Args: + repo_path: The path to the repo. + commit_id: The id for the commit. + + Returns: + The hash for the commit in the repo. + """ + repo = Repo(repo_path, search_parent_directories=True) + commit = repo.commit(commit_id.strip()) + return str(commit.hexsha) + + +def get_property_files_diff(repo_path: str, commit_1_id: str, commit_2_id: str, + property_file_extension: str = DEFAULT_PROPS_EXTENSION) -> Iterator[ItemChange]: + """Determines the item changes within property files as a diff between two commits. + + Args: + repo_path (str): The repo path. + commit_1_id (str): The first git commit. + commit_2_id (str): The second git commit. + property_file_extension (str): The extension for properties files to gather. + + Returns: + All found item changes in values of keys between the property files. + """ + + diffs = get_diff(repo_path, commit_1_id.strip(), commit_2_id.strip()) + for diff in diffs: + rel_path = get_rel_path(diff) + if rel_path is None or not rel_path.endswith('.' 
+ property_file_extension): + continue + + yield from get_changed_from_diff(rel_path, diff) + + +def list_paths(root_tree, path: Path = Path('.')) -> Iterator[Tuple[str, Blob]]: + """ + Given the root path to serve as a prefix, walks the tree of a git commit returning all files and blobs. + Repurposed from: https://www.enricozini.org/blog/2019/debian/gitpython-list-all-files-in-a-git-commit/ + + Args: + root_tree: The tree of the commit to walk. + path: The path to use as a prefix. + + Returns: A tuple iterator where each tuple consists of the path as a string and a blob of the file. + + """ + for blob in root_tree.blobs: + ret_item = (str(path / blob.name), blob) + yield ret_item + for tree in root_tree.trees: + yield from list_paths(tree, path / tree.name) + + +def get_property_file_entries(repo_path: str, at_commit: str = 'HEAD', + property_file_extension: str = DEFAULT_PROPS_EXTENSION) -> Iterator[PropEntry]: + """ + Retrieves all property files entries returning as an iterator of PropEntry objects. + + Args: + repo_path: The path to the git repo. + at_commit: The commit to use. + property_file_extension: The extension to use for scanning for property files. + + Returns: An iterator of PropEntry objects. + + """ + repo = Repo(repo_path, search_parent_directories=True) + commit = repo.commit(at_commit.strip()) + for item in list_paths(commit.tree): + path, blob = item + if path.endswith(property_file_extension): + for key, val in get_entry_dict(get_text(blob)).items(): + yield PropEntry(path, key, val) diff --git a/release_scripts/localization_scripts/itemchange.py b/release_scripts/localization_scripts/itemchange.py new file mode 100644 index 0000000000..27448cb529 --- /dev/null +++ b/release_scripts/localization_scripts/itemchange.py @@ -0,0 +1,103 @@ +from typing import Iterator, List, Union +from propsutil import get_entry_dict +from enum import Enum + + +class ChangeType(Enum): + """Describes the nature of a change in the properties file.""" + ADDITION = 'ADDITION' + DELETION = 'DELETION' + CHANGE = 'CHANGE' + + def __str__(self): + return str(self.value) + + +class ItemChange: + rel_path: str + key: str + prev_val: Union[str, None] + cur_val: Union[str, None] + type: ChangeType + + def __init__(self, rel_path: str, key: str, prev_val: str, cur_val: str): + """Describes the change that occurred for a particular key of a properties file. + + Args: + rel_path (str): The relative path of the properties file. + key (str): The key in the properties file. + prev_val (str): The previous value for the key. + cur_val (str): The current value for the key. + """ + self.rel_path = rel_path + self.key = key + self.prev_val = prev_val + self.cur_val = cur_val + if cur_val is not None and prev_val is None: + self.type = ChangeType.ADDITION + elif cur_val is None and prev_val is not None: + self.type = ChangeType.DELETION + else: + self.type = ChangeType.CHANGE + + @staticmethod + def get_headers() -> List[str]: + """Returns the csv headers to insert when serializing a list of ItemChange objects to csv. + + Returns: + List[str]: The column headers + """ + return ['Relative Path', 'Key', 'Change Type', 'Previous Value', 'Current Value'] + + def get_row(self) -> List[str]: + """Returns the list of values to be entered as a row in csv serialization. + + Returns: + List[str]: The list of values to be entered as a row in csv serialization. 
+ """ + return [ + self.rel_path, + self.key, + self.type, + self.prev_val, + self.cur_val] + + +def get_item_change(rel_path: str, key: str, prev_val: str, cur_val: str) -> Union[ItemChange, None]: + """Returns an ItemChange object if the previous value is not equal to the current value. + + Args: + rel_path (str): The relative path for the properties file. + key (str): The key within the properties file for this potential change. + prev_val (str): The previous value. + cur_val (str): The current value. + + Returns: + ItemChange: The ItemChange object or None if values are the same. + """ + if prev_val == cur_val: + return None + else: + return ItemChange(rel_path, key, prev_val, cur_val) + + +def get_changed(rel_path: str, a_str: str, b_str: str) -> Iterator[ItemChange]: + """Given the relative path of the properties file that has been provided, + determines the property items that have changed between the two property + file strings. + + Args: + rel_path (str): The relative path for the properties file. + a_str (str): The string representing the original state of the file. + b_str (str): The string representing the current state of the file. + + Returns: + List[ItemChange]: The changes determined. + """ + print('Retrieving changes for {0}...'.format(rel_path)) + a_dict = get_entry_dict(a_str) + b_dict = get_entry_dict(b_str) + all_keys = set().union(a_dict.keys(), b_dict.keys()) + mapped = map(lambda key: get_item_change( + rel_path, key, a_dict.get(key), b_dict.get(key)), all_keys) + return filter(lambda entry: entry is not None, mapped) diff --git a/release_scripts/localization_scripts/langpropsutil.py b/release_scripts/localization_scripts/langpropsutil.py new file mode 100644 index 0000000000..841574c9e9 --- /dev/null +++ b/release_scripts/localization_scripts/langpropsutil.py @@ -0,0 +1,34 @@ +"""Functions handling retrieving and storing when a language was last updated. 
+""" +from typing import Union +from envutil import get_proj_dir +from propsutil import get_entry_dict_from_path, update_entry_dict +from os import path + + +LANG_FILENAME = 'lastupdated.properties' + + +def _get_last_update_key(language: str) -> str: + return "bundles.{lang}.lastupdated".format(lang=language) + + +def _get_props_path(): + return path.join(get_proj_dir(), LANG_FILENAME) + + +def get_commit_for_language(language: str) -> Union[str, None]: + lang_dict = get_entry_dict_from_path(_get_props_path()) + if lang_dict is None: + return None + + key = _get_last_update_key(language) + if key not in lang_dict: + return None + + return lang_dict[key] + + +def set_commit_for_language(language: str, latest_commit: str): + key = _get_last_update_key(language) + update_entry_dict({key: latest_commit}, _get_props_path()) diff --git a/release_scripts/localization_scripts/lastupdated.properties b/release_scripts/localization_scripts/lastupdated.properties new file mode 100644 index 0000000000..db7e961472 --- /dev/null +++ b/release_scripts/localization_scripts/lastupdated.properties @@ -0,0 +1,2 @@ +# in format of bundles..lastupdated= +bundles.ja.lastupdated=d9a37c48f4bd0dff014eead73a0eb730c875ed9f \ No newline at end of file diff --git a/release_scripts/localization_scripts/propentry.py b/release_scripts/localization_scripts/propentry.py new file mode 100644 index 0000000000..99c00f749e --- /dev/null +++ b/release_scripts/localization_scripts/propentry.py @@ -0,0 +1,19 @@ +class PropEntry: + rel_path: str + key: str + value: str + should_delete: bool + + def __init__(self, rel_path: str, key: str, value: str, should_delete: bool = False): + """Defines a property file entry to be updated in a property file. + + Args: + rel_path (str): The relative path for the property file. + key (str): The key for the entry. + value (str): The value for the entry. + should_delete (bool, optional): Whether or not the key should simply be deleted. Defaults to False. + """ + self.rel_path = rel_path + self.key = key + self.value = value + self.should_delete = should_delete diff --git a/release_scripts/localization_scripts/propsutil.py b/release_scripts/localization_scripts/propsutil.py new file mode 100644 index 0000000000..3de52a7966 --- /dev/null +++ b/release_scripts/localization_scripts/propsutil.py @@ -0,0 +1,97 @@ +"""Provides tools for reading from and writing to java properties files. +""" +from typing import Dict, Union, IO +from jproperties import Properties +import os + +# The default extension for property files in autopsy repo + +DEFAULT_PROPS_EXTENSION = 'properties-MERGED' + + +def get_lang_bundle_name(language: str) -> str: + """ + Returns the bundle name for the specific language identifier provided. + Args: + language: The language identifier (i.e. 'ja' for Japanese) + + Returns: + The bundle name + """ + return 'Bundle_{lang}.properties'.format(lang=language) + + +def get_entry_dict(file_contents: Union[str, IO]) -> Dict[str, str]: + """Retrieves a dictionary mapping the properties represented in the string. + + Args: + file_contents: The string of the properties file or the file handle. + + Returns: + Dict[str,str]: The mapping of keys to values in that properties file. 
+ """ + + props = Properties() + try: + props.load(file_contents) + except Exception as e: + raise Exception("There was an error loading properties file {file}".format(file=file_contents), e) + return props.properties + + +def get_entry_dict_from_path(props_path: str) -> Union[Dict[str, str], None]: + """ + Retrieves a dictionary mapping the properties represented in the string or None if no properties file can be found + at that path. + Args: + props_path: The path to the properties file. + + Returns: The entry dictionary for that properties file. + + """ + if os.path.isfile(props_path): + with open(props_path, "rb") as f: + return get_entry_dict(f) + else: + return None + + +def set_entry_dict(contents: Dict[str, str], file_path: str): + """Sets the property file to the key-value pairs of the contents dictionary. + + Args: + contents (Dict[str, str]): The dictionary whose contents will be the key value pairs of the properties file. + file_path (str): The path to the properties file to create. + """ + + props = Properties() + for key, val in contents.items(): + props[key] = val + + parent_dir, file = os.path.split(file_path) + if not os.path.exists(parent_dir): + os.makedirs(parent_dir) + + with open(file_path, "wb") as f: + props.store(f) + + +def update_entry_dict(contents: Dict[str, str], file_path: str): + """Updates the properties file at the given location with the key-value properties of contents. + Creates a new properties file at given path if none exists. + + Args: + contents (Dict[str, str]): The dictionary whose contents will be the key value pairs of the properties file. + file_path (str): The path to the properties file to create. + """ + contents_to_edit = contents.copy() + + cur_dict = get_entry_dict_from_path(file_path) + if cur_dict is None: + cur_dict = {} + for cur_key, cur_val in cur_dict.items(): + # only update contents if contents does not already have key + if cur_key not in contents_to_edit: + contents_to_edit[cur_key] = cur_val + + set_entry_dict(contents_to_edit, file_path) diff --git a/release_scripts/localization_scripts/test/__init__.py b/release_scripts/localization_scripts/test/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/release_scripts/localization_scripts/test/artifacts/.gitignore b/release_scripts/localization_scripts/test/artifacts/.gitignore new file mode 100644 index 0000000000..6caf68aff4 --- /dev/null +++ b/release_scripts/localization_scripts/test/artifacts/.gitignore @@ -0,0 +1 @@ +output \ No newline at end of file diff --git a/release_scripts/localization_scripts/test/test_csvutil.py b/release_scripts/localization_scripts/test/test_csvutil.py new file mode 100644 index 0000000000..a5ffd0cb71 --- /dev/null +++ b/release_scripts/localization_scripts/test/test_csvutil.py @@ -0,0 +1,41 @@ +import codecs +import os +import unittest +from typing import TypeVar, List + +from csvutil import records_to_csv, csv_to_records +from test.unittestutil import get_output_path + + +class CsvUtilTest(unittest.TestCase): + T = TypeVar('T') + + def assert_equal_arr(self, a: List[T], b: List[T]): + self.assertEqual(len(a), len(b), 'arrays are not equal length') + for i in range(0, len(a)): + if isinstance(a[i], list) and isinstance(b[i], list): + self.assert_equal_arr(a[i], b[i]) + else: + self.assertEqual(a[i], b[i], "Items: {0} and {1} at index {2} are not equal.".format(a[i], b[i], i)) + + def test_read_write(self): + data = [['header1', 'header2', 'header3', 'additional header'], + ['data1', 'data2', 'data3'], + ['', 'data2-1', 
'data2-2']] + + os.makedirs(get_output_path(), exist_ok=True) + test_path = get_output_path('test.csv') + records_to_csv(test_path, data) + + byte_inf = min(32, os.path.getsize(test_path)) + with open(test_path, 'rb') as bom_test_file: + raw = bom_test_file.read(byte_inf) + if not raw.startswith(codecs.BOM_UTF8): + self.fail("written csv does not have appropriate BOM") + + read_records_no_header, no_header = csv_to_records(test_path, header_row=False) + self.assert_equal_arr(read_records_no_header, data) + + read_rows, header = csv_to_records(test_path, header_row=True) + self.assert_equal_arr(header, data[0]) + self.assert_equal_arr(read_rows, [data[1], data[2]]) diff --git a/release_scripts/localization_scripts/test/test_fileutil.py b/release_scripts/localization_scripts/test/test_fileutil.py new file mode 100644 index 0000000000..290396eba7 --- /dev/null +++ b/release_scripts/localization_scripts/test/test_fileutil.py @@ -0,0 +1,52 @@ +import os +import unittest +from typing import Tuple +from pathlib import Path +from fileutil import get_path_pieces, get_new_path, get_filename_addition + + +def joined_paths(pieces: Tuple[str, str, str]) -> str: + return os.path.join(pieces[0], pieces[1] + '.' + pieces[2]) + + +PATH_PIECES1 = ('/test/folder', 'filename', 'ext') +PATH_PIECES2 = ('/test.test2/folder.test2', 'filename.test', 'ext') +PATH_PIECES3 = ('/test.test2/folder.test2/folder', None, None) + +PATH1 = joined_paths(PATH_PIECES1) +PATH2 = joined_paths(PATH_PIECES2) +PATH3 = PATH_PIECES3[0] + +ALL_ITEMS = [ + (PATH_PIECES1, PATH1), + (PATH_PIECES2, PATH2), + (PATH_PIECES3, PATH3) +] + + +class FileUtilTest(unittest.TestCase): + def test_get_path_pieces(self): + for (expected_path, expected_filename, expected_ext), path in ALL_ITEMS: + path, filename, ext = get_path_pieces(path) + self.assertEqual(path, str(Path(expected_path))) + self.assertEqual(filename, expected_filename) + self.assertEqual(ext, expected_ext) + + def test_get_new_path(self): + for (expected_path, expected_filename, expected_ext), path in ALL_ITEMS: + new_name = "newname.file" + new_path = get_new_path(path, new_name) + self.assertEqual(new_path, str(Path(expected_path) / Path(new_name))) + + def test_get_filename_addition(self): + for (expected_path, expected_filename, expected_ext), path in ALL_ITEMS: + addition = "addition" + new_path = get_filename_addition(path, addition) + if expected_filename is None or expected_ext is None: + expected_file_path = Path(expected_path + addition) + else: + expected_file_path = Path(expected_path) / Path("{file_name}{addition}.{extension}".format( + file_name=expected_filename, addition=addition, extension=expected_ext)) + + self.assertEqual( + new_path, str(expected_file_path)) diff --git a/release_scripts/localization_scripts/test/test_itemchange.py b/release_scripts/localization_scripts/test/test_itemchange.py new file mode 100644 index 0000000000..91b7846a11 --- /dev/null +++ b/release_scripts/localization_scripts/test/test_itemchange.py @@ -0,0 +1,96 @@ +import unittest +from typing import Dict + +from itemchange import get_changed, ChangeType + + +def dict_to_prop_str(this_dict: Dict[str, str]) -> str: + toret = '' + for key, val in this_dict.items(): + toret += "{key}={value}\n".format(key=key, value=val) + + return toret + + +class ItemChangeTest(unittest.TestCase): + def test_get_changed(self): + deleted_key = 'deleted.property.key' + deleted_val = 'will be deleted' + + change_key = 'change.property.key' + change_val_a = 'original value' + change_val_b = 'new value' + + 
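+        # change_key2 and change_key3 cover changes to and from empty-string values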
change_key2 = 'change2.property.key' + change_val2_a = 'original value 2' + change_val2_b = '' + + change_key3 = 'change3.property.key' + change_val3_a = '' + change_val3_b = 'cur value 3' + + addition_key = 'addition.property.key' + addition_new_val = 'the added value' + + same_key = 'samevalue.property.key' + same_value = 'the same value' + + same_key2 = 'samevalue2.property.key' + same_value2 = '' + + a_dict = { + deleted_key: deleted_val, + change_key: change_val_a, + change_key2: change_val2_a, + change_key3: change_val3_a, + same_key: same_value, + same_key2: same_value2 + } + + b_dict = { + change_key: change_val_b, + change_key2: change_val2_b, + change_key3: change_val3_b, + addition_key: addition_new_val, + same_key: same_value, + same_key2: same_value2 + } + + a_str = dict_to_prop_str(a_dict) + b_str = dict_to_prop_str(b_dict) + + rel_path = 'my/rel/path.properties' + + key_to_change = {} + + for item_change in get_changed(rel_path, a_str, b_str): + self.assertEqual(item_change.rel_path, rel_path) + key_to_change[item_change.key] = item_change + + deleted_item = key_to_change[deleted_key] + self.assertEqual(deleted_item.type, ChangeType.DELETION) + self.assertEqual(deleted_item.prev_val, deleted_val) + self.assertEqual(deleted_item.cur_val, None) + + addition_item = key_to_change[addition_key] + self.assertEqual(addition_item.type, ChangeType.ADDITION) + self.assertEqual(addition_item.prev_val, None) + self.assertEqual(addition_item.cur_val, addition_new_val) + + change_item = key_to_change[change_key] + self.assertEqual(change_item.type, ChangeType.CHANGE) + self.assertEqual(change_item.prev_val, change_val_a) + self.assertEqual(change_item.cur_val, change_val_b) + + change_item2 = key_to_change[change_key2] + self.assertEqual(change_item2.type, ChangeType.CHANGE) + self.assertEqual(change_item2.prev_val, change_val2_a) + self.assertEqual(change_item2.cur_val, change_val2_b) + + change_item3 = key_to_change[change_key3] + self.assertEqual(change_item3.type, ChangeType.CHANGE) + self.assertEqual(change_item3.prev_val, change_val3_a) + self.assertEqual(change_item3.cur_val, change_val3_b) + + self.assertTrue(same_key not in key_to_change) + self.assertTrue(same_key2 not in key_to_change) diff --git a/release_scripts/localization_scripts/test/test_propsutil.py b/release_scripts/localization_scripts/test/test_propsutil.py new file mode 100644 index 0000000000..f69129399a --- /dev/null +++ b/release_scripts/localization_scripts/test/test_propsutil.py @@ -0,0 +1,36 @@ +import os +import unittest + +from propsutil import set_entry_dict, get_entry_dict_from_path, update_entry_dict +from test.unittestutil import get_output_path + + +class PropsUtilTest(unittest.TestCase): + def test_update_entry_dict(self): + orig_key = 'orig_key' + orig_val = 'orig_val 片仮名 ' + to_be_altered_key = 'tobealteredkey' + first_val = 'not yet altered sábado' + second_val = 'altered Stöcke' + + orig_props = { + orig_key: orig_val, + to_be_altered_key: first_val + } + + update_props = { + to_be_altered_key: second_val + } + + os.makedirs(get_output_path(), exist_ok=True) + test_path = get_output_path('test.props') + set_entry_dict(orig_props, test_path) + + orig_read_props = get_entry_dict_from_path(test_path) + self.assertEqual(orig_read_props[orig_key], orig_val) + self.assertEqual(orig_read_props[to_be_altered_key], first_val) + + update_entry_dict(update_props, test_path) + updated_read_props = get_entry_dict_from_path(test_path) + self.assertEqual(updated_read_props[orig_key], orig_val) + 
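+        # the key targeted by update_props should now carry its new value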
        self.assertEqual(updated_read_props[to_be_altered_key], second_val)
diff --git a/release_scripts/localization_scripts/test/unittestutil.py b/release_scripts/localization_scripts/test/unittestutil.py
new file mode 100644
index 0000000000..19face5610
--- /dev/null
+++ b/release_scripts/localization_scripts/test/unittestutil.py
@@ -0,0 +1,14 @@
+import os
+from typing import Union
+
+from envutil import get_proj_dir
+
+TEST_ARTIFACT_FOLDER = 'artifacts'
+TEST_OUTPUT_FOLDER = 'output'
+
+
+def get_output_path(filename: Union[str, None] = None) -> str:
+    if filename is None:
+        return os.path.join(get_proj_dir(__file__), TEST_ARTIFACT_FOLDER, TEST_OUTPUT_FOLDER)
+    else:
+        return os.path.join(get_proj_dir(__file__), TEST_ARTIFACT_FOLDER, TEST_OUTPUT_FOLDER, filename)
diff --git a/release_scripts/localization_scripts/updatepropsscript.py b/release_scripts/localization_scripts/updatepropsscript.py
new file mode 100644
index 0000000000..3d8489af82
--- /dev/null
+++ b/release_scripts/localization_scripts/updatepropsscript.py
@@ -0,0 +1,259 @@
+"""This script updates '.properties-MERGED' files in the repo from a CSV file whose records contain the relative
+path, key, and value for each entry.
+This script requires the Python libraries: jproperties. It also requires Python 3.x.
+"""
+
+from typing import List, Dict, Tuple, Callable, Iterator
+import sys
+import os
+
+from envutil import get_proj_dir
+from fileutil import get_new_path
+from gitutil import get_git_root
+from langpropsutil import set_commit_for_language
+from propsutil import set_entry_dict, get_entry_dict_from_path, get_lang_bundle_name
+from csvutil import csv_to_records
+from propentry import PropEntry
+import argparse
+
+
+def write_prop_entries(entries: Iterator[PropEntry], repo_path: str):
+    """Writes property entry items to their expected relative path within the repo path.
+    Previously existing files will be overwritten, and prop entries marked with should_delete will
+    not be included.
+
+    Args:
+        entries (Iterator[PropEntry]): The prop entry items to write to disk.
+        repo_path (str): The path to the git repo.
+    """
+    items_by_file = get_by_file(entries)
+    for rel_path, (to_write, ignored) in items_by_file.items():
+        abs_path = os.path.join(repo_path, rel_path)
+        set_entry_dict(to_write, abs_path)
+
+
+def update_prop_entries(entries: Iterator[PropEntry], repo_path: str):
+    """Updates property entry items at their expected relative path within the repo path. The union of
+    the entries provided and any previously existing entries will be written. Keys marked for deletion will be
+    removed from the generated property files.
+
+    Args:
+        entries (Iterator[PropEntry]): The prop entry items to write to disk.
+        repo_path (str): The path to the git repo.
+    """
+    items_by_file = get_by_file(entries)
+    for rel_path, (to_update, to_delete) in items_by_file.items():
+        abs_path = os.path.join(repo_path, rel_path)
+
+        prop_items = get_entry_dict_from_path(abs_path)
+        if prop_items is None:
+            prop_items = {}
+
+        for key_to_delete in to_delete:
+            if key_to_delete in prop_items:
+                del prop_items[key_to_delete]
+
+        for key, val in to_update.items():
+            prop_items[key] = val
+
+        set_entry_dict(prop_items, abs_path)
+
+
+def get_by_file(entries: Iterator[PropEntry]) -> Dict[str, Tuple[Dict[str, str], List[str]]]:
+    """Groups prop entries by file. The return type is a dictionary mapping
+    the file path to a tuple containing the key-value pairs to be updated and a
+    list of keys to be deleted.
+
+    Args:
+        entries (Iterator[PropEntry]): The entries to be grouped.
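+
+    For example (illustrative values), an entry setting 'key1' to 'v1' and an
+    entry marking 'key2' for deletion, both for 'a.properties', would yield
+    {'a.properties': ({'key1': 'v1'}, ['key2'])}.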
+
+    Returns:
+        Dict[str, Tuple[Dict[str,str], List[str]]]: A dictionary mapping
+        the file path to a tuple containing the key-value pairs to be updated and a
+        list of keys to be deleted.
+    """
+    to_ret = {}
+    for prop_entry in entries:
+        rel_path = prop_entry.rel_path
+        key = prop_entry.key
+        value = prop_entry.value
+
+        if rel_path not in to_ret:
+            to_ret[rel_path] = ({}, [])
+
+        if prop_entry.should_delete:
+            to_ret[rel_path][1].append(key)
+        else:
+            to_ret[rel_path][0][key] = value
+
+    return to_ret
+
+
+def idx_bounded(num: int, max_exclusive: int) -> bool:
+    return 0 <= num < max_exclusive
+
+
+def get_prop_entry(row: List[str],
+                   path_idx: int = 0,
+                   key_idx: int = 1,
+                   value_idx: int = 2,
+                   should_delete_converter: Callable[[List[str]], bool] = None,
+                   path_converter: Callable[[str], str] = None) -> PropEntry:
+    """Parses a PropEntry object from a row of values in a csv.
+
+    Args:
+        row (List[str]): The csv file row to parse.
+        path_idx (int, optional): The column index for the relative path of the properties file. Defaults to 0.
+        key_idx (int, optional): The column index for the properties key. Defaults to 1.
+        value_idx (int, optional): The column index for the properties value. Defaults to 2.
+        should_delete_converter (Callable[[List[str]], bool], optional): If not None, this determines if the key
+            should be deleted from the row values. Defaults to None.
+        path_converter (Callable[[str], str], optional): If not None, this determines the relative path to use in
+            the created PropEntry given the original relative path. Defaults to None.
+
+    Returns:
+        PropEntry: The generated prop entry object.
+    """
+
+    path = row[path_idx] if idx_bounded(path_idx, len(row)) else None
+    if path_converter is not None:
+        path = path_converter(path)
+
+    key = row[key_idx] if idx_bounded(key_idx, len(row)) else None
+    value = row[value_idx] if idx_bounded(value_idx, len(row)) else None
+    should_delete = False if should_delete_converter is None else should_delete_converter(row)
+    return PropEntry(path, key, value, should_delete)
+
+
+def get_prop_entries(rows: List[List[str]],
+                     path_idx: int = 0,
+                     key_idx: int = 1,
+                     value_idx: int = 2,
+                     should_delete_converter: Callable[[List[str]], bool] = None,
+                     path_converter: Callable[[str], str] = None) -> Iterator[PropEntry]:
+    """Parses PropEntry objects from rows of values in a csv.
+
+    Args:
+        rows (List[List[str]]): The csv file rows to parse.
+        path_idx (int, optional): The column index for the relative path of the properties file. Defaults to 0.
+        key_idx (int, optional): The column index for the properties key. Defaults to 1.
+        value_idx (int, optional): The column index for the properties value. Defaults to 2.
+        should_delete_converter (Callable[[List[str]], bool], optional): If not None, this determines if the key
+            should be deleted from the row values. Defaults to None.
+        path_converter (Callable[[str], str], optional): If not None, this determines the relative path to use in
+            the created PropEntry given the original relative path. Defaults to None.
+
+    Returns:
+        Iterator[PropEntry]: The generated prop entry objects.
+    """
+    return map(lambda row: get_prop_entry(
+        row, path_idx, key_idx, value_idx, should_delete_converter, path_converter),
+        rows)
+
+
+def get_should_deleted(row_items: List[str], requested_idx: int) -> bool:
+    """If there is a value at row_items[requested_idx] and that value starts with 'DELET', this returns True.
+
+    Args:
+        row_items (List[str]): The row items.
+        requested_idx (int): The index specifying if the property should be deleted.
+
+    Returns:
+        bool: True if the row specifies that the property should be deleted.
+    """
+    return (idx_bounded(requested_idx, len(row_items))
+            and row_items[requested_idx].strip().upper().startswith('DELET'))
+
+
+def main():
+    # noinspection PyTypeChecker
+    parser = argparse.ArgumentParser(description='Updates properties files in the autopsy git repo.',
+                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+
+    parser.add_argument(dest='csv_file', type=str, help='The path to the csv file. The default format for the csv '
+                                                        'file has columns of relative path, properties file key, '
+                                                        'properties file value, whether or not the key should be '
+                                                        'deleted, and commit id for how recent these updates are. '
+                                                        'If the key should be deleted, the deletion column should '
+                                                        'contain \'DELETION.\' A header row is expected by default '
+                                                        'and the commit id, if specified, should only be in the '
+                                                        'first row. The input path should be specified as a '
+                                                        'relative path with the dot slash notation '
+                                                        '(e.g. `./inputpath.csv`) or an absolute path.')
+
+    parser.add_argument('-r', '--repo', dest='repo_path', type=str, required=False,
+                        help='The path to the repo. If not specified, the parent repo of this script\'s path is '
+                             'used.')
+    parser.add_argument('-p', '--path-idx', dest='path_idx', action='store', type=int, default=0, required=False,
+                        help='The column index in the csv file providing the relative path to the properties file.')
+    parser.add_argument('-k', '--key-idx', dest='key_idx', action='store', type=int, default=1, required=False,
+                        help='The column index in the csv file providing the key within the properties file.')
+    parser.add_argument('-v', '--value-idx', dest='value_idx', action='store', type=int, default=2, required=False,
+                        help='The column index in the csv file providing the value within the properties file.')
+    parser.add_argument('-d', '--should-delete-idx', dest='should_delete_idx', action='store', type=int, default=3,
+                        required=False, help='The column index in the csv file providing whether or not the key '
+                                             'should be deleted. Any value beginning with \'DELET\' '
+                                             '(case-insensitive) will be treated as True.')
+    parser.add_argument('-c', '--commit-idx', dest='latest_commit_idx', action='store', type=int, default=4,
+                        required=False, help='The column index in the csv file providing the commit for which this '
                                             'update applies. The commit should be located in the header row.')
+
+    parser.add_argument('-f', '--file-rename', dest='file_rename', action='store', type=str, default=None,
+                        required=False, help='If specified, the properties file will be renamed to the argument, '
+                                             'preserving the specified relative path.')
+    parser.add_argument('-z', '--has-no-header', dest='has_no_header', action='store_true', default=False,
+                        required=False, help='Specify whether or not there is a header within the csv file.')
+    parser.add_argument('-o', '--should-overwrite', dest='should_overwrite', action='store_true', default=False,
+                        required=False, help='Whether or not to overwrite the previously existing properties files, '
+                                             'ignoring previously existing values.')
+
+    parser.add_argument('-l', '--language', dest='language', type=str, default='HEAD', required=False,
+                        help='Specify the language in order to update the last updated properties file and rename '
+                             'files within directories. This flag overrides the file-rename flag.')
+
+    args = parser.parse_args()
+
+    repo_path = args.repo_path if args.repo_path is not None else get_git_root(get_proj_dir())
+    input_path = args.csv_file
+    path_idx = args.path_idx
+    key_idx = args.key_idx
+    value_idx = args.value_idx
+    has_header = not args.has_no_header
+    overwrite = args.should_overwrite
+
+    # means of determining if a key should be deleted from a file
+    if args.should_delete_idx is None:
+        should_delete_converter = None
+    else:
+        def should_delete_converter(row_items: List[str]):
+            return get_should_deleted(row_items, args.should_delete_idx)
+
+    # provides the means of renaming the bundle file
+    if args.language is not None:
+        def path_converter(orig_path: str):
+            return get_new_path(orig_path, get_lang_bundle_name(args.language))
+    elif args.file_rename is not None:
+        def path_converter(orig_path: str):
+            return get_new_path(orig_path, args.file_rename)
+    else:
+        path_converter = None
+
+    # retrieve records from the csv
+    all_items, header = csv_to_records(input_path, has_header)
+    prop_entries = get_prop_entries(all_items, path_idx, key_idx, value_idx, should_delete_converter, path_converter)
+
+    # write to files
+    if overwrite:
+        write_prop_entries(prop_entries, repo_path)
+    else:
+        update_prop_entries(prop_entries, repo_path)
+
+    # update the language last update if applicable
+    if args.language is not None and header is not None and len(header) > args.latest_commit_idx >= 0:
+        set_commit_for_language(args.language, header[args.latest_commit_idx])
+
+    sys.exit(0)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/release_scripts/update_sleuthkit_version.pl b/release_scripts/update_sleuthkit_version.pl
index 26d8be5073..1d6bdc7e72 100755
--- a/release_scripts/update_sleuthkit_version.pl
+++ b/release_scripts/update_sleuthkit_version.pl
@@ -131,10 +131,14 @@ sub update_core_project_properties {
 
     my $found = 0;
     while (<CONF_IN>) {
-        if (/^file\.reference\.sleuthkit\-/) {
+        if (/^file\.reference\.sleuthkit\-4/) {
             print CONF_OUT "file.reference.sleuthkit-${VER}.jar=release/modules/ext/sleuthkit-${VER}.jar\n";
             $found++;
         }
+        elsif (/^file\.reference\.sleuthkit\-caseuco-4/) {
+            print CONF_OUT "file.reference.sleuthkit-caseuco-${VER}.jar=release/modules/ext/sleuthkit-caseuco-${VER}.jar\n";
+            $found++;
+        }
         else {
             print CONF_OUT $_;
@@ -143,8 +147,8 @@ sub update_core_project_properties {
     close (CONF_IN);
     close (CONF_OUT);
 
-    if ($found != 1) {
-        die "$found (instead of 1) occurrences of version found in ${orig}";
+    if ($found != 2) {
+        die "$found (instead of 2) occurrences of version found in core ${orig}";
     }
 
     unlink ($orig) or die "Error deleting ${orig}";
@@ -167,14 +171,22 @@ sub update_core_project_xml {
 
     my $found = 0;
     while (<CONF_IN>) {
-        if (/<runtime-relative-path>ext\/sleuthkit-/) {
+        if (/<runtime-relative-path>ext\/sleuthkit-4/) {
             print CONF_OUT "        <runtime-relative-path>ext/sleuthkit-${VER}.jar</runtime-relative-path>\n";
             $found++;
         }
-        elsif (/<binary-origin>release\/modules\/ext\/sleuthkit-/) {
+        elsif (/<binary-origin>release\/modules\/ext\/sleuthkit-4/) {
             print CONF_OUT "        <binary-origin>release/modules/ext/sleuthkit-${VER}.jar</binary-origin>\n";
             $found++;
         }
+        elsif (/<runtime-relative-path>ext\/sleuthkit-caseuco-4/) {
+            print CONF_OUT "        <runtime-relative-path>ext/sleuthkit-caseuco-${VER}.jar</runtime-relative-path>\n";
+            $found++;
+        }
+        elsif (/<binary-origin>release\/modules\/ext\/sleuthkit-caseuco-4/) {
+            print CONF_OUT "        <binary-origin>release/modules/ext/sleuthkit-caseuco-${VER}.jar</binary-origin>\n";
+            $found++;
+        }
         else {
             print CONF_OUT $_;
         }
@@ -182,8 +194,8 @@ sub update_core_project_xml {
     close (CONF_IN);
     close (CONF_OUT);
 
-    if ($found != 2) {
-        die "$found (instead of 2) occurrences of version found in ${orig}";
+    if ($found != 4) {
+        die "$found (instead of 4) occurrences of version found in core ${orig}";
     }
 
     unlink ($orig) or die "Error deleting ${orig}";
diff --git a/thirdparty/OfficialHashSets/README.txt b/thirdparty/OfficialHashSets/README.txt
new file mode 100644
index 0000000000..3ea1ac5801
--- /dev/null
+++ b/thirdparty/OfficialHashSets/README.txt
@@ -0,0 +1 @@
+.kdb files can be placed in this directory and they will be treated as Official Hash Sets within autopsy. Official Hash Sets will be read-only within the application. Official Hash Sets should follow the naming convention '<name>.<known status>.kdb', where '<name>' is the name of the Official Hash Set and '<known status>' is the known status of the hash set. The known status is the identifier for one of the variable names represented in the enum HashDbManager.HashDb.KnownFilesType. As an example, a possible value could be 'Foo.Notable.kdb', where the name of the Official Hash Set is 'Foo' and the known status is 'Notable', which is the identifier for KnownFilesType.KNOWN_BAD.
\ No newline at end of file
diff --git a/unix_setup.sh b/unix_setup.sh
index 06fc655e32..a9d01739f6 100644
--- a/unix_setup.sh
+++ b/unix_setup.sh
@@ -5,7 +5,7 @@
 
 # NOTE: update_sleuthkit_version.pl updates this value and relies
 # on it keeping the same name and whitespace. Don't change it.
 
-TSK_VERSION=4.9.0
+TSK_VERSION=4.10.0
 
 # In the beginning...