From e1384168c8351f98f25114077cf70bf532df5d89 Mon Sep 17 00:00:00 2001 From: hoyt-harness Date: Tue, 2 Aug 2016 22:43:22 -0500 Subject: [PATCH 1/6] First Linux Mint 18 build attempt. Added three-character suffix for Java version test. --- .../core/core.jar/org/netbeans/core/startup/Bundle.properties | 2 +- .../org/netbeans/core/windows/view/ui/Bundle.properties | 2 +- build.xml | 1 + 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/branding/core/core.jar/org/netbeans/core/startup/Bundle.properties b/branding/core/core.jar/org/netbeans/core/startup/Bundle.properties index 2eb10546c9..cb58bb18cb 100644 --- a/branding/core/core.jar/org/netbeans/core/startup/Bundle.properties +++ b/branding/core/core.jar/org/netbeans/core/startup/Bundle.properties @@ -1,5 +1,5 @@ #Updated by build script -#Mon, 18 Jul 2016 17:58:06 -0400 +#Tue, 02 Aug 2016 22:36:15 -0500 LBL_splash_window_title=Starting Autopsy SPLASH_HEIGHT=314 SPLASH_WIDTH=538 diff --git a/branding/modules/org-netbeans-core-windows.jar/org/netbeans/core/windows/view/ui/Bundle.properties b/branding/modules/org-netbeans-core-windows.jar/org/netbeans/core/windows/view/ui/Bundle.properties index 468b249673..1f24693179 100644 --- a/branding/modules/org-netbeans-core-windows.jar/org/netbeans/core/windows/view/ui/Bundle.properties +++ b/branding/modules/org-netbeans-core-windows.jar/org/netbeans/core/windows/view/ui/Bundle.properties @@ -1,4 +1,4 @@ #Updated by build script -#Mon, 18 Jul 2016 17:58:06 -0400 +#Tue, 02 Aug 2016 22:36:15 -0500 CTL_MainWindow_Title=Autopsy 4.1.0 CTL_MainWindow_Title_No_Project=Autopsy 4.1.0 diff --git a/build.xml b/build.xml index daae174c11..864a7e8ee5 100755 --- a/build.xml +++ b/build.xml @@ -15,6 +15,7 @@ + From 546d8b3114f1e0b9935fb88b95ea572e9f085356 Mon Sep 17 00:00:00 2001 From: hoyt-harness Date: Thu, 4 Aug 2016 14:59:48 -0500 Subject: [PATCH 2/6] updated and clarified BUILDING --- BUILDING.txt | 215 +++++++++++++++++++++++++++++++++------------------ 1 file changed, 138 insertions(+), 77 deletions(-) diff --git a/BUILDING.txt b/BUILDING.txt index 1f928252e6..c4f259d399 100644 --- a/BUILDING.txt +++ b/BUILDING.txt @@ -1,6 +1,6 @@ - Last Updated: 30 October 2015 + Last Updated: 04 August 2016 -This file outlines what it takes to build Autopsy from source. +This file outlines what it takes to build Autopsy 4.1.0 from source. Note that it currently only works out-of-the-box on Windows. We are working on getting the process working under non-Windows systems. @@ -9,116 +9,177 @@ correct C libraries. STEPS: -1) Get Java Setup +1) Get Java Setup 1a) Download and install JDK version 1.8. For the current version of JavaFX -that we use, you'll need 1.8.0_66 or greater. You can now use 32-bit or 64-bit, -but special work is needed to get The Sleuth Kit to compile as 64-bit. + that we use, you'll need 1.8.0_66 or greater. You can now use 32-bit or + 64-bit, but special work is needed to get The Sleuth Kit to compile as + 64-bit. -Autopsy has been used and tested with Oracle JavaSE and the included JavaFX support -(http://www.oracle.com/technetwork/java/javase/downloads/index.html). + Autopsy has been used and tested with Oracle JavaSE and the included JavaFX + support: + (http://www.oracle.com/technetwork/java/javase/downloads/index.html) -OpenJDK and OpenJFX might work, but they are not fully tested with Autopsy. + OpenJDK and OpenJFX might work, but they are not fully tested with Autopsy. -1b) Ensure that JDK_HOME is set to the root JDK directory. +1b) Ensure that JDK_HOME is set to the root JDK directory. 
On FHS-compliant UNIX
+   systems, add the following to the bottom of your ~/.bashrc:
-
-1c) (optional) Download and install Netbeans IDE (http://netbeans.org/)
-Note: Netbeans IDE is not required to build and run Autopsy,
-but it is a recommended IDE to use for development of Autopsy modules.
+
+   # Java specific
+   export JAVA_HOME=/usr/lib/jvm/java-1.8.0-oracle
+   export PATH=$PATH:$JAVA_HOME
+   export JDK_HOME=/usr/lib/jvm/java-1.8.0-oracle
+   export PATH=$PATH:$JDK_HOME
+
+   This file will be read at the next login. To re-load without logout/login,
+   run the following command from a terminal:
+
+   source ~/.bashrc
+
+1c) (optional) Download and install Netbeans IDE (https://netbeans.org/)
+    Note: Netbeans IDE is not required to build and run Autopsy,
+    but it is a recommended IDE to use for development of Autopsy modules.
 1d) (optional) If you are going to package Autopsy, then you'll also
-need to set JRE_HOME_32 to the root 32-bit JRE directory and/or JRE_HOME_64
-to the root 64-bit JRE directory.
+    need to set JRE_HOME_32 to the root 32-bit JRE directory and/or JRE_HOME_64
+    to the root 64-bit JRE directory. On FHS-compliant UNIX systems, add the
+    following in the Java specific section of your ~/.bashrc that you added
+    earlier and re-load bashrc as above:
+
+    export JRE_HOME_64=/usr/lib/jvm/java-1.8.0-oracle/jre
+    export PATH=$PATH:$JRE_HOME_64
 1e) (optional) For some Autopsy features to be functional, you need to add the
-   java executable to the system PATH.
+   java executable to the system PATH. For UNIX, add the following to the Java
+   section of your ~/.bashrc and re-load:
+
+   export JAVA_HOME=/usr/lib/jvm/java-1.8.0-oracle
+   export PATH=$PATH:$JAVA_HOME
-2) Get Sleuth Kit Setup
-2a) Download and build a Release version of Sleuth Kit (TSK) 4.0. See
-    win32\BUILDING.txt in the TSK package for more information. You need to
-    build the tsk_jni project. Select the Release_PostgreSQL Win32 or x64 target,
-    depending upon your target build. You can use a released version or download
-    the latest from github:
-- git://github.com/sleuthkit/sleuthkit.git
+2) Get The Sleuth Kit Setup
-2b) Build the TSK JAR file by typing 'ant dist-PostgreSQL' in
-    bindings/java in the
-    TSK source code folder from a command line. Note it is case
-    sensitive. You can also add the code to a NetBeans project and build
-    it from there, selecting the dist-PostgreSQL target.
+2a) Download and build a Release version of The Sleuth Kit (TSK) 4.3. For
+    Windows systems, see win32\BUILDING.txt in the TSK package for more
+    information. UNIX systems are explained below. You need to build the tsk_jni
+    project. Select the Release_PostgreSQL Win32 or x64 target, depending upon
+    your target build. You can use a released version or download the latest
+    from GitHub:
+
+    git://github.com/sleuthkit/sleuthkit.git
-2c) Set TSK_HOME environment variable to the root directory of TSK
+2b) Build the TSK JAR file by typing 'ant dist-PostgreSQL' in bindings/java in
+    the TSK source code folder from a command line. Note it is case sensitive.
+    You can also add the code to a NetBeans project and build it from there,
+    selecting the dist-PostgreSQL target.
-2d) On Non-Windows systems, you will need to do a 'make install'
-from the TSK root directory to install the libraries and such in
-the needed places (i.e. '/usr/local').
+2c) Set TSK_HOME environment variable to the root directory of TSK.
+
+2d) On UNIX systems, you will need to make sure AFFLIB and libewf are installed.
+    For Ubuntu 16.04 and Linux Mint 18, these are .deb packages in the Xenial
+    repositories as afflib-dbg and libewf-dbg. Be sure that these installations
+    also pull in afflib-tools, libafflib-dev, libafflib0v5, libewf-dev,
+    python-libewf, and libewf2. They should install in the proper directories
+    under /usr/.
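    For reference, all of the packages named above can be pulled in with a
    single command; a minimal sketch, assuming the stock Xenial repositories
    are enabled (apt resolves the remaining dependencies automatically):

    sudo apt-get install afflib-dbg afflib-tools libafflib-dev libafflib0v5 \
        libewf-dbg libewf-dev libewf2 python-libewf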
+
+    Run 'configure' from the TSK root directory, adding options for AFFLIB and
+    libewf support:
+
+    ./configure --with-afflib=/usr --with-libewf=/usr
+
+    Run 'make' from the TSK root directory. If you want the standalone TSK
+    tools, run 'make install' after that. TSK components will be installed to
+    the appropriate directories in /usr/local.
+
+    You'll need to rename a file. At the following path:
+
+    your-tsk-source-directory/bindings/java/dist/
+
+    ...you'll find a file named 'Tsk_DataModel.jar'. Rename this to
+    'Tsk_DataModel_PostgreSQL.jar' to prevent 'file not found' errors when
+    compiling Autopsy later.
+
+    Add the following lines to the bottom of your bashrc and re-load:
+
+    # TSK specific
+    export TSK_HOME=path-to-tsk*
+    export PATH=$PATH:$TSK_HOME
+
+    * = ...such as ~/tsk/ or /usr/local/src/tsk/
-3) For 32-bit targets, get GStreamer Setup. GStreamer is used to view video files.
-You can either download it and install it or manually by unzipping the
-version that is included in the 'thirdparty/gstreamer' folder. You
-will need the 'bin' and 'lib/gstreamer-0.10' folders to be in your
-Windows PATH environment variable.
+3) For 32-bit targets, get GStreamer Setup. GStreamer is used to view video
+   files. You can either download it and install it or manually by unzipping
+   the version that is included in the 'thirdparty/gstreamer' folder. You will
+   need the 'bin' and 'lib/gstreamer-0.10' folders to be in your Windows PATH
+   environment variable.
-NOTE: This has not been fully tested in non-Windows environments
-yet, so we don't have instructions for that yet.
+   NOTE: This has not been fully tested in non-Windows environments yet, so we
+   don't have instructions for that yet.
-4) Get Autopsy source.
-4a) If you are not planning to contribute to Autopsy development, clone a read-only repository:
+4) Get Autopsy source.
-git clone https://github.com/sleuthkit/autopsy.git
+4a) If you are not planning to contribute to Autopsy development, either clone a
+    read-only repository:
+
+    git clone https://github.com/sleuthkit/autopsy.git
+
+    ...or download the zip (Windows) or tarball (UNIX) and unpack to the
+    development directory of your choice:
+
+    https://github.com/sleuthkit/autopsy/archive/autopsy-4.1.0.tar.gz
-4b) If you plan to contribute and submit patches, login to Github and create your own Autopsy fork.
-Then, clone your fork and work on that source-tree:
+4b) If you plan to contribute and submit patches, login to Github and create
+    your own Autopsy fork. Then, clone your fork and work on that source-tree:
-git clone https://github.com/YOUR_USERNAME/autopsy.git
+    git clone https://github.com/YOUR_USERNAME/autopsy.git
+
+    You will be able to submit patches by committing and pushing changes to your
+    fork and by submitting pull requests to the main Autopsy repository.
-You will be able to submit patches by committing and pushing changes to your fork
-and by submitting pull requests to the main Autopsy repository.
+
+5) Compile Autopsy
-5) Compile Autopsy
 5a) Using Netbeans IDE:
-- Start NetBeans IDE and open the Autopsy project.
-- Choose to build the Autopsy project / module. It is the highest level project
-  that will cause the other modules to be compiled.
+ - Start NetBeans IDE and open the Autopsy project.
+ - Choose to build the Autopsy project / module. It is the highest level
+   project that will cause the other modules to be compiled.
-5b) Without Netbeans IDE (requires JDK and ant >= 1.7.1):
-- From root directory of Autopsy source execute:
-ant
-(to build Autopsy)
-ant run
-(to run Autopsy)
+5b) Without NetBeans IDE (requires JDK and ant >= 1.7.1):
+ - From root directory of Autopsy source execute:
+
+   ant
+   (to build Autopsy)
+
+   ant run
+   (to run Autopsy)

 BACKGROUND:
-Here are some notes to shed some light on what is going on during
-the build process.
+Here are some notes to shed some light on what is going on during the build
+process:
-- The Sleuth Kit Java datamodel JAR file has native JNI libraries
-that are copied into it. These JNI libraries have dependencies on
-libewf, zlib, libpq, libintl-8, libeay32, and ssleay32 DLL files. On non-Windows
-platforms, the JNI library also has a dependency on libtsk (on Windows,
-it is compiled into libtsk_jni).
+ - The Sleuth Kit Java datamodel JAR file has native JNI libraries that are
+   copied into it. These JNI libraries have dependencies on libewf, zlib,
+   libpq, libintl-8, libeay32, and ssleay32 DLL files. On UNIX platforms, the
+   JNI library also has a dependency on libtsk (on Windows, it is compiled into
+   libtsk_jni).
-- NetBeans uses ant to build Autopsy. The build target copies the
-TSK datamodel JAR file into the project.
+ - NetBeans uses ant to build Autopsy. The build target copies the TSK
+   datamodel JAR file into the project.
-- On a Windows system, the compile-time ant target copies the
-dependency libraries into the Autopsy code structure so that they can
-be found when Autopsy is run and packaged. At run-time, the native
-library inside of the JAR file will be extracted and used.
+ - On a Windows system, the compile-time ant target copies the dependency
+   libraries into the Autopsy code structure so that they can be found when
+   Autopsy is run and packaged. At runtime, the native library inside of the
+   JAR file will be extracted and used.
-- On a Unix system, the ant target copies only the JNI library and
-then relies on the other libraries (libtsk, libewf, zlib, etc.) to
-be installed on the system in their standard locations (i.e.
-/usr/local).
-
-- Every time that you do a source code update of TSK, make sure you
-rebuild both the libtsk_dll, the JAR file, and then rebuild Autopsy
-so that it copies the latest data model JAR file.
+ - On a UNIX system, the ant target copies only the JNI library and then
+   relies on the other libraries (libtsk, libewf, zlib, etc.) to be installed
+   on the system in their standard locations (i.e. /usr/local).
+ - Every time that you do a source code update of TSK, make sure you rebuild
+   both the libtsk_dll, the JAR file, and then rebuild Autopsy so that it
+   copies the latest data model JAR file.
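+ - For a quick recap, the full UNIX build sequence described in steps 2 and 5
+   might look like the following sketch ('path-to-autopsy-source' is a
+   placeholder in the same style as 'path-to-tsk' above, and TSK_HOME is
+   assumed to be set as in step 2c):
+
+   cd $TSK_HOME
+   ./configure --with-afflib=/usr --with-libewf=/usr
+   make
+   cd bindings/java
+   ant dist-PostgreSQL
+   mv dist/Tsk_DataModel.jar dist/Tsk_DataModel_PostgreSQL.jar
+   cd path-to-autopsy-source
+   ant
+   ant run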
--------------- Brian Carrier From 35f9522cad3fb559a1b13a0e8834730395995c53 Mon Sep 17 00:00:00 2001 From: hoyt-harness Date: Fri, 19 Aug 2016 09:29:36 -0500 Subject: [PATCH 3/6] replaced modified BUILDING.txt with original to match upstream source --- BUILDING.txt | 215 +++++++----------- .../netbeans/core/startup/Bundle.properties | 2 +- .../core/windows/view/ui/Bundle.properties | 2 +- 3 files changed, 79 insertions(+), 140 deletions(-) diff --git a/BUILDING.txt b/BUILDING.txt index c4f259d399..1f928252e6 100644 --- a/BUILDING.txt +++ b/BUILDING.txt @@ -1,6 +1,6 @@ - Last Updated: 04 August 2016 + Last Updated: 30 October 2015 -This file outlines what it takes to build Autopsy 4.1.0 from source. +This file outlines what it takes to build Autopsy from source. Note that it currently only works out-of-the-box on Windows. We are working on getting the process working under non-Windows systems. @@ -9,177 +9,116 @@ correct C libraries. STEPS: -1) Get Java Setup +1) Get Java Setup 1a) Download and install JDK version 1.8. For the current version of JavaFX - that we use, you'll need 1.8.0_66 or greater. You can now use 32-bit or - 64-bit, but special work is needed to get The Sleuth Kit to compile as - 64-bit. +that we use, you'll need 1.8.0_66 or greater. You can now use 32-bit or 64-bit, +but special work is needed to get The Sleuth Kit to compile as 64-bit. - Autopsy has been used and tested with Oracle JavaSE and the included JavaFX - support: - (http://www.oracle.com/technetwork/java/javase/downloads/index.html) +Autopsy has been used and tested with Oracle JavaSE and the included JavaFX support +(http://www.oracle.com/technetwork/java/javase/downloads/index.html). - OpenJDK and OpenJFX might work, but they are not fully tested with Autopsy. +OpenJDK and OpenJFX might work, but they are not fully tested with Autopsy. -1b) Ensure that JDK_HOME is set to the root JDK directory. On FHS-compliant UNIX - systems, add the following to the bottom of your ~/.bashrc: +1b) Ensure that JDK_HOME is set to the root JDK directory. - # Java specific - export JAVA_HOME=/usr/lib/jvm/java-1.8.0-oracle - export PATH=$PATH:$JAVA_HOME - export JDK_HOME=/usr/lib/jvm/java-1.8.0-oracle - export PATH=$PATH:$JDK_HOME - - This file will be read at the next login. To re-load without logout/login, - run the following command from a terminal: - - source ~/.bashrc - -1c) (optional) Download and install Netbeans IDE (https://netbeans.org/) - Note: Netbeans IDE is not required to build and run Autopsy, - but it is a recommended IDE to use for development of Autopsy modules. +1c) (optional) Download and install Netbeans IDE (http://netbeans.org/) +Note: Netbeans IDE is not required to build and run Autopsy, +but it is a recommended IDE to use for development of Autopsy modules. 1d) (optional) If you are going to package Autopsy, then you'll also - need to set JRE_HOME_32 to the root 32-bit JRE directory and/or JRE_HOME_64 - to the root 64-bit JRE directory. On FHS-compliant UNIX systems, add the - following in the Java specific section of your ~/.bashrc that you added - earlier and re-load bashrc as above: - - export JRE_HOME_64=/usr/lib/jvm/java-1.8.0-oracle/jre - export PATH=$PATH:$JRE_HOME_64 +need to set JRE_HOME_32 to the root 32-bit JRE directory and/or JRE_HOME_64 +to the root 64-bit JRE directory. 1e) (optional) For some Autopsy features to be functional, you need to add the - java executable to the system PATH. 
For UNIX, add the following to the Java
-   section of your ~/.bashrc and re-load:
-
-   export JAVA_HOME=/usr/lib/jvm/java-1.8.0-oracle
-   export PATH=$PATH:$JAVA_HOME
+   java executable to the system PATH.
-2) Get The Sleuth Kit Setup
+2) Get Sleuth Kit Setup
+2a) Download and build a Release version of Sleuth Kit (TSK) 4.0. See
+    win32\BUILDING.txt in the TSK package for more information. You need to
+    build the tsk_jni project. Select the Release_PostgreSQL Win32 or x64 target,
+    depending upon your target build. You can use a released version or download
+    the latest from github:
+- git://github.com/sleuthkit/sleuthkit.git
-2a) Download and build a Release version of The Sleuth Kit (TSK) 4.3. For
-    Windows systems, see win32\BUILDING.txt in the TSK package for more
-    information. UNIX systems are explained below. You need to build the tsk_jni
-    project. Select the Release_PostgreSQL Win32 or x64 target, depending upon
-    your target build. You can use a released version or download the latest
-    from GitHub:
-
-    git://github.com/sleuthkit/sleuthkit.git
+2b) Build the TSK JAR file by typing 'ant dist-PostgreSQL' in
+    bindings/java in the
+    TSK source code folder from a command line. Note it is case
+    sensitive. You can also add the code to a NetBeans project and build
+    it from there, selecting the dist-PostgreSQL target.
-2b) Build the TSK JAR file by typing 'ant dist-PostgreSQL' in bindings/java in
-    the TSK source code folder from a command line. Note it is case sensitive.
-    You can also add the code to a NetBeans project and build it from there,
-    selecting the dist-PostgreSQL target.
+2c) Set TSK_HOME environment variable to the root directory of TSK
-2c) Set TSK_HOME environment variable to the root directory of TSK.
-
-2d) On UNIX systems, you will need to make sure AFFLIB and libewf are installed.
-    For Ubuntu 16.04 and Linux Mint 18, these are .deb packages in the Xenial
-    repositories as afflib-dbg and libewf-dbg. Be sure that these installations
-    also pull in afflib-tools, libafflib-dev, libafflib0v5, libewf-dev,
-    python-libewf, and libewf2. They should install in the proper directories
-    under /usr/.
-
-    Run 'configure' from the TSK root directory, adding options for AFFLIB and
-    libewf support:
-
-    ./configure --with-afflib=/usr --with-libewf=/usr
-
-    Run 'make' from the TSK root directory. If you want the standalone TSK
-    tools, run 'make install' after that. TSK components will be installed to
-    the appropriate directories in /usr/local.
-
-    You'll need to rename a file. At the following path:
-
-    your-tsk-source-directory/bindings/java/dist/
-
-    ...you'll find a file named 'Tsk_DataModel.jar'. Rename this to
-    'Tsk_DataModel_PostgreSQL.jar' to prevent 'file not found' errors when
-    compiling Autopsy later.
-
-    Add the following lines to the bottom of your bashrc and re-load:
-
-    # TSK specific
-    export TSK_HOME=path-to-tsk*
-    export PATH=$PATH:$TSK_HOME
-
-    * = ...such as ~/tsk/ or /usr/local/src/tsk/
+2d) On Non-Windows systems, you will need to do a 'make install'
+from the TSK root directory to install the libraries and such in
+the needed places (i.e. '/usr/local').
-3) For 32-bit targets, get GStreamer Setup. GStreamer is used to view video
-   files. You can either download it and install it or manually by unzipping
-   the version that is included in the 'thirdparty/gstreamer' folder. You will
-   need the 'bin' and 'lib/gstreamer-0.10' folders to be in your Windows PATH
-   environment variable.
+3) For 32-bit targets, get GStreamer Setup. GStreamer is used to view video files.
+You can either download it and install it or manually by unzipping the +version that is included in the 'thirdparty/gstreamer' folder. You +will need the 'bin' and 'lib/gstreamer-0.10' folders to be in your +Windows PATH environment variable. - NOTE: This has not been fully tested in non-Windows environments yet, so we - don't have instructions for that yet. +NOTE: This has not been fully tested in non-Windows environments +yet, so we don't have instructions for that yet. -4) Get Autopsy source. +4) Get Autopsy source. +4a) If you are not planning to contribute to Autopsy development, clone a read-only repository: -4a) If you are not planning to contribute to Autopsy development, either clone a - read-only repository: +git clone https://github.com/sleuthkit/autopsy.git - git clone https://github.com/sleuthkit/autopsy.git - - ...or download the zip (Windows) or tarball (UNIX) and unpack to the - development directory of your choice: - - https://github.com/sleuthkit/autopsy/archive/autopsy-4.1.0.tar.gz +4b) If you plan to contribute and submit patches, login to Github and create your own Autopsy fork. +Then, clone your fork and work on that source-tree: -4b) If you plan to contribute and submit patches, login to Github and create - your own Autopsy fork. Then, clone your fork and work on that source-tree: +git clone https://github.com/YOUR_USERNAME/autopsy.git - git clone https://github.com/YOUR_USERNAME/autopsy.git - - You will be able to submit patches by committing and pushing changes to your - fork and by submitting pull requests to the main Autopsy repository. - -5) Compile Autopsy +You will be able to submit patches by committing and pushing changes to your fork +and by submitting pull requests to the main Autopsy repository. +5) Compile Autopsy 5a) Using Netbeans IDE: - - Start NetBeans IDE and open the Autopsy project. - - Choose to build the Autopsy project / module. It is the highest level - project that will cause the other modules to be compiled. +- Start NetBeans IDE and open the Autopsy project. +- Choose to build the Autopsy project / module. It is the highest level project + that will cause the other modules to be compiled. -5b) Without NetBeans IDE (requires JDK and ant >= 1.7.1): - - From root directory of Autopsy source execute: - - ant - (to build Autopsy) - - ant run - (to run Autopsy) +5b) Without Netbeans IDE (requires JDK and ant >= 1.7.1): +- From root directory of Autopsy source execute: +ant +(to build Autopsy) +ant run +(to run Autopsy) BACKGROUND: -Here are some notes to shed some light on what is going on during the build -process: +Here are some notes to shed some light on what is going on during +the build process. - - The Sleuth Kit Java datamodel JAR file has native JNI libraries that are - copied into it. These JNI libraries have dependencies on libewf, zlib, - libpq, libintl-8, libeay32, and ssleay32 DLL files. On UNIX platforms, the - JNI library also has a dependency on libtsk (on Windows, it is compiled into - libtsk_jni). +- The Sleuth Kit Java datamodel JAR file has native JNI libraries +that are copied into it. These JNI libraries have dependencies on +libewf, zlib, libpq, libintl-8, libeay32, and ssleay32 DLL files. On non-Windows +platforms, the JNI library also has a dependency on libtsk (on Windows, +it is compiled into libtsk_jni). - - NetBeans uses ant to build Autopsy. The build target copies the TSK - datamodel JAR file into the project. +- NetBeans uses ant to build Autopsy. 
The build target copies the
+TSK datamodel JAR file into the project.
-- On a Windows system, the compile-time ant target copies the dependency
-   libraries into the Autopsy code structure so that they can be found when
-   Autopsy is run and packaged. At runtime, the native library inside of the
-   JAR file will be extracted and used.
+- On a Windows system, the compile-time ant target copies the
+dependency libraries into the Autopsy code structure so that they can
+be found when Autopsy is run and packaged. At run-time, the native
+library inside of the JAR file will be extracted and used.
- - On a UNIX system, the ant target copies only the JNI library and then
-   relies on the other libraries (libtsk, libewf, zlib, etc.) to be installed
-   on the system in their standard locations (i.e. /usr/local).
+- On a Unix system, the ant target copies only the JNI library and
+then relies on the other libraries (libtsk, libewf, zlib, etc.) to
+be installed on the system in their standard locations (i.e.
+/usr/local).
+
+- Every time that you do a source code update of TSK, make sure you
+rebuild both the libtsk_dll, the JAR file, and then rebuild Autopsy
+so that it copies the latest data model JAR file.
- - Every time that you do a source code update of TSK, make sure you rebuild
-   both the libtsk_dll, the JAR file, and then rebuild Autopsy so that it
-   copies the latest data model JAR file.

---------------
Brian Carrier

diff --git a/branding/core/core.jar/org/netbeans/core/startup/Bundle.properties b/branding/core/core.jar/org/netbeans/core/startup/Bundle.properties
index cb58bb18cb..d26a5a1390 100644
--- a/branding/core/core.jar/org/netbeans/core/startup/Bundle.properties
+++ b/branding/core/core.jar/org/netbeans/core/startup/Bundle.properties
@@ -1,5 +1,5 @@
 #Updated by build script
-#Tue, 02 Aug 2016 22:36:15 -0500
+#Thu, 18 Aug 2016 14:09:08 -0500
 LBL_splash_window_title=Starting Autopsy
 SPLASH_HEIGHT=314
 SPLASH_WIDTH=538
diff --git a/branding/modules/org-netbeans-core-windows.jar/org/netbeans/core/windows/view/ui/Bundle.properties b/branding/modules/org-netbeans-core-windows.jar/org/netbeans/core/windows/view/ui/Bundle.properties
index 1f24693179..4c9bdabcdd 100644
--- a/branding/modules/org-netbeans-core-windows.jar/org/netbeans/core/windows/view/ui/Bundle.properties
+++ b/branding/modules/org-netbeans-core-windows.jar/org/netbeans/core/windows/view/ui/Bundle.properties
@@ -1,4 +1,4 @@
 #Updated by build script
-#Tue, 02 Aug 2016 22:36:15 -0500
+#Thu, 18 Aug 2016 14:09:08 -0500
 CTL_MainWindow_Title=Autopsy 4.1.0
 CTL_MainWindow_Title_No_Project=Autopsy 4.1.0

From 401ac636b9a6e5dcbc59677e5db36e8efb027a44 Mon Sep 17 00:00:00 2001
From: esaunders
Date: Tue, 17 Jan 2017 21:34:44 -0500
Subject: [PATCH 4/6] Modified filterResults() to take entire document id into
 consideration when determining whether a hit has been seen before.
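The essence of the change: the per-keyword "seen" collection is now keyed on
the full Solr document id, which distinguishes the individual indexed chunks
of a file, rather than on the bare object id. A minimal sketch of that
bookkeeping using plain JDK collections; the class and method names here are
illustrative, not the module's actual API:

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Map;
    import java.util.Set;

    class SeenDocumentFilter {
        // Per keyword, the Solr document ids that already produced a hit.
        // Document ids are treated as opaque strings here; in the module
        // they identify a file or an individual chunk of a file.
        private final Map<String, Set<String>> seenDocIds = new HashMap<>();

        // Returns only the hits (given here as Solr document ids) that are
        // new for this keyword, and remembers them for later searches.
        List<String> filterNew(String keyword, List<String> hitDocIds) {
            Set<String> seen = seenDocIds.computeIfAbsent(keyword, k -> new HashSet<>());
            List<String> fresh = new ArrayList<>();
            for (String docId : hitDocIds) {
                if (seen.add(docId)) { // Set.add() returns false for duplicates
                    fresh.add(docId);
                }
            }
            return fresh;
        }
    }

The diff below applies the same idea directly inside filterResults(), using
the job's per-keyword result map as the seen-set.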
---
 .../autopsy/keywordsearch/SearchRunner.java | 88 ++++++++++++-------
 1 file changed, 56 insertions(+), 32 deletions(-)

diff --git a/KeywordSearch/src/org/sleuthkit/autopsy/keywordsearch/SearchRunner.java b/KeywordSearch/src/org/sleuthkit/autopsy/keywordsearch/SearchRunner.java
index f80fe86404..ca68ace102 100644
--- a/KeywordSearch/src/org/sleuthkit/autopsy/keywordsearch/SearchRunner.java
+++ b/KeywordSearch/src/org/sleuthkit/autopsy/keywordsearch/SearchRunner.java
@@ -21,9 +21,11 @@ package org.sleuthkit.autopsy.keywordsearch;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
+import java.util.Set;
 import java.util.Timer;
 import java.util.TimerTask;
 import java.util.concurrent.CancellationException;
@@ -262,12 +264,12 @@ public final class SearchRunner {
 // mutable state:
 private volatile boolean workerRunning;
 private List<String> keywordListNames; //guarded by SearchJobInfo.this
- private Map<Keyword, List<Long>> currentResults; //guarded by SearchJobInfo.this
+ private Map<Keyword, Set<String>> currentResults; //guarded by SearchJobInfo.this
 private SearchRunner.Searcher currentSearcher;
 private AtomicLong moduleReferenceCount = new AtomicLong(0);
 private final Object finalSearchLock = new Object(); //used for a condition wait
- public SearchJobInfo(long jobId, long dataSourceId, List<String> keywordListNames) {
+ private SearchJobInfo(long jobId, long dataSourceId, List<String> keywordListNames) {
 this.jobId = jobId;
 this.dataSourceId = dataSourceId;
 this.keywordListNames = new ArrayList<>(keywordListNames);
@@ -276,53 +278,53 @@ public final class SearchRunner {
 currentSearcher = null;
 }
- public long getJobId() {
+ private long getJobId() {
 return jobId;
 }
- public long getDataSourceId() {
+ private long getDataSourceId() {
 return dataSourceId;
 }
- public synchronized List<String> getKeywordListNames() {
+ private synchronized List<String> getKeywordListNames() {
 return new ArrayList<>(keywordListNames);
 }
- public synchronized void addKeywordListName(String keywordListName) {
+ private synchronized void addKeywordListName(String keywordListName) {
 if (!keywordListNames.contains(keywordListName)) {
 keywordListNames.add(keywordListName);
 }
 }
- public synchronized List<Long> currentKeywordResults(Keyword k) {
+ private synchronized Set<String> currentKeywordResults(Keyword k) {
 return currentResults.get(k);
 }
- public synchronized void addKeywordResults(Keyword k, List<Long> resultsIDs) {
+ private synchronized void addKeywordResults(Keyword k, Set<String> resultsIDs) {
 currentResults.put(k, resultsIDs);
 }
- public boolean isWorkerRunning() {
+ private boolean isWorkerRunning() {
 return workerRunning;
 }
- public void setWorkerRunning(boolean flag) {
+ private void setWorkerRunning(boolean flag) {
 workerRunning = flag;
 }
- public synchronized SearchRunner.Searcher getCurrentSearcher() {
+ private synchronized SearchRunner.Searcher getCurrentSearcher() {
 return currentSearcher;
 }
- public synchronized void setCurrentSearcher(SearchRunner.Searcher searchRunner) {
+ private synchronized void setCurrentSearcher(SearchRunner.Searcher searchRunner) {
 currentSearcher = searchRunner;
 }
- public void incrementModuleReferenceCount() {
+ private void incrementModuleReferenceCount() {
 moduleReferenceCount.incrementAndGet();
 }
- public long decrementModuleReferenceCount() {
+ private long decrementModuleReferenceCount() {
 return moduleReferenceCount.decrementAndGet();
 }
 /**
 *
 * @throws InterruptedException
 */
- public void waitForCurrentWorker() throws InterruptedException {
+ private void waitForCurrentWorker() throws InterruptedException {
 synchronized (finalSearchLock) {
 while (workerRunning) {
 finalSearchLock.wait(); //wait() releases the lock
 }
 }
 }
 /**
 * Unset workerRunning and wake up thread(s) waiting on finalSearchLock
 */
- public void searchNotify() {
+ private void searchNotify() {
 synchronized (finalSearchLock) {
 workerRunning = false;
 finalSearchLock.notify();
 }
 }
@@ -567,37 +569,59 @@ public final class SearchRunner {
 });
 }
- //calculate new results but subtracting results already obtained in this ingest
- //update currentResults map with the new results
+ /**
+ * Over time, periodic searches can produce keyword hits that were seen
+ * in earlier searches (in addition to new hits).
+ * This method filters out all of the hits found in earlier
+ * periodic searches and returns only the results found by the most
+ * recent search.
+ *
+ * @param queryResult The results returned by a keyword search.
+ * @return The set of hits found by the most recent search.
+ *
+ */
 private QueryResults filterResults(QueryResults queryResult) {
+ // Create a new (empty) QueryResults object to hold the most recently found hits.
 QueryResults newResults = new QueryResults(queryResult.getQuery(), queryResult.getKeywordList());
 for (Keyword keyword : queryResult.getKeywords()) {
 List<KeywordHit> queryTermResults = queryResult.getResults(keyword);
- //translate to list of IDs that we keep track of
- List<Long> queryTermResultsIDs = new ArrayList<>();
- for (KeywordHit ch : queryTermResults) {
- queryTermResultsIDs.add(ch.getSolrObjectId());
+ // Grab the set of solr document ids that have a hit for this
+ // keyword.
+ Set<String> queryTermResultsIDs = new HashSet<>();
+ for (KeywordHit hit : queryTermResults) {
+ queryTermResultsIDs.add(hit.getSolrDocumentId());
 }
- List<Long> curTermResults = job.currentKeywordResults(keyword);
+ // Get the set of document ids seen in the past by this searcher
+ // for the given keyword.
+ Set<String> curTermResults = job.currentKeywordResults(keyword);
 if (curTermResults == null) {
+ // If we haven't seen any results in the past, we simply
+ // add all of the incoming results to the outgoing collection.
 job.addKeywordResults(keyword, queryTermResultsIDs);
 newResults.addResult(keyword, queryTermResults);
 } else {
- //some AbstractFile hits already exist for this keyword
+ // Otherwise, we have seen results for this keyword in the past.
+ // For each of the keyword hits in the incoming results we check to
+ // see if we have seen the document id previously.
 for (KeywordHit res : queryTermResults) {
- if (!curTermResults.contains(res.getSolrObjectId())) {
- //add to new results
- List<KeywordHit> newResultsFs = newResults.getResults(keyword);
- if (newResultsFs == null) {
- newResultsFs = new ArrayList<>();
- newResults.addResult(keyword, newResultsFs);
+ if (!curTermResults.contains(res.getSolrDocumentId())) {
+ // We have not seen the document id before so we add
+ // the keyword hit to the results
+ List<KeywordHit> newKeywordHits = newResults.getResults(keyword);
+ if (newKeywordHits == null) {
+ // Create an empty list to hold the new hits.
+ newKeywordHits = new ArrayList<>();
+ newResults.addResult(keyword, newKeywordHits);
 }
- newResultsFs.add(res);
- curTermResults.add(res.getSolrObjectId());
+ // Add the new hit to the list.
+ newKeywordHits.add(res);
+ // Add the document id to the set of document ids
+ // that have been seen for this keyword.
+ curTermResults.add(res.getSolrDocumentId());
 }
 }
 }

From 2717163d021307ba926a9915206568bcfd3ce522 Mon Sep 17 00:00:00 2001
From: Brian Carrier
Date: Wed, 18 Jan 2017 16:08:05 -0500
Subject: [PATCH 5/6] updated NEWS

---
 NEWS.txt | 24 +++++++++---------------
 1 file changed, 9 insertions(+), 15 deletions(-)

diff --git a/NEWS.txt b/NEWS.txt
index 7a0c8cb200..daff4c1a7a 100644
--- a/NEWS.txt
+++ b/NEWS.txt
@@ -1,23 +1,17 @@
 ---------------- VERSION 4.3.0 --------------
 Improvements:
-- Creation and analysis (e.g., keyword search) of virtual files for slack
-space.
-- A preloader in an Android device image does not prevent adding the image as
-a data source (reading of secondary GPT tables supported).
-- User can add data sources with no file systems or unsupported file systems
-as "unallocated space image files" for carving, keyword search, etc.
-- File extension mismatch analysis can be configured to check all file types,
-all file types except text files, or only multimedia and executable files.
-- Column order changes in table views are "sticky" for each type of tree view
-item.
-- Tree view has new file types by MIME type sub tree.
-- User can bulk add list of keywords to a keyword list.
+- Support for slack space on files (as separate virtual files) to enable keyword searching and other analysis.
+- Simple mode for the file extension mismatch module that focuses on only multimedia and executable files to reduce false positives.
+- New view in tree that shows the MIME types.
 - Tagged items are highlighted in table views.
-- Toolbar button for Image/Video Gallery
-- New "Experimental" module (activate via Tools, Plugins) with auto ingest
-feature.
+- Ordering of columns is saved when user changes them.
+- Support for Android devices with preloaders (uses backup GPT)
+- Support for images with no file systems (all data is added as unallocated space)
+- User can bulk add list of keywords to a keyword list.
+- New "Experimental" module (activate via Tools, Plugins) with auto ingest feature.
+- Assorted bug fixes and minor enhancements.
+
 ---------------- VERSION 4.2.0 --------------
 Improvements:
 - Credit card account search.

From 4bf5988c40fb7f4cfe567e20e95ec87b1a0e150b Mon Sep 17 00:00:00 2001
From: esaunders
Date: Wed, 18 Jan 2017 17:31:50 -0500
Subject: [PATCH 6/6] Modified filterResults() to only return a single hit per
 keyword per object where a hit for that keyword on that object has not been
 seen before.
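In other words, de-duplication now happens at the object level: once any hit
for a keyword has been recorded for an object, further hits for that keyword
in the same object (for example, in a different chunk of the same file) are
suppressed, both within one search and across later periodic searches. A
compact sketch of that rule, with illustrative names rather than the module's
actual types:

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Map;
    import java.util.Set;

    class OneHitPerObjectFilter {
        // A keyword hit reduced to the object and chunk it was found in.
        static class Hit {
            final long objectId;
            final int chunkId;
            Hit(long objectId, int chunkId) { this.objectId = objectId; this.chunkId = chunkId; }
        }

        // Per keyword, the object ids that already produced a hit.
        private final Map<String, Set<Long>> seenObjectIds = new HashMap<>();

        // Keeps at most one previously-unseen hit per object for the keyword;
        // the chunk id plays no part in the comparison.
        List<Hit> filterNew(String keyword, List<Hit> hits) {
            Set<Long> seen = seenObjectIds.computeIfAbsent(keyword, k -> new HashSet<>());
            List<Hit> fresh = new ArrayList<>();
            for (Hit hit : hits) {
                if (seen.add(hit.objectId)) { // a second hit on the same object is skipped
                    fresh.add(hit);
                }
            }
            return fresh;
        }
    }

The diff below folds this into filterResults(), seeding the set from the job
and writing the updated set back after each keyword is processed.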
---
 .../autopsy/keywordsearch/SearchRunner.java | 92 ++++++++++---------
 1 file changed, 50 insertions(+), 42 deletions(-)

diff --git a/KeywordSearch/src/org/sleuthkit/autopsy/keywordsearch/SearchRunner.java b/KeywordSearch/src/org/sleuthkit/autopsy/keywordsearch/SearchRunner.java
index ca68ace102..989f1ac71b 100644
--- a/KeywordSearch/src/org/sleuthkit/autopsy/keywordsearch/SearchRunner.java
+++ b/KeywordSearch/src/org/sleuthkit/autopsy/keywordsearch/SearchRunner.java
@@ -264,7 +264,9 @@ public final class SearchRunner {
 // mutable state:
 private volatile boolean workerRunning;
 private List<String> keywordListNames; //guarded by SearchJobInfo.this
- private Map<Keyword, Set<String>> currentResults; //guarded by SearchJobInfo.this
+
+ // Map of keyword to the object ids that contain a hit
+ private Map<Keyword, Set<Long>> currentResults; //guarded by SearchJobInfo.this
 private SearchRunner.Searcher currentSearcher;
 private AtomicLong moduleReferenceCount = new AtomicLong(0);
 private final Object finalSearchLock = new Object(); //used for a condition wait
@@ -296,11 +298,11 @@ public final class SearchRunner {
 }
 }
- private synchronized Set<String> currentKeywordResults(Keyword k) {
+ private synchronized Set<Long> currentKeywordResults(Keyword k) {
 return currentResults.get(k);
 }
- private synchronized void addKeywordResults(Keyword k, Set<String> resultsIDs) {
+ private synchronized void addKeywordResults(Keyword k, Set<Long> resultsIDs) {
 currentResults.put(k, resultsIDs);
 }
@@ -470,8 +472,8 @@ public final class SearchRunner {
 return null;
 }
- // calculate new results by subtracting results already obtained in this ingest
- // this creates a map of each keyword to the list of unique files that have that hit.
+ // Reduce the results of the query to only those hits we
+ // have not already seen.
 QueryResults newResults = filterResults(queryResults);
 if (!newResults.getKeywords().isEmpty()) {
@@ -570,61 +572,67 @@ public final class SearchRunner {
 }
 /**
- * Over time, periodic searches can produce keyword hits that were seen
- * in earlier searches (in addition to new hits).
 * This method filters out all of the hits found in earlier
 * periodic searches and returns only the results found by the most
 * recent search.
 *
+ * This method will only return hits for objects for which we haven't
+ * previously seen a hit for the keyword.
+ *
 * @param queryResult The results returned by a keyword search.
- * @return The set of hits found by the most recent search.
+ * @return The set of hits found by the most recent search for objects
+ * that have not previously had a hit.
 *
 */
 private QueryResults filterResults(QueryResults queryResult) {
- // Create a new (empty) QueryResults object to hold the most recently found hits.
+ // Create a new (empty) QueryResults object to hold the most recently
+ // found hits.
 QueryResults newResults = new QueryResults(queryResult.getQuery(), queryResult.getKeywordList());
+ // For each keyword represented in the results.
 for (Keyword keyword : queryResult.getKeywords()) {
+ // These are all of the hits across all objects for the most recent search.
+ // This may well include duplicates of hits we've seen in earlier periodic searches.
 List<KeywordHit> queryTermResults = queryResult.getResults(keyword);
- // Grab the set of solr document ids that have a hit for this
- // keyword.
- Set<String> queryTermResultsIDs = new HashSet<>();
- for (KeywordHit hit : queryTermResults) {
- queryTermResultsIDs.add(hit.getSolrDocumentId());
+ // This will be used to build up the hits we haven't seen before
+ // for this keyword.
+ List<KeywordHit> newUniqueHits = new ArrayList<>();
+
+ // Get the set of object ids seen in the past by this searcher
+ // for the given keyword.
+ Set<Long> curTermResults = job.currentKeywordResults(keyword);
+ if (curTermResults == null) {
+ // We create a new empty set if we haven't seen results for
+ // this keyword before.
+ curTermResults = new HashSet<>();
 }
- // Get the set of document ids seen in the past by this searcher
- // for the given keyword.
- Set<String> curTermResults = job.currentKeywordResults(keyword);
- if (curTermResults == null) {
- // If we haven't seen any results in the past, we simply
- // add all of the incoming results to the outgoing collection.
- job.addKeywordResults(keyword, queryTermResultsIDs);
- newResults.addResult(keyword, queryTermResults);
- } else {
- // Otherwise, we have seen results for this keyword in the past.
- // For each of the keyword hits in the incoming results we check to
- // see if we have seen the document id previously.
- for (KeywordHit res : queryTermResults) {
- if (!curTermResults.contains(res.getSolrDocumentId())) {
- // We have not seen the document id before so we add
- // the keyword hit to the results
- List<KeywordHit> newKeywordHits = newResults.getResults(keyword);
- if (newKeywordHits == null) {
- // Create an empty list to hold the new hits.
- newKeywordHits = new ArrayList<>();
- newResults.addResult(keyword, newKeywordHits);
- }
- // Add the new hit to the list.
- newKeywordHits.add(res);
- // Add the document id to the set of document ids
- // that have been seen for this keyword.
- curTermResults.add(res.getSolrDocumentId());
- }
 }
+ // For each hit for this keyword.
+ for (KeywordHit hit : queryTermResults) {
+ if (curTermResults.contains(hit.getSolrObjectId())) {
+ // Skip the hit if we've already seen a hit for
+ // this keyword in the object.
+ continue;
 }
+
+ // We haven't seen the hit before so add it to list of new
+ // unique hits.
+ newUniqueHits.add(hit);
+
+ // Add the object id to the results we've seen for this
+ // keyword.
+ curTermResults.add(hit.getSolrObjectId());
 }
+
+ // Update the job with the list of objects for which we have
+ // seen hits for the current keyword.
+ job.addKeywordResults(keyword, curTermResults);
+
+ // Add the new hits for the current keyword into the results
+ // to be returned.
+ newResults.addResult(keyword, newUniqueHits);
 }
 return newResults;