Safekeep ingest framework improvements

Richard Cordovano 2014-04-29 17:49:12 -04:00
parent 45ac9090a5
commit ae1793bfd3
15 changed files with 1203 additions and 1196 deletions

View File

@@ -52,12 +52,6 @@ Ensure the Case drive has at least 1GB free space and restart ingest.
IngestJobConfigurationPanel.advancedButton.text=Advanced
IngestJobConfigurationPanel.advancedButton.actionCommand=Advanced
IngestScheduler.DataSourceScheduler.toString.size=DataSourceQueue, size\:
IngestScheduler.FileSched.toString.curFiles.text=\
CurFiles, size\:
IngestScheduler.FileSched.toString.curDirs.text=\
CurDirs(stack), size\:
IngestScheduler.FileSched.toString.rootDirs.text=\
RootDirs(sorted), size\:
IngestManager.StartIngestJobsTask.run.displayName=Queueing ingest tasks
IngestManager.StartIngestJobsTask.run.cancelling={0} (Cancelling...)
IngestManager.StartIngestJobsTask.run.catchException.msg=An error occurred while starting ingest. Results may only be partial

View File

@@ -1,74 +1,68 @@
CTL_IngestMessageTopComponent=\u30E1\u30C3\u30BB\u30FC\u30B8
HINT_IngestMessageTopComponent=\u30E1\u30C3\u30BB\u30FC\u30B8\u30A6\u30A3\u30F3\u30C9\u30A6
IngestDialog.closeButton.title=\u9589\u3058\u308B
IngestDialog.startButton.title=\u30B9\u30BF\u30FC\u30C8
IngestDialog.title.text=\u30A4\u30F3\u30B8\u30A7\u30B9\u30C8\u30E2\u30B8\u30E5\u30FC\u30EB
IngestJob.progress.cancelling={0}\uFF08\u30AD\u30E3\u30F3\u30BB\u30EB\u4E2D\u2026\uFF09
IngestJob.progress.dataSourceIngest.displayName={0}\u306E\u30C7\u30FC\u30BF\u30BD\u30FC\u30B9\u3092\u30A4\u30F3\u30B8\u30A7\u30B9\u30C8
IngestJob.progress.fileIngest.displayName={0}\u306E\u30D5\u30A1\u30A4\u30EB\u3092\u30A4\u30F3\u30B8\u30A7\u30B9\u30C8
IngestJobConfigurationPanel.advancedButton.actionCommand=\u30A2\u30C9\u30D0\u30F3\u30B9
IngestJobConfigurationPanel.advancedButton.text=\u30A2\u30C9\u30D0\u30F3\u30B9
IngestJobConfigurationPanel.processUnallocCheckbox.text=\u672A\u5272\u308A\u5F53\u3066\u9818\u57DF\u306E\u51E6\u7406
IngestJobConfigurationPanel.processUnallocCheckbox.toolTipText=\u524A\u9664\u3055\u308C\u305F\u30D5\u30A1\u30A4\u30EB\u7B49\u306E\u672A\u5272\u308A\u5F53\u3066\u9818\u57DF\u3092\u51E6\u7406\u3002\u3088\u308A\u5B8C\u5168\u306A\u7D50\u679C\u304C\u51FA\u307E\u3059\u304C\u3001\u5927\u304D\u3044\u30A4\u30E1\u30FC\u30B8\u3067\u306F\u51E6\u7406\u6642\u9593\u304C\u9577\u304F\u306A\u308B\u304B\u3082\u3057\u308C\u307E\u305B\u3093\u3002
IngestManager.moduleErr=\u30E2\u30B8\u30E5\u30FC\u30EB\u30A8\u30E9\u30FC
IngestManager.moduleErr.errListenToUpdates.msg=Ingest Manager\u30A2\u30C3\u30D7\u30C7\u30FC\u30C8\u3092\u78BA\u8A8D\u4E2D\u306B\u30E2\u30B8\u30E5\u30FC\u30EB\u304C\u30A8\u30E9\u30FC\u3092\u8D77\u3053\u3057\u307E\u3057\u305F\u3002\u3069\u306E\u30E2\u30B8\u30E5\u30FC\u30EB\u304B\u30ED\u30B0\u3067\u78BA\u8A8D\u3057\u3066\u4E0B\u3055\u3044\u3002\u4E00\u90E8\u306E\u30C7\u30FC\u30BF\u304C\u4E0D\u5B8C\u5168\u304B\u3082\u3057\u308C\u307E\u305B\u3093\u3002
IngestManager.StartIngestJobsTask.run.cancelling={0}\uFF08\u30AD\u30E3\u30F3\u30BB\u30EB\u4E2D\u2026\uFF09
IngestManager.StartIngestJobsTask.run.catchException.msg=\u30A4\u30F3\u30B8\u30A7\u30B9\u30C8\u306E\u958B\u59CB\u4E2D\u306B\u30A8\u30E9\u30FC\u304C\u767A\u751F\u3057\u307E\u3057\u305F\u3002\u7D50\u679C\u304C\u4E00\u90E8\u306E\u3082\u306E
IngestManager.StartIngestJobsTask.run.displayName=\u30A4\u30F3\u30B8\u30A7\u30B9\u30C8\u30BF\u30B9\u30AF\u3092\u30AD\u30E5\u30FC\u30A4\u30F3\u30B0
IngestMessage.exception.srcSubjDetailsDataNotNull.msg=\u30BD\u30FC\u30B9\u3001\u30B5\u30D6\u30B8\u30A7\u30AF\u30C8\u3001\u8A73\u7D30\u304A\u3088\u3073\u30C7\u30FC\u30BF\u306F\u30CC\u30EB\u3067\u3042\u3063\u3066\u306F\u3044\u3051\u307E\u305B\u3093
IngestMessage.exception.srcSubjNotNull.msg=\u30BD\u30FC\u30B9\u304A\u3088\u3073\u30B5\u30D6\u30B8\u30A7\u30AF\u30C8\u306F\u30CC\u30EB\u3067\u3042\u3063\u3066\u306F\u3044\u3051\u307E\u305B\u3093
IngestMessage.exception.typeSrcSubjNotNull.msg=\u30E1\u30C3\u30BB\u30FC\u30B8\u30BF\u30A4\u30D7\u3001\u30BD\u30FC\u30B9\u304A\u3088\u3073\u30B5\u30D6\u30B8\u30A7\u30AF\u30C8\u306F\u30CC\u30EB\u3067\u3042\u3063\u3066\u306F\u3044\u3051\u307E\u305B\u3093
IngestMessage.toString.data.text=\ \u30C7\u30FC\u30BF\uFF1A{0}
IngestMessage.toString.date.text=\ \u65E5\u4ED8\uFF1A{0}
IngestMessage.toString.details.text=\ \u8A73\u7D30\uFF1A{0}
IngestMessage.toString.subject.text=\ \u30B5\u30D6\u30B8\u30A7\u30AF\u30C8\uFF1A{0}
IngestMessage.toString.type.text=\u30BF\u30A4\u30D7\uFF1A{0}
IngestMessageDetailsPanel.copyMenuItem.text=\u30B3\u30D4\u30FC
IngestMessageDetailsPanel.messageDetailsPane.contentType=\u30C6\u30AD\u30B9\u30C8\uFF0Fhtml
IngestMessageDetailsPanel.selectAllMenuItem.text=\u3059\u3079\u3066\u9078\u629E
IngestMessageDetailsPanel.viewArtifactButton.text=\u7D50\u679C\u3078\u79FB\u52D5
IngestMessageDetailsPanel.viewContentButton.text=\u30C7\u30A3\u30EC\u30AF\u30C8\u30EA\u3078\u79FB\u52D5
IngestMessagePanel.BooleanRenderer.exception.nonBoolVal.msg=\u30D6\u30FC\u30EB\u5024\u3067\u306F\u306A\u3044\u3082\u306E\u306BBooleanRenderer\u3092\u4F7F\u7528\u3057\u3088\u3046\u3068\u3057\u307E\u3057\u305F
IngestMessagePanel.DateRenderer.exception.nonDateVal.text=\u65E5\u4ED8\u3067\u306F\u306A\u3044\u3082\u306E\u306BDateRenderer\u3092\u4F7F\u7528\u3057\u3088\u3046\u3068\u3057\u307E\u3057\u305F\u3002
IngestMessagePanel.moduleErr=\u30E2\u30B8\u30E5\u30FC\u30EB\u30A8\u30E9\u30FC
IngestMessagePanel.moduleErr.errListenUpdates.text=IngestMessagePanel\u30A2\u30C3\u30D7\u30C7\u30FC\u30C8\u3092\u78BA\u8A8D\u4E2D\u306B\u30E2\u30B8\u30E5\u30FC\u30EB\u304C\u30A8\u30E9\u30FC\u3092\u8D77\u3053\u3057\u307E\u3057\u305F\u3002\u3069\u306E\u30E2\u30B8\u30E5\u30FC\u30EB\u304B\u30ED\u30B0\u3067\u78BA\u8A8D\u3057\u3066\u4E0B\u3055\u3044\u3002\u4E00\u90E8\u306E\u30C7\u30FC\u30BF\u304C\u4E0D\u5B8C\u5168\u304B\u3082\u3057\u308C\u307E\u305B\u3093\u3002
IngestMessagePanel.MsgTableMod.colNames.module=\u30E2\u30B8\u30E5\u30FC\u30EB
IngestMessagePanel.MsgTableMod.colNames.new=\u65B0\u898F\uFF1F
IngestMessagePanel.MsgTableMod.colNames.num=\u756A\u53F7
IngestMessagePanel.MsgTableMod.colNames.subject=\u30B5\u30D6\u30B8\u30A7\u30AF\u30C8
IngestMessagePanel.MsgTableMod.colNames.timestamp=\u30BF\u30A4\u30E0\u30B9\u30BF\u30F3\u30D7
IngestMessagePanel.sortByComboBox.model.priority=\u512A\u5148\u5EA6
CTL_IngestMessageTopComponent=\u30e1\u30c3\u30bb\u30fc\u30b8
HINT_IngestMessageTopComponent=\u30e1\u30c3\u30bb\u30fc\u30b8\u30a6\u30a3\u30f3\u30c9\u30a6
IngestDialog.closeButton.title=\u9589\u3058\u308b
IngestDialog.startButton.title=\u30b9\u30bf\u30fc\u30c8
IngestDialog.title.text=\u30a4\u30f3\u30b8\u30a7\u30b9\u30c8\u30e2\u30b8\u30e5\u30fc\u30eb
IngestJob.progress.cancelling={0}\uff08\u30ad\u30e3\u30f3\u30bb\u30eb\u4e2d\u2026\uff09
IngestJob.progress.dataSourceIngest.displayName={0}\u306e\u30c7\u30fc\u30bf\u30bd\u30fc\u30b9\u3092\u30a4\u30f3\u30b8\u30a7\u30b9\u30c8
IngestJob.progress.fileIngest.displayName={0}\u306e\u30d5\u30a1\u30a4\u30eb\u3092\u30a4\u30f3\u30b8\u30a7\u30b9\u30c8
IngestJobConfigurationPanel.advancedButton.actionCommand=\u30a2\u30c9\u30d0\u30f3\u30b9
IngestJobConfigurationPanel.advancedButton.text=\u30a2\u30c9\u30d0\u30f3\u30b9
IngestJobConfigurationPanel.processUnallocCheckbox.text=\u672a\u5272\u308a\u5f53\u3066\u9818\u57df\u306e\u51e6\u7406
IngestJobConfigurationPanel.processUnallocCheckbox.toolTipText=\u524a\u9664\u3055\u308c\u305f\u30d5\u30a1\u30a4\u30eb\u7b49\u306e\u672a\u5272\u308a\u5f53\u3066\u9818\u57df\u3092\u51e6\u7406\u3002\u3088\u308a\u5b8c\u5168\u306a\u7d50\u679c\u304c\u51fa\u307e\u3059\u304c\u3001\u5927\u304d\u3044\u30a4\u30e1\u30fc\u30b8\u3067\u306f\u51e6\u7406\u6642\u9593\u304c\u9577\u304f\u306a\u308b\u304b\u3082\u3057\u308c\u307e\u305b\u3093\u3002
IngestManager.moduleErr=\u30e2\u30b8\u30e5\u30fc\u30eb\u30a8\u30e9\u30fc
IngestManager.moduleErr.errListenToUpdates.msg=Ingest Manager\u30a2\u30c3\u30d7\u30c7\u30fc\u30c8\u3092\u78ba\u8a8d\u4e2d\u306b\u30e2\u30b8\u30e5\u30fc\u30eb\u304c\u30a8\u30e9\u30fc\u3092\u8d77\u3053\u3057\u307e\u3057\u305f\u3002\u3069\u306e\u30e2\u30b8\u30e5\u30fc\u30eb\u304b\u30ed\u30b0\u3067\u78ba\u8a8d\u3057\u3066\u4e0b\u3055\u3044\u3002\u4e00\u90e8\u306e\u30c7\u30fc\u30bf\u304c\u4e0d\u5b8c\u5168\u304b\u3082\u3057\u308c\u307e\u305b\u3093\u3002
IngestManager.StartIngestJobsTask.run.cancelling={0}\uff08\u30ad\u30e3\u30f3\u30bb\u30eb\u4e2d\u2026\uff09
IngestManager.StartIngestJobsTask.run.catchException.msg=\u30a4\u30f3\u30b8\u30a7\u30b9\u30c8\u306e\u958b\u59cb\u4e2d\u306b\u30a8\u30e9\u30fc\u304c\u767a\u751f\u3057\u307e\u3057\u305f\u3002\u7d50\u679c\u304c\u4e00\u90e8\u306e\u3082\u306e
IngestManager.StartIngestJobsTask.run.displayName=\u30a4\u30f3\u30b8\u30a7\u30b9\u30c8\u30bf\u30b9\u30af\u3092\u30ad\u30e5\u30fc\u30a4\u30f3\u30b0
IngestMessage.exception.srcSubjDetailsDataNotNull.msg=\u30bd\u30fc\u30b9\u3001\u30b5\u30d6\u30b8\u30a7\u30af\u30c8\u3001\u8a73\u7d30\u304a\u3088\u3073\u30c7\u30fc\u30bf\u306f\u30cc\u30eb\u3067\u3042\u3063\u3066\u306f\u3044\u3051\u307e\u305b\u3093
IngestMessage.exception.srcSubjNotNull.msg=\u30bd\u30fc\u30b9\u304a\u3088\u3073\u30b5\u30d6\u30b8\u30a7\u30af\u30c8\u306f\u30cc\u30eb\u3067\u3042\u3063\u3066\u306f\u3044\u3051\u307e\u305b\u3093
IngestMessage.exception.typeSrcSubjNotNull.msg=\u30e1\u30c3\u30bb\u30fc\u30b8\u30bf\u30a4\u30d7\u3001\u30bd\u30fc\u30b9\u304a\u3088\u3073\u30b5\u30d6\u30b8\u30a7\u30af\u30c8\u306f\u30cc\u30eb\u3067\u3042\u3063\u3066\u306f\u3044\u3051\u307e\u305b\u3093
IngestMessage.toString.data.text=\ \u30c7\u30fc\u30bf\uff1a{0}
IngestMessage.toString.date.text=\ \u65e5\u4ed8\uff1a{0}
IngestMessage.toString.details.text=\ \u8a73\u7d30\uff1a{0}
IngestMessage.toString.subject.text=\ \u30b5\u30d6\u30b8\u30a7\u30af\u30c8\uff1a{0}
IngestMessage.toString.type.text=\u30bf\u30a4\u30d7\uff1a{0}
IngestMessageDetailsPanel.copyMenuItem.text=\u30b3\u30d4\u30fc
IngestMessageDetailsPanel.messageDetailsPane.contentType=\u30c6\u30ad\u30b9\u30c8\uff0fhtml
IngestMessageDetailsPanel.selectAllMenuItem.text=\u3059\u3079\u3066\u9078\u629e
IngestMessageDetailsPanel.viewArtifactButton.text=\u7d50\u679c\u3078\u79fb\u52d5
IngestMessageDetailsPanel.viewContentButton.text=\u30c7\u30a3\u30ec\u30af\u30c8\u30ea\u3078\u79fb\u52d5
IngestMessagePanel.BooleanRenderer.exception.nonBoolVal.msg=\u30d6\u30fc\u30eb\u5024\u3067\u306f\u306a\u3044\u3082\u306e\u306bBooleanRenderer\u3092\u4f7f\u7528\u3057\u3088\u3046\u3068\u3057\u307e\u3057\u305f
IngestMessagePanel.DateRenderer.exception.nonDateVal.text=\u65e5\u4ed8\u3067\u306f\u306a\u3044\u3082\u306e\u306bDateRenderer\u3092\u4f7f\u7528\u3057\u3088\u3046\u3068\u3057\u307e\u3057\u305f\u3002
IngestMessagePanel.moduleErr=\u30e2\u30b8\u30e5\u30fc\u30eb\u30a8\u30e9\u30fc
IngestMessagePanel.moduleErr.errListenUpdates.text=IngestMessagePanel\u30a2\u30c3\u30d7\u30c7\u30fc\u30c8\u3092\u78ba\u8a8d\u4e2d\u306b\u30e2\u30b8\u30e5\u30fc\u30eb\u304c\u30a8\u30e9\u30fc\u3092\u8d77\u3053\u3057\u307e\u3057\u305f\u3002\u3069\u306e\u30e2\u30b8\u30e5\u30fc\u30eb\u304b\u30ed\u30b0\u3067\u78ba\u8a8d\u3057\u3066\u4e0b\u3055\u3044\u3002\u4e00\u90e8\u306e\u30c7\u30fc\u30bf\u304c\u4e0d\u5b8c\u5168\u304b\u3082\u3057\u308c\u307e\u305b\u3093\u3002
IngestMessagePanel.MsgTableMod.colNames.module=\u30e2\u30b8\u30e5\u30fc\u30eb
IngestMessagePanel.MsgTableMod.colNames.new=\u65b0\u898f\uff1f
IngestMessagePanel.MsgTableMod.colNames.num=\u756a\u53f7
IngestMessagePanel.MsgTableMod.colNames.subject=\u30b5\u30d6\u30b8\u30a7\u30af\u30c8
IngestMessagePanel.MsgTableMod.colNames.timestamp=\u30bf\u30a4\u30e0\u30b9\u30bf\u30f3\u30d7
IngestMessagePanel.sortByComboBox.model.priority=\u512a\u5148\u5ea6
IngestMessagePanel.sortByComboBox.model.time=\u6642\u9593
IngestMessagePanel.sortByComboBox.toolTipText=\u6642\u9593\u9806\uFF08\u6642\u7CFB\u5217\uFF09\u307E\u305F\u306F\u30E1\u30C3\u30BB\u30FC\u30B8\u306E\u512A\u5148\u5EA6\u3067\u30BD\u30FC\u30C8
IngestMessagePanel.sortByLabel.text=\u4E0B\u8A18\u3067\u30BD\u30FC\u30C8\uFF1A
IngestMessagePanel.totalMessagesNameLabel.text=\u5408\u8A08\uFF1A
IngestMessagePanel.sortByComboBox.toolTipText=\u6642\u9593\u9806\uff08\u6642\u7cfb\u5217\uff09\u307e\u305f\u306f\u30e1\u30c3\u30bb\u30fc\u30b8\u306e\u512a\u5148\u5ea6\u3067\u30bd\u30fc\u30c8
IngestMessagePanel.sortByLabel.text=\u4e0b\u8a18\u3067\u30bd\u30fc\u30c8\uff1a
IngestMessagePanel.totalMessagesNameLabel.text=\u5408\u8a08\uff1a
IngestMessagePanel.totalMessagesNameVal.text=-
IngestMessagePanel.totalUniqueMessagesNameLabel.text=\u30E6\u30CB\u30FC\u30AF\uFF1A
IngestMessagePanel.totalUniqueMessagesNameLabel.text=\u30e6\u30cb\u30fc\u30af\uff1a
IngestMessagePanel.totalUniqueMessagesNameVal.text=-
IngestMessagesToolbar.customizeButton.toolTipText=\u30A4\u30F3\u30B8\u30A7\u30B9\u30C8\u30E1\u30C3\u30BB\u30FC\u30B8
IngestMessageTopComponent.displayName=\u30A4\u30F3\u30B8\u30A7\u30B9\u30C8\u30A4\u30F3\u30DC\u30C3\u30AF\u30B9
IngestMessageTopComponent.displayReport.option.GenRpt=\u30EC\u30DD\u30FC\u30C8\u751F\u6210
IngestMessagesToolbar.customizeButton.toolTipText=\u30a4\u30f3\u30b8\u30a7\u30b9\u30c8\u30e1\u30c3\u30bb\u30fc\u30b8
IngestMessageTopComponent.displayName=\u30a4\u30f3\u30b8\u30a7\u30b9\u30c8\u30a4\u30f3\u30dc\u30c3\u30af\u30b9
IngestMessageTopComponent.displayReport.option.GenRpt=\u30ec\u30dd\u30fc\u30c8\u751f\u6210
IngestMessageTopComponent.displayReport.option.OK=OK
IngestMessageTopComponent.initComponents.name=\u30A4\u30F3\u30B8\u30A7\u30B9\u30C8\u30A4\u30F3\u30DC\u30C3\u30AF\u30B9
IngestMessageTopComponent.msgDlg.ingestRpt.text=\u30A4\u30F3\u30B8\u30A7\u30B9\u30C8\u30EC\u30DD\u30FC\u30C8
IngestMonitor.mgrErrMsg.lowDiskSpace.msg=\u30C7\u30A3\u30B9\u30AF{0}\u306E\u30C7\u30A3\u30B9\u30AF\u9818\u57DF\u4E0D\u8DB3\u306E\u305F\u3081\u30A4\u30F3\u30B8\u30A7\u30B9\u30C8\u3092\u4E2D\u6B62\u3057\u307E\u3059\u3002\
\u30B1\u30FC\u30B9\u30C9\u30E9\u30A4\u30D6\u306B\u6700\u4F4E1GB\u306E\u7A7A\u304D\u9818\u57DF\u304C\u3042\u308B\u306E\u3092\u78BA\u8A8D\u3057\u3001\u30A4\u30F3\u30B8\u30A7\u30B9\u30C8\u3092\u518D\u30B9\u30BF\u30FC\u30C8\u3057\u3066\u4E0B\u3055\u3044\u3002
IngestMonitor.mgrErrMsg.lowDiskSpace.title=\u30A4\u30F3\u30B8\u30A7\u30B9\u30C8\u304C\u4E2D\u6B62\u3055\u308C\u307E\u3057\u305F\u30FC{0}\u306E\u30C7\u30A3\u30B9\u30AF\u9818\u57DF\u4E0D\u8DB3
IngestScheduler.DataSourceScheduler.toString.size=DataSourceQueue, \u30B5\u30A4\u30BA\uFF1A
IngestScheduler.FileSched.toString.curDirs.text=\
CurDirs(stack), \u30B5\u30A4\u30BA\uFF1A
IngestScheduler.FileSched.toString.curFiles.text=\
CurFiles, \u30B5\u30A4\u30BA\uFF1A
IngestScheduler.FileSched.toString.rootDirs.text=\
RootDirs(sorted), \u30B5\u30A4\u30BA\uFF1A
OpenIDE-Module-Name=\u30A4\u30F3\u30B8\u30A7\u30B9\u30C8
IngestManager.StartIngestJobsTask.run.progress.msg1={0}\u306E\u30C7\u30FC\u30BF\u30BD\u30FC\u30B9\u30A4\u30F3\u30B8\u30A7\u30B9\u30C8\u30BF\u30B9\u30AF
IngestManager.StartIngestJobsTask.run.progress.msg2={0}\u306E\u30C7\u30FC\u30BF\u30BD\u30FC\u30B9\u30A4\u30F3\u30B8\u30A7\u30B9\u30C8\u30BF\u30B9\u30AF
IngestManager.StartIngestJobsTask.run.progress.msg3={0}\u306E\u30C7\u30FC\u30BF\u30BD\u30FC\u30B9\u30A4\u30F3\u30B8\u30A7\u30B9\u30C8\u30BF\u30B9\u30AF
IngestManager.StartIngestJobsTask.run.progress.msg4={0}\u306E\u30C7\u30FC\u30BF\u30BD\u30FC\u30B9\u30A4\u30F3\u30B8\u30A7\u30B9\u30C8\u30BF\u30B9\u30AF
IngestManager.StartIngestJobsTask.run.startupErr.dlgErrorList=\u30A8\u30E9\u30FC\uFF1A\
IngestMessageTopComponent.initComponents.name=\u30a4\u30f3\u30b8\u30a7\u30b9\u30c8\u30a4\u30f3\u30dc\u30c3\u30af\u30b9
IngestMessageTopComponent.msgDlg.ingestRpt.text=\u30a4\u30f3\u30b8\u30a7\u30b9\u30c8\u30ec\u30dd\u30fc\u30c8
IngestMonitor.mgrErrMsg.lowDiskSpace.msg=\u30c7\u30a3\u30b9\u30af{0}\u306e\u30c7\u30a3\u30b9\u30af\u9818\u57df\u4e0d\u8db3\u306e\u305f\u3081\u30a4\u30f3\u30b8\u30a7\u30b9\u30c8\u3092\u4e2d\u6b62\u3057\u307e\u3059\u3002\
\u30b1\u30fc\u30b9\u30c9\u30e9\u30a4\u30d6\u306b\u6700\u4f4e1GB\u306e\u7a7a\u304d\u9818\u57df\u304c\u3042\u308b\u306e\u3092\u78ba\u8a8d\u3057\u3001\u30a4\u30f3\u30b8\u30a7\u30b9\u30c8\u3092\u518d\u30b9\u30bf\u30fc\u30c8\u3057\u3066\u4e0b\u3055\u3044\u3002
IngestMonitor.mgrErrMsg.lowDiskSpace.title=\u30a4\u30f3\u30b8\u30a7\u30b9\u30c8\u304c\u4e2d\u6b62\u3055\u308c\u307e\u3057\u305f\u30fc{0}\u306e\u30c7\u30a3\u30b9\u30af\u9818\u57df\u4e0d\u8db3
IngestScheduler.DataSourceScheduler.toString.size=DataSourceQueue, \u30b5\u30a4\u30ba\uff1a
OpenIDE-Module-Name=\u30a4\u30f3\u30b8\u30a7\u30b9\u30c8
IngestManager.StartIngestJobsTask.run.progress.msg1={0}\u306e\u30c7\u30fc\u30bf\u30bd\u30fc\u30b9\u30a4\u30f3\u30b8\u30a7\u30b9\u30c8\u30bf\u30b9\u30af
IngestManager.StartIngestJobsTask.run.progress.msg2={0}\u306e\u30c7\u30fc\u30bf\u30bd\u30fc\u30b9\u30a4\u30f3\u30b8\u30a7\u30b9\u30c8\u30bf\u30b9\u30af
IngestManager.StartIngestJobsTask.run.progress.msg3={0}\u306e\u30c7\u30fc\u30bf\u30bd\u30fc\u30b9\u30a4\u30f3\u30b8\u30a7\u30b9\u30c8\u30bf\u30b9\u30af
IngestManager.StartIngestJobsTask.run.progress.msg4={0}\u306e\u30c7\u30fc\u30bf\u30bd\u30fc\u30b9\u30a4\u30f3\u30b8\u30a7\u30b9\u30c8\u30bf\u30b9\u30af
IngestManager.StartIngestJobsTask.run.startupErr.dlgErrorList=\u30a8\u30e9\u30fc\uff1a\
\
{0}
IngestManager.StartIngestJobsTask.run.startupErr.dlgMsg=\uFF11\u3064\u307E\u305F\u306F\u8907\u6570\u306E\u30A4\u30F3\u30B8\u30A7\u30B9\u30C8\u30E2\u30B8\u30E5\u30FC\u30EB\u3092\u30B9\u30BF\u30FC\u30C8\u3067\u304D\u307E\u305B\u3093\u3067\u3057\u305F\u3002\u30A4\u30F3\u30B8\u30A7\u30B9\u30C8\u30B8\u30E7\u30D6\u306F\u30AD\u30E3\u30F3\u30BB\u30EB\u3055\u308C\u307E\u3057\u305F\u3002
IngestManager.StartIngestJobsTask.run.startupErr.dlgSolution=\u5931\u6557\u3057\u305F\u30E2\u30B8\u30E5\u30FC\u30EB\u3092\u7121\u52B9\u5316\u3059\u308B\u304B\u30A8\u30E9\u30FC\u3092\u89E3\u6C7A\u3057\u3001\u305D\u306E\u5F8C\u30C7\u30FC\u30BF\u30BD\u30FC\u30B9\u3092\u53F3\u30AF\u30EA\u30C3\u30AF\u3057\u3001\u30A4\u30F3\u30B8\u30A7\u30B9\u30C8\u30E2\u30B8\u30E5\u30FC\u30EB\u3092\u5B9F\u884C\u3092\u9078\u629E\u3057\u3066\u3001\u30A4\u30F3\u30B8\u30A7\u30B9\u30C8\u3092\u30EA\u30B9\u30BF\u30FC\u30C8\u3057\u3066\u4E0B\u3055\u3044\u3002
IngestManager.StartIngestJobsTask.run.startupErr.dlgTitle=\u30A4\u30F3\u30B8\u30A7\u30B9\u30C8\u5931\u6557
IngestManager.StartIngestJobsTask.run.startupErr.dlgMsg=\uff11\u3064\u307e\u305f\u306f\u8907\u6570\u306e\u30a4\u30f3\u30b8\u30a7\u30b9\u30c8\u30e2\u30b8\u30e5\u30fc\u30eb\u3092\u30b9\u30bf\u30fc\u30c8\u3067\u304d\u307e\u305b\u3093\u3067\u3057\u305f\u3002\u30a4\u30f3\u30b8\u30a7\u30b9\u30c8\u30b8\u30e7\u30d6\u306f\u30ad\u30e3\u30f3\u30bb\u30eb\u3055\u308c\u307e\u3057\u305f\u3002
IngestManager.StartIngestJobsTask.run.startupErr.dlgSolution=\u5931\u6557\u3057\u305f\u30e2\u30b8\u30e5\u30fc\u30eb\u3092\u7121\u52b9\u5316\u3059\u308b\u304b\u30a8\u30e9\u30fc\u3092\u89e3\u6c7a\u3057\u3001\u305d\u306e\u5f8c\u30c7\u30fc\u30bf\u30bd\u30fc\u30b9\u3092\u53f3\u30af\u30ea\u30c3\u30af\u3057\u3001\u30a4\u30f3\u30b8\u30a7\u30b9\u30c8\u30e2\u30b8\u30e5\u30fc\u30eb\u3092\u5b9f\u884c\u3092\u9078\u629e\u3057\u3066\u3001\u30a4\u30f3\u30b8\u30a7\u30b9\u30c8\u3092\u30ea\u30b9\u30bf\u30fc\u30c8\u3057\u3066\u4e0b\u3055\u3044\u3002
IngestManager.StartIngestJobsTask.run.startupErr.dlgTitle=\u30a4\u30f3\u30b8\u30a7\u30b9\u30c8\u5931\u6557

View File

@@ -34,8 +34,8 @@ final class DataSourceIngestPipeline {
private final List<IngestModuleTemplate> moduleTemplates;
private List<DataSourceIngestModuleDecorator> modules = new ArrayList<>();
DataSourceIngestPipeline(IngestJob task, List<IngestModuleTemplate> moduleTemplates) {
this.job = task;
DataSourceIngestPipeline(IngestJob job, List<IngestModuleTemplate> moduleTemplates) {
this.job = job;
this.moduleTemplates = moduleTemplates;
}

View File

@@ -0,0 +1,44 @@
/*
* Autopsy Forensic Browser
*
* Copyright 2014 Basis Technology Corp.
* Contact: carrier <at> sleuthkit <dot> org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.sleuthkit.autopsy.ingest;
import org.sleuthkit.datamodel.Content;
final class DataSourceIngestTask {
private final IngestJob ingestJob;
private final Content dataSource;
DataSourceIngestTask(IngestJob ingestJob, Content dataSource) {
this.ingestJob = ingestJob;
this.dataSource = dataSource;
}
IngestJob getIngestJob() {
return ingestJob;
}
Content getDataSource() {
return dataSource;
}
void execute() {
ingestJob.process();
}
}

View File

@@ -0,0 +1,57 @@
/*
* Autopsy Forensic Browser
*
* Copyright 2012-2014 Basis Technology Corp.
* Contact: carrier <at> sleuthkit <dot> org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.sleuthkit.autopsy.ingest;
import java.util.concurrent.LinkedBlockingQueue;
final class DataSourceIngestTaskScheduler {
private static DataSourceIngestTaskScheduler instance = new DataSourceIngestTaskScheduler();
private final LinkedBlockingQueue<DataSourceIngestTask> tasks = new LinkedBlockingQueue<>();
static DataSourceIngestTaskScheduler getInstance() {
return instance;
}
private DataSourceIngestTaskScheduler() {
}
synchronized void addTask(DataSourceIngestTask task) throws InterruptedException {
task.getIngestJob().notifyTaskPending();
try {
tasks.put(task);
}
catch (InterruptedException ex) {
// RJCTODO: Need a safety notification to undo above
}
}
DataSourceIngestTask getNextTask() throws InterruptedException {
return tasks.take();
}
boolean hasTasksForIngestJob(long jobId) {
for (DataSourceIngestTask task : tasks) {
if (task.getIngestJobId() == jobId) {
return true;
}
}
return false;
}
}
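The scheduler above is a plain producer/consumer queue: addTask() enqueues work and getNextTask() blocks until a task is available. The consuming thread is not part of this file, so the following is only a minimal sketch, assuming a single dedicated data source ingest thread drains the queue; the class name below is hypothetical and not part of this commit.

// Editorial sketch only: a hypothetical consumer loop for the data source
// ingest thread.
final class DataSourceIngestThreadSketch implements Runnable {
    @Override
    public void run() {
        DataSourceIngestTaskScheduler scheduler = DataSourceIngestTaskScheduler.getInstance();
        while (!Thread.currentThread().isInterrupted()) {
            try {
                // Blocks until addTask() has queued a task.
                DataSourceIngestTask task = scheduler.getNextTask();
                // Runs the job's data source ingest pipeline via IngestJob.process().
                task.execute();
            } catch (InterruptedException ex) {
                // Restore the interrupt so the executor can shut this thread down.
                Thread.currentThread().interrupt();
            }
        }
    }
}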

View File

@@ -0,0 +1,93 @@
/*
* Autopsy Forensic Browser
*
* Copyright 2012-2014 Basis Technology Corp.
* Contact: carrier <at> sleuthkit <dot> org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.sleuthkit.autopsy.ingest;
import java.util.Objects;
import java.util.logging.Level;
import org.sleuthkit.datamodel.AbstractFile;
import org.sleuthkit.datamodel.TskCoreException;
final class FileIngestTask {
final AbstractFile file;
private final IngestJob ingestJob;
FileIngestTask(AbstractFile file, IngestJob task) {
this.file = file;
this.ingestJob = task;
}
public IngestJob getIngestJob() {
return ingestJob;
}
public AbstractFile getFile() {
return file;
}
void execute(long threadId) {
ingestJob.process(file);
}
@Override
public String toString() { //RJCTODO: May not keep this
try {
return "ProcessTask{" + "file=" + file.getId() + ": " + file.getUniquePath() + "}"; // + ", dataSourceTask=" + dataSourceTask + '}';
} catch (TskCoreException ex) {
// RJCTODO
// FileIngestTaskScheduler.logger.log(Level.SEVERE, "Could not get unique path of file in queue, ", ex); //NON-NLS
}
return "ProcessTask{" + "file=" + file.getId() + ": " + file.getName() + '}';
}
/**
* Two process tasks are equal when the file/dir and modules are the
* same. This ensures that the same file/dir, modules tuples are not
* queued up into the root dir set twice.
*
* @param obj
* @return
*/
@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
final FileIngestTask other = (FileIngestTask) obj;
if (this.file != other.file && (this.file == null || !this.file.equals(other.file))) {
return false;
}
IngestJob thisTask = this.getIngestJob();
IngestJob otherTask = other.getIngestJob();
if (thisTask != otherTask && (thisTask == null || !thisTask.equals(otherTask))) {
return false;
}
return true;
}
@Override
public int hashCode() {
int hash = 5;
hash = 47 * hash + Objects.hashCode(this.file);
hash = 47 * hash + Objects.hashCode(this.ingestJob);
return hash;
}
}

View File

@@ -0,0 +1,418 @@
/*
* Autopsy Forensic Browser
*
* Copyright 2012-2014 Basis Technology Corp.
* Contact: carrier <at> sleuthkit <dot> org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.sleuthkit.autopsy.ingest;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Comparator;
import java.util.List;
import java.util.TreeSet;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.sleuthkit.datamodel.AbstractFile;
import org.sleuthkit.datamodel.Content;
import org.sleuthkit.datamodel.DerivedFile;
import org.sleuthkit.datamodel.Directory;
import org.sleuthkit.datamodel.File;
import org.sleuthkit.datamodel.FileSystem;
import org.sleuthkit.datamodel.LayoutFile;
import org.sleuthkit.datamodel.LocalFile;
import org.sleuthkit.datamodel.TskCoreException;
import org.sleuthkit.datamodel.TskData;
import org.sleuthkit.datamodel.VirtualDirectory;
final class FileIngestTaskScheduler {
private static final Logger logger = Logger.getLogger(FileIngestTaskScheduler.class.getName());
private static FileIngestTaskScheduler instance;
private final TreeSet<FileIngestTask> rootDirectoryTasks = new TreeSet<>(new RootDirectoryTaskComparator());
private final List<FileIngestTask> directoryTasks = new ArrayList<>();
private final LinkedBlockingQueue<FileIngestTask> fileTasks = new LinkedBlockingQueue<>(); // Unlimited capacity
private static final int FAT_NTFS_FLAGS = TskData.TSK_FS_TYPE_ENUM.TSK_FS_TYPE_FAT12.getValue() | TskData.TSK_FS_TYPE_ENUM.TSK_FS_TYPE_FAT16.getValue() | TskData.TSK_FS_TYPE_ENUM.TSK_FS_TYPE_FAT32.getValue() | TskData.TSK_FS_TYPE_ENUM.TSK_FS_TYPE_NTFS.getValue();
static synchronized FileIngestTaskScheduler getInstance() {
if (instance == null) {
instance = new FileIngestTaskScheduler();
}
return instance;
}
private FileIngestTaskScheduler() {
}
synchronized void addTasks(IngestJob dataSourceTask, Content dataSource) {
Collection<AbstractFile> rootObjects = dataSource.accept(new GetRootDirectoryVisitor());
List<AbstractFile> firstLevelFiles = new ArrayList<>();
if (rootObjects.isEmpty() && dataSource instanceof AbstractFile) {
// The data source is a file.
firstLevelFiles.add((AbstractFile) dataSource);
} else {
for (AbstractFile root : rootObjects) {
List<Content> children;
try {
children = root.getChildren();
if (children.isEmpty()) {
//add the root itself, could be unalloc file, child of volume or image
firstLevelFiles.add(root);
} else {
//root for fs root dir, schedule children dirs/files
for (Content child : children) {
if (child instanceof AbstractFile) {
firstLevelFiles.add((AbstractFile) child);
}
}
}
} catch (TskCoreException ex) {
logger.log(Level.WARNING, "Could not get children of root to enqueue: " + root.getId() + ": " + root.getName(), ex); //NON-NLS
}
}
}
for (AbstractFile firstLevelFile : firstLevelFiles) {
FileIngestTask fileTask = new FileIngestTask(firstLevelFile, dataSourceTask);
if (shouldEnqueueTask(fileTask)) {
rootDirectoryTasks.add(fileTask);
}
}
// Reshuffle/update the dir and file level queues if needed
updateQueues();
}
synchronized void addTask(IngestJob ingestJob, AbstractFile file) {
try {
FileIngestTask fileTask = new FileIngestTask(file, ingestJob);
if (shouldEnqueueTask(fileTask)) {
fileTask.getIngestJob().notifyTaskPending();
fileTasks.put(fileTask); // Queue has unlimited capacity, does not block.
}
} catch (InterruptedException ex) {
// RJCTODO: Perhaps this is the convenience method?
// RJCTODO: Need undo
}
}
FileIngestTask getNextTask() throws InterruptedException {
FileIngestTask task = fileTasks.take();
updateQueues();
return task;
}
synchronized boolean hasTasksForJob(long ingestJobId) {
for (FileIngestTask task : rootDirectoryTasks) {
if (task.getIngestJob().getJobId() == ingestJobId) {
return true;
}
}
for (FileIngestTask task : directoryTasks) {
if (task.getIngestJob().getJobId() == ingestJobId) {
return true;
}
}
for (FileIngestTask task : fileTasks) {
if (task.getIngestJob().getJobId() == ingestJobId) {
return true;
}
}
return false;
}
private void updateQueues() {
// We loop because we could have a directory whose files all get
// filtered out and never enqueued.
while (true) {
// There are files in the queue, we're done
if (fileTasks.isEmpty() == false) {
return;
}
// fill in the directory queue if it is empty.
if (this.directoryTasks.isEmpty()) {
// bail out if root is also empty -- we are done
if (rootDirectoryTasks.isEmpty()) {
return;
}
FileIngestTask rootTask = this.rootDirectoryTasks.pollFirst();
directoryTasks.add(rootTask);
}
//pop and push AbstractFile directory children if any
//add the popped and its leaf children onto cur file list
FileIngestTask parentTask = directoryTasks.remove(directoryTasks.size() - 1);
final AbstractFile parentFile = parentTask.file;
// add itself to the file list
if (shouldEnqueueTask(parentTask)) {
// RJCTODO
try {
parentTask.getIngestJob().notifyTaskPending();
fileTasks.put(parentTask);
} catch (InterruptedException ex) {
// RJCTODO: Maybe make a convenience method
// RJCTODO: Need undo
}
}
// add its children to the file and directory lists
try {
List<Content> children = parentFile.getChildren();
for (Content c : children) {
if (c instanceof AbstractFile) {
AbstractFile childFile = (AbstractFile) c;
FileIngestTask childTask = new FileIngestTask(childFile, parentTask.getIngestJob());
if (childFile.hasChildren()) {
this.directoryTasks.add(childTask);
} else if (shouldEnqueueTask(childTask)) {
// RJCTODO
try {
childTask.getIngestJob().notifyTaskPending();
fileTasks.put(childTask);
} catch (InterruptedException ex) {
// RJCTODO: Maybe make a convenience method
// RJCTODO: Need undo
}
}
}
}
} catch (TskCoreException ex) {
logger.log(Level.SEVERE, "Could not get children of file and update file queues: " + parentFile.getName(), ex);
}
}
}
synchronized void emptyQueues() { // RJCTODO: Perhaps clear all...
this.rootDirectoryTasks.clear();
this.directoryTasks.clear();
this.fileTasks.clear();
}
/**
* Check if the file is a special file that we should skip
*
* @param processTask a task whose file to check whether it should be queued or
* skipped
* @return true if should be enqueued, false otherwise
*/
private static boolean shouldEnqueueTask(final FileIngestTask processTask) {
final AbstractFile aFile = processTask.file;
//if it's unalloc file, skip if so scheduled
if (processTask.getIngestJob().shouldProcessUnallocatedSpace() == false && aFile.getType().equals(TskData.TSK_DB_FILES_TYPE_ENUM.UNALLOC_BLOCKS)) {
return false;
}
String fileName = aFile.getName();
if (fileName.equals(".") || fileName.equals("..")) {
return false;
} else if (aFile instanceof org.sleuthkit.datamodel.File) {
final org.sleuthkit.datamodel.File f = (File) aFile;
//skip files in root dir, starting with $, containing : (not default attributes)
//with meta address < 32, i.e. some special large NTFS and FAT files
FileSystem fs = null;
try {
fs = f.getFileSystem();
} catch (TskCoreException ex) {
logger.log(Level.SEVERE, "Could not get FileSystem for " + f, ex); //NON-NLS
}
TskData.TSK_FS_TYPE_ENUM fsType = TskData.TSK_FS_TYPE_ENUM.TSK_FS_TYPE_UNSUPP;
if (fs != null) {
fsType = fs.getFsType();
}
if ((fsType.getValue() & FAT_NTFS_FLAGS) == 0) {
//not fat or ntfs, accept all files
return true;
}
boolean isInRootDir = false;
try {
isInRootDir = f.getParentDirectory().isRoot();
} catch (TskCoreException ex) {
logger.log(Level.WARNING, "Could not check if should enqueue the file: " + f.getName(), ex); //NON-NLS
}
if (isInRootDir && f.getMetaAddr() < 32) {
String name = f.getName();
if (name.length() > 0 && name.charAt(0) == '$' && name.contains(":")) {
return false;
}
} else {
return true;
}
}
return true;
}
/**
* Visitor that gets a collection of top level objects to be scheduled, such
* as root directories (if there is a FS), or LayoutFiles and virtual
* directories if there is no FS.
*/
static class GetRootDirectoryVisitor extends GetFilesContentVisitor {
@Override
public Collection<AbstractFile> visit(VirtualDirectory ld) {
//case when we hit a layout directory or local file container, not under a real FS
//or when root virt dir is scheduled
Collection<AbstractFile> ret = new ArrayList<>();
ret.add(ld);
return ret;
}
@Override
public Collection<AbstractFile> visit(LayoutFile lf) {
//case when we hit a layout file, not under a real FS
Collection<AbstractFile> ret = new ArrayList<>();
ret.add(lf);
return ret;
}
@Override
public Collection<AbstractFile> visit(Directory drctr) {
//we hit a real directory, a child of real FS
Collection<AbstractFile> ret = new ArrayList<>();
ret.add(drctr);
return ret;
}
@Override
public Collection<AbstractFile> visit(FileSystem fs) {
return getAllFromChildren(fs);
}
@Override
public Collection<AbstractFile> visit(File file) {
//can have derived files
return getAllFromChildren(file);
}
@Override
public Collection<AbstractFile> visit(DerivedFile derivedFile) {
//can have derived files
//TODO test this and overall scheduler with derived files
return getAllFromChildren(derivedFile);
}
@Override
public Collection<AbstractFile> visit(LocalFile localFile) {
//can have local files
//TODO test this and overall scheduler with local files
return getAllFromChildren(localFile);
}
}
/**
* Root directory sorter
*/
private static class RootDirectoryTaskComparator implements Comparator<FileIngestTask> {
@Override
public int compare(FileIngestTask q1, FileIngestTask q2) {
AbstractFilePriority.Priority p1 = AbstractFilePriority.getPriority(q1.file);
AbstractFilePriority.Priority p2 = AbstractFilePriority.getPriority(q2.file);
if (p1 == p2) {
return (int) (q2.file.getId() - q1.file.getId());
} else {
return p2.ordinal() - p1.ordinal();
}
}
/**
* Priority determination for sorted AbstractFile, used by
* RootDirComparator
*/
private static class AbstractFilePriority {
enum Priority {
LAST, LOW, MEDIUM, HIGH
}
static final List<Pattern> LAST_PRI_PATHS = new ArrayList<>();
static final List<Pattern> LOW_PRI_PATHS = new ArrayList<>();
static final List<Pattern> MEDIUM_PRI_PATHS = new ArrayList<>();
static final List<Pattern> HIGH_PRI_PATHS = new ArrayList<>();
/* prioritize root directory folders based on the assumption that we are
* looking for user content. Other types of investigations may want different
* priorities. */
static {
// these files have no structure, so they go last
//unalloc files are handled as virtual files in getPriority()
//LAST_PRI_PATHS.schedule(Pattern.compile("^\\$Unalloc", Pattern.CASE_INSENSITIVE));
//LAST_PRI_PATHS.schedule(Pattern.compile("^\\Unalloc", Pattern.CASE_INSENSITIVE));
LAST_PRI_PATHS.add(Pattern.compile("^pagefile", Pattern.CASE_INSENSITIVE));
LAST_PRI_PATHS.add(Pattern.compile("^hiberfil", Pattern.CASE_INSENSITIVE));
// orphan files are often corrupt and windows does not typically have
// user content, so put them towards the bottom
LOW_PRI_PATHS.add(Pattern.compile("^\\$OrphanFiles", Pattern.CASE_INSENSITIVE));
LOW_PRI_PATHS.add(Pattern.compile("^Windows", Pattern.CASE_INSENSITIVE));
// all other files go into the medium category too
MEDIUM_PRI_PATHS.add(Pattern.compile("^Program Files", Pattern.CASE_INSENSITIVE));
// user content is top priority
HIGH_PRI_PATHS.add(Pattern.compile("^Users", Pattern.CASE_INSENSITIVE));
HIGH_PRI_PATHS.add(Pattern.compile("^Documents and Settings", Pattern.CASE_INSENSITIVE));
HIGH_PRI_PATHS.add(Pattern.compile("^home", Pattern.CASE_INSENSITIVE));
HIGH_PRI_PATHS.add(Pattern.compile("^ProgramData", Pattern.CASE_INSENSITIVE));
}
/**
* Get the scheduling priority for a given file.
*
* @param abstractFile
* @return
*/
static AbstractFilePriority.Priority getPriority(final AbstractFile abstractFile) {
if (!abstractFile.getType().equals(TskData.TSK_DB_FILES_TYPE_ENUM.FS)) {
//quickly filter out unstructured content
//non-fs virtual files and dirs, such as representing unalloc space
return AbstractFilePriority.Priority.LAST;
}
//determine the fs files priority by name
final String path = abstractFile.getName();
if (path == null) {
return AbstractFilePriority.Priority.MEDIUM;
}
for (Pattern p : HIGH_PRI_PATHS) {
Matcher m = p.matcher(path);
if (m.find()) {
return AbstractFilePriority.Priority.HIGH;
}
}
for (Pattern p : MEDIUM_PRI_PATHS) {
Matcher m = p.matcher(path);
if (m.find()) {
return AbstractFilePriority.Priority.MEDIUM;
}
}
for (Pattern p : LOW_PRI_PATHS) {
Matcher m = p.matcher(path);
if (m.find()) {
return AbstractFilePriority.Priority.LOW;
}
}
for (Pattern p : LAST_PRI_PATHS) {
Matcher m = p.matcher(path);
if (m.find()) {
return AbstractFilePriority.Priority.LAST;
}
}
//default is medium
return AbstractFilePriority.Priority.MEDIUM;
}
}
}
}
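The root directory ordering above boils down to regular-expression matching on top-level folder names, with user content first and pagefile/hiberfil content last. A self-contained sketch of that matching idea follows; the class and method names are hypothetical and only restate the priority-pattern approach used by AbstractFilePriority above.

import java.util.ArrayList;
import java.util.List;
import java.util.regex.Pattern;

// Editorial sketch of the name-based priority idea; PrioritySketch is a
// hypothetical class, not part of this commit.
final class PrioritySketch {
    private static final List<Pattern> HIGH_PRI_PATHS = new ArrayList<>();
    static {
        // User content is top priority, mirroring the patterns above.
        HIGH_PRI_PATHS.add(Pattern.compile("^Users", Pattern.CASE_INSENSITIVE));
        HIGH_PRI_PATHS.add(Pattern.compile("^Documents and Settings", Pattern.CASE_INSENSITIVE));
    }

    static boolean isHighPriority(String rootName) {
        for (Pattern p : HIGH_PRI_PATHS) {
            if (p.matcher(rootName).find()) {
                return true;
            }
        }
        return false;
    }

    public static void main(String[] args) {
        System.out.println(isHighPriority("Users"));         // true
        System.out.println(isHighPriority("pagefile.sys"));  // false
    }
}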

View File

@@ -0,0 +1,99 @@
/*
* Autopsy Forensic Browser
*
* Copyright 2012-2014 Basis Technology Corp.
* Contact: carrier <at> sleuthkit <dot> org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.sleuthkit.autopsy.ingest;
import java.util.List;
import java.util.logging.Level;
import java.util.logging.Logger;
import org.sleuthkit.autopsy.casemodule.Case;
import org.sleuthkit.datamodel.Content;
import org.sleuthkit.datamodel.ContentVisitor;
import org.sleuthkit.datamodel.FileSystem;
import org.sleuthkit.datamodel.LayoutFile;
import org.sleuthkit.datamodel.SleuthkitCase;
import org.sleuthkit.datamodel.TskCoreException;
import org.sleuthkit.datamodel.TskData;
/**
* Get counts of ingestable files/dirs for the content input source.
*
* Note that this also includes counts of all unalloc children files (for the
* fs, image, or volume) even if ingest didn't ask for them.
*/
final class GetFilesCountVisitor extends ContentVisitor.Default<Long> {
private static final Logger logger = Logger.getLogger(FileIngestTaskScheduler.class.getName());
@Override
public Long visit(FileSystem fs) {
//recursion stop here
//case of a real fs, query all files for it
SleuthkitCase sc = Case.getCurrentCase().getSleuthkitCase();
StringBuilder queryB = new StringBuilder();
queryB.append("( (fs_obj_id = ").append(fs.getId()); //NON-NLS
//queryB.append(") OR (fs_obj_id = NULL) )");
queryB.append(") )");
queryB.append(" AND ( (meta_type = ").append(TskData.TSK_FS_META_TYPE_ENUM.TSK_FS_META_TYPE_REG.getValue()); //NON-NLS
queryB.append(") OR (meta_type = ").append(TskData.TSK_FS_META_TYPE_ENUM.TSK_FS_META_TYPE_DIR.getValue()); //NON-NLS
queryB.append(" AND (name != '.') AND (name != '..')"); //NON-NLS
queryB.append(") )");
//queryB.append( "AND (type = ");
//queryB.append(TskData.TSK_DB_FILES_TYPE_ENUM.FS.getFileType());
//queryB.append(")");
try {
final String query = queryB.toString();
logger.log(Level.INFO, "Executing count files query: {0}", query); //NON-NLS
return sc.countFilesWhere(query);
} catch (TskCoreException ex) {
logger.log(Level.SEVERE, "Couldn't get count of all files in FileSystem", ex); //NON-NLS
return 0L;
}
}
@Override
public Long visit(LayoutFile lf) {
//recursion stop here
//case of LayoutFile child of Image or Volume
return 1L;
}
private long getCountFromChildren(Content content) {
long count = 0;
try {
List<Content> children = content.getChildren();
if (children.size() > 0) {
for (Content child : children) {
count += child.accept(this);
}
} else {
count = 1;
}
} catch (TskCoreException ex) {
logger.log(Level.WARNING, "Could not get count of objects from children to get num of total files to be ingested", ex); //NON-NLS
}
return count;
}
@Override
protected Long defaultVisit(Content cntnt) {
//recurse assuming this is image/vs/volume
//recursion stops at fs or unalloc file
return getCountFromChildren(cntnt);
}
}
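This visitor exists to give the file ingest progress bar a determinate length. The call site appears later in this commit, in IngestJob.startFileIngestProgressBar(); the two lines below simply restate that usage (dataSource and fileTasksProgress are the IngestJob fields):

// Usage as in IngestJob below: estimate the file count up front, then size
// the progress bar with it.
long filesToIngestEstimate = dataSource.accept(new GetFilesCountVisitor());
fileTasksProgress.switchToDeterminate((int) filesToIngestEstimate);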

View File

@@ -19,12 +19,16 @@
package org.sleuthkit.autopsy.ingest;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import org.netbeans.api.progress.ProgressHandle;
import org.netbeans.api.progress.ProgressHandleFactory;
import org.openide.util.Cancellable;
import org.openide.util.NbBundle;
import org.sleuthkit.datamodel.AbstractFile;
import org.sleuthkit.datamodel.Content;
/**
@@ -33,31 +37,63 @@ import org.sleuthkit.datamodel.Content;
*/
final class IngestJob {
private final long id;
private static final AtomicLong nextIngestJobId = new AtomicLong(0L);
private static final ConcurrentHashMap<Long, IngestJob> ingestJobs = new ConcurrentHashMap<>(); // Maps job ids to jobs.
private final long jobId;
private final Content dataSource;
private final List<IngestModuleTemplate> ingestModuleTemplates;
private final boolean processUnallocatedSpace;
private final HashMap<Long, FileIngestPipeline> fileIngestPipelines = new HashMap<>();
private final HashMap<Long, DataSourceIngestPipeline> dataSourceIngestPipelines = new HashMap<>();
private final IngestScheduler.FileIngestScheduler fileScheduler = IngestScheduler.getInstance().getFileIngestScheduler();
private FileIngestPipeline initialFileIngestPipeline = null;
private DataSourceIngestPipeline initialDataSourceIngestPipeline = null;
private ProgressHandle dataSourceTaskProgress;
private final LinkedBlockingQueue<DataSourceIngestPipeline> dataSourceIngestPipelines = new LinkedBlockingQueue<>();
private final LinkedBlockingQueue<FileIngestPipeline> fileIngestPipelines = new LinkedBlockingQueue<>();
private final AtomicInteger tasksInProgress = new AtomicInteger(0);
private final AtomicLong processedFiles = new AtomicLong(0L);
private ProgressHandle dataSourceTasksProgress;
private ProgressHandle fileTasksProgress;
int totalEnqueuedFiles = 0;
private int processedFiles = 0;
private long filesToIngestEstimate = 0;
private volatile boolean cancelled;
IngestJob(long id, Content dataSource, List<IngestModuleTemplate> ingestModuleTemplates, boolean processUnallocatedSpace) {
this.id = id;
static List<IngestModuleError> startIngestJob(Content dataSource, List<IngestModuleTemplate> ingestModuleTemplates, boolean processUnallocatedSpace) { // RJCTODO: return errors
long jobId = nextIngestJobId.incrementAndGet();
IngestJob ingestJob = new IngestJob(jobId, dataSource, ingestModuleTemplates, processUnallocatedSpace);
List<IngestModuleError> errors = ingestJob.start();
if (errors.isEmpty()) {
ingestJobs.put(jobId, ingestJob);
}
return errors;
}
static boolean jobsAreRunning() {
for (IngestJob job : ingestJobs.values()) {
if (!job.isCancelled()) {
return true;
}
}
return false;
}
static void addFileToIngestJob(long ingestJobId, AbstractFile file) { // RJCTODO: Move back to IngestManager
IngestJob job = ingestJobs.get(ingestJobId);
if (job != null) {
FileIngestTaskScheduler.getInstance().addTask(job, file);
}
}
static void cancelAllIngestJobs() {
for (IngestJob job : ingestJobs.values()) {
job.cancel();
}
}
private IngestJob(long id, Content dataSource, List<IngestModuleTemplate> ingestModuleTemplates, boolean processUnallocatedSpace) {
this.jobId = id;
this.dataSource = dataSource;
this.ingestModuleTemplates = ingestModuleTemplates;
this.processUnallocatedSpace = processUnallocatedSpace;
this.cancelled = false;
}
long getId() {
return id;
long getJobId() {
return jobId;
}
Content getDataSource() {
@@ -68,34 +104,78 @@ final class IngestJob {
return processUnallocatedSpace;
}
synchronized List<IngestModuleError> startUpIngestPipelines() {
startDataSourceIngestProgressBar();
startFileIngestProgressBar();
return startUpInitialIngestPipelines();
List<IngestModuleError> start() {
List<IngestModuleError> errors = startUpIngestPipelines();
if (errors.isEmpty()) {
DataSourceIngestTaskScheduler.getInstance().addTask(new DataSourceIngestTask(this, dataSource));
FileIngestTaskScheduler.getInstance().addTasks(this, dataSource);
startDataSourceIngestProgressBar();
startFileIngestProgressBar();
}
return errors;
}
private List<IngestModuleError> startUpIngestPipelines() {
List<IngestModuleError> errors = new ArrayList<>();
int maxNumberOfPipelines = IngestManager.getMaxNumberOfDataSourceIngestThreads();
for (int i = 0; i < maxNumberOfPipelines; ++i) {
DataSourceIngestPipeline pipeline = new DataSourceIngestPipeline(this, ingestModuleTemplates);
errors.addAll(pipeline.startUp());
try {
dataSourceIngestPipelines.put(pipeline);
} catch (InterruptedException ex) {
// RJCTODO: log unexpected block and interrupt, or throw
}
if (errors.isEmpty()) {
// No need to accumulate presumably redundant errors.
break;
}
}
maxNumberOfPipelines = IngestManager.getMaxNumberOfFileIngestThreads();
for (int i = 0; i < maxNumberOfPipelines; ++i) {
FileIngestPipeline pipeline = new FileIngestPipeline(this, ingestModuleTemplates);
errors.addAll(pipeline.startUp());
try {
fileIngestPipelines.put(pipeline);
} catch (InterruptedException ex) {
// RJCTODO: log unexpected block and interrupt, or throw
}
if (errors.isEmpty()) {
// No need to accumulate presumably redundant errors.
break;
}
}
return errors;
}
private void startDataSourceIngestProgressBar() {
final String displayName = NbBundle
.getMessage(this.getClass(), "IngestJob.progress.dataSourceIngest.displayName", this.dataSource.getName());
dataSourceTaskProgress = ProgressHandleFactory.createHandle(displayName, new Cancellable() {
final String displayName = NbBundle.getMessage(this.getClass(),
"IngestJob.progress.dataSourceIngest.displayName",
dataSource.getName());
dataSourceTasksProgress = ProgressHandleFactory.createHandle(displayName, new Cancellable() {
@Override
public boolean cancel() {
if (dataSourceTaskProgress != null) {
dataSourceTaskProgress.setDisplayName(NbBundle.getMessage(this.getClass(),
if (dataSourceTasksProgress != null) {
dataSourceTasksProgress.setDisplayName(
NbBundle.getMessage(this.getClass(),
"IngestJob.progress.cancelling",
displayName));
}
IngestManager.getInstance().cancelIngestJobs();
IngestJob.this.cancel();
return true;
}
});
dataSourceTaskProgress.start();
dataSourceTaskProgress.switchToIndeterminate();
dataSourceTasksProgress.start();
dataSourceTasksProgress.switchToIndeterminate(); // RJCTODO: check out the logic in the pipeline class
}
private void startFileIngestProgressBar() {
final String displayName = NbBundle
.getMessage(this.getClass(), "IngestJob.progress.fileIngest.displayName", this.dataSource.getName());
final String displayName = NbBundle.getMessage(this.getClass(),
"IngestJob.progress.fileIngest.displayName",
dataSource.getName());
fileTasksProgress = ProgressHandleFactory.createHandle(displayName, new Cancellable() {
@Override
public boolean cancel() {
@@ -104,124 +184,90 @@ final class IngestJob {
NbBundle.getMessage(this.getClass(), "IngestJob.progress.cancelling",
displayName));
}
IngestManager.getInstance().cancelIngestJobs();
IngestJob.this.cancel();
return true;
}
});
filesToIngestEstimate = dataSource.accept(new GetFilesCountVisitor());
fileTasksProgress.start();
fileTasksProgress.switchToIndeterminate();
totalEnqueuedFiles = fileScheduler.getFilesEnqueuedEst();
fileTasksProgress.switchToDeterminate(totalEnqueuedFiles);
fileTasksProgress.switchToDeterminate((int) filesToIngestEstimate);
}
private List<IngestModuleError> startUpInitialIngestPipelines() {
// Create a per thread instance of each pipeline type right now to make
// (reasonably) sure that the ingest modules can be started.
initialDataSourceIngestPipeline = new DataSourceIngestPipeline(this, ingestModuleTemplates);
initialFileIngestPipeline = new FileIngestPipeline(this, ingestModuleTemplates);
List<IngestModuleError> errors = new ArrayList<>();
errors.addAll(initialDataSourceIngestPipeline.startUp());
errors.addAll(initialFileIngestPipeline.startUp());
return errors;
/**
* Called by the ingest task schedulers when an ingest task for this ingest
* job is added to the scheduler's task queue.
*/
void notifyTaskScheduled() {
// Increment the task counter when a task is scheduled so that there is
// a persistent record of the task's existence even after it is removed
// from the scheduler by an ingest thread. The task counter is used by
// the job to determine when it is done.
tasksInProgress.incrementAndGet();
}
synchronized DataSourceIngestPipeline getDataSourceIngestPipelineForThread(long threadId) {
DataSourceIngestPipeline pipeline;
if (initialDataSourceIngestPipeline != null) {
pipeline = initialDataSourceIngestPipeline;
initialDataSourceIngestPipeline = null;
dataSourceIngestPipelines.put(threadId, pipeline);
} else if (!dataSourceIngestPipelines.containsKey(threadId)) {
pipeline = new DataSourceIngestPipeline(this, ingestModuleTemplates);
pipeline.startUp();
dataSourceIngestPipelines.put(threadId, pipeline);
} else {
pipeline = dataSourceIngestPipelines.get(threadId);
}
return pipeline;
/**
* Called by the ingest schedulers as an "undo" operation for
* notifyTaskScheduled().
*/
void notifyTaskCompleted() {
// Decrement the task counter when a task is discarded by a scheduler.
// The task counter is used by the job to determine when it is done.
tasksInProgress.decrementAndGet();
}
synchronized FileIngestPipeline getFileIngestPipelineForThread(long threadId) {
FileIngestPipeline pipeline;
if (initialFileIngestPipeline != null) {
pipeline = initialFileIngestPipeline;
initialFileIngestPipeline = null;
fileIngestPipelines.put(threadId, pipeline);
} else if (!fileIngestPipelines.containsKey(threadId)) {
pipeline = new FileIngestPipeline(this, ingestModuleTemplates);
pipeline.startUp();
fileIngestPipelines.put(threadId, pipeline);
} else {
pipeline = fileIngestPipelines.get(threadId);
void process() throws InterruptedException {
if (!isCancelled()) {
try {
DataSourceIngestPipeline pipeline = dataSourceIngestPipelines.take();
pipeline.process(); // RJCTODO: Pass data source through?
dataSourceIngestPipelines.put(pipeline);
} catch (InterruptedException ex) {
// RJCTODO:
}
}
return pipeline;
ifCompletedShutDown();
}
synchronized List<IngestModuleError> releaseIngestPipelinesForThread(long threadId) {
List<IngestModuleError> errors = new ArrayList<>();
DataSourceIngestPipeline dataSourceIngestPipeline = dataSourceIngestPipelines.get(threadId);
if (dataSourceIngestPipeline != null) {
errors.addAll(dataSourceIngestPipeline.shutDown(cancelled));
dataSourceIngestPipelines.remove(threadId);
void process(AbstractFile file) {
if (!isCancelled()) {
try {
FileIngestPipeline pipeline = fileIngestPipelines.take();
fileTasksProgress.progress(file.getName(), (int) processedFiles.incrementAndGet());
pipeline.process(file);
fileIngestPipelines.put(pipeline);
} catch (InterruptedException ex) {
// RJCTODO: Log block and interrupt
}
}
if (initialDataSourceIngestPipeline == null && dataSourceIngestPipelines.isEmpty() && dataSourceTaskProgress != null) {
dataSourceTaskProgress.finish();
dataSourceTaskProgress = null;
}
FileIngestPipeline fileIngestPipeline = fileIngestPipelines.get(threadId);
if (fileIngestPipeline != null) {
errors.addAll(fileIngestPipeline.shutDown(cancelled));
fileIngestPipelines.remove(threadId);
}
if (initialFileIngestPipeline == null && fileIngestPipelines.isEmpty() && fileTasksProgress != null) {
fileTasksProgress.finish();
fileTasksProgress = null;
}
return errors;
ifCompletedShutDown();
}
synchronized boolean areIngestPipelinesShutDown() {
return (initialDataSourceIngestPipeline == null
&& dataSourceIngestPipelines.isEmpty()
&& initialFileIngestPipeline == null
&& fileIngestPipelines.isEmpty());
void ifCompletedShutDown() {
if (tasksInProgress.decrementAndGet() == 0) {
while (!dataSourceIngestPipelines.isEmpty()) {
DataSourceIngestPipeline pipeline = dataSourceIngestPipelines.poll();
pipeline.shutDown(cancelled);
}
while (!fileIngestPipelines.isEmpty()) {
FileIngestPipeline pipeline = fileIngestPipelines.poll();
pipeline.shutDown(cancelled);
}
ingestJobs.remove(jobId);
IngestManager.getInstance().fireIngestJobCompleted(jobId);
}
}
synchronized ProgressHandle getDataSourceTaskProgressBar() {
return this.dataSourceTaskProgress;
}
synchronized void updateFileTasksProgressBar(String currentFileName) {
int newTotalEnqueuedFiles = fileScheduler.getFilesEnqueuedEst();
if (newTotalEnqueuedFiles > totalEnqueuedFiles) {
totalEnqueuedFiles = newTotalEnqueuedFiles + 1;
fileTasksProgress.switchToIndeterminate();
fileTasksProgress.switchToDeterminate(totalEnqueuedFiles);
}
if (processedFiles < totalEnqueuedFiles) {
++processedFiles;
}
fileTasksProgress.progress(currentFileName, processedFiles);
}
synchronized void cancel() {
if (initialDataSourceIngestPipeline != null) {
initialDataSourceIngestPipeline.shutDown(true);
initialDataSourceIngestPipeline = null;
}
if (initialFileIngestPipeline != null) {
initialFileIngestPipeline.shutDown(true);
initialFileIngestPipeline = null;
}
cancelled = true;
ProgressHandle getDataSourceTaskProgressBar() {
return dataSourceTasksProgress; // RJCTODO: Should just pass the progress handle or the object to the pipeline
}
boolean isCancelled() {
return cancelled;
}
void cancel() {
cancelled = true;
fileTasksProgress.finish();
IngestManager.getInstance().fireIngestJobCancelled(jobId);
}
}
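The reworked IngestJob drops the per-thread pipeline maps in favor of blocking queues of pipelines plus an atomic tasksInProgress counter, and it shuts itself down when that counter drains to zero. A minimal, self-contained sketch of the counting pattern follows; CompletionTrackerSketch and its method names are hypothetical and only illustrate the idea.

import java.util.concurrent.atomic.AtomicInteger;

// Editorial sketch of the completion-tracking pattern used above: every
// scheduled task bumps a counter, every finished task decrements it, and the
// job shuts down when the counter returns to zero.
final class CompletionTrackerSketch {
    private final AtomicInteger tasksInProgress = new AtomicInteger(0);

    void taskScheduled() {
        tasksInProgress.incrementAndGet();
    }

    void taskFinished() {
        if (tasksInProgress.decrementAndGet() == 0) {
            shutDown(); // last task for the job: release pipelines, fire event
        }
    }

    private void shutDown() {
        System.out.println("job complete");
    }

    public static void main(String[] args) {
        CompletionTrackerSketch job = new CompletionTrackerSketch();
        job.taskScheduled();
        job.taskScheduled();
        job.taskFinished();
        job.taskFinished(); // prints "job complete"
    }
}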

View File

@@ -39,7 +39,7 @@ public final class IngestJobContext {
* @return The ingest job identifier.
*/
public long getJobId() {
return this.ingestJob.getId();
return this.ingestJob.getJobId();
}
/**
@@ -60,7 +60,7 @@
*/
public void addFiles(List<AbstractFile> files) {
for (AbstractFile file : files) {
IngestManager.getInstance().addFileToIngestJob(ingestJob.getId(), file);
IngestJob.addFileToIngestJob(ingestJob.getJobId(), file);
}
}
}
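With this change, files added by modules are routed through IngestJob.addFileToIngestJob() instead of the IngestManager. A hedged sketch of that call path from a module's point of view; the method name and the derivedFiles list are hypothetical, while context stands for the IngestJobContext handed to the module.

import java.util.List;
import org.sleuthkit.datamodel.AbstractFile;

// Editorial sketch: a file ingest module hands derived files back to its job.
static void handBackDerivedFiles(IngestJobContext context, List<AbstractFile> derivedFiles) {
    if (!derivedFiles.isEmpty()) {
        // Schedules the files on the same ingest job, so they flow through
        // the job's file ingest pipelines like any other file.
        context.addFiles(derivedFiles);
    }
}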

View File

@@ -20,9 +20,7 @@ package org.sleuthkit.autopsy.ingest;
import java.beans.PropertyChangeListener;
import java.beans.PropertyChangeSupport;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CancellationException;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
@@ -36,72 +34,146 @@ import org.netbeans.api.progress.ProgressHandleFactory;
import org.openide.util.Cancellable;
import org.openide.util.NbPreferences;
import org.sleuthkit.autopsy.coreutils.MessageNotifyUtil;
import org.sleuthkit.datamodel.AbstractFile;
import org.sleuthkit.datamodel.Content;
import java.util.prefs.Preferences;
import javax.swing.JOptionPane;
import javax.swing.SwingWorker;
import org.sleuthkit.autopsy.ingest.IngestScheduler.FileIngestScheduler.FileIngestTask;
/**
* Manages the execution of ingest jobs.
*/
public class IngestManager {
private static final int MAX_NUMBER_OF_DATA_SOURCE_INGEST_THREADS = 1;
private static final String NUMBER_OF_FILE_INGEST_THREADS_KEY = "NumberOfFileingestThreads"; //NON-NLS
private static final int MIN_NUMBER_OF_FILE_INGEST_THREADS = 1;
private static final int MAX_NUMBER_OF_FILE_INGEST_THREADS = 4;
private static final int DEFAULT_NUMBER_OF_FILE_INGEST_THREADS = 2;
private static final Logger logger = Logger.getLogger(IngestManager.class.getName());
private static final PropertyChangeSupport pcs = new PropertyChangeSupport(IngestManager.class);
private static final Preferences userPreferences = NbPreferences.forModule(IngestManager.class);
private static final IngestManager instance = new IngestManager();
private final IngestScheduler scheduler = IngestScheduler.getInstance();
private final PropertyChangeSupport pcs = new PropertyChangeSupport(IngestManager.class);
private final IngestMonitor ingestMonitor = new IngestMonitor();
private final ExecutorService startIngestJobsExecutor = Executors.newSingleThreadExecutor();
private final ExecutorService dataSourceIngestTasksExecutor = Executors.newSingleThreadExecutor();
private final ExecutorService fileIngestTasksExecutor = Executors.newFixedThreadPool(MAX_NUMBER_OF_FILE_INGEST_THREADS);
private final ExecutorService fireEventTasksExecutor = Executors.newSingleThreadExecutor();
private final ConcurrentHashMap<Long, IngestJob> ingestJobs = new ConcurrentHashMap<>(1, 0.9f, 4); // Maps job ids to jobs.
private final ConcurrentHashMap<Long, Future<?>> ingestTasks = new ConcurrentHashMap<>(); // Maps task ids to task cancellation handles. Guarded by this.
private final AtomicLong ingestJobId = new AtomicLong(0L);
private final AtomicLong ingestTaskId = new AtomicLong(0L);
private final ExecutorService startIngestJobsThreadPool = Executors.newSingleThreadExecutor();
private final ConcurrentHashMap<Long, Future<?>> startIngestJobThreads = new ConcurrentHashMap<>(); // Maps thread ids to cancellation handles.
private final ExecutorService dataSourceIngestThreadPool = Executors.newSingleThreadExecutor();
private final ConcurrentHashMap<Long, Future<?>> dataSourceIngestThreads = new ConcurrentHashMap<>(); // Maps thread ids to cancellation handles.
private final ExecutorService fileIngestThreadPool = Executors.newFixedThreadPool(MAX_NUMBER_OF_FILE_INGEST_THREADS);
private final ExecutorService fireIngestJobEventsThreadPool = Executors.newSingleThreadExecutor();
private final ConcurrentHashMap<Long, Future<?>> fileIngestThreads = new ConcurrentHashMap<>(); // Maps thread ids to cancellation handles.
private final AtomicLong nextThreadId = new AtomicLong(0L);
private volatile IngestMessageTopComponent ingestMessageBox;
/**
* Gets the IngestManager singleton, creating it if necessary.
* Gets the ingest manager.
*
* @return The IngestManager singleton.
* @return A singleton IngestManager object.
*/
public static IngestManager getInstance() {
return instance;
}
private IngestManager() {
}
/**
* Signals to the ingest manager that it can go find the top component for
* the ingest messages inbox. Called by the custom installer for this
* package once the window system is initialized.
* Starts the ingest monitor and the data source ingest and file ingest
* threads.
*/
void initIngestMessageInbox() {
if (this.ingestMessageBox == null) {
this.ingestMessageBox = IngestMessageTopComponent.findInstance();
private IngestManager() {
startDataSourceIngestThread();
int numberOfFileIngestThreads = getNumberOfFileIngestThreads();
for (int i = 0; i < numberOfFileIngestThreads; ++i) {
startFileIngestThread();
}
}
/**
* Signals to the ingest manager that it can go about finding the top
* component for the ingest messages inbox. Called by the custom installer
* for this package once the window system is initialized.
*/
void initIngestMessageInbox() {
if (ingestMessageBox == null) {
ingestMessageBox = IngestMessageTopComponent.findInstance();
}
}
/**
* Gets the maximum number of data source ingest threads the ingest manager
* will use.
*/
public static int getMaxNumberOfDataSourceIngestThreads() {
return MAX_NUMBER_OF_DATA_SOURCE_INGEST_THREADS;
}
/**
* Gets the maximum number of file ingest threads the ingest manager will
* use.
*/
public static int getMaxNumberOfFileIngestThreads() {
return MAX_NUMBER_OF_FILE_INGEST_THREADS;
}
/**
* Gets the number of file ingest threads the ingest manager will use.
*/
public synchronized static int getNumberOfFileIngestThreads() {
return userPreferences.getInt(NUMBER_OF_FILE_INGEST_THREADS_KEY, DEFAULT_NUMBER_OF_FILE_INGEST_THREADS);
}
/**
* Changes the number of file ingest threads the ingest manager will use to
* no more than MAX_NUMBER_OF_FILE_INGEST_THREADS and no less than
* MIN_NUMBER_OF_FILE_INGEST_THREADS. Out of range requests are converted to
* requests for DEFAULT_NUMBER_OF_FILE_INGEST_THREADS.
*
* @param numberOfThreads The desired number of file ingest threads.
*/
public synchronized static void setNumberOfFileIngestThreads(int numberOfThreads) {
if (numberOfThreads < MIN_NUMBER_OF_FILE_INGEST_THREADS
|| numberOfThreads > MAX_NUMBER_OF_FILE_INGEST_THREADS) {
if ((numberOfThreads < MIN_NUMBER_OF_FILE_INGEST_THREADS) || (numberOfThreads > MAX_NUMBER_OF_FILE_INGEST_THREADS)) {
numberOfThreads = DEFAULT_NUMBER_OF_FILE_INGEST_THREADS;
}
userPreferences.putInt(NUMBER_OF_FILE_INGEST_THREADS_KEY, numberOfThreads);
if (instance.fileIngestThreads.size() != numberOfThreads) {
if (instance.fileIngestThreads.size() > numberOfThreads) {
Long[] threadIds = instance.fileIngestThreads.keySet().toArray(new Long[instance.fileIngestThreads.size()]);
int numberOfThreadsToCancel = instance.fileIngestThreads.size() - numberOfThreads;
for (int i = 0; i < numberOfThreadsToCancel; ++i) {
instance.cancelFileIngestThread(threadIds[i]);
}
} else if (instance.fileIngestThreads.size() < numberOfThreads) {
int numberOfThreadsToAdd = numberOfThreads - instance.fileIngestThreads.size();
for (int i = 0; i < numberOfThreadsToAdd; ++i) {
instance.startFileIngestThread();
}
}
}
}
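/*
 * Illustrative sketch (hypothetical caller, not part of this commit) of the
 * thread-count API above. Out-of-range requests are converted to the default;
 * in-range requests grow or shrink the file ingest thread pool.
 *
 *     int current = IngestManager.getNumberOfFileIngestThreads(); // e.g. 2 (the default)
 *     IngestManager.setNumberOfFileIngestThreads(8); // above MAX (4), stored as DEFAULT (2)
 *     IngestManager.setNumberOfFileIngestThreads(3); // in range, pool resized to 3 threads
 */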
/**
* Submits a DataSourceIngestThread Runnable to the data source ingest
* thread pool.
*/
private void startDataSourceIngestThread() {
long threadId = nextThreadId.incrementAndGet();
Future<?> handle = dataSourceIngestThreadPool.submit(new DataSourceIngestThread(threadId));
dataSourceIngestThreads.put(threadId, handle);
}
/**
* Submits a FileIngestThread Runnable to the file ingest thread
* pool.
*/
private void startFileIngestThread() {
long threadId = nextThreadId.incrementAndGet();
Future<?> handle = fileIngestThreadPool.submit(new FileIngestThread(threadId));
fileIngestThreads.put(threadId, handle);
}
/**
* Cancels a FileIngestThread Runnable in the file ingest thread pool.
*/
private void cancelFileIngestThread(long threadId) {
Future<?> handle = fileIngestThreads.remove(threadId);
handle.cancel(true);
}
synchronized void startIngestJobs(final List<Content> dataSources, final List<IngestModuleTemplate> moduleTemplates, boolean processUnallocatedSpace) {
@@ -109,9 +181,9 @@ public class IngestManager {
ingestMessageBox.clearMessages();
}
long taskId = ingestTaskId.incrementAndGet();
Future<?> task = startIngestJobsExecutor.submit(new StartIngestJobsTask(taskId, dataSources, moduleTemplates, processUnallocatedSpace));
ingestTasks.put(taskId, task);
long taskId = nextThreadId.incrementAndGet();
Future<?> task = startIngestJobsThreadPool.submit(new StartIngestJobsThread(taskId, dataSources, moduleTemplates, processUnallocatedSpace));
fileIngestThreads.put(taskId, task);
if (ingestMessageBox != null) {
ingestMessageBox.restoreMessages();
@@ -121,43 +193,42 @@ public class IngestManager {
/**
* Test if any ingest jobs are in progress.
*
* @return True if any ingest jobs are in progress, false otherwise
* @return True if any ingest jobs are in progress, false otherwise.
*/
public boolean isIngestRunning() {
return (ingestJobs.isEmpty() == false);
return IngestJob.jobsAreRunning();
}
void addFileToIngestJob(long ingestJobId, AbstractFile file) {
IngestJob job = ingestJobs.get(ingestJobId);
if (job != null) {
scheduler.getFileIngestScheduler().queueFile(job, file);
public void cancelAllIngestJobs() {
cancelStartIngestJobsTasks();
IngestJob.cancelAllIngestJobs();
}
private void cancelStartIngestJobsTasks() {
for (Future<?> future : startIngestJobThreads.values()) {
future.cancel(true);
}
}
void cancelIngestJobs() {
new IngestCancellationWorker().execute();
startIngestJobThreads.clear();
}
/**
* Ingest events.
*/
public enum IngestEvent {
public enum IngestEvent { // RJCTODO: Update comments if time permits
/**
* Property change event fired when an ingest job is started. The ingest
* job id is in old value field of the PropertyChangeEvent object.
* Property change event fired when an ingest job is started. The old
* and new values of the PropertyChangeEvent object are set to null.
*/
INGEST_JOB_STARTED,
/**
* Property change event fired when an ingest job is completed. The
* ingest job id is in old value field of the PropertyChangeEvent
* object.
* Property change event fired when an ingest job is completed. The old
* and new values of the PropertyChangeEvent object are set to null.
*/
INGEST_JOB_COMPLETED,
/**
* Property change event fired when an ingest job is canceled. The
* ingest job id is in old value field of the PropertyChangeEvent
* object.
* Property change event fired when an ingest job is canceled. The old
* and new values of the PropertyChangeEvent object are set to null.
*/
INGEST_JOB_CANCELLED,
/**
@@ -182,79 +253,84 @@ public class IngestManager {
};
/**
* Add property change listener to listen to ingest events.
* Add an ingest event property change listener.
*
* @param listener PropertyChangeListener to register
* @param listener The PropertyChangeListener to register.
*/
public static void addPropertyChangeListener(final PropertyChangeListener listener) {
public void addPropertyChangeListener(final PropertyChangeListener listener) {
pcs.addPropertyChangeListener(listener);
}
public static void removePropertyChangeListener(final PropertyChangeListener listener) {
/**
* Remove an ingest event property change listener.
*
* @param listener The PropertyChangeListener to unregister.
*/
public void removePropertyChangeListener(final PropertyChangeListener listener) {
pcs.removePropertyChangeListener(listener);
}
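/*
 * Illustrative listener registration (hypothetical caller, not part of this
 * commit), assuming the instance addPropertyChangeListener above and the
 * IngestEvent names defined in this class:
 *
 *     IngestManager.getInstance().addPropertyChangeListener(new java.beans.PropertyChangeListener() {
 *         @Override
 *         public void propertyChange(java.beans.PropertyChangeEvent evt) {
 *             if (evt.getPropertyName().equals(IngestManager.IngestEvent.INGEST_JOB_COMPLETED.toString())) {
 *                 // e.g. refresh a results view when an ingest job finishes
 *             }
 *         }
 *     });
 */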
static void fireIngestJobEvent(String eventType, long jobId) {
try {
pcs.firePropertyChange(eventType, jobId, null);
} catch (Exception e) {
logger.log(Level.SEVERE, "Ingest manager listener threw exception", e); //NON-NLS
MessageNotifyUtil.Notify.show(NbBundle.getMessage(IngestManager.class, "IngestManager.moduleErr"),
NbBundle.getMessage(IngestManager.class, "IngestManager.moduleErr.errListenToUpdates.msg"),
MessageNotifyUtil.MessageType.ERROR);
}
/**
* Fire an ingest event signifying an ingest job started.
*
* @param ingestJobId The ingest job id.
*/
void fireIngestJobStarted(long ingestJobId) {
fireIngestJobEventsThreadPool.submit(new FireIngestEventThread(IngestEvent.INGEST_JOB_STARTED, ingestJobId, null));
}
/**
* Fire event when file is done with a pipeline run
* Fire an ingest event signifying an ingest job finished.
*
* @param fileId ID of file that is done
* @param ingestJobId The ingest job id.
*/
static void fireFileIngestDone(long fileId) {
try {
pcs.firePropertyChange(IngestEvent.FILE_DONE.toString(), fileId, null);
} catch (Exception e) {
logger.log(Level.SEVERE, "Ingest manager listener threw exception", e); //NON-NLS
MessageNotifyUtil.Notify.show(NbBundle.getMessage(IngestManager.class, "IngestManager.moduleErr"),
NbBundle.getMessage(IngestManager.class, "IngestManager.moduleErr.errListenToUpdates.msg"),
MessageNotifyUtil.MessageType.ERROR);
}
void fireIngestJobCompleted(long ingestJobId) {
fireIngestJobEventsThreadPool.submit(new FireIngestEventThread(IngestEvent.INGEST_JOB_COMPLETED, ingestJobId, null));
}
/**
* Fire event for ModuleDataEvent (when modules post data to blackboard,
* etc.)
* Fire an ingest event signifying an ingest job was canceled.
*
* @param moduleDataEvent
* @param ingestJobId The ingest job id.
*/
static void fireModuleDataEvent(ModuleDataEvent moduleDataEvent) {
try {
pcs.firePropertyChange(IngestEvent.DATA.toString(), moduleDataEvent, null);
} catch (Exception e) {
logger.log(Level.SEVERE, "Ingest manager listener threw exception", e); //NON-NLS
MessageNotifyUtil.Notify.show(NbBundle.getMessage(IngestManager.class, "IngestManager.moduleErr"),
NbBundle.getMessage(IngestManager.class, "IngestManager.moduleErr.errListenToUpdates.msg"),
MessageNotifyUtil.MessageType.ERROR);
}
void fireIngestJobCancelled(long ingestJobId) {
fireIngestJobEventsThreadPool.submit(new FireIngestEventThread(IngestEvent.INGEST_JOB_CANCELLED, ingestJobId, null));
}
/**
* Fire event for ModuleContentChanged (when modules create new content that
* needs to be analyzed)
* Fire an ingest event signifying that the ingest of a file is complete.
*
* @param moduleContentEvent
* @param fileId The object id of the file.
*/
static void fireModuleContentEvent(ModuleContentEvent moduleContentEvent) {
try {
pcs.firePropertyChange(IngestEvent.CONTENT_CHANGED.toString(), moduleContentEvent, null);
} catch (Exception e) {
logger.log(Level.SEVERE, "Ingest manager listener threw exception", e); //NON-NLS
MessageNotifyUtil.Notify.show(NbBundle.getMessage(IngestManager.class, "IngestManager.moduleErr"),
NbBundle.getMessage(IngestManager.class, "IngestManager.moduleErr.errListenToUpdates.msg"),
MessageNotifyUtil.MessageType.ERROR);
}
void fireFileIngestDone(long fileId) {
fireIngestJobEventsThreadPool.submit(new FireIngestEventThread(IngestEvent.FILE_DONE, fileId, null));
}
/**
* Fire an event signifying a blackboard post by an ingest module.
*
* @param moduleDataEvent A ModuleDataEvent with the details of the posting.
*/
void fireIngestModuleDataEvent(ModuleDataEvent moduleDataEvent) {
fireIngestJobEventsThreadPool.submit(new FireIngestEventThread(IngestEvent.DATA, moduleDataEvent, null));
}
/**
* Fire an event signifying discovery of additional content by an ingest
* module.
*
* @param moduleContentEvent A ModuleContentEvent with the details of the new
* content.
*/
void fireIngestModuleContentEvent(ModuleContentEvent moduleContentEvent) {
fireIngestJobEventsThreadPool.submit(new FireIngestEventThread(IngestEvent.CONTENT_CHANGED, moduleContentEvent, null));
}
/**
* Post a message to the ingest messages inbox.
*
* @param message The message to be posted.
*/
void postIngestMessage(IngestMessage message) {
if (ingestMessageBox != null) {
ingestMessageBox.displayMessage(message);
@@ -262,11 +338,10 @@ public class IngestManager {
}
/**
* Get the free disk space of a drive where ingest data are written to. That
* drive is being monitored by the IngestMonitor thread when ingest is running.
* Use this method to get the amount of free disk space at any time.
* Get the free disk space of the drive to which ingest data is being
* written, as reported by the ingest monitor.
*
* @return amount of disk space, -1 if unknown
* @return Free disk space, -1 if unknown // RJCTODO: What units?
*/
long getFreeDiskSpace() {
if (ingestMonitor != null) {
@@ -276,33 +351,20 @@ public class IngestManager {
}
}
private void reportRunIngestModulesTaskDone(long taskId) {
ingestTasks.remove(taskId);
/**
* A Runnable that creates ingest jobs and submits the initial data source
* and file ingest tasks to the task schedulers.
*/
private class StartIngestJobsThread implements Runnable {
List<Long> completedJobs = new ArrayList<>();
for (IngestJob job : ingestJobs.values()) {
job.releaseIngestPipelinesForThread(taskId);
if (job.areIngestPipelinesShutDown() == true) {
completedJobs.add(job.getId());
}
}
for (Long jobId : completedJobs) {
IngestJob job = ingestJobs.remove(jobId);
fireEventTasksExecutor.submit(new FireIngestJobEventTask(jobId, job.isCancelled() ? IngestEvent.INGEST_JOB_CANCELLED : IngestEvent.INGEST_JOB_COMPLETED));
}
}
private class StartIngestJobsTask implements Runnable {
private final long id;
private final long threadId;
private final List<Content> dataSources;
private final List<IngestModuleTemplate> moduleTemplates;
private final boolean processUnallocatedSpace;
private ProgressHandle progress;
StartIngestJobsTask(long taskId, List<Content> dataSources, List<IngestModuleTemplate> moduleTemplates, boolean processUnallocatedSpace) {
this.id = taskId;
StartIngestJobsThread(long threadId, List<Content> dataSources, List<IngestModuleTemplate> moduleTemplates, boolean processUnallocatedSpace) {
this.threadId = threadId;
this.dataSources = dataSources;
this.moduleTemplates = moduleTemplates;
this.processUnallocatedSpace = processUnallocatedSpace;
@@ -321,12 +383,16 @@ public class IngestManager {
"IngestManager.StartIngestJobsTask.run.cancelling",
displayName));
}
IngestManager.getInstance().cancelIngestJobs();
cancelFileIngestThread(threadId);
return true;
}
});
progress.start(dataSources.size() * 2);
if (!ingestMonitor.isRunning()) {
ingestMonitor.start();
}
progress.start(2 * dataSources.size());
int workUnitsCompleted = 0;
for (Content dataSource : dataSources) {
if (Thread.currentThread().isInterrupted()) {
@@ -334,14 +400,7 @@ public class IngestManager {
}
// Create an ingest job.
IngestJob ingestJob = new IngestJob(IngestManager.this.ingestJobId.incrementAndGet(), dataSource, moduleTemplates, processUnallocatedSpace);
ingestJobs.put(ingestJob.getId(), ingestJob);
// Start at least one instance of each kind of ingest
// pipeline for this ingest job. This allows for an early out
// if the full ingest module lineup specified by the user
// cannot be started up.
List<IngestModuleError> errors = ingestJob.startUpIngestPipelines();
List<IngestModuleError> errors = IngestJob.startIngestJob(dataSource, moduleTemplates, processUnallocatedSpace);
if (!errors.isEmpty()) {
// Report the error to the user.
StringBuilder moduleStartUpErrors = new StringBuilder();
@@ -367,169 +426,116 @@ public class IngestManager {
JOptionPane.showMessageDialog(null, notifyMessage.toString(),
NbBundle.getMessage(this.getClass(),
"IngestManager.StartIngestJobsTask.run.startupErr.dlgTitle"), JOptionPane.ERROR_MESSAGE);
// Jettison the ingest job and move on to the next one.
ingestJob.cancel();
ingestJobs.remove(ingestJob.getId());
break;
}
// Queue the data source ingest tasks for the ingest job.
fireIngestJobEventsThreadPool.submit(new FireIngestEventThread(IngestEvent.INGEST_JOB_STARTED));
// Queue a data source ingest task for the ingest job.
final String inputName = dataSource.getName();
progress.progress(
NbBundle.getMessage(this.getClass(), "IngestManager.StartIngestJobsTask.run.progress.msg1",
NbBundle.getMessage(this.getClass(), "IngestManager.StartIngestJobsThread.run.progress.msg1",
inputName), workUnitsCompleted);
scheduler.getDataSourceIngestScheduler().queueForIngest(ingestJob);
DataSourceIngestTaskScheduler.getInstance().addTask(new DataSourceIngestTask(ingestJob, ingestJob.getDataSource()));
progress.progress(
NbBundle.getMessage(this.getClass(), "IngestManager.StartIngestJobsTask.run.progress.msg2",
NbBundle.getMessage(this.getClass(), "IngestManager.StartIngestJobsThread.run.progress.msg2",
inputName), ++workUnitsCompleted);
// Queue the file ingest tasks for the ingest job.
progress.progress(
NbBundle.getMessage(this.getClass(), "IngestManager.StartIngestJobsTask.run.progress.msg3",
NbBundle.getMessage(this.getClass(), "IngestManager.StartIngestJobsThread.run.progress.msg3",
inputName), workUnitsCompleted);
scheduler.getFileIngestScheduler().queueForIngest(ingestJob);
FileIngestTaskScheduler.getInstance().addTasks(ingestJob, ingestJob.getDataSource());
progress.progress(
NbBundle.getMessage(this.getClass(), "IngestManager.StartIngestJobsTask.run.progress.msg4",
NbBundle.getMessage(this.getClass(), "IngestManager.StartIngestJobsThread.run.progress.msg4",
inputName), ++workUnitsCompleted);
if (!Thread.currentThread().isInterrupted()) {
if (!ingestMonitor.isRunning()) {
ingestMonitor.start();
}
long taskId = ingestTaskId.incrementAndGet();
Future<?> task = dataSourceIngestTasksExecutor.submit(new RunDataSourceIngestModulesTask(taskId));
ingestTasks.put(taskId, task);
int numberOfFileTasksRequested = getNumberOfFileIngestThreads();
for (int i = 0; i < numberOfFileTasksRequested; ++i) {
taskId = ingestTaskId.incrementAndGet();
task = fileIngestTasksExecutor.submit(new RunFileSourceIngestModulesTask(taskId));
ingestTasks.put(taskId, task);
}
fireEventTasksExecutor.submit(new FireIngestJobEventTask(ingestJob.getId(), IngestEvent.INGEST_JOB_STARTED));
break;
}
}
} catch (Exception ex) {
String message = String.format("StartIngestJobsTask (id=%d) caught exception", id); //NON-NLS
String message = String.format("StartIngestJobsTask (id=%d) caught exception", threadId); //NON-NLS
logger.log(Level.SEVERE, message, ex);
MessageNotifyUtil.Message.error(
NbBundle.getMessage(this.getClass(), "IngestManager.StartIngestJobsTask.run.catchException.msg"));
} finally {
progress.finish();
ingestTasks.remove(id);
startIngestJobThreads.remove(threadId);
}
}
}
private class RunDataSourceIngestModulesTask implements Runnable {
private final long id;
RunDataSourceIngestModulesTask(long taskId) {
id = taskId;
}
/**
* A Runnable that acts as a consumer for the data source ingest task
* scheduler's task queue.
*/
private class DataSourceIngestThread implements Runnable {
@Override
public void run() {
try {
IngestScheduler.DataSourceIngestScheduler scheduler = IngestScheduler.getInstance().getDataSourceIngestScheduler();
IngestJob job = scheduler.getNextTask();
while (job != null) {
if (Thread.currentThread().isInterrupted()) {
break;
}
job.getDataSourceIngestPipelineForThread(id).process();
job = scheduler.getNextTask();
DataSourceIngestTaskScheduler scheduler = DataSourceIngestTaskScheduler.getInstance();
while (true) {
try {
DataSourceIngestTask task = scheduler.getNextTask(); // Blocks.
task.execute();
} catch (InterruptedException ex) {
break;
}
if (Thread.currentThread().isInterrupted()) {
break;
}
} catch (Exception ex) {
String message = String.format("RunDataSourceIngestModulesTask (id=%d) caught exception", id); //NON-NLS
logger.log(Level.SEVERE, message, ex);
} finally {
reportRunIngestModulesTaskDone(id);
}
}
}
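/*
 * The consumer thread above relies on a scheduler whose getNextTask() blocks
 * until work arrives. A minimal sketch of that producer/consumer pattern,
 * using a hypothetical scheduler backed by a blocking queue (the real
 * DataSourceIngestTaskScheduler is not shown in this diff):
 *
 *     final class BlockingTaskScheduler<T> {
 *         private final java.util.concurrent.BlockingQueue<T> tasks =
 *                 new java.util.concurrent.LinkedBlockingQueue<>();
 *
 *         void addTask(T task) {
 *             tasks.add(task); // producer side: never blocks for an unbounded queue
 *         }
 *
 *         T getNextTask() throws InterruptedException {
 *             return tasks.take(); // consumer side: blocks until a task is available
 *         }
 *     }
 */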
private class RunFileSourceIngestModulesTask implements Runnable {
private final long id;
RunFileSourceIngestModulesTask(long taskId) {
id = taskId;
}
/**
* A Runnable that acts as a consumer for the file ingest task scheduler's
* task queue.
*/
private static class FileIngestThread implements Runnable {
@Override
public void run() {
try {
IngestScheduler.FileIngestScheduler fileScheduler = IngestScheduler.getInstance().getFileIngestScheduler();
FileIngestTask task = fileScheduler.getNextTask();
while (task != null) {
if (Thread.currentThread().isInterrupted()) {
break;
}
IngestJob job = task.getJob();
job.updateFileTasksProgressBar(task.getFile().getName());
job.getFileIngestPipelineForThread(id).process(task.getFile());
task = fileScheduler.getNextTask();
FileIngestTaskScheduler scheduler = FileIngestTaskScheduler.getInstance();
while (true) {
try {
FileIngestTask task = scheduler.getNextTask(); // Blocks.
task.execute();
} catch (InterruptedException ex) {
break;
}
if (Thread.currentThread().isInterrupted()) {
break;
}
} catch (Exception ex) {
String message = String.format("RunFileSourceIngestModulesTask (id=%d) caught exception", id); //NON-NLS
logger.log(Level.SEVERE, message, ex);
} finally {
reportRunIngestModulesTaskDone(id);
}
}
}
private class FireIngestJobEventTask implements Runnable {
/**
* A Runnable that fires ingest events to ingest manager property change
* listeners.
*/
private class FireIngestEventThread implements Runnable {
private final long ingestJobId;
private final IngestEvent event;
private final Object oldValue;
private final Object newValue;
FireIngestJobEventTask(long ingestJobId, IngestEvent event) {
this.ingestJobId = ingestJobId;
FireIngestEventThread(IngestEvent event, Object oldValue, Object newValue) {
this.event = event;
this.oldValue = oldValue;
this.newValue = newValue;
}
@Override
public void run() {
fireIngestJobEvent(event.toString(), ingestJobId);
}
}
private class IngestCancellationWorker extends SwingWorker<Void, Void> {
@Override
protected Void doInBackground() throws Exception {
// First mark all of the ingest jobs as cancelled. This way the
// ingest modules will know they are being shut down due to
// cancellation when the cancelled run ingest module tasks release
// their pipelines.
for (IngestJob job : ingestJobs.values()) {
job.cancel();
}
for (Future<?> task : ingestTasks.values()) {
task.cancel(true);
}
// Jettison the remaining data source and file ingest tasks.
scheduler.getFileIngestScheduler().emptyQueues();
scheduler.getDataSourceIngestScheduler().emptyQueues();
return null;
}
@Override
protected void done() {
try {
super.get();
} catch (CancellationException | InterruptedException ex) {
} catch (Exception ex) {
logger.log(Level.SEVERE, "Error while cancelling ingest jobs", ex); //NON-NLS
pcs.firePropertyChange(event.toString(), oldValue, newValue);
} catch (Exception e) {
logger.log(Level.SEVERE, "Ingest manager listener threw exception", e); //NON-NLS
MessageNotifyUtil.Notify.show(NbBundle.getMessage(IngestManager.class, "IngestManager.moduleErr"), // RJCTODO: Oddly named strings
NbBundle.getMessage(IngestManager.class, "IngestManager.moduleErr.errListenToUpdates.msg"),
MessageNotifyUtil.MessageType.ERROR);
}
}
}

View File

@@ -224,7 +224,7 @@ import org.sleuthkit.datamodel.Content;
manager = IngestManager.getInstance();
}
try {
manager.cancelIngestJobs();
manager.cancelAllIngestJobs();
} finally {
//clear inbox
clearMessages();

View File

@@ -165,7 +165,7 @@ public final class IngestMonitor {
final String diskPath = root.getAbsolutePath();
MONITOR_LOGGER.log(Level.SEVERE, "Stopping ingest due to low disk space on disk {0}", diskPath); //NON-NLS
logger.log(Level.SEVERE, "Stopping ingest due to low disk space on disk {0}", diskPath); //NON-NLS
manager.cancelIngestJobs();
manager.cancelAllIngestJobs();
IngestServices.getInstance().postMessage(IngestMessage.createManagerErrorMessage(
NbBundle.getMessage(this.getClass(), "IngestMonitor.mgrErrMsg.lowDiskSpace.title", diskPath),
NbBundle.getMessage(this.getClass(), "IngestMonitor.mgrErrMsg.lowDiskSpace.msg", diskPath)));

View File

@@ -1,744 +0,0 @@
/*
* Autopsy Forensic Browser
*
* Copyright 2012-2014 Basis Technology Corp.
* Contact: carrier <at> sleuthkit <dot> org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.sleuthkit.autopsy.ingest;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Comparator;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Objects;
import java.util.Set;
import java.util.TreeSet;
import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.openide.util.NbBundle;
import org.sleuthkit.autopsy.casemodule.Case;
import org.sleuthkit.autopsy.ingest.IngestScheduler.FileIngestScheduler.FileIngestTask;
import org.sleuthkit.datamodel.AbstractFile;
import org.sleuthkit.datamodel.Content;
import org.sleuthkit.datamodel.ContentVisitor;
import org.sleuthkit.datamodel.DerivedFile;
import org.sleuthkit.datamodel.Directory;
import org.sleuthkit.datamodel.File;
import org.sleuthkit.datamodel.FileSystem;
import org.sleuthkit.datamodel.VirtualDirectory;
import org.sleuthkit.datamodel.LayoutFile;
import org.sleuthkit.datamodel.LocalFile;
import org.sleuthkit.datamodel.SleuthkitCase;
import org.sleuthkit.datamodel.TskCoreException;
import org.sleuthkit.datamodel.TskData;
import org.sleuthkit.datamodel.TskData.TSK_DB_FILES_TYPE_ENUM;
import org.sleuthkit.datamodel.TskData.TSK_FS_META_TYPE_ENUM;
/**
* Enqueues data source ingest and file ingest tasks for processing.
*/
final class IngestScheduler {
private static IngestScheduler instance;
private static final Logger logger = Logger.getLogger(IngestScheduler.class.getName());
private final DataSourceIngestScheduler dataSourceIngestScheduler = new DataSourceIngestScheduler();
private final FileIngestScheduler fileIngestScheduler = new FileIngestScheduler();
private IngestScheduler() {
}
static synchronized IngestScheduler getInstance() {
if (instance == null) {
instance = new IngestScheduler();
}
return instance;
}
DataSourceIngestScheduler getDataSourceIngestScheduler() {
return dataSourceIngestScheduler;
}
FileIngestScheduler getFileIngestScheduler() {
return fileIngestScheduler;
}
static class FileIngestScheduler {
private TreeSet<FileIngestTask> rootDirectoryTasks;
private List<FileIngestTask> directoryTasks;
private LinkedList<FileIngestTask> fileTasks; //need to add to start and end quickly
private int filesEnqueuedEst = 0;
private int filesDequeued = 0;
private final static int FAT_NTFS_FLAGS = TskData.TSK_FS_TYPE_ENUM.TSK_FS_TYPE_FAT12.getValue()
| TskData.TSK_FS_TYPE_ENUM.TSK_FS_TYPE_FAT16.getValue()
| TskData.TSK_FS_TYPE_ENUM.TSK_FS_TYPE_FAT32.getValue()
| TskData.TSK_FS_TYPE_ENUM.TSK_FS_TYPE_NTFS.getValue();
private FileIngestScheduler() {
rootDirectoryTasks = new TreeSet<>(new RootTaskComparator());
directoryTasks = new ArrayList<>();
fileTasks = new LinkedList<>();
resetCounters();
}
private void resetCounters() {
filesEnqueuedEst = 0;
filesDequeued = 0;
}
@Override
public synchronized String toString() {
StringBuilder sb = new StringBuilder();
sb.append(NbBundle.getMessage(this.getClass(), "IngestScheduler.FileSched.toString.rootDirs.text")).append(rootDirectoryTasks.size());
for (FileIngestTask task : rootDirectoryTasks) {
sb.append(task.toString()).append(" ");
}
sb.append(NbBundle.getMessage(this.getClass(), "IngestScheduler.FileSched.toString.curDirs.text")).append(directoryTasks.size());
for (FileIngestTask task : directoryTasks) {
sb.append(task.toString()).append(" ");
}
sb.append(NbBundle.getMessage(this.getClass(), "IngestScheduler.FileSched.toString.curFiles.text")).append(fileTasks.size());
for (FileIngestTask task : fileTasks) {
sb.append(task.toString()).append(" ");
}
return sb.toString();
}
synchronized void queueForIngest(IngestJob dataSourceTask) {
Content dataSource = dataSourceTask.getDataSource();
Collection<AbstractFile> rootObjects = dataSource.accept(new GetRootDirVisitor());
List<AbstractFile> firstLevelFiles = new ArrayList<>();
if (rootObjects.isEmpty() && dataSource instanceof AbstractFile) {
// The data source is a file.
firstLevelFiles.add((AbstractFile) dataSource);
} else {
for (AbstractFile root : rootObjects) {
List<Content> children;
try {
children = root.getChildren();
if (children.isEmpty()) {
//add the root itself, could be unalloc file, child of volume or image
firstLevelFiles.add(root);
} else {
//root for fs root dir, schedule children dirs/files
for (Content child : children) {
if (child instanceof AbstractFile) {
firstLevelFiles.add((AbstractFile) child);
}
}
}
} catch (TskCoreException ex) {
logger.log(Level.WARNING, "Could not get children of root to enqueue: " + root.getId() + ": " + root.getName(), ex); //NON-NLS
}
}
}
for (AbstractFile firstLevelFile : firstLevelFiles) {
FileIngestTask fileTask = new FileIngestTask(firstLevelFile, dataSourceTask);
if (shouldEnqueueTask(fileTask)) {
rootDirectoryTasks.add(fileTask);
}
}
// Update approx count of files to process in queues
filesEnqueuedEst = queryNumFilesinEnqueuedContents();
// Reshuffle/update the dir and file level queues if needed
updateQueues();
}
synchronized void queueFile(IngestJob ingestJob, AbstractFile file) {
FileIngestTask fileTask = new FileIngestTask(file, ingestJob);
if (shouldEnqueueTask(fileTask)) {
fileTasks.addFirst(fileTask);
++filesEnqueuedEst;
}
}
float getPercentageDone() {
if (filesEnqueuedEst == 0) {
return 0;
}
return ((100.f) * filesDequeued) / filesEnqueuedEst;
}
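/*
 * Worked example of the percentage calculation above (hypothetical numbers):
 * with filesEnqueuedEst = 200 and filesDequeued = 50,
 * (100.f * 50) / 200 = 25.0f, so the progress bar reports 25%.
 */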
/**
* Query the total number of files enqueued or to be enqueued.
*
* Counts all files for all the sources currently in the queues.
*
* @return Approximate total number of files enqueued (or to be enqueued).
*/
private synchronized int queryNumFilesinEnqueuedContents() {
int totalFiles = 0;
List<Content> contents = this.getSourceContent();
final GetFilesCountVisitor countVisitor =
new GetFilesCountVisitor();
for (Content content : contents) {
totalFiles += content.accept(countVisitor);
}
logger.log(Level.INFO, "Total files to queue up: {0}", totalFiles); //NON-NLS
return totalFiles;
}
/**
* Get the estimated total number of files to be enqueued for the ingest
* input sources currently in the queues.
*
* @return Estimated total number of files.
*/
int getFilesEnqueuedEst() {
return filesEnqueuedEst;
}
/**
* Get the number of files dequeued so far. This is reset when content
* that is already in a queue is enqueued again.
*
* @return Number of files dequeued so far.
*/
int getFilesDequeued() {
return filesDequeued;
}
synchronized FileIngestTask getNextTask() {
final FileIngestTask task = fileTasks.pollLast();
if (task != null) {
filesDequeued++;
updateQueues();
}
return task;
}
/**
* Shuffle the queues so that there are files in the file tasks queue.
* Returns when the file tasks queue has tasks or all of the queues are
* empty.
*/
private synchronized void updateQueues() {
// we loop because we could have a directory that has all files
// that do not get enqueued
while (true) {
// There are files in the queue, we're done
if (this.fileTasks.isEmpty() == false) {
return;
}
// fill in the directory queue if it is empty.
if (this.directoryTasks.isEmpty()) {
// bail out if root is also empty -- we are done
if (rootDirectoryTasks.isEmpty()) {
return;
}
FileIngestTask rootTask = this.rootDirectoryTasks.pollFirst();
directoryTasks.add(rootTask);
}
//pop and push AbstractFile directory children if any
//add the popped and its leaf children onto cur file list
FileIngestTask parentTask = directoryTasks.remove(directoryTasks.size() - 1);
final AbstractFile parentFile = parentTask.file;
// add itself to the file list
if (shouldEnqueueTask(parentTask)) {
this.fileTasks.addLast(parentTask);
}
// add its children to the file and directory lists
try {
List<Content> children = parentFile.getChildren();
for (Content c : children) {
if (c instanceof AbstractFile) {
AbstractFile childFile = (AbstractFile) c;
FileIngestTask childTask = new FileIngestTask(childFile, parentTask.getJob());
if (childFile.hasChildren()) {
this.directoryTasks.add(childTask);
} else if (shouldEnqueueTask(childTask)) {
this.fileTasks.addLast(childTask);
}
}
}
} catch (TskCoreException ex) {
logger.log(Level.SEVERE, "Could not get children of file and update file queues: " //NON-NLS
+ parentFile.getName(), ex);
}
}
}
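/*
 * Traversal sketch (hypothetical layout): a root directory is popped from
 * rootDirectoryTasks into directoryTasks; when that directory is popped in
 * turn, its leaf files are appended to fileTasks and its subdirectories go
 * back onto directoryTasks, so the file queue is refilled depth-first until
 * all three queues are empty.
 */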
/**
* Return list of content objects that are in the queue to be processed.
*
* Helpful to determine whether ingest for a particular input Content is
* active.
*
* @return list of parent source content objects for files currently
* enqueued
*/
synchronized List<Content> getSourceContent() {
final Set<Content> contentSet = new HashSet<>();
for (FileIngestTask task : rootDirectoryTasks) {
contentSet.add(task.getJob().getDataSource());
}
for (FileIngestTask task : directoryTasks) {
contentSet.add(task.getJob().getDataSource());
}
for (FileIngestTask task : fileTasks) {
contentSet.add(task.getJob().getDataSource());
}
return new ArrayList<>(contentSet);
}
synchronized void emptyQueues() {
this.rootDirectoryTasks.clear();
this.directoryTasks.clear();
this.fileTasks.clear();
}
/**
* Check if the file is a special file that we should skip.
*
* @param processTask A task whose file is checked to decide whether it
* should be queued or skipped.
* @return True if the file should be enqueued, false otherwise.
*/
private static boolean shouldEnqueueTask(final FileIngestTask processTask) {
final AbstractFile aFile = processTask.file;
//if it's unalloc file, skip if so scheduled
if (processTask.getJob().shouldProcessUnallocatedSpace() == false
&& aFile.getType().equals(TSK_DB_FILES_TYPE_ENUM.UNALLOC_BLOCKS //unalloc files
)) {
return false;
}
String fileName = aFile.getName();
if (fileName.equals(".") || fileName.equals("..")) {
return false;
} else if (aFile instanceof org.sleuthkit.datamodel.File) {
final org.sleuthkit.datamodel.File f = (File) aFile;
//skip files in root dir, starting with $, containing : (not default attributes)
//with meta address < 32, i.e. some special large NTFS and FAT files
FileSystem fs = null;
try {
fs = f.getFileSystem();
} catch (TskCoreException ex) {
logger.log(Level.SEVERE, "Could not get FileSystem for " + f, ex); //NON-NLS
}
TskData.TSK_FS_TYPE_ENUM fsType = TskData.TSK_FS_TYPE_ENUM.TSK_FS_TYPE_UNSUPP;
if (fs != null) {
fsType = fs.getFsType();
}
if ((fsType.getValue() & FAT_NTFS_FLAGS) == 0) {
//not fat or ntfs, accept all files
return true;
}
boolean isInRootDir = false;
try {
isInRootDir = f.getParentDirectory().isRoot();
} catch (TskCoreException ex) {
logger.log(Level.WARNING, "Could not check if should enqueue the file: " + f.getName(), ex); //NON-NLS
}
if (isInRootDir && f.getMetaAddr() < 32) {
String name = f.getName();
if (name.length() > 0
&& name.charAt(0) == '$'
&& name.contains(":")) {
return false;
}
} else {
return true;
}
}
return true;
}
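/*
 * Illustration of the NTFS/FAT special-file filter above, for a few
 * hypothetical root-directory entries with meta addresses below 32:
 *
 *     "." and ".."      -> skipped for any file system
 *     "$Secure:$SDS"    -> skipped (starts with '$' and contains ':')
 *     "$MFT"            -> enqueued (no ':' in the name)
 *     "pagefile.sys"    -> enqueued here; it is only deprioritized by the root task sorter
 */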
static class FileIngestTask {
private final AbstractFile file;
private final IngestJob task;
private FileIngestTask(AbstractFile file, IngestJob task) {
this.file = file;
this.task = task;
}
public IngestJob getJob() {
return task;
}
public AbstractFile getFile() {
return file;
}
@Override
public String toString() {
try {
return "ProcessTask{" + "file=" + file.getId() + ": " //NON-NLS
+ file.getUniquePath() + "}"; // + ", dataSourceTask=" + dataSourceTask + '}';
} catch (TskCoreException ex) {
logger.log(Level.SEVERE, "Could not get unique path of file in queue, ", ex); //NON-NLS
}
return "ProcessTask{" + "file=" + file.getId() + ": " //NON-NLS
+ file.getName() + '}';
}
/**
* Two process tasks are equal when the file/dir and the ingest job are
* the same. This ensures that the same file/dir and module tuples are
* not queued into the root dir set more than once.
*
* @param obj
* @return
*/
@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
final FileIngestTask other = (FileIngestTask) obj;
if (this.file != other.file && (this.file == null || !this.file.equals(other.file))) {
return false;
}
IngestJob thisTask = this.getJob();
IngestJob otherTask = other.getJob();
if (thisTask != otherTask
&& (thisTask == null || !thisTask.equals(otherTask))) {
return false;
}
return true;
}
@Override
public int hashCode() {
int hash = 5;
hash = 47 * hash + Objects.hashCode(this.file);
hash = 47 * hash + Objects.hashCode(this.task);
return hash;
}
}
/**
* Root dir sorter
*/
private static class RootTaskComparator implements Comparator<FileIngestTask> {
@Override
public int compare(FileIngestTask q1, FileIngestTask q2) {
AbstractFilePriority.Priority p1 = AbstractFilePriority.getPriority(q1.file);
AbstractFilePriority.Priority p2 = AbstractFilePriority.getPriority(q2.file);
if (p1 == p2) {
return (int) (q2.file.getId() - q1.file.getId());
} else {
return p2.ordinal() - p1.ordinal();
}
}
/**
* Priority determination for sorted AbstractFiles, used by
* RootTaskComparator.
*/
private static class AbstractFilePriority {
enum Priority {
LAST, LOW, MEDIUM, HIGH
};
static final List<Pattern> LAST_PRI_PATHS = new ArrayList<>();
static final List<Pattern> LOW_PRI_PATHS = new ArrayList<>();
static final List<Pattern> MEDIUM_PRI_PATHS = new ArrayList<>();
static final List<Pattern> HIGH_PRI_PATHS = new ArrayList<>();
/* prioritize root directory folders based on the assumption that we are
* looking for user content. Other types of investigations may want different
* priorities. */
static {
// these files have no structure, so they go last
//unalloc files are handled as virtual files in getPriority()
//LAST_PRI_PATHS.schedule(Pattern.compile("^\\$Unalloc", Pattern.CASE_INSENSITIVE));
//LAST_PRI_PATHS.schedule(Pattern.compile("^\\Unalloc", Pattern.CASE_INSENSITIVE));
LAST_PRI_PATHS.add(Pattern.compile("^pagefile", Pattern.CASE_INSENSITIVE));
LAST_PRI_PATHS.add(Pattern.compile("^hiberfil", Pattern.CASE_INSENSITIVE));
// orphan files are often corrupt and windows does not typically have
// user content, so put them towards the bottom
LOW_PRI_PATHS.add(Pattern.compile("^\\$OrphanFiles", Pattern.CASE_INSENSITIVE));
LOW_PRI_PATHS.add(Pattern.compile("^Windows", Pattern.CASE_INSENSITIVE));
// all other files go into the medium category too
MEDIUM_PRI_PATHS.add(Pattern.compile("^Program Files", Pattern.CASE_INSENSITIVE));
// user content is top priority
HIGH_PRI_PATHS.add(Pattern.compile("^Users", Pattern.CASE_INSENSITIVE));
HIGH_PRI_PATHS.add(Pattern.compile("^Documents and Settings", Pattern.CASE_INSENSITIVE));
HIGH_PRI_PATHS.add(Pattern.compile("^home", Pattern.CASE_INSENSITIVE));
HIGH_PRI_PATHS.add(Pattern.compile("^ProgramData", Pattern.CASE_INSENSITIVE));
}
/**
* Get the scheduling priority for a given file.
*
* @param abstractFile
* @return
*/
static AbstractFilePriority.Priority getPriority(final AbstractFile abstractFile) {
if (!abstractFile.getType().equals(TskData.TSK_DB_FILES_TYPE_ENUM.FS)) {
//quickly filter out unstructured content
//non-fs virtual files and dirs, such as representing unalloc space
return AbstractFilePriority.Priority.LAST;
}
//determine the fs files priority by name
final String path = abstractFile.getName();
if (path == null) {
return AbstractFilePriority.Priority.MEDIUM;
}
for (Pattern p : HIGH_PRI_PATHS) {
Matcher m = p.matcher(path);
if (m.find()) {
return AbstractFilePriority.Priority.HIGH;
}
}
for (Pattern p : MEDIUM_PRI_PATHS) {
Matcher m = p.matcher(path);
if (m.find()) {
return AbstractFilePriority.Priority.MEDIUM;
}
}
for (Pattern p : LOW_PRI_PATHS) {
Matcher m = p.matcher(path);
if (m.find()) {
return AbstractFilePriority.Priority.LOW;
}
}
for (Pattern p : LAST_PRI_PATHS) {
Matcher m = p.matcher(path);
if (m.find()) {
return AbstractFilePriority.Priority.LAST;
}
}
//default is medium
return AbstractFilePriority.Priority.MEDIUM;
}
}
}
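/*
 * Examples of how the patterns above rank hypothetical root-directory names
 * (the HIGH, MEDIUM, LOW, LAST lists are checked in that order):
 *
 *     "Users"                  -> HIGH
 *     "Documents and Settings" -> HIGH
 *     "Program Files (x86)"    -> MEDIUM
 *     "Windows"                -> LOW
 *     "pagefile.sys"           -> LAST
 *     non-FS virtual content   -> LAST (filtered out before the name check)
 *     anything else            -> MEDIUM (the default)
 */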
/**
* Get counts of ingestable files/dirs for the content input source.
*
* Note that the counts also include all unallocated child files (for the
* file system, image, or volume) even if ingest did not ask for them.
*/
static class GetFilesCountVisitor extends ContentVisitor.Default<Long> {
@Override
public Long visit(FileSystem fs) {
//recursion stop here
//case of a real fs, query all files for it
SleuthkitCase sc = Case.getCurrentCase().getSleuthkitCase();
StringBuilder queryB = new StringBuilder();
queryB.append("( (fs_obj_id = ").append(fs.getId()); //NON-NLS
//queryB.append(") OR (fs_obj_id = NULL) )");
queryB.append(") )");
queryB.append(" AND ( (meta_type = ").append(TSK_FS_META_TYPE_ENUM.TSK_FS_META_TYPE_REG.getValue()); //NON-NLS
queryB.append(") OR (meta_type = ").append(TSK_FS_META_TYPE_ENUM.TSK_FS_META_TYPE_DIR.getValue()); //NON-NLS
queryB.append(" AND (name != '.') AND (name != '..')"); //NON-NLS
queryB.append(") )");
//queryB.append( "AND (type = ");
//queryB.append(TskData.TSK_DB_FILES_TYPE_ENUM.FS.getFileType());
//queryB.append(")");
try {
final String query = queryB.toString();
logger.log(Level.INFO, "Executing count files query: {0}", query); //NON-NLS
return sc.countFilesWhere(query);
} catch (TskCoreException ex) {
logger.log(Level.SEVERE, "Couldn't get count of all files in FileSystem", ex); //NON-NLS
return 0L;
}
}
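/*
 * For reference, the WHERE clause assembled above for a file system with
 * object id 5 would read roughly as follows (REG and DIR stand for the
 * numeric values of TSK_FS_META_TYPE_REG and TSK_FS_META_TYPE_DIR):
 *
 *     ( (fs_obj_id = 5) ) AND ( (meta_type = REG) OR (meta_type = DIR AND (name != '.') AND (name != '..')) )
 */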
@Override
public Long visit(LayoutFile lf) {
//recursion stop here
//case of LayoutFile child of Image or Volume
return 1L;
}
private long getCountFromChildren(Content content) {
long count = 0;
try {
List<Content> children = content.getChildren();
if (children.size() > 0) {
for (Content child : children) {
count += child.accept(this);
}
} else {
count = 1;
}
} catch (TskCoreException ex) {
logger.log(Level.WARNING, "Could not get count of objects from children to get num of total files to be ingested", ex); //NON-NLS
}
return count;
}
@Override
protected Long defaultVisit(Content cntnt) {
//recurse assuming this is image/vs/volume
//recursion stops at fs or unalloc file
return getCountFromChildren(cntnt);
}
}
/**
* Visitor that gets a collection of top level objects to be scheduled,
* such as root directories (if there is a file system) or LayoutFiles
* and virtual directories (if there is no file system).
*/
static class GetRootDirVisitor extends GetFilesContentVisitor {
@Override
public Collection<AbstractFile> visit(VirtualDirectory ld) {
//case when we hit a layout directory or local file container, not under a real FS
//or when root virt dir is scheduled
Collection<AbstractFile> ret = new ArrayList<>();
ret.add(ld);
return ret;
}
@Override
public Collection<AbstractFile> visit(LayoutFile lf) {
//case when we hit a layout file, not under a real FS
Collection<AbstractFile> ret = new ArrayList<>();
ret.add(lf);
return ret;
}
@Override
public Collection<AbstractFile> visit(Directory drctr) {
//we hit a real directory, a child of real FS
Collection<AbstractFile> ret = new ArrayList<>();
ret.add(drctr);
return ret;
}
@Override
public Collection<AbstractFile> visit(FileSystem fs) {
return getAllFromChildren(fs);
}
@Override
public Collection<AbstractFile> visit(File file) {
//can have derived files
return getAllFromChildren(file);
}
@Override
public Collection<AbstractFile> visit(DerivedFile derivedFile) {
//can have derived files
//TODO test this and overall scheduler with derived files
return getAllFromChildren(derivedFile);
}
@Override
public Collection<AbstractFile> visit(LocalFile localFile) {
//can have local files
//TODO test this and overall scheduler with local files
return getAllFromChildren(localFile);
}
}
}
static class DataSourceIngestScheduler {
private final LinkedList<IngestJob> tasks = new LinkedList<>();
private DataSourceIngestScheduler() {
}
synchronized void queueForIngest(IngestJob job) {
try {
if (job.getDataSource().getParent() != null) {
logger.log(Level.SEVERE, "Only parent-less Content (data sources) can be scheduled for DataSource ingest, skipping: {0}", job.getDataSource()); //NON-NLS
return;
}
} catch (TskCoreException e) {
logger.log(Level.SEVERE, "Error validating data source to be scheduled for DataSource ingest" + job.getDataSource(), e); //NON-NLS
return;
}
tasks.addLast(job);
}
public synchronized IngestJob getNextTask() {
return tasks.pollFirst();
}
synchronized void emptyQueues() {
tasks.clear();
}
synchronized int getCount() {
return tasks.size();
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append(NbBundle.getMessage(this.getClass(), "IngestScheduler.DataSourceScheduler.toString.size"))
.append(getCount());
for (IngestJob task : tasks) {
sb.append(task.toString()).append(" ");
}
return sb.toString();
}
}
}

View File

@@ -95,7 +95,7 @@ public final class IngestServices {
* artifact data
*/
public void fireModuleDataEvent(ModuleDataEvent moduleDataEvent) {
IngestManager.fireModuleDataEvent(moduleDataEvent);
IngestManager.fireIngestModuleDataEvent(moduleDataEvent);
}
/**
@@ -107,7 +107,7 @@
* changed
*/
public void fireModuleContentEvent(ModuleContentEvent moduleContentEvent) {
IngestManager.fireModuleContentEvent(moduleContentEvent);
IngestManager.fireIngestModuleContentEvent(moduleContentEvent);
}
/**