This commit is contained in: prod, staging
@@ -187,29 +187,17 @@ class Scanner:
         """Execute the reconnaissance scan with simplified recursion and forensic tracking."""
-        print(f"_execute_scan started for {target_domain} with depth {max_depth}")
         self.executor = ThreadPoolExecutor(max_workers=self.max_workers)
-        processed_targets = set()
-
+        # Initialize variables outside try block
+        processed_targets = set()  # Fix: Initialize here
+
         try:
-            print("Setting status to RUNNING")
             self.status = ScanStatus.RUNNING
-
             # Log scan start
             enabled_providers = [provider.get_name() for provider in self.providers]
             self.logger.log_scan_start(target_domain, max_depth, enabled_providers)
-            print(f"Logged scan start with providers: {enabled_providers}")
-
             # Initialize with target domain and track it
-            print(f"Adding target domain '{target_domain}' as initial node")
             self.graph.add_node(target_domain, NodeType.DOMAIN)
             self._initialize_provider_states(target_domain)
-
             # BFS-style exploration with simplified recursion
             current_level_targets = {target_domain}
-            all_discovered_targets = set()  # Track all discovered targets for large entity detection
-
-            print("Starting BFS exploration with simplified recursion...")
             all_discovered_targets = {target_domain}

             for depth in range(max_depth + 1):
                 if self.stop_event.is_set():
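Note on the `# Fix` lines added above: binding processed_targets before the try block protects the finally clause further down, which reports len(processed_targets). Had the set first been bound inside try, any exception raised before that point would turn the final statistics print into a NameError. A minimal self-contained sketch of the pattern (names are illustrative, not the Scanner API):

def run_scan() -> None:
    # Bind before the try block so the finally clause can always read it.
    processed_targets = set()
    try:
        raise RuntimeError("provider failed before any target was processed")
    except RuntimeError as e:
        print(f"ERROR: Scan execution failed with error: {e}")
    finally:
        # Safe even though the failure happened before any work was done.
        print(f" - Targets processed: {len(processed_targets)}")

run_scan()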
@@ -217,32 +205,25 @@ class Scanner:
                     break

                 self.current_depth = depth
-                print(f"Processing depth level {depth} with {len(current_level_targets)} targets")
-
-                if not current_level_targets:
-                    print("No targets to process at this level")
+                targets_to_process = current_level_targets - processed_targets
+                if not targets_to_process:
+                    print("No new targets to process at this level.")
                     break
-
-                self.total_indicators_found += len(current_level_targets)
-
                 # Process targets and collect newly discovered ones
+                print(f"Processing depth level {depth} with {len(targets_to_process)} new targets")
+                self.total_indicators_found += len(targets_to_process)

                 target_results = self._process_targets_concurrent_forensic(
-                    current_level_targets, processed_targets, all_discovered_targets, depth
+                    targets_to_process, processed_targets, all_discovered_targets, depth
                 )
+                processed_targets.update(targets_to_process)

                 next_level_targets = set()
-                for target, new_targets in target_results:
-                    processed_targets.add(target)
+                for _target, new_targets in target_results:
                     all_discovered_targets.update(new_targets)
-
-                    # Simple recursion rule: only valid IPs and domains within depth limit
-                    if depth < max_depth:
-                        for new_target in new_targets:
-                            if self._should_recurse_on_target(new_target, processed_targets, all_discovered_targets):
-                                next_level_targets.add(new_target)
+                    next_level_targets.update(new_targets)

                 current_level_targets = next_level_targets
                 print(f"Completed depth {depth}, {len(next_level_targets)} targets for next level")

         except Exception as e:
             print(f"ERROR: Scan execution failed with error: {e}")
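The reworked loop above is a plain breadth-first traversal with a visited set: each level is trimmed by set difference before expansion, and the whole level is marked processed with a single update() call, so a target rediscovered at a deeper level is never expanded twice. A runnable sketch of that shape, with a toy discover() standing in for _process_targets_concurrent_forensic (both the function and the edge map below are illustrative):

from typing import Callable, Iterable, Set

def bfs_scan(root: str, max_depth: int, discover: Callable[[str], Iterable[str]]) -> Set[str]:
    processed: Set[str] = set()
    current_level = {root}
    for depth in range(max_depth + 1):
        targets_to_process = current_level - processed  # skip anything already expanded
        if not targets_to_process:
            break
        next_level: Set[str] = set()
        for target in targets_to_process:
            next_level.update(discover(target))
        processed.update(targets_to_process)  # mark the whole level at once
        current_level = next_level
    return processed

# Toy relationship map; a.example.com loops back to the root without causing re-expansion.
edges = {"example.com": ["a.example.com", "b.example.com"], "a.example.com": ["example.com"]}
print(bfs_scan("example.com", max_depth=3, discover=lambda t: edges.get(t, [])))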
@@ -252,19 +233,15 @@ class Scanner:
         finally:
             if self.stop_event.is_set():
                 self.status = ScanStatus.STOPPED
-                print("Scan completed with STOPPED status")
             else:
                 self.status = ScanStatus.COMPLETED
-                print("Scan completed with COMPLETED status")
-
             self.logger.log_scan_complete()
             self.executor.shutdown(wait=False, cancel_futures=True)
-
             stats = self.graph.get_statistics()
             print("Final scan statistics:")
             print(f" - Total nodes: {stats['basic_metrics']['total_nodes']}")
             print(f" - Total edges: {stats['basic_metrics']['total_edges']}")
             print(f" - Targets processed: {len(processed_targets)}")

     def _initialize_provider_states(self, target: str) -> None:
         """Initialize provider states for forensic tracking."""
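The shutdown call kept in the finally block uses the stop-fast combination available since Python 3.9: cancel_futures=True drops queued tasks that have not started, while wait=False returns immediately instead of blocking on tasks that are already running. A small demonstration outside the Scanner:

import time
from concurrent.futures import ThreadPoolExecutor

executor = ThreadPoolExecutor(max_workers=1)
futures = [executor.submit(time.sleep, 0.5) for _ in range(4)]
executor.shutdown(wait=False, cancel_futures=True)
# The first task is already running and completes; the still-queued ones are cancelled.
print([f.cancelled() for f in futures])  # typically [False, True, True, True]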
@@ -382,9 +359,12 @@ class Scanner:
             except (Exception, CancelledError) as e:
                 self._log_provider_error(target, provider.get_name(), str(e))

         # Update node with collected metadata
-        if target_metadata[target]:
-            self.graph.add_node(target, target_type, metadata=dict(target_metadata[target]))
+        for node_id, metadata_dict in target_metadata.items():
+            if self.graph.graph.has_node(node_id):
+                node_is_ip = _is_valid_ip(node_id)
+                node_type_to_add = NodeType.IP if node_is_ip else NodeType.DOMAIN
+                # This call updates the existing node with the new metadata
+                self.graph.add_node(node_id, node_type_to_add, metadata=metadata_dict)

         return new_targets
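The new loop above infers each node's type from its identifier via _is_valid_ip, a helper that is not shown in this diff. A plausible stdlib-based equivalent, for illustration only:

import ipaddress

def _is_valid_ip(value: str) -> bool:
    """Return True if value parses as an IPv4 or IPv6 address."""
    try:
        ipaddress.ip_address(value)
        return True
    except ValueError:
        return False

print(_is_valid_ip("192.0.2.10"))   # True  -> NodeType.IP
print(_is_valid_ip("example.com"))  # False -> NodeType.DOMAIN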
@@ -573,8 +553,6 @@ class Scanner:
     def _collect_node_metadata_forensic(self, node_id: str, provider_name: str, rel_type: RelationshipType,
                                         target: str, raw_data: Dict[str, Any], metadata: Dict[str, Any]) -> None:
         """Collect and organize metadata for forensic tracking with enhanced logging."""

-        # Log metadata collection
-        self.logger.logger.debug(f"Collecting metadata for {node_id} from {provider_name}: {rel_type.relationship_name}")

         if provider_name == 'dns':
@@ -599,7 +577,6 @@ class Scanner:
                 if key not in metadata.get('shodan', {}) or not metadata.get('shodan', {}).get(key):
                     metadata.setdefault('shodan', {})[key] = value

-        # Track ASN data
         if rel_type == RelationshipType.ASN_MEMBERSHIP:
             metadata['asn_data'] = {
                 'asn': target,
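The guarded setdefault pattern kept as context in this hunk merges fresh provider output without clobbering data that is already present: an existing truthy value wins, while missing or falsy entries are written. A standalone illustration with made-up values:

metadata = {'shodan': {'org': 'ExampleOrg', 'ports': []}}
incoming = {'org': 'OtherOrg', 'ports': [80, 443], 'isp': 'ExampleISP'}

for key, value in incoming.items():
    if key not in metadata.get('shodan', {}) or not metadata.get('shodan', {}).get(key):
        metadata.setdefault('shodan', {})[key] = value

# 'org' keeps its existing value; the empty 'ports' list is replaced; 'isp' is added.
print(metadata)  # {'shodan': {'org': 'ExampleOrg', 'ports': [80, 443], 'isp': 'ExampleISP'}}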