also allow IP lookups in scan

Author: overcuriousity
Date: 2025-09-15 21:00:57 +02:00
parent c076ee028f
commit e2d4e12057
5 changed files with 108 additions and 106 deletions


@@ -204,7 +204,7 @@ class Scanner:
         self._initialize_providers()
         print("Session configuration updated")

-    def start_scan(self, target_domain: str, max_depth: int = 2, clear_graph: bool = True) -> bool:
+    def start_scan(self, target: str, max_depth: int = 2, clear_graph: bool = True) -> bool:
         """Start a new reconnaissance scan with proper cleanup of previous scans."""
         print(f"=== STARTING SCAN IN SCANNER {id(self)} ===")
         print(f"Session ID: {self.session_id}")
@@ -268,7 +268,7 @@ class Scanner:
         if clear_graph:
             self.graph.clear()

-        self.current_target = target_domain.lower().strip()
+        self.current_target = target.lower().strip()
         self.max_depth = max_depth
         self.current_depth = 0
@@ -304,76 +304,80 @@ class Scanner:
             self._update_session_state()
             return False

-    def _execute_scan(self, target_domain: str, max_depth: int) -> None:
+    def _execute_scan(self, target: str, max_depth: int) -> None:
         """Execute the reconnaissance scan with proper termination handling."""
-        print(f"_execute_scan started for {target_domain} with depth {max_depth}")
+        print(f"_execute_scan started for {target} with depth {max_depth}")
         self.executor = ThreadPoolExecutor(max_workers=self.max_workers)
         processed_targets = set()
-        self.task_queue.append((target_domain, 0, False))
+        self.task_queue.append((target, 0, False))

         try:
             self.status = ScanStatus.RUNNING
             self._update_session_state()

             enabled_providers = [provider.get_name() for provider in self.providers]
-            self.logger.log_scan_start(target_domain, max_depth, enabled_providers)
-            self.graph.add_node(target_domain, NodeType.DOMAIN)
-            self._initialize_provider_states(target_domain)
+            self.logger.log_scan_start(target, max_depth, enabled_providers)
+
+            # Determine initial node type
+            node_type = NodeType.IP if _is_valid_ip(target) else NodeType.DOMAIN
+            self.graph.add_node(target, node_type)
+            self._initialize_provider_states(target)

-            # **IMPROVED**: Better termination checking in main loop
+            # Better termination checking in main loop
             while self.task_queue and not self._is_stop_requested():
                 try:
-                    target, depth, is_large_entity_member = self.task_queue.popleft()
+                    target_item, depth, is_large_entity_member = self.task_queue.popleft()
                 except IndexError:
                     # Queue became empty during processing
                     break

-                if target in processed_targets:
+                if target_item in processed_targets:
                     continue

                 if depth > max_depth:
                     continue

-                # **NEW**: Track this target as currently processing
+                # Track this target as currently processing
                 with self.processing_lock:
                     if self._is_stop_requested():
-                        print(f"Stop requested before processing {target}")
+                        print(f"Stop requested before processing {target_item}")
                         break
-                    self.currently_processing.add(target)
+                    self.currently_processing.add(target_item)

                 try:
                     self.current_depth = depth
-                    self.current_indicator = target
+                    self.current_indicator = target_item
                     self._update_session_state()

-                    # **IMPROVED**: More frequent stop checking during processing
+                    # More frequent stop checking during processing
                     if self._is_stop_requested():
-                        print(f"Stop requested during processing setup for {target}")
+                        print(f"Stop requested during processing setup for {target_item}")
                         break

-                    new_targets, large_entity_members, success = self._query_providers_for_target(target, depth, is_large_entity_member)
+                    new_targets, large_entity_members, success = self._query_providers_for_target(target_item, depth, is_large_entity_member)

-                    # **NEW**: Check stop signal after provider queries
+                    # Check stop signal after provider queries
                     if self._is_stop_requested():
-                        print(f"Stop requested after querying providers for {target}")
+                        print(f"Stop requested after querying providers for {target_item}")
                         break

                     if not success:
-                        self.target_retries[target] += 1
-                        if self.target_retries[target] <= self.config.max_retries_per_target:
-                            print(f"Re-queueing target {target} (attempt {self.target_retries[target]})")
-                            self.task_queue.append((target, depth, is_large_entity_member))
+                        self.target_retries[target_item] += 1
+                        if self.target_retries[target_item] <= self.config.max_retries_per_target:
+                            print(f"Re-queueing target {target_item} (attempt {self.target_retries[target_item]})")
+                            self.task_queue.append((target_item, depth, is_large_entity_member))
                             self.tasks_re_enqueued += 1
                         else:
-                            print(f"ERROR: Max retries exceeded for target {target}")
+                            print(f"ERROR: Max retries exceeded for target {target_item}")
                             self.scan_failed_due_to_retries = True
-                            self._log_target_processing_error(target, "Max retries exceeded")
+                            self._log_target_processing_error(target_item, "Max retries exceeded")
                     else:
-                        processed_targets.add(target)
+                        processed_targets.add(target_item)
                         self.indicators_completed += 1

-                        # **NEW**: Only add new targets if not stopped
+                        # Only add new targets if not stopped
                         if not self._is_stop_requested():
                             for new_target in new_targets:
                                 if new_target not in processed_targets:
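
The new branch calls _is_valid_ip, which is referenced but not defined anywhere in this diff. A minimal sketch of such a helper, assuming it simply wraps the standard-library ipaddress module:

    import ipaddress

    def _is_valid_ip(value: str) -> bool:
        # True for any syntactically valid IPv4 or IPv6 address.
        try:
            ipaddress.ip_address(value.strip())
            return True
        except ValueError:
            return False

ipaddress.ip_address raises ValueError for anything that is not a well-formed address, so hostnames fall through to the NodeType.DOMAIN branch.
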
@@ -384,11 +388,11 @@ class Scanner:
                                 self.task_queue.append((member, depth, True))
                 finally:
-                    # **NEW**: Always remove from processing set
+                    # Always remove from processing set
                     with self.processing_lock:
-                        self.currently_processing.discard(target)
+                        self.currently_processing.discard(target_item)

-            # **NEW**: Log termination reason
+            # Log termination reason
             if self._is_stop_requested():
                 print("Scan terminated due to stop request")
                 self.logger.logger.info("Scan terminated by user request")
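
The finally placement above guarantees that currently_processing.discard runs whether a target completes, raises, or hits a stop request mid-flight. A stripped-down sketch of the pattern (stop_event, processing_lock, and process are illustrative names, not taken from this codebase):

    import threading

    stop_event = threading.Event()        # set by a stop/cancel handler
    processing_lock = threading.Lock()    # guards the shared set below
    currently_processing = set()          # targets being worked on right now

    def process(target_item: str) -> None:
        with processing_lock:
            if stop_event.is_set():
                return
            currently_processing.add(target_item)
        try:
            pass  # query providers here; may raise or take a long time
        finally:
            # Runs on success, exception, or early stop alike,
            # so the set can never leak a finished target.
            with processing_lock:
                currently_processing.discard(target_item)
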
@@ -402,7 +406,7 @@ class Scanner:
             self.status = ScanStatus.FAILED
             self.logger.logger.error(f"Scan failed: {e}")
         finally:
-            # **NEW**: Clear processing state on exit
+            # Clear processing state on exit
             with self.processing_lock:
                 self.currently_processing.clear()
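
Taken together, the rename means the public entry point now accepts either indicator type. A hedged usage sketch (the Scanner constructor arguments are an assumption; nothing in this diff shows them):

    scanner = Scanner()  # constructor signature assumed

    scanner.start_scan("example.com", max_depth=2)  # domain seed, as before
    scanner.start_scan("192.0.2.10", max_depth=2)   # IP seed, now also supported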