This commit is contained in:
overcuriousity
2025-09-14 13:14:02 +02:00
parent b26002eff9
commit 4378146d0c
8 changed files with 1765 additions and 683 deletions

View File

@@ -8,6 +8,7 @@ from .scanner import Scanner, ScanStatus
from .logger import ForensicLogger, get_forensic_logger, new_session
from .session_manager import session_manager
from .session_config import SessionConfig, create_session_config
from .task_manager import TaskManager, TaskType, ReconTask
__all__ = [
'GraphManager',
@@ -19,7 +20,10 @@ __all__ = [
'new_session',
'session_manager',
'SessionConfig',
'create_session_config'
'create_session_config',
'TaskManager',
'TaskType',
'ReconTask'
]
__version__ = "1.0.0-phase2"

File diff suppressed because it is too large

View File

@@ -1,6 +1,6 @@
"""
Per-session configuration management for DNSRecon.
Provides isolated configuration instances for each user session.
Enhanced per-session configuration management for DNSRecon.
Provides isolated configuration instances for each user session while supporting global caching.
"""
import os
@@ -9,12 +9,12 @@ from typing import Dict, Optional
class SessionConfig:
"""
Session-specific configuration that inherits from global config
but maintains isolated API keys and provider settings.
Enhanced session-specific configuration that inherits from global config
but maintains isolated API keys and provider settings while supporting global caching.
"""
def __init__(self):
"""Initialize session config with global defaults."""
"""Initialize enhanced session config with global cache support."""
# Copy all attributes from global config
self.api_keys: Dict[str, Optional[str]] = {
'shodan': None
@@ -26,20 +26,39 @@ class SessionConfig:
self.max_concurrent_requests = 5
self.large_entity_threshold = 100
# Rate limiting settings (per session)
# Enhanced rate limiting settings (per session)
self.rate_limits = {
'crtsh': 60,
'shodan': 60,
'dns': 100
}
# Provider settings (per session)
# Enhanced provider settings (per session)
self.enabled_providers = {
'crtsh': True,
'dns': True,
'shodan': False
}
# Task-based execution settings
self.task_retry_settings = {
'max_retries': 3,
'base_backoff_seconds': 1.0,
'max_backoff_seconds': 60.0,
'retry_on_rate_limit': True,
'retry_on_connection_error': True,
'retry_on_timeout': True
}
# Cache settings (global across all sessions)
self.cache_settings = {
'enabled': True,
'expiry_hours': 12,
'cache_base_dir': '.cache',
'per_provider_directories': True,
'thread_safe_operations': True
}
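# Note: with 'per_provider_directories' enabled, the intent is one cache
# subdirectory per provider under 'cache_base_dir' (e.g. '.cache/shodan/');
# the example directory name is illustrative.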
# Logging configuration
self.log_level = 'INFO'
self.log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
@@ -48,6 +67,22 @@ class SessionConfig:
self.flask_host = '127.0.0.1'
self.flask_port = 5000
self.flask_debug = True
# Session isolation settings
self.session_isolation = {
'enforce_single_session_per_user': True,
'consolidate_session_data_on_replacement': True,
'user_fingerprinting_enabled': True,
'session_timeout_minutes': 60
}
# Circuit breaker settings for provider reliability
self.circuit_breaker = {
'enabled': True,
'failure_threshold': 5, # Failures before opening circuit
'recovery_timeout_seconds': 300, # 5 minutes before trying again
'half_open_max_calls': 3 # Test calls when recovering
}
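# These settings map onto the standard circuit breaker states:
# closed -> open after 'failure_threshold' consecutive provider failures,
# open -> half-open once 'recovery_timeout_seconds' has elapsed,
# half-open -> closed again after up to 'half_open_max_calls' successful test calls.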
def set_api_key(self, provider: str, api_key: str) -> bool:
"""
@@ -55,14 +90,19 @@ class SessionConfig:
Args:
provider: Provider name (shodan, etc)
api_key: API key string
api_key: API key string (empty string to clear)
Returns:
bool: True if key was set successfully
"""
if provider in self.api_keys:
self.api_keys[provider] = api_key
self.enabled_providers[provider] = True if api_key else False
# Handle clearing of API keys
if api_key and api_key.strip():
self.api_keys[provider] = api_key.strip()
self.enabled_providers[provider] = True
else:
self.api_keys[provider] = None
self.enabled_providers[provider] = False
return True
return False
@@ -102,19 +142,231 @@ class SessionConfig:
"""
return self.rate_limits.get(provider, 60)
def get_task_retry_config(self) -> Dict[str, any]:
"""
Get task retry configuration for this session.
Returns:
Dictionary with retry settings
"""
return self.task_retry_settings.copy()
def get_cache_config(self) -> Dict[str, any]:
"""
Get cache configuration (global settings).
Returns:
Dictionary with cache settings
"""
return self.cache_settings.copy()
def is_circuit_breaker_enabled(self) -> bool:
"""Check if circuit breaker is enabled for provider reliability."""
return self.circuit_breaker.get('enabled', True)
def get_circuit_breaker_config(self) -> Dict[str, any]:
"""Get circuit breaker configuration."""
return self.circuit_breaker.copy()
def update_provider_settings(self, provider_updates: Dict[str, Dict[str, any]]) -> bool:
"""
Update provider-specific settings in bulk.
Args:
provider_updates: Dictionary of provider -> settings updates
Returns:
bool: True if updates were applied successfully
"""
try:
for provider_name, updates in provider_updates.items():
# Update rate limits
if 'rate_limit' in updates:
self.rate_limits[provider_name] = updates['rate_limit']
# Update enabled status
if 'enabled' in updates:
self.enabled_providers[provider_name] = updates['enabled']
# Update API key
if 'api_key' in updates:
self.set_api_key(provider_name, updates['api_key'])
return True
except Exception as e:
print(f"Error updating provider settings: {e}")
return False
def validate_configuration(self) -> Dict[str, any]:
"""
Validate the current configuration and return validation results.
Returns:
Dictionary with validation results and any issues found
"""
validation_result = {
'valid': True,
'warnings': [],
'errors': [],
'provider_status': {}
}
# Validate provider configurations
for provider_name, enabled in self.enabled_providers.items():
provider_status = {
'enabled': enabled,
'has_api_key': bool(self.api_keys.get(provider_name)),
'rate_limit': self.rate_limits.get(provider_name, 60)
}
# Check for potential issues
if enabled and provider_name in ['shodan'] and not provider_status['has_api_key']:
validation_result['warnings'].append(
f"Provider '{provider_name}' is enabled but missing API key"
)
validation_result['provider_status'][provider_name] = provider_status
# Validate task settings
if self.task_retry_settings['max_retries'] > 10:
validation_result['warnings'].append(
f"High retry count ({self.task_retry_settings['max_retries']}) may cause long delays"
)
# Validate concurrent settings
if self.max_concurrent_requests > 10:
validation_result['warnings'].append(
f"High concurrency ({self.max_concurrent_requests}) may overwhelm providers"
)
# Validate cache settings
if not os.path.exists(self.cache_settings['cache_base_dir']):
try:
os.makedirs(self.cache_settings['cache_base_dir'], exist_ok=True)
except Exception as e:
validation_result['errors'].append(f"Cannot create cache directory: {e}")
validation_result['valid'] = False
return validation_result
def load_from_env(self):
"""Load configuration from environment variables (only if not already set)."""
"""Load configuration from environment variables with enhanced validation."""
# Load API keys from environment
if os.getenv('SHODAN_API_KEY') and not self.api_keys['shodan']:
self.set_api_key('shodan', os.getenv('SHODAN_API_KEY'))
print("Loaded Shodan API key from environment")
# Override default settings from environment
self.default_recursion_depth = int(os.getenv('DEFAULT_RECURSION_DEPTH', '2'))
self.default_timeout = 30
self.max_concurrent_requests = 5
self.default_timeout = int(os.getenv('DEFAULT_TIMEOUT', '30'))
self.max_concurrent_requests = int(os.getenv('MAX_CONCURRENT_REQUESTS', '5'))
# Load task retry settings from environment
if os.getenv('TASK_MAX_RETRIES'):
self.task_retry_settings['max_retries'] = int(os.getenv('TASK_MAX_RETRIES'))
if os.getenv('TASK_BASE_BACKOFF'):
self.task_retry_settings['base_backoff_seconds'] = float(os.getenv('TASK_BASE_BACKOFF'))
# Load cache settings from environment
if os.getenv('CACHE_EXPIRY_HOURS'):
self.cache_settings['expiry_hours'] = int(os.getenv('CACHE_EXPIRY_HOURS'))
if os.getenv('CACHE_DISABLED'):
self.cache_settings['enabled'] = os.getenv('CACHE_DISABLED').lower() != 'true'
# Load circuit breaker settings
if os.getenv('CIRCUIT_BREAKER_DISABLED'):
self.circuit_breaker['enabled'] = os.getenv('CIRCUIT_BREAKER_DISABLED').lower() != 'true'
# Flask settings
self.flask_debug = os.getenv('FLASK_DEBUG', 'True').lower() == 'true'
print("Enhanced configuration loaded from environment")
def export_config_summary(self) -> Dict[str, any]:
"""
Export a summary of the current configuration for debugging/logging.
Returns:
Dictionary with configuration summary (API keys redacted)
"""
return {
'providers': {
provider: {
'enabled': self.enabled_providers.get(provider, False),
'has_api_key': bool(self.api_keys.get(provider)),
'rate_limit': self.rate_limits.get(provider, 60)
}
for provider in self.enabled_providers.keys()
},
'task_settings': {
'max_retries': self.task_retry_settings['max_retries'],
'max_concurrent_requests': self.max_concurrent_requests,
'large_entity_threshold': self.large_entity_threshold
},
'cache_settings': {
'enabled': self.cache_settings['enabled'],
'expiry_hours': self.cache_settings['expiry_hours'],
'base_directory': self.cache_settings['cache_base_dir']
},
'session_settings': {
'isolation_enabled': self.session_isolation['enforce_single_session_per_user'],
'consolidation_enabled': self.session_isolation['consolidate_session_data_on_replacement'],
'timeout_minutes': self.session_isolation['session_timeout_minutes']
},
'circuit_breaker': {
'enabled': self.circuit_breaker['enabled'],
'failure_threshold': self.circuit_breaker['failure_threshold'],
'recovery_timeout': self.circuit_breaker['recovery_timeout_seconds']
}
}
def create_session_config() -> SessionConfig:
"""Create a new session configuration instance."""
"""
Create a new enhanced session configuration instance.
Returns:
Configured SessionConfig instance
"""
session_config = SessionConfig()
session_config.load_from_env()
return session_config
# Validate configuration and log any issues
validation = session_config.validate_configuration()
if validation['warnings']:
print("Configuration warnings:")
for warning in validation['warnings']:
print(f" WARNING: {warning}")
if validation['errors']:
print("Configuration errors:")
for error in validation['errors']:
print(f" ERROR: {error}")
if not validation['valid']:
raise ValueError("Configuration validation failed - see errors above")
print(f"Enhanced session configuration created successfully")
return session_config
def create_test_config() -> SessionConfig:
"""
Create a test configuration with safe defaults for testing.
Returns:
Test-safe SessionConfig instance
"""
test_config = SessionConfig()
# Override settings for testing
test_config.max_concurrent_requests = 2
test_config.task_retry_settings['max_retries'] = 1
test_config.task_retry_settings['base_backoff_seconds'] = 0.1
test_config.cache_settings['expiry_hours'] = 1
test_config.session_isolation['session_timeout_minutes'] = 10
print("Test configuration created")
return test_config
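A minimal usage sketch of the configuration helpers above; the API key value is a placeholder:
from core.session_config import create_session_config

config = create_session_config()                 # loads env vars, validates, and prints any warnings
config.set_api_key('shodan', 'example-api-key')  # placeholder key; enables the shodan provider
print(config.export_config_summary())            # API keys are redacted in the summary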

View File

@@ -5,37 +5,153 @@ import time
import uuid
import redis
import pickle
from typing import Dict, Optional, Any, List
import hashlib
from typing import Dict, Optional, Any, List, Tuple
from core.scanner import Scanner
# WARNING: Using pickle can be a security risk if the data source is not trusted.
# In this case, we are only serializing/deserializing our own trusted Scanner objects,
# which is generally safe. Do not unpickle data from untrusted sources.
class UserIdentifier:
"""Handles user identification for session management."""
@staticmethod
def generate_user_fingerprint(client_ip: str, user_agent: str) -> str:
"""
Generate a unique fingerprint for a user based on IP and User-Agent.
Args:
client_ip: Client IP address
user_agent: User-Agent header value
Returns:
Unique user fingerprint hash
"""
# Create deterministic user identifier
user_data = f"{client_ip}:{user_agent[:100]}" # Limit UA to 100 chars
fingerprint = hashlib.sha256(user_data.encode()).hexdigest()[:16] # 16 char fingerprint
return f"user_{fingerprint}"
@staticmethod
def extract_request_info(request) -> Tuple[str, str]:
"""
Extract client IP and User-Agent from Flask request.
Args:
request: Flask request object
Returns:
Tuple of (client_ip, user_agent)
"""
# Handle proxy headers for real IP
client_ip = request.headers.get('X-Forwarded-For', '').split(',')[0].strip()
if not client_ip:
client_ip = request.headers.get('X-Real-IP', '')
if not client_ip:
client_ip = request.remote_addr or 'unknown'
user_agent = request.headers.get('User-Agent', 'unknown')
return client_ip, user_agent
class SessionConsolidator:
"""Handles consolidation of session data when replacing sessions."""
@staticmethod
def consolidate_scanner_data(old_scanner: 'Scanner', new_scanner: 'Scanner') -> 'Scanner':
"""
Consolidate useful data from old scanner into new scanner.
Args:
old_scanner: Scanner from terminated session
new_scanner: New scanner instance
Returns:
Enhanced new scanner with consolidated data
"""
try:
# Consolidate graph data if old scanner has valuable data
if old_scanner and hasattr(old_scanner, 'graph') and old_scanner.graph:
old_stats = old_scanner.graph.get_statistics()
if old_stats['basic_metrics']['total_nodes'] > 0:
print(f"Consolidating graph data: {old_stats['basic_metrics']['total_nodes']} nodes, {old_stats['basic_metrics']['total_edges']} edges")
# Transfer nodes and edges to new scanner's graph
for node_id, node_data in old_scanner.graph.graph.nodes(data=True):
# Add node to new graph with all attributes
new_scanner.graph.graph.add_node(node_id, **node_data)
for source, target, edge_data in old_scanner.graph.graph.edges(data=True):
# Add edge to new graph with all attributes
new_scanner.graph.graph.add_edge(source, target, **edge_data)
# Update correlation index
if hasattr(old_scanner.graph, 'correlation_index'):
new_scanner.graph.correlation_index = old_scanner.graph.correlation_index.copy()
# Update timestamps
new_scanner.graph.creation_time = old_scanner.graph.creation_time
new_scanner.graph.last_modified = old_scanner.graph.last_modified
# Consolidate provider statistics
if old_scanner and hasattr(old_scanner, 'providers') and old_scanner.providers:
for old_provider in old_scanner.providers:
# Find matching provider in new scanner
matching_new_provider = None
for new_provider in new_scanner.providers:
if new_provider.get_name() == old_provider.get_name():
matching_new_provider = new_provider
break
if matching_new_provider:
# Transfer cumulative statistics
matching_new_provider.total_requests += old_provider.total_requests
matching_new_provider.successful_requests += old_provider.successful_requests
matching_new_provider.failed_requests += old_provider.failed_requests
matching_new_provider.total_relationships_found += old_provider.total_relationships_found
# Transfer cache statistics if available
if hasattr(old_provider, 'cache_hits'):
matching_new_provider.cache_hits += getattr(old_provider, 'cache_hits', 0)
matching_new_provider.cache_misses += getattr(old_provider, 'cache_misses', 0)
print(f"Consolidated {old_provider.get_name()} provider stats: {old_provider.total_requests} requests")
return new_scanner
except Exception as e:
print(f"Warning: Error during session consolidation: {e}")
return new_scanner
class SessionManager:
"""
Manages multiple scanner instances for concurrent user sessions using Redis.
Manages single scanner session per user using Redis with user identification.
Enforces one active session per user for consistent state management.
"""
def __init__(self, session_timeout_minutes: int = 60):
"""
Initialize session manager with a Redis backend.
Initialize session manager with Redis backend and user tracking.
"""
self.redis_client = redis.StrictRedis(db=0, decode_responses=False)
self.session_timeout = session_timeout_minutes * 60 # Convert to seconds
self.lock = threading.Lock() # Lock for local operations, Redis handles atomic ops
self.lock = threading.Lock()
# User identification helper
self.user_identifier = UserIdentifier()
self.consolidator = SessionConsolidator()
# Start cleanup thread
self.cleanup_thread = threading.Thread(target=self._cleanup_loop, daemon=True)
self.cleanup_thread.start()
print(f"SessionManager initialized with Redis backend and {session_timeout_minutes}min timeout")
print(f"SessionManager initialized with Redis backend, user tracking, and {session_timeout_minutes}min timeout")
def __getstate__(self):
"""Prepare SessionManager for pickling."""
state = self.__dict__.copy()
# Exclude unpickleable attributes - Redis client and threading objects
# Exclude unpickleable attributes
unpicklable_attrs = ['lock', 'cleanup_thread', 'redis_client']
for attr in unpicklable_attrs:
if attr in state:
@@ -53,67 +169,108 @@ class SessionManager:
self.cleanup_thread.start()
def _get_session_key(self, session_id: str) -> str:
"""Generates the Redis key for a session."""
"""Generate Redis key for a session."""
return f"dnsrecon:session:{session_id}"
def _get_user_session_key(self, user_fingerprint: str) -> str:
"""Generate Redis key for user -> session mapping."""
return f"dnsrecon:user:{user_fingerprint}"
def _get_stop_signal_key(self, session_id: str) -> str:
"""Generates the Redis key for a session's stop signal."""
"""Generate Redis key for session stop signal."""
return f"dnsrecon:stop:{session_id}"
def create_session(self) -> str:
def create_or_replace_user_session(self, client_ip: str, user_agent: str) -> str:
"""
Create a new user session and store it in Redis.
Create new session for user, replacing any existing session.
Consolidates data from previous session if it exists.
Args:
client_ip: Client IP address
user_agent: User-Agent header
Returns:
New session ID
"""
session_id = str(uuid.uuid4())
print(f"=== CREATING SESSION {session_id} IN REDIS ===")
user_fingerprint = self.user_identifier.generate_user_fingerprint(client_ip, user_agent)
new_session_id = str(uuid.uuid4())
print(f"=== CREATING/REPLACING SESSION FOR USER {user_fingerprint} ===")
try:
# Check for existing user session
existing_session_id = self._get_user_current_session(user_fingerprint)
old_scanner = None
if existing_session_id:
print(f"Found existing session {existing_session_id} for user {user_fingerprint}")
# Get old scanner data for consolidation
old_scanner = self.get_session(existing_session_id)
# Terminate old session
self._terminate_session_internal(existing_session_id, cleanup_user_mapping=False)
print(f"Terminated old session {existing_session_id}")
# Create new session config and scanner
from core.session_config import create_session_config
session_config = create_session_config()
scanner_instance = Scanner(session_config=session_config)
new_scanner = Scanner(session_config=session_config)
# Set the session ID on the scanner for cross-process stop signal management
scanner_instance.session_id = session_id
# Set session ID on scanner for cross-process operations
new_scanner.session_id = new_session_id
# Consolidate data from old session if available
if old_scanner:
new_scanner = self.consolidator.consolidate_scanner_data(old_scanner, new_scanner)
print(f"Consolidated data from previous session")
# Create session data
session_data = {
'scanner': scanner_instance,
'scanner': new_scanner,
'config': session_config,
'created_at': time.time(),
'last_activity': time.time(),
'status': 'active'
'status': 'active',
'user_fingerprint': user_fingerprint,
'client_ip': client_ip,
'user_agent': user_agent[:200] # Truncate for storage
}
# Serialize the entire session data dictionary using pickle
# Store session in Redis
session_key = self._get_session_key(new_session_id)
serialized_data = pickle.dumps(session_data)
# Store in Redis
session_key = self._get_session_key(session_id)
self.redis_client.setex(session_key, self.session_timeout, serialized_data)
# Initialize stop signal as False
stop_key = self._get_stop_signal_key(session_id)
# Update user -> session mapping
user_session_key = self._get_user_session_key(user_fingerprint)
self.redis_client.setex(user_session_key, self.session_timeout, new_session_id.encode('utf-8'))
# Initialize stop signal
stop_key = self._get_stop_signal_key(new_session_id)
self.redis_client.setex(stop_key, self.session_timeout, b'0')
print(f"Session {session_id} stored in Redis with stop signal initialized")
return session_id
print(f"Created new session {new_session_id} for user {user_fingerprint}")
return new_session_id
except Exception as e:
print(f"ERROR: Failed to create session {session_id}: {e}")
print(f"ERROR: Failed to create session for user {user_fingerprint}: {e}")
raise
def _get_user_current_session(self, user_fingerprint: str) -> Optional[str]:
"""Get current session ID for a user."""
try:
user_session_key = self._get_user_session_key(user_fingerprint)
session_id_bytes = self.redis_client.get(user_session_key)
if session_id_bytes:
return session_id_bytes.decode('utf-8')
return None
except Exception as e:
print(f"Error getting user session: {e}")
return None
def set_stop_signal(self, session_id: str) -> bool:
"""
Set the stop signal for a session (cross-process safe).
Args:
session_id: Session identifier
Returns:
bool: True if signal was set successfully
"""
"""Set stop signal for session (cross-process safe)."""
try:
stop_key = self._get_stop_signal_key(session_id)
# Set stop signal to '1' with the same TTL as the session
self.redis_client.setex(stop_key, self.session_timeout, b'1')
print(f"Stop signal set for session {session_id}")
return True
@@ -122,15 +279,7 @@ class SessionManager:
return False
def is_stop_requested(self, session_id: str) -> bool:
"""
Check if stop is requested for a session (cross-process safe).
Args:
session_id: Session identifier
Returns:
bool: True if stop is requested
"""
"""Check if stop is requested for session (cross-process safe)."""
try:
stop_key = self._get_stop_signal_key(session_id)
value = self.redis_client.get(stop_key)
@@ -140,15 +289,7 @@ class SessionManager:
return False
def clear_stop_signal(self, session_id: str) -> bool:
"""
Clear the stop signal for a session.
Args:
session_id: Session identifier
Returns:
bool: True if signal was cleared successfully
"""
"""Clear stop signal for session."""
try:
stop_key = self._get_stop_signal_key(session_id)
self.redis_client.setex(stop_key, self.session_timeout, b'0')
@@ -159,13 +300,13 @@ class SessionManager:
return False
def _get_session_data(self, session_id: str) -> Optional[Dict[str, Any]]:
"""Retrieves and deserializes session data from Redis."""
"""Retrieve and deserialize session data from Redis."""
try:
session_key = self._get_session_key(session_id)
serialized_data = self.redis_client.get(session_key)
if serialized_data:
session_data = pickle.loads(serialized_data)
# Ensure the scanner has the correct session ID for stop signal checking
# Ensure scanner has correct session ID
if 'scanner' in session_data and session_data['scanner']:
session_data['scanner'].session_id = session_id
return session_data
@@ -175,37 +316,32 @@ class SessionManager:
return None
def _save_session_data(self, session_id: str, session_data: Dict[str, Any]) -> bool:
"""
Serializes and saves session data back to Redis with updated TTL.
Returns:
bool: True if save was successful
"""
"""Serialize and save session data to Redis with updated TTL."""
try:
session_key = self._get_session_key(session_id)
serialized_data = pickle.dumps(session_data)
result = self.redis_client.setex(session_key, self.session_timeout, serialized_data)
# Also refresh user mapping TTL if available
if 'user_fingerprint' in session_data:
user_session_key = self._get_user_session_key(session_data['user_fingerprint'])
self.redis_client.setex(user_session_key, self.session_timeout, session_id.encode('utf-8'))
return result
except Exception as e:
print(f"ERROR: Failed to save session data for {session_id}: {e}")
return False
def update_session_scanner(self, session_id: str, scanner: 'Scanner') -> bool:
"""
Updates just the scanner object in a session with immediate persistence.
Returns:
bool: True if update was successful
"""
"""Update scanner object in session with immediate persistence."""
try:
session_data = self._get_session_data(session_id)
if session_data:
# Ensure scanner has the session ID
# Ensure scanner has session ID
scanner.session_id = session_id
session_data['scanner'] = scanner
session_data['last_activity'] = time.time()
# Immediately save to Redis for GUI updates
success = self._save_session_data(session_id, session_data)
if success:
print(f"Scanner state updated for session {session_id} (status: {scanner.status})")
@@ -220,16 +356,7 @@ class SessionManager:
return False
def update_scanner_status(self, session_id: str, status: str) -> bool:
"""
Quickly update just the scanner status for immediate GUI feedback.
Args:
session_id: Session identifier
status: New scanner status
Returns:
bool: True if update was successful
"""
"""Quickly update scanner status for immediate GUI feedback."""
try:
session_data = self._get_session_data(session_id)
if session_data and 'scanner' in session_data:
@@ -248,9 +375,7 @@ class SessionManager:
return False
def get_session(self, session_id: str) -> Optional[Scanner]:
"""
Get scanner instance for a session from Redis with session ID management.
"""
"""Get scanner instance for session with session ID management."""
if not session_id:
return None
@@ -265,21 +390,13 @@ class SessionManager:
scanner = session_data.get('scanner')
if scanner:
# Ensure the scanner can check the Redis-based stop signal
# Ensure scanner can check Redis-based stop signal
scanner.session_id = session_id
return scanner
def get_session_status_only(self, session_id: str) -> Optional[str]:
"""
Get just the scanner status without full session retrieval (for performance).
Args:
session_id: Session identifier
Returns:
Scanner status string or None if not found
"""
"""Get scanner status without full session retrieval (for performance)."""
try:
session_data = self._get_session_data(session_id)
if session_data and 'scanner' in session_data:
@@ -290,16 +407,18 @@ class SessionManager:
return None
def terminate_session(self, session_id: str) -> bool:
"""
Terminate a specific session in Redis with reliable stop signal and immediate status update.
"""
"""Terminate specific session with reliable stop signal and immediate status update."""
return self._terminate_session_internal(session_id, cleanup_user_mapping=True)
def _terminate_session_internal(self, session_id: str, cleanup_user_mapping: bool = True) -> bool:
"""Internal session termination with configurable user mapping cleanup."""
print(f"=== TERMINATING SESSION {session_id} ===")
try:
# First, set the stop signal
# Set stop signal first
self.set_stop_signal(session_id)
# Update scanner status to stopped immediately for GUI feedback
# Update scanner status immediately for GUI feedback
self.update_scanner_status(session_id, 'stopped')
session_data = self._get_session_data(session_id)
@@ -310,16 +429,19 @@ class SessionManager:
scanner = session_data.get('scanner')
if scanner and scanner.status == 'running':
print(f"Stopping scan for session: {session_id}")
# The scanner will check the Redis stop signal
scanner.stop_scan()
# Update the scanner state immediately
self.update_session_scanner(session_id, scanner)
# Wait a moment for graceful shutdown
# Wait for graceful shutdown
time.sleep(0.5)
# Delete session data and stop signal from Redis
# Clean up user mapping if requested
if cleanup_user_mapping and 'user_fingerprint' in session_data:
user_session_key = self._get_user_session_key(session_data['user_fingerprint'])
self.redis_client.delete(user_session_key)
print(f"Cleaned up user mapping for {session_data['user_fingerprint']}")
# Delete session data and stop signal
session_key = self._get_session_key(session_id)
stop_key = self._get_stop_signal_key(session_id)
self.redis_client.delete(session_key)
@@ -333,22 +455,30 @@ class SessionManager:
return False
def _cleanup_loop(self) -> None:
"""
Background thread to cleanup inactive sessions and orphaned stop signals.
"""
"""Background thread to cleanup inactive sessions and orphaned signals."""
while True:
try:
# Clean up orphaned stop signals
stop_keys = self.redis_client.keys("dnsrecon:stop:*")
for stop_key in stop_keys:
# Extract session ID from stop key
session_id = stop_key.decode('utf-8').split(':')[-1]
session_key = self._get_session_key(session_id)
# If session doesn't exist but stop signal does, clean it up
if not self.redis_client.exists(session_key):
self.redis_client.delete(stop_key)
print(f"Cleaned up orphaned stop signal for session {session_id}")
# Clean up orphaned user mappings
user_keys = self.redis_client.keys("dnsrecon:user:*")
for user_key in user_keys:
session_id_bytes = self.redis_client.get(user_key)
if session_id_bytes:
session_id = session_id_bytes.decode('utf-8')
session_key = self._get_session_key(session_id)
if not self.redis_client.exists(session_key):
self.redis_client.delete(user_key)
print(f"Cleaned up orphaned user mapping for session {session_id}")
except Exception as e:
print(f"Error in cleanup loop: {e}")
@@ -369,6 +499,8 @@ class SessionManager:
scanner = session_data.get('scanner')
sessions.append({
'session_id': session_id,
'user_fingerprint': session_data.get('user_fingerprint', 'unknown'),
'client_ip': session_data.get('client_ip', 'unknown'),
'created_at': session_data.get('created_at'),
'last_activity': session_data.get('last_activity'),
'scanner_status': scanner.status if scanner else 'unknown',
@@ -384,9 +516,11 @@ class SessionManager:
"""Get session manager statistics."""
try:
session_keys = self.redis_client.keys("dnsrecon:session:*")
user_keys = self.redis_client.keys("dnsrecon:user:*")
stop_keys = self.redis_client.keys("dnsrecon:stop:*")
active_sessions = len(session_keys)
unique_users = len(user_keys)
running_scans = 0
for session_key in session_keys:
@@ -397,16 +531,46 @@ class SessionManager:
return {
'total_active_sessions': active_sessions,
'unique_users': unique_users,
'running_scans': running_scans,
'total_stop_signals': len(stop_keys)
'total_stop_signals': len(stop_keys),
'average_sessions_per_user': round(active_sessions / unique_users, 2) if unique_users > 0 else 0
}
except Exception as e:
print(f"ERROR: Failed to get statistics: {e}")
return {
'total_active_sessions': 0,
'unique_users': 0,
'running_scans': 0,
'total_stop_signals': 0
'total_stop_signals': 0,
'average_sessions_per_user': 0
}
def get_session_info(self, session_id: str) -> Dict[str, Any]:
"""Get detailed information about a specific session."""
try:
session_data = self._get_session_data(session_id)
if not session_data:
return {'error': 'Session not found'}
scanner = session_data.get('scanner')
return {
'session_id': session_id,
'user_fingerprint': session_data.get('user_fingerprint', 'unknown'),
'client_ip': session_data.get('client_ip', 'unknown'),
'user_agent': session_data.get('user_agent', 'unknown'),
'created_at': session_data.get('created_at'),
'last_activity': session_data.get('last_activity'),
'status': session_data.get('status'),
'scanner_status': scanner.status if scanner else 'unknown',
'current_target': scanner.current_target if scanner else None,
'session_age_minutes': round((time.time() - session_data.get('created_at', time.time())) / 60, 1)
}
except Exception as e:
print(f"ERROR: Failed to get session info for {session_id}: {e}")
return {'error': f'Failed to get session info: {str(e)}'}
# Global session manager instance
session_manager = SessionManager(session_timeout_minutes=60)
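A sketch of how a Flask route could create and fetch the per-user session; the route path and response fields are illustrative assumptions, not part of this module:
from flask import Flask, request, jsonify
from core.session_manager import session_manager, UserIdentifier

app = Flask(__name__)

@app.route('/api/scan/start', methods=['POST'])   # hypothetical route
def start_scan():
    # Fingerprint the caller and enforce the one-session-per-user policy
    client_ip, user_agent = UserIdentifier.extract_request_info(request)
    session_id = session_manager.create_or_replace_user_session(client_ip, user_agent)
    scanner = session_manager.get_session(session_id)
    return jsonify({'session_id': session_id,
                    'scanner_status': scanner.status if scanner else 'unknown'})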

564
core/task_manager.py Normal file
View File

@@ -0,0 +1,564 @@
# dnsrecon/core/task_manager.py
import threading
import time
import uuid
from enum import Enum
from dataclasses import dataclass, field
from typing import Dict, List, Optional, Any, Set
from datetime import datetime, timezone, timedelta
from collections import deque
from utils.helpers import _is_valid_ip, _is_valid_domain
class TaskStatus(Enum):
"""Enumeration of task execution statuses."""
PENDING = "pending"
RUNNING = "running"
SUCCEEDED = "succeeded"
FAILED_RETRYING = "failed_retrying"
FAILED_PERMANENT = "failed_permanent"
CANCELLED = "cancelled"
class TaskType(Enum):
"""Enumeration of task types for provider queries."""
DOMAIN_QUERY = "domain_query"
IP_QUERY = "ip_query"
GRAPH_UPDATE = "graph_update"
@dataclass
class TaskResult:
"""Result of a task execution."""
success: bool
data: Optional[Any] = None
error: Optional[str] = None
metadata: Dict[str, Any] = field(default_factory=dict)
@dataclass
class ReconTask:
"""Represents a single reconnaissance task with retry logic."""
task_id: str
task_type: TaskType
target: str
provider_name: str
depth: int
status: TaskStatus = TaskStatus.PENDING
created_at: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
# Retry configuration
max_retries: int = 3
current_retry: int = 0
base_backoff_seconds: float = 1.0
max_backoff_seconds: float = 60.0
# Execution tracking
last_attempt_at: Optional[datetime] = None
next_retry_at: Optional[datetime] = None
execution_history: List[Dict[str, Any]] = field(default_factory=list)
# Results
result: Optional[TaskResult] = None
def __post_init__(self):
"""Initialize additional fields after creation."""
if not self.task_id:
self.task_id = str(uuid.uuid4())[:8]
def calculate_next_retry_time(self) -> Optional[datetime]:
"""Calculate next retry time with exponential backoff and jitter."""
if self.current_retry >= self.max_retries:
return None
# Exponential backoff with jitter
backoff_time = min(
self.max_backoff_seconds,
self.base_backoff_seconds * (2 ** self.current_retry)
)
# Add deterministic jitter (roughly ±12.5%, derived from the task ID)
jitter = backoff_time * 0.25 * (0.5 - hash(self.task_id) % 1000 / 1000.0)
final_backoff = max(self.base_backoff_seconds, backoff_time + jitter)
return datetime.now(timezone.utc) + timedelta(seconds=final_backoff)
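# Example with the defaults (base=1.0s, max=60.0s): successive retries are
# scheduled roughly 1s, 2s and 4s after each failure, each nudged by the
# deterministic jitter computed above.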
def should_retry(self) -> bool:
"""Determine if task should be retried based on status and retry count."""
if self.status != TaskStatus.FAILED_RETRYING:
return False
if self.current_retry >= self.max_retries:
return False
if self.next_retry_at and datetime.now(timezone.utc) < self.next_retry_at:
return False
return True
def mark_failed(self, error: str, metadata: Dict[str, Any] = None):
"""Mark task as failed and prepare for retry or permanent failure."""
self.current_retry += 1
self.last_attempt_at = datetime.now(timezone.utc)
# Record execution history
execution_record = {
'attempt': self.current_retry,
'timestamp': self.last_attempt_at.isoformat(),
'error': error,
'metadata': metadata or {}
}
self.execution_history.append(execution_record)
if self.current_retry >= self.max_retries:
self.status = TaskStatus.FAILED_PERMANENT
self.result = TaskResult(success=False, error=f"Permanent failure after {self.max_retries} attempts: {error}")
else:
self.status = TaskStatus.FAILED_RETRYING
self.next_retry_at = self.calculate_next_retry_time()
def mark_succeeded(self, data: Any = None, metadata: Dict[str, Any] = None):
"""Mark task as successfully completed."""
self.status = TaskStatus.SUCCEEDED
self.last_attempt_at = datetime.now(timezone.utc)
self.result = TaskResult(success=True, data=data, metadata=metadata or {})
# Record successful execution
execution_record = {
'attempt': self.current_retry + 1,
'timestamp': self.last_attempt_at.isoformat(),
'success': True,
'metadata': metadata or {}
}
self.execution_history.append(execution_record)
def get_summary(self) -> Dict[str, Any]:
"""Get task summary for progress reporting."""
return {
'task_id': self.task_id,
'task_type': self.task_type.value,
'target': self.target,
'provider': self.provider_name,
'status': self.status.value,
'current_retry': self.current_retry,
'max_retries': self.max_retries,
'created_at': self.created_at.isoformat(),
'last_attempt_at': self.last_attempt_at.isoformat() if self.last_attempt_at else None,
'next_retry_at': self.next_retry_at.isoformat() if self.next_retry_at else None,
'total_attempts': len(self.execution_history),
'has_result': self.result is not None
}
class TaskQueue:
"""Thread-safe task queue with retry logic and priority handling."""
def __init__(self, max_concurrent_tasks: int = 5):
"""Initialize task queue."""
self.max_concurrent_tasks = max_concurrent_tasks
self.tasks: Dict[str, ReconTask] = {}
self.pending_queue = deque()
self.retry_queue = deque()
self.running_tasks: Set[str] = set()
self._lock = threading.Lock()
self._stop_event = threading.Event()
def __getstate__(self):
"""Prepare TaskQueue for pickling by excluding unpicklable objects."""
state = self.__dict__.copy()
# Exclude the unpickleable '_lock' and '_stop_event' attributes
if '_lock' in state:
del state['_lock']
if '_stop_event' in state:
del state['_stop_event']
return state
def __setstate__(self, state):
"""Restore TaskQueue after unpickling by reconstructing threading objects."""
self.__dict__.update(state)
# Re-initialize the '_lock' and '_stop_event' attributes
self._lock = threading.Lock()
self._stop_event = threading.Event()
def add_task(self, task: ReconTask) -> str:
"""Add task to queue."""
with self._lock:
self.tasks[task.task_id] = task
self.pending_queue.append(task.task_id)
print(f"Added task {task.task_id}: {task.provider_name} query for {task.target}")
return task.task_id
def get_next_ready_task(self) -> Optional[ReconTask]:
"""Get next task ready for execution."""
with self._lock:
# Check if we have room for more concurrent tasks
if len(self.running_tasks) >= self.max_concurrent_tasks:
return None
# First priority: retry queue (tasks ready for retry)
while self.retry_queue:
task_id = self.retry_queue.popleft()
if task_id in self.tasks:
task = self.tasks[task_id]
if task.should_retry():
task.status = TaskStatus.RUNNING
self.running_tasks.add(task_id)
print(f"Retrying task {task_id} (attempt {task.current_retry + 1})")
return task
# Second priority: pending queue (new tasks)
while self.pending_queue:
task_id = self.pending_queue.popleft()
if task_id in self.tasks:
task = self.tasks[task_id]
if task.status == TaskStatus.PENDING:
task.status = TaskStatus.RUNNING
self.running_tasks.add(task_id)
print(f"Starting task {task_id}")
return task
return None
def complete_task(self, task_id: str, success: bool, data: Any = None,
error: str = None, metadata: Dict[str, Any] = None):
"""Mark task as completed (success or failure)."""
with self._lock:
if task_id not in self.tasks:
return
task = self.tasks[task_id]
self.running_tasks.discard(task_id)
if success:
task.mark_succeeded(data=data, metadata=metadata)
print(f"Task {task_id} succeeded")
else:
task.mark_failed(error or "Unknown error", metadata=metadata)
if task.status == TaskStatus.FAILED_RETRYING:
self.retry_queue.append(task_id)
print(f"Task {task_id} failed, scheduled for retry at {task.next_retry_at}")
else:
print(f"Task {task_id} permanently failed after {task.current_retry} attempts")
def cancel_all_tasks(self):
"""Cancel all pending and running tasks."""
with self._lock:
self._stop_event.set()
for task in self.tasks.values():
if task.status in [TaskStatus.PENDING, TaskStatus.RUNNING, TaskStatus.FAILED_RETRYING]:
task.status = TaskStatus.CANCELLED
self.pending_queue.clear()
self.retry_queue.clear()
self.running_tasks.clear()
print("All tasks cancelled")
def is_complete(self) -> bool:
"""Check if all tasks are complete (succeeded, permanently failed, or cancelled)."""
with self._lock:
for task in self.tasks.values():
if task.status in [TaskStatus.PENDING, TaskStatus.RUNNING, TaskStatus.FAILED_RETRYING]:
return False
return True
def get_statistics(self) -> Dict[str, Any]:
"""Get queue statistics."""
with self._lock:
stats = {
'total_tasks': len(self.tasks),
'pending': len(self.pending_queue),
'running': len(self.running_tasks),
'retry_queue': len(self.retry_queue),
'succeeded': 0,
'failed_permanent': 0,
'cancelled': 0,
'failed_retrying': 0
}
for task in self.tasks.values():
if task.status == TaskStatus.SUCCEEDED:
stats['succeeded'] += 1
elif task.status == TaskStatus.FAILED_PERMANENT:
stats['failed_permanent'] += 1
elif task.status == TaskStatus.CANCELLED:
stats['cancelled'] += 1
elif task.status == TaskStatus.FAILED_RETRYING:
stats['failed_retrying'] += 1
stats['completion_rate'] = (stats['succeeded'] / stats['total_tasks'] * 100) if stats['total_tasks'] > 0 else 0
stats['is_complete'] = self.is_complete()
return stats
def get_task_summaries(self) -> List[Dict[str, Any]]:
"""Get summaries of all tasks for detailed progress reporting."""
with self._lock:
return [task.get_summary() for task in self.tasks.values()]
def get_failed_tasks(self) -> List[ReconTask]:
"""Get all permanently failed tasks for analysis."""
with self._lock:
return [task for task in self.tasks.values() if task.status == TaskStatus.FAILED_PERMANENT]
class TaskExecutor:
"""Executes reconnaissance tasks using providers."""
def __init__(self, providers: List, graph_manager, logger):
"""Initialize task executor."""
self.providers = {provider.get_name(): provider for provider in providers}
self.graph = graph_manager
self.logger = logger
def execute_task(self, task: ReconTask) -> TaskResult:
"""
Execute a single reconnaissance task.
Args:
task: Task to execute
Returns:
TaskResult with success/failure information
"""
try:
print(f"Executing task {task.task_id}: {task.provider_name} query for {task.target}")
provider = self.providers.get(task.provider_name)
if not provider:
return TaskResult(
success=False,
error=f"Provider {task.provider_name} not available"
)
if not provider.is_available():
return TaskResult(
success=False,
error=f"Provider {task.provider_name} is not available (missing API key or configuration)"
)
# Execute provider query based on task type
if task.task_type == TaskType.DOMAIN_QUERY:
if not _is_valid_domain(task.target):
return TaskResult(success=False, error=f"Invalid domain: {task.target}")
relationships = provider.query_domain(task.target)
elif task.task_type == TaskType.IP_QUERY:
if not _is_valid_ip(task.target):
return TaskResult(success=False, error=f"Invalid IP: {task.target}")
relationships = provider.query_ip(task.target)
else:
return TaskResult(success=False, error=f"Unsupported task type: {task.task_type}")
# Process results and update graph
from core.graph_manager import NodeType
new_targets = set()
relationships_added = 0
for source, target, rel_type, confidence, raw_data in relationships:
# Add nodes to graph
if _is_valid_ip(target):
self.graph.add_node(target, NodeType.IP)
new_targets.add(target)
elif target.startswith('AS') and target[2:].isdigit():
self.graph.add_node(target, NodeType.ASN)
elif _is_valid_domain(target):
self.graph.add_node(target, NodeType.DOMAIN)
new_targets.add(target)
# Add edge to graph
if self.graph.add_edge(source, target, rel_type, confidence, task.provider_name, raw_data):
relationships_added += 1
# Log forensic information
self.logger.logger.info(
f"Task {task.task_id} completed: {len(relationships)} relationships found, "
f"{relationships_added} added to graph, {len(new_targets)} new targets"
)
return TaskResult(
success=True,
data={
'relationships': relationships,
'new_targets': list(new_targets),
'relationships_added': relationships_added
},
metadata={
'provider': task.provider_name,
'target': task.target,
'depth': task.depth,
'execution_time': datetime.now(timezone.utc).isoformat()
}
)
except Exception as e:
error_msg = f"Task execution failed: {str(e)}"
print(f"ERROR: {error_msg} for task {task.task_id}")
self.logger.logger.error(error_msg)
return TaskResult(
success=False,
error=error_msg,
metadata={
'provider': task.provider_name,
'target': task.target,
'exception_type': type(e).__name__
}
)
class TaskManager:
"""High-level task management for reconnaissance scans."""
def __init__(self, providers: List, graph_manager, logger, max_concurrent_tasks: int = 5):
"""Initialize task manager."""
self.task_queue = TaskQueue(max_concurrent_tasks)
self.task_executor = TaskExecutor(providers, graph_manager, logger)
self.logger = logger
# Execution control
self._stop_event = threading.Event()
self._execution_threads: List[threading.Thread] = []
self._is_running = False
def create_provider_tasks(self, target: str, depth: int, providers: List) -> List[str]:
"""
Create tasks for querying all eligible providers for a target.
Args:
target: Domain or IP to query
depth: Current recursion depth
providers: List of available providers
Returns:
List of created task IDs
"""
task_ids = []
is_ip = _is_valid_ip(target)
target_key = 'ips' if is_ip else 'domains'
task_type = TaskType.IP_QUERY if is_ip else TaskType.DOMAIN_QUERY
for provider in providers:
if provider.get_eligibility().get(target_key) and provider.is_available():
task = ReconTask(
task_id=str(uuid.uuid4())[:8],
task_type=task_type,
target=target,
provider_name=provider.get_name(),
depth=depth,
max_retries=3 # Configure retries per task type/provider
)
task_id = self.task_queue.add_task(task)
task_ids.append(task_id)
return task_ids
def start_execution(self, max_workers: int = 3):
"""Start task execution with specified number of worker threads."""
if self._is_running:
print("Task execution already running")
return
self._is_running = True
self._stop_event.clear()
print(f"Starting task execution with {max_workers} workers")
for i in range(max_workers):
worker_thread = threading.Thread(
target=self._worker_loop,
name=f"TaskWorker-{i+1}",
daemon=True
)
worker_thread.start()
self._execution_threads.append(worker_thread)
def stop_execution(self):
"""Stop task execution and cancel all tasks."""
print("Stopping task execution")
self._stop_event.set()
self.task_queue.cancel_all_tasks()
self._is_running = False
# Wait for worker threads to finish
for thread in self._execution_threads:
thread.join(timeout=5.0)
self._execution_threads.clear()
print("Task execution stopped")
def _worker_loop(self):
"""Worker thread loop for executing tasks."""
thread_name = threading.current_thread().name
print(f"{thread_name} started")
while not self._stop_event.is_set():
try:
# Get next task to execute
task = self.task_queue.get_next_ready_task()
if task is None:
# No tasks ready, check if we should exit
if self.task_queue.is_complete() or self._stop_event.is_set():
break
time.sleep(0.1) # Brief sleep before checking again
continue
# Execute the task
result = self.task_executor.execute_task(task)
# Complete the task in queue
self.task_queue.complete_task(
task.task_id,
success=result.success,
data=result.data,
error=result.error,
metadata=result.metadata
)
except Exception as e:
print(f"ERROR: Worker {thread_name} encountered error: {e}")
# Continue running even if individual task fails
continue
print(f"{thread_name} finished")
def wait_for_completion(self, timeout_seconds: int = 300) -> bool:
"""
Wait for all tasks to complete.
Args:
timeout_seconds: Maximum time to wait
Returns:
True if all tasks completed, False if timeout
"""
start_time = time.time()
while time.time() - start_time < timeout_seconds:
if self.task_queue.is_complete():
return True
if self._stop_event.is_set():
return False
time.sleep(1.0) # Check every second
print(f"Timeout waiting for task completion after {timeout_seconds} seconds")
return False
def get_progress_report(self) -> Dict[str, Any]:
"""Get detailed progress report for UI updates."""
stats = self.task_queue.get_statistics()
failed_tasks = self.task_queue.get_failed_tasks()
return {
'statistics': stats,
'failed_tasks': [task.get_summary() for task in failed_tasks],
'is_running': self._is_running,
'worker_count': len(self._execution_threads),
'detailed_tasks': self.task_queue.get_task_summaries() if stats['total_tasks'] < 50 else [] # Limit detail for performance
}