Compare commits: 41d556e2ce ... database_c

17 commits:

- 9f3b17e658
- eb9eea127b
- ae07635ab6
- d7adf9ad8b
- 39ce0e9d11
- 926f9e1096
- 9499e62ccc
- 89ae06482e
- 7fe7ca41ba
- 949fbdbb45
- 689e8c00d4
- 3511f18f9a
- 72f7056bc7
- 2ae33bc5ba
- c91913fa13
- 2185177a84
- b7a57f1552
.env.example (new file, 34 lines)

```diff
@@ -0,0 +1,34 @@
+# ===============================================
+# DNSRecon Environment Variables
+# ===============================================
+# Copy this file to .env and fill in your values.
+
+# --- API Keys ---
+# Add your Shodan API key for the Shodan provider to be enabled.
+SHODAN_API_KEY=
+
+# --- Flask & Session Settings ---
+# A strong, random secret key is crucial for session security.
+FLASK_SECRET_KEY=your-very-secret-and-random-key-here
+FLASK_HOST=127.0.0.1
+FLASK_PORT=5000
+FLASK_DEBUG=True
+# How long a user's session in the browser lasts (in hours).
+FLASK_PERMANENT_SESSION_LIFETIME_HOURS=2
+# How long inactive scanner data is stored in Redis (in minutes).
+SESSION_TIMEOUT_MINUTES=60
+
+
+# --- Application Core Settings ---
+# The default number of levels to recurse when scanning.
+DEFAULT_RECURSION_DEPTH=2
+# Default timeout for provider API requests in seconds.
+DEFAULT_TIMEOUT=30
+# The number of concurrent provider requests to make.
+MAX_CONCURRENT_REQUESTS=5
+# The number of results from a provider that triggers the "large entity" grouping.
+LARGE_ENTITY_THRESHOLD=100
+# The number of times to retry a target if a provider fails.
+MAX_RETRIES_PER_TARGET=3
+# How long cached provider responses are stored (in hours).
+CACHE_EXPIRY_HOURS=12
```
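The secret-key placeholder above should never ship as-is. A quick way to produce a suitable value with the standard library (an illustrative suggestion, not part of this changeset):

```python
# Generate a random value for FLASK_SECRET_KEY (illustrative; stdlib only).
import secrets

print(secrets.token_hex(32))  # paste the output into .env as FLASK_SECRET_KEY=...
```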
app.py (233 lines changed)

```diff
@@ -1,7 +1,8 @@
+# dnsrecon-reduced/app.py
 """
 Flask application entry point for DNSRecon web interface.
 Provides REST API endpoints and serves the web interface with user session support.
-Enhanced with better session debugging and isolation.
 """

 import json
@@ -15,48 +16,36 @@ from config import config


 app = Flask(__name__)
-app.config['SECRET_KEY'] = 'dnsrecon-dev-key-change-in-production'
-app.config['PERMANENT_SESSION_LIFETIME'] = timedelta(hours=2)  # 2 hour session lifetime
+# Use centralized configuration for Flask settings
+app.config['SECRET_KEY'] = config.flask_secret_key
+app.config['PERMANENT_SESSION_LIFETIME'] = timedelta(hours=config.flask_permanent_session_lifetime_hours)


 def get_user_scanner():
     """
-    Enhanced user scanner retrieval with better error handling and debugging.
+    Retrieves the scanner for the current session, or creates a new
+    session and scanner if one doesn't exist.
     """
     # Get current Flask session info for debugging
     current_flask_session_id = session.get('dnsrecon_session_id')
-    client_ip = request.remote_addr
-    user_agent = request.headers.get('User-Agent', '')[:100]  # Truncate for logging

     # Try to get existing session
     if current_flask_session_id:
         existing_scanner = session_manager.get_session(current_flask_session_id)
         if existing_scanner:
-            # Ensure session ID is set
-            existing_scanner.session_id = current_flask_session_id
             return current_flask_session_id, existing_scanner
-        else:
-            print(f"Session {current_flask_session_id} not found in session manager")

-    # Create new session
-    print("Creating new session...")
+    # Create new session if none exists
+    print("Creating new session as none was found...")
     new_session_id = session_manager.create_session()
     new_scanner = session_manager.get_session(new_session_id)

     if not new_scanner:
-        print(f"ERROR: Failed to retrieve newly created session {new_session_id}")
         raise Exception("Failed to create new scanner session")

     # Store in Flask session
     session['dnsrecon_session_id'] = new_session_id
     session.permanent = True

-    # Ensure session ID is set on scanner
-    new_scanner.session_id = new_session_id
-
-    print(f"Created new session: {new_session_id}")
-    print(f"New scanner status: {new_scanner.status}")
-    print("=== END SESSION DEBUG ===")

     return new_session_id, new_scanner


 @app.route('/')
```
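For context on the two settings the first hunk centralizes: `PERMANENT_SESSION_LIFETIME` only applies to sessions marked permanent, which is why `get_user_scanner()` sets `session.permanent = True`. A minimal standalone sketch of that Flask behavior (hypothetical toy app, not part of this diff):

```python
# Minimal sketch of Flask's permanent-session behavior (hypothetical toy app).
from datetime import timedelta
from flask import Flask, session

app = Flask(__name__)
app.config['SECRET_KEY'] = 'dev-only'
app.config['PERMANENT_SESSION_LIFETIME'] = timedelta(hours=2)

@app.route('/touch')
def touch():
    session['seen'] = True
    # Without this flag the cookie expires when the browser closes;
    # with it, the cookie lives for PERMANENT_SESSION_LIFETIME.
    session.permanent = True
    return 'ok'
```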
```diff
@@ -68,101 +57,68 @@ def index():
 @app.route('/api/scan/start', methods=['POST'])
 def start_scan():
     """
-    Start a new reconnaissance scan with immediate GUI feedback.
+    Start a new reconnaissance scan. Creates a new isolated scanner if
+    clear_graph is true, otherwise adds to the existing one.
     """
     print("=== API: /api/scan/start called ===")

     try:
-        print("Getting JSON data from request...")
         data = request.get_json()
-        print(f"Request data: {data}")

         if not data or 'target_domain' not in data:
-            print("ERROR: Missing target_domain in request")
-            return jsonify({
-                'success': False,
-                'error': 'Missing target_domain in request'
-            }), 400
+            return jsonify({'success': False, 'error': 'Missing target_domain in request'}), 400

         target_domain = data['target_domain'].strip()
         max_depth = data.get('max_depth', config.default_recursion_depth)
         clear_graph = data.get('clear_graph', True)

-        print(f"Parsed - target_domain: '{target_domain}', max_depth: {max_depth}")
+        print(f"Parsed - target_domain: '{target_domain}', max_depth: {max_depth}, clear_graph: {clear_graph}")

         # Validation
         if not target_domain:
-            print("ERROR: Target domain cannot be empty")
-            return jsonify({
-                'success': False,
-                'error': 'Target domain cannot be empty'
-            }), 400
-
-        if not isinstance(max_depth, int) or max_depth < 1 or max_depth > 5:
-            print(f"ERROR: Invalid max_depth: {max_depth}")
-            return jsonify({
-                'success': False,
-                'error': 'Max depth must be an integer between 1 and 5'
-            }), 400
-
-        print("Validation passed, getting user scanner...")
-
-        # Get user-specific scanner
-        user_session_id, scanner = get_user_scanner()
-
-        # Ensure session ID is properly set
-        if not scanner.session_id:
-            scanner.session_id = user_session_id
-
-        print(f"Using session: {user_session_id}")
-        print(f"Scanner object ID: {id(scanner)}")
-
-        # Start scan
-        print(f"Calling start_scan on scanner {id(scanner)}...")
+            return jsonify({'success': False, 'error': 'Target domain cannot be empty'}), 400
+        if not isinstance(max_depth, int) or not 1 <= max_depth <= 5:
+            return jsonify({'success': False, 'error': 'Max depth must be an integer between 1 and 5'}), 400
+
+        user_session_id, scanner = None, None
+
+        if clear_graph:
+            print("Clear graph requested: Creating a new, isolated scanner session.")
+            old_session_id = session.get('dnsrecon_session_id')
+            if old_session_id:
+                session_manager.terminate_session(old_session_id)
+
+            user_session_id = session_manager.create_session()
+            session['dnsrecon_session_id'] = user_session_id
+            session.permanent = True
+            scanner = session_manager.get_session(user_session_id)
+        else:
+            print("Adding to existing graph: Reusing the current scanner session.")
+            user_session_id, scanner = get_user_scanner()
+
+        if not scanner:
+            return jsonify({'success': False, 'error': 'Failed to get or create a scanner instance.'}), 500
+
+        print(f"Using scanner {id(scanner)} in session {user_session_id}")
         success = scanner.start_scan(target_domain, max_depth, clear_graph=clear_graph)
-
-        # Immediately update session state regardless of success
-        session_manager.update_session_scanner(user_session_id, scanner)

         if success:
-            scan_session_id = scanner.logger.session_id
-            print(f"Scan started successfully with scan session ID: {scan_session_id}")
             return jsonify({
                 'success': True,
                 'message': 'Scan started successfully',
-                'scan_id': scan_session_id,
+                'scan_id': scanner.logger.session_id,
                 'user_session_id': user_session_id,
-                'scanner_status': scanner.status,
-                'debug_info': {
-                    'scanner_object_id': id(scanner),
-                    'scanner_status': scanner.status
-                }
             })
         else:
-            print("ERROR: Scanner returned False")
-
-            # Provide more detailed error information
-            error_details = {
-                'scanner_status': scanner.status,
-                'scanner_object_id': id(scanner),
-                'session_id': user_session_id,
-                'providers_count': len(scanner.providers) if hasattr(scanner, 'providers') else 0
-            }
-
             return jsonify({
                 'success': False,
                 'error': f'Failed to start scan (scanner status: {scanner.status})',
-                'debug_info': error_details
             }), 409

     except Exception as e:
         print(f"ERROR: Exception in start_scan endpoint: {e}")
         traceback.print_exc()
-        return jsonify({
-            'success': False,
-            'error': f'Internal server error: {str(e)}'
-        }), 500
+        return jsonify({'success': False, 'error': f'Internal server error: {str(e)}'}), 500


 @app.route('/api/scan/stop', methods=['POST'])
 def stop_scan():
```
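To exercise the refactored endpoint, here is a request mirroring the keys it parses (`target_domain`, `max_depth`, `clear_graph`); the host and port are the defaults from `.env.example`, and a running instance is assumed:

```python
# Smoke-test /api/scan/start against a local instance (assumes the defaults
# FLASK_HOST=127.0.0.1 and FLASK_PORT=5000 and a running server).
import requests

resp = requests.post(
    'http://127.0.0.1:5000/api/scan/start',
    json={'target_domain': 'example.com', 'max_depth': 2, 'clear_graph': True},
    timeout=10,
)
body = resp.json()
print(resp.status_code, body.get('success'), body.get('scan_id'))
```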
```diff
@@ -184,7 +140,7 @@ def stop_scan():
         if not scanner.session_id:
             scanner.session_id = user_session_id

-        # Use the enhanced stop mechanism
+        # Use the stop mechanism
         success = scanner.stop_scan()

         # Also set the Redis stop signal directly for extra reliability
@@ -203,7 +159,7 @@ def stop_scan():
             'message': 'Scan stop requested - termination initiated',
             'user_session_id': user_session_id,
             'scanner_status': scanner.status,
-            'stop_method': 'enhanced_cross_process'
+            'stop_method': 'cross_process'
         })

     except Exception as e:
@@ -217,7 +173,7 @@ def stop_scan():

 @app.route('/api/scan/status', methods=['GET'])
 def get_scan_status():
-    """Get current scan status with enhanced error handling."""
+    """Get current scan status with error handling."""
     try:
         # Get user-specific scanner
         user_session_id, scanner = get_user_scanner()
@@ -279,7 +235,7 @@ def get_scan_status():

 @app.route('/api/graph', methods=['GET'])
 def get_graph_data():
-    """Get current graph data with enhanced error handling."""
+    """Get current graph data with error handling."""
     try:
         # Get user-specific scanner
         user_session_id, scanner = get_user_scanner()
@@ -368,12 +324,18 @@ def export_results():
 @app.route('/api/providers', methods=['GET'])
 def get_providers():
     """Get information about available providers for the user session."""
-    print("=== API: /api/providers called ===")

     try:
         # Get user-specific scanner
         user_session_id, scanner = get_user_scanner()

+        if scanner:
+            completed_tasks = scanner.indicators_completed
+            enqueued_tasks = len(scanner.task_queue)
+            print(f"DEBUG: Tasks - Completed: {completed_tasks}, Enqueued: {enqueued_tasks}")
+        else:
+            print("DEBUG: No active scanner session found.")

         provider_info = scanner.get_provider_info()

         return jsonify({
@@ -448,113 +410,6 @@ def set_api_keys():
             'error': f'Internal server error: {str(e)}'
         }), 500


-@app.route('/api/session/info', methods=['GET'])
-def get_session_info():
-    """Get information about the current user session."""
-    try:
-        user_session_id, scanner = get_user_scanner()
-        session_info = session_manager.get_session_info(user_session_id)
-
-        return jsonify({
-            'success': True,
-            'session_info': session_info
-        })
-
-    except Exception as e:
-        print(f"ERROR: Exception in get_session_info endpoint: {e}")
-        traceback.print_exc()
-        return jsonify({
-            'success': False,
-            'error': f'Internal server error: {str(e)}'
-        }), 500
-
-
-@app.route('/api/session/terminate', methods=['POST'])
-def terminate_session():
-    """Terminate the current user session."""
-    try:
-        user_session_id = session.get('dnsrecon_session_id')
-
-        if user_session_id:
-            success = session_manager.terminate_session(user_session_id)
-            # Clear Flask session
-            session.pop('dnsrecon_session_id', None)
-
-            return jsonify({
-                'success': success,
-                'message': 'Session terminated' if success else 'Session not found'
-            })
-        else:
-            return jsonify({
-                'success': False,
-                'error': 'No active session to terminate'
-            }), 400
-
-    except Exception as e:
-        print(f"ERROR: Exception in terminate_session endpoint: {e}")
-        traceback.print_exc()
-        return jsonify({
-            'success': False,
-            'error': f'Internal server error: {str(e)}'
-        }), 500
-
-
-@app.route('/api/admin/sessions', methods=['GET'])
-def list_sessions():
-    """Admin endpoint to list all active sessions."""
-    try:
-        sessions = session_manager.list_active_sessions()
-        stats = session_manager.get_statistics()
-
-        return jsonify({
-            'success': True,
-            'sessions': sessions,
-            'statistics': stats
-        })
-
-    except Exception as e:
-        print(f"ERROR: Exception in list_sessions endpoint: {e}")
-        traceback.print_exc()
-        return jsonify({
-            'success': False,
-            'error': f'Internal server error: {str(e)}'
-        }), 500
-
-
-@app.route('/api/health', methods=['GET'])
-def health_check():
-    """Health check endpoint with enhanced Phase 2 information."""
-    try:
-        # Get session stats
-        session_stats = session_manager.get_statistics()
-
-        return jsonify({
-            'success': True,
-            'status': 'healthy',
-            'timestamp': datetime.now(timezone.utc).isoformat(),
-            'version': '1.0.0-phase2',
-            'phase': 2,
-            'features': {
-                'multi_provider': True,
-                'concurrent_processing': True,
-                'real_time_updates': True,
-                'api_key_management': True,
-                'enhanced_visualization': True,
-                'retry_logic': True,
-                'user_sessions': True,
-                'session_isolation': True
-            },
-            'session_statistics': session_stats
-        })
-    except Exception as e:
-        print(f"ERROR: Exception in health_check endpoint: {e}")
-        return jsonify({
-            'success': False,
-            'error': f'Health check failed: {str(e)}'
-        }), 500
-
-
 @app.errorhandler(404)
 def not_found(error):
     """Handle 404 errors."""
```
config.py (133 lines changed)

```diff
@@ -5,110 +5,97 @@ Handles API key storage, rate limiting, and default settings.

 import os
 from typing import Dict, Optional
+from dotenv import load_dotenv
+
+# Load environment variables from .env file
+load_dotenv()


 class Config:
     """Configuration manager for DNSRecon application."""

     def __init__(self):
         """Initialize configuration with default values."""
-        self.api_keys: Dict[str, Optional[str]] = {
-            'shodan': None
-        }
+        self.api_keys: Dict[str, Optional[str]] = {}

-        # Default settings
+        # --- General Settings ---
         self.default_recursion_depth = 2
-        self.default_timeout = 10
+        self.default_timeout = 15
         self.max_concurrent_requests = 5
         self.large_entity_threshold = 100
+        self.max_retries_per_target = 3
+        self.cache_expiry_hours = 12

-        # Rate limiting settings (requests per minute)
+        # --- Provider Caching Settings ---
+        self.cache_timeout_hours = 6  # Provider-specific cache timeout
+
+        # --- Rate Limiting (requests per minute) ---
         self.rate_limits = {
-            'crtsh': 60,   # Free service, be respectful
-            'shodan': 60,  # API dependent
-            'dns': 100     # Local DNS queries
+            'crtsh': 30,
+            'shodan': 60,
+            'dns': 100
         }

-        # Provider settings
+        # --- Provider Settings ---
         self.enabled_providers = {
-            'crtsh': True,   # Always enabled (free)
-            'dns': True,     # Always enabled (free)
-            'shodan': False  # Requires API key
+            'crtsh': True,
+            'dns': True,
+            'shodan': False
         }

-        # Logging configuration
+        # --- Logging ---
         self.log_level = 'INFO'
         self.log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'

-        # Flask configuration
+        # --- Flask & Session Settings ---
         self.flask_host = '127.0.0.1'
         self.flask_port = 5000
         self.flask_debug = True
+        self.flask_secret_key = 'default-secret-key-change-me'
+        self.flask_permanent_session_lifetime_hours = 2
+        self.session_timeout_minutes = 60

-    def set_api_key(self, provider: str, api_key: str) -> bool:
-        """
-        Set API key for a provider.
-
-        Args:
-            provider: Provider name (shodan, etc)
-            api_key: API key string
-
-        Returns:
-            bool: True if key was set successfully
-        """
-        if provider in self.api_keys:
-            self.api_keys[provider] = api_key
-            self.enabled_providers[provider] = True if api_key else False
-            return True
-        return False
-
-    def get_api_key(self, provider: str) -> Optional[str]:
-        """
-        Get API key for a provider.
-
-        Args:
-            provider: Provider name
-
-        Returns:
-            API key or None if not set
-        """
-        return self.api_keys.get(provider)
-
-    def is_provider_enabled(self, provider: str) -> bool:
-        """
-        Check if a provider is enabled.
-
-        Args:
-            provider: Provider name
-
-        Returns:
-            bool: True if provider is enabled
-        """
-        return self.enabled_providers.get(provider, False)
-
-    def get_rate_limit(self, provider: str) -> int:
-        """
-        Get rate limit for a provider.
-
-        Args:
-            provider: Provider name
-
-        Returns:
-            Rate limit in requests per minute
-        """
-        return self.rate_limits.get(provider, 60)
+        # Load environment variables to override defaults
+        self.load_from_env()

     def load_from_env(self):
         """Load configuration from environment variables."""
-        if os.getenv('SHODAN_API_KEY'):
-            self.set_api_key('shodan', os.getenv('SHODAN_API_KEY'))
+        self.set_api_key('shodan', os.getenv('SHODAN_API_KEY'))

-        # Override default settings from environment
-        self.default_recursion_depth = int(os.getenv('DEFAULT_RECURSION_DEPTH', '2'))
-        self.flask_debug = os.getenv('FLASK_DEBUG', 'True').lower() == 'true'
-        self.default_timeout = 30
-        self.max_concurrent_requests = 5
+        # Override settings from environment
+        self.default_recursion_depth = int(os.getenv('DEFAULT_RECURSION_DEPTH', self.default_recursion_depth))
+        self.default_timeout = int(os.getenv('DEFAULT_TIMEOUT', self.default_timeout))
+        self.max_concurrent_requests = int(os.getenv('MAX_CONCURRENT_REQUESTS', self.max_concurrent_requests))
+        self.large_entity_threshold = int(os.getenv('LARGE_ENTITY_THRESHOLD', self.large_entity_threshold))
+        self.max_retries_per_target = int(os.getenv('MAX_RETRIES_PER_TARGET', self.max_retries_per_target))
+        self.cache_expiry_hours = int(os.getenv('CACHE_EXPIRY_HOURS', self.cache_expiry_hours))
+        self.cache_timeout_hours = int(os.getenv('CACHE_TIMEOUT_HOURS', self.cache_timeout_hours))
+
+        # Override Flask and session settings
+        self.flask_host = os.getenv('FLASK_HOST', self.flask_host)
+        self.flask_port = int(os.getenv('FLASK_PORT', self.flask_port))
+        self.flask_debug = os.getenv('FLASK_DEBUG', str(self.flask_debug)).lower() == 'true'
+        self.flask_secret_key = os.getenv('FLASK_SECRET_KEY', self.flask_secret_key)
+        self.flask_permanent_session_lifetime_hours = int(os.getenv('FLASK_PERMANENT_SESSION_LIFETIME_HOURS', self.flask_permanent_session_lifetime_hours))
+        self.session_timeout_minutes = int(os.getenv('SESSION_TIMEOUT_MINUTES', self.session_timeout_minutes))
+
+    def set_api_key(self, provider: str, api_key: Optional[str]) -> bool:
+        """Set API key for a provider."""
+        self.api_keys[provider] = api_key
+        if api_key:
+            self.enabled_providers[provider] = True
+        return True
+
+    def get_api_key(self, provider: str) -> Optional[str]:
+        """Get API key for a provider."""
+        return self.api_keys.get(provider)
+
+    def is_provider_enabled(self, provider: str) -> bool:
+        """Check if a provider is enabled."""
+        return self.enabled_providers.get(provider, False)
+
+    def get_rate_limit(self, provider: str) -> int:
+        """Get rate limit for a provider."""
+        return self.rate_limits.get(provider, 60)


 # Global configuration instance
 config = Config()
```
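The rewritten `load_from_env()` applies one consistent idiom: every attribute keeps its constructor default unless an environment variable overrides it, instead of the old mix of hardcoded re-assignments. Distilled:

```python
# The override idiom used throughout load_from_env(): the in-code default
# survives unless the environment supplies a value (standalone illustration).
import os

class MiniConfig:
    def __init__(self):
        self.default_timeout = 15  # in-code default
        # os.getenv returns the fallback unchanged when the variable is unset,
        # so int() sees either the env string or the int default.
        self.default_timeout = int(os.getenv('DEFAULT_TIMEOUT', self.default_timeout))

print(MiniConfig().default_timeout)  # 15 unless DEFAULT_TIMEOUT is set
```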
core/__init__.py

```diff
@@ -1,28 +1,25 @@
 """
 Core modules for DNSRecon passive reconnaissance tool.
 Contains graph management, scanning orchestration, and forensic logging.
-Phase 2: Enhanced with concurrent processing and real-time capabilities.
 """

-from .graph_manager import GraphManager, NodeType, RelationshipType
-from .scanner import Scanner, ScanStatus  # Remove 'scanner' global instance
+from .graph_manager import GraphManager, NodeType
+from .scanner import Scanner, ScanStatus
 from .logger import ForensicLogger, get_forensic_logger, new_session
-from .session_manager import session_manager  # Add session manager
-from .session_config import SessionConfig, create_session_config  # Add session config
+from .session_manager import session_manager
+from .session_config import SessionConfig, create_session_config

 __all__ = [
     'GraphManager',
     'NodeType',
-    'RelationshipType',
     'Scanner',
     'ScanStatus',
-    # 'scanner',  # Remove this - no more global scanner
     'ForensicLogger',
     'get_forensic_logger',
     'new_session',
-    'session_manager',  # Add this
-    'SessionConfig',  # Add this
-    'create_session_config'  # Add this
+    'session_manager',
+    'SessionConfig',
+    'create_session_config'
 ]

 __version__ = "1.0.0-phase2"
```

core/graph_manager.py

```diff
@@ -1,3 +1,5 @@
+# core/graph_manager.py
+
 """
 Graph data model for DNSRecon using NetworkX.
 Manages in-memory graph storage with confidence scoring and forensic metadata.
@@ -22,28 +24,6 @@ class NodeType(Enum):
         return self.value


-class RelationshipType(Enum):
-    """Enumeration of supported relationship types with confidence scores."""
-    SAN_CERTIFICATE = ("san", 0.9)
-    A_RECORD = ("a_record", 0.8)
-    AAAA_RECORD = ("aaaa_record", 0.8)
-    CNAME_RECORD = ("cname", 0.8)
-    MX_RECORD = ("mx_record", 0.7)
-    NS_RECORD = ("ns_record", 0.7)
-    PTR_RECORD = ("ptr_record", 0.8)
-    SOA_RECORD = ("soa_record", 0.7)
-    PASSIVE_DNS = ("passive_dns", 0.6)
-    ASN_MEMBERSHIP = ("asn", 0.7)
-    CORRELATED_TO = ("correlated_to", 0.9)
-
-    def __init__(self, relationship_name: str, default_confidence: float):
-        self.relationship_name = relationship_name
-        self.default_confidence = default_confidence
-
-    def __repr__(self):
-        return self.relationship_name
-
-
 class GraphManager:
     """
     Thread-safe graph manager for DNSRecon infrastructure mapping.
@@ -72,21 +52,23 @@ class GraphManager:
         self.__dict__.update(state)
         self.date_pattern = re.compile(r'^\d{4}-\d{2}-\d{2}[ T]\d{2}:\d{2}:\d{2}')

-    def _update_correlation_index(self, node_id: str, data: Any, path: List[str] = None):
-        """Recursively traverse metadata and add hashable values to the index."""
+    def _update_correlation_index(self, node_id: str, data: Any, path: List[str] = [], parent_attr: str = ""):
+        """Recursively traverse metadata and add hashable values to the index with better path tracking."""
         if path is None:
             path = []

         if isinstance(data, dict):
             for key, value in data.items():
-                self._update_correlation_index(node_id, value, path + [key])
+                self._update_correlation_index(node_id, value, path + [key], key)
         elif isinstance(data, list):
             for i, item in enumerate(data):
-                self._update_correlation_index(node_id, item, path + [f"[{i}]"])
+                # Instead of just using [i], include the parent attribute context
+                list_path_component = f"[{i}]" if not parent_attr else f"{parent_attr}[{i}]"
+                self._update_correlation_index(node_id, item, path + [list_path_component], parent_attr)
         else:
-            self._add_to_correlation_index(node_id, data, ".".join(path))
+            self._add_to_correlation_index(node_id, data, ".".join(path), parent_attr)

-    def _add_to_correlation_index(self, node_id: str, value: Any, path_str: str):
+    def _add_to_correlation_index(self, node_id: str, value: Any, path_str: str, parent_attr: str = ""):
         """Add a hashable value to the correlation index, filtering out noise."""
         if not isinstance(value, (str, int, float, bool)) or value is None:
             return
```
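To make the new `parent_attr` bookkeeping concrete, here is a standalone walk over hypothetical provider metadata, mirroring the traversal above; note how list items now carry their parent attribute in the path component:

```python
# Standalone mirror of the traversal above, on hypothetical metadata.
def walk(data, path=None, parent_attr=""):
    path = path or []
    if isinstance(data, dict):
        for key, value in data.items():
            walk(value, path + [key], key)
    elif isinstance(data, list):
        for i, item in enumerate(data):
            # "hostnames[0]" instead of the bare "[0]" used previously
            component = f"[{i}]" if not parent_attr else f"{parent_attr}[{i}]"
            walk(item, path + [component], parent_attr)
    else:
        print(".".join(path), "->", data)

walk({'shodan': {'hostnames': ['a.example.com', 'b.example.com']}})
# shodan.hostnames.hostnames[0] -> a.example.com
# shodan.hostnames.hostnames[1] -> b.example.com
```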
```diff
@@ -102,8 +84,8 @@ class GraphManager:
             return
         if len(value) < 4 or value.lower() in ['true', 'false', 'unknown', 'none', 'crt.sh']:
             return
-        elif isinstance(value, int) and abs(value) < 9999:
-            return  # Ignore small integers
+        elif isinstance(value, int) and (abs(value) < 1024 or abs(value) > 65535):
+            return  # Ignore small integers and common port numbers
         elif isinstance(value, bool):
             return  # Ignore boolean values

@@ -112,10 +94,47 @@ class GraphManager:
             self.correlation_index[value] = {}
         if node_id not in self.correlation_index[value]:
             self.correlation_index[value][node_id] = []
-        if path_str not in self.correlation_index[value][node_id]:
-            self.correlation_index[value][node_id].append(path_str)
-
-    def _check_for_correlations(self, new_node_id: str, data: Any, path: List[str] = None) -> List[Dict]:
+        # Store both the full path and the parent attribute for better edge labeling
+        correlation_entry = {
+            'path': path_str,
+            'parent_attr': parent_attr,
+            'meaningful_attr': self._extract_meaningful_attribute(path_str, parent_attr)
+        }
+
+        if correlation_entry not in self.correlation_index[value][node_id]:
+            self.correlation_index[value][node_id].append(correlation_entry)
+
+    def _extract_meaningful_attribute(self, path_str: str, parent_attr: str = "") -> str:
+        """Extract the most meaningful attribute name from a path string."""
+        if not path_str:
+            return "unknown"
+
+        path_parts = path_str.split('.')
+
+        # Look for the last non-array-index part
+        for part in reversed(path_parts):
+            # Skip array indices like [0], [1], etc.
+            if not (part.startswith('[') and part.endswith(']') and part[1:-1].isdigit()):
+                # Clean up compound names like "hostnames[0]" to just "hostnames"
+                clean_part = re.sub(r'\[\d+\]$', '', part)
+                if clean_part:
+                    return clean_part
+
+        # Fallback to parent attribute if available
+        if parent_attr:
+            return parent_attr
+
+        # Last resort - use the first meaningful part
+        for part in path_parts:
+            if not (part.startswith('[') and part.endswith(']') and part[1:-1].isdigit()):
+                clean_part = re.sub(r'\[\d+\]$', '', part)
+                if clean_part:
+                    return clean_part
+
+        return "correlation"
+
+    def _check_for_correlations(self, new_node_id: str, data: Any, path: List[str] = [], parent_attr: str = "") -> List[Dict]:
         """Recursively traverse metadata to find correlations with existing data."""
         if path is None:
             path = []
```
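Worked by hand from the rules above (a condensed standalone copy, not project test output):

```python
# Condensed copy of the extraction rules above, with hand-worked checks.
import re

def extract_meaningful_attribute(path_str, parent_attr=""):
    if not path_str:
        return "unknown"
    # Last non-array-index path part wins, with any trailing [N] stripped.
    for part in reversed(path_str.split('.')):
        if not (part.startswith('[') and part.endswith(']') and part[1:-1].isdigit()):
            clean = re.sub(r'\[\d+\]$', '', part)
            if clean:
                return clean
    return parent_attr or "correlation"

assert extract_meaningful_attribute("shodan.hostnames.hostnames[0]") == "hostnames"
assert extract_meaningful_attribute("[2]", parent_attr="ports") == "ports"
assert extract_meaningful_attribute("") == "unknown"
```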
```diff
@@ -125,10 +144,11 @@ class GraphManager:
             for key, value in data.items():
                 if key == 'source':  # Avoid correlating on the provider name
                     continue
-                all_correlations.extend(self._check_for_correlations(new_node_id, value, path + [key]))
+                all_correlations.extend(self._check_for_correlations(new_node_id, value, path + [key], key))
         elif isinstance(data, list):
             for i, item in enumerate(data):
-                all_correlations.extend(self._check_for_correlations(new_node_id, item, path + [f"[{i}]"]))
+                list_path_component = f"[{i}]" if not parent_attr else f"{parent_attr}[{i}]"
+                all_correlations.extend(self._check_for_correlations(new_node_id, item, path + [list_path_component], parent_attr))
         else:
             value = data
             if value in self.correlation_index:
@@ -139,11 +159,31 @@ class GraphManager:
         if len(unique_nodes) < 2:
             return all_correlations  # Correlation must involve at least two distinct nodes

-        new_source = {'node_id': new_node_id, 'path': ".".join(path)}
+        new_source = {
+            'node_id': new_node_id,
+            'path': ".".join(path),
+            'parent_attr': parent_attr,
+            'meaningful_attr': self._extract_meaningful_attribute(".".join(path), parent_attr)
+        }
         all_sources = [new_source]
-        for node_id, paths in existing_nodes_with_paths.items():
-            for p_str in paths:
-                all_sources.append({'node_id': node_id, 'path': p_str})
+        for node_id, path_entries in existing_nodes_with_paths.items():
+            for entry in path_entries:
+                if isinstance(entry, dict):
+                    all_sources.append({
+                        'node_id': node_id,
+                        'path': entry['path'],
+                        'parent_attr': entry.get('parent_attr', ''),
+                        'meaningful_attr': entry.get('meaningful_attr', self._extract_meaningful_attribute(entry['path'], entry.get('parent_attr', '')))
+                    })
+                else:
+                    # Handle legacy string-only entries
+                    all_sources.append({
+                        'node_id': node_id,
+                        'path': str(entry),
+                        'parent_attr': '',
+                        'meaningful_attr': self._extract_meaningful_attribute(str(entry))
+                    })

         all_correlations.append({
             'value': value,
@@ -180,53 +220,182 @@ class GraphManager:
         for corr in correlations:
             value = corr['value']

-            # FIXED: Check if the correlation value contains an existing node ID.
-            found_major_node_id = None
-            if isinstance(value, str):
-                for existing_node in self.graph.nodes():
-                    if existing_node in value:
-                        found_major_node_id = existing_node
-                        break
-
-            if found_major_node_id:
-                # An existing major node is part of the value; link to it directly.
-                for c_node_id in set(corr['nodes']):
-                    if self.graph.has_node(c_node_id) and c_node_id != found_major_node_id:
-                        self.add_edge(c_node_id, found_major_node_id, RelationshipType.CORRELATED_TO)
-                continue  # Skip creating a redundant correlation node
-
-            # Proceed to create a new correlation node if no major node was found.
-            correlation_node_id = f"{value}"
-            if not self.graph.has_node(correlation_node_id):
+            # STEP 1: Substring check against all existing nodes
+            if self._correlation_value_matches_existing_node(value):
+                # Skip creating correlation node - would be redundant
+                continue
+
+            eligible_nodes = set(corr['nodes'])
+
+            if len(eligible_nodes) < 2:
+                # Need at least 2 nodes to create a correlation
+                continue
+
+            # STEP 3: Check for existing correlation node with same connection pattern
+            correlation_nodes_with_pattern = self._find_correlation_nodes_with_same_pattern(eligible_nodes)
+
+            if correlation_nodes_with_pattern:
+                # STEP 4: Merge with existing correlation node
+                target_correlation_node = correlation_nodes_with_pattern[0]
+                self._merge_correlation_values(target_correlation_node, value, corr)
+            else:
+                # STEP 5: Create new correlation node for eligible nodes only
+                correlation_node_id = f"corr_{abs(hash(str(sorted(eligible_nodes))))}"
                 self.add_node(correlation_node_id, NodeType.CORRELATION_OBJECT,
-                              metadata={'value': value, 'sources': corr['sources'],
-                                        'correlated_nodes': list(set(corr['nodes']))})
-            else:  # Update existing correlation node
-                existing_meta = self.graph.nodes[correlation_node_id]['metadata']
-                existing_nodes = set(existing_meta.get('correlated_nodes', []))
-                existing_meta['correlated_nodes'] = list(existing_nodes.union(set(corr['nodes'])))
-                existing_sources = {(s['node_id'], s['path']) for s in existing_meta.get('sources', [])}
-                for s in corr['sources']:
-                    existing_sources.add((s['node_id'], s['path']))
-                existing_meta['sources'] = [{'node_id': nid, 'path': p} for nid, p in existing_sources]
-
-            for c_node_id in set(corr['nodes']):
-                self.add_edge(c_node_id, correlation_node_id, RelationshipType.CORRELATED_TO)
+                              metadata={'values': [value], 'sources': corr['sources'],
+                                        'correlated_nodes': list(eligible_nodes)})
+
+                # Create edges from eligible nodes to this correlation node with better labeling
+                for c_node_id in eligible_nodes:
+                    if self.graph.has_node(c_node_id):
+                        # Find the best attribute name for this node
+                        meaningful_attr = self._find_best_attribute_name_for_node(c_node_id, corr['sources'])
+                        relationship_type = f"c_{meaningful_attr}"
+                        self.add_edge(c_node_id, correlation_node_id, relationship_type, confidence_score=0.9)

         self._update_correlation_index(node_id, attributes)

         self.last_modified = datetime.now(timezone.utc).isoformat()
         return is_new_node

-    def add_edge(self, source_id: str, target_id: str, relationship_type: RelationshipType,
-                 confidence_score: Optional[float] = None, source_provider: str = "unknown",
+    def _find_best_attribute_name_for_node(self, node_id: str, sources: List[Dict]) -> str:
+        """Find the best attribute name for a correlation edge by looking at the sources."""
+        node_sources = [s for s in sources if s['node_id'] == node_id]
+
+        if not node_sources:
+            return "correlation"
+
+        # Use the meaningful_attr if available
+        for source in node_sources:
+            meaningful_attr = source.get('meaningful_attr')
+            if meaningful_attr and meaningful_attr != "unknown":
+                return meaningful_attr
+
+        # Fallback to parent_attr
+        for source in node_sources:
+            parent_attr = source.get('parent_attr')
+            if parent_attr:
+                return parent_attr
+
+        # Last resort - extract from path
+        for source in node_sources:
+            path = source.get('path', '')
+            if path:
+                extracted = self._extract_meaningful_attribute(path)
+                if extracted != "unknown":
+                    return extracted
+
+        return "correlation"
+
+    def _has_direct_edge_bidirectional(self, node_a: str, node_b: str) -> bool:
+        """
+        Check if there's a direct edge between two nodes in either direction.
+        Returns True if node_a→node_b OR node_b→node_a exists.
+        """
+        return (self.graph.has_edge(node_a, node_b) or
+                self.graph.has_edge(node_b, node_a))
+
+    def _correlation_value_matches_existing_node(self, correlation_value: str) -> bool:
+        """
+        Check if correlation value contains any existing node ID as substring.
+        Returns True if match found (correlation node should NOT be created).
+        """
+        correlation_str = str(correlation_value).lower()
+
+        # Check against all existing nodes
+        for existing_node_id in self.graph.nodes():
+            if existing_node_id.lower() in correlation_str:
+                return True
+
+        return False
+
+    def _find_correlation_nodes_with_same_pattern(self, node_set: set) -> List[str]:
+        """
+        Find existing correlation nodes that have the exact same pattern of connected nodes.
+        Returns list of correlation node IDs with matching patterns.
+        """
+        correlation_nodes = self.get_nodes_by_type(NodeType.CORRELATION_OBJECT)
+        matching_nodes = []
+
+        for corr_node_id in correlation_nodes:
+            # Get all nodes connected to this correlation node
+            connected_nodes = set()
+
+            # Add all predecessors (nodes pointing TO the correlation node)
+            connected_nodes.update(self.graph.predecessors(corr_node_id))
+
+            # Add all successors (nodes pointed TO by the correlation node)
+            connected_nodes.update(self.graph.successors(corr_node_id))
+
+            # Check if the pattern matches exactly
+            if connected_nodes == node_set:
+                matching_nodes.append(corr_node_id)
+
+        return matching_nodes
+
+    def _merge_correlation_values(self, target_node_id: str, new_value: Any, corr_data: Dict) -> None:
+        """
+        Merge a new correlation value into an existing correlation node.
+        Uses same logic as large entity merging.
+        """
+        if not self.graph.has_node(target_node_id):
+            return
+
+        target_metadata = self.graph.nodes[target_node_id]['metadata']
+
+        # Get existing values (ensure it's a list)
+        existing_values = target_metadata.get('values', [])
+        if not isinstance(existing_values, list):
+            existing_values = [existing_values]
+
+        # Add new value if not already present
+        if new_value not in existing_values:
+            existing_values.append(new_value)
+
+        # Merge sources
+        existing_sources = target_metadata.get('sources', [])
+        new_sources = corr_data.get('sources', [])
+
+        # Create set of unique sources based on (node_id, path) tuples
+        source_set = set()
+        for source in existing_sources + new_sources:
+            source_tuple = (source['node_id'], source.get('path', ''))
+            source_set.add(source_tuple)
+
+        # Convert back to list of dictionaries
+        merged_sources = [{'node_id': nid, 'path': path} for nid, path in source_set]
+
+        # Update metadata
+        target_metadata.update({
+            'values': existing_values,
+            'sources': merged_sources,
+            'correlated_nodes': list(set(target_metadata.get('correlated_nodes', []) + corr_data.get('nodes', []))),
+            'merge_count': len(existing_values),
+            'last_merge_timestamp': datetime.now(timezone.utc).isoformat()
+        })
+
+        # Update description to reflect merged nature
+        value_count = len(existing_values)
+        node_count = len(target_metadata['correlated_nodes'])
+        self.graph.nodes[target_node_id]['description'] = (
+            f"Correlation container with {value_count} merged values "
+            f"across {node_count} nodes"
+        )
+
+    def add_edge(self, source_id: str, target_id: str, relationship_type: str,
+                 confidence_score: float = 0.5, source_provider: str = "unknown",
                  raw_data: Optional[Dict[str, Any]] = None) -> bool:
         """Add or update an edge between two nodes, ensuring nodes exist."""
-        # LOGIC FIX: Ensure both source and target nodes exist before adding an edge.
         if not self.graph.has_node(source_id) or not self.graph.has_node(target_id):
             return False

-        new_confidence = confidence_score or relationship_type.default_confidence
+        new_confidence = confidence_score
+
+        if relationship_type.startswith("c_"):
+            edge_label = relationship_type
+        else:
+            edge_label = f"{source_provider}_{relationship_type}"

         if self.graph.has_edge(source_id, target_id):
             # If edge exists, update confidence if the new score is higher.
             if new_confidence > self.graph.edges[source_id, target_id].get('confidence_score', 0):
```
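One property of the new `corr_{abs(hash(...))}` IDs worth keeping in mind: Python salts `str` hashes per interpreter run, so these node IDs are stable within a process but not across runs. If cross-run determinism were needed (for example, to diff exported graphs), a digest would provide it; a sketch of that alternative, not what this changeset implements:

```python
# Deterministic alternative to corr_{abs(hash(...))} (a suggestion only):
# hashlib digests are stable across processes, unlike salted str hashes.
import hashlib

def correlation_node_id(eligible_nodes):
    key = str(sorted(eligible_nodes)).encode('utf-8')
    return "corr_" + hashlib.sha256(key).hexdigest()[:12]

print(correlation_node_id({'a.example.com', 'b.example.com'}))  # same every run
```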
```diff
@@ -237,7 +406,7 @@ class GraphManager:

         # Add a new edge with all attributes.
         self.graph.add_edge(source_id, target_id,
-                            relationship_type=relationship_type.relationship_name,
+                            relationship_type=edge_label,
                             confidence_score=new_confidence,
                             source_provider=source_provider,
                             discovery_timestamp=datetime.now(timezone.utc).isoformat(),
@@ -321,10 +490,14 @@ class GraphManager:
     def _get_confidence_distribution(self) -> Dict[str, int]:
         """Get distribution of edge confidence scores."""
         distribution = {'high': 0, 'medium': 0, 'low': 0}
-        for _, _, confidence in self.graph.edges(data='confidence_score', default=0):
-            if confidence >= 0.8: distribution['high'] += 1
-            elif confidence >= 0.6: distribution['medium'] += 1
-            else: distribution['low'] += 1
+        for _, _, data in self.graph.edges(data=True):
+            confidence = data.get('confidence_score', 0)
+            if confidence >= 0.8:
+                distribution['high'] += 1
+            elif confidence >= 0.6:
+                distribution['medium'] += 1
+            else:
+                distribution['low'] += 1
         return distribution

     def get_statistics(self) -> Dict[str, Any]:
```
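A toy graph shows the rewritten loop's behavior, including the defensive `data.get(...)` default when an edge lacks a `confidence_score` (requires networkx):

```python
# Toy check of the distribution logic above (requires networkx).
import networkx as nx

g = nx.DiGraph()
g.add_edge('a', 'b', confidence_score=0.9)  # high
g.add_edge('b', 'c', confidence_score=0.7)  # medium
g.add_edge('c', 'd')                        # no score -> defaults to 0 -> low

distribution = {'high': 0, 'medium': 0, 'low': 0}
for _, _, data in g.edges(data=True):
    confidence = data.get('confidence_score', 0)
    if confidence >= 0.8:
        distribution['high'] += 1
    elif confidence >= 0.6:
        distribution['medium'] += 1
    else:
        distribution['low'] += 1

print(distribution)  # {'high': 1, 'medium': 1, 'low': 1}
```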
```diff
@@ -339,9 +512,10 @@ class GraphManager:
         # Calculate distributions
         for node_type in NodeType:
             stats['node_type_distribution'][node_type.value] = self.get_nodes_by_type(node_type).__len__()
-        for _, _, rel_type in self.graph.edges(data='relationship_type', default='unknown'):
+        for _, _, data in self.graph.edges(data=True):
+            rel_type = data.get('relationship_type', 'unknown')
             stats['relationship_type_distribution'][rel_type] = stats['relationship_type_distribution'].get(rel_type, 0) + 1
-        for _, _, provider in self.graph.edges(data='source_provider', default='unknown'):
+            provider = data.get('source_provider', 'unknown')
             stats['provider_distribution'][provider] = stats['provider_distribution'].get(provider, 0) + 1
         return stats
```

core/logger.py

```diff
@@ -42,7 +42,7 @@ class ForensicLogger:
     Maintains detailed audit trail of all reconnaissance activities.
     """

-    def __init__(self, session_id: str = None):
+    def __init__(self, session_id: str = ""):
         """
         Initialize forensic logger.

@@ -203,8 +203,6 @@ class ForensicLogger:
         self.session_metadata['target_domains'] = list(self.session_metadata['target_domains'])

         self.logger.info(f"Scan Complete - Session: {self.session_id}")
-        self.logger.info(f"Total API Requests: {self.session_metadata['total_requests']}")
-        self.logger.info(f"Total Relationships: {self.session_metadata['total_relationships']}")

     def export_audit_trail(self) -> Dict[str, Any]:
         """
```
core/scanner.py (420 lines changed)

```diff
@@ -5,12 +5,12 @@ import traceback
 import time
 import os
 import importlib
-from typing import List, Set, Dict, Any, Tuple
+from typing import List, Set, Dict, Any, Tuple, Optional
 from concurrent.futures import ThreadPoolExecutor, as_completed, CancelledError, Future
 from collections import defaultdict, deque
 from datetime import datetime, timezone

-from core.graph_manager import GraphManager, NodeType, RelationshipType
+from core.graph_manager import GraphManager, NodeType
 from core.logger import get_forensic_logger, new_session
 from utils.helpers import _is_valid_ip, _is_valid_domain
 from providers.base_provider import BaseProvider
@@ -28,7 +28,6 @@ class ScanStatus:
 class Scanner:
     """
     Main scanning orchestrator for DNSRecon passive reconnaissance.
-    Enhanced with reliable cross-process termination capabilities.
     """

     def __init__(self, session_config=None):
@@ -50,11 +49,20 @@ class Scanner:
         self.max_depth = 2
         self.stop_event = threading.Event()
         self.scan_thread = None
-        self.session_id = None  # Will be set by session manager
+        self.session_id: Optional[str] = None  # Will be set by session manager
+        self.task_queue = deque([])
+        self.target_retries = defaultdict(int)
+        self.scan_failed_due_to_retries = False
+
+        # **NEW**: Track currently processing tasks to prevent processing after stop
+        self.currently_processing = set()
+        self.processing_lock = threading.Lock()

         # Scanning progress tracking
         self.total_indicators_found = 0
         self.indicators_processed = 0
+        self.indicators_completed = 0
+        self.tasks_re_enqueued = 0
         self.current_indicator = ""

         # Concurrent processing configuration
@@ -120,7 +128,8 @@ class Scanner:
         unpicklable_attrs = [
             'stop_event',
             'scan_thread',
-            'executor'
+            'executor',
+            'processing_lock'  # **NEW**: Exclude the processing lock
         ]

         for attr in unpicklable_attrs:
```
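The reason `processing_lock` must be excluded here: thread locks are not picklable, and scanner state is apparently serialized for storage (the `.env` comments mention scanner data living in Redis). A minimal sketch of the same drop-then-recreate pattern on a hypothetical class:

```python
# Minimal sketch of the pickling pattern above: drop the lock when
# serializing, recreate it when deserializing (hypothetical class).
import pickle
import threading

class Worker:
    def __init__(self):
        self.processing_lock = threading.Lock()
        self.items = []

    def __getstate__(self):
        state = self.__dict__.copy()
        state.pop('processing_lock', None)  # locks cannot be pickled
        return state

    def __setstate__(self, state):
        self.__dict__.update(state)
        self.processing_lock = threading.Lock()  # fresh lock after unpickling

restored = pickle.loads(pickle.dumps(Worker()))
restored.processing_lock.acquire()
restored.processing_lock.release()
```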
@@ -144,6 +153,11 @@ class Scanner:
         self.stop_event = threading.Event()
         self.scan_thread = None
         self.executor = None
+        self.processing_lock = threading.Lock()  # **NEW**: Recreate processing lock
+
+        # **NEW**: Reset processing tracking
+        if not hasattr(self, 'currently_processing'):
+            self.currently_processing = set()
 
         # Re-set stop events for providers
         if hasattr(self, 'providers'):
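The `__getstate__`/`__setstate__` hunks above follow the standard pattern for pickling objects that mix plain data with OS-level primitives: drop locks and thread handles before serializing, recreate them on deserialization. A minimal, self-contained sketch of the idiom (an illustrative class, not the Scanner itself):

```python
import pickle
import threading

class StatefulWorker:
    """Pickle pattern: exclude unpicklable primitives, rebuild them on load."""

    def __init__(self):
        self.progress = 0              # plain data: pickles fine
        self.lock = threading.Lock()   # OS primitive: cannot be pickled
        self.worker_thread = None      # thread handle: cannot be pickled

    def __getstate__(self):
        state = self.__dict__.copy()
        for attr in ('lock', 'worker_thread'):   # mirrors unpicklable_attrs
            state.pop(attr, None)
        return state

    def __setstate__(self, state):
        self.__dict__.update(state)
        self.lock = threading.Lock()   # fresh primitives after unpickling
        self.worker_thread = None

worker = StatefulWorker()
worker.progress = 42
restored = pickle.loads(pickle.dumps(worker))
assert restored.progress == 42 and restored.lock is not worker.lock
```

The `hasattr` guard in the hunk above exists for the same reason: scanners pickled before `currently_processing` was introduced would otherwise come back without it.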
@@ -166,9 +180,10 @@ class Scanner:
                     attribute = getattr(module, attribute_name)
                     if isinstance(attribute, type) and issubclass(attribute, BaseProvider) and attribute is not BaseProvider:
                         provider_class = attribute
-                        provider_name = provider_class(session_config=self.config).get_name()
+                        provider = provider_class(name=attribute_name, session_config=self.config)
+                        provider_name = provider.get_name()
 
                         if self.config.is_provider_enabled(provider_name):
-                            provider = provider_class(session_config=self.config)
                             if provider.is_available():
                                 provider.set_stop_event(self.stop_event)
                                 self.providers.append(provider)
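The instantiation change above also removes a subtle inefficiency: the provider class used to be constructed once just to read its name and then a second time for actual use. A self-contained sketch of the corrected flow, with dummy classes standing in for the real providers and config:

```python
class Config:
    def is_provider_enabled(self, name: str) -> bool:
        return name != "shodanprovider"   # illustrative: shodan off by default

class BaseProvider:
    def __init__(self, name: str, session_config=None):
        self.name, self.config = name, session_config
    def get_name(self) -> str:
        return self.name.lower()
    def is_available(self) -> bool:
        return True

class DNSProvider(BaseProvider): pass
class ShodanProvider(BaseProvider): pass

def load_providers(config: Config) -> list:
    providers = []
    for attribute_name, provider_class in [("DNSProvider", DNSProvider),
                                           ("ShodanProvider", ShodanProvider)]:
        # One instantiation serves both the name lookup and the registry.
        provider = provider_class(name=attribute_name, session_config=config)
        if config.is_provider_enabled(provider.get_name()) and provider.is_available():
            providers.append(provider)
    return providers

print([p.get_name() for p in load_providers(Config())])  # ['dnsprovider']
```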
@@ -190,28 +205,59 @@ class Scanner:
         print("Session configuration updated")
 
     def start_scan(self, target_domain: str, max_depth: int = 2, clear_graph: bool = True) -> bool:
-        """Start a new reconnaissance scan with immediate GUI feedback."""
+        """Start a new reconnaissance scan with proper cleanup of previous scans."""
         print(f"=== STARTING SCAN IN SCANNER {id(self)} ===")
         print(f"Session ID: {self.session_id}")
         print(f"Initial scanner status: {self.status}")
 
-        # Clean up previous scan thread if needed
+        # **IMPROVED**: More aggressive cleanup of previous scan
         if self.scan_thread and self.scan_thread.is_alive():
-            print("A previous scan thread is still alive. Sending termination signal and waiting...")
-            self.stop_scan()
-            self.scan_thread.join(10.0)
+            print("A previous scan thread is still alive. Forcing termination...")
+
+            # Set stop signals immediately
+            self._set_stop_signal()
+            self.status = ScanStatus.STOPPED
+
+            # Clear all processing state
+            with self.processing_lock:
+                self.currently_processing.clear()
+            self.task_queue.clear()
+
+            # Shutdown executor aggressively
+            if self.executor:
+                print("Shutting down executor forcefully...")
+                self.executor.shutdown(wait=False, cancel_futures=True)
+                self.executor = None
+
+            # Wait for thread termination with shorter timeout
+            print("Waiting for previous scan thread to terminate...")
+            self.scan_thread.join(5.0)  # Reduced from 10 seconds
+
             if self.scan_thread.is_alive():
-                print("ERROR: The previous scan thread is unresponsive and could not be stopped.")
-                self.status = ScanStatus.FAILED
-                self._update_session_state()
-                return False
-            print("Previous scan thread terminated successfully.")
+                print("WARNING: Previous scan thread is still alive after 5 seconds")
+                # Continue anyway, but log the issue
+                self.logger.logger.warning("Previous scan thread failed to terminate cleanly")
 
-        # Reset state for new scan
+        # Reset state for new scan with proper forensic logging
+        print("Resetting scanner state for new scan...")
         self.status = ScanStatus.IDLE
-        self._update_session_state()  # Update GUI immediately
-        print("Scanner state is now clean for a new scan.")
+        self.stop_event.clear()
+
+        # **NEW**: Clear Redis stop signal explicitly
+        if self.session_id:
+            from core.session_manager import session_manager
+            session_manager.clear_stop_signal(self.session_id)
+
+        with self.processing_lock:
+            self.currently_processing.clear()
+
+        self.task_queue.clear()
+        self.target_retries.clear()
+        self.scan_failed_due_to_retries = False
+
+        # Update session state immediately for GUI feedback
+        self._update_session_state()
+        print("Scanner state reset complete.")
 
         try:
             if not hasattr(self, 'providers') or not self.providers:
@@ -226,24 +272,20 @@ class Scanner:
             self.max_depth = max_depth
             self.current_depth = 0
 
-            # Clear both local and Redis stop signals
-            self.stop_event.clear()
-            if self.session_id:
-                from core.session_manager import session_manager
-                session_manager.clear_stop_signal(self.session_id)
-
             self.total_indicators_found = 0
             self.indicators_processed = 0
+            self.indicators_completed = 0
+            self.tasks_re_enqueued = 0
             self.current_indicator = self.current_target
 
-            # Update GUI with scan preparation
+            # Update GUI with scan preparation state
             self._update_session_state()
 
             # Start new forensic session
             print(f"Starting new forensic session for scanner {id(self)}...")
             self.logger = new_session()
 
-            # Start scan in separate thread
+            # Start scan in a separate thread
             print(f"Starting scan thread for scanner {id(self)}...")
             self.scan_thread = threading.Thread(
                 target=self._execute_scan,
@@ -259,16 +301,16 @@ class Scanner:
             print(f"ERROR: Exception in start_scan for scanner {id(self)}: {e}")
             traceback.print_exc()
             self.status = ScanStatus.FAILED
-            self._update_session_state()  # Update failed status immediately
+            self._update_session_state()
             return False
 
     def _execute_scan(self, target_domain: str, max_depth: int) -> None:
-        """Execute the reconnaissance scan using a task queue-based approach."""
+        """Execute the reconnaissance scan with proper termination handling."""
         print(f"_execute_scan started for {target_domain} with depth {max_depth}")
         self.executor = ThreadPoolExecutor(max_workers=self.max_workers)
         processed_targets = set()
 
-        task_queue = deque([(target_domain, 0, False)])  # target, depth, is_large_entity_member
+        self.task_queue.append((target_domain, 0, False))
 
         try:
             self.status = ScanStatus.RUNNING
@@ -279,34 +321,80 @@ class Scanner:
             self.graph.add_node(target_domain, NodeType.DOMAIN)
             self._initialize_provider_states(target_domain)
 
-            while task_queue:
-                if self._is_stop_requested():
-                    print("Stop requested, terminating scan.")
+            # **IMPROVED**: Better termination checking in main loop
+            while self.task_queue and not self._is_stop_requested():
+                try:
+                    target, depth, is_large_entity_member = self.task_queue.popleft()
+                except IndexError:
+                    # Queue became empty during processing
                     break
 
-                target, depth, is_large_entity_member = task_queue.popleft()
-
                 if target in processed_targets:
                     continue
 
                 if depth > max_depth:
                     continue
 
+                # **NEW**: Track this target as currently processing
+                with self.processing_lock:
+                    if self._is_stop_requested():
+                        print(f"Stop requested before processing {target}")
+                        break
+                    self.currently_processing.add(target)
+
+                try:
                     self.current_depth = depth
                     self.current_indicator = target
                     self._update_session_state()
 
-                new_targets, large_entity_members = self._query_providers_for_target(target, depth, is_large_entity_member)
-                processed_targets.add(target)
+                    # **IMPROVED**: More frequent stop checking during processing
+                    if self._is_stop_requested():
+                        print(f"Stop requested during processing setup for {target}")
+                        break
+
+                    new_targets, large_entity_members, success = self._query_providers_for_target(target, depth, is_large_entity_member)
+
+                    # **NEW**: Check stop signal after provider queries
+                    if self._is_stop_requested():
+                        print(f"Stop requested after querying providers for {target}")
+                        break
+
+                    if not success:
+                        self.target_retries[target] += 1
+                        if self.target_retries[target] <= self.config.max_retries_per_target:
+                            print(f"Re-queueing target {target} (attempt {self.target_retries[target]})")
+                            self.task_queue.append((target, depth, is_large_entity_member))
+                            self.tasks_re_enqueued += 1
+                        else:
+                            print(f"ERROR: Max retries exceeded for target {target}")
+                            self.scan_failed_due_to_retries = True
+                            self._log_target_processing_error(target, "Max retries exceeded")
+                    else:
+                        processed_targets.add(target)
+                        self.indicators_completed += 1
+
+                    # **NEW**: Only add new targets if not stopped
+                    if not self._is_stop_requested():
                         for new_target in new_targets:
                             if new_target not in processed_targets:
-                        task_queue.append((new_target, depth + 1, False))
+                                self.task_queue.append((new_target, depth + 1, False))
 
                         for member in large_entity_members:
                             if member not in processed_targets:
-                        task_queue.append((member, depth, True))
+                                self.task_queue.append((member, depth, True))
+
+                finally:
+                    # **NEW**: Always remove from processing set
+                    with self.processing_lock:
+                        self.currently_processing.discard(target)
+
+            # **NEW**: Log termination reason
+            if self._is_stop_requested():
+                print("Scan terminated due to stop request")
+                self.logger.logger.info("Scan terminated by user request")
+            elif not self.task_queue:
+                print("Scan completed - no more targets to process")
+                self.logger.logger.info("Scan completed - all targets processed")
 
         except Exception as e:
             print(f"ERROR: Scan execution failed with error: {e}")
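The `success` flag and `target_retries` bookkeeping above implement a bounded-retry work queue: a target whose providers failed goes back on the queue until `max_retries_per_target` is exhausted, after which the scan is flagged as degraded rather than looping forever. A self-contained sketch of that control flow (`MAX_RETRIES` and the `flaky` callback are illustrative stand-ins):

```python
from collections import defaultdict, deque

MAX_RETRIES = 3  # stands in for config.max_retries_per_target

def drain(queue: deque, process) -> tuple:
    """Process tasks, re-enqueueing each failure up to MAX_RETRIES times."""
    retries = defaultdict(int)
    completed, failed = [], []
    while queue:
        target = queue.popleft()
        if process(target):
            completed.append(target)
        else:
            retries[target] += 1
            if retries[target] <= MAX_RETRIES:
                queue.append(target)    # try again after the rest of the queue
            else:
                failed.append(target)   # give up; mark the scan as degraded
    return completed, failed

attempts = defaultdict(int)
def flaky(target: str) -> bool:
    attempts[target] += 1
    return attempts[target] >= 3        # succeeds on the third attempt

print(drain(deque(["example.com", "example.org"]), flaky))
# (['example.com', 'example.org'], [])
```

Re-enqueueing at the tail rather than retrying immediately gives transient provider failures (rate limits, timeouts) time to clear while other targets make progress.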
@@ -314,8 +402,14 @@ class Scanner:
             self.status = ScanStatus.FAILED
             self.logger.logger.error(f"Scan failed: {e}")
         finally:
+            # **NEW**: Clear processing state on exit
+            with self.processing_lock:
+                self.currently_processing.clear()
+
             if self._is_stop_requested():
                 self.status = ScanStatus.STOPPED
+            elif self.scan_failed_due_to_retries:
+                self.status = ScanStatus.FAILED
             else:
                 self.status = ScanStatus.COMPLETED
 
@@ -323,43 +417,50 @@ class Scanner:
             self.logger.log_scan_complete()
             if self.executor:
                 self.executor.shutdown(wait=False, cancel_futures=True)
+                self.executor = None
             stats = self.graph.get_statistics()
             print("Final scan statistics:")
             print(f"  - Total nodes: {stats['basic_metrics']['total_nodes']}")
             print(f"  - Total edges: {stats['basic_metrics']['total_edges']}")
             print(f"  - Targets processed: {len(processed_targets)}")
 
-    def _query_providers_for_target(self, target: str, depth: int, dns_only: bool = False) -> Tuple[Set[str], Set[str]]:
-        """Helper method to query providers for a single target."""
+    def _query_providers_for_target(self, target: str, depth: int, dns_only: bool = False) -> Tuple[Set[str], Set[str], bool]:
+        """Query providers for a single target with enhanced stop checking."""
+        # **NEW**: Early termination check
+        if self._is_stop_requested():
+            print(f"Stop requested before querying providers for {target}")
+            return set(), set(), False
+
         is_ip = _is_valid_ip(target)
         target_type = NodeType.IP if is_ip else NodeType.DOMAIN
         print(f"Querying providers for {target_type.value}: {target} at depth {depth}")
 
-        if self._is_stop_requested():
-            print(f"Stop requested before querying providers for {target}")
-            return set(), set()
-
         self.graph.add_node(target, target_type)
         self._initialize_provider_states(target)
 
         new_targets = set()
         large_entity_members = set()
         node_attributes = defaultdict(lambda: defaultdict(list))
+        all_providers_successful = True
 
         eligible_providers = self._get_eligible_providers(target, is_ip, dns_only)
 
         if not eligible_providers:
            self._log_no_eligible_providers(target, is_ip)
-            return new_targets, large_entity_members
+            return new_targets, large_entity_members, True
 
-        for provider in eligible_providers:
+        # **IMPROVED**: Check stop signal before each provider
+        for i, provider in enumerate(eligible_providers):
             if self._is_stop_requested():
-                print(f"Stop requested while querying providers for {target}")
+                print(f"Stop requested while querying provider {i+1}/{len(eligible_providers)} for {target}")
+                all_providers_successful = False
                 break
 
             try:
                 provider_results = self._query_single_provider_forensic(provider, target, is_ip, depth)
-                if provider_results and not self._is_stop_requested():
+                if provider_results is None:
+                    all_providers_successful = False
+                elif not self._is_stop_requested():
                     discovered, is_large_entity = self._process_provider_results_forensic(
                         target, provider, provider_results, node_attributes, depth
                     )
@@ -367,16 +468,65 @@ class Scanner:
                         large_entity_members.update(discovered)
                     else:
                         new_targets.update(discovered)
+                else:
+                    print(f"Stop requested after processing results from {provider.get_name()}")
+                    break
             except Exception as e:
+                all_providers_successful = False
                 self._log_provider_error(target, provider.get_name(), str(e))
 
+        # **NEW**: Only update node attributes if not stopped
+        if not self._is_stop_requested():
             for node_id, attributes in node_attributes.items():
                 if self.graph.graph.has_node(node_id):
                     node_is_ip = _is_valid_ip(node_id)
                     node_type_to_add = NodeType.IP if node_is_ip else NodeType.DOMAIN
                     self.graph.add_node(node_id, node_type_to_add, attributes=attributes)
 
-        return new_targets, large_entity_members
+        return new_targets, large_entity_members, all_providers_successful
+
+    def stop_scan(self) -> bool:
+        """Request immediate scan termination with proper cleanup."""
+        try:
+            print("=== INITIATING IMMEDIATE SCAN TERMINATION ===")
+            self.logger.logger.info("Scan termination requested by user")
+
+            # **IMPROVED**: More aggressive stop signal setting
+            self._set_stop_signal()
+            self.status = ScanStatus.STOPPED
+
+            # **NEW**: Clear processing state immediately
+            with self.processing_lock:
+                currently_processing_copy = self.currently_processing.copy()
+                self.currently_processing.clear()
+                print(f"Cleared {len(currently_processing_copy)} currently processing targets: {currently_processing_copy}")
+
+            # **IMPROVED**: Clear task queue and log what was discarded
+            discarded_tasks = list(self.task_queue)
+            self.task_queue.clear()
+            print(f"Discarded {len(discarded_tasks)} pending tasks")
+
+            # **IMPROVED**: Aggressively shut down executor
+            if self.executor:
+                print("Shutting down executor with immediate cancellation...")
+                try:
+                    # Cancel all pending futures
+                    self.executor.shutdown(wait=False, cancel_futures=True)
+                    print("Executor shutdown completed")
+                except Exception as e:
+                    print(f"Error during executor shutdown: {e}")
+
+            # Immediately update GUI with stopped status
+            self._update_session_state()
+
+            print("Termination signals sent. The scan will stop as soon as possible.")
+            return True
+
+        except Exception as e:
+            print(f"ERROR: Exception in stop_scan: {e}")
+            self.logger.logger.error(f"Error during scan termination: {e}")
+            traceback.print_exc()
+            return False
 
     def _update_session_state(self) -> None:
         """
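`stop_scan` leans on `_set_stop_signal()` and `session_manager.clear_stop_signal()` (neither shown in this diff) to make termination visible across processes. Judging by the `dnsrecon:session:*` key pattern that appears later in this change set, the stop flag is presumably a per-session Redis key; a sketch of that kind of mechanism under an assumed key layout (`dnsrecon:stop:<session_id>`):

```python
import redis

r = redis.StrictRedis(db=0)

def set_stop_signal(session_id: str, ttl_seconds: int = 3600) -> None:
    # A TTL keeps orphaned flags from piling up if cleanup is ever missed.
    r.set(f"dnsrecon:stop:{session_id}", b"1", ex=ttl_seconds)

def is_stop_requested(session_id: str) -> bool:
    return r.exists(f"dnsrecon:stop:{session_id}") > 0

def clear_stop_signal(session_id: str) -> None:
    r.delete(f"dnsrecon:stop:{session_id}")
```

Because the flag lives in Redis rather than only in a `threading.Event`, a stop issued by one Flask worker can be observed by a scan thread running in a different process.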
@@ -392,6 +542,49 @@ class Scanner:
         except Exception as e:
             print(f"ERROR: Failed to update session state: {e}")
 
+    def get_scan_status(self) -> Dict[str, Any]:
+        """Get current scan status with processing information."""
+        try:
+            with self.processing_lock:
+                currently_processing_count = len(self.currently_processing)
+                currently_processing_list = list(self.currently_processing)
+
+            return {
+                'status': self.status,
+                'target_domain': self.current_target,
+                'current_depth': self.current_depth,
+                'max_depth': self.max_depth,
+                'current_indicator': self.current_indicator,
+                'indicators_processed': self.indicators_processed,
+                'indicators_completed': self.indicators_completed,
+                'tasks_re_enqueued': self.tasks_re_enqueued,
+                'progress_percentage': self._calculate_progress(),
+                'enabled_providers': [provider.get_name() for provider in self.providers],
+                'graph_statistics': self.graph.get_statistics(),
+                'task_queue_size': len(self.task_queue),
+                'currently_processing_count': currently_processing_count,  # **NEW**
+                'currently_processing': currently_processing_list[:5]  # **NEW**: Show first 5 for debugging
+            }
+        except Exception as e:
+            print(f"ERROR: Exception in get_scan_status: {e}")
+            traceback.print_exc()
+            return {
+                'status': 'error',
+                'target_domain': None,
+                'current_depth': 0,
+                'max_depth': 0,
+                'current_indicator': '',
+                'indicators_processed': 0,
+                'indicators_completed': 0,
+                'tasks_re_enqueued': 0,
+                'progress_percentage': 0.0,
+                'enabled_providers': [],
+                'graph_statistics': {},
+                'task_queue_size': 0,
+                'currently_processing_count': 0,
+                'currently_processing': []
+            }
+
     def _initialize_provider_states(self, target: str) -> None:
         """Initialize provider states for forensic tracking."""
         if not self.graph.graph.has_node(target):
@@ -421,22 +614,25 @@ class Scanner:
         return eligible
 
     def _already_queried_provider(self, target: str, provider_name: str) -> bool:
-        """Check if we already queried a provider for a target."""
+        """Check if we already successfully queried a provider for a target."""
         if not self.graph.graph.has_node(target):
             return False
 
         node_data = self.graph.graph.nodes[target]
         provider_states = node_data.get('metadata', {}).get('provider_states', {})
-        return provider_name in provider_states
 
-    def _query_single_provider_forensic(self, provider, target: str, is_ip: bool, current_depth: int) -> List:
+        # A provider has been successfully queried if a state exists and its status is 'success'
+        provider_state = provider_states.get(provider_name)
+        return provider_state is not None and provider_state.get('status') == 'success'
+
+    def _query_single_provider_forensic(self, provider, target: str, is_ip: bool, current_depth: int) -> Optional[List]:
         """Query a single provider with stop signal checking."""
         provider_name = provider.get_name()
         start_time = datetime.now(timezone.utc)
 
         if self._is_stop_requested():
             print(f"Stop requested before querying {provider_name} for {target}")
-            return []
+            return None
 
         print(f"Querying {provider_name} for {target}")
 
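The stricter check matters for the retry path introduced above: a provider that was queried and failed must not be skipped on the next attempt. With the node-metadata shape used here, the behavioral difference looks like this:

```python
provider_states = {
    "crtsh": {"status": "failed", "error": "timeout"},
    "dns": {"status": "success", "results_count": 4},
}

def already_queried_old(name: str) -> bool:
    return name in provider_states                 # treats failures as done

def already_queried_new(name: str) -> bool:
    state = provider_states.get(name)
    return state is not None and state.get("status") == "success"

assert already_queried_old("crtsh") is True        # would wrongly skip the retry
assert already_queried_new("crtsh") is False       # failed provider is retried
assert already_queried_new("dns") is True          # successful one is not
```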
@@ -450,7 +646,7 @@ class Scanner:
 
             if self._is_stop_requested():
                 print(f"Stop requested after querying {provider_name} for {target}")
-                return []
+                return None
 
             self._update_provider_state(target, provider_name, 'success', len(results), None, start_time)
 
@@ -460,10 +656,10 @@ class Scanner:
         except Exception as e:
             self._update_provider_state(target, provider_name, 'failed', 0, str(e), start_time)
             print(f"✗ {provider_name} failed for {target}: {e}")
-            return []
+            return None
 
     def _update_provider_state(self, target: str, provider_name: str, status: str,
-                               results_count: int, error: str, start_time: datetime) -> None:
+                               results_count: int, error: Optional[str], start_time: datetime) -> None:
         """Update provider state in node metadata for forensic tracking."""
         if not self.graph.graph.has_node(target):
             return
@@ -500,14 +696,14 @@ class Scanner:
             return members, True
 
         for i, (source, rel_target, rel_type, confidence, raw_data) in enumerate(results):
-            if i % 10 == 0 and self._is_stop_requested():
+            if i % 5 == 0 and self._is_stop_requested():  # Check more frequently
                 print(f"Stop requested while processing results from {provider_name} for {target}")
                 break
 
             self.logger.log_relationship_discovery(
                 source_node=source,
                 target_node=rel_target,
-                relationship_type=rel_type.relationship_name,
+                relationship_type=rel_type,
                 confidence_score=confidence,
                 provider=provider_name,
                 raw_data=raw_data,
@@ -516,21 +712,36 @@ class Scanner:
 
             self._collect_node_attributes(source, provider_name, rel_type, rel_target, raw_data, node_attributes[source])
 
-            if _is_valid_ip(rel_target):
+            if isinstance(rel_target, list):
+                # If the target is a list, iterate and process each item
+                for single_target in rel_target:
+                    if _is_valid_ip(single_target):
+                        self.graph.add_node(single_target, NodeType.IP)
+                        if self.graph.add_edge(source, single_target, rel_type, confidence, provider_name, raw_data):
+                            print(f"Added IP relationship: {source} -> {single_target} ({rel_type})")
+                        discovered_targets.add(single_target)
+                    elif _is_valid_domain(single_target):
+                        self.graph.add_node(single_target, NodeType.DOMAIN)
+                        if self.graph.add_edge(source, single_target, rel_type, confidence, provider_name, raw_data):
+                            print(f"Added domain relationship: {source} -> {single_target} ({rel_type})")
+                        discovered_targets.add(single_target)
+                        self._collect_node_attributes(single_target, provider_name, rel_type, source, raw_data, node_attributes[single_target])
+
+            elif _is_valid_ip(rel_target):
                 self.graph.add_node(rel_target, NodeType.IP)
                 if self.graph.add_edge(source, rel_target, rel_type, confidence, provider_name, raw_data):
-                    print(f"Added IP relationship: {source} -> {rel_target} ({rel_type.relationship_name})")
+                    print(f"Added IP relationship: {source} -> {rel_target} ({rel_type})")
                 discovered_targets.add(rel_target)
 
             elif rel_target.startswith('AS') and rel_target[2:].isdigit():
                 self.graph.add_node(rel_target, NodeType.ASN)
                 if self.graph.add_edge(source, rel_target, rel_type, confidence, provider_name, raw_data):
-                    print(f"Added ASN relationship: {source} -> {rel_target} ({rel_type.relationship_name})")
+                    print(f"Added ASN relationship: {source} -> {rel_target} ({rel_type})")
 
             elif _is_valid_domain(rel_target):
                 self.graph.add_node(rel_target, NodeType.DOMAIN)
                 if self.graph.add_edge(source, rel_target, rel_type, confidence, provider_name, raw_data):
-                    print(f"Added domain relationship: {source} -> {rel_target} ({rel_type.relationship_name})")
+                    print(f"Added domain relationship: {source} -> {rel_target} ({rel_type})")
                 discovered_targets.add(rel_target)
                 self._collect_node_attributes(rel_target, provider_name, rel_type, source, raw_data, node_attributes[rel_target])
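The new `isinstance(rel_target, list)` branch handles providers that return several targets for a single relationship (for example, multiple IPs behind one DNS answer). An equivalent and slightly shorter formulation, sketched under the assumption that per-item handling stays identical, is to normalize every `rel_target` to an iterable first:

```python
from typing import Iterator, Union

def iter_targets(rel_target: Union[str, list]) -> Iterator[str]:
    """Yield individual targets whether the provider returned one or many."""
    if isinstance(rel_target, list):
        yield from rel_target
    else:
        yield rel_target

for target in iter_targets(["198.51.100.1", "198.51.100.2"]):
    print(target)                 # two IPs from a list-valued result
for target in iter_targets("example.com"):
    print(target)                 # a single string still works unchanged
```

That would collapse the duplicated IP/domain branches back into one loop body.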
@@ -577,10 +788,10 @@ class Scanner:
 
         return set(targets)
 
-    def _collect_node_attributes(self, node_id: str, provider_name: str, rel_type: RelationshipType,
+    def _collect_node_attributes(self, node_id: str, provider_name: str, rel_type: str,
                                  target: str, raw_data: Dict[str, Any], attributes: Dict[str, Any]) -> None:
         """Collect and organize attributes for a node."""
-        self.logger.logger.debug(f"Collecting attributes for {node_id} from {provider_name}: {rel_type.relationship_name}")
+        self.logger.logger.debug(f"Collecting attributes for {node_id} from {provider_name}: {rel_type}")
 
         if provider_name == 'dns':
             record_type = raw_data.get('query_type', 'UNKNOWN')
@@ -590,7 +801,7 @@ class Scanner:
             attributes.setdefault('dns_records', []).append(dns_entry)
 
         elif provider_name == 'crtsh':
-            if rel_type == RelationshipType.SAN_CERTIFICATE:
+            if rel_type == "san_certificate":
                 domain_certs = raw_data.get('domain_certificates', {})
                 if node_id in domain_certs:
                     cert_summary = domain_certs[node_id]
@@ -604,7 +815,7 @@ class Scanner:
                 if key not in shodan_attributes or not shodan_attributes.get(key):
                     shodan_attributes[key] = value
 
-            if rel_type == RelationshipType.ASN_MEMBERSHIP:
+            if rel_type == "asn_membership":
                 attributes['asn'] = {
                     'id': target,
                     'description': raw_data.get('org', ''),
@@ -612,7 +823,7 @@ class Scanner:
                     'country': raw_data.get('country', '')
                 }
 
-        record_type_name = rel_type.relationship_name
+        record_type_name = rel_type
         if record_type_name not in attributes:
             attributes[record_type_name] = []
 
@@ -622,7 +833,6 @@ class Scanner:
         if target not in attributes[record_type_name]:
             attributes[record_type_name].append(target)
 
-
     def _log_target_processing_error(self, target: str, error: str) -> None:
         """Log target processing errors for forensic trail."""
         self.logger.logger.error(f"Target processing failed for {target}: {error}")
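This group of hunks completes the migration from the `RelationshipType` enum to plain strings: call sites stop dereferencing `.relationship_name` and compare against literals such as `"san_certificate"`. The motivation is not stated in the diff, but a likely one is serialization: these values flow into JSON graph exports and pickled Redis sessions, and a bare enum member is not JSON-serializable, as the hypothetical example below shows:

```python
import json
from enum import Enum

class RelationshipType(Enum):          # hypothetical stand-in for the old enum
    SAN_CERTIFICATE = "san_certificate"

json.dumps({"relationship": "san_certificate"})    # plain string: fine
try:
    json.dumps({"relationship": RelationshipType.SAN_CERTIFICATE})
except TypeError as e:
    print(e)   # Object of type RelationshipType is not JSON serializable
```

The trade-off is losing static safety: a typo like `"san_certifcate"` now fails silently instead of raising `AttributeError`.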
@@ -636,69 +846,12 @@ class Scanner:
         target_type = 'IP' if is_ip else 'domain'
         self.logger.logger.warning(f"No eligible providers for {target_type}: {target}")
 
-    def stop_scan(self) -> bool:
-        """Request immediate scan termination with immediate GUI feedback."""
-        try:
-            print("=== INITIATING IMMEDIATE SCAN TERMINATION ===")
-            self.logger.logger.info("Scan termination requested by user")
-
-            # Set both local and Redis stop signals
-            self._set_stop_signal()
-            self.status = ScanStatus.STOPPED
-
-            # Immediately update GUI with stopped status
-            self._update_session_state()
-
-            # Cancel executor futures if running
-            if self.executor:
-                print("Shutting down executor with immediate cancellation...")
-                self.executor.shutdown(wait=False, cancel_futures=True)
-
-            print("Termination signals sent. The scan will stop as soon as possible.")
-            return True
-
-        except Exception as e:
-            print(f"ERROR: Exception in stop_scan: {e}")
-            self.logger.logger.error(f"Error during scan termination: {e}")
-            traceback.print_exc()
-            return False
-
-    def get_scan_status(self) -> Dict[str, Any]:
-        """Get current scan status with forensic information."""
-        try:
-            return {
-                'status': self.status,
-                'target_domain': self.current_target,
-                'current_depth': self.current_depth,
-                'max_depth': self.max_depth,
-                'current_indicator': self.current_indicator,
-                'total_indicators_found': self.total_indicators_found,
-                'indicators_processed': self.indicators_processed,
-                'progress_percentage': self._calculate_progress(),
-                'enabled_providers': [provider.get_name() for provider in self.providers],
-                'graph_statistics': self.graph.get_statistics()
-            }
-        except Exception as e:
-            print(f"ERROR: Exception in get_scan_status: {e}")
-            traceback.print_exc()
-            return {
-                'status': 'error',
-                'target_domain': None,
-                'current_depth': 0,
-                'max_depth': 0,
-                'current_indicator': '',
-                'total_indicators_found': 0,
-                'indicators_processed': 0,
-                'progress_percentage': 0.0,
-                'enabled_providers': [],
-                'graph_statistics': {}
-            }
-
     def _calculate_progress(self) -> float:
-        """Calculate scan progress percentage."""
-        if self.total_indicators_found == 0:
+        """Calculate scan progress percentage based on task completion."""
+        total_tasks = self.indicators_completed + len(self.task_queue)
+        if total_tasks == 0:
             return 0.0
-        return min(100.0, (self.indicators_processed / self.total_indicators_found) * 100)
+        return min(100.0, (self.indicators_completed / total_tasks) * 100)
 
     def get_graph_data(self) -> Dict[str, Any]:
         """Get current graph data for visualization."""
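The rewritten formula derives progress from work the scanner actually knows about, completed tasks over completed-plus-queued, instead of a pre-computed indicator total. A worked example of the new behavior:

```python
def calculate_progress(indicators_completed: int, task_queue_size: int) -> float:
    total_tasks = indicators_completed + task_queue_size
    if total_tasks == 0:
        return 0.0
    return min(100.0, (indicators_completed / total_tasks) * 100)

print(calculate_progress(0, 0))    # 0.0   - nothing discovered yet
print(calculate_progress(3, 9))    # 25.0  - 3 done, 9 still queued
print(calculate_progress(12, 0))   # 100.0 - queue drained
```

One consequence worth noting: the percentage can temporarily fall when newly discovered targets enlarge the queue, which is honest but can look odd in a progress bar.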
@@ -719,8 +872,7 @@ class Scanner:
                 'final_status': self.status,
                 'total_indicators_processed': self.indicators_processed,
                 'enabled_providers': list(provider_stats.keys()),
-                'session_id': self.session_id,
-                'forensic_note': 'Enhanced scanner with reliable cross-process termination'
+                'session_id': self.session_id
             },
             'graph_data': graph_data,
             'forensic_audit': audit_trail,
@@ -750,7 +902,7 @@ class Scanner:
                     if isinstance(attribute, type) and issubclass(attribute, BaseProvider) and attribute is not BaseProvider:
                         provider_class = attribute
                         # Instantiate to get metadata, even if not fully configured
-                        temp_provider = provider_class(session_config=self.config)
+                        temp_provider = provider_class(name=attribute_name, session_config=self.config)
                         provider_name = temp_provider.get_name()
 
                         # Find the actual provider instance if it exists, to get live stats
core/session_config.py
@@ -3,11 +3,9 @@ Per-session configuration management for DNSRecon.
 Provides isolated configuration instances for each user session.
 """
 
-import os
-from typing import Dict, Optional
+from config import Config
 
-class SessionConfig:
+class SessionConfig(Config):
     """
     Session-specific configuration that inherits from global config
     but maintains isolated API keys and provider settings.
@@ -15,106 +13,8 @@ class SessionConfig:
 
     def __init__(self):
         """Initialize session config with global defaults."""
-        # Copy all attributes from global config
-        self.api_keys: Dict[str, Optional[str]] = {
-            'shodan': None
-        }
-
-        # Default settings (copied from global config)
-        self.default_recursion_depth = 2
-        self.default_timeout = 30
-        self.max_concurrent_requests = 5
-        self.large_entity_threshold = 100
-
-        # Rate limiting settings (per session)
-        self.rate_limits = {
-            'crtsh': 60,
-            'shodan': 60,
-            'dns': 100
-        }
-
-        # Provider settings (per session)
-        self.enabled_providers = {
-            'crtsh': True,
-            'dns': True,
-            'shodan': False
-        }
-
-        # Logging configuration
-        self.log_level = 'INFO'
-        self.log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
-
-        # Flask configuration (shared)
-        self.flask_host = '127.0.0.1'
-        self.flask_port = 5000
-        self.flask_debug = True
-
-    def set_api_key(self, provider: str, api_key: str) -> bool:
-        """
-        Set API key for a provider in this session.
-
-        Args:
-            provider: Provider name (shodan, etc)
-            api_key: API key string
-
-        Returns:
-            bool: True if key was set successfully
-        """
-        if provider in self.api_keys:
-            self.api_keys[provider] = api_key
-            self.enabled_providers[provider] = True if api_key else False
-            return True
-        return False
-
-    def get_api_key(self, provider: str) -> Optional[str]:
-        """
-        Get API key for a provider in this session.
-
-        Args:
-            provider: Provider name
-
-        Returns:
-            API key or None if not set
-        """
-        return self.api_keys.get(provider)
-
-    def is_provider_enabled(self, provider: str) -> bool:
-        """
-        Check if a provider is enabled in this session.
-
-        Args:
-            provider: Provider name
-
-        Returns:
-            bool: True if provider is enabled
-        """
-        return self.enabled_providers.get(provider, False)
-
-    def get_rate_limit(self, provider: str) -> int:
-        """
-        Get rate limit for a provider in this session.
-
-        Args:
-            provider: Provider name
-
-        Returns:
-            Rate limit in requests per minute
-        """
-        return self.rate_limits.get(provider, 60)
-
-    def load_from_env(self):
-        """Load configuration from environment variables (only if not already set)."""
-        if os.getenv('SHODAN_API_KEY') and not self.api_keys['shodan']:
-            self.set_api_key('shodan', os.getenv('SHODAN_API_KEY'))
-
-        # Override default settings from environment
-        self.default_recursion_depth = int(os.getenv('DEFAULT_RECURSION_DEPTH', '2'))
-        self.default_timeout = 30
-        self.max_concurrent_requests = 5
-
-
-def create_session_config() -> SessionConfig:
+        super().__init__()
+
+
+def create_session_config() -> 'SessionConfig':
     """Create a new session configuration instance."""
-    session_config = SessionConfig()
-    session_config.load_from_env()
-    return session_config
+    return SessionConfig()
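After this refactor a session config is just the global `Config` plus per-instance mutability, so API keys, rate limits, and retry counts are defined once in the base class. A sketch of the resulting shape, with a stand-in `Config` (the real one also reads environment variables):

```python
class Config:
    """Stand-in for the global config class."""
    def __init__(self):
        self.default_recursion_depth = 2
        self.enabled_providers = {"crtsh": True, "dns": True, "shodan": False}

    def is_provider_enabled(self, provider: str) -> bool:
        return self.enabled_providers.get(provider, False)

class SessionConfig(Config):
    """Same settings as Config, but each session gets its own mutable copy."""
    pass

def create_session_config() -> "SessionConfig":
    return SessionConfig()

a, b = create_session_config(), create_session_config()
a.enabled_providers["shodan"] = True                 # per-session change...
assert b.is_provider_enabled("shodan") is False      # ...does not leak across sessions
```

Isolation holds because `Config.__init__` builds fresh dicts per instance; class-level mutable defaults would break it.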
core/session_manager.py
@@ -8,6 +8,7 @@ import pickle
 from typing import Dict, Optional, Any, List
 
 from core.scanner import Scanner
+from config import config
 
 # WARNING: Using pickle can be a security risk if the data source is not trusted.
 # In this case, we are only serializing/deserializing our own trusted Scanner objects,
@@ -16,13 +17,15 @@ from core.scanner import Scanner
 class SessionManager:
     """
     Manages multiple scanner instances for concurrent user sessions using Redis.
-    Enhanced with reliable cross-process stop signal management and immediate state updates.
     """
 
-    def __init__(self, session_timeout_minutes: int = 60):
+    def __init__(self, session_timeout_minutes: int = 0):
         """
         Initialize session manager with a Redis backend.
         """
+        if session_timeout_minutes is None:
+            session_timeout_minutes = config.session_timeout_minutes
+
         self.redis_client = redis.StrictRedis(db=0, decode_responses=False)
         self.session_timeout = session_timeout_minutes * 60  # Convert to seconds
         self.lock = threading.Lock()  # Lock for local operations, Redis handles atomic ops
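One detail worth flagging in this hunk: the new default is `0`, but the fallback branch only fires when the caller passes `None`, so a bare `SessionManager()` would get a zero-second timeout rather than `config.session_timeout_minutes`. If the intent is "use the configured value unless one is given", the conventional spelling is an `Optional[int] = None` default; a sketch of the difference:

```python
from typing import Optional

class _Cfg:
    session_timeout_minutes = 60   # stand-in for config.session_timeout_minutes
config = _Cfg()

def timeout_as_written(session_timeout_minutes: int = 0) -> int:
    if session_timeout_minutes is None:      # never true for the default 0
        session_timeout_minutes = config.session_timeout_minutes
    return session_timeout_minutes * 60

def timeout_with_none_default(session_timeout_minutes: Optional[int] = None) -> int:
    if session_timeout_minutes is None:
        session_timeout_minutes = config.session_timeout_minutes
    return session_timeout_minutes * 60

print(timeout_as_written())          # 0    - sessions would expire immediately
print(timeout_with_none_default())   # 3600 - falls back to the configured value
```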
@@ -250,7 +253,7 @@ class SessionManager:
 
     def get_session(self, session_id: str) -> Optional[Scanner]:
         """
-        Get scanner instance for a session from Redis with enhanced session ID management.
+        Get scanner instance for a session from Redis with session ID management.
         """
         if not session_id:
             return None
@@ -356,31 +359,6 @@ class SessionManager:
 
             time.sleep(300)  # Sleep for 5 minutes
 
-    def list_active_sessions(self) -> List[Dict[str, Any]]:
-        """List all active sessions for admin purposes."""
-        try:
-            session_keys = self.redis_client.keys("dnsrecon:session:*")
-            sessions = []
-
-            for session_key in session_keys:
-                session_id = session_key.decode('utf-8').split(':')[-1]
-                session_data = self._get_session_data(session_id)
-
-                if session_data:
-                    scanner = session_data.get('scanner')
-                    sessions.append({
-                        'session_id': session_id,
-                        'created_at': session_data.get('created_at'),
-                        'last_activity': session_data.get('last_activity'),
-                        'scanner_status': scanner.status if scanner else 'unknown',
-                        'current_target': scanner.current_target if scanner else None
-                    })
-
-            return sessions
-        except Exception as e:
-            print(f"ERROR: Failed to list active sessions: {e}")
-            return []
-
     def get_statistics(self) -> Dict[str, Any]:
         """Get session manager statistics."""
         try:
providers/base_provider.py
@@ -3,13 +3,10 @@
 import time
 import requests
 import threading
-import os
-import json
 from abc import ABC, abstractmethod
 from typing import List, Dict, Any, Optional, Tuple
 
 from core.logger import get_forensic_logger
-from core.graph_manager import RelationshipType
 
 
 class RateLimiter:
@@ -81,20 +78,12 @@ class BaseProvider(ABC):
         self.logger = get_forensic_logger()
         self._stop_event = None
 
-        # Caching configuration (per session)
-        self.cache_dir = f'.cache/{id(self.config)}'  # Unique cache per session config
-        self.cache_expiry = 12 * 3600  # 12 hours in seconds
-        if not os.path.exists(self.cache_dir):
-            os.makedirs(self.cache_dir)
-
         # Statistics (per provider instance)
         self.total_requests = 0
         self.successful_requests = 0
         self.failed_requests = 0
         self.total_relationships_found = 0
 
-        print(f"Initialized {name} provider with session-specific config (rate: {actual_rate_limit}/min)")
-
     def __getstate__(self):
         """Prepare BaseProvider for pickling by excluding unpicklable objects."""
         state = self.__dict__.copy()
@@ -147,7 +136,7 @@ class BaseProvider(ABC):
         pass
 
     @abstractmethod
-    def query_domain(self, domain: str) -> List[Tuple[str, str, RelationshipType, float, Dict[str, Any]]]:
+    def query_domain(self, domain: str) -> List[Tuple[str, str, str, float, Dict[str, Any]]]:
         """
         Query the provider for information about a domain.
 
@@ -160,7 +149,7 @@ class BaseProvider(ABC):
         pass
 
     @abstractmethod
-    def query_ip(self, ip: str) -> List[Tuple[str, str, RelationshipType, float, Dict[str, Any]]]:
+    def query_ip(self, ip: str) -> List[Tuple[str, str, str, float, Dict[str, Any]]]:
         """
         Query the provider for information about an IP address.
 
@@ -175,53 +164,15 @@ class BaseProvider(ABC):
     def make_request(self, url: str, method: str = "GET",
                      params: Optional[Dict[str, Any]] = None,
                      headers: Optional[Dict[str, str]] = None,
-                     target_indicator: str = "",
-                     max_retries: int = 3) -> Optional[requests.Response]:
+                     target_indicator: str = "") -> Optional[requests.Response]:
         """
-        Make a rate-limited HTTP request with aggressive stop signal handling.
-        Terminates immediately when stop is requested, including during retries.
+        Make a rate-limited HTTP request.
         """
-        # Check for cancellation before starting
         if self._is_stop_requested():
             print(f"Request cancelled before start: {url}")
             return None
 
-        # Create a unique cache key
-        cache_key = f"{self.name}_{hash(f'{method}:{url}:{json.dumps(params, sort_keys=True)}')}.json"
-        cache_path = os.path.join(self.cache_dir, cache_key)
-
-        # Check cache
-        if os.path.exists(cache_path):
-            cache_age = time.time() - os.path.getmtime(cache_path)
-            if cache_age < self.cache_expiry:
-                print(f"Returning cached response for: {url}")
-                with open(cache_path, 'r') as f:
-                    cached_data = json.load(f)
-                response = requests.Response()
-                response.status_code = cached_data['status_code']
-                response._content = cached_data['content'].encode('utf-8')
-                response.headers = cached_data['headers']
-                return response
-
-        # Determine effective max_retries based on stop signal
-        effective_max_retries = 0 if self._is_stop_requested() else max_retries
-        last_exception = None
-
-        for attempt in range(effective_max_retries + 1):
-            # AGGRESSIVE: Check for cancellation before each attempt
-            if self._is_stop_requested():
-                print(f"Request cancelled during attempt {attempt + 1}: {url}")
-                return None
-
-            # Apply rate limiting with cancellation awareness
-            if not self._wait_with_cancellation_check():
-                print(f"Request cancelled during rate limiting: {url}")
-                return None
-
-            # AGGRESSIVE: Final check before making HTTP request
-            if self._is_stop_requested():
-                print(f"Request cancelled before HTTP call: {url}")
-                return None
+        self.rate_limiter.wait_if_needed()
 
         start_time = time.time()
         response = None
@@ -230,33 +181,25 @@ class BaseProvider(ABC):
         try:
             self.total_requests += 1
 
-            # Prepare request
-            request_headers = self.session.headers.copy()
+            request_headers = dict(self.session.headers).copy()
             if headers:
                 request_headers.update(headers)
 
-            print(f"Making {method} request to: {url} (attempt {attempt + 1})")
-
-            # AGGRESSIVE: Use much shorter timeout if termination is requested
-            request_timeout = self.timeout
-            if self._is_stop_requested():
-                request_timeout = 2  # Max 2 seconds if termination requested
-                print(f"Stop requested - using short timeout: {request_timeout}s")
-
-            # Make request
+            print(f"Making {method} request to: {url}")
+
             if method.upper() == "GET":
                 response = self.session.get(
                     url,
                     params=params,
                     headers=request_headers,
-                    timeout=request_timeout
+                    timeout=self.timeout
                 )
             elif method.upper() == "POST":
                 response = self.session.post(
                     url,
                     json=params,
                     headers=request_headers,
-                    timeout=request_timeout
+                    timeout=self.timeout
                 )
             else:
                 raise ValueError(f"Unsupported HTTP method: {method}")
@@ -265,7 +208,6 @@ class BaseProvider(ABC):
             response.raise_for_status()
             self.successful_requests += 1
 
-            # Success - log, cache, and return
             duration_ms = (time.time() - start_time) * 1000
             self.logger.log_api_request(
                 provider=self.name,
@@ -277,53 +219,12 @@ class BaseProvider(ABC):
                 error=None,
                 target_indicator=target_indicator
             )
-            # Cache the successful response to disk
-            with open(cache_path, 'w') as f:
-                json.dump({
-                    'status_code': response.status_code,
-                    'content': response.text,
-                    'headers': dict(response.headers)
-                }, f)
             return response
 
         except requests.exceptions.RequestException as e:
             error = str(e)
             self.failed_requests += 1
-            print(f"Request failed (attempt {attempt + 1}): {error}")
-            last_exception = e
-
-            # AGGRESSIVE: Immediately abort retries if stop requested
-            if self._is_stop_requested():
-                print(f"Stop requested - aborting retries for: {url}")
-                break
-
-            # Check if we should retry (but only if stop not requested)
-            if attempt < effective_max_retries and self._should_retry(e):
-                # Use a longer, more respectful backoff for 429 errors
-                if isinstance(e, requests.exceptions.HTTPError) and e.response and e.response.status_code == 429:
-                    # Start with a 10-second backoff and increase exponentially
-                    backoff_time = 10 * (2 ** attempt)
-                    print(f"Rate limit hit. Retrying in {backoff_time} seconds...")
-                else:
-                    backoff_time = min(1.0, (2 ** attempt) * 0.5)  # Shorter backoff for other errors
-                    print(f"Retrying in {backoff_time} seconds...")
-
-                # AGGRESSIVE: Much shorter backoff and more frequent checking
-                if not self._sleep_with_cancellation_check(backoff_time):
-                    print(f"Stop requested during backoff - aborting: {url}")
-                    return None
-                continue
-            else:
-                break
-
-        except Exception as e:
-            error = f"Unexpected error: {str(e)}"
-            self.failed_requests += 1
-            print(f"Unexpected error: {error}")
-            last_exception = e
-            break
-
-        # All attempts failed - log and return None
+
             duration_ms = (time.time() - start_time) * 1000
             self.logger.log_api_request(
                 provider=self.name,
@@ -335,11 +236,7 @@ class BaseProvider(ABC):
|
|||||||
error=error,
|
error=error,
|
||||||
target_indicator=target_indicator
|
target_indicator=target_indicator
|
||||||
)
|
)
|
||||||
|
raise e
|
||||||
if error and last_exception:
|
|
||||||
raise last_exception
|
|
||||||
|
|
||||||
return None
|
|
||||||
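With the retry, backoff, and response-caching machinery removed, make_request becomes a single-attempt helper: it performs the HTTP call, logs it, and re-raises any failure so the core application's retry logic can decide what to do. A minimal sketch of the resulting control flow (condensed and hypothetical; the real method keeps its header handling and logging):

    def make_request_sketch(self, url, method="GET", params=None, headers=None):
        # One attempt only - retries and backoff now live in the core scanner.
        response = self.session.get(url, params=params, headers=headers,
                                    timeout=self.timeout)
        response.raise_for_status()  # HTTP errors surface as exceptions for the core to retry
        return response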

     def _is_stop_requested(self) -> bool:
         """
@@ -349,44 +246,6 @@ class BaseProvider(ABC):
                 return True
         return False

-    def _wait_with_cancellation_check(self) -> bool:
-        """
-        Wait for rate limiting while aggressively checking for cancellation.
-        Returns False if cancelled during wait.
-        """
-        current_time = time.time()
-        time_since_last = current_time - self.rate_limiter.last_request_time
-
-        if time_since_last < self.rate_limiter.min_interval:
-            sleep_time = self.rate_limiter.min_interval - time_since_last
-            if not self._sleep_with_cancellation_check(sleep_time):
-                return False
-
-        self.rate_limiter.last_request_time = time.time()
-        return True
-
-    def _sleep_with_cancellation_check(self, sleep_time: float) -> bool:
-        """
-        Sleep for the specified time while aggressively checking for cancellation.
-
-        Args:
-            sleep_time: Time to sleep in seconds
-
-        Returns:
-            bool: True if sleep completed, False if cancelled
-        """
-        sleep_start = time.time()
-        check_interval = 0.05  # Check every 50ms for aggressive responsiveness
-
-        while time.time() - sleep_start < sleep_time:
-            if self._is_stop_requested():
-                return False
-            remaining_time = sleep_time - (time.time() - sleep_start)
-            time.sleep(min(check_interval, remaining_time))
-
-        return True
-
     def set_stop_event(self, stop_event: threading.Event) -> None:
         """
         Set the stop event for this provider to enable cancellation.
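The deleted _sleep_with_cancellation_check polled time.sleep in 50 ms slices. Should an interruptible wait ever be needed again, threading.Event.wait gives the same contract without a polling loop; a sketch, not part of this change set:

    import threading

    def sleep_unless_stopped(stop_event: threading.Event, seconds: float) -> bool:
        # Event.wait returns True if the event is set before the timeout elapses.
        # Invert it to match the old helper's contract:
        # True = the full sleep completed, False = cancelled mid-sleep.
        return not stop_event.wait(timeout=seconds)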
@@ -396,30 +255,8 @@ class BaseProvider(ABC):
         """
         self._stop_event = stop_event

-    def _should_retry(self, exception: requests.exceptions.RequestException) -> bool:
-        """
-        Determine if a request should be retried based on the exception.
-
-        Args:
-            exception: The request exception that occurred
-
-        Returns:
-            True if the request should be retried
-        """
-        # Retry on connection errors and timeouts
-        if isinstance(exception, (requests.exceptions.ConnectionError,
-                                  requests.exceptions.Timeout)):
-            return True
-
-        if isinstance(exception, requests.exceptions.HTTPError):
-            if hasattr(exception, 'response') and exception.response:
-                # Retry on server errors (5xx) AND on rate-limiting errors (429)
-                return exception.response.status_code >= 500 or exception.response.status_code == 429
-
-        return False
-
     def log_relationship_discovery(self, source_node: str, target_node: str,
-                                   relationship_type: RelationshipType,
+                                   relationship_type: str,
                                    confidence_score: float,
                                    raw_data: Dict[str, Any],
                                    discovery_method: str) -> None:
@@ -439,7 +276,7 @@ class BaseProvider(ABC):
         self.logger.log_relationship_discovery(
             source_node=source_node,
             target_node=target_node,
-            relationship_type=relationship_type.relationship_name,
+            relationship_type=relationship_type,
             confidence_score=confidence_score,
             provider=self.name,
             raw_data=raw_data,
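Throughout this change set, RelationshipType enum members are replaced by plain strings, which is why providers no longer import core.graph_manager. A call to the method above now looks like this (illustrative values, not from the diff):

    self.log_relationship_discovery(
        source_node="example.com",         # hypothetical nodes for illustration
        target_node="mail.example.com",
        relationship_type="mx_record",     # was RelationshipType.MX_RECORD
        confidence_score=0.8,
        raw_data={},
        discovery_method="dns_mx_record",
    )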
dnsrecon/providers/crtsh_provider.py
@@ -1,44 +1,60 @@
-"""
-Certificate Transparency provider using crt.sh.
-Discovers domain relationships through certificate SAN analysis with comprehensive certificate tracking.
-Stores certificates as metadata on domain nodes rather than creating certificate nodes.
-"""
+# dnsrecon/providers/crtsh_provider.py

 import json
 import re
+import os
+from pathlib import Path
 from typing import List, Dict, Any, Tuple, Set
-from urllib.parse import quote
 from datetime import datetime, timezone

+# New dependency required for this provider
+try:
+    import psycopg2
+    import psycopg2.extras
+    PSYCOPG2_AVAILABLE = True
+except ImportError:
+    PSYCOPG2_AVAILABLE = False
+
 from .base_provider import BaseProvider
 from utils.helpers import _is_valid_domain
-from core.graph_manager import RelationshipType
+# We use requests only to raise the same exception type for compatibility with core retry logic
+import requests


 class CrtShProvider(BaseProvider):
     """
-    Provider for querying crt.sh certificate transparency database.
-    Now uses session-specific configuration and caching.
+    Provider for querying crt.sh certificate transparency database via its public PostgreSQL endpoint.
+    This version is designed to be a drop-in, high-performance replacement for the API-based provider.
+    It preserves the same caching and data processing logic.
     """

-    def __init__(self, session_config=None):
-        """Initialize CrtSh provider with session-specific configuration."""
+    def __init__(self, name=None, session_config=None):
+        """Initialize CrtShDB provider with session-specific configuration."""
         super().__init__(
             name="crtsh",
-            rate_limit=60,
-            timeout=15,
+            rate_limit=0,  # No rate limit for direct DB access
+            timeout=60,    # Increased timeout for potentially long DB queries
             session_config=session_config
         )
-        self.base_url = "https://crt.sh/"
+        # Database connection details
+        self.db_host = "crt.sh"
+        self.db_port = 5432
+        self.db_name = "certwatch"
+        self.db_user = "guest"
         self._stop_event = None

+        # Initialize cache directory (same as original provider)
+        self.cache_dir = Path('cache') / 'crtsh'
+        self.cache_dir.mkdir(parents=True, exist_ok=True)
+
     def get_name(self) -> str:
         """Return the provider name."""
         return "crtsh"

     def get_display_name(self) -> str:
         """Return the provider display name for the UI."""
-        return "crt.sh"
+        return "crt.sh (DB)"

     def requires_api_key(self) -> bool:
         """Return True if the provider requires an API key."""
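The connection details above target crt.sh's public, read-only PostgreSQL mirror of the certwatch database. Assuming the endpoint is reachable from your network, it can be probed outside the provider with a bare psycopg2 connection using the same parameters as __init__:

    import psycopg2

    # Guest access to crt.sh's public certwatch database, mirroring the
    # parameters set in CrtShProvider.__init__ above.
    conn = psycopg2.connect(dbname="certwatch", user="guest",
                            host="crt.sh", port=5432, connect_timeout=10)
    with conn.cursor() as cur:
        cur.execute("SELECT 1")  # trivial probe query
        print(cur.fetchone())    # (1,)
    conn.close()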
@@ -50,224 +66,362 @@ class CrtShProvider(BaseProvider):

     def is_available(self) -> bool:
         """
-        Check if the provider is configured to be used.
-        This method is intentionally simple and does not perform a network request
-        to avoid blocking application startup.
+        Check if the provider can be used. Requires the psycopg2 library.
         """
+        if not PSYCOPG2_AVAILABLE:
+            self.logger.logger.warning("psycopg2 library not found. CrtShDBProvider is unavailable. "
+                                       "Please run 'pip install psycopg2-binary'.")
+            return False
         return True

-    def _parse_certificate_date(self, date_string: str) -> datetime:
+    def _query_crtsh(self, domain: str) -> List[Dict[str, Any]]:
         """
-        Parse certificate date from crt.sh format.
-
-        Args:
-            date_string: Date string from crt.sh API
-
-        Returns:
-            Parsed datetime object in UTC
+        Query the crt.sh PostgreSQL database for raw certificate data.
+        Raises exceptions for DB/network errors to allow core logic to retry.
         """
+        conn = None
+        certificates = []
+
+        # SQL Query to find all certificate IDs related to the domain (including subdomains),
+        # then retrieve comprehensive details for each certificate, mimicking the JSON API structure.
+        sql_query = """
+            WITH certificates_of_interest AS (
+                SELECT DISTINCT ci.certificate_id
+                FROM certificate_identity ci
+                WHERE ci.name_value ILIKE %(domain_wildcard)s OR ci.name_value = %(domain)s
+            )
+            SELECT
+                c.id,
+                c.serial_number,
+                c.not_before,
+                c.not_after,
+                (SELECT min(entry_timestamp) FROM ct_log_entry cle WHERE cle.certificate_id = c.id) as entry_timestamp,
+                ca.id as issuer_ca_id,
+                ca.name as issuer_name,
+                (SELECT array_to_string(array_agg(DISTINCT ci.name_value), E'\n') FROM certificate_identity ci WHERE ci.certificate_id = c.id) as name_value,
+                (SELECT name_value FROM certificate_identity ci WHERE ci.certificate_id = c.id AND ci.name_type = 'commonName' LIMIT 1) as common_name
+            FROM
+                certificate c
+            JOIN ca ON c.issuer_ca_id = ca.id
+            WHERE c.id IN (SELECT certificate_id FROM certificates_of_interest);
+        """
+
+        try:
+            conn = psycopg2.connect(
+                dbname=self.db_name,
+                user=self.db_user,
+                host=self.db_host,
+                port=self.db_port,
+                connect_timeout=self.timeout
+            )
+
+            with conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor) as cursor:
+                cursor.execute(sql_query, {'domain': domain, 'domain_wildcard': f'%.{domain}'})
+                results = cursor.fetchall()
+                certificates = [dict(row) for row in results]
+
+            self.logger.logger.info(f"crt.sh DB query for '{domain}' returned {len(certificates)} certificates.")
+
+        except psycopg2.Error as e:
+            self.logger.logger.error(f"PostgreSQL query failed for {domain}: {e}")
+            # Raise a RequestException to be compatible with the existing retry logic in the core application
+            raise requests.exceptions.RequestException(f"PostgreSQL query failed: {e}") from e
+        finally:
+            if conn:
+                conn.close()
+
+        return certificates
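Note the two bound parameters: %(domain)s matches the apex name exactly, while %(domain_wildcard)s expands to a leading-wildcard pattern so the ILIKE clause also catches every subdomain:

    domain = "example.com"  # illustrative input
    params = {'domain': domain, 'domain_wildcard': f'%.{domain}'}
    # ILIKE '%.example.com' matches www.example.com, a.b.example.com, ...
    # while name_value = 'example.com' catches the apex itself.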
+    def query_domain(self, domain: str) -> List[Tuple[str, str, str, float, Dict[str, Any]]]:
+        """
+        Query crt.sh for certificates containing the domain with caching support.
+        Properly raises exceptions for network errors to allow core logic retries.
+        """
+        if not _is_valid_domain(domain):
+            return []
+
+        if self._stop_event and self._stop_event.is_set():
+            return []
+
+        cache_file = self._get_cache_file_path(domain)
+        cache_status = self._get_cache_status(cache_file)
+
+        certificates = []
+
+        try:
+            if cache_status == "fresh":
+                certificates = self._load_cached_certificates(cache_file)
+                self.logger.logger.info(f"Using cached data for {domain} ({len(certificates)} certificates)")
+
+            elif cache_status == "not_found":
+                # Fresh query from DB, create new cache
+                certificates = self._query_crtsh(domain)
+                if certificates:
+                    self._create_cache_file(cache_file, domain, self._serialize_certs_for_cache(certificates))
+                else:
+                    self.logger.logger.info(f"No certificates found for {domain}, not caching")
+
+            elif cache_status == "stale":
+                try:
+                    new_certificates = self._query_crtsh(domain)
+                    if new_certificates:
+                        certificates = self._append_to_cache(cache_file, self._serialize_certs_for_cache(new_certificates))
+                    else:
+                        certificates = self._load_cached_certificates(cache_file)
+                except requests.exceptions.RequestException:
+                    certificates = self._load_cached_certificates(cache_file)
+                    if certificates:
+                        self.logger.logger.warning(f"DB query failed for {domain}, using stale cache data.")
+                    else:
+                        raise
+
+        except requests.exceptions.RequestException as e:
+            # Re-raise so core logic can retry
+            self.logger.logger.error(f"DB query failed for {domain}: {e}")
+            raise e
+        except json.JSONDecodeError as e:
+            # JSON parsing errors from cache should also be handled
+            self.logger.logger.error(f"Failed to parse JSON from cache for {domain}: {e}")
+            raise e
+
+        if self._stop_event and self._stop_event.is_set():
+            return []
+
+        if not certificates:
+            return []
+
+        return self._process_certificates_to_relationships(domain, certificates)
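The three-way cache branch above condenses to a simple policy; an illustrative restatement, not the actual code path:

    def resolve_with_cache(status, load_cached, query_db):
        if status == "fresh":
            return load_cached()             # serve from cache, no DB round-trip
        if status == "not_found":
            return query_db()                # query DB; caller caches non-empty results
        try:                                 # "stale": re-query and merge
            return query_db() or load_cached()
        except Exception:
            return load_cached()             # DB down: stale data beats no data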
+    def _serialize_certs_for_cache(self, certificates: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
+        """
+        Serialize certificate data for JSON caching, converting datetime objects to ISO strings.
+        """
+        serialized_certs = []
+        for cert in certificates:
+            serialized_cert = cert.copy()
+            for key in ['not_before', 'not_after', 'entry_timestamp']:
+                if isinstance(serialized_cert.get(key), datetime):
+                    # Ensure datetime is timezone-aware before converting
+                    dt_obj = serialized_cert[key]
+                    if dt_obj.tzinfo is None:
+                        dt_obj = dt_obj.replace(tzinfo=timezone.utc)
+                    serialized_cert[key] = dt_obj.isoformat()
+            serialized_certs.append(serialized_cert)
+        return serialized_certs
+
+    # --- All methods below are copied directly from the original CrtShProvider ---
+    # They are compatible because _query_crtsh returns data in the same format
+    # as the original _query_crtsh_api method. A small adjustment is made to
+    # _parse_certificate_date to handle datetime objects directly from the DB.
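The serialization is lossless for timezone-aware values, since datetime.isoformat() and datetime.fromisoformat() round-trip exactly - which is what lets _parse_certificate_date below accept either form:

    from datetime import datetime, timezone

    ts = datetime(2024, 1, 2, 3, 4, 5, tzinfo=timezone.utc)  # illustrative timestamp
    s = ts.isoformat()                      # '2024-01-02T03:04:05+00:00'
    assert datetime.fromisoformat(s) == ts  # round-trips exactly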
+    def _get_cache_file_path(self, domain: str) -> Path:
+        """Generate cache file path for a domain."""
+        safe_domain = domain.replace('.', '_').replace('/', '_').replace('\\', '_')
+        return self.cache_dir / f"{safe_domain}.json"
+
+    def _get_cache_status(self, cache_file_path: Path) -> str:
+        """Check cache status for a domain."""
+        if not cache_file_path.exists():
+            return "not_found"
+
+        try:
+            with open(cache_file_path, 'r') as f:
+                cache_data = json.load(f)
+
+            last_query_str = cache_data.get("last_upstream_query")
+            if not last_query_str:
+                return "stale"
+
+            last_query = datetime.fromisoformat(last_query_str.replace('Z', '+00:00'))
+            hours_since_query = (datetime.now(timezone.utc) - last_query).total_seconds() / 3600
+
+            cache_timeout = self.config.cache_timeout_hours
+            if hours_since_query < cache_timeout:
+                return "fresh"
+            else:
+                return "stale"
+
+        except (json.JSONDecodeError, ValueError, KeyError) as e:
+            self.logger.logger.warning(f"Invalid cache file format for {cache_file_path}: {e}")
+            return "stale"
+
+    def _load_cached_certificates(self, cache_file_path: Path) -> List[Dict[str, Any]]:
+        """Load certificates from cache file."""
+        try:
+            with open(cache_file_path, 'r') as f:
+                cache_data = json.load(f)
+            return cache_data.get('certificates', [])
+        except (json.JSONDecodeError, FileNotFoundError, KeyError) as e:
+            self.logger.logger.error(f"Failed to load cached certificates from {cache_file_path}: {e}")
+            return []
+
+    def _create_cache_file(self, cache_file_path: Path, domain: str, certificates: List[Dict[str, Any]]) -> None:
+        """Create new cache file with certificates."""
+        try:
+            cache_data = {
+                "domain": domain,
+                "first_cached": datetime.now(timezone.utc).isoformat(),
+                "last_upstream_query": datetime.now(timezone.utc).isoformat(),
+                "upstream_query_count": 1,
+                "certificates": certificates
+            }
+            cache_file_path.parent.mkdir(parents=True, exist_ok=True)
+            with open(cache_file_path, 'w') as f:
+                json.dump(cache_data, f, separators=(',', ':'))
+            self.logger.logger.info(f"Created cache file for {domain} with {len(certificates)} certificates")
+        except Exception as e:
+            self.logger.logger.warning(f"Failed to create cache file for {domain}: {e}")
+
+    def _append_to_cache(self, cache_file_path: Path, new_certificates: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
+        """Append new certificates to existing cache and return all certificates."""
+        try:
+            with open(cache_file_path, 'r') as f:
+                cache_data = json.load(f)
+
+            existing_ids = {cert.get('id') for cert in cache_data.get('certificates', [])}
+            added_count = 0
+            for cert in new_certificates:
+                cert_id = cert.get('id')
+                if cert_id and cert_id not in existing_ids:
+                    cache_data['certificates'].append(cert)
+                    existing_ids.add(cert_id)
+                    added_count += 1
+
+            cache_data['last_upstream_query'] = datetime.now(timezone.utc).isoformat()
+            cache_data['upstream_query_count'] = cache_data.get('upstream_query_count', 0) + 1
+
+            with open(cache_file_path, 'w') as f:
+                json.dump(cache_data, f, separators=(',', ':'))

+            total_certs = len(cache_data['certificates'])
+            self.logger.logger.info(f"Appended {added_count} new certificates to cache. Total: {total_certs}")
+            return cache_data['certificates']
+        except Exception as e:
+            self.logger.logger.warning(f"Failed to append to cache: {e}")
+            return new_certificates
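On disk, each domain gets one JSON document shaped exactly like the structure _create_cache_file writes; for example.com the file cache/crtsh/example_com.json would hold (values illustrative):

    cache_data = {
        "domain": "example.com",
        "first_cached": "2024-01-02T03:04:05+00:00",
        "last_upstream_query": "2024-01-02T03:04:05+00:00",
        "upstream_query_count": 1,
        "certificates": [],  # serialized rows from _query_crtsh
    }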
+    def _parse_issuer_organization(self, issuer_dn: str) -> str:
+        """Parse the issuer Distinguished Name to extract just the organization name."""
+        if not issuer_dn: return issuer_dn
+        try:
+            components = [comp.strip() for comp in issuer_dn.split(',')]
+            for component in components:
+                if component.startswith('O='):
+                    org_name = component[2:].strip()
+                    if org_name.startswith('"') and org_name.endswith('"'):
+                        org_name = org_name[1:-1]
+                    return org_name
+            return issuer_dn
+        except Exception as e:
+            self.logger.logger.debug(f"Failed to parse issuer DN '{issuer_dn}': {e}")
+            return issuer_dn
+
+    def _parse_certificate_date(self, date_input: Any) -> datetime:
+        """
+        Parse certificate date from various formats (string from cache, datetime from DB).
+        """
+        if isinstance(date_input, datetime):
+            # If it's already a datetime object from the DB, just ensure it's UTC
+            if date_input.tzinfo is None:
+                return date_input.replace(tzinfo=timezone.utc)
+            return date_input
+
+        date_string = str(date_input)
         if not date_string:
             raise ValueError("Empty date string")

         try:
-            # Handle various possible formats from crt.sh
-            if date_string.endswith('Z'):
-                return datetime.fromisoformat(date_string[:-1]).replace(tzinfo=timezone.utc)
-            elif '+' in date_string or date_string.endswith('UTC'):
-                # Handle timezone-aware strings
-                date_string = date_string.replace('UTC', '').strip()
-                if '+' in date_string:
-                    date_string = date_string.split('+')[0]
-                return datetime.fromisoformat(date_string).replace(tzinfo=timezone.utc)
-            else:
-                # Assume UTC if no timezone specified
-                return datetime.fromisoformat(date_string).replace(tzinfo=timezone.utc)
-        except Exception as e:
-            # Fallback: try parsing without timezone info and assume UTC
+            if 'Z' in date_string:
+                return datetime.fromisoformat(date_string.replace('Z', '+00:00'))
+            # Handle standard ISO format with or without timezone
+            dt = datetime.fromisoformat(date_string)
+            if dt.tzinfo is None:
+                return dt.replace(tzinfo=timezone.utc)
+            return dt
+        except ValueError as e:
             try:
+                # Fallback for other formats
                 return datetime.strptime(date_string[:19], "%Y-%m-%dT%H:%M:%S").replace(tzinfo=timezone.utc)
             except Exception:
                 raise ValueError(f"Unable to parse date: {date_string}") from e

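_parse_issuer_organization reduces a full issuer DN to just its O= component; worked through on an illustrative DN:

    dn = 'C=US, O="Let\'s Encrypt", CN=R3'  # illustrative issuer DN
    org = next(c[2:].strip().strip('"')
               for c in (p.strip() for p in dn.split(','))
               if c.startswith('O='))
    print(org)  # Let's Encrypt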
     def _is_cert_valid(self, cert_data: Dict[str, Any]) -> bool:
-        """
-        Check if a certificate is currently valid based on its expiry date.
-
-        Args:
-            cert_data: Certificate data from crt.sh
-
-        Returns:
-            True if certificate is currently valid (not expired)
-        """
+        """Check if a certificate is currently valid based on its expiry date."""
         try:
             not_after_str = cert_data.get('not_after')
-            if not not_after_str:
-                return False
-
+            if not not_after_str: return False
             not_after_date = self._parse_certificate_date(not_after_str)
             not_before_str = cert_data.get('not_before')

             now = datetime.now(timezone.utc)

-            # Check if certificate is within valid date range
             is_not_expired = not_after_date > now

             if not_before_str:
                 not_before_date = self._parse_certificate_date(not_before_str)
                 is_not_before_valid = not_before_date <= now
                 return is_not_expired and is_not_before_valid

             return is_not_expired

         except Exception as e:
             self.logger.logger.debug(f"Certificate validity check failed: {e}")
             return False

     def _extract_certificate_metadata(self, cert_data: Dict[str, Any]) -> Dict[str, Any]:
-        """
-        Extract comprehensive metadata from certificate data.
-
-        Args:
-            cert_data: Raw certificate data from crt.sh
-
-        Returns:
-            Comprehensive certificate metadata dictionary
-        """
+        # This method works as-is.
+        raw_issuer_name = cert_data.get('issuer_name', '')
+        parsed_issuer_name = self._parse_issuer_organization(raw_issuer_name)
         metadata = {
             'certificate_id': cert_data.get('id'),
             'serial_number': cert_data.get('serial_number'),
-            'issuer_name': cert_data.get('issuer_name'),
+            'issuer_name': parsed_issuer_name,
             'issuer_ca_id': cert_data.get('issuer_ca_id'),
             'common_name': cert_data.get('common_name'),
             'not_before': cert_data.get('not_before'),
             'not_after': cert_data.get('not_after'),
             'entry_timestamp': cert_data.get('entry_timestamp'),
-            'source': 'crt.sh'
+            'source': 'crt.sh (DB)'
         }

-        # Add computed fields
         try:
             if metadata['not_before'] and metadata['not_after']:
                 not_before = self._parse_certificate_date(metadata['not_before'])
                 not_after = self._parse_certificate_date(metadata['not_after'])

                 metadata['validity_period_days'] = (not_after - not_before).days
                 metadata['is_currently_valid'] = self._is_cert_valid(cert_data)
                 metadata['expires_soon'] = (not_after - datetime.now(timezone.utc)).days <= 30

-                # Add human-readable dates
                 metadata['not_before'] = not_before.strftime('%Y-%m-%d %H:%M:%S UTC')
                 metadata['not_after'] = not_after.strftime('%Y-%m-%d %H:%M:%S UTC')

         except Exception as e:
             self.logger.logger.debug(f"Error computing certificate metadata: {e}")
             metadata['is_currently_valid'] = False
             metadata['expires_soon'] = False

         return metadata

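The validity rule is simply not_before <= now < not_after, with a missing not_before treated leniently; on hypothetical dates:

    from datetime import datetime, timezone

    now = datetime(2024, 6, 1, tzinfo=timezone.utc)         # hypothetical "today"
    not_before = datetime(2024, 1, 1, tzinfo=timezone.utc)
    not_after = datetime(2024, 12, 31, tzinfo=timezone.utc)
    is_valid = not_before <= now and not_after > now        # True: inside the window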
-    def query_domain(self, domain: str) -> List[Tuple[str, str, RelationshipType, float, Dict[str, Any]]]:
-        """
-        Query crt.sh for certificates containing the domain.
-        Enhanced with more frequent stop signal checking for reliable termination.
-        """
-        if not _is_valid_domain(domain):
-            return []
-
-        # Check for cancellation before starting
-        if self._stop_event and self._stop_event.is_set():
-            print(f"CrtSh query cancelled before start for domain: {domain}")
-            return []
-
+    def _process_certificates_to_relationships(self, domain: str, certificates: List[Dict[str, Any]]) -> List[Tuple[str, str, str, float, Dict[str, Any]]]:
+        # This method works as-is.
         relationships = []
+        if self._stop_event and self._stop_event.is_set(): return []

-        try:
-            # Query crt.sh for certificates
-            url = f"{self.base_url}?q={quote(domain)}&output=json"
-            response = self.make_request(url, target_indicator=domain, max_retries=1)  # Reduce retries for faster cancellation
-
-            if not response or response.status_code != 200:
-                return []
-
-            # Check for cancellation after request
-            if self._stop_event and self._stop_event.is_set():
-                print(f"CrtSh query cancelled after request for domain: {domain}")
-                return []
-
-            certificates = response.json()
-
-            if not certificates:
-                return []
-
-            # Check for cancellation before processing
-            if self._stop_event and self._stop_event.is_set():
-                print(f"CrtSh query cancelled before processing for domain: {domain}")
-                return []
-
-            # Aggregate certificate data by domain
         domain_certificates = {}
         all_discovered_domains = set()

-        # Process certificates with enhanced cancellation checking
         for i, cert_data in enumerate(certificates):
-            # Check for cancellation every 5 certificates instead of 10 for faster response
-            if i % 5 == 0 and self._stop_event and self._stop_event.is_set():
-                print(f"CrtSh processing cancelled at certificate {i} for domain: {domain}")
-                break
-
+            if i % 5 == 0 and self._stop_event and self._stop_event.is_set(): break
             cert_metadata = self._extract_certificate_metadata(cert_data)
             cert_domains = self._extract_domains_from_certificate(cert_data)
+            all_discovered_domains.update(cert_domains)

-            # Add all domains from this certificate to our tracking
             for cert_domain in cert_domains:
-                # Additional stop check during domain processing
-                if i % 20 == 0 and self._stop_event and self._stop_event.is_set():
-                    print(f"CrtSh domain processing cancelled for domain: {domain}")
-                    break
-
-                if not _is_valid_domain(cert_domain):
-                    continue
-
-                all_discovered_domains.add(cert_domain)
-
-                # Initialize domain certificate list if needed
+                if not _is_valid_domain(cert_domain): continue
                 if cert_domain not in domain_certificates:
                     domain_certificates[cert_domain] = []
-
-                # Add this certificate to the domain's certificate list
                 domain_certificates[cert_domain].append(cert_metadata)

-        # Final cancellation check before creating relationships
-        if self._stop_event and self._stop_event.is_set():
-            print(f"CrtSh query cancelled before relationship creation for domain: {domain}")
-            return []
-
-        # Create relationships from query domain to ALL discovered domains with stop checking
+        if self._stop_event and self._stop_event.is_set(): return []
         for i, discovered_domain in enumerate(all_discovered_domains):
-            if discovered_domain == domain:
-                continue  # Skip self-relationships
-
-            # Check for cancellation every 10 relationships
-            if i % 10 == 0 and self._stop_event and self._stop_event.is_set():
-                print(f"CrtSh relationship creation cancelled for domain: {domain}")
-                break
-
-            if not _is_valid_domain(discovered_domain):
-                continue
-
-            # Get certificates for both domains
+            if discovered_domain == domain: continue
+            if i % 10 == 0 and self._stop_event and self._stop_event.is_set(): break
+            if not _is_valid_domain(discovered_domain): continue
             query_domain_certs = domain_certificates.get(domain, [])
             discovered_domain_certs = domain_certificates.get(discovered_domain, [])

-            # Find shared certificates (for metadata purposes)
             shared_certificates = self._find_shared_certificates(query_domain_certs, discovered_domain_certs)

-            # Calculate confidence based on relationship type and shared certificates
             confidence = self._calculate_domain_relationship_confidence(
                 domain, discovered_domain, shared_certificates, all_discovered_domains
             )

-            # Create comprehensive raw data for the relationship
             relationship_raw_data = {
                 'relationship_type': 'certificate_discovery',
                 'shared_certificates': shared_certificates,
@@ -278,270 +432,82 @@ class CrtShProvider(BaseProvider):
                     discovered_domain: self._summarize_certificates(discovered_domain_certs)
                 }
             }

-            # Create domain -> domain relationship
             relationships.append((
-                domain,
-                discovered_domain,
-                RelationshipType.SAN_CERTIFICATE,
-                confidence,
-                relationship_raw_data
+                domain, discovered_domain, 'san_certificate', confidence, relationship_raw_data
             ))

-            # Log the relationship discovery
             self.log_relationship_discovery(
-                source_node=domain,
-                target_node=discovered_domain,
-                relationship_type=RelationshipType.SAN_CERTIFICATE,
-                confidence_score=confidence,
-                raw_data=relationship_raw_data,
+                source_node=domain, target_node=discovered_domain, relationship_type='san_certificate',
+                confidence_score=confidence, raw_data=relationship_raw_data,
                 discovery_method="certificate_transparency_analysis"
             )

-        except json.JSONDecodeError as e:
-            self.logger.logger.error(f"Failed to parse JSON response from crt.sh: {e}")
-
         return relationships

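With the enum gone, every discovered edge is now a plain 5-tuple of (source, target, type string, confidence, raw data); illustrative values:

    rel = (
        "example.com",         # source node (the query domain)
        "www.example.com",     # target node (discovered via certificates)
        "san_certificate",     # relationship type, now a plain string
        0.9,                   # confidence score
        {"relationship_type": "certificate_discovery"},  # trimmed raw_data
    )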
+    # --- All remaining helper methods are identical to the original and fully compatible ---
+    # They are included here for completeness.

     def _find_shared_certificates(self, certs1: List[Dict[str, Any]], certs2: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
-        """
-        Find certificates that are shared between two domain certificate lists.
-
-        Args:
-            certs1: First domain's certificates
-            certs2: Second domain's certificates
-
-        Returns:
-            List of shared certificate metadata
-        """
-        shared = []
-
-        # Create a set of certificate IDs from the first list for quick lookup
         cert1_ids = {cert.get('certificate_id') for cert in certs1 if cert.get('certificate_id')}
-
-        # Find certificates in the second list that match
-        for cert in certs2:
-            if cert.get('certificate_id') in cert1_ids:
-                shared.append(cert)
-
-        return shared
+        return [cert for cert in certs2 if cert.get('certificate_id') in cert1_ids]

     def _summarize_certificates(self, certificates: List[Dict[str, Any]]) -> Dict[str, Any]:
-        """
-        Create a summary of certificates for a domain.
-
-        Args:
-            certificates: List of certificate metadata
-
-        Returns:
-            Summary dictionary with aggregate statistics
-        """
-        if not certificates:
-            return {
-                'total_certificates': 0,
-                'valid_certificates': 0,
-                'expired_certificates': 0,
-                'expires_soon_count': 0,
-                'unique_issuers': [],
-                'latest_certificate': None,
-                'has_valid_cert': False
-            }
-
+        if not certificates: return {'total_certificates': 0, 'valid_certificates': 0, 'expired_certificates': 0, 'expires_soon_count': 0, 'unique_issuers': [], 'latest_certificate': None, 'has_valid_cert': False}
         valid_count = sum(1 for cert in certificates if cert.get('is_currently_valid'))
-        expired_count = len(certificates) - valid_count
         expires_soon_count = sum(1 for cert in certificates if cert.get('expires_soon'))

-        # Get unique issuers
         unique_issuers = list(set(cert.get('issuer_name') for cert in certificates if cert.get('issuer_name')))
+        latest_cert, latest_date = None, None

-        # Find the most recent certificate
-        latest_cert = None
-        latest_date = None
-
         for cert in certificates:
             try:
                 if cert.get('not_before'):
                     cert_date = self._parse_certificate_date(cert['not_before'])
                     if latest_date is None or cert_date > latest_date:
-                        latest_date = cert_date
-                        latest_cert = cert
-            except Exception:
-                continue
-
-        return {
-            'total_certificates': len(certificates),
-            'valid_certificates': valid_count,
-            'expired_certificates': expired_count,
-            'expires_soon_count': expires_soon_count,
-            'unique_issuers': unique_issuers,
-            'latest_certificate': latest_cert,
-            'has_valid_cert': valid_count > 0,
-            'certificate_details': certificates  # Full details for forensic analysis
-        }
+                        latest_date, latest_cert = cert_date, cert
+            except Exception: continue
+        return {'total_certificates': len(certificates), 'valid_certificates': valid_count, 'expired_certificates': len(certificates) - valid_count, 'expires_soon_count': expires_soon_count, 'unique_issuers': unique_issuers, 'latest_certificate': latest_cert, 'has_valid_cert': valid_count > 0, 'certificate_details': certificates}

-    def _calculate_domain_relationship_confidence(self, domain1: str, domain2: str,
-                                                  shared_certificates: List[Dict[str, Any]],
-                                                  all_discovered_domains: Set[str]) -> float:
-        """
-        Calculate confidence score for domain relationship based on various factors.
-
-        Args:
-            domain1: Source domain (query domain)
-            domain2: Target domain (discovered domain)
-            shared_certificates: List of shared certificate metadata
-            all_discovered_domains: All domains discovered in this query
-
-        Returns:
-            Confidence score between 0.0 and 1.0
-        """
-        base_confidence = RelationshipType.SAN_CERTIFICATE.default_confidence
-
-        # Adjust confidence based on domain relationship context
+    def _calculate_domain_relationship_confidence(self, domain1: str, domain2: str, shared_certificates: List[Dict[str, Any]], all_discovered_domains: Set[str]) -> float:
+        base_confidence, context_bonus, shared_bonus, validity_bonus, issuer_bonus = 0.9, 0.0, 0.0, 0.0, 0.0
         relationship_context = self._determine_relationship_context(domain2, domain1)
-        if relationship_context == 'exact_match':
-            context_bonus = 0.0  # This shouldn't happen, but just in case
-        elif relationship_context == 'subdomain':
-            context_bonus = 0.1  # High confidence for subdomains
-        elif relationship_context == 'parent_domain':
-            context_bonus = 0.05  # Medium confidence for parent domains
-        else:
-            context_bonus = 0.0  # Related domains get base confidence
-
-        # Adjust confidence based on shared certificates
-        if shared_certificates:
-            shared_count = len(shared_certificates)
-            if shared_count >= 3:
-                shared_bonus = 0.1
-            elif shared_count >= 2:
-                shared_bonus = 0.05
-            else:
-                shared_bonus = 0.02
-
-            # Additional bonus for valid shared certificates
-            valid_shared = sum(1 for cert in shared_certificates if cert.get('is_currently_valid'))
-            if valid_shared > 0:
-                validity_bonus = 0.05
-            else:
-                validity_bonus = 0.0
-        else:
-            # Even without shared certificates, domains found in the same query have some relationship
-            shared_bonus = 0.0
-            validity_bonus = 0.0
-
-        # Adjust confidence based on certificate issuer reputation (if shared certificates exist)
-        issuer_bonus = 0.0
+        if relationship_context == 'subdomain': context_bonus = 0.1
+        elif relationship_context == 'parent_domain': context_bonus = 0.05
         if shared_certificates:
+            if len(shared_certificates) >= 3: shared_bonus = 0.1
+            elif len(shared_certificates) >= 2: shared_bonus = 0.05
+            else: shared_bonus = 0.02
+            if any(cert.get('is_currently_valid') for cert in shared_certificates): validity_bonus = 0.05
             for cert in shared_certificates:
-                issuer = cert.get('issuer_name', '').lower()
-                if any(trusted_ca in issuer for trusted_ca in ['let\'s encrypt', 'digicert', 'sectigo', 'globalsign']):
+                if any(ca in cert.get('issuer_name', '').lower() for ca in ['let\'s encrypt', 'digicert', 'sectigo', 'globalsign']):
                     issuer_bonus = max(issuer_bonus, 0.03)
                     break
-
-        # Calculate final confidence
-        final_confidence = base_confidence + context_bonus + shared_bonus + validity_bonus + issuer_bonus
-        return max(0.1, min(1.0, final_confidence))  # Clamp between 0.1 and 1.0
+        return max(0.1, min(1.0, base_confidence + context_bonus + shared_bonus + validity_bonus + issuer_bonus))

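Worked through for a subdomain that shares three currently valid certificates from a well-known CA with the query domain: 0.9 base + 0.1 context + 0.1 shared + 0.05 validity + 0.03 issuer = 1.18, which the final clamp caps at 1.0:

    base, context, shared, validity, issuer = 0.9, 0.1, 0.1, 0.05, 0.03
    score = max(0.1, min(1.0, base + context + shared + validity + issuer))
    print(score)  # 1.0 - the raw sum of 1.18 is clamped to the ceiling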
     def _determine_relationship_context(self, cert_domain: str, query_domain: str) -> str:
-        """
-        Determine the context of the relationship between certificate domain and query domain.
-
-        Args:
-            cert_domain: Domain found in certificate
-            query_domain: Original query domain
-
-        Returns:
-            String describing the relationship context
-        """
-        if cert_domain == query_domain:
-            return 'exact_match'
-        elif cert_domain.endswith(f'.{query_domain}'):
-            return 'subdomain'
-        elif query_domain.endswith(f'.{cert_domain}'):
-            return 'parent_domain'
-        else:
-            return 'related_domain'
+        if cert_domain == query_domain: return 'exact_match'
+        if cert_domain.endswith(f'.{query_domain}'): return 'subdomain'
+        if query_domain.endswith(f'.{cert_domain}'): return 'parent_domain'
+        return 'related_domain'

-    def query_ip(self, ip: str) -> List[Tuple[str, str, RelationshipType, float, Dict[str, Any]]]:
-        """
-        Query crt.sh for certificates containing the IP address.
-        Note: crt.sh doesn't typically index by IP, so this returns empty results.
-
-        Args:
-            ip: IP address to investigate
-
-        Returns:
-            Empty list (crt.sh doesn't support IP-based certificate queries effectively)
-        """
-        # crt.sh doesn't effectively support IP-based certificate queries
+    def query_ip(self, ip: str) -> List[Tuple[str, str, str, float, Dict[str, Any]]]:
         return []

     def _extract_domains_from_certificate(self, cert_data: Dict[str, Any]) -> Set[str]:
-        """
-        Extract all domains from certificate data.
-
-        Args:
-            cert_data: Certificate data from crt.sh API
-
-        Returns:
-            Set of unique domain names found in the certificate
-        """
         domains = set()

-        # Extract from common name
-        common_name = cert_data.get('common_name', '')
-        if common_name:
-            cleaned_cn = self._clean_domain_name(common_name)
-            if cleaned_cn:
-                domains.update(cleaned_cn)
-
-        # Extract from name_value field (contains SANs)
-        name_value = cert_data.get('name_value', '')
-        if name_value:
-            # Split by newlines and clean each domain
-            for line in name_value.split('\n'):
-                cleaned_domains = self._clean_domain_name(line.strip())
-                if cleaned_domains:
-                    domains.update(cleaned_domains)
-
+        if cn := cert_data.get('common_name'):
+            if cleaned := self._clean_domain_name(cn):
+                domains.update(cleaned)
+        if nv := cert_data.get('name_value'):
+            for line in nv.split('\n'):
+                if cleaned := self._clean_domain_name(line.strip()):
+                    domains.update(cleaned)
         return domains

     def _clean_domain_name(self, domain_name: str) -> List[str]:
-        """
-        Clean and normalize domain name from certificate data.
-        Now returns a list to handle wildcards correctly.
-        """
-        if not domain_name:
-            return []
-
-        domain = domain_name.strip().lower()
-
-        # Remove protocol if present
-        if domain.startswith(('http://', 'https://')):
-            domain = domain.split('://', 1)[1]
-
-        # Remove path if present
-        if '/' in domain:
-            domain = domain.split('/', 1)[0]
-
-        # Remove port if present
-        if ':' in domain and not domain.count(':') > 1:  # Avoid breaking IPv6
-            domain = domain.split(':', 1)[0]
-
-        # Handle wildcard domains
-        cleaned_domains = []
-        if domain.startswith('*.'):
-            # Add both the wildcard and the base domain
-            cleaned_domains.append(domain)
-            cleaned_domains.append(domain[2:])
-        else:
-            cleaned_domains.append(domain)
-
-        # Remove any remaining invalid characters and validate
+        if not domain_name: return []
+        domain = domain_name.strip().lower().split('://', 1)[-1].split('/', 1)[0]
+        if ':' in domain and not domain.count(':') > 1: domain = domain.split(':', 1)[0]
+        cleaned_domains = [domain, domain[2:]] if domain.startswith('*.') else [domain]
         final_domains = []
         for d in cleaned_domains:
             d = re.sub(r'[^\w\-\.]', '', d)
             if d and not d.startswith(('.', '-')) and not d.endswith(('.', '-')):
                 final_domains.append(d)

         return [d for d in final_domains if _is_valid_domain(d)]
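_clean_domain_name still returns a list, but note that the character filter strips the literal '*' from wildcard entries, so only the base domain survives sanitization. Assuming a constructed provider instance:

    provider = CrtShProvider()  # illustrative; default construction assumed to work
    print(provider._clean_domain_name('https://www.example.com/path'))  # ['www.example.com']
    print(provider._clean_domain_name('*.example.com'))  # ['example.com'] - '*' is filtered out
    print(provider._clean_domain_name(''))               # []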
dnsrecon/providers/dns_provider.py
@@ -1,11 +1,9 @@
 # dnsrecon/providers/dns_provider.py

-import dns.resolver
-import dns.reversename
+from dns import resolver, reversename
 from typing import List, Dict, Any, Tuple
 from .base_provider import BaseProvider
 from utils.helpers import _is_valid_ip, _is_valid_domain
-from core.graph_manager import RelationshipType


 class DNSProvider(BaseProvider):
@@ -14,7 +12,7 @@ class DNSProvider(BaseProvider):
     Now uses session-specific configuration.
     """

-    def __init__(self, session_config=None):
+    def __init__(self, name=None, session_config=None):
         """Initialize DNS provider with session-specific configuration."""
         super().__init__(
             name="dns",
@@ -24,7 +22,7 @@ class DNSProvider(BaseProvider):
         )

         # Configure DNS resolver
-        self.resolver = dns.resolver.Resolver()
+        self.resolver = resolver.Resolver()
         self.resolver.timeout = 5
         self.resolver.lifetime = 10
         #self.resolver.nameservers = ['127.0.0.1']
@@ -49,15 +47,10 @@ class DNSProvider(BaseProvider):
         """DNS is always available - no API key required."""
         return True

-    def query_domain(self, domain: str) -> List[Tuple[str, str, RelationshipType, float, Dict[str, Any]]]:
+    def query_domain(self, domain: str) -> List[Tuple[str, str, str, float, Dict[str, Any]]]:
         """
         Query DNS records for the domain to discover relationships.
-
-        Args:
-            domain: Domain to investigate
-
-        Returns:
-            List of relationships discovered from DNS analysis
+        ...
         """
         if not _is_valid_domain(domain):
             return []
@@ -66,11 +59,19 @@ class DNSProvider(BaseProvider):

         # Query all record types
         for record_type in ['A', 'AAAA', 'CNAME', 'MX', 'NS', 'SOA', 'TXT', 'SRV', 'CAA']:
-            relationships.extend(self._query_record(domain, record_type))
+            try:
+                relationships.extend(self._query_record(domain, record_type))
+            except resolver.NoAnswer:
+                # This is not an error, just a confirmation that the record doesn't exist.
+                self.logger.logger.debug(f"No {record_type} record found for {domain}")
+            except Exception as e:
+                self.failed_requests += 1
+                self.logger.logger.debug(f"{record_type} record query failed for {domain}: {e}")
+                # Optionally, you might want to re-raise other, more serious exceptions.

         return relationships

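dnspython distinguishes "the name exists but has no records of this type" (NoAnswer) from "the name does not exist" (NXDOMAIN); the loop above deliberately swallows only the former per record type. The same pattern standalone:

    from dns import resolver

    res = resolver.Resolver()
    try:
        answer = res.resolve("example.com", "MX")
        for record in answer:
            print(record.exchange)           # mail host of each MX record
    except resolver.NoAnswer:
        print("domain exists, but has no MX records")
    except resolver.NXDOMAIN:
        print("domain does not exist at all")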
def query_ip(self, ip: str) -> List[Tuple[str, str, RelationshipType, float, Dict[str, Any]]]:
|
def query_ip(self, ip: str) -> List[Tuple[str, str, str, float, Dict[str, Any]]]:
|
||||||
"""
|
"""
|
||||||
Query reverse DNS for the IP address.
|
Query reverse DNS for the IP address.
|
||||||
|
|
||||||
@@ -88,7 +89,7 @@ class DNSProvider(BaseProvider):
|
|||||||
try:
|
try:
|
||||||
# Perform reverse DNS lookup
|
# Perform reverse DNS lookup
|
||||||
self.total_requests += 1
|
self.total_requests += 1
|
||||||
reverse_name = dns.reversename.from_address(ip)
|
reverse_name = reversename.from_address(ip)
|
||||||
response = self.resolver.resolve(reverse_name, 'PTR')
|
response = self.resolver.resolve(reverse_name, 'PTR')
|
||||||
self.successful_requests += 1
|
self.successful_requests += 1
|
||||||
|
|
||||||
@@ -106,27 +107,32 @@ class DNSProvider(BaseProvider):
|
|||||||
relationships.append((
|
relationships.append((
|
||||||
ip,
|
ip,
|
||||||
hostname,
|
hostname,
|
||||||
RelationshipType.PTR_RECORD,
|
'ptr_record',
|
||||||
RelationshipType.PTR_RECORD.default_confidence,
|
0.8,
|
||||||
raw_data
|
raw_data
|
||||||
))
|
))
|
||||||
|
|
||||||
self.log_relationship_discovery(
|
self.log_relationship_discovery(
|
||||||
source_node=ip,
|
source_node=ip,
|
||||||
target_node=hostname,
|
target_node=hostname,
|
||||||
relationship_type=RelationshipType.PTR_RECORD,
|
relationship_type='ptr_record',
|
||||||
confidence_score=RelationshipType.PTR_RECORD.default_confidence,
|
confidence_score=0.8,
|
||||||
raw_data=raw_data,
|
raw_data=raw_data,
|
||||||
discovery_method="reverse_dns_lookup"
|
discovery_method="reverse_dns_lookup"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
except resolver.NXDOMAIN:
|
||||||
|
self.failed_requests += 1
|
||||||
|
self.logger.logger.debug(f"Reverse DNS lookup failed for {ip}: NXDOMAIN")
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
self.failed_requests += 1
|
self.failed_requests += 1
|
||||||
self.logger.logger.debug(f"Reverse DNS lookup failed for {ip}: {e}")
|
self.logger.logger.debug(f"Reverse DNS lookup failed for {ip}: {e}")
|
||||||
|
# Re-raise the exception so the scanner can handle the failure
|
||||||
|
raise e
|
||||||
|
|
||||||
return relationships
|
return relationships
|
||||||
|
|
||||||
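Reviewer note: the `reversename.from_address` / `resolve(..., 'PTR')` pair above is standard dnspython. A minimal, standalone sketch of the same lookup, assuming dnspython >= 2.x is installed (the function name is illustrative, not part of the provider):

```python
# Minimal sketch of the reverse-DNS pattern used in query_ip above.
from dns import resolver, reversename

def reverse_lookup(ip: str) -> list:
    """Return PTR hostnames for an IP, or [] when none exist."""
    try:
        rev_name = reversename.from_address(ip)   # 8.8.8.8 -> 8.8.8.8.in-addr.arpa.
        answer = resolver.resolve(rev_name, 'PTR')
        return [str(rdata).rstrip('.') for rdata in answer]
    except (resolver.NXDOMAIN, resolver.NoAnswer):
        return []  # a missing PTR record is an empty result, not an error

if __name__ == '__main__':
    print(reverse_lookup('8.8.8.8'))  # typically ['dns.google']
```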
-    def _query_record(self, domain: str, record_type: str) -> List[Tuple[str, str, RelationshipType, float, Dict[str, Any]]]:
+    def _query_record(self, domain: str, record_type: str) -> List[Tuple[str, str, str, float, Dict[str, Any]]]:
         """
         Query a specific type of DNS record for the domain.
         """
@@ -147,7 +153,8 @@ class DNSProvider(BaseProvider):
             elif record_type == 'SOA':
                 target = str(record.mname).rstrip('.')
             elif record_type in ['TXT']:
-                target = b' '.join(record.strings).decode('utf-8', 'ignore')
+                # TXT records are treated as metadata, not relationships.
+                continue
             elif record_type == 'SRV':
                 target = str(record.target).rstrip('.')
             elif record_type == 'CAA':
@@ -155,7 +162,6 @@ class DNSProvider(BaseProvider):
             else:
                 target = str(record)
-

             if target:
                 raw_data = {
                     'query_type': record_type,
@@ -163,35 +169,30 @@ class DNSProvider(BaseProvider):
                     'value': target,
                     'ttl': response.ttl
                 }
-                try:
-                    relationship_type_enum_name = f"{record_type}_RECORD"
-                    # Handle TXT records as metadata, not relationships
-                    if record_type == 'TXT':
-                        relationship_type_enum = RelationshipType.A_RECORD  # Dummy value, won't be used
-                    else:
-                        relationship_type_enum = getattr(RelationshipType, relationship_type_enum_name)
+                relationship_type = f"{record_type.lower()}_record"
+                confidence = 0.8  # Default confidence for DNS records

                 relationships.append((
                     domain,
                     target,
-                    relationship_type_enum,
-                    relationship_type_enum.default_confidence,
+                    relationship_type,
+                    confidence,
                     raw_data
                 ))

                 self.log_relationship_discovery(
                     source_node=domain,
                     target_node=target,
-                    relationship_type=relationship_type_enum,
-                    confidence_score=relationship_type_enum.default_confidence,
+                    relationship_type=relationship_type,
+                    confidence_score=confidence,
                     raw_data=raw_data,
                     discovery_method=f"dns_{record_type.lower()}_record"
                 )
-                except AttributeError:
-                    self.logger.logger.error(f"Unsupported record type '{record_type}' encountered for domain {domain}")

         except Exception as e:
             self.failed_requests += 1
             self.logger.logger.debug(f"{record_type} record query failed for {domain}: {e}")
+            # Re-raise the exception so the scanner can handle it
+            raise e

         return relationships
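The theme of this commit is visible above: `RelationshipType` enum members are replaced by plain strings (`'ptr_record'`, `'mx_record'`, ...) with literal confidence values, which removes the `getattr`/`AttributeError` handling for unsupported record types. A sketch of the resulting tuple shape (the helper name is illustrative, not the project's API):

```python
# Sketch of the string-based relationship tuple the providers now emit:
# (source, target, relationship_type, confidence, raw_data)
from typing import Any, Dict, Tuple

Relationship = Tuple[str, str, str, float, Dict[str, Any]]

def make_dns_relationship(domain: str, target: str, record_type: str, ttl: int) -> Relationship:
    # 'a_record', 'mx_record', ... are derived from the record type itself,
    # so any record type maps to a valid relationship label without an enum lookup.
    relationship_type = f"{record_type.lower()}_record"
    confidence = 0.8  # default confidence for DNS records
    raw_data = {'query_type': record_type, 'value': target, 'ttl': ttl}
    return (domain, target, relationship_type, confidence, raw_data)

rel = make_dns_relationship('example.com', 'mail.example.com', 'MX', 3600)
print(rel[2], rel[3])  # mx_record 0.8
```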
@@ -1,13 +1,9 @@
-"""
-Shodan provider for DNSRecon.
-Discovers IP relationships and infrastructure context through Shodan API.
-"""
+# dnsrecon/providers/shodan_provider.py

 import json
 from typing import List, Dict, Any, Tuple
 from .base_provider import BaseProvider
 from utils.helpers import _is_valid_ip, _is_valid_domain
-from core.graph_manager import RelationshipType


 class ShodanProvider(BaseProvider):
@@ -16,7 +12,7 @@ class ShodanProvider(BaseProvider):
     Now uses session-specific API keys.
     """

-    def __init__(self, session_config=None):
+    def __init__(self, name=None, session_config=None):
         """Initialize Shodan provider with session-specific configuration."""
         super().__init__(
             name="shodan",
@@ -47,7 +43,7 @@ class ShodanProvider(BaseProvider):
         """Return a dictionary indicating if the provider can query domains and/or IPs."""
         return {'domains': True, 'ips': True}

-    def query_domain(self, domain: str) -> List[Tuple[str, str, RelationshipType, float, Dict[str, Any]]]:
+    def query_domain(self, domain: str) -> List[Tuple[str, str, str, float, Dict[str, Any]]]:
         """
         Query Shodan for information about a domain.
         Uses Shodan's hostname search to find associated IPs.
@@ -103,16 +99,16 @@ class ShodanProvider(BaseProvider):
                 relationships.append((
                     domain,
                     ip_address,
-                    RelationshipType.A_RECORD,  # Domain resolves to IP
-                    RelationshipType.A_RECORD.default_confidence,
+                    'a_record',  # Domain resolves to IP
+                    0.8,
                     raw_data
                 ))

                 self.log_relationship_discovery(
                     source_node=domain,
                     target_node=ip_address,
-                    relationship_type=RelationshipType.A_RECORD,
-                    confidence_score=RelationshipType.A_RECORD.default_confidence,
+                    relationship_type='a_record',
+                    confidence_score=0.8,
                     raw_data=raw_data,
                     discovery_method="shodan_hostname_search"
                 )
@@ -129,7 +125,7 @@ class ShodanProvider(BaseProvider):
                     relationships.append((
                         domain,
                         hostname,
-                        RelationshipType.PASSIVE_DNS,  # Shared hosting relationship
+                        'passive_dns',  # Shared hosting relationship
                         0.6,  # Lower confidence for shared hosting
                         hostname_raw_data
                     ))
@@ -137,7 +133,7 @@ class ShodanProvider(BaseProvider):
                     self.log_relationship_discovery(
                         source_node=domain,
                         target_node=hostname,
-                        relationship_type=RelationshipType.PASSIVE_DNS,
+                        relationship_type='passive_dns',
                         confidence_score=0.6,
                         raw_data=hostname_raw_data,
                         discovery_method="shodan_shared_hosting"
@@ -148,7 +144,7 @@ class ShodanProvider(BaseProvider):

         return relationships

-    def query_ip(self, ip: str) -> List[Tuple[str, str, RelationshipType, float, Dict[str, Any]]]:
+    def query_ip(self, ip: str) -> List[Tuple[str, str, str, float, Dict[str, Any]]]:
         """
         Query Shodan for information about an IP address.

@@ -195,16 +191,16 @@ class ShodanProvider(BaseProvider):
                 relationships.append((
                     ip,
                     hostname,
-                    RelationshipType.A_RECORD,  # IP resolves to hostname
-                    RelationshipType.A_RECORD.default_confidence,
+                    'a_record',  # IP resolves to hostname
+                    0.8,
                     raw_data
                 ))

                 self.log_relationship_discovery(
                     source_node=ip,
                     target_node=hostname,
-                    relationship_type=RelationshipType.A_RECORD,
-                    confidence_score=RelationshipType.A_RECORD.default_confidence,
+                    relationship_type='a_record',
+                    confidence_score=0.8,
                     raw_data=raw_data,
                     discovery_method="shodan_host_lookup"
                 )
@@ -230,16 +226,16 @@ class ShodanProvider(BaseProvider):
                 relationships.append((
                     ip,
                     asn_name,
-                    RelationshipType.ASN_MEMBERSHIP,
-                    RelationshipType.ASN_MEMBERSHIP.default_confidence,
+                    'asn_membership',
+                    0.7,
                     asn_raw_data
                 ))

                 self.log_relationship_discovery(
                     source_node=ip,
                     target_node=asn_name,
-                    relationship_type=RelationshipType.ASN_MEMBERSHIP,
-                    confidence_score=RelationshipType.ASN_MEMBERSHIP.default_confidence,
+                    relationship_type='asn_membership',
+                    confidence_score=0.7,
                     raw_data=asn_raw_data,
                     discovery_method="shodan_asn_lookup"
                 )
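The `'a_record'` and `'asn_membership'` tuples above map directly onto fields of Shodan's documented host endpoint. A rough sketch against the public REST API (the helper name and raw_data contents are illustrative; a real run needs a valid SHODAN_API_KEY in the environment):

```python
# Sketch: derive provider-style tuples from Shodan's /shodan/host/{ip} endpoint.
import os
import requests

def shodan_ip_relationships(ip: str) -> list:
    resp = requests.get(
        f"https://api.shodan.io/shodan/host/{ip}",
        params={'key': os.environ['SHODAN_API_KEY']},
        timeout=30,
    )
    resp.raise_for_status()
    data = resp.json()

    rels = []
    for hostname in data.get('hostnames', []):   # IP resolves to hostname
        rels.append((ip, hostname, 'a_record', 0.8, {'source': 'shodan'}))
    if data.get('asn'):                          # e.g. 'AS15169'
        rels.append((ip, data['asn'], 'asn_membership', 0.7, {'org': data.get('org')}))
    return rels
```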
@@ -7,3 +7,5 @@ urllib3>=2.0.0
 dnspython>=2.4.2
 gunicorn
 redis
+python-dotenv
+psycopg2-binary
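The two new pins match the configuration changes elsewhere in this branch: `python-dotenv` backs `.env`-driven settings, and `psycopg2-binary` points at a PostgreSQL backend. A minimal dotenv sketch; the variable name here is purely hypothetical:

```python
# Sketch: python-dotenv loads KEY=value pairs from a .env file into os.environ.
import os
from dotenv import load_dotenv

load_dotenv()  # reads ./.env if present; existing environment variables win by default

# EXAMPLE_SETTING is a hypothetical key, standing in for the app's real settings.
value = os.getenv('EXAMPLE_SETTING', 'fallback')
print(value)
```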
@@ -272,8 +272,24 @@ input[type="text"]:focus, select:focus {
     text-shadow: 0 0 3px rgba(0, 255, 65, 0.3);
 }

+.progress-container {
+    padding: 0 1.5rem 1.5rem;
+}
+
+.progress-info {
+    display: flex;
+    justify-content: space-between;
+    font-size: 0.8rem;
+    color: #999;
+    margin-bottom: 0.5rem;
+}
+
+#progress-compact {
+    color: #00ff41;
+    font-weight: 500;
+}
+
 .progress-bar {
-    margin: 1rem 1.5rem;
     height: 8px;
     background-color: #1a1a1a;
     border: 1px solid #444;
@@ -517,7 +533,7 @@ input[type="text"]:focus, select:focus {
     color: #e0e0e0;
 }

-.provider-stats {
+.provider-stats, .provider-task-stats {
     font-size: 0.8rem;
     color: #999;
     display: grid;
@@ -526,6 +542,13 @@ input[type="text"]:focus, select:focus {
     margin-top: 0.5rem;
 }

+.provider-task-stats {
+    border-top: 1px solid #333;
+    padding-top: 0.5rem;
+    margin-top: 0.5rem;
+}
+
+
 .provider-stat {
     display: flex;
     justify-content: space-between;
@@ -1000,6 +1023,46 @@ input[type="text"]:focus, select:focus {
     font-style: italic;
 }

+.correlation-values-list {
+    margin-top: 1rem;
+}
+
+.correlation-value-details {
+    margin-bottom: 0.5rem;
+    border: 1px solid #333;
+    border-radius: 3px;
+}
+
+.correlation-value-details summary {
+    padding: 0.5rem;
+    background-color: #3a3a3a;
+    cursor: pointer;
+    outline: none;
+    color: #c7c7c7;
+}
+
+.correlation-value-details summary:hover {
+    background-color: #4a4a4a;
+}
+
+.correlation-value-details .detail-row {
+    margin-left: 1rem;
+    margin-right: 1rem;
+    padding: 0.5rem 0;
+}
+
+.correlation-value-details .detail-label {
+    color: #999;
+    font-weight: 500;
+}
+
+.correlation-value-details .detail-value {
+    color: #c7c7c7;
+    word-break: break-all;
+    font-family: 'Roboto Mono', monospace;
+    font-size: 0.9em;
+}
+
 @keyframes fadeIn {
     from {opacity: 0; transform: scale(0.95);}
     to {opacity: 1; transform: scale(1);}
@@ -1,6 +1,6 @@
 /**
  * Graph visualization module for DNSRecon
- * Handles network graph rendering using vis.js with enhanced Phase 2 features
+ * Handles network graph rendering using vis.js
  */

 class GraphManager {
@@ -130,7 +130,7 @@ class GraphManager {
     }

     /**
-     * Initialize the network graph with enhanced features
+     * Initialize the network graph
      */
     initialize() {
         if (this.isInitialized) {
@@ -156,7 +156,7 @@ class GraphManager {
             // Add graph controls
             this.addGraphControls();

-            console.log('Enhanced graph initialized successfully');
+            console.log('Graph initialized successfully');
         } catch (error) {
             console.error('Failed to initialize graph:', error);
             this.showError('Failed to initialize visualization');
@@ -184,12 +184,12 @@ class GraphManager {
     }

     /**
-     * Setup enhanced network event handlers
+     * Setup network event handlers
      */
     setupNetworkEvents() {
         if (!this.network) return;

-        // Node click event with enhanced details
+        // Node click event with details
         this.network.on('click', (params) => {
             if (params.nodes.length > 0) {
                 const nodeId = params.nodes[0];
@@ -207,7 +207,7 @@ class GraphManager {
             }
         });

-        // Enhanced hover events
+        // Hover events
         this.network.on('hoverNode', (params) => {
             const nodeId = params.node;
             const node = this.nodes.get(nodeId);
@@ -216,12 +216,8 @@ class GraphManager {
             }
         });

-        // FIX: Comment out the problematic context menu handler
         this.network.on('oncontext', (params) => {
             params.event.preventDefault();
-            // if (params.nodes.length > 0) {
-            //     this.showNodeContextMenu(params.pointer.DOM, params.nodes[0]);
-            // }
         });

         // Stabilization events with progress
@@ -242,7 +238,6 @@ class GraphManager {
     }

     /**
-     * Update graph with new data and enhanced processing
      * @param {Object} graphData - Graph data from backend
      */
     updateGraph(graphData) {
@@ -326,15 +321,15 @@ class GraphManager {
                 setTimeout(() => this.fitView(), 800);
             }

-            console.log(`Enhanced graph updated: ${processedNodes.length} nodes, ${processedEdges.length} edges (${newNodes.length} new nodes, ${newEdges.length} new edges)`);
+            console.log(`Graph updated: ${processedNodes.length} nodes, ${processedEdges.length} edges (${newNodes.length} new nodes, ${newEdges.length} new edges)`);
         } catch (error) {
-            console.error('Failed to update enhanced graph:', error);
+            console.error('Failed to update graph:', error);
             this.showError('Failed to update visualization');
         }
     }

     /**
-     * Process node data with enhanced styling and metadata
+     * Process node data with styling and metadata
      * @param {Object} node - Raw node data
      * @returns {Object} Processed node data
      */
@@ -366,15 +361,31 @@ class GraphManager {
             }
         }

+        // Handle merged correlation objects (similar to large entities)
         if (node.type === 'correlation_object') {
-            processedNode.label = this.formatNodeLabel(node.metadata.value, node.type);
+            const metadata = node.metadata || {};
+            const values = metadata.values || [];
+            const mergeCount = metadata.merge_count || 1;
+
+            if (mergeCount > 1) {
+                // Display as merged correlation container
+                processedNode.label = `Correlations (${mergeCount})`;
+                processedNode.title = `Merged correlation container with ${mergeCount} values: ${values.slice(0, 3).join(', ')}${values.length > 3 ? '...' : ''}`;
+                processedNode.borderWidth = 3; // Thicker border for merged nodes
+            } else {
+                // Single correlation value
+                const value = Array.isArray(values) && values.length > 0 ? values[0] : (metadata.value || 'Unknown');
+                const displayValue = typeof value === 'string' && value.length > 20 ? value.substring(0, 17) + '...' : value;
+                processedNode.label = `${displayValue}`;
+                processedNode.title = `Correlation: ${value}`;
+            }
         }

         return processedNode;
     }

     /**
-     * Process edge data with enhanced styling and metadata
+     * Process edge data with styling and metadata
      * @param {Object} edge - Raw edge data
      * @returns {Object} Processed edge data
      */
@@ -478,7 +489,7 @@ class GraphManager {
     }

     /**
-     * Get enhanced node shape based on type
+     * Get node shape based on type
      * @param {string} nodeType - Node type
      * @returns {string} Shape name
      */
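The substantive change in the graph module is the correlation-object branch: a node can now carry several merged correlation values, and its label switches between a container form and a single truncated value. The same rule, restated as a small Python sketch (names illustrative):

```python
# Sketch of the correlation-node labelling rule from processNode above.
def correlation_label(metadata: dict) -> tuple:
    """Return (label, tooltip) for a correlation_object node."""
    values = metadata.get('values', [])
    merge_count = metadata.get('merge_count', 1)

    if merge_count > 1:  # merged container: summarize the first few values
        preview = ', '.join(str(v) for v in values[:3])
        suffix = '...' if len(values) > 3 else ''
        return (f"Correlations ({merge_count})",
                f"Merged correlation container with {merge_count} values: {preview}{suffix}")

    value = str(values[0]) if values else metadata.get('value', 'Unknown')
    display = value[:17] + '...' if len(value) > 20 else value
    return (display, f"Correlation: {value}")

print(correlation_label({'values': ['abuse@example.com'], 'merge_count': 1}))
print(correlation_label({'values': ['a', 'b', 'c', 'd'], 'merge_count': 4}))
```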
@@ -1,7 +1,6 @@
 /**
  * Main application logic for DNSRecon web interface
  * Handles UI interactions, API communication, and data flow
- * DEBUG VERSION WITH EXTRA LOGGING
  */

 class DNSReconApp {
@@ -61,9 +60,8 @@ class DNSReconApp {
             scanStatus: document.getElementById('scan-status'),
             targetDisplay: document.getElementById('target-display'),
             depthDisplay: document.getElementById('depth-display'),
-            progressDisplay: document.getElementById('progress-display'),
-            indicatorsDisplay: document.getElementById('indicators-display'),
             relationshipsDisplay: document.getElementById('relationships-display'),
+            progressCompact: document.getElementById('progress-compact'),
             progressFill: document.getElementById('progress-fill'),

             // Provider elements
@@ -243,7 +241,7 @@ class DNSReconApp {
     }

     /**
-     * Enhanced start scan with better error handling
+     * Start scan with error handling
      */
     async startScan(clearGraph = true) {
         console.log('=== STARTING SCAN ===');
@@ -318,7 +316,7 @@ class DNSReconApp {
         }
     }
     /**
-     * Enhanced scan stop with immediate UI feedback
+     * Scan stop with immediate UI feedback
      */
     async stopScan() {
         try {
@@ -427,7 +425,7 @@ class DNSReconApp {
     }

     /**
-     * Enhanced status update with better error handling
+     * Status update with better error handling
      */
     async updateStatus() {
         try {
@@ -447,7 +445,7 @@ class DNSReconApp {
             // Handle status changes
             if (status.status !== this.scanStatus) {
                 console.log(`*** STATUS CHANGED: ${this.scanStatus} -> ${status.status} ***`);
-                this.handleStatusChange(status.status);
+                this.handleStatusChange(status.status, status.task_queue_size);
             }

             this.scanStatus = status.status;
@@ -542,16 +540,18 @@ class DNSReconApp {
             if (this.elements.depthDisplay) {
                 this.elements.depthDisplay.textContent = `${status.current_depth}/${status.max_depth}`;
             }
-            if (this.elements.progressDisplay) {
-                this.elements.progressDisplay.textContent = `${status.progress_percentage.toFixed(1)}%`;
-            }
-            if (this.elements.indicatorsDisplay) {
-                this.elements.indicatorsDisplay.textContent = status.indicators_processed || 0;
-            }

-            // Update progress bar with smooth animation
+            // Update progress bar and compact display
             if (this.elements.progressFill) {
-                this.elements.progressFill.style.width = `${status.progress_percentage}%`;
+                const completed = status.indicators_completed || 0;
+                const enqueued = status.task_queue_size || 0;
+                const totalTasks = completed + enqueued;
+                const progressPercentage = totalTasks > 0 ? (completed / totalTasks) * 100 : 0;
+
+                this.elements.progressFill.style.width = `${progressPercentage}%`;
+                if (this.elements.progressCompact) {
+                    this.elements.progressCompact.textContent = `${completed}/${totalTasks} - ${Math.round(progressPercentage)}%`;
+                }

                 // Add pulsing animation for active scans
                 if (status.status === 'running') {
@@ -575,6 +575,8 @@ class DNSReconApp {
                 }
             }

+            this.setUIState(status.status, status.task_queue_size);
+
             console.log('Status display updated successfully');
         } catch (error) {
             console.error('Error updating status display:', error);
@@ -585,12 +587,12 @@ class DNSReconApp {
     * Handle status changes with improved state synchronization
     * @param {string} newStatus - New scan status
     */
-    handleStatusChange(newStatus) {
+    handleStatusChange(newStatus, task_queue_size) {
        console.log(`=== STATUS CHANGE: ${this.scanStatus} -> ${newStatus} ===`);

        switch (newStatus) {
            case 'running':
-                this.setUIState('scanning');
+                this.setUIState('scanning', task_queue_size);
                this.showSuccess('Scan is running');
                // Increase polling frequency for active scans
                this.startPolling(1000); // Poll every 1 second for running scans
@@ -598,7 +600,7 @@ class DNSReconApp {
                break;

            case 'completed':
-                this.setUIState('completed');
+                this.setUIState('completed', task_queue_size);
                this.stopPolling();
                this.showSuccess('Scan completed successfully');
                this.updateConnectionStatus('completed');
@@ -609,7 +611,7 @@ class DNSReconApp {
                break;

            case 'failed':
-                this.setUIState('failed');
+                this.setUIState('failed', task_queue_size);
                this.stopPolling();
                this.showError('Scan failed');
                this.updateConnectionStatus('error');
@@ -617,7 +619,7 @@ class DNSReconApp {
                break;

            case 'stopped':
-                this.setUIState('stopped');
+                this.setUIState('stopped', task_queue_size);
                this.stopPolling();
                this.showSuccess('Scan stopped');
                this.updateConnectionStatus('stopped');
@@ -625,7 +627,7 @@ class DNSReconApp {
                break;

            case 'idle':
-                this.setUIState('idle');
+                this.setUIState('idle', task_queue_size);
                this.stopPolling();
                this.updateConnectionStatus('idle');
                break;
@@ -668,11 +670,13 @@ class DNSReconApp {
    }

    /**
-     * Enhanced UI state management with immediate button updates
+     * UI state management with immediate button updates
     */
-    setUIState(state) {
+    setUIState(state, task_queue_size) {
        console.log(`Setting UI state to: ${state}`);

+        const isQueueEmpty = task_queue_size === 0;
+
        switch (state) {
            case 'scanning':
                this.isScanning = true;
@@ -701,12 +705,12 @@ class DNSReconApp {
            case 'stopped':
                this.isScanning = false;
                if (this.elements.startScan) {
-                    this.elements.startScan.disabled = false;
+                    this.elements.startScan.disabled = !isQueueEmpty;
                    this.elements.startScan.classList.remove('loading');
                    this.elements.startScan.innerHTML = '<span class="btn-icon">[RUN]</span><span>Start Reconnaissance</span>';
                }
                if (this.elements.addToGraph) {
-                    this.elements.addToGraph.disabled = false;
+                    this.elements.addToGraph.disabled = !isQueueEmpty;
                    this.elements.addToGraph.classList.remove('loading');
                }
                if (this.elements.stopScan) {
@@ -802,6 +806,47 @@ class DNSReconApp {

        let detailsHtml = '<div class="modal-details-grid">';

+        // Handle merged correlation objects similar to large entities
+        if (node.type === 'correlation_object') {
+            const metadata = node.metadata || {};
+            const values = metadata.values || [];
+            const mergeCount = metadata.merge_count || 1;
+
+            detailsHtml += '<div class="modal-section">';
+            detailsHtml += '<h4>Correlation Details</h4>';
+
+            if (mergeCount > 1) {
+                detailsHtml += `<p><strong>Merged Correlations:</strong> ${mergeCount} values</p>`;
+                detailsHtml += '<div class="correlation-values-list">';
+
+                values.forEach((value, index) => {
+                    detailsHtml += `<details class="correlation-value-details">`;
+                    detailsHtml += `<summary>Value ${index + 1}: ${typeof value === 'string' && value.length > 50 ? value.substring(0, 47) + '...' : value}</summary>`;
+                    detailsHtml += `<div class="detail-row"><span class="detail-label">Full Value:</span><span class="detail-value">${value}</span></div>`;
+                    detailsHtml += `</details>`;
+                });
+
+                detailsHtml += '</div>';
+            } else {
+                const singleValue = values.length > 0 ? values[0] : (metadata.value || 'Unknown');
+                detailsHtml += `<div class="detail-row"><span class="detail-label">Correlation Value:</span><span class="detail-value">${singleValue}</span></div>`;
+            }
+
+            // Show correlated nodes
+            const correlatedNodes = metadata.correlated_nodes || [];
+            if (correlatedNodes.length > 0) {
+                detailsHtml += `<div class="detail-row"><span class="detail-label">Correlated Nodes:</span><span class="detail-value">${correlatedNodes.length}</span></div>`;
+                detailsHtml += '<ul>';
+                correlatedNodes.forEach(nodeId => {
+                    detailsHtml += `<li><a href="#" class="node-link" data-node-id="${nodeId}">${nodeId}</a></li>`;
+                });
+                detailsHtml += '</ul>';
+            }
+
+            detailsHtml += '</div>';
+        }
+
+        // Continue with standard node details for all node types
        // Section for Incoming Edges (Source Nodes)
        if (node.incoming_edges && node.incoming_edges.length > 0) {
            detailsHtml += '<div class="modal-section">';
@@ -824,11 +869,13 @@ class DNSReconApp {
            detailsHtml += '</ul></div>';
        }

-        // Section for Attributes
+        // Section for Attributes (skip for correlation objects - already handled above)
+        if (node.type !== 'correlation_object') {
            detailsHtml += '<div class="modal-section">';
            detailsHtml += '<h4>Attributes</h4>';
            detailsHtml += this.formatObjectToHtml(node.attributes);
            detailsHtml += '</div>';
+        }

        // Section for Description
        detailsHtml += '<div class="modal-section">';
@@ -836,11 +883,13 @@ class DNSReconApp {
        detailsHtml += `<p class="description-text">${node.description || 'No description available.'}</p>`;
        detailsHtml += '</div>';

-        // Section for Metadata
+        // Section for Metadata (skip detailed metadata for correlation objects - already handled above)
+        if (node.type !== 'correlation_object') {
            detailsHtml += '<div class="modal-section">';
            detailsHtml += '<h4>Metadata</h4>';
            detailsHtml += this.formatObjectToHtml(node.metadata);
            detailsHtml += '</div>';
+        }

        detailsHtml += '</div>';
        return detailsHtml;
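The progress rework above replaces the server-computed `progress_percentage` with a client-side figure: completed indicators over completed plus still-enqueued tasks. Note that the denominator grows as the recursive scan discovers new tasks, so the percentage can legitimately drop mid-scan; the same `task_queue_size` also gates the Start/Add buttons, which re-enable only once the queue is empty. The arithmetic, isolated as a sketch:

```python
# Sketch of the new progress computation in the status-update path.
def progress(completed: int, task_queue_size: int):
    total = completed + task_queue_size
    pct = (completed / total) * 100 if total > 0 else 0.0
    return f"{completed}/{total} - {round(pct)}%", pct

print(progress(30, 10))  # ('30/40 - 75%', 75.0)
print(progress(0, 0))    # ('0/0 - 0%', 0.0)  idle: bar stays empty
```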
@@ -90,23 +90,21 @@
                     <span class="status-label">Depth:</span>
                     <span id="depth-display" class="status-value">0/0</span>
                 </div>
-                <div class="status-row">
-                    <span class="status-label">Progress:</span>
-                    <span id="progress-display" class="status-value">0%</span>
-                </div>
-                <div class="status-row">
-                    <span class="status-label">Indicators:</span>
-                    <span id="indicators-display" class="status-value">0</span>
-                </div>
                 <div class="status-row">
                     <span class="status-label">Relationships:</span>
                     <span id="relationships-display" class="status-value">0</span>
                 </div>
             </div>

+            <div class="progress-container">
+                <div class="progress-info">
+                    <span id="progress-label">Progress:</span>
+                    <span id="progress-compact">0/0</span>
+                </div>
                 <div class="progress-bar">
                     <div id="progress-fill" class="progress-fill"></div>
                 </div>
+            </div>
         </section>

         <section class="visualization-panel">