Compare commits
try-fix ... database_c (15 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 9f3b17e658 | |
| | eb9eea127b | |
| | ae07635ab6 | |
| | d7adf9ad8b | |
| | 39ce0e9d11 | |
| | 926f9e1096 | |
| | 9499e62ccc | |
| | 89ae06482e | |
| | 7fe7ca41ba | |
| | 949fbdbb45 | |
| | 689e8c00d4 | |
| | 3511f18f9a | |
| | 72f7056bc7 | |
| | 2ae33bc5ba | |
| | c91913fa13 | |
.env.example | 34 (new file)

```diff
@@ -0,0 +1,34 @@
+# ===============================================
+# DNSRecon Environment Variables
+# ===============================================
+# Copy this file to .env and fill in your values.
+
+# --- API Keys ---
+# Add your Shodan API key for the Shodan provider to be enabled.
+SHODAN_API_KEY=
+
+# --- Flask & Session Settings ---
+# A strong, random secret key is crucial for session security.
+FLASK_SECRET_KEY=your-very-secret-and-random-key-here
+FLASK_HOST=127.0.0.1
+FLASK_PORT=5000
+FLASK_DEBUG=True
+# How long a user's session in the browser lasts (in hours).
+FLASK_PERMANENT_SESSION_LIFETIME_HOURS=2
+# How long inactive scanner data is stored in Redis (in minutes).
+SESSION_TIMEOUT_MINUTES=60
+
+
+# --- Application Core Settings ---
+# The default number of levels to recurse when scanning.
+DEFAULT_RECURSION_DEPTH=2
+# Default timeout for provider API requests in seconds.
+DEFAULT_TIMEOUT=30
+# The number of concurrent provider requests to make.
+MAX_CONCURRENT_REQUESTS=5
+# The number of results from a provider that triggers the "large entity" grouping.
+LARGE_ENTITY_THRESHOLD=100
+# The number of times to retry a target if a provider fails.
+MAX_RETRIES_PER_TARGET=3
+# How long cached provider responses are stored (in hours).
+CACHE_EXPIRY_HOURS=12
```
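A minimal sketch of how these variables get consumed at startup, assuming python-dotenv (which this PR wires into config.py via `load_dotenv()`); the snippet itself is illustrative, not part of the diff:

```python
# Load .env and read a few of the values defined above.
import os
from dotenv import load_dotenv

load_dotenv()  # reads .env from the working directory

recursion_depth = int(os.getenv('DEFAULT_RECURSION_DEPTH', '2'))
shodan_key = os.getenv('SHODAN_API_KEY')  # None/empty if left blank
print(recursion_depth, bool(shodan_key))
```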
.gitignore (vendored) | 1

```diff
@@ -169,4 +169,3 @@ cython_debug/
 #.idea/
 
 dump.rdb
-.vscode
```
app.py | 343

```diff
@@ -1,6 +1,8 @@
+# dnsrecon-reduced/app.py
+
 """
 Flask application entry point for DNSRecon web interface.
-Enhanced with user session management and task-based completion model.
+Provides REST API endpoints and serves the web interface with user session support.
 """
 
 import json
```
```diff
@@ -9,81 +11,42 @@ from flask import Flask, render_template, request, jsonify, send_file, session
 from datetime import datetime, timezone, timedelta
 import io
 
-from core.session_manager import session_manager, UserIdentifier
+from core.session_manager import session_manager
 from config import config
 
 
 app = Flask(__name__)
-app.config['SECRET_KEY'] = 'dnsrecon-dev-key-change-in-production'
-app.config['PERMANENT_SESSION_LIFETIME'] = timedelta(hours=2)  # 2 hour session lifetime
+# Use centralized configuration for Flask settings
+app.config['SECRET_KEY'] = config.flask_secret_key
+app.config['PERMANENT_SESSION_LIFETIME'] = timedelta(hours=config.flask_permanent_session_lifetime_hours)
 
 def get_user_scanner():
     """
-    Enhanced user scanner retrieval with user identification and session consolidation.
-    Implements single session per user with seamless consolidation.
+    Retrieves the scanner for the current session, or creates a new
+    session and scanner if one doesn't exist.
     """
-    print("=== ENHANCED GET_USER_SCANNER ===")
-
-    try:
-        # Extract user identification from request
-        client_ip, user_agent = UserIdentifier.extract_request_info(request)
-        user_fingerprint = UserIdentifier.generate_user_fingerprint(client_ip, user_agent)
-
-        print(f"User fingerprint: {user_fingerprint}")
-        print(f"Client IP: {client_ip}")
-        print(f"User Agent: {user_agent[:50]}...")
-
-        # Get current Flask session info for debugging
-        current_flask_session_id = session.get('dnsrecon_session_id')
-        print(f"Flask session ID: {current_flask_session_id}")
-
-        # Try to get existing session first
-        if current_flask_session_id:
-            existing_scanner = session_manager.get_session(current_flask_session_id)
-            if existing_scanner:
-                # Verify session belongs to current user
-                session_info = session_manager.get_session_info(current_flask_session_id)
-                if session_info.get('user_fingerprint') == user_fingerprint:
-                    print(f"Found valid existing session {current_flask_session_id} for user {user_fingerprint}")
-                    existing_scanner.session_id = current_flask_session_id
-                    return current_flask_session_id, existing_scanner
-                else:
-                    print(f"Session {current_flask_session_id} belongs to different user, will create new session")
-            else:
-                print(f"Session {current_flask_session_id} not found in Redis, will create new session")
-
-        # Create or replace user session (this handles consolidation automatically)
-        new_session_id = session_manager.create_or_replace_user_session(client_ip, user_agent)
-        new_scanner = session_manager.get_session(new_session_id)
-
-        if not new_scanner:
-            print(f"ERROR: Failed to retrieve newly created session {new_session_id}")
-            raise Exception("Failed to create new scanner session")
-
-        # Store in Flask session for browser persistence
-        session['dnsrecon_session_id'] = new_session_id
-        session.permanent = True
-
-        # Ensure session ID is set on scanner
-        new_scanner.session_id = new_session_id
-
-        # Get session info for user feedback
-        session_info = session_manager.get_session_info(new_session_id)
-
-        print(f"Session created/consolidated successfully")
-        print(f"  - Session ID: {new_session_id}")
-        print(f"  - User: {user_fingerprint}")
-        print(f"  - Scanner status: {new_scanner.status}")
-        print(f"  - Session age: {session_info.get('session_age_minutes', 0)} minutes")
-
-        return new_session_id, new_scanner
-
-    except Exception as e:
-        print(f"ERROR: Exception in get_user_scanner: {e}")
-        traceback.print_exc()
-        raise
+    # Get current Flask session info for debugging
+    current_flask_session_id = session.get('dnsrecon_session_id')
+
+    # Try to get existing session
+    if current_flask_session_id:
+        existing_scanner = session_manager.get_session(current_flask_session_id)
+        if existing_scanner:
+            return current_flask_session_id, existing_scanner
+
+    # Create new session if none exists
+    print("Creating new session as none was found...")
+    new_session_id = session_manager.create_session()
+    new_scanner = session_manager.get_session(new_session_id)
+
+    if not new_scanner:
+        raise Exception("Failed to create new scanner session")
+
+    # Store in Flask session
+    session['dnsrecon_session_id'] = new_session_id
+    session.permanent = True
+
+    return new_session_id, new_scanner
 
 
 @app.route('/')
 def index():
```
```diff
@@ -94,21 +57,15 @@ def index():
 @app.route('/api/scan/start', methods=['POST'])
 def start_scan():
     """
-    Start a new reconnaissance scan with enhanced user session management.
+    Start a new reconnaissance scan. Creates a new isolated scanner if
+    clear_graph is true, otherwise adds to the existing one.
     """
     print("=== API: /api/scan/start called ===")
 
     try:
-        print("Getting JSON data from request...")
         data = request.get_json()
-        print(f"Request data: {data}")
 
         if not data or 'target_domain' not in data:
-            print("ERROR: Missing target_domain in request")
-            return jsonify({
-                'success': False,
-                'error': 'Missing target_domain in request'
-            }), 400
+            return jsonify({'success': False, 'error': 'Missing target_domain in request'}), 400
 
         target_domain = data['target_domain'].strip()
         max_depth = data.get('max_depth', config.default_recursion_depth)
```
```diff
@@ -118,86 +75,50 @@ def start_scan():
 
         # Validation
         if not target_domain:
-            print("ERROR: Target domain cannot be empty")
-            return jsonify({
-                'success': False,
-                'error': 'Target domain cannot be empty'
-            }), 400
-
-        if not isinstance(max_depth, int) or max_depth < 1 or max_depth > 5:
-            print(f"ERROR: Invalid max_depth: {max_depth}")
-            return jsonify({
-                'success': False,
-                'error': 'Max depth must be an integer between 1 and 5'
-            }), 400
-
-        print("Validation passed, getting user scanner...")
-
-        # Get user-specific scanner with enhanced session management
-        user_session_id, scanner = get_user_scanner()
-
-        # Ensure session ID is properly set
-        if not scanner.session_id:
-            scanner.session_id = user_session_id
-
-        print(f"Using session: {user_session_id}")
-        print(f"Scanner object ID: {id(scanner)}")
-
-        # Start scan
-        print(f"Calling start_scan on scanner {id(scanner)}...")
+            return jsonify({'success': False, 'error': 'Target domain cannot be empty'}), 400
+        if not isinstance(max_depth, int) or not 1 <= max_depth <= 5:
+            return jsonify({'success': False, 'error': 'Max depth must be an integer between 1 and 5'}), 400
+
+        user_session_id, scanner = None, None
+
+        if clear_graph:
+            print("Clear graph requested: Creating a new, isolated scanner session.")
+            old_session_id = session.get('dnsrecon_session_id')
+            if old_session_id:
+                session_manager.terminate_session(old_session_id)
+
+            user_session_id = session_manager.create_session()
+            session['dnsrecon_session_id'] = user_session_id
+            session.permanent = True
+            scanner = session_manager.get_session(user_session_id)
+        else:
+            print("Adding to existing graph: Reusing the current scanner session.")
+            user_session_id, scanner = get_user_scanner()
+
+        if not scanner:
+            return jsonify({'success': False, 'error': 'Failed to get or create a scanner instance.'}), 500
+
+        print(f"Using scanner {id(scanner)} in session {user_session_id}")
+
         success = scanner.start_scan(target_domain, max_depth, clear_graph=clear_graph)
 
-        # Immediately update session state regardless of success
-        session_manager.update_session_scanner(user_session_id, scanner)
-
         if success:
-            scan_session_id = scanner.logger.session_id
-            print(f"Scan started successfully with scan session ID: {scan_session_id}")
-
-            # Get session info for user feedback
-            session_info = session_manager.get_session_info(user_session_id)
-
             return jsonify({
                 'success': True,
                 'message': 'Scan started successfully',
-                'scan_id': scan_session_id,
+                'scan_id': scanner.logger.session_id,
                 'user_session_id': user_session_id,
-                'scanner_status': scanner.status,
-                'session_info': {
-                    'user_fingerprint': session_info.get('user_fingerprint', 'unknown'),
-                    'session_age_minutes': session_info.get('session_age_minutes', 0),
-                    'consolidated': session_info.get('session_age_minutes', 0) > 0
-                },
-                'debug_info': {
-                    'scanner_object_id': id(scanner),
-                    'scanner_status': scanner.status
-                }
             })
         else:
-            print("ERROR: Scanner returned False")
-
-            # Provide more detailed error information
-            error_details = {
-                'scanner_status': scanner.status,
-                'scanner_object_id': id(scanner),
-                'session_id': user_session_id,
-                'providers_count': len(scanner.providers) if hasattr(scanner, 'providers') else 0
-            }
-
             return jsonify({
                 'success': False,
                 'error': f'Failed to start scan (scanner status: {scanner.status})',
-                'debug_info': error_details
             }), 409
 
     except Exception as e:
         print(f"ERROR: Exception in start_scan endpoint: {e}")
         traceback.print_exc()
-        return jsonify({
-            'success': False,
-            'error': f'Internal server error: {str(e)}'
-        }), 500
+        return jsonify({'success': False, 'error': f'Internal server error: {str(e)}'}), 500
 
 
 @app.route('/api/scan/stop', methods=['POST'])
 def stop_scan():
```
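For reference, a hedged example of calling the rewritten endpoint above. The host and port assume the Flask defaults from config.py, and the JSON field names mirror the handler's own validation (`target_domain`, `max_depth`, `clear_graph`); the target value is made up:

```python
# Hypothetical client call for POST /api/scan/start using requests.
import requests

resp = requests.post(
    'http://127.0.0.1:5000/api/scan/start',
    json={
        'target_domain': 'example.com',  # required by the handler
        'max_depth': 2,                  # must be an int in 1..5
        'clear_graph': True,             # requests a fresh, isolated scanner session
    },
    timeout=30,
)
print(resp.status_code, resp.json().get('scan_id'))
```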
```diff
@@ -252,7 +173,7 @@ def stop_scan():
 
 @app.route('/api/scan/status', methods=['GET'])
 def get_scan_status():
-    """Get current scan status with enhanced session information."""
+    """Get current scan status with error handling."""
     try:
         # Get user-specific scanner
         user_session_id, scanner = get_user_scanner()
```
```diff
@@ -283,15 +204,6 @@ def get_scan_status():
         status = scanner.get_scan_status()
         status['user_session_id'] = user_session_id
 
-        # Add enhanced session information
-        session_info = session_manager.get_session_info(user_session_id)
-        status['session_info'] = {
-            'user_fingerprint': session_info.get('user_fingerprint', 'unknown'),
-            'session_age_minutes': session_info.get('session_age_minutes', 0),
-            'client_ip': session_info.get('client_ip', 'unknown'),
-            'last_activity': session_info.get('last_activity')
-        }
-
         # Additional debug info
         status['debug_info'] = {
             'scanner_object_id': id(scanner),
```
```diff
@@ -320,6 +232,7 @@ def get_scan_status():
         }), 500
 
 
+
 @app.route('/api/graph', methods=['GET'])
 def get_graph_data():
     """Get current graph data with error handling."""
```
```diff
@@ -365,6 +278,7 @@ def get_graph_data():
         }), 500
 
 
+
 @app.route('/api/export', methods=['GET'])
 def export_results():
     """Export complete scan results as downloadable JSON for the user session."""
```
```diff
@@ -375,22 +289,17 @@ def export_results():
         # Get complete results
         results = scanner.export_results()
 
-        # Add enhanced session information to export
-        session_info = session_manager.get_session_info(user_session_id)
+        # Add session information to export
         results['export_metadata'] = {
             'user_session_id': user_session_id,
-            'user_fingerprint': session_info.get('user_fingerprint', 'unknown'),
-            'client_ip': session_info.get('client_ip', 'unknown'),
-            'session_age_minutes': session_info.get('session_age_minutes', 0),
             'export_timestamp': datetime.now(timezone.utc).isoformat(),
             'export_type': 'user_session_results'
         }
 
-        # Create filename with user fingerprint
+        # Create filename with timestamp
         timestamp = datetime.now(timezone.utc).strftime('%Y%m%d_%H%M%S')
         target = scanner.current_target or 'unknown'
-        user_fp = session_info.get('user_fingerprint', 'unknown')[:8]
-        filename = f"dnsrecon_{target}_{timestamp}_{user_fp}.json"
+        filename = f"dnsrecon_{target}_{timestamp}_{user_session_id[:8]}.json"
 
         # Create in-memory file
         json_data = json.dumps(results, indent=2, ensure_ascii=False)
```
```diff
@@ -415,12 +324,18 @@ def export_results():
 @app.route('/api/providers', methods=['GET'])
 def get_providers():
     """Get information about available providers for the user session."""
-    print("=== API: /api/providers called ===")
 
     try:
         # Get user-specific scanner
         user_session_id, scanner = get_user_scanner()
 
+        if scanner:
+            completed_tasks = scanner.indicators_completed
+            enqueued_tasks = len(scanner.task_queue)
+            print(f"DEBUG: Tasks - Completed: {completed_tasks}, Enqueued: {enqueued_tasks}")
+        else:
+            print("DEBUG: No active scanner session found.")
+
         provider_info = scanner.get_provider_info()
 
         return jsonify({
```
```diff
@@ -495,122 +410,6 @@ def set_api_keys():
             'error': f'Internal server error: {str(e)}'
         }), 500
 
-
-@app.route('/api/session/info', methods=['GET'])
-def get_session_info():
-    """Get enhanced information about the current user session."""
-    try:
-        user_session_id, scanner = get_user_scanner()
-        session_info = session_manager.get_session_info(user_session_id)
-
-        return jsonify({
-            'success': True,
-            'session_info': session_info
-        })
-
-    except Exception as e:
-        print(f"ERROR: Exception in get_session_info endpoint: {e}")
-        traceback.print_exc()
-        return jsonify({
-            'success': False,
-            'error': f'Internal server error: {str(e)}'
-        }), 500
-
-
-@app.route('/api/session/terminate', methods=['POST'])
-def terminate_session():
-    """Terminate the current user session."""
-    try:
-        user_session_id = session.get('dnsrecon_session_id')
-
-        if user_session_id:
-            success = session_manager.terminate_session(user_session_id)
-            # Clear Flask session
-            session.pop('dnsrecon_session_id', None)
-
-            return jsonify({
-                'success': success,
-                'message': 'Session terminated' if success else 'Session not found'
-            })
-        else:
-            return jsonify({
-                'success': False,
-                'error': 'No active session to terminate'
-            }), 400
-
-    except Exception as e:
-        print(f"ERROR: Exception in terminate_session endpoint: {e}")
-        traceback.print_exc()
-        return jsonify({
-            'success': False,
-            'error': f'Internal server error: {str(e)}'
-        }), 500
-
-
-@app.route('/api/admin/sessions', methods=['GET'])
-def list_sessions():
-    """Admin endpoint to list all active sessions with enhanced information."""
-    try:
-        sessions = session_manager.list_active_sessions()
-        stats = session_manager.get_statistics()
-
-        return jsonify({
-            'success': True,
-            'sessions': sessions,
-            'statistics': stats
-        })
-
-    except Exception as e:
-        print(f"ERROR: Exception in list_sessions endpoint: {e}")
-        traceback.print_exc()
-        return jsonify({
-            'success': False,
-            'error': f'Internal server error: {str(e)}'
-        }), 500
-
-
-@app.route('/api/health', methods=['GET'])
-def health_check():
-    """Health check endpoint with enhanced session statistics."""
-    try:
-        # Get session stats
-        session_stats = session_manager.get_statistics()
-
-        return jsonify({
-            'success': True,
-            'status': 'healthy',
-            'timestamp': datetime.now(timezone.utc).isoformat(),
-            'version': '2.0.0-enhanced',
-            'phase': 'enhanced_architecture',
-            'features': {
-                'multi_provider': True,
-                'concurrent_processing': True,
-                'real_time_updates': True,
-                'api_key_management': True,
-                'visualization': True,
-                'retry_logic': True,
-                'user_sessions': True,
-                'session_isolation': True,
-                'global_provider_caching': True,
-                'single_session_per_user': True,
-                'session_consolidation': True,
-                'task_completion_model': True
-            },
-            'session_statistics': session_stats,
-            'cache_info': {
-                'global_provider_cache': True,
-                'cache_location': '.cache/<provider_name>/',
-                'cache_expiry_hours': 12
-            }
-        })
-    except Exception as e:
-        print(f"ERROR: Exception in health_check endpoint: {e}")
-        return jsonify({
-            'success': False,
-            'error': f'Health check failed: {str(e)}'
-        }), 500
-
 
 @app.errorhandler(404)
 def not_found(error):
     """Handle 404 errors."""
```
```diff
@@ -632,7 +431,7 @@ def internal_error(error):
 
 
 if __name__ == '__main__':
-    print("Starting DNSRecon Flask application with enhanced user session support...")
+    print("Starting DNSRecon Flask application with user session support...")
 
     # Load configuration from environment
     config.load_from_env()
```
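A quick, hedged smoke test of the surviving endpoints is possible with Flask's built-in test client; this assumes app.py is importable as `app` and that a Redis instance backs `session_manager`:

```python
# Hypothetical smoke test; not part of the PR.
from app import app

with app.test_client() as client:
    resp = client.get('/api/scan/status')
    print(resp.status_code)   # 200 when a scanner session can be created
    print(resp.get_json())    # response includes 'user_session_id' per the diff above
```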
config.py | 125

```diff
@@ -5,110 +5,97 @@ Handles API key storage, rate limiting, and default settings.
 
 import os
 from typing import Dict, Optional
+from dotenv import load_dotenv
+
+# Load environment variables from .env file
+load_dotenv()
+
 
 class Config:
     """Configuration manager for DNSRecon application."""
 
     def __init__(self):
         """Initialize configuration with default values."""
-        self.api_keys: Dict[str, Optional[str]] = {
-            'shodan': None
-        }
+        self.api_keys: Dict[str, Optional[str]] = {}
 
-        # Default settings
+        # --- General Settings ---
         self.default_recursion_depth = 2
-        self.default_timeout = 10
+        self.default_timeout = 15
         self.max_concurrent_requests = 5
         self.large_entity_threshold = 100
+        self.max_retries_per_target = 3
+        self.cache_expiry_hours = 12
 
-        # Rate limiting settings (requests per minute)
+        # --- Provider Caching Settings ---
+        self.cache_timeout_hours = 6  # Provider-specific cache timeout
+
+        # --- Rate Limiting (requests per minute) ---
         self.rate_limits = {
-            'crtsh': 60,   # Free service, be respectful
-            'shodan': 60,  # API dependent
-            'dns': 100     # Local DNS queries
+            'crtsh': 30,
+            'shodan': 60,
+            'dns': 100
         }
 
-        # Provider settings
+        # --- Provider Settings ---
         self.enabled_providers = {
-            'crtsh': True,   # Always enabled (free)
-            'dns': True,     # Always enabled (free)
-            'shodan': False  # Requires API key
+            'crtsh': True,
+            'dns': True,
+            'shodan': False
         }
 
-        # Logging configuration
+        # --- Logging ---
         self.log_level = 'INFO'
         self.log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
 
-        # Flask configuration
+        # --- Flask & Session Settings ---
         self.flask_host = '127.0.0.1'
         self.flask_port = 5000
         self.flask_debug = True
+        self.flask_secret_key = 'default-secret-key-change-me'
+        self.flask_permanent_session_lifetime_hours = 2
+        self.session_timeout_minutes = 60
 
-    def set_api_key(self, provider: str, api_key: str) -> bool:
-        """
-        Set API key for a provider.
-
-        Args:
-            provider: Provider name (shodan, etc)
-            api_key: API key string
-
-        Returns:
-            bool: True if key was set successfully
-        """
-        if provider in self.api_keys:
-            self.api_keys[provider] = api_key
-            self.enabled_providers[provider] = True if api_key else False
-            return True
-        return False
+        # Load environment variables to override defaults
+        self.load_from_env()
+
+    def load_from_env(self):
+        """Load configuration from environment variables."""
+        self.set_api_key('shodan', os.getenv('SHODAN_API_KEY'))
+
+        # Override settings from environment
+        self.default_recursion_depth = int(os.getenv('DEFAULT_RECURSION_DEPTH', self.default_recursion_depth))
+        self.default_timeout = int(os.getenv('DEFAULT_TIMEOUT', self.default_timeout))
+        self.max_concurrent_requests = int(os.getenv('MAX_CONCURRENT_REQUESTS', self.max_concurrent_requests))
+        self.large_entity_threshold = int(os.getenv('LARGE_ENTITY_THRESHOLD', self.large_entity_threshold))
+        self.max_retries_per_target = int(os.getenv('MAX_RETRIES_PER_TARGET', self.max_retries_per_target))
+        self.cache_expiry_hours = int(os.getenv('CACHE_EXPIRY_HOURS', self.cache_expiry_hours))
+        self.cache_timeout_hours = int(os.getenv('CACHE_TIMEOUT_HOURS', self.cache_timeout_hours))
+
+        # Override Flask and session settings
+        self.flask_host = os.getenv('FLASK_HOST', self.flask_host)
+        self.flask_port = int(os.getenv('FLASK_PORT', self.flask_port))
+        self.flask_debug = os.getenv('FLASK_DEBUG', str(self.flask_debug)).lower() == 'true'
+        self.flask_secret_key = os.getenv('FLASK_SECRET_KEY', self.flask_secret_key)
+        self.flask_permanent_session_lifetime_hours = int(os.getenv('FLASK_PERMANENT_SESSION_LIFETIME_HOURS', self.flask_permanent_session_lifetime_hours))
+        self.session_timeout_minutes = int(os.getenv('SESSION_TIMEOUT_MINUTES', self.session_timeout_minutes))
+
+    def set_api_key(self, provider: str, api_key: Optional[str]) -> bool:
+        """Set API key for a provider."""
+        self.api_keys[provider] = api_key
+        if api_key:
+            self.enabled_providers[provider] = True
+        return True
 
     def get_api_key(self, provider: str) -> Optional[str]:
-        """
-        Get API key for a provider.
-
-        Args:
-            provider: Provider name
-
-        Returns:
-            API key or None if not set
-        """
+        """Get API key for a provider."""
         return self.api_keys.get(provider)
 
     def is_provider_enabled(self, provider: str) -> bool:
-        """
-        Check if a provider is enabled.
-
-        Args:
-            provider: Provider name
-
-        Returns:
-            bool: True if provider is enabled
-        """
+        """Check if a provider is enabled."""
         return self.enabled_providers.get(provider, False)
 
     def get_rate_limit(self, provider: str) -> int:
-        """
-        Get rate limit for a provider.
-
-        Args:
-            provider: Provider name
-
-        Returns:
-            Rate limit in requests per minute
-        """
+        """Get rate limit for a provider."""
         return self.rate_limits.get(provider, 60)
 
-    def load_from_env(self):
-        """Load configuration from environment variables."""
-        if os.getenv('SHODAN_API_KEY'):
-            self.set_api_key('shodan', os.getenv('SHODAN_API_KEY'))
-
-        # Override default settings from environment
-        self.default_recursion_depth = int(os.getenv('DEFAULT_RECURSION_DEPTH', '2'))
-        self.flask_debug = os.getenv('FLASK_DEBUG', 'True').lower() == 'true'
-        self.default_timeout = 30
-        self.max_concurrent_requests = 5
-
 
 # Global configuration instance
 config = Config()
```
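A small sketch of the env-override behavior that `Config.load_from_env()` now gives: environment values (or `.env` entries via `load_dotenv`) win over the in-code defaults. The specific env values below are made up:

```python
# Environment variables override Config defaults at construction time,
# because __init__ now calls load_from_env().
import os

os.environ['DEFAULT_TIMEOUT'] = '45'
os.environ['FLASK_PORT'] = '8080'

from config import Config

cfg = Config()
assert cfg.default_timeout == 45
assert cfg.flask_port == 8080
```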
core/__init__.py

```diff
@@ -8,7 +8,6 @@ from .scanner import Scanner, ScanStatus
 from .logger import ForensicLogger, get_forensic_logger, new_session
 from .session_manager import session_manager
 from .session_config import SessionConfig, create_session_config
-from .task_manager import TaskManager, TaskType, ReconTask
 
 __all__ = [
     'GraphManager',
@@ -20,10 +19,7 @@ __all__ = [
     'new_session',
     'session_manager',
     'SessionConfig',
-    'create_session_config',
-    'TaskManager',
-    'TaskType',
-    'ReconTask'
+    'create_session_config'
 ]
 
 __version__ = "1.0.0-phase2"
```
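An import sketch against the trimmed public API above (assumes the `core` package is on the path):

```python
# The task_manager exports are gone after this change, so importing
# them from the package should now fail.
from core import GraphManager, session_manager, SessionConfig  # still exported

try:
    from core import TaskManager  # removed in this PR
except ImportError as e:
    print("TaskManager no longer exported:", e)
```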
core/graph_manager.py

```diff
@@ -1,3 +1,5 @@
+# core/graph_manager.py
+
 """
 Graph data model for DNSRecon using NetworkX.
 Manages in-memory graph storage with confidence scoring and forensic metadata.
@@ -50,21 +52,23 @@ class GraphManager:
         self.__dict__.update(state)
         self.date_pattern = re.compile(r'^\d{4}-\d{2}-\d{2}[ T]\d{2}:\d{2}:\d{2}')
 
-    def _update_correlation_index(self, node_id: str, data: Any, path: List[str] = None):
-        """Recursively traverse metadata and add hashable values to the index."""
+    def _update_correlation_index(self, node_id: str, data: Any, path: List[str] = [], parent_attr: str = ""):
+        """Recursively traverse metadata and add hashable values to the index with better path tracking."""
         if path is None:
             path = []
 
         if isinstance(data, dict):
             for key, value in data.items():
-                self._update_correlation_index(node_id, value, path + [key])
+                self._update_correlation_index(node_id, value, path + [key], key)
         elif isinstance(data, list):
             for i, item in enumerate(data):
-                self._update_correlation_index(node_id, item, path + [f"[{i}]"])
+                # Instead of just using [i], include the parent attribute context
+                list_path_component = f"[{i}]" if not parent_attr else f"{parent_attr}[{i}]"
+                self._update_correlation_index(node_id, item, path + [list_path_component], parent_attr)
         else:
-            self._add_to_correlation_index(node_id, data, ".".join(path))
+            self._add_to_correlation_index(node_id, data, ".".join(path), parent_attr)
 
-    def _add_to_correlation_index(self, node_id: str, value: Any, path_str: str):
+    def _add_to_correlation_index(self, node_id: str, value: Any, path_str: str, parent_attr: str = ""):
         """Add a hashable value to the correlation index, filtering out noise."""
         if not isinstance(value, (str, int, float, bool)) or value is None:
             return
@@ -80,8 +84,8 @@ class GraphManager:
             return
         if len(value) < 4 or value.lower() in ['true', 'false', 'unknown', 'none', 'crt.sh']:
             return
-        elif isinstance(value, int) and abs(value) < 9999:
-            return  # Ignore small integers
+        elif isinstance(value, int) and (abs(value) < 1024 or abs(value) > 65535):
+            return  # Ignore small integers and common port numbers
         elif isinstance(value, bool):
             return  # Ignore boolean values
```
```diff
@@ -90,10 +94,47 @@ class GraphManager:
             self.correlation_index[value] = {}
         if node_id not in self.correlation_index[value]:
             self.correlation_index[value][node_id] = []
-        if path_str not in self.correlation_index[value][node_id]:
-            self.correlation_index[value][node_id].append(path_str)
-
-    def _check_for_correlations(self, new_node_id: str, data: Any, path: List[str] = None) -> List[Dict]:
+
+        # Store both the full path and the parent attribute for better edge labeling
+        correlation_entry = {
+            'path': path_str,
+            'parent_attr': parent_attr,
+            'meaningful_attr': self._extract_meaningful_attribute(path_str, parent_attr)
+        }
+
+        if correlation_entry not in self.correlation_index[value][node_id]:
+            self.correlation_index[value][node_id].append(correlation_entry)
+
+    def _extract_meaningful_attribute(self, path_str: str, parent_attr: str = "") -> str:
+        """Extract the most meaningful attribute name from a path string."""
+        if not path_str:
+            return "unknown"
+
+        path_parts = path_str.split('.')
+
+        # Look for the last non-array-index part
+        for part in reversed(path_parts):
+            # Skip array indices like [0], [1], etc.
+            if not (part.startswith('[') and part.endswith(']') and part[1:-1].isdigit()):
+                # Clean up compound names like "hostnames[0]" to just "hostnames"
+                clean_part = re.sub(r'\[\d+\]$', '', part)
+                if clean_part:
+                    return clean_part
+
+        # Fallback to parent attribute if available
+        if parent_attr:
+            return parent_attr
+
+        # Last resort - use the first meaningful part
+        for part in path_parts:
+            if not (part.startswith('[') and part.endswith(']') and part[1:-1].isdigit()):
+                clean_part = re.sub(r'\[\d+\]$', '', part)
+                if clean_part:
+                    return clean_part
+
+        return "correlation"
+
+    def _check_for_correlations(self, new_node_id: str, data: Any, path: List[str] = [], parent_attr: str = "") -> List[Dict]:
         """Recursively traverse metadata to find correlations with existing data."""
        if path is None:
            path = []
```
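To make the path-to-label rule above concrete, a small standalone sketch of the same logic; the sample metadata paths are hypothetical:

```python
# Standalone re-implementation of _extract_meaningful_attribute's rule,
# for illustration only.
import re

def extract_meaningful_attribute(path_str: str, parent_attr: str = "") -> str:
    if not path_str:
        return "unknown"
    # Walk the dotted path from the end, skipping pure array indices.
    for part in reversed(path_str.split('.')):
        if not (part.startswith('[') and part.endswith(']') and part[1:-1].isdigit()):
            clean = re.sub(r'\[\d+\]$', '', part)  # "hostnames[0]" -> "hostnames"
            if clean:
                return clean
    return parent_attr or "correlation"

print(extract_meaningful_attribute("hostnames[0]"))              # hostnames
print(extract_meaningful_attribute("ssl.cert.issuer"))           # issuer
print(extract_meaningful_attribute("[2]", parent_attr="ports"))  # ports
```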
```diff
@@ -103,10 +144,11 @@ class GraphManager:
             for key, value in data.items():
                 if key == 'source':  # Avoid correlating on the provider name
                     continue
-                all_correlations.extend(self._check_for_correlations(new_node_id, value, path + [key]))
+                all_correlations.extend(self._check_for_correlations(new_node_id, value, path + [key], key))
         elif isinstance(data, list):
             for i, item in enumerate(data):
-                all_correlations.extend(self._check_for_correlations(new_node_id, item, path + [f"[{i}]"]))
+                list_path_component = f"[{i}]" if not parent_attr else f"{parent_attr}[{i}]"
+                all_correlations.extend(self._check_for_correlations(new_node_id, item, path + [list_path_component], parent_attr))
         else:
             value = data
             if value in self.correlation_index:
```
```diff
@@ -117,11 +159,31 @@ class GraphManager:
         if len(unique_nodes) < 2:
             return all_correlations  # Correlation must involve at least two distinct nodes
 
-        new_source = {'node_id': new_node_id, 'path': ".".join(path)}
+        new_source = {
+            'node_id': new_node_id,
+            'path': ".".join(path),
+            'parent_attr': parent_attr,
+            'meaningful_attr': self._extract_meaningful_attribute(".".join(path), parent_attr)
+        }
         all_sources = [new_source]
-        for node_id, paths in existing_nodes_with_paths.items():
-            for p_str in paths:
-                all_sources.append({'node_id': node_id, 'path': p_str})
+
+        for node_id, path_entries in existing_nodes_with_paths.items():
+            for entry in path_entries:
+                if isinstance(entry, dict):
+                    all_sources.append({
+                        'node_id': node_id,
+                        'path': entry['path'],
+                        'parent_attr': entry.get('parent_attr', ''),
+                        'meaningful_attr': entry.get('meaningful_attr', self._extract_meaningful_attribute(entry['path'], entry.get('parent_attr', '')))
+                    })
+                else:
+                    # Handle legacy string-only entries
+                    all_sources.append({
+                        'node_id': node_id,
+                        'path': str(entry),
+                        'parent_attr': '',
+                        'meaningful_attr': self._extract_meaningful_attribute(str(entry))
+                    })
 
         all_correlations.append({
             'value': value,
```
```diff
@@ -163,8 +225,7 @@ class GraphManager:
                 # Skip creating correlation node - would be redundant
                 continue
 
-            # STEP 2: Filter out node pairs that already have direct edges
-            eligible_nodes = self._filter_nodes_without_direct_edges(set(corr['nodes']))
+            eligible_nodes = set(corr['nodes'])
 
             if len(eligible_nodes) < 2:
                 # Need at least 2 nodes to create a correlation
@@ -184,11 +245,12 @@ class GraphManager:
                          metadata={'values': [value], 'sources': corr['sources'],
                                    'correlated_nodes': list(eligible_nodes)})
 
-            # Create edges from eligible nodes to this correlation node
+            # Create edges from eligible nodes to this correlation node with better labeling
             for c_node_id in eligible_nodes:
                 if self.graph.has_node(c_node_id):
-                    attribute = corr['sources'][0]['path'].split('.')[-1]
-                    relationship_type = f"c_{attribute}"
+                    # Find the best attribute name for this node
+                    meaningful_attr = self._find_best_attribute_name_for_node(c_node_id, corr['sources'])
+                    relationship_type = f"c_{meaningful_attr}"
                     self.add_edge(c_node_id, correlation_node_id, relationship_type, confidence_score=0.9)
 
         self._update_correlation_index(node_id, attributes)
```
```diff
@@ -196,27 +258,34 @@ class GraphManager:
         self.last_modified = datetime.now(timezone.utc).isoformat()
         return is_new_node
 
-    def _filter_nodes_without_direct_edges(self, node_set: set) -> set:
-        """
-        Filter out nodes that already have direct edges between them.
-        Returns set of nodes that should be included in correlation.
-        """
-        nodes_list = list(node_set)
-        eligible_nodes = set(node_set)  # Start with all nodes
-
-        # Check all pairs of nodes
-        for i in range(len(nodes_list)):
-            for j in range(i + 1, len(nodes_list)):
-                node_a = nodes_list[i]
-                node_b = nodes_list[j]
-
-                # Check if direct edge exists in either direction
-                if self._has_direct_edge_bidirectional(node_a, node_b):
-                    # Remove both nodes from eligible set since they're already connected
-                    eligible_nodes.discard(node_a)
-                    eligible_nodes.discard(node_b)
-
-        return eligible_nodes
+    def _find_best_attribute_name_for_node(self, node_id: str, sources: List[Dict]) -> str:
+        """Find the best attribute name for a correlation edge by looking at the sources."""
+        node_sources = [s for s in sources if s['node_id'] == node_id]
+
+        if not node_sources:
+            return "correlation"
+
+        # Use the meaningful_attr if available
+        for source in node_sources:
+            meaningful_attr = source.get('meaningful_attr')
+            if meaningful_attr and meaningful_attr != "unknown":
+                return meaningful_attr
+
+        # Fallback to parent_attr
+        for source in node_sources:
+            parent_attr = source.get('parent_attr')
+            if parent_attr:
+                return parent_attr
+
+        # Last resort - extract from path
+        for source in node_sources:
+            path = source.get('path', '')
+            if path:
+                extracted = self._extract_meaningful_attribute(path)
+                if extracted != "unknown":
+                    return extracted
+
+        return "correlation"
 
     def _has_direct_edge_bidirectional(self, node_a: str, node_b: str) -> bool:
         """
```
```diff
@@ -290,7 +359,7 @@ class GraphManager:
         # Create set of unique sources based on (node_id, path) tuples
         source_set = set()
         for source in existing_sources + new_sources:
-            source_tuple = (source['node_id'], source['path'])
+            source_tuple = (source['node_id'], source.get('path', ''))
             source_set.add(source_tuple)
 
         # Convert back to list of dictionaries
@@ -421,10 +490,14 @@ class GraphManager:
     def _get_confidence_distribution(self) -> Dict[str, int]:
         """Get distribution of edge confidence scores."""
         distribution = {'high': 0, 'medium': 0, 'low': 0}
-        for _, _, confidence in self.graph.edges(data='confidence_score', default=0):
-            if confidence >= 0.8: distribution['high'] += 1
-            elif confidence >= 0.6: distribution['medium'] += 1
-            else: distribution['low'] += 1
+        for _, _, data in self.graph.edges(data=True):
+            confidence = data.get('confidence_score', 0)
+            if confidence >= 0.8:
+                distribution['high'] += 1
+            elif confidence >= 0.6:
+                distribution['medium'] += 1
+            else:
+                distribution['low'] += 1
         return distribution
 
     def get_statistics(self) -> Dict[str, Any]:
@@ -439,9 +512,10 @@ class GraphManager:
         # Calculate distributions
         for node_type in NodeType:
             stats['node_type_distribution'][node_type.value] = self.get_nodes_by_type(node_type).__len__()
-        for _, _, rel_type in self.graph.edges(data='relationship_type', default='unknown'):
+        for _, _, data in self.graph.edges(data=True):
+            rel_type = data.get('relationship_type', 'unknown')
             stats['relationship_type_distribution'][rel_type] = stats['relationship_type_distribution'].get(rel_type, 0) + 1
-        for _, _, provider in self.graph.edges(data='source_provider', default='unknown'):
+            provider = data.get('source_provider', 'unknown')
             stats['provider_distribution'][provider] = stats['provider_distribution'].get(provider, 0) + 1
         return stats
```
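A small sketch of the NetworkX edge-data access pattern the two rewritten loops switch to; the toy graph below is hypothetical, not DNSRecon data:

```python
# G.edges(data=True) yields one attribute dict per edge, so missing
# attributes can be handled uniformly with .get(); the old
# G.edges(data='attr', default=...) form yields a single value instead.
import networkx as nx

G = nx.DiGraph()
G.add_edge('a', 'b', confidence_score=0.9, relationship_type='dns_a')
G.add_edge('b', 'c')  # edge with no attributes

for u, v, data in G.edges(data=True):
    print(u, v, data.get('confidence_score', 0), data.get('relationship_type', 'unknown'))

for u, v, conf in G.edges(data='confidence_score', default=0):
    print(u, v, conf)
```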
core/logger.py

```diff
@@ -42,7 +42,7 @@ class ForensicLogger:
     Maintains detailed audit trail of all reconnaissance activities.
     """
 
-    def __init__(self, session_id: str = None):
+    def __init__(self, session_id: str = ""):
         """
         Initialize forensic logger.
 
@@ -203,8 +203,6 @@ class ForensicLogger:
         self.session_metadata['target_domains'] = list(self.session_metadata['target_domains'])
 
         self.logger.info(f"Scan Complete - Session: {self.session_id}")
-        self.logger.info(f"Total API Requests: {self.session_metadata['total_requests']}")
-        self.logger.info(f"Total Relationships: {self.session_metadata['total_relationships']}")
 
     def export_audit_trail(self) -> Dict[str, Any]:
         """
```
core/scanner.py | 1105 (diff suppressed because it is too large)
@@ -1,372 +1,20 @@
|
|||||||
"""
|
"""
|
||||||
Enhanced per-session configuration management for DNSRecon.
|
Per-session configuration management for DNSRecon.
|
||||||
Provides isolated configuration instances for each user session while supporting global caching.
|
Provides isolated configuration instances for each user session.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
import os
|
from config import Config
|
||||||
from typing import Dict, Optional
|
|
||||||
|
|
||||||
|
class SessionConfig(Config):
|
||||||
class SessionConfig:
|
|
||||||
"""
|
"""
|
||||||
Enhanced session-specific configuration that inherits from global config
|
Session-specific configuration that inherits from global config
|
||||||
but maintains isolated API keys and provider settings while supporting global caching.
|
but maintains isolated API keys and provider settings.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
"""Initialize enhanced session config with global cache support."""
|
"""Initialize session config with global defaults."""
|
||||||
# Copy all attributes from global config
|
super().__init__()
|
||||||
self.api_keys: Dict[str, Optional[str]] = {
|
|
||||||
'shodan': None
|
|
||||||
}
|
|
||||||
|
|
||||||
# Default settings (copied from global config)
|
|
||||||
self.default_recursion_depth = 2
|
|
||||||
self.default_timeout = 30
|
|
||||||
self.max_concurrent_requests = 5
|
|
||||||
self.large_entity_threshold = 100
|
|
||||||
|
|
||||||
# Enhanced rate limiting settings (per session)
|
|
||||||
self.rate_limits = {
|
|
||||||
'crtsh': 60,
|
|
||||||
'shodan': 60,
|
|
||||||
'dns': 100
|
|
||||||
}
|
|
||||||
|
|
||||||
# Enhanced provider settings (per session)
|
|
||||||
self.enabled_providers = {
|
|
||||||
'crtsh': True,
|
|
||||||
'dns': True,
|
|
||||||
'shodan': False
|
|
||||||
}
|
|
||||||
|
|
||||||
# Task-based execution settings
|
|
||||||
self.task_retry_settings = {
|
|
||||||
'max_retries': 3,
|
|
||||||
'base_backoff_seconds': 1.0,
|
|
||||||
'max_backoff_seconds': 60.0,
|
|
||||||
'retry_on_rate_limit': True,
|
|
||||||
'retry_on_connection_error': True,
|
|
||||||
'retry_on_timeout': True
|
|
||||||
}
|
|
||||||
|
|
||||||
# Cache settings (global across all sessions)
|
|
||||||
self.cache_settings = {
|
|
||||||
'enabled': True,
|
|
||||||
'expiry_hours': 12,
|
|
||||||
'cache_base_dir': '.cache',
|
|
||||||
'per_provider_directories': True,
|
|
||||||
'thread_safe_operations': True
|
|
||||||
}
|
|
||||||
|
|
||||||
# Logging configuration
|
|
||||||
self.log_level = 'INFO'
|
|
||||||
self.log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
|
|
||||||
|
|
||||||
# Flask configuration (shared)
|
|
||||||
self.flask_host = '127.0.0.1'
|
|
||||||
self.flask_port = 5000
|
|
||||||
self.flask_debug = True
|
|
||||||
|
|
||||||
# Session isolation settings
|
|
||||||
self.session_isolation = {
|
|
||||||
'enforce_single_session_per_user': True,
|
|
||||||
'consolidate_session_data_on_replacement': True,
|
|
||||||
'user_fingerprinting_enabled': True,
|
|
||||||
'session_timeout_minutes': 60
|
|
||||||
}
|
|
||||||
|
|
||||||
# Circuit breaker settings for provider reliability
|
|
||||||
self.circuit_breaker = {
|
|
||||||
'enabled': True,
|
|
||||||
'failure_threshold': 5, # Failures before opening circuit
|
|
||||||
'recovery_timeout_seconds': 300, # 5 minutes before trying again
|
|
||||||
'half_open_max_calls': 3 # Test calls when recovering
|
|
||||||
}
|
|
||||||
|
|
||||||
def set_api_key(self, provider: str, api_key: str) -> bool:
|
|
||||||
"""
|
|
||||||
Set API key for a provider in this session.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
provider: Provider name (shodan, etc)
|
|
||||||
api_key: API key string (empty string to clear)
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
bool: True if key was set successfully
|
|
||||||
"""
|
|
||||||
if provider in self.api_keys:
|
|
||||||
# Handle clearing of API keys
|
|
||||||
if api_key and api_key.strip():
|
|
||||||
self.api_keys[provider] = api_key.strip()
|
|
||||||
self.enabled_providers[provider] = True
|
|
||||||
else:
|
|
||||||
self.api_keys[provider] = None
|
|
||||||
self.enabled_providers[provider] = False
|
|
||||||
return True
|
|
||||||
return False
|
|
||||||
|
|
||||||
def get_api_key(self, provider: str) -> Optional[str]:
|
|
||||||
"""
|
|
||||||
Get API key for a provider in this session.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
provider: Provider name
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
API key or None if not set
|
|
||||||
"""
|
|
||||||
return self.api_keys.get(provider)
|
|
||||||
|
|
||||||
    def is_provider_enabled(self, provider: str) -> bool:
        """
        Check if a provider is enabled in this session.

        Args:
            provider: Provider name

        Returns:
            bool: True if provider is enabled
        """
        return self.enabled_providers.get(provider, False)

    def get_rate_limit(self, provider: str) -> int:
        """
        Get rate limit for a provider in this session.

        Args:
            provider: Provider name

        Returns:
            Rate limit in requests per minute
        """
        return self.rate_limits.get(provider, 60)

    def get_task_retry_config(self) -> Dict[str, any]:
        """
        Get task retry configuration for this session.

        Returns:
            Dictionary with retry settings
        """
        return self.task_retry_settings.copy()

    def get_cache_config(self) -> Dict[str, any]:
        """
        Get cache configuration (global settings).

        Returns:
            Dictionary with cache settings
        """
        return self.cache_settings.copy()

    def is_circuit_breaker_enabled(self) -> bool:
        """Check if circuit breaker is enabled for provider reliability."""
        return self.circuit_breaker.get('enabled', True)

    def get_circuit_breaker_config(self) -> Dict[str, any]:
        """Get circuit breaker configuration."""
        return self.circuit_breaker.copy()

    def update_provider_settings(self, provider_updates: Dict[str, Dict[str, any]]) -> bool:
        """
        Update provider-specific settings in bulk.

        Args:
            provider_updates: Dictionary of provider -> settings updates

        Returns:
            bool: True if updates were applied successfully
        """
        try:
            for provider_name, updates in provider_updates.items():
                # Update rate limits
                if 'rate_limit' in updates:
                    self.rate_limits[provider_name] = updates['rate_limit']

                # Update enabled status
                if 'enabled' in updates:
                    self.enabled_providers[provider_name] = updates['enabled']

                # Update API key
                if 'api_key' in updates:
                    self.set_api_key(provider_name, updates['api_key'])

            return True
        except Exception as e:
            print(f"Error updating provider settings: {e}")
            return False

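For example, a single bulk call can adjust the rate limit and key together; a sketch using the 'shodan' provider named elsewhere in this file:

    config.update_provider_settings({
        'shodan': {'rate_limit': 30, 'api_key': 'replacement-key', 'enabled': True},
    })
    assert config.get_rate_limit('shodan') == 30

Note the 'api_key' branch routes through set_api_key(), so bulk updates inherit the same strip-and-enable semantics shown above.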
    def validate_configuration(self) -> Dict[str, any]:
        """
        Validate the current configuration and return validation results.

        Returns:
            Dictionary with validation results and any issues found
        """
        validation_result = {
            'valid': True,
            'warnings': [],
            'errors': [],
            'provider_status': {}
        }

        # Validate provider configurations
        for provider_name, enabled in self.enabled_providers.items():
            provider_status = {
                'enabled': enabled,
                'has_api_key': bool(self.api_keys.get(provider_name)),
                'rate_limit': self.rate_limits.get(provider_name, 60)
            }

            # Check for potential issues
            if enabled and provider_name in ['shodan'] and not provider_status['has_api_key']:
                validation_result['warnings'].append(
                    f"Provider '{provider_name}' is enabled but missing API key"
                )

            validation_result['provider_status'][provider_name] = provider_status

        # Validate task settings
        if self.task_retry_settings['max_retries'] > 10:
            validation_result['warnings'].append(
                f"High retry count ({self.task_retry_settings['max_retries']}) may cause long delays"
            )

        # Validate concurrent settings
        if self.max_concurrent_requests > 10:
            validation_result['warnings'].append(
                f"High concurrency ({self.max_concurrent_requests}) may overwhelm providers"
            )

        # Validate cache settings
        if not os.path.exists(self.cache_settings['cache_base_dir']):
            try:
                os.makedirs(self.cache_settings['cache_base_dir'], exist_ok=True)
            except Exception as e:
                validation_result['errors'].append(f"Cannot create cache directory: {e}")
                validation_result['valid'] = False

        return validation_result

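Callers are expected to treat warnings as advisory and errors as fatal, which is exactly what create_session_config() further down does; condensed:

    result = config.validate_configuration()
    for warning in result['warnings']:
        print(f"WARNING: {warning}")
    if not result['valid']:
        raise ValueError("; ".join(result['errors']))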
    def load_from_env(self):
        """Load configuration from environment variables with enhanced validation."""
        # Load API keys from environment
        if os.getenv('SHODAN_API_KEY') and not self.api_keys['shodan']:
            self.set_api_key('shodan', os.getenv('SHODAN_API_KEY'))
            print("Loaded Shodan API key from environment")

        # Override default settings from environment
        self.default_recursion_depth = int(os.getenv('DEFAULT_RECURSION_DEPTH', '2'))
        self.default_timeout = int(os.getenv('DEFAULT_TIMEOUT', '30'))
        self.max_concurrent_requests = int(os.getenv('MAX_CONCURRENT_REQUESTS', '5'))

        # Load task retry settings from environment
        if os.getenv('TASK_MAX_RETRIES'):
            self.task_retry_settings['max_retries'] = int(os.getenv('TASK_MAX_RETRIES'))

        if os.getenv('TASK_BASE_BACKOFF'):
            self.task_retry_settings['base_backoff_seconds'] = float(os.getenv('TASK_BASE_BACKOFF'))

        # Load cache settings from environment
        if os.getenv('CACHE_EXPIRY_HOURS'):
            self.cache_settings['expiry_hours'] = int(os.getenv('CACHE_EXPIRY_HOURS'))

        if os.getenv('CACHE_DISABLED'):
            self.cache_settings['enabled'] = os.getenv('CACHE_DISABLED').lower() != 'true'

        # Load circuit breaker settings
        if os.getenv('CIRCUIT_BREAKER_DISABLED'):
            self.circuit_breaker['enabled'] = os.getenv('CIRCUIT_BREAKER_DISABLED').lower() != 'true'

        # Flask settings
        self.flask_debug = os.getenv('FLASK_DEBUG', 'True').lower() == 'true'

        print("Enhanced configuration loaded from environment")

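CACHE_DISABLED and CIRCUIT_BREAKER_DISABLED are inverted flags: the feature stays on unless the variable is exactly the string 'true' (case-insensitive). A quick check of that parsing logic in isolation:

    import os

    os.environ['CACHE_DISABLED'] = 'false'
    assert (os.environ['CACHE_DISABLED'].lower() != 'true') is True   # cache stays enabled

    os.environ['CACHE_DISABLED'] = 'TRUE'
    assert (os.environ['CACHE_DISABLED'].lower() != 'true') is False  # only 'true' disables it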
    def export_config_summary(self) -> Dict[str, any]:
        """
        Export a summary of the current configuration for debugging/logging.

        Returns:
            Dictionary with configuration summary (API keys redacted)
        """
        return {
            'providers': {
                provider: {
                    'enabled': self.enabled_providers.get(provider, False),
                    'has_api_key': bool(self.api_keys.get(provider)),
                    'rate_limit': self.rate_limits.get(provider, 60)
                }
                for provider in self.enabled_providers.keys()
            },
            'task_settings': {
                'max_retries': self.task_retry_settings['max_retries'],
                'max_concurrent_requests': self.max_concurrent_requests,
                'large_entity_threshold': self.large_entity_threshold
            },
            'cache_settings': {
                'enabled': self.cache_settings['enabled'],
                'expiry_hours': self.cache_settings['expiry_hours'],
                'base_directory': self.cache_settings['cache_base_dir']
            },
            'session_settings': {
                'isolation_enabled': self.session_isolation['enforce_single_session_per_user'],
                'consolidation_enabled': self.session_isolation['consolidate_session_data_on_replacement'],
                'timeout_minutes': self.session_isolation['session_timeout_minutes']
            },
            'circuit_breaker': {
                'enabled': self.circuit_breaker['enabled'],
                'failure_threshold': self.circuit_breaker['failure_threshold'],
                'recovery_timeout': self.circuit_breaker['recovery_timeout_seconds']
            }
        }

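Because the summary reports has_api_key booleans instead of the keys themselves, it is safe to log verbatim; a provider entry comes out looking roughly like this (illustrative values):

    summary = config.export_config_summary()
    print(summary['providers']['shodan'])
    # {'enabled': True, 'has_api_key': True, 'rate_limit': 60}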
-def create_session_config() -> SessionConfig:
-    """
-    Create a new enhanced session configuration instance.
-
-    Returns:
-        Configured SessionConfig instance
-    """
-    session_config = SessionConfig()
-    session_config.load_from_env()
-
-    # Validate configuration and log any issues
-    validation = session_config.validate_configuration()
-    if validation['warnings']:
-        print("Configuration warnings:")
-        for warning in validation['warnings']:
-            print(f"  WARNING: {warning}")
-
-    if validation['errors']:
-        print("Configuration errors:")
-        for error in validation['errors']:
-            print(f"  ERROR: {error}")
-
-    if not validation['valid']:
-        raise ValueError("Configuration validation failed - see errors above")
-
-    print(f"Enhanced session configuration created successfully")
-    return session_config
-
-
-def create_test_config() -> SessionConfig:
-    """
-    Create a test configuration with safe defaults for testing.
-
-    Returns:
-        Test-safe SessionConfig instance
-    """
-    test_config = SessionConfig()
-
-    # Override settings for testing
-    test_config.max_concurrent_requests = 2
-    test_config.task_retry_settings['max_retries'] = 1
-    test_config.task_retry_settings['base_backoff_seconds'] = 0.1
-    test_config.cache_settings['expiry_hours'] = 1
-    test_config.session_isolation['session_timeout_minutes'] = 10
-
-    print("Test configuration created")
-    return test_config
+def create_session_config() -> 'SessionConfig':
+    """Create a new session configuration instance."""
+    return SessionConfig()
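Either variant of the factory is a drop-in for the call sites in session_manager.py below, which only ever do the following (sketch):

    from core.session_config import create_session_config
    from core.scanner import Scanner

    session_config = create_session_config()   # the validated variant may raise ValueError
    scanner = Scanner(session_config=session_config)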
@@ -5,153 +5,41 @@ import time
 import uuid
 import redis
 import pickle
-import hashlib
-from typing import Dict, Optional, Any, List, Tuple
+from typing import Dict, Optional, Any, List

 from core.scanner import Scanner
+from config import config

+# WARNING: Using pickle can be a security risk if the data source is not trusted.
+# In this case, we are only serializing/deserializing our own trusted Scanner objects,
+# which is generally safe. Do not unpickle data from untrusted sources.
-class UserIdentifier:
-    """Handles user identification for session management."""
-
-    @staticmethod
-    def generate_user_fingerprint(client_ip: str, user_agent: str) -> str:
-        """
-        Generate a unique fingerprint for a user based on IP and User-Agent.
-
-        Args:
-            client_ip: Client IP address
-            user_agent: User-Agent header value
-
-        Returns:
-            Unique user fingerprint hash
-        """
-        # Create deterministic user identifier
-        user_data = f"{client_ip}:{user_agent[:100]}"  # Limit UA to 100 chars
-        fingerprint = hashlib.sha256(user_data.encode()).hexdigest()[:16]  # 16 char fingerprint
-        return f"user_{fingerprint}"
-
-    @staticmethod
-    def extract_request_info(request) -> Tuple[str, str]:
-        """
-        Extract client IP and User-Agent from Flask request.
-
-        Args:
-            request: Flask request object
-
-        Returns:
-            Tuple of (client_ip, user_agent)
-        """
-        # Handle proxy headers for real IP
-        client_ip = request.headers.get('X-Forwarded-For', '').split(',')[0].strip()
-        if not client_ip:
-            client_ip = request.headers.get('X-Real-IP', '')
-        if not client_ip:
-            client_ip = request.remote_addr or 'unknown'
-
-        user_agent = request.headers.get('User-Agent', 'unknown')
-
-        return client_ip, user_agent
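The removed fingerprint is deterministic: the same IP and User-Agent prefix always hash to the same 16-hex-character user key. The shape of its output, reproduced standalone (the concrete digest is whatever SHA-256 yields, not shown here):

    import hashlib

    user_data = "203.0.113.7:Mozilla/5.0"                              # ip:ua[:100]
    fingerprint = hashlib.sha256(user_data.encode()).hexdigest()[:16]  # 16 hex chars
    print(f"user_{fingerprint}")                                       # 'user_' + 16 hex chars

Truncating the digest to 16 hex characters (64 bits) keeps keys short at the cost of some collision resistance, which is acceptable for session routing but not for security decisions.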
-
-class SessionConsolidator:
-    """Handles consolidation of session data when replacing sessions."""
-
-    @staticmethod
-    def consolidate_scanner_data(old_scanner: 'Scanner', new_scanner: 'Scanner') -> 'Scanner':
-        """
-        Consolidate useful data from old scanner into new scanner.
-
-        Args:
-            old_scanner: Scanner from terminated session
-            new_scanner: New scanner instance
-
-        Returns:
-            Enhanced new scanner with consolidated data
-        """
-        try:
-            # Consolidate graph data if old scanner has valuable data
-            if old_scanner and hasattr(old_scanner, 'graph') and old_scanner.graph:
-                old_stats = old_scanner.graph.get_statistics()
-                if old_stats['basic_metrics']['total_nodes'] > 0:
-                    print(f"Consolidating graph data: {old_stats['basic_metrics']['total_nodes']} nodes, {old_stats['basic_metrics']['total_edges']} edges")
-
-                    # Transfer nodes and edges to new scanner's graph
-                    for node_id, node_data in old_scanner.graph.graph.nodes(data=True):
-                        # Add node to new graph with all attributes
-                        new_scanner.graph.graph.add_node(node_id, **node_data)
-
-                    for source, target, edge_data in old_scanner.graph.graph.edges(data=True):
-                        # Add edge to new graph with all attributes
-                        new_scanner.graph.graph.add_edge(source, target, **edge_data)
-
-                    # Update correlation index
-                    if hasattr(old_scanner.graph, 'correlation_index'):
-                        new_scanner.graph.correlation_index = old_scanner.graph.correlation_index.copy()
-
-                    # Update timestamps
-                    new_scanner.graph.creation_time = old_scanner.graph.creation_time
-                    new_scanner.graph.last_modified = old_scanner.graph.last_modified
-
-            # Consolidate provider statistics
-            if old_scanner and hasattr(old_scanner, 'providers') and old_scanner.providers:
-                for old_provider in old_scanner.providers:
-                    # Find matching provider in new scanner
-                    matching_new_provider = None
-                    for new_provider in new_scanner.providers:
-                        if new_provider.get_name() == old_provider.get_name():
-                            matching_new_provider = new_provider
-                            break
-
-                    if matching_new_provider:
-                        # Transfer cumulative statistics
-                        matching_new_provider.total_requests += old_provider.total_requests
-                        matching_new_provider.successful_requests += old_provider.successful_requests
-                        matching_new_provider.failed_requests += old_provider.failed_requests
-                        matching_new_provider.total_relationships_found += old_provider.total_relationships_found
-
-                        # Transfer cache statistics if available
-                        if hasattr(old_provider, 'cache_hits'):
-                            matching_new_provider.cache_hits += getattr(old_provider, 'cache_hits', 0)
-                            matching_new_provider.cache_misses += getattr(old_provider, 'cache_misses', 0)
-
-                        print(f"Consolidated {old_provider.get_name()} provider stats: {old_provider.total_requests} requests")
-
-            return new_scanner
-
-        except Exception as e:
-            print(f"Warning: Error during session consolidation: {e}")
-            return new_scanner
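The graph hand-off in the removed consolidator is an attribute-preserving copy between two graph objects; the .nodes(data=True)/.edges(data=True) calls suggest networkx-style graphs, so the same idea in isolation looks like:

    import networkx as nx

    old_g, new_g = nx.DiGraph(), nx.DiGraph()
    old_g.add_node("example.com", node_type="domain")
    old_g.add_edge("example.com", "192.0.2.1", confidence=0.9)

    for node_id, node_data in old_g.nodes(data=True):
        new_g.add_node(node_id, **node_data)           # attributes survive the copy
    for src, dst, edge_data in old_g.edges(data=True):
        new_g.add_edge(src, dst, **edge_data)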
class SessionManager:
    """
-    Manages single scanner session per user using Redis with user identification.
-    Enforces one active session per user for consistent state management.
+    Manages multiple scanner instances for concurrent user sessions using Redis.
    """

-    def __init__(self, session_timeout_minutes: int = 60):
+    def __init__(self, session_timeout_minutes: int = 0):
        """
-        Initialize session manager with Redis backend and user tracking.
+        Initialize session manager with a Redis backend.
        """
+        if session_timeout_minutes is None:
+            session_timeout_minutes = config.session_timeout_minutes

        self.redis_client = redis.StrictRedis(db=0, decode_responses=False)
        self.session_timeout = session_timeout_minutes * 60  # Convert to seconds
-        self.lock = threading.Lock()
+        self.lock = threading.Lock()  # Lock for local operations, Redis handles atomic ops

-        # User identification helper
-        self.user_identifier = UserIdentifier()
-        self.consolidator = SessionConsolidator()

        # Start cleanup thread
        self.cleanup_thread = threading.Thread(target=self._cleanup_loop, daemon=True)
        self.cleanup_thread.start()

-        print(f"SessionManager initialized with Redis backend, user tracking, and {session_timeout_minutes}min timeout")
+        print(f"SessionManager initialized with Redis backend and {session_timeout_minutes}min timeout")

    def __getstate__(self):
        """Prepare SessionManager for pickling."""
        state = self.__dict__.copy()
-        # Exclude unpickleable attributes
+        # Exclude unpickleable attributes - Redis client and threading objects
        unpicklable_attrs = ['lock', 'cleanup_thread', 'redis_client']
        for attr in unpicklable_attrs:
            if attr in state:
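The __getstate__/__setstate__ pair is the standard recipe for pickling objects that hold sockets or threads: drop the live handles before dumping, rebuild them after loading. A self-contained version of the pattern used here and again in TaskQueue below:

    import pickle
    import threading

    class Holder:
        def __init__(self):
            self.data = {'x': 1}
            self.lock = threading.Lock()      # not picklable

        def __getstate__(self):
            state = self.__dict__.copy()
            state.pop('lock', None)           # strip the live handle
            return state

        def __setstate__(self, state):
            self.__dict__.update(state)
            self.lock = threading.Lock()      # rebuild on load

    restored = pickle.loads(pickle.dumps(Holder()))
    assert restored.data == {'x': 1} and restored.lock is not None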
@@ -169,108 +57,67 @@ class SessionManager:
        self.cleanup_thread.start()

    def _get_session_key(self, session_id: str) -> str:
-        """Generate Redis key for a session."""
+        """Generates the Redis key for a session."""
        return f"dnsrecon:session:{session_id}"

-    def _get_user_session_key(self, user_fingerprint: str) -> str:
-        """Generate Redis key for user -> session mapping."""
-        return f"dnsrecon:user:{user_fingerprint}"
-
    def _get_stop_signal_key(self, session_id: str) -> str:
-        """Generate Redis key for session stop signal."""
+        """Generates the Redis key for a session's stop signal."""
        return f"dnsrecon:stop:{session_id}"

-    def create_or_replace_user_session(self, client_ip: str, user_agent: str) -> str:
+    def create_session(self) -> str:
        """
-        Create new session for user, replacing any existing session.
-        Consolidates data from previous session if it exists.
-
-        Args:
-            client_ip: Client IP address
-            user_agent: User-Agent header
-
-        Returns:
-            New session ID
+        Create a new user session and store it in Redis.
        """
-        user_fingerprint = self.user_identifier.generate_user_fingerprint(client_ip, user_agent)
-        new_session_id = str(uuid.uuid4())
-
-        print(f"=== CREATING/REPLACING SESSION FOR USER {user_fingerprint} ===")
+        session_id = str(uuid.uuid4())
+        print(f"=== CREATING SESSION {session_id} IN REDIS ===")

        try:
-            # Check for existing user session
-            existing_session_id = self._get_user_current_session(user_fingerprint)
-            old_scanner = None
-
-            if existing_session_id:
-                print(f"Found existing session {existing_session_id} for user {user_fingerprint}")
-                # Get old scanner data for consolidation
-                old_scanner = self.get_session(existing_session_id)
-                # Terminate old session
-                self._terminate_session_internal(existing_session_id, cleanup_user_mapping=False)
-                print(f"Terminated old session {existing_session_id}")
-
-            # Create new session config and scanner
            from core.session_config import create_session_config
            session_config = create_session_config()
-            new_scanner = Scanner(session_config=session_config)
+            scanner_instance = Scanner(session_config=session_config)

-            # Set session ID on scanner for cross-process operations
-            new_scanner.session_id = new_session_id
+            # Set the session ID on the scanner for cross-process stop signal management
+            scanner_instance.session_id = session_id

-            # Consolidate data from old session if available
-            if old_scanner:
-                new_scanner = self.consolidator.consolidate_scanner_data(old_scanner, new_scanner)
-                print(f"Consolidated data from previous session")
-
-            # Create session data
            session_data = {
-                'scanner': new_scanner,
+                'scanner': scanner_instance,
                'config': session_config,
                'created_at': time.time(),
                'last_activity': time.time(),
-                'status': 'active',
-                'user_fingerprint': user_fingerprint,
-                'client_ip': client_ip,
-                'user_agent': user_agent[:200]  # Truncate for storage
+                'status': 'active'
            }

-            # Store session in Redis
-            session_key = self._get_session_key(new_session_id)
+            # Serialize the entire session data dictionary using pickle
            serialized_data = pickle.dumps(session_data)

+            # Store in Redis
+            session_key = self._get_session_key(session_id)
            self.redis_client.setex(session_key, self.session_timeout, serialized_data)

-            # Update user -> session mapping
-            user_session_key = self._get_user_session_key(user_fingerprint)
-            self.redis_client.setex(user_session_key, self.session_timeout, new_session_id.encode('utf-8'))
-
-            # Initialize stop signal
-            stop_key = self._get_stop_signal_key(new_session_id)
+            # Initialize stop signal as False
+            stop_key = self._get_stop_signal_key(session_id)
            self.redis_client.setex(stop_key, self.session_timeout, b'0')

-            print(f"Created new session {new_session_id} for user {user_fingerprint}")
-            return new_session_id
+            print(f"Session {session_id} stored in Redis with stop signal initialized")
+            return session_id

        except Exception as e:
-            print(f"ERROR: Failed to create session for user {user_fingerprint}: {e}")
+            print(f"ERROR: Failed to create session {session_id}: {e}")
            raise
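Both versions persist the entire session dict as one pickled value under a TTL; stripped of the class plumbing, the storage protocol is just (sketch, assuming a local Redis on the default port):

    import pickle
    import time
    import redis

    r = redis.StrictRedis(db=0, decode_responses=False)
    session = {'created_at': time.time(), 'status': 'active'}

    r.setex('dnsrecon:session:demo', 3600, pickle.dumps(session))  # write with 1h TTL
    restored = pickle.loads(r.get('dnsrecon:session:demo'))        # read back
    assert restored['status'] == 'active'

As the new header comment warns, this is only safe because the process unpickles its own data; the same pattern against attacker-controlled bytes would be arbitrary code execution.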
-    def _get_user_current_session(self, user_fingerprint: str) -> Optional[str]:
-        """Get current session ID for a user."""
-        try:
-            user_session_key = self._get_user_session_key(user_fingerprint)
-            session_id_bytes = self.redis_client.get(user_session_key)
-            if session_id_bytes:
-                return session_id_bytes.decode('utf-8')
-            return None
-        except Exception as e:
-            print(f"Error getting user session: {e}")
-            return None
-
    def set_stop_signal(self, session_id: str) -> bool:
-        """Set stop signal for session (cross-process safe)."""
+        """
+        Set the stop signal for a session (cross-process safe).
+
+        Args:
+            session_id: Session identifier
+
+        Returns:
+            bool: True if signal was set successfully
+        """
        try:
            stop_key = self._get_stop_signal_key(session_id)
+            # Set stop signal to '1' with the same TTL as the session
            self.redis_client.setex(stop_key, self.session_timeout, b'1')
            print(f"Stop signal set for session {session_id}")
            return True
@@ -279,7 +126,15 @@ class SessionManager:
            return False

    def is_stop_requested(self, session_id: str) -> bool:
-        """Check if stop is requested for session (cross-process safe)."""
+        """
+        Check if stop is requested for a session (cross-process safe).
+
+        Args:
+            session_id: Session identifier
+
+        Returns:
+            bool: True if stop is requested
+        """
        try:
            stop_key = self._get_stop_signal_key(session_id)
            value = self.redis_client.get(stop_key)

@@ -289,7 +144,15 @@ class SessionManager:
            return False

    def clear_stop_signal(self, session_id: str) -> bool:
-        """Clear stop signal for session."""
+        """
+        Clear the stop signal for a session.
+
+        Args:
+            session_id: Session identifier
+
+        Returns:
+            bool: True if signal was cleared successfully
+        """
        try:
            stop_key = self._get_stop_signal_key(session_id)
            self.redis_client.setex(stop_key, self.session_timeout, b'0')
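The stop signal is a one-byte Redis value (b'0' idle, b'1' stop requested) that any worker process can poll, which is what makes it cross-process safe; reusing the r client from the sketch above, the whole protocol reduces to:

    key = 'dnsrecon:stop:demo'
    r.setex(key, 3600, b'0')        # initialize / clear
    r.setex(key, 3600, b'1')        # request stop
    assert r.get(key) == b'1'       # visible from any process sharing the Redis db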
@@ -300,13 +163,13 @@ class SessionManager:
            return False

    def _get_session_data(self, session_id: str) -> Optional[Dict[str, Any]]:
-        """Retrieve and deserialize session data from Redis."""
+        """Retrieves and deserializes session data from Redis."""
        try:
            session_key = self._get_session_key(session_id)
            serialized_data = self.redis_client.get(session_key)
            if serialized_data:
                session_data = pickle.loads(serialized_data)
-                # Ensure scanner has correct session ID
+                # Ensure the scanner has the correct session ID for stop signal checking
                if 'scanner' in session_data and session_data['scanner']:
                    session_data['scanner'].session_id = session_id
                return session_data

@@ -316,32 +179,37 @@ class SessionManager:
            return None

    def _save_session_data(self, session_id: str, session_data: Dict[str, Any]) -> bool:
-        """Serialize and save session data to Redis with updated TTL."""
+        """
+        Serializes and saves session data back to Redis with updated TTL.
+
+        Returns:
+            bool: True if save was successful
+        """
        try:
            session_key = self._get_session_key(session_id)
            serialized_data = pickle.dumps(session_data)
            result = self.redis_client.setex(session_key, self.session_timeout, serialized_data)

-            # Also refresh user mapping TTL if available
-            if 'user_fingerprint' in session_data:
-                user_session_key = self._get_user_session_key(session_data['user_fingerprint'])
-                self.redis_client.setex(user_session_key, self.session_timeout, session_id.encode('utf-8'))
-
            return result
        except Exception as e:
            print(f"ERROR: Failed to save session data for {session_id}: {e}")
            return False

    def update_session_scanner(self, session_id: str, scanner: 'Scanner') -> bool:
-        """Update scanner object in session with immediate persistence."""
+        """
+        Updates just the scanner object in a session with immediate persistence.
+
+        Returns:
+            bool: True if update was successful
+        """
        try:
            session_data = self._get_session_data(session_id)
            if session_data:
-                # Ensure scanner has session ID
+                # Ensure scanner has the session ID
                scanner.session_id = session_id
                session_data['scanner'] = scanner
                session_data['last_activity'] = time.time()

+                # Immediately save to Redis for GUI updates
                success = self._save_session_data(session_id, session_data)
                if success:
                    print(f"Scanner state updated for session {session_id} (status: {scanner.status})")
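Every scanner update is a full read-modify-write of the pickled blob, which conveniently renews the TTL on each save:

    data = pickle.loads(r.get(key))          # read
    data['last_activity'] = time.time()      # modify
    r.setex(key, 3600, pickle.dumps(data))   # write back, TTL refreshed

The cycle is not atomic across processes, so concurrent writers get last-writer-wins semantics; the code appears to accept that trade-off in exchange for GUI responsiveness.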
@@ -356,7 +224,16 @@ class SessionManager:
            return False

    def update_scanner_status(self, session_id: str, status: str) -> bool:
-        """Quickly update scanner status for immediate GUI feedback."""
+        """
+        Quickly update just the scanner status for immediate GUI feedback.
+
+        Args:
+            session_id: Session identifier
+            status: New scanner status
+
+        Returns:
+            bool: True if update was successful
+        """
        try:
            session_data = self._get_session_data(session_id)
            if session_data and 'scanner' in session_data:

@@ -375,7 +252,9 @@ class SessionManager:
            return False

    def get_session(self, session_id: str) -> Optional[Scanner]:
-        """Get scanner instance for session with session ID management."""
+        """
+        Get scanner instance for a session from Redis with session ID management.
+        """
        if not session_id:
            return None

@@ -390,13 +269,21 @@ class SessionManager:

        scanner = session_data.get('scanner')
        if scanner:
-            # Ensure scanner can check Redis-based stop signal
+            # Ensure the scanner can check the Redis-based stop signal
            scanner.session_id = session_id

        return scanner

    def get_session_status_only(self, session_id: str) -> Optional[str]:
-        """Get scanner status without full session retrieval (for performance)."""
+        """
+        Get just the scanner status without full session retrieval (for performance).
+
+        Args:
+            session_id: Session identifier
+
+        Returns:
+            Scanner status string or None if not found
+        """
        try:
            session_data = self._get_session_data(session_id)
            if session_data and 'scanner' in session_data:
@@ -407,18 +294,16 @@ class SessionManager:
            return None

    def terminate_session(self, session_id: str) -> bool:
-        """Terminate specific session with reliable stop signal and immediate status update."""
-        return self._terminate_session_internal(session_id, cleanup_user_mapping=True)
-
-    def _terminate_session_internal(self, session_id: str, cleanup_user_mapping: bool = True) -> bool:
-        """Internal session termination with configurable user mapping cleanup."""
+        """
+        Terminate a specific session in Redis with reliable stop signal and immediate status update.
+        """
        print(f"=== TERMINATING SESSION {session_id} ===")

        try:
-            # Set stop signal first
+            # First, set the stop signal
            self.set_stop_signal(session_id)

-            # Update scanner status immediately for GUI feedback
+            # Update scanner status to stopped immediately for GUI feedback
            self.update_scanner_status(session_id, 'stopped')

            session_data = self._get_session_data(session_id)

@@ -429,19 +314,16 @@ class SessionManager:
            scanner = session_data.get('scanner')
            if scanner and scanner.status == 'running':
                print(f"Stopping scan for session: {session_id}")
+                # The scanner will check the Redis stop signal
                scanner.stop_scan()

+                # Update the scanner state immediately
                self.update_session_scanner(session_id, scanner)

-                # Wait for graceful shutdown
+                # Wait a moment for graceful shutdown
                time.sleep(0.5)

-            # Clean up user mapping if requested
-            if cleanup_user_mapping and 'user_fingerprint' in session_data:
-                user_session_key = self._get_user_session_key(session_data['user_fingerprint'])
-                self.redis_client.delete(user_session_key)
-                print(f"Cleaned up user mapping for {session_data['user_fingerprint']}")
-
-            # Delete session data and stop signal
+            # Delete session data and stop signal from Redis
            session_key = self._get_session_key(session_id)
            stop_key = self._get_stop_signal_key(session_id)
            self.redis_client.delete(session_key)
@@ -455,72 +337,35 @@ class SessionManager:
            return False

    def _cleanup_loop(self) -> None:
-        """Background thread to cleanup inactive sessions and orphaned signals."""
+        """
+        Background thread to cleanup inactive sessions and orphaned stop signals.
+        """
        while True:
            try:
                # Clean up orphaned stop signals
                stop_keys = self.redis_client.keys("dnsrecon:stop:*")
                for stop_key in stop_keys:
+                    # Extract session ID from stop key
                    session_id = stop_key.decode('utf-8').split(':')[-1]
                    session_key = self._get_session_key(session_id)

+                    # If session doesn't exist but stop signal does, clean it up
                    if not self.redis_client.exists(session_key):
                        self.redis_client.delete(stop_key)
                        print(f"Cleaned up orphaned stop signal for session {session_id}")

-                # Clean up orphaned user mappings
-                user_keys = self.redis_client.keys("dnsrecon:user:*")
-                for user_key in user_keys:
-                    session_id_bytes = self.redis_client.get(user_key)
-                    if session_id_bytes:
-                        session_id = session_id_bytes.decode('utf-8')
-                        session_key = self._get_session_key(session_id)
-
-                        if not self.redis_client.exists(session_key):
-                            self.redis_client.delete(user_key)
-                            print(f"Cleaned up orphaned user mapping for session {session_id}")

            except Exception as e:
                print(f"Error in cleanup loop: {e}")

            time.sleep(300)  # Sleep for 5 minutes

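Since sessions and their stop signals are written with the same TTL, expiry normally retires both together; the sweep only catches keys whose partner expired first. Its core check, standalone:

    for stop_key in r.keys('dnsrecon:stop:*'):
        session_id = stop_key.decode('utf-8').split(':')[-1]
        if not r.exists(f'dnsrecon:session:{session_id}'):
            r.delete(stop_key)    # orphaned: partner session already gone

KEYS walks the whole keyspace; on a larger deployment SCAN would be the gentler choice, though that is a suggestion rather than what either side of this diff does.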
-    def list_active_sessions(self) -> List[Dict[str, Any]]:
-        """List all active sessions for admin purposes."""
-        try:
-            session_keys = self.redis_client.keys("dnsrecon:session:*")
-            sessions = []
-
-            for session_key in session_keys:
-                session_id = session_key.decode('utf-8').split(':')[-1]
-                session_data = self._get_session_data(session_id)
-
-                if session_data:
-                    scanner = session_data.get('scanner')
-                    sessions.append({
-                        'session_id': session_id,
-                        'user_fingerprint': session_data.get('user_fingerprint', 'unknown'),
-                        'client_ip': session_data.get('client_ip', 'unknown'),
-                        'created_at': session_data.get('created_at'),
-                        'last_activity': session_data.get('last_activity'),
-                        'scanner_status': scanner.status if scanner else 'unknown',
-                        'current_target': scanner.current_target if scanner else None
-                    })
-
-            return sessions
-        except Exception as e:
-            print(f"ERROR: Failed to list active sessions: {e}")
-            return []
-
    def get_statistics(self) -> Dict[str, Any]:
        """Get session manager statistics."""
        try:
            session_keys = self.redis_client.keys("dnsrecon:session:*")
-            user_keys = self.redis_client.keys("dnsrecon:user:*")
            stop_keys = self.redis_client.keys("dnsrecon:stop:*")

            active_sessions = len(session_keys)
-            unique_users = len(user_keys)
            running_scans = 0

            for session_key in session_keys:

@@ -531,46 +376,16 @@ class SessionManager:

            return {
                'total_active_sessions': active_sessions,
-                'unique_users': unique_users,
                'running_scans': running_scans,
-                'total_stop_signals': len(stop_keys),
-                'average_sessions_per_user': round(active_sessions / unique_users, 2) if unique_users > 0 else 0
+                'total_stop_signals': len(stop_keys)
            }
        except Exception as e:
            print(f"ERROR: Failed to get statistics: {e}")
            return {
                'total_active_sessions': 0,
-                'unique_users': 0,
                'running_scans': 0,
-                'total_stop_signals': 0,
-                'average_sessions_per_user': 0
+                'total_stop_signals': 0
            }

-    def get_session_info(self, session_id: str) -> Dict[str, Any]:
-        """Get detailed information about a specific session."""
-        try:
-            session_data = self._get_session_data(session_id)
-            if not session_data:
-                return {'error': 'Session not found'}
-
-            scanner = session_data.get('scanner')
-
-            return {
-                'session_id': session_id,
-                'user_fingerprint': session_data.get('user_fingerprint', 'unknown'),
-                'client_ip': session_data.get('client_ip', 'unknown'),
-                'user_agent': session_data.get('user_agent', 'unknown'),
-                'created_at': session_data.get('created_at'),
-                'last_activity': session_data.get('last_activity'),
-                'status': session_data.get('status'),
-                'scanner_status': scanner.status if scanner else 'unknown',
-                'current_target': scanner.current_target if scanner else None,
-                'session_age_minutes': round((time.time() - session_data.get('created_at', time.time())) / 60, 1)
-            }
-        except Exception as e:
-            print(f"ERROR: Failed to get session info for {session_id}: {e}")
-            return {'error': f'Failed to get session info: {str(e)}'}


# Global session manager instance
session_manager = SessionManager(session_timeout_minutes=60)
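One detail worth flagging: the new __init__ falls back to config.session_timeout_minutes only when handed None, yet its declared default is 0 and the module-level singleton still passes 60 explicitly. The resulting TTLs, assuming the code as written:

    SessionManager(session_timeout_minutes=60).session_timeout     # 3600 s, as before
    SessionManager(session_timeout_minutes=None).session_timeout   # config-driven
    SessionManager().session_timeout                               # 0 s - SETEX rejects a zero expiry

So the config-driven path appears to be dead until a caller passes None (or the default becomes None).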
@@ -1,564 +0,0 @@
# dnsrecon/core/task_manager.py

import threading
import time
import uuid
from enum import Enum
from dataclasses import dataclass, field
from typing import Dict, List, Optional, Any, Set
from datetime import datetime, timezone, timedelta
from collections import deque

from utils.helpers import _is_valid_ip, _is_valid_domain


class TaskStatus(Enum):
    """Enumeration of task execution statuses."""
    PENDING = "pending"
    RUNNING = "running"
    SUCCEEDED = "succeeded"
    FAILED_RETRYING = "failed_retrying"
    FAILED_PERMANENT = "failed_permanent"
    CANCELLED = "cancelled"


class TaskType(Enum):
    """Enumeration of task types for provider queries."""
    DOMAIN_QUERY = "domain_query"
    IP_QUERY = "ip_query"
    GRAPH_UPDATE = "graph_update"


@dataclass
class TaskResult:
    """Result of a task execution."""
    success: bool
    data: Optional[Any] = None
    error: Optional[str] = None
    metadata: Dict[str, Any] = field(default_factory=dict)


@dataclass
class ReconTask:
    """Represents a single reconnaissance task with retry logic."""
    task_id: str
    task_type: TaskType
    target: str
    provider_name: str
    depth: int
    status: TaskStatus = TaskStatus.PENDING
    created_at: datetime = field(default_factory=lambda: datetime.now(timezone.utc))

    # Retry configuration
    max_retries: int = 3
    current_retry: int = 0
    base_backoff_seconds: float = 1.0
    max_backoff_seconds: float = 60.0

    # Execution tracking
    last_attempt_at: Optional[datetime] = None
    next_retry_at: Optional[datetime] = None
    execution_history: List[Dict[str, Any]] = field(default_factory=list)

    # Results
    result: Optional[TaskResult] = None

    def __post_init__(self):
        """Initialize additional fields after creation."""
        if not self.task_id:
            self.task_id = str(uuid.uuid4())[:8]

    def calculate_next_retry_time(self) -> Optional[datetime]:
        """Calculate next retry time with exponential backoff and jitter (None once retries are exhausted)."""
        if self.current_retry >= self.max_retries:
            return None

        # Exponential backoff with jitter
        backoff_time = min(
            self.max_backoff_seconds,
            self.base_backoff_seconds * (2 ** self.current_retry)
        )

        # Add jitter (±25%)
        jitter = backoff_time * 0.25 * (0.5 - hash(self.task_id) % 1000 / 1000.0)
        final_backoff = max(self.base_backoff_seconds, backoff_time + jitter)

        return datetime.now(timezone.utc) + timedelta(seconds=final_backoff)

    def should_retry(self) -> bool:
        """Determine if task should be retried based on status and retry count."""
        if self.status != TaskStatus.FAILED_RETRYING:
            return False
        if self.current_retry >= self.max_retries:
            return False
        if self.next_retry_at and datetime.now(timezone.utc) < self.next_retry_at:
            return False
        return True

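With the defaults above (1 s base, 60 s cap, ±25% jitter) the pre-jitter schedule doubles per attempt; quick check:

    base, cap = 1.0, 60.0
    for attempt in range(4):
        print(min(cap, base * (2 ** attempt)))   # 1.0, 2.0, 4.0, 8.0

Deriving the jitter from hash(self.task_id) spreads tasks apart without an RNG, but string hashes are salted per interpreter run (PYTHONHASHSEED), so the offset is stable within a process and different across restarts.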
    def mark_failed(self, error: str, metadata: Dict[str, Any] = None):
        """Mark task as failed and prepare for retry or permanent failure."""
        self.current_retry += 1
        self.last_attempt_at = datetime.now(timezone.utc)

        # Record execution history
        execution_record = {
            'attempt': self.current_retry,
            'timestamp': self.last_attempt_at.isoformat(),
            'error': error,
            'metadata': metadata or {}
        }
        self.execution_history.append(execution_record)

        if self.current_retry >= self.max_retries:
            self.status = TaskStatus.FAILED_PERMANENT
            self.result = TaskResult(success=False, error=f"Permanent failure after {self.max_retries} attempts: {error}")
        else:
            self.status = TaskStatus.FAILED_RETRYING
            self.next_retry_at = self.calculate_next_retry_time()

    def mark_succeeded(self, data: Any = None, metadata: Dict[str, Any] = None):
        """Mark task as successfully completed."""
        self.status = TaskStatus.SUCCEEDED
        self.last_attempt_at = datetime.now(timezone.utc)
        self.result = TaskResult(success=True, data=data, metadata=metadata or {})

        # Record successful execution
        execution_record = {
            'attempt': self.current_retry + 1,
            'timestamp': self.last_attempt_at.isoformat(),
            'success': True,
            'metadata': metadata or {}
        }
        self.execution_history.append(execution_record)

    def get_summary(self) -> Dict[str, Any]:
        """Get task summary for progress reporting."""
        return {
            'task_id': self.task_id,
            'task_type': self.task_type.value,
            'target': self.target,
            'provider': self.provider_name,
            'status': self.status.value,
            'current_retry': self.current_retry,
            'max_retries': self.max_retries,
            'created_at': self.created_at.isoformat(),
            'last_attempt_at': self.last_attempt_at.isoformat() if self.last_attempt_at else None,
            'next_retry_at': self.next_retry_at.isoformat() if self.next_retry_at else None,
            'total_attempts': len(self.execution_history),
            'has_result': self.result is not None
        }


class TaskQueue:
    """Thread-safe task queue with retry logic and priority handling."""

    def __init__(self, max_concurrent_tasks: int = 5):
        """Initialize task queue."""
        self.max_concurrent_tasks = max_concurrent_tasks
        self.tasks: Dict[str, ReconTask] = {}
        self.pending_queue = deque()
        self.retry_queue = deque()
        self.running_tasks: Set[str] = set()

        self._lock = threading.Lock()
        self._stop_event = threading.Event()

    def __getstate__(self):
        """Prepare TaskQueue for pickling by excluding unpicklable objects."""
        state = self.__dict__.copy()
        # Exclude the unpickleable '_lock' and '_stop_event' attributes
        if '_lock' in state:
            del state['_lock']
        if '_stop_event' in state:
            del state['_stop_event']
        return state

    def __setstate__(self, state):
        """Restore TaskQueue after unpickling by reconstructing threading objects."""
        self.__dict__.update(state)
        # Re-initialize the '_lock' and '_stop_event' attributes
        self._lock = threading.Lock()
        self._stop_event = threading.Event()

    def add_task(self, task: ReconTask) -> str:
        """Add task to queue."""
        with self._lock:
            self.tasks[task.task_id] = task
            self.pending_queue.append(task.task_id)
            print(f"Added task {task.task_id}: {task.provider_name} query for {task.target}")
            return task.task_id

    def get_next_ready_task(self) -> Optional[ReconTask]:
        """Get next task ready for execution."""
        with self._lock:
            # Check if we have room for more concurrent tasks
            if len(self.running_tasks) >= self.max_concurrent_tasks:
                return None

            # First priority: retry queue (tasks ready for retry)
            while self.retry_queue:
                task_id = self.retry_queue.popleft()
                if task_id in self.tasks:
                    task = self.tasks[task_id]
                    if task.should_retry():
                        task.status = TaskStatus.RUNNING
                        self.running_tasks.add(task_id)
                        print(f"Retrying task {task_id} (attempt {task.current_retry + 1})")
                        return task

            # Second priority: pending queue (new tasks)
            while self.pending_queue:
                task_id = self.pending_queue.popleft()
                if task_id in self.tasks:
                    task = self.tasks[task_id]
                    if task.status == TaskStatus.PENDING:
                        task.status = TaskStatus.RUNNING
                        self.running_tasks.add(task_id)
                        print(f"Starting task {task_id}")
                        return task

            return None

    def complete_task(self, task_id: str, success: bool, data: Any = None,
                      error: str = None, metadata: Dict[str, Any] = None):
        """Mark task as completed (success or failure)."""
        with self._lock:
            if task_id not in self.tasks:
                return

            task = self.tasks[task_id]
            self.running_tasks.discard(task_id)

            if success:
                task.mark_succeeded(data=data, metadata=metadata)
                print(f"Task {task_id} succeeded")
            else:
                task.mark_failed(error or "Unknown error", metadata=metadata)
                if task.status == TaskStatus.FAILED_RETRYING:
                    self.retry_queue.append(task_id)
                    print(f"Task {task_id} failed, scheduled for retry at {task.next_retry_at}")
                else:
                    print(f"Task {task_id} permanently failed after {task.current_retry} attempts")

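The two-queue design gives ready retries strict priority over fresh work. A minimal driver showing the intended round trip (sketch; the provider name is illustrative):

    queue = TaskQueue(max_concurrent_tasks=2)
    queue.add_task(ReconTask(task_id='', task_type=TaskType.DOMAIN_QUERY,
                             target='example.com', provider_name='shodan', depth=0))

    task = queue.get_next_ready_task()                                 # PENDING -> RUNNING
    queue.complete_task(task.task_id, success=False, error='timeout')  # -> retry_queue

One caveat: get_next_ready_task() pops a retry candidate before checking should_retry(), and a task whose backoff has not yet elapsed is not pushed back, so it can drop out of scheduling entirely; the bug is moot here only because this change deletes the file.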
    def cancel_all_tasks(self):
        """Cancel all pending and running tasks."""
        with self._lock:
            self._stop_event.set()
            for task in self.tasks.values():
                if task.status in [TaskStatus.PENDING, TaskStatus.RUNNING, TaskStatus.FAILED_RETRYING]:
                    task.status = TaskStatus.CANCELLED
            self.pending_queue.clear()
            self.retry_queue.clear()
            self.running_tasks.clear()
            print("All tasks cancelled")

    def is_complete(self) -> bool:
        """Check if all tasks are complete (succeeded, permanently failed, or cancelled)."""
        with self._lock:
            for task in self.tasks.values():
                if task.status in [TaskStatus.PENDING, TaskStatus.RUNNING, TaskStatus.FAILED_RETRYING]:
                    return False
            return True

    def get_statistics(self) -> Dict[str, Any]:
        """Get queue statistics."""
        with self._lock:
            stats = {
                'total_tasks': len(self.tasks),
                'pending': len(self.pending_queue),
                'running': len(self.running_tasks),
                'retry_queue': len(self.retry_queue),
                'succeeded': 0,
                'failed_permanent': 0,
                'cancelled': 0,
                'failed_retrying': 0
            }

            for task in self.tasks.values():
                if task.status == TaskStatus.SUCCEEDED:
                    stats['succeeded'] += 1
                elif task.status == TaskStatus.FAILED_PERMANENT:
                    stats['failed_permanent'] += 1
                elif task.status == TaskStatus.CANCELLED:
                    stats['cancelled'] += 1
                elif task.status == TaskStatus.FAILED_RETRYING:
                    stats['failed_retrying'] += 1

            stats['completion_rate'] = (stats['succeeded'] / stats['total_tasks'] * 100) if stats['total_tasks'] > 0 else 0
            stats['is_complete'] = self.is_complete()

            return stats

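A latent issue in the block above: get_statistics() calls self.is_complete() while already holding self._lock, and threading.Lock is not reentrant, so that line would deadlock the first time it executes. threading.RLock (or an unlocked internal helper) is the usual fix:

    import threading

    lock = threading.RLock()    # reentrant: the owning thread may re-acquire
    with lock:
        with lock:              # fine with RLock; deadlocks with threading.Lock
            pass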
    def get_task_summaries(self) -> List[Dict[str, Any]]:
        """Get summaries of all tasks for detailed progress reporting."""
        with self._lock:
            return [task.get_summary() for task in self.tasks.values()]

    def get_failed_tasks(self) -> List[ReconTask]:
        """Get all permanently failed tasks for analysis."""
        with self._lock:
            return [task for task in self.tasks.values() if task.status == TaskStatus.FAILED_PERMANENT]


class TaskExecutor:
    """Executes reconnaissance tasks using providers."""

    def __init__(self, providers: List, graph_manager, logger):
        """Initialize task executor."""
        self.providers = {provider.get_name(): provider for provider in providers}
        self.graph = graph_manager
        self.logger = logger

    def execute_task(self, task: ReconTask) -> TaskResult:
        """
        Execute a single reconnaissance task.

        Args:
            task: Task to execute

        Returns:
            TaskResult with success/failure information
        """
        try:
            print(f"Executing task {task.task_id}: {task.provider_name} query for {task.target}")

            provider = self.providers.get(task.provider_name)
            if not provider:
                return TaskResult(
                    success=False,
                    error=f"Provider {task.provider_name} not available"
                )

            if not provider.is_available():
                return TaskResult(
                    success=False,
                    error=f"Provider {task.provider_name} is not available (missing API key or configuration)"
                )

            # Execute provider query based on task type
            if task.task_type == TaskType.DOMAIN_QUERY:
                if not _is_valid_domain(task.target):
                    return TaskResult(success=False, error=f"Invalid domain: {task.target}")

                relationships = provider.query_domain(task.target)

            elif task.task_type == TaskType.IP_QUERY:
                if not _is_valid_ip(task.target):
                    return TaskResult(success=False, error=f"Invalid IP: {task.target}")

                relationships = provider.query_ip(task.target)

            else:
                return TaskResult(success=False, error=f"Unsupported task type: {task.task_type}")

            # Process results and update graph
            new_targets = set()
            relationships_added = 0

            for source, target, rel_type, confidence, raw_data in relationships:
                # Add nodes to graph
                from core.graph_manager import NodeType

                if _is_valid_ip(target):
                    self.graph.add_node(target, NodeType.IP)
                    new_targets.add(target)
                elif target.startswith('AS') and target[2:].isdigit():
                    self.graph.add_node(target, NodeType.ASN)
                elif _is_valid_domain(target):
                    self.graph.add_node(target, NodeType.DOMAIN)
                    new_targets.add(target)

                # Add edge to graph
                if self.graph.add_edge(source, target, rel_type, confidence, task.provider_name, raw_data):
                    relationships_added += 1

            # Log forensic information
            self.logger.logger.info(
                f"Task {task.task_id} completed: {len(relationships)} relationships found, "
                f"{relationships_added} added to graph, {len(new_targets)} new targets"
            )

            return TaskResult(
                success=True,
                data={
                    'relationships': relationships,
                    'new_targets': list(new_targets),
                    'relationships_added': relationships_added
                },
                metadata={
                    'provider': task.provider_name,
                    'target': task.target,
                    'depth': task.depth,
                    'execution_time': datetime.now(timezone.utc).isoformat()
                }
            )

        except Exception as e:
            error_msg = f"Task execution failed: {str(e)}"
            print(f"ERROR: {error_msg} for task {task.task_id}")
            self.logger.logger.error(error_msg)

            return TaskResult(
                success=False,
                error=error_msg,
                metadata={
                    'provider': task.provider_name,
                    'target': task.target,
                    'exception_type': type(e).__name__
                }
            )

class TaskManager:
|
|
||||||
"""High-level task management for reconnaissance scans."""
|
|
||||||
|
|
||||||
def __init__(self, providers: List, graph_manager, logger, max_concurrent_tasks: int = 5):
|
|
||||||
"""Initialize task manager."""
|
|
||||||
self.task_queue = TaskQueue(max_concurrent_tasks)
|
|
||||||
self.task_executor = TaskExecutor(providers, graph_manager, logger)
|
|
||||||
self.logger = logger
|
|
||||||
|
|
||||||
# Execution control
|
|
||||||
self._stop_event = threading.Event()
|
|
||||||
self._execution_threads: List[threading.Thread] = []
|
|
||||||
self._is_running = False
|
|
||||||
|
|
||||||
def create_provider_tasks(self, target: str, depth: int, providers: List) -> List[str]:
|
|
||||||
"""
|
|
||||||
Create tasks for querying all eligible providers for a target.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
target: Domain or IP to query
|
|
||||||
depth: Current recursion depth
|
|
||||||
providers: List of available providers
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
List of created task IDs
|
|
||||||
"""
|
|
||||||
task_ids = []
|
|
||||||
is_ip = _is_valid_ip(target)
|
|
||||||
target_key = 'ips' if is_ip else 'domains'
|
|
||||||
task_type = TaskType.IP_QUERY if is_ip else TaskType.DOMAIN_QUERY
|
|
||||||
|
|
||||||
for provider in providers:
|
|
||||||
if provider.get_eligibility().get(target_key) and provider.is_available():
|
|
||||||
task = ReconTask(
|
|
||||||
task_id=str(uuid.uuid4())[:8],
|
|
||||||
task_type=task_type,
|
|
||||||
target=target,
|
|
||||||
provider_name=provider.get_name(),
|
|
||||||
depth=depth,
|
|
||||||
max_retries=3 # Configure retries per task type/provider
|
|
||||||
)
|
|
||||||
|
|
||||||
task_id = self.task_queue.add_task(task)
|
|
||||||
task_ids.append(task_id)
|
|
||||||
|
|
||||||
return task_ids
|
|
||||||
|
|
||||||
def start_execution(self, max_workers: int = 3):
|
|
||||||
"""Start task execution with specified number of worker threads."""
|
|
||||||
if self._is_running:
|
|
||||||
print("Task execution already running")
|
|
||||||
return
|
|
||||||
|
|
||||||
self._is_running = True
|
|
||||||
self._stop_event.clear()
|
|
||||||
|
|
||||||
print(f"Starting task execution with {max_workers} workers")
|
|
||||||
|
|
||||||
for i in range(max_workers):
|
|
||||||
worker_thread = threading.Thread(
|
|
||||||
target=self._worker_loop,
|
|
||||||
name=f"TaskWorker-{i+1}",
|
|
||||||
daemon=True
|
|
||||||
)
|
|
||||||
worker_thread.start()
|
|
||||||
self._execution_threads.append(worker_thread)
|
|
||||||
|
|
||||||
def stop_execution(self):
|
|
||||||
"""Stop task execution and cancel all tasks."""
|
|
||||||
print("Stopping task execution")
|
|
||||||
self._stop_event.set()
|
|
||||||
self.task_queue.cancel_all_tasks()
|
|
||||||
self._is_running = False
|
|
||||||
|
|
||||||
# Wait for worker threads to finish
|
|
||||||
for thread in self._execution_threads:
|
|
||||||
thread.join(timeout=5.0)
|
|
||||||
|
|
||||||
self._execution_threads.clear()
|
|
||||||
print("Task execution stopped")
|
|
||||||
|
|
||||||
def _worker_loop(self):
|
|
||||||
"""Worker thread loop for executing tasks."""
|
|
||||||
thread_name = threading.current_thread().name
|
|
||||||
print(f"{thread_name} started")
|
|
||||||
|
|
||||||
while not self._stop_event.is_set():
|
|
||||||
try:
|
|
||||||
# Get next task to execute
|
|
||||||
task = self.task_queue.get_next_ready_task()
|
|
||||||
|
|
||||||
if task is None:
|
|
||||||
# No tasks ready, check if we should exit
|
|
||||||
if self.task_queue.is_complete() or self._stop_event.is_set():
|
|
||||||
break
|
|
||||||
time.sleep(0.1) # Brief sleep before checking again
|
|
||||||
continue
|
|
||||||
|
|
||||||
# Execute the task
|
|
||||||
result = self.task_executor.execute_task(task)
|
|
||||||
|
|
||||||
# Complete the task in queue
|
|
||||||
self.task_queue.complete_task(
|
|
||||||
task.task_id,
|
|
||||||
success=result.success,
|
|
||||||
data=result.data,
|
|
||||||
error=result.error,
|
|
||||||
metadata=result.metadata
|
|
||||||
)
|
|
||||||
|
|
||||||
except Exception as e:
|
|
||||||
print(f"ERROR: Worker {thread_name} encountered error: {e}")
|
|
||||||
# Continue running even if individual task fails
|
|
||||||
continue
|
|
||||||
|
|
||||||
print(f"{thread_name} finished")
|
|
||||||
|
|
||||||
def wait_for_completion(self, timeout_seconds: int = 300) -> bool:
|
|
||||||
"""
|
|
||||||
Wait for all tasks to complete.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
timeout_seconds: Maximum time to wait
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
True if all tasks completed, False if timeout
|
|
||||||
"""
|
|
||||||
start_time = time.time()
|
|
||||||
|
|
||||||
while time.time() - start_time < timeout_seconds:
|
|
||||||
if self.task_queue.is_complete():
|
|
||||||
return True
|
|
||||||
|
|
||||||
if self._stop_event.is_set():
|
|
||||||
return False
|
|
||||||
|
|
||||||
time.sleep(1.0) # Check every second
|
|
||||||
|
|
||||||
print(f"Timeout waiting for task completion after {timeout_seconds} seconds")
|
|
||||||
return False
|
|
||||||
|
|
||||||
def get_progress_report(self) -> Dict[str, Any]:
|
|
||||||
"""Get detailed progress report for UI updates."""
|
|
||||||
stats = self.task_queue.get_statistics()
|
|
||||||
failed_tasks = self.task_queue.get_failed_tasks()
|
|
||||||
|
|
||||||
return {
|
|
||||||
'statistics': stats,
|
|
||||||
'failed_tasks': [task.get_summary() for task in failed_tasks],
|
|
||||||
'is_running': self._is_running,
|
|
||||||
'worker_count': len(self._execution_threads),
|
|
||||||
'detailed_tasks': self.task_queue.get_task_summaries() if stats['total_tasks'] < 50 else [] # Limit detail for performance
|
|
||||||
}
|
|
||||||
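For orientation, the worker-pool shape used by TaskManager above (a shared queue, N daemon threads, one stop event) can be reproduced in isolation. A minimal self-contained sketch, using a plain queue.Queue as a stand-in for the project's TaskQueue:

# --- Example (not part of the diff): minimal worker-pool sketch ---
import queue
import threading

task_queue: "queue.Queue[str]" = queue.Queue()
stop_event = threading.Event()

def worker_loop() -> None:
    while not stop_event.is_set():
        try:
            target = task_queue.get(timeout=0.1)  # mirrors the short poll above
        except queue.Empty:
            continue
        print(f"{threading.current_thread().name} processing {target}")
        task_queue.task_done()

for i in range(3):
    threading.Thread(target=worker_loop, name=f"TaskWorker-{i+1}", daemon=True).start()

for target in ("example.com", "example.org"):  # hypothetical targets
    task_queue.put(target)

task_queue.join()   # analogous to wait_for_completion()
stop_event.set()    # analogous to stop_execution()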
@@ -3,18 +3,14 @@
 import time
 import requests
 import threading
-import os
-import json
-import hashlib
 from abc import ABC, abstractmethod
 from typing import List, Dict, Any, Optional, Tuple
-from datetime import datetime, timezone

 from core.logger import get_forensic_logger


 class RateLimiter:
-    """Thread-safe rate limiter for API calls."""
+    """Simple rate limiter for API calls."""

     def __init__(self, requests_per_minute: int):
         """
@@ -26,152 +22,36 @@ class RateLimiter:
         self.requests_per_minute = requests_per_minute
         self.min_interval = 60.0 / requests_per_minute
         self.last_request_time = 0
-        self._lock = threading.Lock()

     def __getstate__(self):
         """RateLimiter is fully picklable, return full state."""
-        state = self.__dict__.copy()
-        # Exclude unpickleable lock
-        if '_lock' in state:
-            del state['_lock']
-        return state
+        return self.__dict__.copy()

     def __setstate__(self, state):
         """Restore RateLimiter state."""
         self.__dict__.update(state)
-        self._lock = threading.Lock()

     def wait_if_needed(self) -> None:
         """Wait if necessary to respect rate limits."""
-        with self._lock:
-            current_time = time.time()
-            time_since_last = current_time - self.last_request_time
-
-            if time_since_last < self.min_interval:
-                sleep_time = self.min_interval - time_since_last
-                time.sleep(sleep_time)
-
-            self.last_request_time = time.time()
+        current_time = time.time()
+        time_since_last = current_time - self.last_request_time
+
+        if time_since_last < self.min_interval:
+            sleep_time = self.min_interval - time_since_last
+            time.sleep(sleep_time)
+
+        self.last_request_time = time.time()

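The simplified RateLimiter enforces a fixed floor of 60.0 / requests_per_minute seconds between calls. A standalone sketch of that interval math (RATE is a hypothetical example value, not taken from the diff):

# --- Example (not part of the diff): the rate-limit interval math ---
import time

RATE = 120                   # hypothetical: requests per minute
min_interval = 60.0 / RATE   # 0.5 seconds between requests
last_request_time = 0.0

def wait_if_needed() -> None:
    global last_request_time
    elapsed = time.time() - last_request_time
    if elapsed < min_interval:
        time.sleep(min_interval - elapsed)  # pad out the remaining interval
    last_request_time = time.time()

for _ in range(3):
    wait_if_needed()
    print(f"request at {time.time():.2f}")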
-
-class ProviderCache:
-    """Thread-safe global cache for provider queries."""
-
-    def __init__(self, provider_name: str, cache_expiry_hours: int = 12):
-        """
-        Initialize provider-specific cache.
-
-        Args:
-            provider_name: Name of the provider for cache directory
-            cache_expiry_hours: Cache expiry time in hours
-        """
-        self.provider_name = provider_name
-        self.cache_expiry = cache_expiry_hours * 3600  # Convert to seconds
-        self.cache_dir = os.path.join('.cache', provider_name)
-        self._lock = threading.Lock()
-
-        # Ensure cache directory exists with thread-safe creation
-        os.makedirs(self.cache_dir, exist_ok=True)
-
-    def _generate_cache_key(self, method: str, url: str, params: Optional[Dict[str, Any]]) -> str:
-        """Generate unique cache key for request."""
-        cache_data = f"{method}:{url}:{json.dumps(params or {}, sort_keys=True)}"
-        return hashlib.md5(cache_data.encode()).hexdigest() + ".json"
-
-    def get_cached_response(self, method: str, url: str, params: Optional[Dict[str, Any]]) -> Optional[requests.Response]:
-        """
-        Retrieve cached response if available and not expired.
-
-        Returns:
-            Cached Response object or None if cache miss/expired
-        """
-        cache_key = self._generate_cache_key(method, url, params)
-        cache_path = os.path.join(self.cache_dir, cache_key)
-
-        with self._lock:
-            if not os.path.exists(cache_path):
-                return None
-
-            # Check if cache is expired
-            cache_age = time.time() - os.path.getmtime(cache_path)
-            if cache_age >= self.cache_expiry:
-                try:
-                    os.remove(cache_path)
-                except OSError:
-                    pass  # File might have been removed by another thread
-                return None
-
-            try:
-                with open(cache_path, 'r', encoding='utf-8') as f:
-                    cached_data = json.load(f)
-
-                # Reconstruct Response object
-                response = requests.Response()
-                response.status_code = cached_data['status_code']
-                response._content = cached_data['content'].encode('utf-8')
-                response.headers.update(cached_data['headers'])
-
-                return response
-
-            except (json.JSONDecodeError, KeyError, IOError) as e:
-                # Cache file corrupted, remove it
-                try:
-                    os.remove(cache_path)
-                except OSError:
-                    pass
-                return None
-
-    def cache_response(self, method: str, url: str, params: Optional[Dict[str, Any]],
-                       response: requests.Response) -> bool:
-        """
-        Cache successful response to disk.
-
-        Returns:
-            True if cached successfully, False otherwise
-        """
-        if response.status_code != 200:
-            return False
-
-        cache_key = self._generate_cache_key(method, url, params)
-        cache_path = os.path.join(self.cache_dir, cache_key)
-
-        with self._lock:
-            try:
-                cache_data = {
-                    'status_code': response.status_code,
-                    'content': response.text,
-                    'headers': dict(response.headers),
-                    'cached_at': datetime.now(timezone.utc).isoformat()
-                }
-
-                # Write to temporary file first, then rename for atomic operation
-                temp_path = cache_path + '.tmp'
-                with open(temp_path, 'w', encoding='utf-8') as f:
-                    json.dump(cache_data, f)
-
-                # Atomic rename to prevent partial cache files
-                os.rename(temp_path, cache_path)
-                return True
-
-            except (IOError, OSError) as e:
-                # Clean up temp file if it exists
-                try:
-                    if os.path.exists(temp_path):
-                        os.remove(temp_path)
-                except OSError:
-                    pass
-                return False
-
-
 class BaseProvider(ABC):
     """
     Abstract base class for all DNSRecon data providers.
-    Now supports global provider-specific caching and session-specific configuration.
+    Now supports session-specific configuration.
     """

     def __init__(self, name: str, rate_limit: int = 60, timeout: int = 30, session_config=None):
         """
-        Initialize base provider with global caching and session-specific configuration.
+        Initialize base provider with session-specific configuration.

         Args:
             name: Provider name for logging
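For reference, the ProviderCache removed above keyed its on-disk files by an MD5 over method, URL, and canonically sorted params. A self-contained sketch of that derivation:

# --- Example (not part of the diff): the removed cache-key derivation ---
import hashlib
import json
from typing import Optional

def cache_key(method: str, url: str, params: Optional[dict]) -> str:
    cache_data = f"{method}:{url}:{json.dumps(params or {}, sort_keys=True)}"
    return hashlib.md5(cache_data.encode()).hexdigest() + ".json"

# Same request -> same key; param order does not matter because keys are sorted.
print(cache_key("GET", "https://crt.sh/", {"q": "example.com", "output": "json"}))
print(cache_key("GET", "https://crt.sh/", {"output": "json", "q": "example.com"}))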
@@ -198,25 +78,20 @@ class BaseProvider(ABC):
         self.logger = get_forensic_logger()
         self._stop_event = None

-        # GLOBAL provider-specific caching (not session-based)
-        self.cache = ProviderCache(name, cache_expiry_hours=12)
-
         # Statistics (per provider instance)
         self.total_requests = 0
         self.successful_requests = 0
         self.failed_requests = 0
         self.total_relationships_found = 0
-        self.cache_hits = 0
-        self.cache_misses = 0
-
-        print(f"Initialized {name} provider with global cache and session config (rate: {actual_rate_limit}/min)")

     def __getstate__(self):
         """Prepare BaseProvider for pickling by excluding unpicklable objects."""
         state = self.__dict__.copy()
         # Exclude the unpickleable '_local' attribute and stop event
-        state['_local'] = None
-        state['_stop_event'] = None
+        unpicklable_attrs = ['_local', '_stop_event']
+        for attr in unpicklable_attrs:
+            if attr in state:
+                del state[attr]
         return state

     def __setstate__(self, state):
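The __getstate__/__setstate__ pair kept above is the standard idiom for dropping unpicklable members before serialization and recreating them afterwards. A self-contained sketch of the same pattern (Holder is a hypothetical class):

# --- Example (not part of the diff): pickle-safe state filtering ---
import pickle
import threading

class Holder:
    def __init__(self) -> None:
        self.value = 42
        self._stop_event = threading.Event()  # contains a lock; not picklable

    def __getstate__(self):
        state = self.__dict__.copy()
        for attr in ['_stop_event']:
            if attr in state:
                del state[attr]  # drop the unpicklable member
        return state

    def __setstate__(self, state):
        self.__dict__.update(state)
        self._stop_event = threading.Event()  # recreated on unpickle

clone = pickle.loads(pickle.dumps(Holder()))
print(clone.value, isinstance(clone._stop_event, threading.Event))  # 42 True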
@@ -231,7 +106,7 @@ class BaseProvider(ABC):
         if not hasattr(self._local, 'session'):
             self._local.session = requests.Session()
             self._local.session.headers.update({
-                'User-Agent': 'DNSRecon/2.0 (Passive Reconnaissance Tool)'
+                'User-Agent': 'DNSRecon/1.0 (Passive Reconnaissance Tool)'
             })
         return self._local.session

@@ -289,153 +164,79 @@ class BaseProvider(ABC):
     def make_request(self, url: str, method: str = "GET",
                      params: Optional[Dict[str, Any]] = None,
                      headers: Optional[Dict[str, str]] = None,
-                     target_indicator: str = "",
-                     max_retries: int = 3) -> Optional[requests.Response]:
+                     target_indicator: str = "") -> Optional[requests.Response]:
         """
-        Make a rate-limited HTTP request with global caching and aggressive stop signal handling.
+        Make a rate-limited HTTP request.
         """
-        # Check for cancellation before starting
         if self._is_stop_requested():
             print(f"Request cancelled before start: {url}")
             return None

-        # Check global cache first
-        cached_response = self.cache.get_cached_response(method, url, params)
-        if cached_response is not None:
-            print(f"Cache hit for {self.name}: {url}")
-            self.cache_hits += 1
-            return cached_response
-
-        self.cache_misses += 1
-
-        # Determine effective max_retries based on stop signal
-        effective_max_retries = 0 if self._is_stop_requested() else max_retries
-        last_exception = None
-
-        for attempt in range(effective_max_retries + 1):
-            # Check for cancellation before each attempt
-            if self._is_stop_requested():
-                print(f"Request cancelled during attempt {attempt + 1}: {url}")
-                return None
-
-            # Apply rate limiting with cancellation awareness
-            if not self._wait_with_cancellation_check():
-                print(f"Request cancelled during rate limiting: {url}")
-                return None
-
-            # Final check before making HTTP request
-            if self._is_stop_requested():
-                print(f"Request cancelled before HTTP call: {url}")
-                return None
-
-            start_time = time.time()
-            response = None
-            error = None
-
-            try:
-                self.total_requests += 1
-
-                # Prepare request
-                request_headers = self.session.headers.copy()
-                if headers:
-                    request_headers.update(headers)
-
-                print(f"Making {method} request to: {url} (attempt {attempt + 1})")
-
-                # Use shorter timeout if termination is requested
-                request_timeout = 2 if self._is_stop_requested() else self.timeout
-
-                # Make request
-                if method.upper() == "GET":
-                    response = self.session.get(
-                        url,
-                        params=params,
-                        headers=request_headers,
-                        timeout=request_timeout
-                    )
-                elif method.upper() == "POST":
-                    response = self.session.post(
-                        url,
-                        json=params,
-                        headers=request_headers,
-                        timeout=request_timeout
-                    )
-                else:
-                    raise ValueError(f"Unsupported HTTP method: {method}")
-
-                print(f"Response status: {response.status_code}")
-                response.raise_for_status()
-                self.successful_requests += 1
-
-                # Success - log, cache, and return
-                duration_ms = (time.time() - start_time) * 1000
-                self.logger.log_api_request(
-                    provider=self.name,
-                    url=url,
-                    method=method.upper(),
-                    status_code=response.status_code,
-                    response_size=len(response.content),
-                    duration_ms=duration_ms,
-                    error=None,
-                    target_indicator=target_indicator
-                )
-
-                # Cache the successful response globally
-                self.cache.cache_response(method, url, params, response)
-                return response
-
-            except requests.exceptions.RequestException as e:
-                error = str(e)
-                self.failed_requests += 1
-                print(f"Request failed (attempt {attempt + 1}): {error}")
-                last_exception = e
-
-                # Immediately abort retries if stop requested
-                if self._is_stop_requested():
-                    print(f"Stop requested - aborting retries for: {url}")
-                    break
-
-                # Check if we should retry
-                if attempt < effective_max_retries and self._should_retry(e):
-                    # Exponential backoff with jitter for 429 errors
-                    if isinstance(e, requests.exceptions.HTTPError) and e.response and e.response.status_code == 429:
-                        backoff_time = min(60, 10 * (2 ** attempt))
-                        print(f"Rate limit hit. Retrying in {backoff_time} seconds...")
-                    else:
-                        backoff_time = min(2.0, (2 ** attempt) * 0.5)
-                        print(f"Retrying in {backoff_time} seconds...")
-
-                    if not self._sleep_with_cancellation_check(backoff_time):
-                        print(f"Stop requested during backoff - aborting: {url}")
-                        return None
-                    continue
-                else:
-                    break
-
-            except Exception as e:
-                error = f"Unexpected error: {str(e)}"
-                self.failed_requests += 1
-                print(f"Unexpected error: {error}")
-                last_exception = e
-                break
-
-        # All attempts failed - log and return None
-        duration_ms = (time.time() - start_time) * 1000
-        self.logger.log_api_request(
-            provider=self.name,
-            url=url,
-            method=method.upper(),
-            status_code=response.status_code if response else None,
-            response_size=len(response.content) if response else None,
-            duration_ms=duration_ms,
-            error=error,
-            target_indicator=target_indicator
-        )
-
-        if error and last_exception:
-            raise last_exception
-
-        return None
+        self.rate_limiter.wait_if_needed()
+
+        start_time = time.time()
+        response = None
+        error = None
+
+        try:
+            self.total_requests += 1
+
+            request_headers = dict(self.session.headers).copy()
+            if headers:
+                request_headers.update(headers)
+
+            print(f"Making {method} request to: {url}")
+
+            if method.upper() == "GET":
+                response = self.session.get(
+                    url,
+                    params=params,
+                    headers=request_headers,
+                    timeout=self.timeout
+                )
+            elif method.upper() == "POST":
+                response = self.session.post(
+                    url,
+                    json=params,
+                    headers=request_headers,
+                    timeout=self.timeout
+                )
+            else:
+                raise ValueError(f"Unsupported HTTP method: {method}")
+
+            print(f"Response status: {response.status_code}")
+            response.raise_for_status()
+            self.successful_requests += 1
+
+            duration_ms = (time.time() - start_time) * 1000
+            self.logger.log_api_request(
+                provider=self.name,
+                url=url,
+                method=method.upper(),
+                status_code=response.status_code,
+                response_size=len(response.content),
+                duration_ms=duration_ms,
+                error=None,
+                target_indicator=target_indicator
+            )
+
+            return response
+
+        except requests.exceptions.RequestException as e:
+            error = str(e)
+            self.failed_requests += 1
+            duration_ms = (time.time() - start_time) * 1000
+            self.logger.log_api_request(
+                provider=self.name,
+                url=url,
+                method=method.upper(),
+                status_code=response.status_code if response else None,
+                response_size=len(response.content) if response else None,
+                duration_ms=duration_ms,
+                error=error,
+                target_indicator=target_indicator
+            )
+            raise e

     def _is_stop_requested(self) -> bool:
         """
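With the retry loop removed, make_request now re-raises RequestException and leaves retries to the caller. For reference, the two backoff schedules the removed loop used (capped at 60s for HTTP 429, 2s otherwise) can be sketched standalone:

# --- Example (not part of the diff): the removed backoff schedules ---
def backoff_429(attempt: int) -> float:
    # HTTP 429: 10s doubling per attempt, capped at 60s
    return min(60, 10 * (2 ** attempt))

def backoff_other(attempt: int) -> float:
    # Other retryable errors: 0.5s doubling per attempt, capped at 2s
    return min(2.0, (2 ** attempt) * 0.5)

for attempt in range(4):
    print(attempt, backoff_429(attempt), backoff_other(attempt))
# 0 10 0.5
# 1 20 1.0
# 2 40 2.0
# 3 60 2.0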
@@ -445,43 +246,6 @@ class BaseProvider(ABC):
                 return True
         return False

-    def _wait_with_cancellation_check(self) -> bool:
-        """
-        Wait for rate limiting while aggressively checking for cancellation.
-        Returns False if cancelled during wait.
-        """
-        current_time = time.time()
-        time_since_last = current_time - self.rate_limiter.last_request_time
-
-        if time_since_last < self.rate_limiter.min_interval:
-            sleep_time = self.rate_limiter.min_interval - time_since_last
-            if not self._sleep_with_cancellation_check(sleep_time):
-                return False
-
-        self.rate_limiter.last_request_time = time.time()
-        return True
-
-    def _sleep_with_cancellation_check(self, sleep_time: float) -> bool:
-        """
-        Sleep for the specified time while aggressively checking for cancellation.
-
-        Args:
-            sleep_time: Time to sleep in seconds
-
-        Returns:
-            bool: True if sleep completed, False if cancelled
-        """
-        sleep_start = time.time()
-        check_interval = 0.05  # Check every 50ms for aggressive responsiveness
-
-        while time.time() - sleep_start < sleep_time:
-            if self._is_stop_requested():
-                return False
-            remaining_time = sleep_time - (time.time() - sleep_start)
-            time.sleep(min(check_interval, remaining_time))
-
-        return True
-
     def set_stop_event(self, stop_event: threading.Event) -> None:
         """
         Set the stop event for this provider to enable cancellation.
@@ -491,28 +255,6 @@ class BaseProvider(ABC):
         """
         self._stop_event = stop_event

-    def _should_retry(self, exception: requests.exceptions.RequestException) -> bool:
-        """
-        Determine if a request should be retried based on the exception.
-
-        Args:
-            exception: The request exception that occurred
-
-        Returns:
-            True if the request should be retried
-        """
-        # Retry on connection errors and timeouts
-        if isinstance(exception, (requests.exceptions.ConnectionError,
-                                  requests.exceptions.Timeout)):
-            return True
-
-        if isinstance(exception, requests.exceptions.HTTPError):
-            if hasattr(exception, 'response') and exception.response:
-                # Retry on server errors (5xx) AND on rate-limiting errors (429)
-                return exception.response.status_code >= 500 or exception.response.status_code == 429
-
-        return False
-
     def log_relationship_discovery(self, source_node: str, target_node: str,
                                    relationship_type: str,
                                    confidence_score: float,
@@ -543,7 +285,7 @@ class BaseProvider(ABC):

     def get_statistics(self) -> Dict[str, Any]:
         """
-        Get provider statistics including cache performance.
+        Get provider statistics.

         Returns:
             Dictionary containing provider performance metrics
@@ -555,8 +297,5 @@ class BaseProvider(ABC):
             'failed_requests': self.failed_requests,
             'success_rate': (self.successful_requests / self.total_requests * 100) if self.total_requests > 0 else 0,
             'relationships_found': self.total_relationships_found,
-            'rate_limit': self.rate_limiter.requests_per_minute,
-            'cache_hits': self.cache_hits,
-            'cache_misses': self.cache_misses,
-            'cache_hit_rate': (self.cache_hits / (self.cache_hits + self.cache_misses) * 100) if (self.cache_hits + self.cache_misses) > 0 else 0
+            'rate_limit': self.rate_limiter.requests_per_minute
         }
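The removed _sleep_with_cancellation_check polled the stop flag every 50 ms; threading.Event.wait achieves the same cancellable sleep in a single call, as in this self-contained sketch (not taken from the diff):

# --- Example (not part of the diff): cancellable sleep via Event.wait ---
import threading

def sleep_unless_cancelled(stop_event: threading.Event, seconds: float) -> bool:
    """Return True if the full sleep elapsed, False if cancelled early."""
    # Event.wait returns True as soon as the event is set, False on timeout.
    return not stop_event.wait(timeout=seconds)

stop = threading.Event()
threading.Timer(0.2, stop.set).start()    # cancel after 200 ms
print(sleep_unless_cancelled(stop, 5.0))  # False: cancelled long before 5s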
@@ -1,44 +1,60 @@
-"""
-Certificate Transparency provider using crt.sh.
-Discovers domain relationships through certificate SAN analysis with comprehensive certificate tracking.
-Stores certificates as metadata on domain nodes rather than creating certificate nodes.
-"""
+# dnsrecon/providers/crtsh_provider.py

 import json
 import re
+import os
+from pathlib import Path
 from typing import List, Dict, Any, Tuple, Set
-from urllib.parse import quote
 from datetime import datetime, timezone
-import requests
+
+# New dependency required for this provider
+try:
+    import psycopg2
+    import psycopg2.extras
+    PSYCOPG2_AVAILABLE = True
+except ImportError:
+    PSYCOPG2_AVAILABLE = False

 from .base_provider import BaseProvider
 from utils.helpers import _is_valid_domain

+# We use requests only to raise the same exception type for compatibility with core retry logic
+import requests
+

 class CrtShProvider(BaseProvider):
     """
-    Provider for querying crt.sh certificate transparency database.
-    Now uses session-specific configuration and caching.
+    Provider for querying crt.sh certificate transparency database via its public PostgreSQL endpoint.
+    This version is designed to be a drop-in, high-performance replacement for the API-based provider.
+    It preserves the same caching and data processing logic.
     """

-    def __init__(self, session_config=None):
-        """Initialize CrtSh provider with session-specific configuration."""
+    def __init__(self, name=None, session_config=None):
+        """Initialize CrtShDB provider with session-specific configuration."""
         super().__init__(
             name="crtsh",
-            rate_limit=60,
-            timeout=15,
+            rate_limit=0,  # No rate limit for direct DB access
+            timeout=60,    # Increased timeout for potentially long DB queries
             session_config=session_config
         )
-        self.base_url = "https://crt.sh/"
+        # Database connection details
+        self.db_host = "crt.sh"
+        self.db_port = 5432
+        self.db_name = "certwatch"
+        self.db_user = "guest"
         self._stop_event = None

+        # Initialize cache directory (same as original provider)
+        self.cache_dir = Path('cache') / 'crtsh'
+        self.cache_dir.mkdir(parents=True, exist_ok=True)
+
     def get_name(self) -> str:
         """Return the provider name."""
         return "crtsh"

     def get_display_name(self) -> str:
         """Return the provider display name for the UI."""
-        return "crt.sh"
+        return "crt.sh (DB)"

     def requires_api_key(self) -> bool:
         """Return True if the provider requires an API key."""
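A minimal connectivity check against the public crt.sh PostgreSQL endpoint, assuming the same hardcoded host, database, and guest user as above; the endpoint is best-effort, so failures here are expected at times. One caveat worth flagging in the hunk above: rate_limit=0 will divide by zero in RateLimiter's 60.0 / requests_per_minute unless the base class guards against it.

# --- Example (not part of the diff): connectivity check for the crt.sh DB ---
# Assumes psycopg2 is installed and the public guest endpoint is reachable.
import psycopg2

conn = psycopg2.connect(
    dbname="certwatch", user="guest", host="crt.sh", port=5432,
    connect_timeout=10
)
try:
    with conn.cursor() as cur:
        cur.execute("SELECT count(*) FROM ca;")  # tiny sanity query against a table the provider joins
        print("CAs known to crt.sh:", cur.fetchone()[0])
finally:
    conn.close()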
@@ -50,499 +66,448 @@ class CrtShProvider(BaseProvider):

     def is_available(self) -> bool:
         """
-        Check if the provider is configured to be used.
-        This method is intentionally simple and does not perform a network request
-        to avoid blocking application startup.
+        Check if the provider can be used. Requires the psycopg2 library.
         """
+        if not PSYCOPG2_AVAILABLE:
+            self.logger.logger.warning("psycopg2 library not found. CrtShDBProvider is unavailable. "
+                                       "Please run 'pip install psycopg2-binary'.")
+            return False
         return True

+    def _query_crtsh(self, domain: str) -> List[Dict[str, Any]]:
+        """
+        Query the crt.sh PostgreSQL database for raw certificate data.
+        Raises exceptions for DB/network errors to allow core logic to retry.
+        """
+        conn = None
+        certificates = []
+
+        # SQL Query to find all certificate IDs related to the domain (including subdomains),
+        # then retrieve comprehensive details for each certificate, mimicking the JSON API structure.
+        sql_query = """
+            WITH certificates_of_interest AS (
+                SELECT DISTINCT ci.certificate_id
+                FROM certificate_identity ci
+                WHERE ci.name_value ILIKE %(domain_wildcard)s OR ci.name_value = %(domain)s
+            )
+            SELECT
+                c.id,
+                c.serial_number,
+                c.not_before,
+                c.not_after,
+                (SELECT min(entry_timestamp) FROM ct_log_entry cle WHERE cle.certificate_id = c.id) as entry_timestamp,
+                ca.id as issuer_ca_id,
+                ca.name as issuer_name,
+                (SELECT array_to_string(array_agg(DISTINCT ci.name_value), E'\n') FROM certificate_identity ci WHERE ci.certificate_id = c.id) as name_value,
+                (SELECT name_value FROM certificate_identity ci WHERE ci.certificate_id = c.id AND ci.name_type = 'commonName' LIMIT 1) as common_name
+            FROM
+                certificate c
+                JOIN ca ON c.issuer_ca_id = ca.id
+            WHERE c.id IN (SELECT certificate_id FROM certificates_of_interest);
+        """
+
+        try:
+            conn = psycopg2.connect(
+                dbname=self.db_name,
+                user=self.db_user,
+                host=self.db_host,
+                port=self.db_port,
+                connect_timeout=self.timeout
+            )
+
+            with conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor) as cursor:
+                cursor.execute(sql_query, {'domain': domain, 'domain_wildcard': f'%.{domain}'})
+                results = cursor.fetchall()
+                certificates = [dict(row) for row in results]
+
+            self.logger.logger.info(f"crt.sh DB query for '{domain}' returned {len(certificates)} certificates.")
+
+        except psycopg2.Error as e:
+            self.logger.logger.error(f"PostgreSQL query failed for {domain}: {e}")
+            # Raise a RequestException to be compatible with the existing retry logic in the core application
+            raise requests.exceptions.RequestException(f"PostgreSQL query failed: {e}") from e
+        finally:
+            if conn:
+                conn.close()
+
+        return certificates
+
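The query matches the apex name exactly and any subdomain via the ILIKE wildcard parameter. A quick sketch of how the two bound parameters are built (example.com is a hypothetical input):

# --- Example (not part of the diff): the query's bound parameters ---
domain = "example.com"  # hypothetical input
params = {'domain': domain, 'domain_wildcard': f'%.{domain}'}
print(params)  # {'domain': 'example.com', 'domain_wildcard': '%.example.com'}
# ILIKE '%.example.com' matches www.example.com, a.b.example.com, etc.,
# while the exact comparison picks up the apex name itself.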
+    def query_domain(self, domain: str) -> List[Tuple[str, str, str, float, Dict[str, Any]]]:
+        """
+        Query crt.sh for certificates containing the domain with caching support.
+        Properly raises exceptions for network errors to allow core logic retries.
+        """
+        if not _is_valid_domain(domain):
+            return []
+
+        if self._stop_event and self._stop_event.is_set():
+            return []
+
+        cache_file = self._get_cache_file_path(domain)
+        cache_status = self._get_cache_status(cache_file)
+
+        certificates = []
+
+        try:
+            if cache_status == "fresh":
+                certificates = self._load_cached_certificates(cache_file)
+                self.logger.logger.info(f"Using cached data for {domain} ({len(certificates)} certificates)")
+
+            elif cache_status == "not_found":
+                # Fresh query from DB, create new cache
+                certificates = self._query_crtsh(domain)
+                if certificates:
+                    self._create_cache_file(cache_file, domain, self._serialize_certs_for_cache(certificates))
+                else:
+                    self.logger.logger.info(f"No certificates found for {domain}, not caching")
+
+            elif cache_status == "stale":
+                try:
+                    new_certificates = self._query_crtsh(domain)
+                    if new_certificates:
+                        certificates = self._append_to_cache(cache_file, self._serialize_certs_for_cache(new_certificates))
+                    else:
+                        certificates = self._load_cached_certificates(cache_file)
+                except requests.exceptions.RequestException:
+                    certificates = self._load_cached_certificates(cache_file)
+                    if certificates:
+                        self.logger.logger.warning(f"DB query failed for {domain}, using stale cache data.")
+                    else:
+                        raise
+
+        except requests.exceptions.RequestException as e:
+            # Re-raise so core logic can retry
+            self.logger.logger.error(f"DB query failed for {domain}: {e}")
+            raise e
+        except json.JSONDecodeError as e:
+            # JSON parsing errors from cache should also be handled
+            self.logger.logger.error(f"Failed to parse JSON from cache for {domain}: {e}")
+            raise e
+
+        if self._stop_event and self._stop_event.is_set():
+            return []
+
+        if not certificates:
+            return []
+
+        return self._process_certificates_to_relationships(domain, certificates)
+
+    def _serialize_certs_for_cache(self, certificates: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
+        """
+        Serialize certificate data for JSON caching, converting datetime objects to ISO strings.
+        """
+        serialized_certs = []
+        for cert in certificates:
+            serialized_cert = cert.copy()
+            for key in ['not_before', 'not_after', 'entry_timestamp']:
+                if isinstance(serialized_cert.get(key), datetime):
+                    # Ensure datetime is timezone-aware before converting
+                    dt_obj = serialized_cert[key]
+                    if dt_obj.tzinfo is None:
+                        dt_obj = dt_obj.replace(tzinfo=timezone.utc)
+                    serialized_cert[key] = dt_obj.isoformat()
+            serialized_certs.append(serialized_cert)
+        return serialized_certs
+
+    # --- All methods below are copied directly from the original CrtShProvider ---
+    # They are compatible because _query_crtsh returns data in the same format
+    # as the original _query_crtsh_api method. A small adjustment is made to
+    # _parse_certificate_date to handle datetime objects directly from the DB.
+
+    def _get_cache_file_path(self, domain: str) -> Path:
+        """Generate cache file path for a domain."""
+        safe_domain = domain.replace('.', '_').replace('/', '_').replace('\\', '_')
+        return self.cache_dir / f"{safe_domain}.json"
+
+    def _get_cache_status(self, cache_file_path: Path) -> str:
+        """Check cache status for a domain."""
+        if not cache_file_path.exists():
+            return "not_found"
+
+        try:
+            with open(cache_file_path, 'r') as f:
+                cache_data = json.load(f)
+
+            last_query_str = cache_data.get("last_upstream_query")
+            if not last_query_str:
+                return "stale"
+
+            last_query = datetime.fromisoformat(last_query_str.replace('Z', '+00:00'))
+            hours_since_query = (datetime.now(timezone.utc) - last_query).total_seconds() / 3600
+
+            cache_timeout = self.config.cache_timeout_hours
+            if hours_since_query < cache_timeout:
+                return "fresh"
+            else:
+                return "stale"
+
+        except (json.JSONDecodeError, ValueError, KeyError) as e:
+            self.logger.logger.warning(f"Invalid cache file format for {cache_file_path}: {e}")
+            return "stale"
+
+    def _load_cached_certificates(self, cache_file_path: Path) -> List[Dict[str, Any]]:
+        """Load certificates from cache file."""
+        try:
+            with open(cache_file_path, 'r') as f:
+                cache_data = json.load(f)
+            return cache_data.get('certificates', [])
+        except (json.JSONDecodeError, FileNotFoundError, KeyError) as e:
+            self.logger.logger.error(f"Failed to load cached certificates from {cache_file_path}: {e}")
+            return []
+
+    def _create_cache_file(self, cache_file_path: Path, domain: str, certificates: List[Dict[str, Any]]) -> None:
+        """Create new cache file with certificates."""
+        try:
+            cache_data = {
+                "domain": domain,
+                "first_cached": datetime.now(timezone.utc).isoformat(),
+                "last_upstream_query": datetime.now(timezone.utc).isoformat(),
+                "upstream_query_count": 1,
+                "certificates": certificates
+            }
+            cache_file_path.parent.mkdir(parents=True, exist_ok=True)
+            with open(cache_file_path, 'w') as f:
+                json.dump(cache_data, f, separators=(',', ':'))
+            self.logger.logger.info(f"Created cache file for {domain} with {len(certificates)} certificates")
+        except Exception as e:
+            self.logger.logger.warning(f"Failed to create cache file for {domain}: {e}")
+
+    def _append_to_cache(self, cache_file_path: Path, new_certificates: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
+        """Append new certificates to existing cache and return all certificates."""
+        try:
+            with open(cache_file_path, 'r') as f:
+                cache_data = json.load(f)
+
+            existing_ids = {cert.get('id') for cert in cache_data.get('certificates', [])}
+            added_count = 0
+            for cert in new_certificates:
+                cert_id = cert.get('id')
+                if cert_id and cert_id not in existing_ids:
+                    cache_data['certificates'].append(cert)
+                    existing_ids.add(cert_id)
+                    added_count += 1
+
+            cache_data['last_upstream_query'] = datetime.now(timezone.utc).isoformat()
+            cache_data['upstream_query_count'] = cache_data.get('upstream_query_count', 0) + 1
+
+            with open(cache_file_path, 'w') as f:
+                json.dump(cache_data, f, separators=(',', ':'))
+
+            total_certs = len(cache_data['certificates'])
+            self.logger.logger.info(f"Appended {added_count} new certificates to cache. Total: {total_certs}")
+            return cache_data['certificates']
+        except Exception as e:
+            self.logger.logger.warning(f"Failed to append to cache: {e}")
+            return new_certificates
+
+    def _parse_issuer_organization(self, issuer_dn: str) -> str:
+        """Parse the issuer Distinguished Name to extract just the organization name."""
+        if not issuer_dn: return issuer_dn
+        try:
+            components = [comp.strip() for comp in issuer_dn.split(',')]
+            for component in components:
+                if component.startswith('O='):
+                    org_name = component[2:].strip()
+                    if org_name.startswith('"') and org_name.endswith('"'):
+                        org_name = org_name[1:-1]
+                    return org_name
+            return issuer_dn
+        except Exception as e:
+            self.logger.logger.debug(f"Failed to parse issuer DN '{issuer_dn}': {e}")
+            return issuer_dn
+
-    def _parse_certificate_date(self, date_string: str) -> datetime:
+    def _parse_certificate_date(self, date_input: Any) -> datetime:
         """
-        Parse certificate date from crt.sh format.
-
-        Args:
-            date_string: Date string from crt.sh API
-
-        Returns:
-            Parsed datetime object in UTC
+        Parse certificate date from various formats (string from cache, datetime from DB).
         """
+        if isinstance(date_input, datetime):
+            # If it's already a datetime object from the DB, just ensure it's UTC
+            if date_input.tzinfo is None:
+                return date_input.replace(tzinfo=timezone.utc)
+            return date_input
+
+        date_string = str(date_input)
         if not date_string:
             raise ValueError("Empty date string")

         try:
-            # Handle various possible formats from crt.sh
-            if date_string.endswith('Z'):
-                return datetime.fromisoformat(date_string[:-1]).replace(tzinfo=timezone.utc)
-            elif '+' in date_string or date_string.endswith('UTC'):
-                # Handle timezone-aware strings
-                date_string = date_string.replace('UTC', '').strip()
-                if '+' in date_string:
-                    date_string = date_string.split('+')[0]
-                return datetime.fromisoformat(date_string).replace(tzinfo=timezone.utc)
-            else:
-                # Assume UTC if no timezone specified
-                return datetime.fromisoformat(date_string).replace(tzinfo=timezone.utc)
-        except Exception as e:
-            # Fallback: try parsing without timezone info and assume UTC
+            if 'Z' in date_string:
+                return datetime.fromisoformat(date_string.replace('Z', '+00:00'))
+            # Handle standard ISO format with or without timezone
+            dt = datetime.fromisoformat(date_string)
+            if dt.tzinfo is None:
+                return dt.replace(tzinfo=timezone.utc)
+            return dt
+        except ValueError as e:
             try:
+                # Fallback for other formats
                 return datetime.strptime(date_string[:19], "%Y-%m-%dT%H:%M:%S").replace(tzinfo=timezone.utc)
             except Exception:
                 raise ValueError(f"Unable to parse date: {date_string}") from e

     def _is_cert_valid(self, cert_data: Dict[str, Any]) -> bool:
-        """
-        Check if a certificate is currently valid based on its expiry date.
-
-        Args:
-            cert_data: Certificate data from crt.sh
-
-        Returns:
-            True if certificate is currently valid (not expired)
-        """
+        """Check if a certificate is currently valid based on its expiry date."""
         try:
             not_after_str = cert_data.get('not_after')
-            if not not_after_str:
-                return False
+            if not not_after_str: return False

             not_after_date = self._parse_certificate_date(not_after_str)
             not_before_str = cert_data.get('not_before')

             now = datetime.now(timezone.utc)

-            # Check if certificate is within valid date range
             is_not_expired = not_after_date > now

             if not_before_str:
                 not_before_date = self._parse_certificate_date(not_before_str)
                 is_not_before_valid = not_before_date <= now
                 return is_not_expired and is_not_before_valid

             return is_not_expired

         except Exception as e:
             self.logger.logger.debug(f"Certificate validity check failed: {e}")
             return False

     def _extract_certificate_metadata(self, cert_data: Dict[str, Any]) -> Dict[str, Any]:
-        """
-        Extract comprehensive metadata from certificate data.
-
-        Args:
-            cert_data: Raw certificate data from crt.sh
-
-        Returns:
-            Comprehensive certificate metadata dictionary
-        """
+        # This method works as-is.
+        raw_issuer_name = cert_data.get('issuer_name', '')
+        parsed_issuer_name = self._parse_issuer_organization(raw_issuer_name)
+
         metadata = {
             'certificate_id': cert_data.get('id'),
             'serial_number': cert_data.get('serial_number'),
-            'issuer_name': cert_data.get('issuer_name'),
+            'issuer_name': parsed_issuer_name,
             'issuer_ca_id': cert_data.get('issuer_ca_id'),
             'common_name': cert_data.get('common_name'),
             'not_before': cert_data.get('not_before'),
             'not_after': cert_data.get('not_after'),
             'entry_timestamp': cert_data.get('entry_timestamp'),
-            'source': 'crt.sh'
+            'source': 'crt.sh (DB)'
         }

         try:
             if metadata['not_before'] and metadata['not_after']:
                 not_before = self._parse_certificate_date(metadata['not_before'])
                 not_after = self._parse_certificate_date(metadata['not_after'])

                 metadata['validity_period_days'] = (not_after - not_before).days
                 metadata['is_currently_valid'] = self._is_cert_valid(cert_data)
                 metadata['expires_soon'] = (not_after - datetime.now(timezone.utc)).days <= 30

-                # Add human-readable dates
                 metadata['not_before'] = not_before.strftime('%Y-%m-%d %H:%M:%S UTC')
                 metadata['not_after'] = not_after.strftime('%Y-%m-%d %H:%M:%S UTC')

         except Exception as e:
             self.logger.logger.debug(f"Error computing certificate metadata: {e}")
             metadata['is_currently_valid'] = False
             metadata['expires_soon'] = False

         return metadata

-    def query_domain(self, domain: str) -> List[Tuple[str, str, str, float, Dict[str, Any]]]:
-        """
-        Query crt.sh for certificates containing the domain.
-        """
-        if not _is_valid_domain(domain):
-            return []
-
-        # Check for cancellation before starting
-        if self._stop_event and self._stop_event.is_set():
-            print(f"CrtSh query cancelled before start for domain: {domain}")
-            return []
-
+    def _process_certificates_to_relationships(self, domain: str, certificates: List[Dict[str, Any]]) -> List[Tuple[str, str, str, float, Dict[str, Any]]]:
+        # This method works as-is.
         relationships = []
-        try:
-            # Query crt.sh for certificates
-            url = f"{self.base_url}?q={quote(domain)}&output=json"
-            response = self.make_request(url, target_indicator=domain, max_retries=3)
-
-            if not response or response.status_code != 200:
-                return []
-
-            # Check for cancellation after request
-            if self._stop_event and self._stop_event.is_set():
-                print(f"CrtSh query cancelled after request for domain: {domain}")
-                return []
-
-            certificates = response.json()
-
-            if not certificates:
-                return []
-
-            # Check for cancellation before processing
-            if self._stop_event and self._stop_event.is_set():
-                print(f"CrtSh query cancelled before processing for domain: {domain}")
-                return []
-
-            # Aggregate certificate data by domain
-            domain_certificates = {}
-            all_discovered_domains = set()
-
-            # Process certificates with cancellation checking
-            for i, cert_data in enumerate(certificates):
-                # Check for cancellation every 5 certificates instead of 10 for faster response
-                if i % 5 == 0 and self._stop_event and self._stop_event.is_set():
-                    print(f"CrtSh processing cancelled at certificate {i} for domain: {domain}")
-                    break
-
-                cert_metadata = self._extract_certificate_metadata(cert_data)
-                cert_domains = self._extract_domains_from_certificate(cert_data)
-
-                # Add all domains from this certificate to our tracking
-                for cert_domain in cert_domains:
-                    # Additional stop check during domain processing
-                    if i % 20 == 0 and self._stop_event and self._stop_event.is_set():
-                        print(f"CrtSh domain processing cancelled for domain: {domain}")
-                        break
-
-                    if not _is_valid_domain(cert_domain):
-                        continue
-
-                    all_discovered_domains.add(cert_domain)
-
-                    # Initialize domain certificate list if needed
-                    if cert_domain not in domain_certificates:
-                        domain_certificates[cert_domain] = []
-
-                    # Add this certificate to the domain's certificate list
-                    domain_certificates[cert_domain].append(cert_metadata)
-
-            # Final cancellation check before creating relationships
-            if self._stop_event and self._stop_event.is_set():
-                print(f"CrtSh query cancelled before relationship creation for domain: {domain}")
-                return []
-
-            # Create relationships from query domain to ALL discovered domains with stop checking
-            for i, discovered_domain in enumerate(all_discovered_domains):
-                if discovered_domain == domain:
-                    continue  # Skip self-relationships
-
-                # Check for cancellation every 10 relationships
-                if i % 10 == 0 and self._stop_event and self._stop_event.is_set():
-                    print(f"CrtSh relationship creation cancelled for domain: {domain}")
-                    break
-
-                if not _is_valid_domain(discovered_domain):
-                    continue
-
-                # Get certificates for both domains
-                query_domain_certs = domain_certificates.get(domain, [])
-                discovered_domain_certs = domain_certificates.get(discovered_domain, [])
-
-                # Find shared certificates (for metadata purposes)
-                shared_certificates = self._find_shared_certificates(query_domain_certs, discovered_domain_certs)
-
-                # Calculate confidence based on relationship type and shared certificates
-                confidence = self._calculate_domain_relationship_confidence(
-                    domain, discovered_domain, shared_certificates, all_discovered_domains
-                )
-
-                # Create comprehensive raw data for the relationship
-                relationship_raw_data = {
-                    'relationship_type': 'certificate_discovery',
-                    'shared_certificates': shared_certificates,
-                    'total_shared_certs': len(shared_certificates),
-                    'discovery_context': self._determine_relationship_context(discovered_domain, domain),
-                    'domain_certificates': {
-                        domain: self._summarize_certificates(query_domain_certs),
-                        discovered_domain: self._summarize_certificates(discovered_domain_certs)
-                    }
-                }
-
-                # Create domain -> domain relationship
-                relationships.append((
-                    domain,
-                    discovered_domain,
-                    'san_certificate',
-                    confidence,
-                    relationship_raw_data
-                ))
-
-                # Log the relationship discovery
-                self.log_relationship_discovery(
-                    source_node=domain,
-                    target_node=discovered_domain,
-                    relationship_type='san_certificate',
-                    confidence_score=confidence,
-                    raw_data=relationship_raw_data,
-                    discovery_method="certificate_transparency_analysis"
-                )
-
-        except json.JSONDecodeError as e:
-            self.logger.logger.error(f"Failed to parse JSON response from crt.sh: {e}")
-        except requests.exceptions.RequestException as e:
-            self.logger.logger.error(f"HTTP request to crt.sh failed: {e}")
-
+        if self._stop_event and self._stop_event.is_set(): return []
+        domain_certificates = {}
+        all_discovered_domains = set()
+        for i, cert_data in enumerate(certificates):
+            if i % 5 == 0 and self._stop_event and self._stop_event.is_set(): break
+            cert_metadata = self._extract_certificate_metadata(cert_data)
+            cert_domains = self._extract_domains_from_certificate(cert_data)
+            all_discovered_domains.update(cert_domains)
+            for cert_domain in cert_domains:
+                if not _is_valid_domain(cert_domain): continue
+                if cert_domain not in domain_certificates:
+                    domain_certificates[cert_domain] = []
+                domain_certificates[cert_domain].append(cert_metadata)
+        if self._stop_event and self._stop_event.is_set(): return []
+        for i, discovered_domain in enumerate(all_discovered_domains):
+            if discovered_domain == domain: continue
+            if i % 10 == 0 and self._stop_event and self._stop_event.is_set(): break
+            if not _is_valid_domain(discovered_domain): continue
+            query_domain_certs = domain_certificates.get(domain, [])
+            discovered_domain_certs = domain_certificates.get(discovered_domain, [])
+            shared_certificates = self._find_shared_certificates(query_domain_certs, discovered_domain_certs)
+            confidence = self._calculate_domain_relationship_confidence(
+                domain, discovered_domain, shared_certificates, all_discovered_domains
+            )
+            relationship_raw_data = {
+                'relationship_type': 'certificate_discovery',
+                'shared_certificates': shared_certificates,
+                'total_shared_certs': len(shared_certificates),
+                'discovery_context': self._determine_relationship_context(discovered_domain, domain),
+                'domain_certificates': {
+                    domain: self._summarize_certificates(query_domain_certs),
+                    discovered_domain: self._summarize_certificates(discovered_domain_certs)
+                }
+            }
+            relationships.append((
+                domain, discovered_domain, 'san_certificate', confidence, relationship_raw_data
+            ))
+            self.log_relationship_discovery(
+                source_node=domain, target_node=discovered_domain, relationship_type='san_certificate',
+                confidence_score=confidence, raw_data=relationship_raw_data,
+                discovery_method="certificate_transparency_analysis"
+            )
         return relationships
|
|
||||||
|
# --- All remaining helper methods are identical to the original and fully compatible ---
|
||||||
|
# They are included here for completeness.
|
||||||
|
|
||||||
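For orientation, every element appended to `relationships` above is a flat 5-tuple matching the method's return type of List[Tuple[str, str, str, float, Dict[str, Any]]]. A minimal sketch of one such record, with illustrative values only (not real scan output):

# Shape of one relationship tuple produced by query_domain(); values are illustrative.
example_relationship = (
    'example.com',            # source: the queried domain
    'www.example.com',        # target: a domain discovered via certificate SANs
    'san_certificate',        # relationship type recorded on the edge
    0.95,                     # confidence score, always clamped to [0.1, 1.0]
    {'relationship_type': 'certificate_discovery', 'total_shared_certs': 1},
)
source, target, rel_type, confidence, raw_data = example_relationship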
     def _find_shared_certificates(self, certs1: List[Dict[str, Any]], certs2: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
-        """
-        Find certificates that are shared between two domain certificate lists.
-
-        Args:
-            certs1: First domain's certificates
-            certs2: Second domain's certificates
-
-        Returns:
-            List of shared certificate metadata
-        """
-        shared = []
-
-        # Create a set of certificate IDs from the first list for quick lookup
         cert1_ids = {cert.get('certificate_id') for cert in certs1 if cert.get('certificate_id')}
-
-        # Find certificates in the second list that match
-        for cert in certs2:
-            if cert.get('certificate_id') in cert1_ids:
-                shared.append(cert)
-
-        return shared
+        return [cert for cert in certs2 if cert.get('certificate_id') in cert1_ids]
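The set-based lookup matches certificates purely on their `certificate_id`. A quick standalone check with toy data (hypothetical IDs, not real crt.sh records):

# Toy data with hypothetical certificate IDs; entries without an ID are ignored.
certs_a = [{'certificate_id': 101}, {'certificate_id': 102}]
certs_b = [{'certificate_id': 102}, {'certificate_id': 103}, {}]

cert_a_ids = {c.get('certificate_id') for c in certs_a if c.get('certificate_id')}
shared = [c for c in certs_b if c.get('certificate_id') in cert_a_ids]
assert shared == [{'certificate_id': 102}]  # only the certificate present in both lists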
     def _summarize_certificates(self, certificates: List[Dict[str, Any]]) -> Dict[str, Any]:
-        """
-        Create a summary of certificates for a domain.
-
-        Args:
-            certificates: List of certificate metadata
-
-        Returns:
-            Summary dictionary with aggregate statistics
-        """
-        if not certificates:
-            return {
-                'total_certificates': 0,
-                'valid_certificates': 0,
-                'expired_certificates': 0,
-                'expires_soon_count': 0,
-                'unique_issuers': [],
-                'latest_certificate': None,
-                'has_valid_cert': False
-            }
-
+        if not certificates: return {'total_certificates': 0, 'valid_certificates': 0, 'expired_certificates': 0, 'expires_soon_count': 0, 'unique_issuers': [], 'latest_certificate': None, 'has_valid_cert': False}
         valid_count = sum(1 for cert in certificates if cert.get('is_currently_valid'))
-        expired_count = len(certificates) - valid_count
         expires_soon_count = sum(1 for cert in certificates if cert.get('expires_soon'))
-
-        # Get unique issuers
         unique_issuers = list(set(cert.get('issuer_name') for cert in certificates if cert.get('issuer_name')))
-
-        # Find the most recent certificate
-        latest_cert = None
-        latest_date = None
-
+        latest_cert, latest_date = None, None
         for cert in certificates:
             try:
                 if cert.get('not_before'):
                     cert_date = self._parse_certificate_date(cert['not_before'])
                     if latest_date is None or cert_date > latest_date:
-                        latest_date = cert_date
-                        latest_cert = cert
-            except Exception:
-                continue
-
-        return {
-            'total_certificates': len(certificates),
-            'valid_certificates': valid_count,
-            'expired_certificates': expired_count,
-            'expires_soon_count': expires_soon_count,
-            'unique_issuers': unique_issuers,
-            'latest_certificate': latest_cert,
-            'has_valid_cert': valid_count > 0,
-            'certificate_details': certificates  # Full details for forensic analysis
-        }
+                        latest_date, latest_cert = cert_date, cert
+            except Exception: continue
+        return {'total_certificates': len(certificates), 'valid_certificates': valid_count, 'expired_certificates': len(certificates) - valid_count, 'expires_soon_count': expires_soon_count, 'unique_issuers': unique_issuers, 'latest_certificate': latest_cert, 'has_valid_cert': valid_count > 0, 'certificate_details': certificates}
-    def _calculate_domain_relationship_confidence(self, domain1: str, domain2: str,
-                                                 shared_certificates: List[Dict[str, Any]],
-                                                 all_discovered_domains: Set[str]) -> float:
-        """
-        Calculate confidence score for domain relationship based on various factors.
-
-        Args:
-            domain1: Source domain (query domain)
-            domain2: Target domain (discovered domain)
-            shared_certificates: List of shared certificate metadata
-            all_discovered_domains: All domains discovered in this query
-
-        Returns:
-            Confidence score between 0.0 and 1.0
-        """
-        base_confidence = 0.9
-
-        # Adjust confidence based on domain relationship context
+    def _calculate_domain_relationship_confidence(self, domain1: str, domain2: str, shared_certificates: List[Dict[str, Any]], all_discovered_domains: Set[str]) -> float:
+        base_confidence, context_bonus, shared_bonus, validity_bonus, issuer_bonus = 0.9, 0.0, 0.0, 0.0, 0.0
         relationship_context = self._determine_relationship_context(domain2, domain1)
-        if relationship_context == 'exact_match':
-            context_bonus = 0.0  # This shouldn't happen, but just in case
-        elif relationship_context == 'subdomain':
-            context_bonus = 0.1  # High confidence for subdomains
-        elif relationship_context == 'parent_domain':
-            context_bonus = 0.05  # Medium confidence for parent domains
-        else:
-            context_bonus = 0.0  # Related domains get base confidence
-
-        # Adjust confidence based on shared certificates
-        if shared_certificates:
-            shared_count = len(shared_certificates)
-            if shared_count >= 3:
-                shared_bonus = 0.1
-            elif shared_count >= 2:
-                shared_bonus = 0.05
-            else:
-                shared_bonus = 0.02
-
-            # Additional bonus for valid shared certificates
-            valid_shared = sum(1 for cert in shared_certificates if cert.get('is_currently_valid'))
-            if valid_shared > 0:
-                validity_bonus = 0.05
-            else:
-                validity_bonus = 0.0
-        else:
-            # Even without shared certificates, domains found in the same query have some relationship
-            shared_bonus = 0.0
-            validity_bonus = 0.0
-
-        # Adjust confidence based on certificate issuer reputation (if shared certificates exist)
-        issuer_bonus = 0.0
+        if relationship_context == 'subdomain': context_bonus = 0.1
+        elif relationship_context == 'parent_domain': context_bonus = 0.05
         if shared_certificates:
+            if len(shared_certificates) >= 3: shared_bonus = 0.1
+            elif len(shared_certificates) >= 2: shared_bonus = 0.05
+            else: shared_bonus = 0.02
+            if any(cert.get('is_currently_valid') for cert in shared_certificates): validity_bonus = 0.05
             for cert in shared_certificates:
-                issuer = cert.get('issuer_name', '').lower()
-                if any(trusted_ca in issuer for trusted_ca in ['let\'s encrypt', 'digicert', 'sectigo', 'globalsign']):
+                if any(ca in cert.get('issuer_name', '').lower() for ca in ['let\'s encrypt', 'digicert', 'sectigo', 'globalsign']):
                     issuer_bonus = max(issuer_bonus, 0.03)
                     break
-
-        # Calculate final confidence
-        final_confidence = base_confidence + context_bonus + shared_bonus + validity_bonus + issuer_bonus
-        return max(0.1, min(1.0, final_confidence))  # Clamp between 0.1 and 1.0
+        return max(0.1, min(1.0, base_confidence + context_bonus + shared_bonus + validity_bonus + issuer_bonus))
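Traced by hand: a subdomain sharing two currently valid certificates from a well-known CA scores 0.9 + 0.1 + 0.05 + 0.05 + 0.03 = 1.13, which the final clamp reduces to 1.0. The same arithmetic as a standalone sketch:

# Standalone restatement of the scoring arithmetic; not the provider method itself.
base, context_bonus = 0.9, 0.1             # subdomain context
shared_bonus, validity_bonus = 0.05, 0.05  # two shared certs, at least one valid
issuer_bonus = 0.03                        # issued by a well-known CA
score = max(0.1, min(1.0, base + context_bonus + shared_bonus + validity_bonus + issuer_bonus))
assert score == 1.0  # 1.13 clamped into [0.1, 1.0]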
     def _determine_relationship_context(self, cert_domain: str, query_domain: str) -> str:
-        """
-        Determine the context of the relationship between certificate domain and query domain.
-
-        Args:
-            cert_domain: Domain found in certificate
-            query_domain: Original query domain
-
-        Returns:
-            String describing the relationship context
-        """
-        if cert_domain == query_domain:
-            return 'exact_match'
-        elif cert_domain.endswith(f'.{query_domain}'):
-            return 'subdomain'
-        elif query_domain.endswith(f'.{cert_domain}'):
-            return 'parent_domain'
-        else:
-            return 'related_domain'
+        if cert_domain == query_domain: return 'exact_match'
+        if cert_domain.endswith(f'.{query_domain}'): return 'subdomain'
+        if query_domain.endswith(f'.{cert_domain}'): return 'parent_domain'
+        return 'related_domain'
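Because the checks prepend a dot before calling endswith(), a name like `badexample.com` is not misclassified as a subdomain of `example.com`. A standalone restatement with expected outputs:

def determine_relationship_context(cert_domain: str, query_domain: str) -> str:
    # Standalone restatement of the method above, for illustration only.
    if cert_domain == query_domain: return 'exact_match'
    if cert_domain.endswith(f'.{query_domain}'): return 'subdomain'
    if query_domain.endswith(f'.{cert_domain}'): return 'parent_domain'
    return 'related_domain'

assert determine_relationship_context('mail.example.com', 'example.com') == 'subdomain'
assert determine_relationship_context('example.com', 'mail.example.com') == 'parent_domain'
assert determine_relationship_context('badexample.com', 'example.com') == 'related_domain'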
     def query_ip(self, ip: str) -> List[Tuple[str, str, str, float, Dict[str, Any]]]:
-        """
-        Query crt.sh for certificates containing the IP address.
-        Note: crt.sh doesn't typically index by IP, so this returns empty results.
-
-        Args:
-            ip: IP address to investigate
-
-        Returns:
-            Empty list (crt.sh doesn't support IP-based certificate queries effectively)
-        """
-        # crt.sh doesn't effectively support IP-based certificate queries
         return []
     def _extract_domains_from_certificate(self, cert_data: Dict[str, Any]) -> Set[str]:
-        """
-        Extract all domains from certificate data.
-
-        Args:
-            cert_data: Certificate data from crt.sh API
-
-        Returns:
-            Set of unique domain names found in the certificate
-        """
         domains = set()
-
-        # Extract from common name
-        common_name = cert_data.get('common_name', '')
-        if common_name:
-            cleaned_cn = self._clean_domain_name(common_name)
-            if cleaned_cn:
-                domains.update(cleaned_cn)
-
-        # Extract from name_value field (contains SANs)
-        name_value = cert_data.get('name_value', '')
-        if name_value:
-            # Split by newlines and clean each domain
-            for line in name_value.split('\n'):
-                cleaned_domains = self._clean_domain_name(line.strip())
-                if cleaned_domains:
-                    domains.update(cleaned_domains)
-
+        if cn := cert_data.get('common_name'):
+            if cleaned := self._clean_domain_name(cn):
+                domains.update(cleaned)
+        if nv := cert_data.get('name_value'):
+            for line in nv.split('\n'):
+                if cleaned := self._clean_domain_name(line.strip()):
+                    domains.update(cleaned)
         return domains
     def _clean_domain_name(self, domain_name: str) -> List[str]:
-        """
-        Clean and normalize domain name from certificate data.
-        Now returns a list to handle wildcards correctly.
-        """
-        if not domain_name:
-            return []
-
-        domain = domain_name.strip().lower()
-
-        # Remove protocol if present
-        if domain.startswith(('http://', 'https://')):
-            domain = domain.split('://', 1)[1]
-
-        # Remove path if present
-        if '/' in domain:
-            domain = domain.split('/', 1)[0]
-
-        # Remove port if present
-        if ':' in domain and not domain.count(':') > 1:  # Avoid breaking IPv6
-            domain = domain.split(':', 1)[0]
-
-        # Handle wildcard domains
-        cleaned_domains = []
-        if domain.startswith('*.'):
-            # Add both the wildcard and the base domain
-            cleaned_domains.append(domain)
-            cleaned_domains.append(domain[2:])
-        else:
-            cleaned_domains.append(domain)
-
-        # Remove any remaining invalid characters and validate
+        if not domain_name: return []
+        domain = domain_name.strip().lower().split('://', 1)[-1].split('/', 1)[0]
+        if ':' in domain and not domain.count(':') > 1: domain = domain.split(':', 1)[0]
+        cleaned_domains = [domain, domain[2:]] if domain.startswith('*.') else [domain]
         final_domains = []
         for d in cleaned_domains:
             d = re.sub(r'[^\w\-\.]', '', d)
             if d and not d.startswith(('.', '-')) and not d.endswith(('.', '-')):
                 final_domains.append(d)
-
         return [d for d in final_domains if _is_valid_domain(d)]
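One subtlety in the wildcard path: the list briefly holds both `*.example.com` and `example.com`, but `re.sub` then strips the `*`, leaving `.example.com`, which fails the leading-character check, so only the base domain survives. Traced step by step with an illustrative SAN value:

import re

# Step-by-step trace of the cleaning above for a wildcard SAN (illustrative input).
domain = '*.example.com'.strip().lower().split('://', 1)[-1].split('/', 1)[0]
cleaned = [domain, domain[2:]] if domain.startswith('*.') else [domain]

final = []
for d in cleaned:
    d = re.sub(r'[^\w\-\.]', '', d)  # '*.example.com' -> '.example.com'
    if d and not d.startswith(('.', '-')) and not d.endswith(('.', '-')):
        final.append(d)
assert final == ['example.com']  # the wildcard form is filtered out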
@@ -1,7 +1,6 @@
 # dnsrecon/providers/dns_provider.py
 
-import dns.resolver
-import dns.reversename
+from dns import resolver, reversename
 from typing import List, Dict, Any, Tuple
 from .base_provider import BaseProvider
 from utils.helpers import _is_valid_ip, _is_valid_domain
@@ -13,7 +12,7 @@ class DNSProvider(BaseProvider):
     Now uses session-specific configuration.
     """
 
-    def __init__(self, session_config=None):
+    def __init__(self, name=None, session_config=None):
         """Initialize DNS provider with session-specific configuration."""
         super().__init__(
             name="dns",
@@ -23,7 +22,7 @@ class DNSProvider(BaseProvider):
         )
 
         # Configure DNS resolver
-        self.resolver = dns.resolver.Resolver()
+        self.resolver = resolver.Resolver()
         self.resolver.timeout = 5
         self.resolver.lifetime = 10
         #self.resolver.nameservers = ['127.0.0.1']
@@ -51,12 +50,7 @@ class DNSProvider(BaseProvider):
     def query_domain(self, domain: str) -> List[Tuple[str, str, str, float, Dict[str, Any]]]:
         """
         Query DNS records for the domain to discover relationships.
-
-        Args:
-            domain: Domain to investigate
-
-        Returns:
-            List of relationships discovered from DNS analysis
+        ...
         """
         if not _is_valid_domain(domain):
             return []
@@ -65,7 +59,15 @@ class DNSProvider(BaseProvider):
 
         # Query all record types
         for record_type in ['A', 'AAAA', 'CNAME', 'MX', 'NS', 'SOA', 'TXT', 'SRV', 'CAA']:
-            relationships.extend(self._query_record(domain, record_type))
+            try:
+                relationships.extend(self._query_record(domain, record_type))
+            except resolver.NoAnswer:
+                # This is not an error, just a confirmation that the record doesn't exist.
+                self.logger.logger.debug(f"No {record_type} record found for {domain}")
+            except Exception as e:
+                self.failed_requests += 1
+                self.logger.logger.debug(f"{record_type} record query failed for {domain}: {e}")
+                # Optionally, you might want to re-raise other, more serious exceptions.
 
         return relationships
 
@@ -87,7 +89,7 @@ class DNSProvider(BaseProvider):
         try:
             # Perform reverse DNS lookup
             self.total_requests += 1
-            reverse_name = dns.reversename.from_address(ip)
+            reverse_name = reversename.from_address(ip)
             response = self.resolver.resolve(reverse_name, 'PTR')
             self.successful_requests += 1
 
@@ -119,9 +121,14 @@ class DNSProvider(BaseProvider):
                 discovery_method="reverse_dns_lookup"
             )
 
+        except resolver.NXDOMAIN:
+            self.failed_requests += 1
+            self.logger.logger.debug(f"Reverse DNS lookup failed for {ip}: NXDOMAIN")
         except Exception as e:
            self.failed_requests += 1
            self.logger.logger.debug(f"Reverse DNS lookup failed for {ip}: {e}")
+            # Re-raise the exception so the scanner can handle the failure
+            raise e
 
         return relationships
 
@@ -185,5 +192,7 @@ class DNSProvider(BaseProvider):
         except Exception as e:
             self.failed_requests += 1
             self.logger.logger.debug(f"{record_type} record query failed for {domain}: {e}")
+            # Re-raise the exception so the scanner can handle it
+            raise e
 
         return relationships
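The two dnspython exception types separated here carry different meanings: resolver.NoAnswer says the name exists but has no records of the requested type, while resolver.NXDOMAIN says the name does not exist at all, which is why the first is logged as a non-error and the second counts as a failed request. A minimal standalone sketch of that distinction (not the provider class itself):

from dns import resolver

def lookup(name: str, record_type: str) -> list:
    # NoAnswer: the name resolves but lacks this record type -- treat as empty, not an error.
    # NXDOMAIN: the name does not exist -- left uncaught here so the caller records a failure.
    res = resolver.Resolver()
    try:
        return [str(rdata) for rdata in res.resolve(name, record_type)]
    except resolver.NoAnswer:
        return []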
@@ -1,7 +1,4 @@
-"""
-Shodan provider for DNSRecon.
-Discovers IP relationships and infrastructure context through Shodan API.
-"""
+# dnsrecon/providers/shodan_provider.py
 
 import json
 from typing import List, Dict, Any, Tuple
@@ -15,7 +12,7 @@ class ShodanProvider(BaseProvider):
     Now uses session-specific API keys.
     """
 
-    def __init__(self, session_config=None):
+    def __init__(self, name=None, session_config=None):
         """Initialize Shodan provider with session-specific configuration."""
         super().__init__(
             name="shodan",
@@ -6,4 +6,6 @@ Werkzeug>=2.3.7
 urllib3>=2.0.0
 dnspython>=2.4.2
 gunicorn
 redis
+python-dotenv
+psycopg2-binary
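Both additions fit the branch's direction: python-dotenv reads a local `.env` file into the process environment at startup, and psycopg2-binary supplies the PostgreSQL client driver. A minimal sketch of the dotenv side (the key name is illustrative; use whatever your `.env` defines):

import os
from dotenv import load_dotenv  # provided by the new python-dotenv dependency

load_dotenv()  # reads key=value pairs from .env into os.environ, if the file exists
api_key = os.environ.get('SHODAN_API_KEY', '')  # illustrative key name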
@@ -272,8 +272,24 @@ input[type="text"]:focus, select:focus {
     text-shadow: 0 0 3px rgba(0, 255, 65, 0.3);
 }
 
+.progress-container {
+    padding: 0 1.5rem 1.5rem;
+}
+
+.progress-info {
+    display: flex;
+    justify-content: space-between;
+    font-size: 0.8rem;
+    color: #999;
+    margin-bottom: 0.5rem;
+}
+
+#progress-compact {
+    color: #00ff41;
+    font-weight: 500;
+}
+
 .progress-bar {
-    margin: 1rem 1.5rem;
     height: 8px;
     background-color: #1a1a1a;
     border: 1px solid #444;
@@ -517,7 +533,7 @@ input[type="text"]:focus, select:focus {
     color: #e0e0e0;
 }
 
-.provider-stats {
+.provider-stats, .provider-task-stats {
     font-size: 0.8rem;
     color: #999;
     display: grid;
@@ -526,6 +542,13 @@ input[type="text"]:focus, select:focus {
     margin-top: 0.5rem;
 }
 
+.provider-task-stats {
+    border-top: 1px solid #333;
+    padding-top: 0.5rem;
+    margin-top: 0.5rem;
+}
+
+
 .provider-stat {
     display: flex;
     justify-content: space-between;
@@ -216,12 +216,8 @@ class GraphManager {
             }
         });
 
-        // FIX: Comment out the problematic context menu handler
         this.network.on('oncontext', (params) => {
             params.event.preventDefault();
-            // if (params.nodes.length > 0) {
-            //     this.showNodeContextMenu(params.pointer.DOM, params.nodes[0]);
-            // }
         });
 
         // Stabilization events with progress
@@ -380,7 +376,7 @@ class GraphManager {
                 // Single correlation value
                 const value = Array.isArray(values) && values.length > 0 ? values[0] : (metadata.value || 'Unknown');
                 const displayValue = typeof value === 'string' && value.length > 20 ? value.substring(0, 17) + '...' : value;
-                processedNode.label = `Corr: ${displayValue}`;
+                processedNode.label = `${displayValue}`;
                 processedNode.title = `Correlation: ${value}`;
             }
         }
@@ -1,7 +1,6 @@
 /**
  * Main application logic for DNSRecon web interface
  * Handles UI interactions, API communication, and data flow
- * DEBUG VERSION WITH EXTRA LOGGING
  */
 
 class DNSReconApp {
@@ -61,9 +60,8 @@
             scanStatus: document.getElementById('scan-status'),
             targetDisplay: document.getElementById('target-display'),
             depthDisplay: document.getElementById('depth-display'),
-            progressDisplay: document.getElementById('progress-display'),
-            indicatorsDisplay: document.getElementById('indicators-display'),
             relationshipsDisplay: document.getElementById('relationships-display'),
+            progressCompact: document.getElementById('progress-compact'),
             progressFill: document.getElementById('progress-fill'),
 
             // Provider elements
@@ -447,7 +445,7 @@
             // Handle status changes
             if (status.status !== this.scanStatus) {
                 console.log(`*** STATUS CHANGED: ${this.scanStatus} -> ${status.status} ***`);
-                this.handleStatusChange(status.status);
+                this.handleStatusChange(status.status, status.task_queue_size);
             }
 
             this.scanStatus = status.status;
@@ -542,17 +540,19 @@
             if (this.elements.depthDisplay) {
                 this.elements.depthDisplay.textContent = `${status.current_depth}/${status.max_depth}`;
             }
-            if (this.elements.progressDisplay) {
-                this.elements.progressDisplay.textContent = `${status.progress_percentage.toFixed(1)}%`;
-            }
-            if (this.elements.indicatorsDisplay) {
-                this.elements.indicatorsDisplay.textContent = status.indicators_processed || 0;
-            }
 
-            // Update progress bar with smooth animation
+            // Update progress bar and compact display
             if (this.elements.progressFill) {
-                this.elements.progressFill.style.width = `${status.progress_percentage}%`;
+                const completed = status.indicators_completed || 0;
+                const enqueued = status.task_queue_size || 0;
+                const totalTasks = completed + enqueued;
+                const progressPercentage = totalTasks > 0 ? (completed / totalTasks) * 100 : 0;
+
+                this.elements.progressFill.style.width = `${progressPercentage}%`;
+                if (this.elements.progressCompact) {
+                    this.elements.progressCompact.textContent = `${completed}/${totalTasks} - ${Math.round(progressPercentage)}%`;
+                }
 
                 // Add pulsing animation for active scans
                 if (status.status === 'running') {
                     this.elements.progressFill.parentElement.classList.add('scanning');
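The progress model here is task-based: completed indicators divided by completed plus still-enqueued tasks, guarded against division by zero. Restated as a standalone helper (Python for brevity; the UI code above does the same in JavaScript):

def progress_percentage(completed: int, enqueued: int) -> float:
    # completed / (completed + enqueued), or 0 before any tasks exist.
    total = completed + enqueued
    return (completed / total) * 100 if total > 0 else 0.0

assert progress_percentage(30, 10) == 75.0  # 30 done, 10 still queued
assert progress_percentage(0, 0) == 0.0     # avoids division by zero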
@@ -574,6 +574,8 @@
                 this.elements.sessionId.textContent = 'Session: Loading...';
             }
         }
+
+        this.setUIState(status.status, status.task_queue_size);
 
         console.log('Status display updated successfully');
     } catch (error) {
@@ -585,12 +587,12 @@
      * Handle status changes with improved state synchronization
      * @param {string} newStatus - New scan status
      */
-    handleStatusChange(newStatus) {
+    handleStatusChange(newStatus, task_queue_size) {
         console.log(`=== STATUS CHANGE: ${this.scanStatus} -> ${newStatus} ===`);
 
         switch (newStatus) {
             case 'running':
-                this.setUIState('scanning');
+                this.setUIState('scanning', task_queue_size);
                 this.showSuccess('Scan is running');
                 // Increase polling frequency for active scans
                 this.startPolling(1000); // Poll every 1 second for running scans
@@ -598,7 +600,7 @@
                 break;
 
             case 'completed':
-                this.setUIState('completed');
+                this.setUIState('completed', task_queue_size);
                 this.stopPolling();
                 this.showSuccess('Scan completed successfully');
                 this.updateConnectionStatus('completed');
@@ -609,7 +611,7 @@
                 break;
 
             case 'failed':
-                this.setUIState('failed');
+                this.setUIState('failed', task_queue_size);
                 this.stopPolling();
                 this.showError('Scan failed');
                 this.updateConnectionStatus('error');
@@ -617,7 +619,7 @@
                 break;
 
             case 'stopped':
-                this.setUIState('stopped');
+                this.setUIState('stopped', task_queue_size);
                 this.stopPolling();
                 this.showSuccess('Scan stopped');
                 this.updateConnectionStatus('stopped');
@@ -625,7 +627,7 @@
                 break;
 
             case 'idle':
-                this.setUIState('idle');
+                this.setUIState('idle', task_queue_size);
                 this.stopPolling();
                 this.updateConnectionStatus('idle');
                 break;
@@ -670,9 +672,11 @@
     /**
      * UI state management with immediate button updates
      */
-    setUIState(state) {
+    setUIState(state, task_queue_size) {
         console.log(`Setting UI state to: ${state}`);
 
+        const isQueueEmpty = task_queue_size === 0;
+
         switch (state) {
             case 'scanning':
                 this.isScanning = true;
@@ -701,12 +705,12 @@
             case 'stopped':
                 this.isScanning = false;
                 if (this.elements.startScan) {
-                    this.elements.startScan.disabled = false;
+                    this.elements.startScan.disabled = !isQueueEmpty;
                     this.elements.startScan.classList.remove('loading');
                     this.elements.startScan.innerHTML = '<span class="btn-icon">[RUN]</span><span>Start Reconnaissance</span>';
                 }
                 if (this.elements.addToGraph) {
-                    this.elements.addToGraph.disabled = false;
+                    this.elements.addToGraph.disabled = !isQueueEmpty;
                     this.elements.addToGraph.classList.remove('loading');
                 }
                 if (this.elements.stopScan) {
@@ -90,22 +90,20 @@
                         <span class="status-label">Depth:</span>
                         <span id="depth-display" class="status-value">0/0</span>
                     </div>
-                    <div class="status-row">
-                        <span class="status-label">Progress:</span>
-                        <span id="progress-display" class="status-value">0%</span>
-                    </div>
-                    <div class="status-row">
-                        <span class="status-label">Indicators:</span>
-                        <span id="indicators-display" class="status-value">0</span>
-                    </div>
                     <div class="status-row">
                         <span class="status-label">Relationships:</span>
                         <span id="relationships-display" class="status-value">0</span>
                     </div>
                 </div>
 
-                <div class="progress-bar">
-                    <div id="progress-fill" class="progress-fill"></div>
+                <div class="progress-container">
+                    <div class="progress-info">
+                        <span id="progress-label">Progress:</span>
+                        <span id="progress-compact">0/0</span>
+                    </div>
+                    <div class="progress-bar">
+                        <div id="progress-fill" class="progress-fill"></div>
+                    </div>
                 </div>
             </section>