data-model #2
@@ -25,10 +25,10 @@ DEFAULT_RECURSION_DEPTH=2
 # Default timeout for provider API requests in seconds.
 DEFAULT_TIMEOUT=30
 # The number of concurrent provider requests to make.
-MAX_CONCURRENT_REQUESTS=5
+MAX_CONCURRENT_REQUESTS=1
 # The number of results from a provider that triggers the "large entity" grouping.
 LARGE_ENTITY_THRESHOLD=100
 # The number of times to retry a target if a provider fails.
 MAX_RETRIES_PER_TARGET=8
 # How long cached provider responses are stored (in hours).
-CACHE_EXPIRY_HOURS=12
+CACHE_TIMEOUT_HOURS=12
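[Reviewer sketch, not part of the diff] The CACHE_EXPIRY_HOURS to CACHE_TIMEOUT_HOURS rename only takes effect because Config.load_from_env (see the config.py hunk further down) reads the new name; a stale .env entry using the old name is silently ignored and the in-code default wins. A minimal illustration of that lookup pattern, with _ConfigSketch standing in for the real Config class:

    import os

    class _ConfigSketch:
        def __init__(self):
            self.cache_timeout_hours = 6  # in-code default used when the env var is unset

        def load_from_env(self):
            # os.getenv returns a string, hence the int() cast, mirroring config.py
            self.cache_timeout_hours = int(os.getenv('CACHE_TIMEOUT_HOURS', self.cache_timeout_hours))

    cfg = _ConfigSketch()
    cfg.load_from_env()  # with CACHE_TIMEOUT_HOURS=12 exported, this becomes 12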
473  app.py
@@ -10,46 +10,63 @@ import traceback
 from flask import Flask, render_template, request, jsonify, send_file, session
 from datetime import datetime, timezone, timedelta
 import io
 import os

 from core.session_manager import session_manager
 from config import config
 from core.graph_manager import NodeType
 from utils.helpers import is_valid_target
+from decimal import Decimal


 app = Flask(__name__)
 # Use centralized configuration for Flask settings
 app.config['SECRET_KEY'] = config.flask_secret_key
 app.config['PERMANENT_SESSION_LIFETIME'] = timedelta(hours=config.flask_permanent_session_lifetime_hours)

 def get_user_scanner():
     """
-    Retrieves the scanner for the current session, or creates a new
-    session and scanner if one doesn't exist.
+    Retrieves the scanner for the current session, or creates a new one if none exists.
     """
     # Get current Flask session info for debugging
     current_flask_session_id = session.get('dnsrecon_session_id')

     # Try to get existing session
     if current_flask_session_id:
         existing_scanner = session_manager.get_session(current_flask_session_id)
         if existing_scanner:
             return current_flask_session_id, existing_scanner

     # Create new session if none exists
     print("Creating new session as none was found...")
     new_session_id = session_manager.create_session()
     new_scanner = session_manager.get_session(new_session_id)

     if not new_scanner:
         raise Exception("Failed to create new scanner session")

     # Store in Flask session
     session['dnsrecon_session_id'] = new_session_id
     session.permanent = True

     return new_session_id, new_scanner

+class CustomJSONEncoder(json.JSONEncoder):
+    """Custom JSON encoder to handle non-serializable objects."""
+
+    def default(self, obj):
+        if isinstance(obj, datetime):
+            return obj.isoformat()
+        elif isinstance(obj, set):
+            return list(obj)
+        elif isinstance(obj, Decimal):
+            return float(obj)
+        elif hasattr(obj, '__dict__'):
+            # For custom objects, try to serialize their dict representation
+            try:
+                return obj.__dict__
+            except:
+                return str(obj)
+        elif hasattr(obj, 'value') and hasattr(obj, 'name'):
+            # For enum objects
+            return obj.value
+        else:
+            # For any other non-serializable object, convert to string
+            return str(obj)

 @app.route('/')
 def index():
     """Serve the main web interface."""
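[Reviewer sketch, not part of the diff] A quick check of what the new CustomJSONEncoder converts, assuming app.py is importable as a module named app:

    import json
    from datetime import datetime, timezone
    from decimal import Decimal
    from app import CustomJSONEncoder  # assumption: app.py is on the import path

    payload = {
        'when': datetime.now(timezone.utc),  # datetime -> ISO 8601 string
        'ports': {80, 443},                  # set -> list
        'score': Decimal('0.75'),            # Decimal -> float
    }
    print(json.dumps(payload, cls=CustomJSONEncoder, indent=2))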
@@ -59,11 +76,8 @@ def index():
 @app.route('/api/scan/start', methods=['POST'])
 def start_scan():
     """
-    Start a new reconnaissance scan. Creates a new isolated scanner if
-    clear_graph is true, otherwise adds to the existing one.
+    Starts a new reconnaissance scan.
     """
     print("=== API: /api/scan/start called ===")

     try:
         data = request.get_json()
         if not data or 'target' not in data:
@@ -72,47 +86,28 @@ def start_scan():
         target = data['target'].strip()
         max_depth = data.get('max_depth', config.default_recursion_depth)
         clear_graph = data.get('clear_graph', True)
-        force_rescan_target = data.get('force_rescan_target', None)  # **FIX**: Get the new parameter
+        force_rescan_target = data.get('force_rescan_target', None)

         print(f"Parsed - target: '{target}', max_depth: {max_depth}, clear_graph: {clear_graph}, force_rescan: {force_rescan_target}")

         # Validation
         if not target:
             return jsonify({'success': False, 'error': 'Target cannot be empty'}), 400
         if not is_valid_target(target):
-            return jsonify({'success': False, 'error': 'Invalid target format. Please enter a valid domain or IP address.'}), 400
+            return jsonify({'success': False, 'error': 'Invalid target format.'}), 400
         if not isinstance(max_depth, int) or not 1 <= max_depth <= 5:
             return jsonify({'success': False, 'error': 'Max depth must be an integer between 1 and 5'}), 400

         user_session_id, scanner = None, None

         if clear_graph:
             print("Clear graph requested: Creating a new, isolated scanner session.")
             old_session_id = session.get('dnsrecon_session_id')
             if old_session_id:
                 session_manager.terminate_session(old_session_id)

             user_session_id = session_manager.create_session()
             session['dnsrecon_session_id'] = user_session_id
             session.permanent = True
             scanner = session_manager.get_session(user_session_id)
         else:
             print("Adding to existing graph: Reusing the current scanner session.")
             user_session_id, scanner = get_user_scanner()

         if not scanner:
-            return jsonify({'success': False, 'error': 'Failed to get or create a scanner instance.'}), 500
+            return jsonify({'success': False, 'error': 'Failed to get scanner instance.'}), 500

         print(f"Using scanner {id(scanner)} in session {user_session_id}")

-        success = scanner.start_scan(target, max_depth, clear_graph=clear_graph, force_rescan_target=force_rescan_target)  # **FIX**: Pass the new parameter
+        success = scanner.start_scan(target, max_depth, clear_graph=clear_graph, force_rescan_target=force_rescan_target)

         if success:
             return jsonify({
                 'success': True,
                 'message': 'Scan started successfully',
                 'scan_id': scanner.logger.session_id,
-                'user_session_id': user_session_id,
+                'user_session_id': user_session_id
             })
         else:
             return jsonify({
@@ -121,170 +116,98 @@ def start_scan():
             }), 409

     except Exception as e:
         print(f"ERROR: Exception in start_scan endpoint: {e}")
         traceback.print_exc()
         return jsonify({'success': False, 'error': f'Internal server error: {str(e)}'}), 500
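[Reviewer sketch, not part of the diff] Exercising the endpoint as a client; requests.Session keeps the Flask session cookie so follow-up calls reach the same scanner. Host and port are assumptions:

    import requests

    s = requests.Session()  # retains the dnsrecon_session_id cookie between calls
    resp = s.post(
        'http://127.0.0.1:5000/api/scan/start',
        json={'target': 'example.com', 'max_depth': 2, 'clear_graph': True},
    )
    print(resp.status_code, resp.json())  # 200 with scan_id on success; 400 on bad input, 409 on conflict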

 @app.route('/api/scan/stop', methods=['POST'])
 def stop_scan():
-    """Stop the current scan with immediate GUI feedback."""
-    print("=== API: /api/scan/stop called ===")
-
+    """Stop the current scan."""
     try:
         # Get user-specific scanner
         user_session_id, scanner = get_user_scanner()
-        print(f"Stopping scan for session: {user_session_id}")

         if not scanner:
-            return jsonify({
-                'success': False,
-                'error': 'No scanner found for session'
-            }), 404
+            return jsonify({'success': False, 'error': 'No scanner found for session'}), 404

         # Ensure session ID is set
         if not scanner.session_id:
             scanner.session_id = user_session_id

-        # Use the stop mechanism
-        success = scanner.stop_scan()
-
-        # Also set the Redis stop signal directly for extra reliability
+        scanner.stop_scan()
         session_manager.set_stop_signal(user_session_id)

         # Force immediate status update
         session_manager.update_scanner_status(user_session_id, 'stopped')

         # Update the full scanner state
         session_manager.update_session_scanner(user_session_id, scanner)

-        print(f"Stop scan completed. Success: {success}, Scanner status: {scanner.status}")
-
         return jsonify({
             'success': True,
-            'message': 'Scan stop requested - termination initiated',
-            'user_session_id': user_session_id,
-            'scanner_status': scanner.status,
-            'stop_method': 'cross_process'
+            'message': 'Scan stop requested',
+            'user_session_id': user_session_id
         })

     except Exception as e:
         print(f"ERROR: Exception in stop_scan endpoint: {e}")
         traceback.print_exc()
-        return jsonify({
-            'success': False,
-            'error': f'Internal server error: {str(e)}'
-        }), 500
+        return jsonify({'success': False, 'error': f'Internal server error: {str(e)}'}), 500


 @app.route('/api/scan/status', methods=['GET'])
 def get_scan_status():
-    """Get current scan status with error handling."""
+    """Get current scan status."""
     try:
         # Get user-specific scanner
         user_session_id, scanner = get_user_scanner()

         if not scanner:
             # Return default idle status if no scanner
             return jsonify({
                 'success': True,
                 'status': {
-                    'status': 'idle',
-                    'target_domain': None,
-                    'current_depth': 0,
-                    'max_depth': 0,
-                    'current_indicator': '',
-                    'total_indicators_found': 0,
-                    'indicators_processed': 0,
-                    'progress_percentage': 0.0,
-                    'enabled_providers': [],
-                    'graph_statistics': {},
+                    'status': 'idle', 'target_domain': None, 'current_depth': 0,
+                    'max_depth': 0, 'progress_percentage': 0.0,
                     'user_session_id': user_session_id
                 }
             })

         # Ensure session ID is set
         if not scanner.session_id:
             scanner.session_id = user_session_id

         status = scanner.get_scan_status()
         status['user_session_id'] = user_session_id

-        # Additional debug info
-        status['debug_info'] = {
-            'scanner_object_id': id(scanner),
-            'session_id_set': bool(scanner.session_id),
-            'has_scan_thread': bool(scanner.scan_thread and scanner.scan_thread.is_alive())
-        }
-
-        return jsonify({
-            'success': True,
-            'status': status
-        })
+        return jsonify({'success': True, 'status': status})

     except Exception as e:
         print(f"ERROR: Exception in get_scan_status endpoint: {e}")
         traceback.print_exc()
         return jsonify({
-            'success': False,
-            'error': f'Internal server error: {str(e)}',
-            'fallback_status': {
-                'status': 'error',
-                'target_domain': None,
-                'current_depth': 0,
-                'max_depth': 0,
-                'progress_percentage': 0.0
-            }
+            'success': False, 'error': f'Internal server error: {str(e)}',
+            'fallback_status': {'status': 'error', 'progress_percentage': 0.0}
         }), 500

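[Reviewer sketch, not part of the diff] Continuing the client sketch above: polling the slimmed-down status payload until the scan leaves the 'running' state (the exact set of status strings is an assumption):

    import time

    while True:
        status = s.get('http://127.0.0.1:5000/api/scan/status').json()['status']
        print(status['status'], status.get('progress_percentage'))
        if status['status'] != 'running':
            break
        time.sleep(2)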

 @app.route('/api/graph', methods=['GET'])
 def get_graph_data():
-    """Get current graph data with error handling."""
+    """Get current graph data."""
     try:
         # Get user-specific scanner
         user_session_id, scanner = get_user_scanner()

-        if not scanner:
-            # Return empty graph if no scanner
-            return jsonify({
-                'success': True,
-                'graph': {
-                    'nodes': [],
-                    'edges': [],
-                    'statistics': {
-                        'node_count': 0,
-                        'edge_count': 0,
-                        'creation_time': datetime.now(timezone.utc).isoformat(),
-                        'last_modified': datetime.now(timezone.utc).isoformat()
-                    }
-                },
-                'user_session_id': user_session_id
-            })
-
-        graph_data = scanner.get_graph_data()
-        return jsonify({
-            'success': True,
-            'graph': graph_data,
-            'user_session_id': user_session_id
-        })
-
-    except Exception as e:
-        print(f"ERROR: Exception in get_graph_data endpoint: {e}")
-        traceback.print_exc()
-        return jsonify({
-            'success': False,
-            'error': f'Internal server error: {str(e)}',
-            'fallback_graph': {
-                'nodes': [],
-                'edges': []
-            }
-        }), 500
+        empty_graph = {
+            'nodes': [], 'edges': [],
+            'statistics': {'node_count': 0, 'edge_count': 0}
+        }
+
+        if not scanner:
+            return jsonify({'success': True, 'graph': empty_graph, 'user_session_id': user_session_id})
+
+        graph_data = scanner.get_graph_data() or empty_graph
+
+        return jsonify({'success': True, 'graph': graph_data, 'user_session_id': user_session_id})
+
+    except Exception as e:
+        traceback.print_exc()
+        return jsonify({
+            'success': False, 'error': f'Internal server error: {str(e)}',
+            'fallback_graph': {'nodes': [], 'edges': [], 'statistics': {}}
+        }), 500

 @app.route('/api/graph/large-entity/extract', methods=['POST'])
 def extract_from_large_entity():
-    """Extract a node from a large entity, making it a standalone node."""
+    """Extract a node from a large entity."""
     try:
         data = request.get_json()
         large_entity_id = data.get('large_entity_id')
@@ -306,13 +229,12 @@ def extract_from_large_entity():
         return jsonify({'success': False, 'error': f'Failed to extract node {node_id}.'}), 500

     except Exception as e:
         print(f"ERROR: Exception in extract_from_large_entity endpoint: {e}")
         traceback.print_exc()
         return jsonify({'success': False, 'error': f'Internal server error: {str(e)}'}), 500

 @app.route('/api/graph/node/<node_id>', methods=['DELETE'])
 def delete_graph_node(node_id):
-    """Delete a node from the graph for the current user session."""
+    """Delete a node from the graph."""
     try:
         user_session_id, scanner = get_user_scanner()
         if not scanner:
@@ -321,14 +243,12 @@ def delete_graph_node(node_id):
         success = scanner.graph.remove_node(node_id)

         if success:
             # Persist the change
             session_manager.update_session_scanner(user_session_id, scanner)
             return jsonify({'success': True, 'message': f'Node {node_id} deleted successfully.'})
         else:
-            return jsonify({'success': False, 'error': f'Node {node_id} not found in graph.'}), 404
+            return jsonify({'success': False, 'error': f'Node {node_id} not found.'}), 404

     except Exception as e:
         print(f"ERROR: Exception in delete_graph_node endpoint: {e}")
         traceback.print_exc()
         return jsonify({'success': False, 'error': f'Internal server error: {str(e)}'}), 500
@@ -349,7 +269,6 @@ def revert_graph_action():
         action_data = data['data']

         if action_type == 'delete':
-            # Re-add the node
             node_to_add = action_data.get('node')
             if node_to_add:
                 scanner.graph.add_node(
@@ -360,56 +279,73 @@ def revert_graph_action():
                     metadata=node_to_add.get('metadata')
                 )

             # Re-add the edges
             edges_to_add = action_data.get('edges', [])
             for edge in edges_to_add:
                 # Add edge only if both nodes exist to prevent errors
                 if scanner.graph.graph.has_node(edge['from']) and scanner.graph.graph.has_node(edge['to']):
                     scanner.graph.add_edge(
-                        source_id=edge['from'],
-                        target_id=edge['to'],
+                        source_id=edge['from'], target_id=edge['to'],
                         relationship_type=edge['metadata']['relationship_type'],
                         confidence_score=edge['metadata']['confidence_score'],
                         source_provider=edge['metadata']['source_provider'],
                         raw_data=edge.get('raw_data', {})
                     )

             # Persist the change
             session_manager.update_session_scanner(user_session_id, scanner)
             return jsonify({'success': True, 'message': 'Delete action reverted successfully.'})

         return jsonify({'success': False, 'error': f'Unknown revert action type: {action_type}'}), 400

     except Exception as e:
         print(f"ERROR: Exception in revert_graph_action endpoint: {e}")
         traceback.print_exc()
         return jsonify({'success': False, 'error': f'Internal server error: {str(e)}'}), 500


 @app.route('/api/export', methods=['GET'])
 def export_results():
-    """Export complete scan results as downloadable JSON for the user session."""
+    """Export scan results as a JSON file with improved error handling."""
     try:
         # Get user-specific scanner
         user_session_id, scanner = get_user_scanner()

-        # Get complete results
-        results = scanner.export_results()
+        if not scanner:
+            return jsonify({'success': False, 'error': 'No active scanner session found'}), 404

-        # Add session information to export
+        # Get export data with error handling
+        try:
+            results = scanner.export_results()
+        except Exception as e:
+            return jsonify({'success': False, 'error': f'Failed to gather export data: {str(e)}'}), 500
+
+        # Add export metadata
         results['export_metadata'] = {
             'user_session_id': user_session_id,
             'export_timestamp': datetime.now(timezone.utc).isoformat(),
-            'export_type': 'user_session_results'
+            'export_version': '1.0.0',
+            'forensic_integrity': 'maintained'
         }

-        # Create filename with timestamp
+        # Generate filename with forensic naming convention
         timestamp = datetime.now(timezone.utc).strftime('%Y%m%d_%H%M%S')
         target = scanner.current_target or 'unknown'
-        filename = f"dnsrecon_{target}_{timestamp}_{user_session_id[:8]}.json"
+        # Sanitize target for filename
+        safe_target = "".join(c for c in target if c.isalnum() or c in ('-', '_', '.')).rstrip()
+        filename = f"dnsrecon_{safe_target}_{timestamp}.json"

-        # Create in-memory file
-        json_data = json.dumps(results, indent=2, ensure_ascii=False)
+        # Serialize with custom encoder and error handling
+        try:
+            json_data = json.dumps(results, indent=2, cls=CustomJSONEncoder, ensure_ascii=False)
+        except Exception as e:
+            # If custom encoder fails, try a more aggressive approach
+            try:
+                # Convert problematic objects to strings recursively
+                cleaned_results = _clean_for_json(results)
+                json_data = json.dumps(cleaned_results, indent=2, ensure_ascii=False)
+            except Exception as e2:
+                return jsonify({
+                    'success': False,
+                    'error': f'JSON serialization failed: {str(e2)}'
+                }), 500
+
+        # Create file object
         file_obj = io.BytesIO(json_data.encode('utf-8'))

         return send_file(
@@ -420,71 +356,70 @@ def export_results():
         )

     except Exception as e:
         print(f"ERROR: Exception in export_results endpoint: {e}")
         traceback.print_exc()
         return jsonify({
             'success': False,
-            'error': f'Export failed: {str(e)}'
+            'error': f'Export failed: {str(e)}',
+            'error_type': type(e).__name__
         }), 500

+def _clean_for_json(obj, max_depth=10, current_depth=0):
+    """
+    Recursively clean an object to make it JSON serializable.
+    Handles circular references and problematic object types.
+    """
+    if current_depth > max_depth:
+        return f"<max_depth_exceeded_{type(obj).__name__}>"
+
+    if obj is None or isinstance(obj, (bool, int, float, str)):
+        return obj
+    elif isinstance(obj, datetime):
+        return obj.isoformat()
+    elif isinstance(obj, (set, frozenset)):
+        return list(obj)
+    elif isinstance(obj, dict):
+        cleaned = {}
+        for key, value in obj.items():
+            try:
+                # Ensure key is string
+                clean_key = str(key) if not isinstance(key, str) else key
+                cleaned[clean_key] = _clean_for_json(value, max_depth, current_depth + 1)
+            except Exception:
+                cleaned[str(key)] = f"<serialization_error_{type(value).__name__}>"
+        return cleaned
+    elif isinstance(obj, (list, tuple)):
+        cleaned = []
+        for item in obj:
+            try:
+                cleaned.append(_clean_for_json(item, max_depth, current_depth + 1))
+            except Exception:
+                cleaned.append(f"<serialization_error_{type(item).__name__}>")
+        return cleaned
+    elif hasattr(obj, '__dict__'):
+        try:
+            return _clean_for_json(obj.__dict__, max_depth, current_depth + 1)
+        except Exception:
+            return str(obj)
+    elif hasattr(obj, 'value'):
+        # For enum-like objects
+        return obj.value
+    else:
+        return str(obj)

-@app.route('/api/providers', methods=['GET'])
-def get_providers():
-    """Get information about available providers for the user session."""
-    try:
-        # Get user-specific scanner
-        user_session_id, scanner = get_user_scanner()
-
-        if scanner:
-            # Updated debug print to be consistent with the new progress bar logic
-            completed_tasks = scanner.indicators_completed
-            total_tasks = scanner.total_tasks_ever_enqueued
-            print(f"DEBUG: Task Progress - Completed: {completed_tasks}, Total Enqueued: {total_tasks}")
-        else:
-            print("DEBUG: No active scanner session found.")
-
-        provider_info = scanner.get_provider_info()
-
-        return jsonify({
-            'success': True,
-            'providers': provider_info,
-            'user_session_id': user_session_id
-        })
-
-    except Exception as e:
-        print(f"ERROR: Exception in get_providers endpoint: {e}")
-        traceback.print_exc()
-        return jsonify({
-            'success': False,
-            'error': f'Internal server error: {str(e)}'
-        }), 500
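[Reviewer sketch, not part of the diff] What the _clean_for_json fallback produces for a value the strict encoder would reject; _Opaque is a hypothetical example class:

    class _Opaque:
        def __init__(self):
            self.label = 'raw handle'
            self.nested = {'a': [{1, 2}, object()]}  # set and opaque object inside

    print(_clean_for_json(_Opaque().__dict__))
    # {'label': 'raw handle', 'nested': {'a': [[1, 2], '<object object at 0x...>']}}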
 @app.route('/api/config/api-keys', methods=['POST'])
 def set_api_keys():
-    """
-    Set API keys for providers for the user session only.
-    """
+    """Set API keys for the current session."""
     try:
         data = request.get_json()

         if data is None:
-            return jsonify({
-                'success': False,
-                'error': 'No API keys provided'
-            }), 400
+            return jsonify({'success': False, 'error': 'No API keys provided'}), 400

         # Get user-specific scanner and config
         user_session_id, scanner = get_user_scanner()
         session_config = scanner.config

         updated_providers = []

         # Iterate over the API keys provided in the request data
         for provider_name, api_key in data.items():
             # This allows us to both set and clear keys. The config
             # handles enabling/disabling based on if the key is empty.
             api_key_value = str(api_key or '').strip()
             success = session_config.set_api_key(provider_name.lower(), api_key_value)
@@ -492,60 +427,136 @@ def set_api_keys():
                 updated_providers.append(provider_name)

         if updated_providers:
             # Reinitialize scanner providers to apply the new keys
             scanner._initialize_providers()

             # Persist the updated scanner object back to the user's session
             session_manager.update_session_scanner(user_session_id, scanner)

             return jsonify({
                 'success': True,
-                'message': f'API keys updated for session {user_session_id}: {", ".join(updated_providers)}',
-                'updated_providers': updated_providers,
+                'message': f'API keys updated for: {", ".join(updated_providers)}',
                 'user_session_id': user_session_id
             })
         else:
-            return jsonify({
-                'success': False,
-                'error': 'No valid API keys were provided or provider names were incorrect.'
-            }), 400
+            return jsonify({'success': False, 'error': 'No valid API keys were provided.'}), 400

     except Exception as e:
         print(f"ERROR: Exception in set_api_keys endpoint: {e}")
         traceback.print_exc()
         return jsonify({'success': False, 'error': f'Internal server error: {str(e)}'}), 500

+@app.route('/api/providers', methods=['GET'])
+def get_providers():
+    """Get enhanced information about available providers including API key sources."""
+    try:
+        user_session_id, scanner = get_user_scanner()
+        base_provider_info = scanner.get_provider_info()
+
+        # Enhance provider info with API key source information
+        enhanced_provider_info = {}
+
+        for provider_name, info in base_provider_info.items():
+            enhanced_info = dict(info)  # Copy base info
+
+            if info['requires_api_key']:
+                # Determine API key source and configuration status
+                api_key = scanner.config.get_api_key(provider_name)
+                backend_api_key = os.getenv(f'{provider_name.upper()}_API_KEY')
+
+                if backend_api_key:
+                    # API key configured via backend/environment
+                    enhanced_info.update({
+                        'api_key_configured': True,
+                        'api_key_source': 'backend',
+                        'api_key_help': f'API key configured via environment variable {provider_name.upper()}_API_KEY'
+                    })
+                elif api_key:
+                    # API key configured via web interface
+                    enhanced_info.update({
+                        'api_key_configured': True,
+                        'api_key_source': 'frontend',
+                        'api_key_help': f'API key set via web interface (session-only)'
+                    })
+                else:
+                    # No API key configured
+                    enhanced_info.update({
+                        'api_key_configured': False,
+                        'api_key_source': None,
+                        'api_key_help': f'Requires API key to enable {info["display_name"]} integration'
+                    })
+            else:
+                # Provider doesn't require API key
+                enhanced_info.update({
+                    'api_key_configured': True,  # Always "configured" for non-API providers
+                    'api_key_source': None,
+                    'api_key_help': None
+                })
+
+            enhanced_provider_info[provider_name] = enhanced_info
+
         return jsonify({
-            'success': False,
-            'error': f'Internal server error: {str(e)}'
-        }), 500
+            'success': True,
+            'providers': enhanced_provider_info,
+            'user_session_id': user_session_id
+        })
+
+    except Exception as e:
+        traceback.print_exc()
+        return jsonify({'success': False, 'error': f'Internal server error: {str(e)}'}), 500
+
+
+@app.route('/api/config/providers', methods=['POST'])
+def configure_providers():
+    """Configure provider settings (enable/disable)."""
+    try:
+        data = request.get_json()
+        if data is None:
+            return jsonify({'success': False, 'error': 'No provider settings provided'}), 400
+
+        user_session_id, scanner = get_user_scanner()
+        session_config = scanner.config
+
+        updated_providers = []
+
+        for provider_name, settings in data.items():
+            provider_name_clean = provider_name.lower().strip()
+
+            if 'enabled' in settings:
+                # Update the enabled state in session config
+                session_config.enabled_providers[provider_name_clean] = settings['enabled']
+                updated_providers.append(provider_name_clean)
+
+        if updated_providers:
+            # Reinitialize providers with new settings
+            scanner._initialize_providers()
+            session_manager.update_session_scanner(user_session_id, scanner)
+
+            return jsonify({
+                'success': True,
+                'message': f'Provider settings updated for: {", ".join(updated_providers)}',
+                'user_session_id': user_session_id
+            })
+        else:
+            return jsonify({'success': False, 'error': 'No valid provider settings were provided.'}), 400
+
+    except Exception as e:
+        traceback.print_exc()
+        return jsonify({'success': False, 'error': f'Internal server error: {str(e)}'}), 500


 @app.errorhandler(404)
 def not_found(error):
     """Handle 404 errors."""
-    return jsonify({
-        'success': False,
-        'error': 'Endpoint not found'
-    }), 404
+    return jsonify({'success': False, 'error': 'Endpoint not found'}), 404


 @app.errorhandler(500)
 def internal_error(error):
     """Handle 500 errors."""
     print(f"ERROR: 500 Internal Server Error: {error}")
     traceback.print_exc()
-    return jsonify({
-        'success': False,
-        'error': 'Internal server error'
-    }), 500
+    return jsonify({'success': False, 'error': 'Internal server error'}), 500


 if __name__ == '__main__':
     print("Starting DNSRecon Flask application with user session support...")

     # Load configuration from environment
     config.load_from_env()

     # Start Flask application
     print(f"Starting server on {config.flask_host}:{config.flask_port}")
     app.run(
         host=config.flask_host,
         port=config.flask_port,
60  config.py
@@ -21,11 +21,10 @@ class Config:

         # --- General Settings ---
         self.default_recursion_depth = 2
-        self.default_timeout = 30
-        self.max_concurrent_requests = 5
+        self.default_timeout = 60
+        self.max_concurrent_requests = 1
         self.large_entity_threshold = 100
         self.max_retries_per_target = 8
-        self.cache_expiry_hours = 12

         # --- Provider Caching Settings ---
         self.cache_timeout_hours = 6  # Provider-specific cache timeout
@@ -69,7 +68,6 @@ class Config:
         self.max_concurrent_requests = int(os.getenv('MAX_CONCURRENT_REQUESTS', self.max_concurrent_requests))
         self.large_entity_threshold = int(os.getenv('LARGE_ENTITY_THRESHOLD', self.large_entity_threshold))
         self.max_retries_per_target = int(os.getenv('MAX_RETRIES_PER_TARGET', self.max_retries_per_target))
-        self.cache_expiry_hours = int(os.getenv('CACHE_EXPIRY_HOURS', self.cache_expiry_hours))
         self.cache_timeout_hours = int(os.getenv('CACHE_TIMEOUT_HOURS', self.cache_timeout_hours))

         # Override Flask and session settings
@@ -87,6 +85,60 @@ class Config:
             self.enabled_providers[provider] = True
         return True

+    def set_provider_enabled(self, provider: str, enabled: bool) -> bool:
+        """
+        Set provider enabled status for the session.
+
+        Args:
+            provider: Provider name
+            enabled: Whether the provider should be enabled
+
+        Returns:
+            True if the setting was applied successfully
+        """
+        provider_key = provider.lower()
+        self.enabled_providers[provider_key] = enabled
+        return True
+
+    def get_provider_enabled(self, provider: str) -> bool:
+        """
+        Get provider enabled status.
+
+        Args:
+            provider: Provider name
+
+        Returns:
+            True if the provider is enabled
+        """
+        provider_key = provider.lower()
+        return self.enabled_providers.get(provider_key, True)  # Default to enabled
+
+    def bulk_set_provider_settings(self, provider_settings: dict) -> dict:
+        """
+        Set multiple provider settings at once.
+
+        Args:
+            provider_settings: Dict of provider_name -> {'enabled': bool, ...}
+
+        Returns:
+            Dict with results for each provider
+        """
+        results = {}
+
+        for provider_name, settings in provider_settings.items():
+            provider_key = provider_name.lower()
+
+            try:
+                if 'enabled' in settings:
+                    self.enabled_providers[provider_key] = settings['enabled']
+                    results[provider_key] = {'success': True, 'enabled': settings['enabled']}
+                else:
+                    results[provider_key] = {'success': False, 'error': 'No enabled setting provided'}
+            except Exception as e:
+                results[provider_key] = {'success': False, 'error': str(e)}
+
+        return results
+
     def get_api_key(self, provider: str) -> Optional[str]:
         """Get API key for a provider."""
         return self.api_keys.get(provider)
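[Reviewer sketch, not part of the diff] Driving the provider toggles added in the config.py hunk above; assumes Config is importable and takes no constructor arguments:

    from config import Config

    cfg = Config()
    cfg.set_provider_enabled('Shodan', False)   # provider keys are lower-cased internally
    print(cfg.get_provider_enabled('shodan'))   # False
    print(cfg.bulk_set_provider_settings({
        'crtsh': {'enabled': True},
        'dns': {},                              # missing 'enabled' key -> per-provider error entry
    }))
    # {'crtsh': {'success': True, 'enabled': True},
    #  'dns': {'success': False, 'error': 'No enabled setting provided'}}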
core/graph_manager.py
@@ -1,8 +1,10 @@
-# core/graph_manager.py
+# dnsrecon-reduced/core/graph_manager.py

 """
 Graph data model for DNSRecon using NetworkX.
 Manages in-memory graph storage with confidence scoring and forensic metadata.
+Now fully compatible with the unified ProviderResult data model.
+UPDATED: Fixed correlation exclusion keys to match actual attribute names.
 """
 import re
 from datetime import datetime, timezone
@@ -16,7 +18,8 @@ class NodeType(Enum):
     """Enumeration of supported node types."""
     DOMAIN = "domain"
     IP = "ip"
-    ASN = "asn"
+    ISP = "isp"
+    CA = "ca"
     LARGE_ENTITY = "large_entity"
     CORRELATION_OBJECT = "correlation_object"

@@ -28,6 +31,7 @@ class GraphManager:
     """
     Thread-safe graph manager for DNSRecon infrastructure mapping.
     Uses NetworkX for in-memory graph storage with confidence scoring.
+    Compatible with unified ProviderResult data model.
     """

     def __init__(self):
@@ -39,6 +43,31 @@ class GraphManager:
         # Compile regex for date filtering for efficiency
         self.date_pattern = re.compile(r'^\d{4}-\d{2}-\d{2}[ T]\d{2}:\d{2}:\d{2}')

+        # FIXED: Exclude cert_issuer_name since we already create proper CA relationships
+        self.EXCLUDED_KEYS = [
+            # Certificate metadata that creates noise or has dedicated node types
+            'cert_source',                # Always 'crtsh' for crtsh provider
+            'cert_common_name',
+            'cert_validity_period_days',  # Numerical, not useful for correlation
+            'cert_issuer_name',           # FIXED: Has dedicated CA nodes, don't correlate
+            #'cert_certificate_id',       # Unique per certificate
+            #'cert_serial_number',        # Unique per certificate
+            'cert_entry_timestamp',       # Timestamp, filtered by date regex anyway
+            'cert_not_before',            # Date, filtered by date regex anyway
+            'cert_not_after',             # Date, filtered by date regex anyway
+            # DNS metadata that creates noise
+            'dns_ttl',                    # TTL values are not meaningful for correlation
+            # Shodan metadata that might create noise
+            'timestamp',                  # Generic timestamp fields
+            'last_update',                # Generic timestamp fields
+            #'org',                       # Too generic, causes false correlations
+            #'isp',                       # Too generic, causes false correlations
+            # Generic noisy attributes
+            'updated_timestamp',          # Any timestamp field
+            'discovery_timestamp',        # Any timestamp field
+            'query_timestamp',            # Any timestamp field
+        ]
+
     def __getstate__(self):
         """Prepare GraphManager for pickling, excluding compiled regex."""
         state = self.__dict__.copy()
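[Reviewer sketch, not part of the diff] The __getstate__/__setstate__ pair exists because the scanner, and with it this GraphManager, is pickled into the per-user session store; dropping the compiled pattern keeps the payload lean, and it is rebuilt on load. A self-contained illustration of the same pattern (_RegexHolder is illustrative):

    import pickle
    import re

    DATE_RE = r'^\d{4}-\d{2}-\d{2}[ T]\d{2}:\d{2}:\d{2}'

    class _RegexHolder:
        def __init__(self):
            self.date_pattern = re.compile(DATE_RE)
            self.payload = {'nodes': 3}

        def __getstate__(self):
            state = self.__dict__.copy()
            state.pop('date_pattern', None)  # exclude the compiled regex from the pickle
            return state

        def __setstate__(self, state):
            self.__dict__.update(state)
            self.date_pattern = re.compile(DATE_RE)  # rebuild after unpickling

    restored = pickle.loads(pickle.dumps(_RegexHolder()))
    print(restored.payload, bool(restored.date_pattern.match('2024-01-01 00:00:00')))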
@@ -52,245 +81,138 @@ class GraphManager:
         self.__dict__.update(state)
         self.date_pattern = re.compile(r'^\d{4}-\d{2}-\d{2}[ T]\d{2}:\d{2}:\d{2}')

-    def _update_correlation_index(self, node_id: str, data: Any, path: List[str] = [], parent_attr: str = ""):
-        """Recursively traverse metadata and add hashable values to the index with better path tracking."""
-        if path is None:
-            path = []
-
-        if isinstance(data, dict):
-            for key, value in data.items():
-                self._update_correlation_index(node_id, value, path + [key], key)
-        elif isinstance(data, list):
-            for i, item in enumerate(data):
-                # Instead of just using [i], include the parent attribute context
-                list_path_component = f"[{i}]" if not parent_attr else f"{parent_attr}[{i}]"
-                self._update_correlation_index(node_id, item, path + [list_path_component], parent_attr)
-        else:
-            self._add_to_correlation_index(node_id, data, ".".join(path), parent_attr)
-
-    def _add_to_correlation_index(self, node_id: str, value: Any, path_str: str, parent_attr: str = ""):
-        """Add a hashable value to the correlation index, filtering out noise."""
-        if not isinstance(value, (str, int, float, bool)) or value is None:
-            return
-
-        # Ignore certain paths that contain noisy, non-unique identifiers
-        if any(keyword in path_str.lower() for keyword in ['count', 'total', 'timestamp', 'date']):
-            return
-
-        # Filter out common low-entropy values and date-like strings
-        if isinstance(value, str):
-            # FIXED: Prevent correlation on date/time strings.
-            if self.date_pattern.match(value):
-                return
-            if len(value) < 4 or value.lower() in ['true', 'false', 'unknown', 'none', 'crt.sh']:
-                return
-        elif isinstance(value, int) and (abs(value) < 1024 or abs(value) > 65535):
-            return  # Ignore small integers and common port numbers
-        elif isinstance(value, bool):
-            return  # Ignore boolean values
-
-        # Add the valuable correlation data to the index
-        if value not in self.correlation_index:
-            self.correlation_index[value] = {}
-        if node_id not in self.correlation_index[value]:
-            self.correlation_index[value][node_id] = []
-
-        # Store both the full path and the parent attribute for better edge labeling
-        correlation_entry = {
-            'path': path_str,
-            'parent_attr': parent_attr,
-            'meaningful_attr': self._extract_meaningful_attribute(path_str, parent_attr)
-        }
-
-        if correlation_entry not in self.correlation_index[value][node_id]:
-            self.correlation_index[value][node_id].append(correlation_entry)
-
-    def _extract_meaningful_attribute(self, path_str: str, parent_attr: str = "") -> str:
-        """Extract the most meaningful attribute name from a path string."""
-        if not path_str:
-            return "unknown"
-
-        path_parts = path_str.split('.')
-
-        # Look for the last non-array-index part
-        for part in reversed(path_parts):
-            # Skip array indices like [0], [1], etc.
-            if not (part.startswith('[') and part.endswith(']') and part[1:-1].isdigit()):
-                # Clean up compound names like "hostnames[0]" to just "hostnames"
-                clean_part = re.sub(r'\[\d+\]$', '', part)
-                if clean_part:
-                    return clean_part
-
-        # Fallback to parent attribute if available
-        if parent_attr:
-            return parent_attr
-
-        # Last resort - use the first meaningful part
-        for part in path_parts:
-            if not (part.startswith('[') and part.endswith(']') and part[1:-1].isdigit()):
-                clean_part = re.sub(r'\[\d+\]$', '', part)
-                if clean_part:
-                    return clean_part
-
-        return "correlation"
-
-    def _check_for_correlations(self, new_node_id: str, data: Any, path: List[str] = [], parent_attr: str = "") -> List[Dict]:
-        """Recursively traverse metadata to find correlations with existing data."""
-        if path is None:
-            path = []
-
-        all_correlations = []
-        if isinstance(data, dict):
-            for key, value in data.items():
-                if key == 'source':  # Avoid correlating on the provider name
-                    continue
-                all_correlations.extend(self._check_for_correlations(new_node_id, value, path + [key], key))
-        elif isinstance(data, list):
-            for i, item in enumerate(data):
-                list_path_component = f"[{i}]" if not parent_attr else f"{parent_attr}[{i}]"
-                all_correlations.extend(self._check_for_correlations(new_node_id, item, path + [list_path_component], parent_attr))
-        else:
-            value = data
-            if value in self.correlation_index:
-                existing_nodes_with_paths = self.correlation_index[value]
-                unique_nodes = set(existing_nodes_with_paths.keys())
-                unique_nodes.add(new_node_id)
-
-                if len(unique_nodes) < 2:
-                    return all_correlations  # Correlation must involve at least two distinct nodes
-
-                new_source = {
-                    'node_id': new_node_id,
-                    'path': ".".join(path),
-                    'parent_attr': parent_attr,
-                    'meaningful_attr': self._extract_meaningful_attribute(".".join(path), parent_attr)
-                }
-                all_sources = [new_source]
-
-                for node_id, path_entries in existing_nodes_with_paths.items():
-                    for entry in path_entries:
-                        if isinstance(entry, dict):
-                            all_sources.append({
-                                'node_id': node_id,
-                                'path': entry['path'],
-                                'parent_attr': entry.get('parent_attr', ''),
-                                'meaningful_attr': entry.get('meaningful_attr', self._extract_meaningful_attribute(entry['path'], entry.get('parent_attr', '')))
-                            })
-                        else:
-                            # Handle legacy string-only entries
-                            all_sources.append({
-                                'node_id': node_id,
-                                'path': str(entry),
-                                'parent_attr': '',
-                                'meaningful_attr': self._extract_meaningful_attribute(str(entry))
-                            })
-
-                all_correlations.append({
-                    'value': value,
-                    'sources': all_sources,
-                    'nodes': list(unique_nodes)
-                })
-        return all_correlations
-
-    def add_node(self, node_id: str, node_type: NodeType, attributes: Optional[Dict[str, Any]] = None,
-                 description: str = "", metadata: Optional[Dict[str, Any]] = None) -> bool:
-        """Add a node to the graph, update attributes, and process correlations."""
-        is_new_node = not self.graph.has_node(node_id)
-        if is_new_node:
-            self.graph.add_node(node_id, type=node_type.value,
-                                added_timestamp=datetime.now(timezone.utc).isoformat(),
-                                attributes=attributes or {},
-                                description=description,
-                                metadata=metadata or {})
-        else:
-            # Safely merge new attributes into existing attributes
-            if attributes:
-                existing_attributes = self.graph.nodes[node_id].get('attributes', {})
-                existing_attributes.update(attributes)
-                self.graph.nodes[node_id]['attributes'] = existing_attributes
-            if description:
-                self.graph.nodes[node_id]['description'] = description
-            if metadata:
-                existing_metadata = self.graph.nodes[node_id].get('metadata', {})
-                existing_metadata.update(metadata)
-                self.graph.nodes[node_id]['metadata'] = existing_metadata
-
-        if attributes and node_type != NodeType.CORRELATION_OBJECT:
-            correlations = self._check_for_correlations(node_id, attributes)
-            for corr in correlations:
-                value = corr['value']
-
-                # STEP 1: Substring check against all existing nodes
-                if self._correlation_value_matches_existing_node(value):
-                    # Skip creating correlation node - would be redundant
-                    continue
-
-                eligible_nodes = set(corr['nodes'])
-
-                if len(eligible_nodes) < 2:
-                    # Need at least 2 nodes to create a correlation
-                    continue
-
-                # STEP 3: Check for existing correlation node with same connection pattern
-                correlation_nodes_with_pattern = self._find_correlation_nodes_with_same_pattern(eligible_nodes)
-
-                if correlation_nodes_with_pattern:
-                    # STEP 4: Merge with existing correlation node
-                    target_correlation_node = correlation_nodes_with_pattern[0]
-                    self._merge_correlation_values(target_correlation_node, value, corr)
-                else:
-                    # STEP 5: Create new correlation node for eligible nodes only
-                    correlation_node_id = f"corr_{abs(hash(str(sorted(eligible_nodes))))}"
-                    self.add_node(correlation_node_id, NodeType.CORRELATION_OBJECT,
-                                  metadata={'values': [value], 'sources': corr['sources'],
-                                            'correlated_nodes': list(eligible_nodes)})
-
-                # Create edges from eligible nodes to this correlation node with better labeling
-                for c_node_id in eligible_nodes:
-                    if self.graph.has_node(c_node_id):
-                        # Find the best attribute name for this node
-                        meaningful_attr = self._find_best_attribute_name_for_node(c_node_id, corr['sources'])
-                        relationship_type = f"c_{meaningful_attr}"
-                        self.add_edge(c_node_id, correlation_node_id, relationship_type, confidence_score=0.9)
-
-            self._update_correlation_index(node_id, attributes)
-
-        self.last_modified = datetime.now(timezone.utc).isoformat()
-        return is_new_node
-
-    def _find_best_attribute_name_for_node(self, node_id: str, sources: List[Dict]) -> str:
-        """Find the best attribute name for a correlation edge by looking at the sources."""
-        node_sources = [s for s in sources if s['node_id'] == node_id]
-
-        if not node_sources:
-            return "correlation"
-
-        # Use the meaningful_attr if available
-        for source in node_sources:
-            meaningful_attr = source.get('meaningful_attr')
-            if meaningful_attr and meaningful_attr != "unknown":
-                return meaningful_attr
-
-        # Fallback to parent_attr
-        for source in node_sources:
-            parent_attr = source.get('parent_attr')
-            if parent_attr:
-                return parent_attr
-
-        # Last resort - extract from path
-        for source in node_sources:
-            path = source.get('path', '')
-            if path:
-                extracted = self._extract_meaningful_attribute(path)
-                if extracted != "unknown":
-                    return extracted
-
-        return "correlation"
+    def process_correlations_for_node(self, node_id: str):
+        """
+        UPDATED: Process correlations for a given node with enhanced tracking.
+        Now properly tracks which attribute/provider created each correlation.
+        """
+        if not self.graph.has_node(node_id):
+            return
+
+        node_attributes = self.graph.nodes[node_id].get('attributes', [])
+
+        # Process each attribute for potential correlations
+        for attr in node_attributes:
+            attr_name = attr.get('name')
+            attr_value = attr.get('value')
+            attr_provider = attr.get('provider', 'unknown')
+
+            # IMPROVED: More comprehensive exclusion logic
+            should_exclude = (
+                # Check against excluded keys (exact match or substring)
+                any(excluded_key in attr_name or attr_name == excluded_key for excluded_key in self.EXCLUDED_KEYS) or
+                # Invalid value types
+                not isinstance(attr_value, (str, int, float, bool)) or
+                attr_value is None or
+                # Boolean values are not useful for correlation
+                isinstance(attr_value, bool) or
+                # String values that are too short or are dates
+                (isinstance(attr_value, str) and (
+                    len(attr_value) < 4 or
+                    self.date_pattern.match(attr_value) or
+                    # Exclude common generic values that create noise
+                    attr_value.lower() in ['unknown', 'none', 'null', 'n/a', 'true', 'false', '0', '1']
+                )) or
+                # Numerical values that are likely to be unique identifiers
+                (isinstance(attr_value, (int, float)) and (
+                    attr_value == 0 or  # Zero values are not meaningful
+                    attr_value == 1 or  # One values are too common
+                    abs(attr_value) > 1000000  # Very large numbers are likely IDs
+                ))
+            )
+
+            if should_exclude:
+                continue
+
+            # Initialize correlation tracking for this value
+            if attr_value not in self.correlation_index:
+                self.correlation_index[attr_value] = {
+                    'nodes': set(),
+                    'sources': []  # Track which provider/attribute combinations contributed
+                }
+
+            # Add this node and source information
+            self.correlation_index[attr_value]['nodes'].add(node_id)
+
+            # Track the source of this correlation value
+            source_info = {
+                'node_id': node_id,
+                'provider': attr_provider,
+                'attribute': attr_name,
+                'path': f"{attr_provider}_{attr_name}"
+            }
+
+            # Add source if not already present (avoid duplicates)
+            existing_sources = [s for s in self.correlation_index[attr_value]['sources']
+                                if s['node_id'] == node_id and s['path'] == source_info['path']]
+            if not existing_sources:
+                self.correlation_index[attr_value]['sources'].append(source_info)
+
+            # Create correlation node if we have multiple nodes with this value
+            if len(self.correlation_index[attr_value]['nodes']) > 1:
+                self._create_enhanced_correlation_node_and_edges(attr_value, self.correlation_index[attr_value])
+
+    def _create_enhanced_correlation_node_and_edges(self, value, correlation_data):
+        """
+        UPDATED: Create correlation node and edges with raw provider data (no formatting).
+        """
+        correlation_node_id = f"corr_{hash(str(value)) & 0x7FFFFFFF}"
+        nodes = correlation_data['nodes']
+        sources = correlation_data['sources']
+
+        # Create or update correlation node
+        if not self.graph.has_node(correlation_node_id):
+            # Use raw provider/attribute data - no formatting
+            provider_counts = {}
+            for source in sources:
+                # Keep original provider and attribute names
+                key = f"{source['provider']}_{source['attribute']}"
+                provider_counts[key] = provider_counts.get(key, 0) + 1
+
+            # Use the most common provider/attribute as the primary label (raw)
+            primary_source = max(provider_counts.items(), key=lambda x: x[1])[0] if provider_counts else "unknown_correlation"
+
+            metadata = {
+                'value': value,
+                'correlated_nodes': list(nodes),
+                'sources': sources,
+                'primary_source': primary_source,
+                'correlation_count': len(nodes)
+            }
+
+            self.add_node(correlation_node_id, NodeType.CORRELATION_OBJECT, metadata=metadata)
+            #print(f"Created correlation node {correlation_node_id} for value '{value}' with {len(nodes)} nodes")
+
+        # Create edges from each node to the correlation node
+        for source in sources:
+            node_id = source['node_id']
+            provider = source['provider']
+            attribute = source['attribute']
+
+            if self.graph.has_node(node_id) and not self.graph.has_edge(node_id, correlation_node_id):
+                # Format relationship label as "corr_provider_attribute"
+                relationship_label = f"corr_{provider}_{attribute}"
+
+                self.add_edge(
+                    source_id=node_id,
+                    target_id=correlation_node_id,
+                    relationship_type=relationship_label,
+                    confidence_score=0.9,
+                    source_provider=provider,
+                    raw_data={
+                        'correlation_value': value,
+                        'original_attribute': attribute,
+                        'correlation_type': 'attribute_matching'
+                    }
+                )
+
+                #print(f"Added correlation edge: {node_id} -> {correlation_node_id} ({relationship_label})")

     def _has_direct_edge_bidirectional(self, node_a: str, node_b: str) -> bool:
         """
         Check if there's a direct edge between two nodes in either direction.
         Returns True if node_a→node_b OR node_b→node_a exists.
         """
         return (self.graph.has_edge(node_a, node_b) or
                 self.graph.has_edge(node_b, node_a))
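[Reviewer sketch, not part of the diff] The shape of the reworked correlation_index after two nodes report the same attribute value; all names and values below are made up:

    correlation_index = {
        'AS64500': {
            'nodes': {'example.com', 'example.org'},
            'sources': [
                {'node_id': 'example.com', 'provider': 'shodan',
                 'attribute': 'asn', 'path': 'shodan_asn'},
                {'node_id': 'example.org', 'provider': 'shodan',
                 'attribute': 'asn', 'path': 'shodan_asn'},
            ],
        },
    }
    # Once len(...['nodes']) > 1, _create_enhanced_correlation_node_and_edges links
    # both nodes to a shared corr_* correlation node.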
@@ -382,19 +304,60 @@ class GraphManager:
             f"across {node_count} nodes"
         )

+    def add_node(self, node_id: str, node_type: NodeType, attributes: Optional[List[Dict[str, Any]]] = None,
+                 description: str = "", metadata: Optional[Dict[str, Any]] = None) -> bool:
+        """
+        Add a node to the graph, update attributes, and process correlations.
+        Now compatible with unified data model - attributes are dictionaries from converted StandardAttribute objects.
+        """
+        is_new_node = not self.graph.has_node(node_id)
+        if is_new_node:
+            self.graph.add_node(node_id, type=node_type.value,
+                                added_timestamp=datetime.now(timezone.utc).isoformat(),
+                                attributes=attributes or [],  # Store as a list from the start
+                                description=description,
+                                metadata=metadata or {})
+        else:
+            # Safely merge new attributes into the existing list of attributes
+            if attributes:
+                existing_attributes = self.graph.nodes[node_id].get('attributes', [])
+
+                # Handle cases where old data might still be in dictionary format
+                if not isinstance(existing_attributes, list):
+                    existing_attributes = []
+
+                # Create a set of existing attribute names for efficient duplicate checking
+                existing_attr_names = {attr['name'] for attr in existing_attributes}
+
+                for new_attr in attributes:
+                    if new_attr['name'] not in existing_attr_names:
+                        existing_attributes.append(new_attr)
+                        existing_attr_names.add(new_attr['name'])
+
+                self.graph.nodes[node_id]['attributes'] = existing_attributes
+            if description:
+                self.graph.nodes[node_id]['description'] = description
+            if metadata:
+                existing_metadata = self.graph.nodes[node_id].get('metadata', {})
+                existing_metadata.update(metadata)
+                self.graph.nodes[node_id]['metadata'] = existing_metadata
+
+        self.last_modified = datetime.now(timezone.utc).isoformat()
+        return is_new_node
+
     def add_edge(self, source_id: str, target_id: str, relationship_type: str,
                  confidence_score: float = 0.5, source_provider: str = "unknown",
                  raw_data: Optional[Dict[str, Any]] = None) -> bool:
-        """Add or update an edge between two nodes, ensuring nodes exist."""
+        """
+        UPDATED: Add or update an edge between two nodes with raw relationship labels.
+        """
         if not self.graph.has_node(source_id) or not self.graph.has_node(target_id):
             return False

         new_confidence = confidence_score

+        if relationship_type.startswith("c_"):
+            # UPDATED: Use raw relationship type - no formatting
+            edge_label = relationship_type
+        else:
+            edge_label = f"{source_provider}_{relationship_type}"
+
         if self.graph.has_edge(source_id, target_id):
             # If edge exists, update confidence if the new score is higher.
@@ -404,7 +367,7 @@ class GraphManager:
                 self.graph.edges[source_id, target_id]['updated_by'] = source_provider
             return False

-        # Add a new edge with all attributes.
+        # Add a new edge with raw attributes
         self.graph.add_edge(source_id, target_id,
                             relationship_type=edge_label,
                             confidence_score=new_confidence,
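[Reviewer sketch, not part of the diff] Resulting edge labels under the new scheme, assuming GraphManager is exported next to NodeType:

    from core.graph_manager import GraphManager, NodeType

    gm = GraphManager()
    gm.add_node('example.com', NodeType.DOMAIN)
    gm.add_node('192.0.2.10', NodeType.IP)

    # Non-'c_' relationship: label gains the provider prefix -> 'dns_a_record'
    gm.add_edge('example.com', '192.0.2.10', 'a_record', source_provider='dns')

    # Edges are refused when either endpoint is missing
    print(gm.add_edge('example.com', 'corr_12345', 'c_asn'))  # False: target absent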
@@ -423,13 +386,19 @@ class GraphManager:
             return False

         node_data = self.graph.nodes[large_entity_id]
-        attributes = node_data.get('attributes', {})
+        attributes = node_data.get('attributes', [])
+
+        # Find the 'nodes' attribute dictionary in the list
+        nodes_attr = next((attr for attr in attributes if attr.get('name') == 'nodes'), None)

         # Remove from the list of member nodes
-        if 'nodes' in attributes and node_id_to_extract in attributes['nodes']:
-            attributes['nodes'].remove(node_id_to_extract)
-            # Update the count
-            attributes['count'] = len(attributes['nodes'])
+        if nodes_attr and 'value' in nodes_attr and isinstance(nodes_attr['value'], list) and node_id_to_extract in nodes_attr['value']:
+            nodes_attr['value'].remove(node_id_to_extract)
+
+            # Find the 'count' attribute and update it
+            count_attr = next((attr for attr in attributes if attr.get('name') == 'count'), None)
+            if count_attr:
+                count_attr['value'] = len(nodes_attr['value'])
         else:
             # This can happen if the node was already extracted, which is not an error.
             print(f"Warning: Node {node_id_to_extract} not found in the 'nodes' list of {large_entity_id}.")
@@ -448,10 +417,20 @@ class GraphManager:

         # Clean up the correlation index
         keys_to_delete = []
-        for value, nodes in self.correlation_index.items():
-            if node_id in nodes:
-                del nodes[node_id]
-                if not nodes:  # If no other nodes are associated with this value, remove it
-                    keys_to_delete.append(value)
+        for value, data in self.correlation_index.items():
+            if isinstance(data, dict) and 'nodes' in data:
+                # Updated correlation structure
+                if node_id in data['nodes']:
+                    data['nodes'].discard(node_id)
+                    # Remove sources for this node
+                    data['sources'] = [s for s in data['sources'] if s['node_id'] != node_id]
+                    if not data['nodes']:  # If no other nodes are associated, remove it
+                        keys_to_delete.append(value)
+            else:
+                # Legacy correlation structure (fallback)
+                if isinstance(data, set) and node_id in data:
+                    data.discard(node_id)
+                    if not data:
+                        keys_to_delete.append(value)

         for key in keys_to_delete:
@@ -473,54 +452,59 @@ class GraphManager:
         """Get all nodes of a specific type."""
         return [n for n, d in self.graph.nodes(data=True) if d.get('type') == node_type.value]

     def get_neighbors(self, node_id: str) -> List[str]:
         """Get all unique neighbors (predecessors and successors) for a node."""
         if not self.graph.has_node(node_id):
             return []
         return list(set(self.graph.predecessors(node_id)) | set(self.graph.successors(node_id)))

     def get_high_confidence_edges(self, min_confidence: float = 0.8) -> List[Tuple[str, str, Dict]]:
         """Get edges with confidence score above a given threshold."""
         return [(u, v, d) for u, v, d in self.graph.edges(data=True)
                 if d.get('confidence_score', 0) >= min_confidence]

     def get_graph_data(self) -> Dict[str, Any]:
-        """Export graph data formatted for frontend visualization."""
+        """
+        Export graph data formatted for frontend visualization.
+        SIMPLIFIED: No certificate styling - frontend handles all visual styling.
+        """
         nodes = []
         for node_id, attrs in self.graph.nodes(data=True):
-            node_data = {'id': node_id, 'label': node_id, 'type': attrs.get('type', 'unknown'),
-                         'attributes': attrs.get('attributes', {}),
-                         'description': attrs.get('description', ''),
-                         'metadata': attrs.get('metadata', {}),
-                         'added_timestamp': attrs.get('added_timestamp')}
-            # Customize node appearance based on type and attributes
-            node_type = node_data['type']
-            attributes = node_data['attributes']
-            if node_type == 'domain' and attributes.get('certificates', {}).get('has_valid_cert') is False:
-                node_data['color'] = {'background': '#c7c7c7', 'border': '#999'}  # Gray for invalid cert
+            node_data = {
+                'id': node_id,
+                'label': node_id,
+                'type': attrs.get('type', 'unknown'),
+                'attributes': attrs.get('attributes', []),  # Raw attributes list
+                'description': attrs.get('description', ''),
+                'metadata': attrs.get('metadata', {}),
+                'added_timestamp': attrs.get('added_timestamp')
+            }

             # Add incoming and outgoing edges to node data
             if self.graph.has_node(node_id):
-                node_data['incoming_edges'] = [{'from': u, 'data': d} for u, _, d in self.graph.in_edges(node_id, data=True)]
-                node_data['outgoing_edges'] = [{'to': v, 'data': d} for _, v, d in self.graph.out_edges(node_id, data=True)]
+                node_data['incoming_edges'] = [
+                    {'from': u, 'data': d} for u, _, d in self.graph.in_edges(node_id, data=True)
+                ]
+                node_data['outgoing_edges'] = [
+                    {'to': v, 'data': d} for _, v, d in self.graph.out_edges(node_id, data=True)
+                ]

             nodes.append(node_data)

         edges = []
         for source, target, attrs in self.graph.edges(data=True):
-            edges.append({'from': source, 'to': target,
+            edges.append({
+                'from': source,
+                'to': target,
                 'label': attrs.get('relationship_type', ''),
                 'confidence_score': attrs.get('confidence_score', 0),
                 'source_provider': attrs.get('source_provider', ''),
-                'discovery_timestamp': attrs.get('discovery_timestamp')})
+                'discovery_timestamp': attrs.get('discovery_timestamp')
+            })

         return {
-            'nodes': nodes, 'edges': edges,
+            'nodes': nodes,
+            'edges': edges,
             'statistics': self.get_statistics()['basic_metrics']
         }

     def export_json(self) -> Dict[str, Any]:
         """Export complete graph data as a JSON-serializable dictionary."""
-        graph_data = nx.node_link_data(self.graph)  # Use NetworkX's built-in robust serializer
+        graph_data = nx.node_link_data(self.graph, edges="edges")
         return {
             'export_metadata': {
                 'export_timestamp': datetime.now(timezone.utc).isoformat(),
@ -528,15 +512,20 @@ class GraphManager:
|
||||
'last_modified': self.last_modified,
|
||||
'total_nodes': self.get_node_count(),
|
||||
'total_edges': self.get_edge_count(),
|
||||
'graph_format': 'dnsrecon_v1_nodeling'
|
||||
'graph_format': 'dnsrecon_v1_unified_model'
|
||||
},
|
||||
'graph': graph_data,
|
||||
'statistics': self.get_statistics()
|
||||
}
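Aside: the export_json change pins NetworkX's edge-list key via edges="edges" instead of the legacy "links". A hedged round-trip sketch, assuming NetworkX 3.2 or later where node_link_data/node_link_graph accept the edges keyword:

import networkx as nx

G = nx.DiGraph()
G.add_edge('example.com', '93.184.216.34', relationship_type='dns_a_record')

# Serialize with the edge list stored under "edges" rather than "links".
data = nx.node_link_data(G, edges="edges")

# Round-trip back into a graph; the same keyword must be supplied on load.
G2 = nx.node_link_graph(data, directed=True, edges="edges")
assert sorted(G2.edges()) == sorted(G.edges())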

    def _get_confidence_distribution(self) -> Dict[str, int]:
        """Get distribution of edge confidence scores."""
        """Get distribution of edge confidence scores with empty graph handling."""
        distribution = {'high': 0, 'medium': 0, 'low': 0}

        # FIXED: Handle empty graph case
        if self.get_edge_count() == 0:
            return distribution

        for _, _, data in self.graph.edges(data=True):
            confidence = data.get('confidence_score', 0)
            if confidence >= 0.8:
@ -548,22 +537,42 @@ class GraphManager:
        return distribution

    def get_statistics(self) -> Dict[str, Any]:
        """Get comprehensive statistics about the graph."""
        stats = {'basic_metrics': {'total_nodes': self.get_node_count(),
                                   'total_edges': self.get_edge_count(),
        """Get comprehensive statistics about the graph with proper empty graph handling."""

        # FIXED: Handle empty graph case properly
        node_count = self.get_node_count()
        edge_count = self.get_edge_count()

        stats = {
            'basic_metrics': {
                'total_nodes': node_count,
                'total_edges': edge_count,
                'creation_time': self.creation_time,
                'last_modified': self.last_modified},
            'node_type_distribution': {}, 'relationship_type_distribution': {},
                'last_modified': self.last_modified
            },
            'node_type_distribution': {},
            'relationship_type_distribution': {},
            'confidence_distribution': self._get_confidence_distribution(),
            'provider_distribution': {}}
        # Calculate distributions
            'provider_distribution': {}
        }

        # FIXED: Only calculate distributions if we have data
        if node_count > 0:
            # Calculate node type distributions
            for node_type in NodeType:
                stats['node_type_distribution'][node_type.value] = self.get_nodes_by_type(node_type).__len__()
                count = len(self.get_nodes_by_type(node_type))
                if count > 0:  # Only include types that exist
                    stats['node_type_distribution'][node_type.value] = count

        if edge_count > 0:
            # Calculate edge distributions
            for _, _, data in self.graph.edges(data=True):
                rel_type = data.get('relationship_type', 'unknown')
                stats['relationship_type_distribution'][rel_type] = stats['relationship_type_distribution'].get(rel_type, 0) + 1

                provider = data.get('source_provider', 'unknown')
                stats['provider_distribution'][provider] = stats['provider_distribution'].get(provider, 0) + 1

        return stats
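Aside: for orientation, a hedged sketch of the dictionary shape get_statistics() produces on a small graph; all values are invented for illustration:

# Illustrative output shape only; actual values depend on the scan.
stats = {
    'basic_metrics': {
        'total_nodes': 12,
        'total_edges': 18,
        'creation_time': '2025-01-01T00:00:00+00:00',
        'last_modified': '2025-01-01T00:05:00+00:00',
    },
    'node_type_distribution': {'domain': 7, 'ip': 4, 'isp': 1},
    'relationship_type_distribution': {'dns_a_record': 9, 'shodan_isp': 1},
    'confidence_distribution': {'high': 14, 'medium': 4, 'low': 0},
    'provider_distribution': {'dns': 15, 'shodan': 3},
}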

    def clear(self) -> None:

@ -152,7 +152,7 @@ class ForensicLogger:

        # Log to standard logger
        if error:
            self.logger.error(f"API Request Failed - {provider}: {url} - {error}")
            self.logger.error("API Request Failed.")
        else:
            self.logger.info(f"API Request - {provider}: {url} - Status: {status_code}")

@ -197,7 +197,7 @@ class ForensicLogger:
        self.logger.info(f"Scan Started - Target: {target_domain}, Depth: {recursion_depth}")
        self.logger.info(f"Enabled Providers: {', '.join(enabled_providers)}")

        self.session_metadata['target_domains'].add(target_domain)
        self.session_metadata['target_domains'].update(target_domain)

    def log_scan_complete(self) -> None:
        """Log the completion of a reconnaissance scan."""
107
core/provider_result.py
Normal file
107
core/provider_result.py
Normal file
@ -0,0 +1,107 @@
# dnsrecon-reduced/core/provider_result.py

"""
Unified data model for DNSRecon passive reconnaissance.
Standardizes the data structure across all providers to ensure consistent processing.
"""

from typing import Any, Optional, List, Dict
from dataclasses import dataclass, field
from datetime import datetime, timezone


@dataclass
class StandardAttribute:
    """A unified data structure for a single piece of information about a node."""
    target_node: str
    name: str
    value: Any
    type: str
    provider: str
    confidence: float
    timestamp: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
    metadata: Optional[Dict[str, Any]] = field(default_factory=dict)

    def __post_init__(self):
        """Validate the attribute after initialization."""
        if not isinstance(self.confidence, (int, float)) or not 0.0 <= self.confidence <= 1.0:
            raise ValueError(f"Confidence must be between 0.0 and 1.0, got {self.confidence}")


@dataclass
class Relationship:
    """A unified data structure for a directional link between two nodes."""
    source_node: str
    target_node: str
    relationship_type: str
    confidence: float
    provider: str
    timestamp: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
    raw_data: Optional[Dict[str, Any]] = field(default_factory=dict)

    def __post_init__(self):
        """Validate the relationship after initialization."""
        if not isinstance(self.confidence, (int, float)) or not 0.0 <= self.confidence <= 1.0:
            raise ValueError(f"Confidence must be between 0.0 and 1.0, got {self.confidence}")


@dataclass
class ProviderResult:
    """A container for all data returned by a provider from a single query."""
    attributes: List[StandardAttribute] = field(default_factory=list)
    relationships: List[Relationship] = field(default_factory=list)

    def add_attribute(self, target_node: str, name: str, value: Any, attr_type: str,
                      provider: str, confidence: float = 0.8,
                      metadata: Optional[Dict[str, Any]] = None) -> None:
        """Helper method to add an attribute to the result."""
        self.attributes.append(StandardAttribute(
            target_node=target_node,
            name=name,
            value=value,
            type=attr_type,
            provider=provider,
            confidence=confidence,
            metadata=metadata or {}
        ))

    def add_relationship(self, source_node: str, target_node: str, relationship_type: str,
                         provider: str, confidence: float = 0.8,
                         raw_data: Optional[Dict[str, Any]] = None) -> None:
        """Helper method to add a relationship to the result."""
        self.relationships.append(Relationship(
            source_node=source_node,
            target_node=target_node,
            relationship_type=relationship_type,
            confidence=confidence,
            provider=provider,
            raw_data=raw_data or {}
        ))

    def get_discovered_nodes(self) -> set:
        """Get all unique node identifiers discovered in this result."""
        nodes = set()

        # Add nodes from relationships
        for rel in self.relationships:
            nodes.add(rel.source_node)
            nodes.add(rel.target_node)

        # Add nodes from attributes
        for attr in self.attributes:
            nodes.add(attr.target_node)

        return nodes

    def get_relationship_count(self) -> int:
        """Get the total number of relationships in this result."""
        return len(self.relationships)

    def get_attribute_count(self) -> int:
        """Get the total number of attributes in this result."""
        return len(self.attributes)

    ##TODO
    #def is_large_entity(self, threshold: int) -> bool:
    #    """Check if this result qualifies as a large entity based on relationship count."""
    #    return self.get_relationship_count() > threshold
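A minimal usage sketch of the model above; the node names are made up for illustration:

result = ProviderResult()
result.add_relationship(
    source_node='example.com',
    target_node='93.184.216.34',
    relationship_type='dns_a_record',
    provider='dns',
    confidence=0.8,
)
result.add_attribute(
    target_node='example.com',
    name='a_records',
    value=['93.184.216.34'],
    attr_type='dns_record_list',
    provider='dns',
)
assert result.get_discovered_nodes() == {'example.com', '93.184.216.34'}
assert result.get_relationship_count() == 1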
@ -1,7 +1,6 @@
# dnsrecon-reduced/core/rate_limiter.py

import time
import redis

class GlobalRateLimiter:
    def __init__(self, redis_client):
899
core/scanner.py
899
core/scanner.py
File diff suppressed because it is too large
@ -5,18 +5,15 @@ import time
import uuid
import redis
import pickle
from typing import Dict, Optional, Any, List
from typing import Dict, Optional, Any

from core.scanner import Scanner
from config import config

# WARNING: Using pickle can be a security risk if the data source is not trusted.
# In this case, we are only serializing/deserializing our own trusted Scanner objects,
# which is generally safe. Do not unpickle data from untrusted sources.

class SessionManager:
    """
    Manages multiple scanner instances for concurrent user sessions using Redis.
    FIXED: Manages multiple scanner instances for concurrent user sessions using Redis.
    Now more conservative about session creation to preserve API keys and configuration.
    """

    def __init__(self, session_timeout_minutes: int = 0):
@ -28,7 +25,10 @@ class SessionManager:

        self.redis_client = redis.StrictRedis(db=0, decode_responses=False)
        self.session_timeout = session_timeout_minutes * 60  # Convert to seconds
        self.lock = threading.Lock()  # Lock for local operations, Redis handles atomic ops
        self.lock = threading.Lock()

        # FIXED: Add a creation lock to prevent race conditions
        self.creation_lock = threading.Lock()

        # Start cleanup thread
        self.cleanup_thread = threading.Thread(target=self._cleanup_loop, daemon=True)
@ -40,7 +40,7 @@ class SessionManager:
        """Prepare SessionManager for pickling."""
        state = self.__dict__.copy()
        # Exclude unpickleable attributes - Redis client and threading objects
        unpicklable_attrs = ['lock', 'cleanup_thread', 'redis_client']
        unpicklable_attrs = ['lock', 'cleanup_thread', 'redis_client', 'creation_lock']
        for attr in unpicklable_attrs:
            if attr in state:
                del state[attr]
@ -50,9 +50,9 @@ class SessionManager:
        """Restore SessionManager after unpickling."""
        self.__dict__.update(state)
        # Re-initialize unpickleable attributes
        import redis
        self.redis_client = redis.StrictRedis(db=0, decode_responses=False)
        self.lock = threading.Lock()
        self.creation_lock = threading.Lock()
        self.cleanup_thread = threading.Thread(target=self._cleanup_loop, daemon=True)
        self.cleanup_thread.start()

@ -66,8 +66,10 @@ class SessionManager:

    def create_session(self) -> str:
        """
        Create a new user session and store it in Redis.
        FIXED: Create a new user session with thread-safe creation to prevent duplicates.
        """
        # FIXED: Use creation lock to prevent race conditions
        with self.creation_lock:
            session_id = str(uuid.uuid4())
            print(f"=== CREATING SESSION {session_id} IN REDIS ===")

@ -99,6 +101,7 @@ class SessionManager:
            self.redis_client.setex(stop_key, self.session_timeout, b'0')

            print(f"Session {session_id} stored in Redis with stop signal initialized")
            print(f"Session has {len(scanner_instance.providers)} providers: {[p.get_name() for p in scanner_instance.providers]}")
            return session_id

        except Exception as e:
@ -212,7 +215,14 @@ class SessionManager:
        # Immediately save to Redis for GUI updates
        success = self._save_session_data(session_id, session_data)
        if success:
            print(f"Scanner state updated for session {session_id} (status: {scanner.status})")
            # Only log occasionally to reduce noise
            if hasattr(self, '_last_update_log'):
                if time.time() - self._last_update_log > 5:  # Log every 5 seconds max
                    #print(f"Scanner state updated for session {session_id} (status: {scanner.status})")
                    self._last_update_log = time.time()
            else:
                #print(f"Scanner state updated for session {session_id} (status: {scanner.status})")
                self._last_update_log = time.time()
        else:
            print(f"WARNING: Failed to save scanner state for session {session_id}")
        return success
@ -4,16 +4,17 @@ import time
import requests
import threading
from abc import ABC, abstractmethod
from typing import List, Dict, Any, Optional, Tuple
from typing import Dict, Any, Optional

from core.logger import get_forensic_logger
from core.rate_limiter import GlobalRateLimiter
from core.provider_result import ProviderResult


class BaseProvider(ABC):
    """
    Abstract base class for all DNSRecon data providers.
    Now supports session-specific configuration.
    Now supports session-specific configuration and returns standardized ProviderResult objects.
    """

    def __init__(self, name: str, rate_limit: int = 60, timeout: int = 30, session_config=None):
@ -101,7 +102,7 @@ class BaseProvider(ABC):
        pass

    @abstractmethod
    def query_domain(self, domain: str) -> List[Tuple[str, str, str, float, Dict[str, Any]]]:
    def query_domain(self, domain: str) -> ProviderResult:
        """
        Query the provider for information about a domain.

@ -109,12 +110,12 @@ class BaseProvider(ABC):
            domain: Domain to investigate

        Returns:
            List of tuples: (source_node, target_node, relationship_type, confidence, raw_data)
            ProviderResult containing standardized attributes and relationships
        """
        pass

    @abstractmethod
    def query_ip(self, ip: str) -> List[Tuple[str, str, str, float, Dict[str, Any]]]:
    def query_ip(self, ip: str) -> ProviderResult:
        """
        Query the provider for information about an IP address.

@ -122,7 +123,7 @@ class BaseProvider(ABC):
            ip: IP address to investigate

        Returns:
            List of tuples: (source_node, target_node, relationship_type, confidence, raw_data)
            ProviderResult containing standardized attributes and relationships
        """
        pass
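Aside: under the new contract a provider only has to build and return a ProviderResult. A hedged sketch of a minimal concrete provider, using the constructor signature shown above; the class name and returned data are invented, and the real BaseProvider may declare further abstract members (e.g. get_name) beyond the two shown here:

# Hypothetical example provider; not part of the codebase.
class StaticExampleProvider(BaseProvider):
    """Returns a canned relationship, to illustrate the ProviderResult contract."""

    def __init__(self, session_config=None):
        super().__init__(name="static_example", session_config=session_config)

    def get_name(self) -> str:
        return "static_example"

    def query_domain(self, domain: str) -> ProviderResult:
        result = ProviderResult()
        # Every discovery is expressed through the unified helpers.
        result.add_relationship(
            source_node=domain,
            target_node="198.51.100.7",
            relationship_type="example_record",
            provider=self.name,
            confidence=0.5,
        )
        return result

    def query_ip(self, ip: str) -> ProviderResult:
        # No IP support in this sketch; an empty result is a valid answer.
        return ProviderResult()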
@ -132,6 +133,8 @@ class BaseProvider(ABC):
                     target_indicator: str = "") -> Optional[requests.Response]:
        """
        Make a rate-limited HTTP request.
        FIXED: Returns response without automatically raising HTTPError exceptions.
        Individual providers should handle status codes appropriately.
        """
        if self._is_stop_requested():
            print(f"Request cancelled before start: {url}")
@ -168,8 +171,14 @@ class BaseProvider(ABC):
                raise ValueError(f"Unsupported HTTP method: {method}")

            print(f"Response status: {response.status_code}")
            response.raise_for_status()

            # FIXED: Don't automatically raise for HTTP error status codes
            # Let individual providers handle status codes appropriately
            # Only count 2xx responses as successful
            if 200 <= response.status_code < 300:
                self.successful_requests += 1
            else:
                self.failed_requests += 1

            duration_ms = (time.time() - start_time) * 1000
            self.logger.log_api_request(

File diff suppressed because it is too large
@ -1,15 +1,16 @@
# dnsrecon/providers/dns_provider.py

from dns import resolver, reversename
from typing import List, Dict, Any, Tuple
from typing import Dict
from .base_provider import BaseProvider
from utils.helpers import _is_valid_ip, _is_valid_domain
from core.provider_result import ProviderResult
from utils.helpers import _is_valid_ip, _is_valid_domain, get_ip_version


class DNSProvider(BaseProvider):
    """
    Provider for standard DNS resolution and reverse DNS lookups.
    Now uses session-specific configuration.
    Now returns standardized ProviderResult objects with IPv4 and IPv6 support.
    """

    def __init__(self, name=None, session_config=None):
@ -25,7 +26,6 @@ class DNSProvider(BaseProvider):
        self.resolver = resolver.Resolver()
        self.resolver.timeout = 5
        self.resolver.lifetime = 10
        #self.resolver.nameservers = ['127.0.0.1']

    def get_name(self) -> str:
        """Return the provider name."""
@ -47,78 +47,116 @@ class DNSProvider(BaseProvider):
        """DNS is always available - no API key required."""
        return True

    def query_domain(self, domain: str) -> List[Tuple[str, str, str, float, Dict[str, Any]]]:
    def query_domain(self, domain: str) -> ProviderResult:
        """
        Query DNS records for the domain to discover relationships.
        ...
        Query DNS records for the domain to discover relationships and attributes.
        FIXED: Now creates separate attributes for each DNS record type.

        Args:
            domain: Domain to investigate

        Returns:
            ProviderResult containing discovered relationships and attributes
        """
        if not _is_valid_domain(domain):
            return []
            return ProviderResult()

        relationships = []
        result = ProviderResult()

        # Query all record types
        # Query all record types - each gets its own attribute
        for record_type in ['A', 'AAAA', 'CNAME', 'MX', 'NS', 'SOA', 'TXT', 'SRV', 'CAA']:
            try:
                relationships.extend(self._query_record(domain, record_type))
            except resolver.NoAnswer:
                self._query_record(domain, record_type, result)
            #except resolver.NoAnswer:
                # This is not an error, just a confirmation that the record doesn't exist.
                self.logger.logger.debug(f"No {record_type} record found for {domain}")
                #self.logger.logger.debug(f"No {record_type} record found for {domain}")
            except Exception as e:
                self.failed_requests += 1
                self.logger.logger.debug(f"{record_type} record query failed for {domain}: {e}")
                # Optionally, you might want to re-raise other, more serious exceptions.

        return relationships
        return result

    def query_ip(self, ip: str) -> List[Tuple[str, str, str, float, Dict[str, Any]]]:
    def query_ip(self, ip: str) -> ProviderResult:
        """
        Query reverse DNS for the IP address.
        Query reverse DNS for the IP address (supports both IPv4 and IPv6).

        Args:
            ip: IP address to investigate
            ip: IP address to investigate (IPv4 or IPv6)

        Returns:
            List of relationships discovered from reverse DNS
            ProviderResult containing discovered relationships and attributes
        """
        if not _is_valid_ip(ip):
            return []
            return ProviderResult()

        relationships = []
        result = ProviderResult()
        ip_version = get_ip_version(ip)

        try:
            # Perform reverse DNS lookup
            # Perform reverse DNS lookup (works for both IPv4 and IPv6)
            self.total_requests += 1
            reverse_name = reversename.from_address(ip)
            response = self.resolver.resolve(reverse_name, 'PTR')
            self.successful_requests += 1

            ptr_records = []
            for ptr_record in response:
                hostname = str(ptr_record).rstrip('.')

                if _is_valid_domain(hostname):
                    # Determine appropriate forward relationship type based on IP version
                    if ip_version == 6:
                        relationship_type = 'dns_aaaa_record'
                        record_prefix = 'AAAA'
                    else:
                        relationship_type = 'dns_a_record'
                        record_prefix = 'A'

                    # Add the relationship
                    result.add_relationship(
                        source_node=ip,
                        target_node=hostname,
                        relationship_type='dns_ptr_record',
                        provider=self.name,
                        confidence=0.8,
                        raw_data={
                            'query_type': 'PTR',
                            'ip_address': ip,
                            'ip_version': ip_version,
                            'hostname': hostname,
                            'ttl': response.ttl
                        }
                    )

                    relationships.append((
                        ip,
                        hostname,
                        'ptr_record',
                        0.8,
                        raw_data
                    ))
                    # Add to PTR records list
                    ptr_records.append(f"PTR: {hostname}")

                    # Log the relationship discovery
                    self.log_relationship_discovery(
                        source_node=ip,
                        target_node=hostname,
                        relationship_type='ptr_record',
                        relationship_type='dns_ptr_record',
                        confidence_score=0.8,
                        raw_data=raw_data,
                        discovery_method="reverse_dns_lookup"
                        raw_data={
                            'query_type': 'PTR',
                            'ip_address': ip,
                            'ip_version': ip_version,
                            'hostname': hostname,
                            'ttl': response.ttl
                        },
                        discovery_method=f"reverse_dns_lookup_ipv{ip_version}"
                    )

            # Add PTR records as separate attribute
            if ptr_records:
                result.add_attribute(
                    target_node=ip,
                    name='ptr_records',  # Specific name for PTR records
                    value=ptr_records,
                    attr_type='dns_record',
                    provider=self.name,
                    confidence=0.8,
                    metadata={'ttl': response.ttl, 'ip_version': ip_version}
                )

        except resolver.NXDOMAIN:
@ -130,22 +168,28 @@ class DNSProvider(BaseProvider):
            # Re-raise the exception so the scanner can handle the failure
            raise e

        return relationships
        return result
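Aside: dnspython builds the PTR query name the same way for both address families, which is why the method above needs no IPv4/IPv6 branching for the lookup itself. A small hedged sketch (live resolution results will vary, and NXDOMAIN/NoAnswer can be raised):

from dns import resolver, reversename

# reversename.from_address works for IPv4 and IPv6 alike.
print(reversename.from_address('8.8.8.8'))                # 8.8.8.8.in-addr.arpa.
print(reversename.from_address('2001:4860:4860::8888'))   # ...ip6.arpa.

# A PTR lookup against the reversed name.
answer = resolver.resolve(reversename.from_address('8.8.8.8'), 'PTR')
print([str(r).rstrip('.') for r in answer])               # e.g. ['dns.google']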

    def _query_record(self, domain: str, record_type: str) -> List[Tuple[str, str, str, float, Dict[str, Any]]]:
    def _query_record(self, domain: str, record_type: str, result: ProviderResult) -> None:
        """
        Query a specific type of DNS record for the domain.
        FIXED: Query DNS records with unique attribute names for each record type.
        Enhanced to better handle IPv6 AAAA records.
        """
        relationships = []
        try:
            self.total_requests += 1
            response = self.resolver.resolve(domain, record_type)
            self.successful_requests += 1

            dns_records = []

            for record in response:
                target = ""
                if record_type in ['A', 'AAAA']:
                    target = str(record)
                    # Validate that the IP address is properly formed
                    if not _is_valid_ip(target):
                        self.logger.logger.debug(f"Invalid IP address in {record_type} record: {target}")
                        continue
                elif record_type in ['CNAME', 'NS', 'PTR']:
                    target = str(record.target).rstrip('.')
                elif record_type == 'MX':
@ -153,32 +197,56 @@ class DNSProvider(BaseProvider):
                elif record_type == 'SOA':
                    target = str(record.mname).rstrip('.')
                elif record_type in ['TXT']:
                    # TXT records are treated as metadata, not relationships.
                    # Keep raw TXT record value
                    txt_value = str(record).strip('"')
                    dns_records.append(txt_value)  # Just the value for TXT
                    continue
                elif record_type == 'SRV':
                    target = str(record.target).rstrip('.')
                elif record_type == 'CAA':
                    target = f"{record.flags} {record.tag.decode('utf-8')} \"{record.value.decode('utf-8')}\""
                    # Keep raw CAA record format
                    caa_value = f"{record.flags} {record.tag.decode('utf-8')} \"{record.value.decode('utf-8')}\""
                    dns_records.append(caa_value)  # Just the value for CAA
                    continue
                else:
                    target = str(record)

                if target:
                    # Determine IP version for metadata if this is an IP record
                    ip_version = None
                    if record_type in ['A', 'AAAA'] and _is_valid_ip(target):
                        ip_version = get_ip_version(target)

                    raw_data = {
                        'query_type': record_type,
                        'domain': domain,
                        'value': target,
                        'ttl': response.ttl
                    }
                    relationship_type = f"{record_type.lower()}_record"
                    confidence = 0.8  # Default confidence for DNS records

                    relationships.append((
                        domain,
                        target,
                        relationship_type,
                        confidence,
                        raw_data
                    ))
                    if ip_version:
                        raw_data['ip_version'] = ip_version

                    relationship_type = f"dns_{record_type.lower()}_record"
                    confidence = 0.8

                    # Add relationship
                    result.add_relationship(
                        source_node=domain,
                        target_node=target,
                        relationship_type=relationship_type,
                        provider=self.name,
                        confidence=confidence,
                        raw_data=raw_data
                    )

                    # Add target to records list
                    dns_records.append(target)

                    # Log relationship discovery with IP version info
                    discovery_method = f"dns_{record_type.lower()}_record"
                    if ip_version:
                        discovery_method += f"_ipv{ip_version}"

                    self.log_relationship_discovery(
                        source_node=domain,
@ -186,13 +254,33 @@ class DNSProvider(BaseProvider):
                        relationship_type=relationship_type,
                        confidence_score=confidence,
                        raw_data=raw_data,
                        discovery_method=f"dns_{record_type.lower()}_record"
                        discovery_method=discovery_method
                    )

            # FIXED: Create attribute with specific name for each record type
            if dns_records:
                # Use record type specific attribute name (e.g., 'a_records', 'mx_records', etc.)
                attribute_name = f"{record_type.lower()}_records"

                metadata = {'record_type': record_type, 'ttl': response.ttl}

                # Add IP version info for A/AAAA records
                if record_type in ['A', 'AAAA'] and dns_records:
                    first_ip_version = get_ip_version(dns_records[0])
                    if first_ip_version:
                        metadata['ip_version'] = first_ip_version

                result.add_attribute(
                    target_node=domain,
                    name=attribute_name,  # UNIQUE name for each record type!
                    value=dns_records,
                    attr_type='dns_record_list',
                    provider=self.name,
                    confidence=0.8,
                    metadata=metadata
                )

        except Exception as e:
            self.failed_requests += 1
            self.logger.logger.debug(f"{record_type} record query failed for {domain}: {e}")
            # Re-raise the exception so the scanner can handle it
            raise e

        return relationships
@ -1,20 +1,20 @@
# dnsrecon/providers/shodan_provider.py

import json
import os
from pathlib import Path
from typing import List, Dict, Any, Tuple
from typing import Dict, Any
from datetime import datetime, timezone
import requests

from .base_provider import BaseProvider
from utils.helpers import _is_valid_ip, _is_valid_domain
from core.provider_result import ProviderResult
from utils.helpers import _is_valid_ip, _is_valid_domain, get_ip_version, normalize_ip


class ShodanProvider(BaseProvider):
    """
    Provider for querying Shodan API for IP address information.
    Now uses session-specific API keys, is limited to IP-only queries, and includes caching.
    Now returns standardized ProviderResult objects with caching support for IPv4 and IPv6.
    """

    def __init__(self, name=None, session_config=None):
@ -53,8 +53,19 @@ class ShodanProvider(BaseProvider):
        return {'domains': False, 'ips': True}

    def _get_cache_file_path(self, ip: str) -> Path:
        """Generate cache file path for an IP address."""
        """
        Generate cache file path for an IP address (IPv4 or IPv6).
        IPv6 addresses contain colons which are replaced with underscores for filesystem safety.
        """
        # Normalize the IP address first to ensure consistent caching
        normalized_ip = normalize_ip(ip)
        if not normalized_ip:
            # Fallback for invalid IPs
            safe_ip = ip.replace('.', '_').replace(':', '_')
        else:
            # Replace problematic characters for both IPv4 and IPv6
            safe_ip = normalized_ip.replace('.', '_').replace(':', '_')

        return self.cache_dir / f"{safe_ip}.json"
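Aside: because the filename mapping is deterministic, repeated queries for the same address hit the same cache file. A hedged illustration of the substitution; normalize_ip is the project's own helper, and the ipaddress-based normalization shown here is an assumption about its behavior:

import ipaddress

def _safe_cache_name(ip: str) -> str:
    # Compress/normalize first, e.g. '2001:0db8::0001' -> '2001:db8::1'.
    normalized = str(ipaddress.ip_address(ip))
    # Dots and colons are not filesystem-friendly on every platform.
    return normalized.replace('.', '_').replace(':', '_') + '.json'

print(_safe_cache_name('8.8.8.8'))          # 8_8_8_8.json
print(_safe_cache_name('2001:0db8::0001'))  # 2001_db8__1.json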

    def _get_cache_status(self, cache_file_path: Path) -> str:
@ -85,115 +96,254 @@ class ShodanProvider(BaseProvider):
        except (json.JSONDecodeError, ValueError, KeyError):
            return "stale"

    def query_domain(self, domain: str) -> List[Tuple[str, str, str, float, Dict[str, Any]]]:
    def query_domain(self, domain: str) -> ProviderResult:
        """
        Domain queries are no longer supported for the Shodan provider.
        """
        return []

    def query_ip(self, ip: str) -> List[Tuple[str, str, str, float, Dict[str, Any]]]:
        Args:
            domain: Domain to investigate

        Returns:
            Empty ProviderResult
        """
        Query Shodan for information about an IP address, with caching of processed relationships.
        return ProviderResult()

    def query_ip(self, ip: str) -> ProviderResult:
        """
        Query Shodan for information about an IP address (IPv4 or IPv6), with caching of processed data.

        Args:
            ip: IP address to investigate (IPv4 or IPv6)

        Returns:
            ProviderResult containing discovered relationships and attributes
        """
        if not _is_valid_ip(ip) or not self.is_available():
            return []
            return ProviderResult()

        cache_file = self._get_cache_file_path(ip)
        # Normalize IP address for consistent processing
        normalized_ip = normalize_ip(ip)
        if not normalized_ip:
            return ProviderResult()

        cache_file = self._get_cache_file_path(normalized_ip)
        cache_status = self._get_cache_status(cache_file)

        relationships = []
        result = ProviderResult()

        try:
            if cache_status == "fresh":
                relationships = self._load_from_cache(cache_file)
                self.logger.logger.info(f"Using cached Shodan relationships for {ip}")
                result = self._load_from_cache(cache_file)
                self.logger.logger.info(f"Using cached Shodan data for {normalized_ip}")
            else:  # "stale" or "not_found"
                url = f"{self.base_url}/shodan/host/{ip}"
                url = f"{self.base_url}/shodan/host/{normalized_ip}"
                params = {'key': self.api_key}
                response = self.make_request(url, method="GET", params=params, target_indicator=ip)
                response = self.make_request(url, method="GET", params=params, target_indicator=normalized_ip)

                if response and response.status_code == 200:
                    data = response.json()
                    # Process the data into relationships BEFORE caching
                    relationships = self._process_shodan_data(ip, data)
                    self._save_to_cache(cache_file, relationships)  # Save the processed relationships
                    # Process the data into ProviderResult BEFORE caching
                    result = self._process_shodan_data(normalized_ip, data)
                    self._save_to_cache(cache_file, result, data)  # Save both result and raw data
                elif response and response.status_code == 404:
                    # Handle 404 "No information available" as a successful empty result
                    try:
                        error_data = response.json()
                        if "No information available" in error_data.get('error', ''):
                            # This is a successful query - Shodan just has no data
                            self.logger.logger.debug(f"Shodan has no information for {normalized_ip}")
                            result = ProviderResult()  # Empty but successful result
                            # Cache the empty result to avoid repeated queries
                            self._save_to_cache(cache_file, result, {'error': 'No information available'})
                        else:
                            # Some other 404 error - treat as failure
                            raise requests.exceptions.RequestException(f"Shodan API returned 404: {error_data}")
                    except (ValueError, KeyError):
                        # Could not parse JSON response - treat as failure
                        raise requests.exceptions.RequestException("Shodan API returned 404 with unparseable response")
                elif cache_status == "stale":
                    # If the API fails on a stale cache, use the old data
                    relationships = self._load_from_cache(cache_file)
                    result = self._load_from_cache(cache_file)
                else:
                    # Other HTTP error codes should be treated as failures
                    status_code = response.status_code if response else "No response"
                    raise requests.exceptions.RequestException(f"Shodan API returned HTTP {status_code}")

        except requests.exceptions.RequestException as e:
            self.logger.logger.error(f"Shodan API query failed for {ip}: {e}")
            self.logger.logger.info(f"Shodan API query returned no info for {normalized_ip}: {e}")
            if cache_status == "stale":
                relationships = self._load_from_cache(cache_file)
                result = self._load_from_cache(cache_file)
            else:
                # Re-raise for retry scheduling - but only for actual failures
                raise e

        return relationships
        return result

    def _load_from_cache(self, cache_file_path: Path) -> List[Tuple[str, str, str, float, Dict[str, Any]]]:
        """Load processed Shodan relationships from a cache file."""
    def _load_from_cache(self, cache_file_path: Path) -> ProviderResult:
        """Load processed Shodan data from a cache file."""
        try:
            with open(cache_file_path, 'r') as f:
                cache_content = json.load(f)
            # The entire file content is the list of relationships
            return cache_content.get("relationships", [])
        except (json.JSONDecodeError, FileNotFoundError, KeyError):
            return []

    def _save_to_cache(self, cache_file_path: Path, relationships: List[Tuple[str, str, str, float, Dict[str, Any]]]) -> None:
        """Save processed Shodan relationships to a cache file."""
            result = ProviderResult()

            # Reconstruct relationships
            for rel_data in cache_content.get("relationships", []):
                result.add_relationship(
                    source_node=rel_data["source_node"],
                    target_node=rel_data["target_node"],
                    relationship_type=rel_data["relationship_type"],
                    provider=rel_data["provider"],
                    confidence=rel_data["confidence"],
                    raw_data=rel_data.get("raw_data", {})
                )

            # Reconstruct attributes
            for attr_data in cache_content.get("attributes", []):
                result.add_attribute(
                    target_node=attr_data["target_node"],
                    name=attr_data["name"],
                    value=attr_data["value"],
                    attr_type=attr_data["type"],
                    provider=attr_data["provider"],
                    confidence=attr_data["confidence"],
                    metadata=attr_data.get("metadata", {})
                )

            return result

        except (json.JSONDecodeError, FileNotFoundError, KeyError):
            return ProviderResult()

    def _save_to_cache(self, cache_file_path: Path, result: ProviderResult, raw_data: Dict[str, Any]) -> None:
        """Save processed Shodan data to a cache file."""
        try:
            cache_data = {
                "last_upstream_query": datetime.now(timezone.utc).isoformat(),
                "relationships": relationships
                "raw_data": raw_data,  # Preserve original for forensic purposes
                "relationships": [
                    {
                        "source_node": rel.source_node,
                        "target_node": rel.target_node,
                        "relationship_type": rel.relationship_type,
                        "confidence": rel.confidence,
                        "provider": rel.provider,
                        "raw_data": rel.raw_data
                    } for rel in result.relationships
                ],
                "attributes": [
                    {
                        "target_node": attr.target_node,
                        "name": attr.name,
                        "value": attr.value,
                        "type": attr.type,
                        "provider": attr.provider,
                        "confidence": attr.confidence,
                        "metadata": attr.metadata
                    } for attr in result.attributes
                ]
            }
            with open(cache_file_path, 'w') as f:
                json.dump(cache_data, f, separators=(',', ':'))
                json.dump(cache_data, f, separators=(',', ':'), default=str)
        except Exception as e:
            self.logger.logger.warning(f"Failed to save Shodan cache for {cache_file_path.name}: {e}")

    def _process_shodan_data(self, ip: str, data: Dict[str, Any]) -> List[Tuple[str, str, str, float, Dict[str, Any]]]:
    def _process_shodan_data(self, ip: str, data: Dict[str, Any]) -> ProviderResult:
        """
        Process Shodan data to extract relationships.
        VERIFIED: Process Shodan data creating ISP nodes with ASN attributes and proper relationships.
        Enhanced to include IP version information for IPv6 addresses.
        """
        relationships = []
        result = ProviderResult()

        # Extract hostname relationships
        hostnames = data.get('hostnames', [])
        for hostname in hostnames:
        # Determine IP version for metadata
        ip_version = get_ip_version(ip)

        # VERIFIED: Extract ISP information and create proper ISP node with ASN
        isp_name = data.get('org')
        asn_value = data.get('asn')

        if isp_name and asn_value:
            # Create relationship from IP to ISP
            result.add_relationship(
                source_node=ip,
                target_node=isp_name,
                relationship_type='shodan_isp',
                provider=self.name,
                confidence=0.9,
                raw_data={'asn': asn_value, 'shodan_org': isp_name, 'ip_version': ip_version}
            )

            # Add ASN as attribute to the ISP node
            result.add_attribute(
                target_node=isp_name,
                name='asn',
                value=asn_value,
                attr_type='isp_info',
                provider=self.name,
                confidence=0.9,
                metadata={'description': 'Autonomous System Number from Shodan', 'ip_version': ip_version}
            )

            # Also add organization name as attribute to ISP node for completeness
            result.add_attribute(
                target_node=isp_name,
                name='organization_name',
                value=isp_name,
                attr_type='isp_info',
                provider=self.name,
                confidence=0.9,
                metadata={'description': 'Organization name from Shodan', 'ip_version': ip_version}
            )

        # Process hostnames (reverse DNS)
        for key, value in data.items():
            if key == 'hostnames':
                for hostname in value:
                    if _is_valid_domain(hostname):
                        relationships.append((
                            ip,
                            hostname,
                            'a_record',
                            0.8,
                            data
                        ))
                        # Use appropriate relationship type based on IP version
                        if ip_version == 6:
                            relationship_type = 'shodan_aaaa_record'
                        else:
                            relationship_type = 'shodan_a_record'

                        result.add_relationship(
                            source_node=ip,
                            target_node=hostname,
                            relationship_type=relationship_type,
                            provider=self.name,
                            confidence=0.8,
                            raw_data={**data, 'ip_version': ip_version}
                        )
                        self.log_relationship_discovery(
                            source_node=ip,
                            target_node=hostname,
                            relationship_type='a_record',
                            relationship_type=relationship_type,
                            confidence_score=0.8,
                            raw_data=data,
                            discovery_method="shodan_host_lookup"
                            raw_data={**data, 'ip_version': ip_version},
                            discovery_method=f"shodan_host_lookup_ipv{ip_version}"
                        )
            elif key == 'ports':
                # Add open ports as attributes to the IP
                for port in value:
                    result.add_attribute(
                        target_node=ip,
                        name='shodan_open_port',
                        value=port,
                        attr_type='shodan_network_info',
                        provider=self.name,
                        confidence=0.9,
                        metadata={'ip_version': ip_version}
                    )
            elif isinstance(value, (str, int, float, bool)) and value is not None:
                # Add other Shodan fields as IP attributes (keep raw field names)
                result.add_attribute(
                    target_node=ip,
                    name=key,  # Raw field name from Shodan API
                    value=value,
                    attr_type='shodan_info',
                    provider=self.name,
                    confidence=0.9,
                    metadata={'ip_version': ip_version}
                )

        # Extract ASN relationship
        asn = data.get('asn')
        if asn:
            asn_name = f"AS{asn[2:]}" if isinstance(asn, str) and asn.startswith('AS') else f"AS{asn}"
            relationships.append((
                ip,
                asn_name,
                'asn_membership',
                0.7,
                data
            ))
            self.log_relationship_discovery(
                source_node=ip,
                target_node=asn_name,
                relationship_type='asn_membership',
                confidence_score=0.7,
                raw_data=data,
                discovery_method="shodan_asn_lookup"
            )

        return relationships
        return result
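Aside: for orientation, a hedged sketch of the Shodan host-lookup fields this processor consumes; the field names match the public /shodan/host/{ip} response, but the values are invented:

# Illustrative subset of a Shodan host response.
data = {
    'hostnames': ['mail.example.com'],  # becomes shodan_a_record / shodan_aaaa_record edges
    'ports': [25, 443],                 # each becomes a shodan_open_port attribute
    'org': 'ExampleNet',                # ISP node linked via a shodan_isp edge
    'asn': 'AS64496',                   # stored as an 'asn' attribute on the ISP node
    'country_name': 'Germany',          # scalar fields become shodan_info attributes
}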
@ -1,10 +1,10 @@
Flask>=2.3.3
networkx>=3.1
requests>=2.31.0
python-dateutil>=2.8.2
Werkzeug>=2.3.7
urllib3>=2.0.0
dnspython>=2.4.2
Flask
networkx
requests
python-dateutil
Werkzeug
urllib3
dnspython
gunicorn
redis
python-dotenv
2217
static/css/main.css
2217
static/css/main.css
File diff suppressed because it is too large
@ -1,6 +1,7 @@
|
||||
/**
|
||||
* Graph visualization module for DNSRecon
|
||||
* Handles network graph rendering using vis.js with proper large entity node hiding
|
||||
* UPDATED: Now compatible with a strictly flat, unified data model for attributes.
|
||||
*/
|
||||
const contextMenuCSS = `
|
||||
.graph-context-menu {
|
||||
@ -213,7 +214,6 @@ class GraphManager {
|
||||
});
|
||||
|
||||
document.body.appendChild(this.contextMenu);
|
||||
console.log('Context menu created and added to body');
|
||||
}
|
||||
|
||||
/**
|
||||
@ -290,7 +290,6 @@ class GraphManager {
|
||||
// FIXED: Right-click context menu
|
||||
this.container.addEventListener('contextmenu', (event) => {
|
||||
event.preventDefault();
|
||||
console.log('Right-click detected at:', event.offsetX, event.offsetY);
|
||||
|
||||
// Get coordinates relative to the canvas
|
||||
const pointer = {
|
||||
@ -299,7 +298,6 @@ class GraphManager {
|
||||
};
|
||||
|
||||
const nodeId = this.network.getNodeAt(pointer);
|
||||
console.log('Node at pointer:', nodeId);
|
||||
|
||||
if (nodeId) {
|
||||
// Pass the original client event for positioning
|
||||
@ -340,19 +338,12 @@ class GraphManager {
|
||||
// Stabilization events with progress
|
||||
this.network.on('stabilizationProgress', (params) => {
|
||||
const progress = params.iterations / params.total;
|
||||
this.updateStabilizationProgress(progress);
|
||||
});
|
||||
|
||||
this.network.on('stabilizationIterationsDone', () => {
|
||||
this.onStabilizationComplete();
|
||||
});
|
||||
|
||||
// Selection events
|
||||
this.network.on('select', (params) => {
|
||||
console.log('Selected nodes:', params.nodes);
|
||||
console.log('Selected edges:', params.edges);
|
||||
});
|
||||
|
||||
// Click away to hide context menu
|
||||
document.addEventListener('click', (e) => {
|
||||
if (!this.contextMenu.contains(e.target)) {
|
||||
@ -376,28 +367,62 @@ class GraphManager {
|
||||
this.initialize();
|
||||
}
|
||||
|
||||
this.initialTargetIds = new Set(graphData.initial_targets || []);
|
||||
// Check if we have actual data to display
|
||||
const hasData = graphData.nodes.length > 0 || graphData.edges.length > 0;
|
||||
|
||||
// Handle placeholder visibility
|
||||
const placeholder = this.container.querySelector('.graph-placeholder');
|
||||
if (placeholder) {
|
||||
if (hasData) {
|
||||
placeholder.style.display = 'none';
|
||||
} else {
|
||||
placeholder.style.display = 'flex';
|
||||
// Early return if no data to process
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
this.largeEntityMembers.clear();
|
||||
const largeEntityMap = new Map();
|
||||
|
||||
graphData.nodes.forEach(node => {
|
||||
if (node.type === 'large_entity' && node.attributes && Array.isArray(node.attributes.nodes)) {
|
||||
node.attributes.nodes.forEach(nodeId => {
|
||||
if (node.type === 'large_entity' && node.attributes) {
|
||||
const nodesAttribute = this.findAttributeByName(node.attributes, 'nodes');
|
||||
if (nodesAttribute && Array.isArray(nodesAttribute.value)) {
|
||||
nodesAttribute.value.forEach(nodeId => {
|
||||
largeEntityMap.set(nodeId, node.id);
|
||||
this.largeEntityMembers.add(nodeId);
|
||||
});
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
const filteredNodes = graphData.nodes.filter(node => {
|
||||
// Only include nodes that are NOT members of large entities, but always include the container itself
|
||||
return !this.largeEntityMembers.has(node.id) || node.type === 'large_entity';
|
||||
});
|
||||
|
||||
console.log(`Filtered ${graphData.nodes.length - filteredNodes.length} large entity member nodes from visualization`);
|
||||
|
||||
// Process only the filtered nodes
|
||||
// Process nodes with proper certificate coloring
|
||||
const processedNodes = filteredNodes.map(node => {
|
||||
return this.processNode(node);
|
||||
const processed = this.processNode(node);
|
||||
|
||||
// Apply certificate-based coloring here in frontend
|
||||
if (node.type === 'domain' && Array.isArray(node.attributes)) {
|
||||
const certInfo = this.analyzeCertificateInfo(node.attributes);
|
||||
|
||||
if (certInfo.hasExpiredOnly) {
|
||||
// Red for domains with only expired/invalid certificates
|
||||
processed.color = { background: '#ff6b6b', border: '#cc5555' };
|
||||
} else if (!certInfo.hasCertificates) {
|
||||
// Grey for domains with no certificates
|
||||
processed.color = { background: '#c7c7c7', border: '#999999' };
|
||||
}
|
||||
// Valid certificates use default green (handled by processNode)
|
||||
}
|
||||
|
||||
return processed;
|
||||
});
|
||||
|
||||
const mergedEdges = {};
|
||||
@ -434,24 +459,19 @@ class GraphManager {
|
||||
const existingNodeIds = this.nodes.getIds();
|
||||
const existingEdgeIds = this.edges.getIds();
|
||||
|
||||
// Add new nodes with fade-in animation
|
||||
const newNodes = processedNodes.filter(node => !existingNodeIds.includes(node.id));
|
||||
const newEdges = processedEdges.filter(edge => !existingEdgeIds.includes(edge.id));
|
||||
|
||||
// Update existing data
|
||||
this.nodes.update(processedNodes);
|
||||
this.edges.update(processedEdges);
|
||||
|
||||
// After data is loaded, apply filters
|
||||
this.updateFilterControls();
|
||||
this.applyAllFilters();
|
||||
|
||||
// Highlight new additions briefly
|
||||
if (newNodes.length > 0 || newEdges.length > 0) {
|
||||
setTimeout(() => this.highlightNewElements(newNodes, newEdges), 100);
|
||||
}
|
||||
|
||||
// Auto-fit view for small graphs or first update
|
||||
if (processedNodes.length <= 10 || existingNodeIds.length === 0) {
|
||||
setTimeout(() => this.fitView(), 800);
|
||||
}
|
||||
@ -465,9 +485,62 @@ class GraphManager {
|
||||
}
|
||||
}
|
||||
|
||||
analyzeCertificateInfo(attributes) {
|
||||
let hasCertificates = false;
|
||||
let hasValidCertificates = false;
|
||||
let hasExpiredCertificates = false;
|
||||
|
||||
for (const attr of attributes) {
|
||||
const attrName = (attr.name || '').toLowerCase();
|
||||
const attrProvider = (attr.provider || '').toLowerCase();
|
||||
const attrValue = attr.value;
|
||||
|
||||
// Look for certificate attributes from crtsh provider
|
||||
if (attrProvider === 'crtsh' || attrName.startsWith('cert_')) {
|
||||
hasCertificates = true;
|
||||
|
||||
// Check certificate validity using raw attribute names
|
||||
if (attrName === 'cert_is_currently_valid') {
|
||||
if (attrValue === true) {
|
||||
hasValidCertificates = true;
|
||||
} else if (attrValue === false) {
|
||||
hasExpiredCertificates = true;
|
||||
}
|
||||
}
|
||||
// Check for expiry indicators
|
||||
else if (attrName === 'cert_expires_soon' && attrValue === true) {
|
||||
hasExpiredCertificates = true;
|
||||
}
|
||||
else if (attrName.includes('expired') && attrValue === true) {
|
||||
hasExpiredCertificates = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
hasCertificates,
|
||||
hasValidCertificates,
|
||||
hasExpiredCertificates,
|
||||
hasExpiredOnly: hasExpiredCertificates && !hasValidCertificates
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Process node data with styling and metadata
|
||||
* @param {Object} node - Raw node data
|
||||
* UPDATED: Helper method to find an attribute by name in the standardized attributes list
|
||||
* @param {Array} attributes - List of StandardAttribute objects
|
||||
* @param {string} name - Attribute name to find
|
||||
* @returns {Object|null} The attribute object if found, null otherwise
|
||||
*/
|
||||
findAttributeByName(attributes, name) {
|
||||
if (!Array.isArray(attributes)) {
|
||||
return null;
|
||||
}
|
||||
return attributes.find(attr => attr.name === name) || null;
|
||||
}
|
||||
|
||||
/**
|
||||
* UPDATED: Process node data with styling and metadata for the flat data model
|
||||
* @param {Object} node - Raw node data with standardized attributes
|
||||
* @returns {Object} Processed node data
|
||||
*/
|
||||
processNode(node) {
|
||||
@ -478,7 +551,7 @@ class GraphManager {
|
||||
size: this.getNodeSize(node.type),
|
||||
borderColor: this.getNodeBorderColor(node.type),
|
||||
shape: this.getNodeShape(node.type),
|
||||
attributes: node.attributes || {},
|
||||
attributes: node.attributes || [],
|
||||
description: node.description || '',
|
||||
metadata: node.metadata || {},
|
||||
type: node.type,
|
||||
@ -491,26 +564,33 @@ class GraphManager {
|
||||
processedNode.borderWidth = Math.max(2, Math.floor(node.confidence * 5));
|
||||
}
|
||||
|
||||
// Style based on certificate validity
|
||||
if (node.type === 'domain') {
|
||||
if (node.attributes && node.attributes.certificates && node.attributes.certificates.has_valid_cert === false) {
|
||||
processedNode.color = { background: '#888888', border: '#666666' };
|
||||
// FIXED: Certificate-based domain coloring
|
||||
if (node.type === 'domain' && Array.isArray(node.attributes)) {
|
||||
const certInfo = this.analyzeCertificateInfo(node.attributes);
|
||||
|
||||
if (certInfo.hasExpiredOnly) {
|
||||
// Red for domains with only expired/invalid certificates
|
||||
processedNode.color = '#ff6b6b';
|
||||
processedNode.borderColor = '#cc5555';
|
||||
} else if (!certInfo.hasCertificates) {
|
||||
// Grey for domains with no certificates
|
||||
processedNode.color = '#c7c7c7';
|
||||
processedNode.borderColor = '#999999';
|
||||
}
|
||||
// Green for valid certificates (default color)
|
||||
}
|
||||
|
||||
// Handle merged correlation objects (similar to large entities)
|
||||
// Handle merged correlation objects
|
||||
if (node.type === 'correlation_object') {
|
||||
const metadata = node.metadata || {};
|
||||
const values = metadata.values || [];
|
||||
const mergeCount = metadata.merge_count || 1;
|
||||
|
||||
if (mergeCount > 1) {
|
||||
// Display as merged correlation container
|
||||
processedNode.label = `Correlations (${mergeCount})`;
|
||||
processedNode.title = `Merged correlation container with ${mergeCount} values: ${values.slice(0, 3).join(', ')}${values.length > 3 ? '...' : ''}`;
|
||||
processedNode.borderWidth = 3; // Thicker border for merged nodes
|
||||
processedNode.borderWidth = 3;
|
||||
} else {
|
||||
// Single correlation value
|
||||
const value = Array.isArray(values) && values.length > 0 ? values[0] : (metadata.value || 'Unknown');
|
||||
const displayValue = typeof value === 'string' && value.length > 20 ? value.substring(0, 17) + '...' : value;
|
||||
processedNode.label = `${displayValue}`;
|
||||
@ -521,6 +601,7 @@ class GraphManager {
|
||||
return processedNode;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Process edge data with styling and metadata
|
||||
* @param {Object} edge - Raw edge data
|
||||
@ -584,7 +665,8 @@ class GraphManager {
|
||||
const colors = {
|
||||
'domain': '#00ff41', // Green
|
||||
'ip': '#ff9900', // Amber
|
||||
'asn': '#00aaff', // Blue
|
||||
'isp': '#00aaff', // Blue
|
||||
'ca': '#ff6b6b', // Red
|
||||
'large_entity': '#ff6b6b', // Red for large entities
|
||||
'correlation_object': '#9620c0ff'
|
||||
};
|
||||
@ -600,7 +682,8 @@ class GraphManager {
|
||||
const borderColors = {
|
||||
'domain': '#00aa2e',
|
||||
'ip': '#cc7700',
|
||||
'asn': '#0088cc',
|
||||
'isp': '#0088cc',
|
||||
'ca': '#cc5555',
|
||||
'correlation_object': '#c235c9ff'
|
||||
};
|
||||
return borderColors[nodeType] || '#666666';
|
||||
@ -615,9 +698,10 @@ class GraphManager {
|
||||
const sizes = {
|
||||
'domain': 12,
|
||||
'ip': 14,
|
||||
'asn': 16,
|
||||
'isp': 16,
|
||||
'ca': 16,
|
||||
'correlation_object': 8,
|
||||
'large_entity': 5
|
||||
'large_entity': 25
|
||||
};
|
||||
return sizes[nodeType] || 12;
|
||||
}
|
||||
@ -631,9 +715,10 @@ class GraphManager {
|
||||
const shapes = {
|
||||
'domain': 'dot',
|
||||
'ip': 'square',
|
||||
'asn': 'triangle',
|
||||
'isp': 'triangle',
|
||||
'ca': 'diamond',
|
||||
'correlation_object': 'hexagon',
|
||||
'large_entity': 'database'
|
||||
'large_entity': 'dot'
|
||||
};
|
||||
return shapes[nodeType] || 'dot';
|
||||
}
|
||||
@ -889,15 +974,6 @@ class GraphManager {
|
||||
}, 2000);
|
||||
}
|
||||
|
||||
/**
|
||||
* Update stabilization progress
|
||||
* @param {number} progress - Progress value (0-1)
|
||||
*/
|
||||
updateStabilizationProgress(progress) {
|
||||
// Could show a progress indicator if needed
|
||||
console.log(`Graph stabilization: ${(progress * 100).toFixed(1)}%`);
|
||||
}
|
||||
    /**
     * Handle stabilization completion
     */
@@ -982,7 +1058,7 @@ class GraphManager {
        this.edges.clear();
        this.history = [];
        this.largeEntityMembers.clear(); // Clear large entity tracking
        this.clearInitialTargets();
        this.initialTargetIds.clear();

        // Show placeholder
        const placeholder = this.container.querySelector('.graph-placeholder');
@@ -1085,11 +1161,11 @@ class GraphManager {
            adjacencyList
        );

        console.log(`Reachability analysis complete:`, {
        /*console.log(`Reachability analysis complete:`, {
            reachable: analysis.reachableNodes.size,
            unreachable: analysis.unreachableNodes.size,
            clusters: analysis.isolatedClusters.length
        });
        });*/

        return analysis;
    }
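The analysis object consumed here (reachableNodes, unreachableNodes, isolatedClusters) implies a breadth-first reachability pass from the initial targets. A hedged sketch of that pass, assuming adjacencyList is a Map of node id to neighbor ids (cluster grouping omitted for brevity):

// Sketch: BFS reachability from the initial targets over an adjacency list.
function computeReachability(initialTargetIds, allNodeIds, adjacencyList) {
    const reachableNodes = new Set();
    const queue = [...initialTargetIds];
    while (queue.length > 0) {
        const id = queue.shift();
        if (reachableNodes.has(id)) continue;
        reachableNodes.add(id);
        for (const neighbor of adjacencyList.get(id) || []) {
            if (!reachableNodes.has(neighbor)) queue.push(neighbor);
        }
    }
    const unreachableNodes = new Set([...allNodeIds].filter(id => !reachableNodes.has(id)));
    return { reachableNodes, unreachableNodes };
}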
@@ -1157,16 +1233,6 @@ class GraphManager {
        };
    }

    addInitialTarget(targetId) {
        this.initialTargetIds.add(targetId);
        console.log("Initial targets:", this.initialTargetIds);
    }

    clearInitialTargets() {
        this.initialTargetIds.clear();
        console.log("Initial targets cleared.");
    }

    updateFilterControls() {
        if (!this.filterPanel) return;
        const nodeTypes = new Set(this.nodes.get().map(n => n.type));
@@ -1204,7 +1270,6 @@ class GraphManager {
     * Replaces the existing applyAllFilters() method
     */
    applyAllFilters() {
        console.log("Applying filters with enhanced reachability analysis...");
        if (this.nodes.length === 0) return;

        // Get filter criteria from UI
@@ -1261,22 +1326,10 @@ class GraphManager {
            timestamp: Date.now()
        };

        // Apply hiding with forensic documentation
        const updates = nodesToHide.map(id => ({
            id: id,
            hidden: true,
            forensicNote: `Hidden due to reachability analysis from ${nodeId}`
        }));

        const updates = nodesToHide.map(id => ({ id: id, hidden: true }));
        this.nodes.update(updates);
        this.addToHistory('hide', historyData);

        console.log(`Forensic hide operation: ${nodesToHide.length} nodes hidden`, {
            originalTarget: nodeId,
            cascadeNodes: nodesToHide.length - 1,
            isolatedClusters: analysis.isolatedClusters.length
        });

        return {
            hiddenNodes: nodesToHide,
            isolatedClusters: analysis.isolatedClusters
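Each hide operation lands in this.history via addToHistory, which is what makes it reversible. A minimal sketch of the corresponding undo path (the entry shape, including a type tag and a hiddenNodes field, is an assumption based on the historyData built above):

// Sketch: reverting the most recent 'hide' entry from the history stack.
undoLastHide() {
    const entry = this.history.pop();
    if (!entry || entry.type !== 'hide') return;
    // Un-hide every node recorded in the history entry.
    const updates = entry.data.hiddenNodes.map(id => ({ id: id, hidden: false }));
    this.nodes.update(updates);
}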
@@ -1360,8 +1413,6 @@ class GraphManager {
        // Handle operation results
        if (!operationFailed) {
            this.addToHistory('delete', historyData);
            console.log(`Forensic delete operation completed:`, historyData.forensicAnalysis);

            return {
                success: true,
                deletedNodes: nodesToDelete,
@@ -1452,7 +1503,6 @@ class GraphManager {
            e.stopPropagation();
            const action = e.currentTarget.dataset.action;
            const nodeId = e.currentTarget.dataset.nodeId;
            console.log('Context menu action:', action, 'for node:', nodeId);
            this.performContextMenuAction(action, nodeId);
            this.hideContextMenu();
        });
@@ -1473,8 +1523,6 @@ class GraphManager {
     * Updates the existing performContextMenuAction() method
     */
    performContextMenuAction(action, nodeId) {
        console.log('Performing enhanced action:', action, 'on node:', nodeId);

        switch (action) {
            case 'focus':
                this.focusOnNode(nodeId);
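The focusOnNode call used here most likely maps onto vis-network's focus API; a sketch under that assumption (the scale and easing values are illustrative):

// Sketch: centering the viewport on a node with vis-network's focus API.
focusOnNode(nodeId) {
    this.network.focus(nodeId, {
        scale: 1.5, // zoom level after the move
        animation: { duration: 500, easingFunction: 'easeInOutQuad' }
    });
    this.network.selectNodes([nodeId]); // highlight the focused node
}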
static/js/main.js: 1803 lines changed (file diff suppressed because it is too large)
@@ -1,5 +1,6 @@
<!DOCTYPE html>
<html lang="en">

<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
@@ -7,8 +8,11 @@
    <link rel="stylesheet" href="{{ url_for('static', filename='css/main.css') }}">
    <script src="https://cdnjs.cloudflare.com/ajax/libs/vis/4.21.0/vis.min.js"></script>
    <link href="https://cdnjs.cloudflare.com/ajax/libs/vis/4.21.0/vis.min.css" rel="stylesheet" type="text/css">
    <link href="https://fonts.googleapis.com/css2?family=Roboto+Mono:wght@300;400;500;700&family=Special+Elite&display=swap" rel="stylesheet">
    <link
        href="https://fonts.googleapis.com/css2?family=Roboto+Mono:wght@300;400;500;700&family=Special+Elite&display=swap"
        rel="stylesheet">
</head>

<body>
    <div class="container">
        <header class="header">
@@ -49,9 +53,9 @@
                    <span class="btn-icon">[STOP]</span>
                    <span>Terminate Scan</span>
                </button>
                <button id="export-results" class="btn btn-secondary">
                <button id="export-options" class="btn btn-secondary">
                    <span class="btn-icon">[EXPORT]</span>
                    <span>Download Results</span>
                    <span>Export Options</span>
                </button>
                <button id="configure-settings" class="btn btn-secondary">
                    <span class="btn-icon">[API]</span>
@@ -95,11 +99,13 @@
                </div>
                <div class="progress-placeholder">
                    <span class="status-label">
                        ⚠️ <strong>Important:</strong> Scanning large public services (e.g., Google, Cloudflare, AWS) is
                        ⚠️ <strong>Important:</strong> Scanning large public services (e.g., Google, Cloudflare,
                        AWS) is
                        <strong>discouraged</strong> due to rate limits (e.g., crt.sh).
                        <br><br>
                        Our task scheduler operates on a <strong>priority-based queue</strong>:
                        Short, targeted tasks like DNS are processed first, while resource-intensive requests (e.g., crt.sh)
                        Short, targeted tasks like DNS are processed first, while resource-intensive requests (e.g.,
                        crt.sh)
                        are <strong>automatically deprioritized</strong> and may be processed later.
                    </span>
                </div>
@@ -114,9 +120,10 @@
            <div id="network-graph" class="graph-container">
                <div class="graph-placeholder">
                    <div class="placeholder-content">
                        <div class="placeholder-icon">[○]</div>
                        <div class="placeholder-icon">[◯]</div>
                        <div class="placeholder-text">Infrastructure map will appear here</div>
                        <div class="placeholder-subtext">Start a reconnaissance scan to visualize relationships</div>
                        <div class="placeholder-subtext">Start a reconnaissance scan to visualize relationships
                        </div>
                    </div>
                </div>
            </div>
@@ -126,29 +133,30 @@
                    <div class="legend-color" style="background-color: #00ff41;"></div>
                    <span>Domains</span>
                </div>
                <div class="legend-item">
                    <div class="legend-color" style="background-color: #c92f2f;"></div>
                    <span>Domain (no valid cert)</span>
                </div>
                <div class="legend-item">
                    <div class="legend-color" style="background-color: #c7c7c7;"></div>
                    <span>Domain (never had cert)</span>
                </div>
                <div class="legend-item">
                    <div class="legend-color" style="background-color: #ff9900;"></div>
                    <span>IP Addresses</span>
                </div>
                <div class="legend-item">
                    <div class="legend-color" style="background-color: #c7c7c7;"></div>
                    <span>Domain (invalid cert)</span>
                </div>
                <div class="legend-item">
                    <div class="legend-color" style="background-color: #9d4edd;"></div>
                    <span>Correlation Objects</span>
                </div>
                <div class="legend-item">
                    <div class="legend-edge high-confidence"></div>
                    <span>High Confidence</span>
                </div>
                <div class="legend-item">
                    <div class="legend-edge medium-confidence"></div>
                    <span>Medium Confidence</span>
                    <div class="legend-color" style="background-color: #00aaff;"></div>
                    <span>ISPs</span>
                </div>
                <div class="legend-item">
                    <div class="legend-color" style="background-color: #ff6b6b;"></div>
                    <span>Large Entity</span>
                    <span>Certificate Authorities</span>
                </div>

                <div class="legend-item">
                    <div class="legend-color" style="background-color: #9d4edd;"></div>
                    <span>Correlation Objects</span>
                </div>
            </div>
        </section>
@@ -186,18 +194,22 @@
            </div>
        </div>

        <!-- Settings Modal -->
        <div id="settings-modal" class="modal">
            <div class="modal-content">
                <div class="modal-header">
                    <h3>Settings</h3>
                    <h3>Scanner Configuration</h3>
                    <button id="settings-modal-close" class="modal-close">[×]</button>
                </div>
                <div class="modal-body">
                    <p class="modal-description">
                        Configure scan settings and API keys. Keys are stored in memory for the current session only.
                        Only provide API keys you don't use for anything else. Don't enter an API key if you don't trust me (best practice would be that you don't).
                    </p>
                    <br>
                    <div class="modal-details">
                        <!-- Scan Settings Section -->
                        <section class="modal-section">
                            <details open>
                                <summary>
                                    <span>⚙️ Scan Settings</span>
                                </summary>
                                <div class="modal-section-content">
                                    <div class="input-group">
                                        <label for="max-depth">Recursion Depth</label>
                                        <select id="max-depth">
@@ -208,14 +220,53 @@
                                            <option value="5">Depth 5 - Maximum depth</option>
                                        </select>
                                    </div>
                                    <div id="api-key-inputs">
                                    </div>
                                    <div class="button-group" style="flex-direction: row; justify-content: flex-end;">
                                        <button id="reset-api-keys" class="btn btn-secondary">
                                            <span>Reset</span>
                            </details>
                        </section>

                        <!-- Provider Configuration Section -->
                        <section class="modal-section">
                            <details open>
                                <summary>
                                    <span>🔧 Provider Configuration</span>
                                    <span class="merge-badge" id="provider-count">0</span>
                                </summary>
                                <div class="modal-section-content">
                                    <div id="provider-config-list">
                                        <!-- Dynamically populated -->
                                    </div>
                                </div>
                            </details>
                        </section>

                        <!-- API Keys Section -->
                        <section class="modal-section">
                            <details>
                                <summary>
                                    <span>🔑 API Keys</span>
                                    <span class="merge-badge" id="api-key-count">0</span>
                                </summary>
                                <div class="modal-section-content">
                                    <p class="placeholder-subtext" style="margin-bottom: 1rem;">
                                        ⚠️ API keys are stored in memory for the current session only.
                                        Only provide API keys you don't use for anything else.
                                    </p>
                                    <div id="api-key-inputs">
                                        <!-- Dynamically populated -->
                                    </div>
                                </div>
                            </details>
                        </section>

                        <!-- Action Buttons -->
                        <div class="button-group" style="margin-top: 1.5rem;">
                            <button id="save-settings" class="btn btn-primary">
                                <span class="btn-icon">[SAVE]</span>
                                <span>Save Configuration</span>
                            </button>
                            <button id="save-api-keys" class="btn btn-primary">
                                <span>Save API-Keys</span>
                            <button id="reset-settings" class="btn btn-secondary">
                                <span class="btn-icon">[RESET]</span>
                                <span>Reset to Defaults</span>
                            </button>
                        </div>
                    </div>
@@ -223,19 +274,42 @@
                </div>
            </div>
        </div>

        <script>
            function copyToClipboard(elementId) {
                const element = document.getElementById(elementId);
                const textToCopy = element.innerText;
                navigator.clipboard.writeText(textToCopy).then(() => {
                    // Optional: Show a success message
                    console.log('Copied to clipboard');
                }).catch(err => {
                    console.error('Failed to copy: ', err);
                });
            }
        </script>
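One caveat for this helper: navigator.clipboard is only available in secure contexts (HTTPS or localhost), so the call above throws on plain HTTP. A hedged fallback sketch via the legacy execCommand path (not part of this change set):

// Sketch: clipboard write with a fallback for non-secure contexts.
function copyTextWithFallback(text) {
    if (navigator.clipboard && window.isSecureContext) {
        return navigator.clipboard.writeText(text);
    }
    // Legacy fallback: select and copy from a temporary off-screen textarea.
    const textarea = document.createElement('textarea');
    textarea.value = text;
    textarea.style.position = 'fixed';
    textarea.style.opacity = '0';
    document.body.appendChild(textarea);
    textarea.select();
    document.execCommand('copy');
    document.body.removeChild(textarea);
    return Promise.resolve();
}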
        <!-- Export Modal -->
        <div id="export-modal" class="modal">
            <div class="modal-content">
                <div class="modal-header">
                    <h3>Export Options</h3>
                    <button id="export-modal-close" class="modal-close">[×]</button>
                </div>
                <div class="modal-body">
                    <div class="modal-details">
                        <section class="modal-section">
                            <details open>
                                <summary>
                                    <span>📊 Available Exports</span>
                                </summary>
                                <div class="modal-section-content">
                                    <div class="button-group" style="margin-top: 1rem;">
                                        <button id="export-graph-json" class="btn btn-primary">
                                            <span class="btn-icon">[JSON]</span>
                                            <span>Export Graph Data</span>
                                        </button>
                                        <div class="status-row" style="margin-top: 0.5rem;">
                                            <span class="status-label">Complete graph data with forensic audit trail,
                                                provider statistics, and scan metadata in JSON format for analysis and
                                                archival.</span>
                                        </div>
                                    </div>
                                </div>
                            </details>
                        </section>
                    </div>
                </div>
            </div>
        </div>
    </div>
    <script src="{{ url_for('static', filename='js/graph.js') }}"></script>
    <script src="{{ url_for('static', filename='js/main.js') }}"></script>
</body>

</html>
@@ -1,3 +1,8 @@
# dnsrecon-reduced/utils/helpers.py

import ipaddress
from typing import Union

def _is_valid_domain(domain: str) -> bool:
    """
    Basic domain validation.
@@ -26,32 +31,27 @@ def _is_valid_domain(domain: str) -> bool:

def _is_valid_ip(ip: str) -> bool:
    """
    Basic IP address validation.
    IP address validation supporting both IPv4 and IPv6.

    Args:
        ip: IP address string to validate

    Returns:
        True if IP appears valid
        True if IP appears valid (IPv4 or IPv6)
    """
    if not ip:
        return False

    try:
        parts = ip.split('.')
        if len(parts) != 4:
            return False

        for part in parts:
            num = int(part)
            if not 0 <= num <= 255:
                return False

        # This handles both IPv4 and IPv6 validation
        ipaddress.ip_address(ip.strip())
        return True

    except (ValueError, AttributeError):
        return False

def is_valid_target(target: str) -> bool:
    """
    Checks if the target is a valid domain or IP address.
    Checks if the target is a valid domain or IP address (IPv4/IPv6).

    Args:
        target: The target string to validate.
@@ -60,3 +60,35 @@ def is_valid_target(target: str) -> bool:
        True if the target is a valid domain or IP, False otherwise.
    """
    return _is_valid_domain(target) or _is_valid_ip(target)

def get_ip_version(ip: str) -> Union[int, None]:
    """
    Get the IP version (4 or 6) of a valid IP address.

    Args:
        ip: IP address string

    Returns:
        4 for IPv4, 6 for IPv6, None if invalid
    """
    try:
        addr = ipaddress.ip_address(ip.strip())
        return addr.version
    except (ValueError, AttributeError):
        return None

def normalize_ip(ip: str) -> Union[str, None]:
    """
    Normalize an IP address to its canonical form.

    Args:
        ip: IP address string

    Returns:
        Normalized IP address string, None if invalid
    """
    try:
        addr = ipaddress.ip_address(ip.strip())
        return str(addr)
    except (ValueError, AttributeError):
        return None
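For symmetry on the client side, the same IPv4/IPv6 check exists in Node's standard library; a sketch mirroring get_ip_version with net.isIP (illustrative, not part of this change set):

// Sketch: Node.js counterpart of the Python helpers above (stdlib net module).
const net = require('net');

function getIpVersion(ip) {
    const version = net.isIP(ip.trim()); // returns 0 (invalid), 4, or 6
    return version === 0 ? null : version;
}

// getIpVersion('192.0.2.1')   -> 4
// getIpVersion('2001:db8::1') -> 6
// getIpVersion('not-an-ip')   -> null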