commit db2101d814
parent 709d3b9f3d

    many improvements
core/graph_manager.py
@@ -19,6 +19,7 @@ class NodeType(Enum):
     IP = "ip"
     CERTIFICATE = "certificate"
     ASN = "asn"
+    LARGE_ENTITY = "large_entity"


 class RelationshipType(Enum):
core/scanner.py
@@ -7,7 +7,7 @@ import threading
 import time
 import traceback
 from typing import List, Set, Dict, Any, Optional, Tuple
-from concurrent.futures import ThreadPoolExecutor, as_completed
+from concurrent.futures import ThreadPoolExecutor, as_completed, CancelledError

 from core.graph_manager import GraphManager, NodeType, RelationshipType
 from core.logger import get_forensic_logger, new_session
@@ -36,7 +36,7 @@ class Scanner:
     def __init__(self):
         """Initialize scanner with all available providers and empty graph."""
         print("Initializing Scanner instance...")

         try:
             self.graph = GraphManager()
             self.providers = []
@@ -44,16 +44,17 @@ class Scanner:
             self.current_target = None
             self.current_depth = 0
             self.max_depth = 2
-            self.stop_requested = False
+            self.stop_event = threading.Event()  # Use a threading.Event for safer signaling
             self.scan_thread = None

             # Scanning progress tracking
             self.total_indicators_found = 0
             self.indicators_processed = 0
             self.current_indicator = ""

             # Concurrent processing configuration
             self.max_workers = config.max_concurrent_requests
+            self.executor = None  # Keep a reference to the executor

             # Initialize providers
             print("Calling _initialize_providers...")
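
Note on this hunk: replacing the `stop_requested` boolean with a `threading.Event` gives atomic set/check semantics and lets waiting threads block on `wait(timeout)` instead of busy-polling. A minimal, self-contained sketch of the cooperative-cancellation pattern, with illustrative names that are not part of the patch:

    # Sketch only: cooperative cancellation via threading.Event.
    import threading
    import time

    stop_event = threading.Event()

    def worker() -> None:
        # wait() returns True as soon as the event is set, False on timeout,
        # so the loop sleeps instead of spinning on a flag.
        while not stop_event.wait(timeout=0.5):
            pass  # one unit of work per iteration would go here
        print("worker exiting cleanly")

    t = threading.Thread(target=worker)
    t.start()
    time.sleep(1.0)
    stop_event.set()       # signal cancellation; set()/is_set() are thread-safe
    t.join(timeout=5.0)
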
@@ -62,9 +63,9 @@ class Scanner:
             # Initialize logger
             print("Initializing forensic logger...")
             self.logger = get_forensic_logger()

             print("Scanner initialization complete")

         except Exception as e:
             print(f"ERROR: Scanner initialization failed: {e}")
             traceback.print_exc()
@@ -81,7 +82,7 @@ class Scanner:
             ('crtsh', CrtShProvider),
             ('dns', DNSProvider)
         ]

         for provider_name, provider_class in free_providers:
             if config.is_provider_enabled(provider_name):
                 try:
@@ -100,7 +101,7 @@ class Scanner:
             ('shodan', ShodanProvider),
             ('virustotal', VirusTotalProvider)
         ]

         for provider_name, provider_class in api_providers:
             if config.is_provider_enabled(provider_name):
                 try:
@@ -128,7 +129,7 @@ class Scanner:
             bool: True if scan started successfully
         """
         print(f"Scanner.start_scan called with target='{target_domain}', depth={max_depth}")

         try:
             if self.status == ScanStatus.RUNNING:
                 print("Scan already running, rejecting new scan")
@@ -142,8 +143,8 @@ class Scanner:
             # Stop any existing scan thread
             if self.scan_thread and self.scan_thread.is_alive():
                 print("Stopping existing scan thread...")
-                self.stop_requested = True
-                self.scan_thread.join(timeout=2.0)
+                self.stop_event.set()
+                self.scan_thread.join(timeout=5.0)
                 if self.scan_thread.is_alive():
                     print("WARNING: Could not stop existing thread")
                     return False
@@ -154,7 +155,7 @@ class Scanner:
             self.current_target = target_domain.lower().strip()
             self.max_depth = max_depth
             self.current_depth = 0
-            self.stop_requested = False
+            self.stop_event.clear()
             self.total_indicators_found = 0
             self.indicators_processed = 0
             self.current_indicator = self.current_target
@@ -163,7 +164,7 @@ class Scanner:
             print("Starting new forensic session...")
             self.logger = new_session()

-            # Start scan in separate thread for Phase 2
+            # Start scan in separate thread
             print("Starting scan thread...")
             self.scan_thread = threading.Thread(
                 target=self._execute_scan_async,
@@ -171,9 +172,9 @@ class Scanner:
                 daemon=True
             )
             self.scan_thread.start()

             return True

         except Exception as e:
             print(f"ERROR: Exception in start_scan: {e}")
             traceback.print_exc()
@@ -188,6 +189,7 @@ class Scanner:
             max_depth: Maximum recursion depth
         """
         print(f"_execute_scan_async started for {target_domain} with depth {max_depth}")
+        self.executor = ThreadPoolExecutor(max_workers=self.max_workers)

         try:
             print("Setting status to RUNNING")
@@ -202,7 +204,7 @@ class Scanner:
             print(f"Adding target domain '{target_domain}' as initial node")
             self.graph.add_node(target_domain, NodeType.DOMAIN)

-            # BFS-style exploration with depth limiting and concurrent processing
+            # BFS-style exploration
             current_level_domains = {target_domain}
             processed_domains = set()
             all_discovered_ips = set()
@@ -210,7 +212,7 @@ class Scanner:
             print(f"Starting BFS exploration...")

             for depth in range(max_depth + 1):
-                if self.stop_requested:
+                if self.stop_event.is_set():
                     print(f"Stop requested at depth {depth}")
                     break

@@ -221,28 +223,27 @@ class Scanner:
                     print("No domains to process at this level")
                     break

-                # Update progress tracking
                 self.total_indicators_found += len(current_level_domains)
                 next_level_domains = set()

-                # Process domains at current depth level with concurrent queries
                 domain_results = self._process_domains_concurrent(current_level_domains, processed_domains)

                 for domain, discovered_domains, discovered_ips in domain_results:
-                    if self.stop_requested:
+                    if self.stop_event.is_set():
                         break

                     processed_domains.add(domain)
                     all_discovered_ips.update(discovered_ips)

-                    # Add discovered domains to next level if not at max depth
                     if depth < max_depth:
                         for discovered_domain in discovered_domains:
                             if discovered_domain not in processed_domains:
                                 next_level_domains.add(discovered_domain)
                                 print(f"Adding {discovered_domain} to next level")

-                # Process discovered IPs concurrently
+                if self.stop_event.is_set():
+                    break

                 if all_discovered_ips:
                     print(f"Processing {len(all_discovered_ips)} discovered IP addresses")
                     self._process_ips_concurrent(all_discovered_ips)
@@ -250,8 +251,13 @@ class Scanner:
                 current_level_domains = next_level_domains
                 print(f"Completed depth {depth}, {len(next_level_domains)} domains for next level")

-            # Finalize scan
-            if self.stop_requested:
+        except Exception as e:
+            print(f"ERROR: Scan execution failed with error: {e}")
+            traceback.print_exc()
+            self.status = ScanStatus.FAILED
+            self.logger.logger.error(f"Scan failed: {e}")
+        finally:
+            if self.stop_event.is_set():
                 self.status = ScanStatus.STOPPED
                 print("Scan completed with STOPPED status")
             else:
@@ -259,8 +265,8 @@ class Scanner:
                 print("Scan completed with COMPLETED status")

             self.logger.log_scan_complete()
+            self.executor.shutdown(wait=False, cancel_futures=True)

-            # Print final statistics
             stats = self.graph.get_statistics()
             print(f"Final scan statistics:")
             print(f"  - Total nodes: {stats['basic_metrics']['total_nodes']}")
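
Note on this hunk: `Executor.shutdown(wait=False, cancel_futures=True)` (Python 3.9+) cancels tasks still sitting in the queue but does not interrupt tasks that are already running; those still have to notice `stop_event` on their own. A minimal sketch of that behavior, with illustrative names:

    # Sketch only: cancel_futures drops queued work; running tasks finish normally.
    from concurrent.futures import ThreadPoolExecutor
    import time

    executor = ThreadPoolExecutor(max_workers=1)
    running = executor.submit(time.sleep, 2)   # starts immediately on the one worker
    queued = executor.submit(time.sleep, 2)    # waits in the queue

    executor.shutdown(wait=False, cancel_futures=True)
    print(queued.cancelled())    # True: never started, so it was cancelled
    print(running.cancelled())   # False: already running, completes on its own
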
@@ -268,132 +274,97 @@ class Scanner:
             print(f"  - Domains processed: {len(processed_domains)}")
             print(f"  - IPs discovered: {len(all_discovered_ips)}")

-        except Exception as e:
-            print(f"ERROR: Scan execution failed with error: {e}")
-            traceback.print_exc()
-            self.status = ScanStatus.FAILED
-            self.logger.logger.error(f"Scan failed: {e}")

     def _process_domains_concurrent(self, domains: Set[str], processed_domains: Set[str]) -> List[Tuple[str, Set[str], Set[str]]]:
         """
         Process multiple domains concurrently using thread pool.

-        Args:
-            domains: Set of domains to process
-            processed_domains: Set of already processed domains
-
-        Returns:
-            List of tuples (domain, discovered_domains, discovered_ips)
         """
         results = []

-        # Filter out already processed domains
         domains_to_process = domains - processed_domains

         if not domains_to_process:
             return results

         print(f"Processing {len(domains_to_process)} domains concurrently with {self.max_workers} workers")

-        with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
-            # Submit all domain processing tasks
-            future_to_domain = {
-                executor.submit(self._query_providers_for_domain, domain): domain
-                for domain in domains_to_process
-            }
-
-            # Collect results as they complete
-            for future in as_completed(future_to_domain):
-                if self.stop_requested:
-                    break
-
-                domain = future_to_domain[future]
-
-                try:
-                    discovered_domains, discovered_ips = future.result()
-                    results.append((domain, discovered_domains, discovered_ips))
-                    self.indicators_processed += 1
-                    print(f"Completed processing domain: {domain} ({len(discovered_domains)} domains, {len(discovered_ips)} IPs)")
-
-                except Exception as e:
-                    print(f"Error processing domain {domain}: {e}")
-                    traceback.print_exc()
+        future_to_domain = {
+            self.executor.submit(self._query_providers_for_domain, domain): domain
+            for domain in domains_to_process
+        }
+
+        for future in as_completed(future_to_domain):
+            if self.stop_event.is_set():
+                future.cancel()
+                continue
+            domain = future_to_domain[future]
+            try:
+                discovered_domains, discovered_ips = future.result()
+                results.append((domain, discovered_domains, discovered_ips))
+                self.indicators_processed += 1
+                print(f"Completed processing domain: {domain} ({len(discovered_domains)} domains, {len(discovered_ips)} IPs)")
+            except (Exception, CancelledError) as e:
+                print(f"Error processing domain {domain}: {e}")

         return results

     def _process_ips_concurrent(self, ips: Set[str]) -> None:
         """
         Process multiple IP addresses concurrently.

-        Args:
-            ips: Set of IP addresses to process
         """
-        if not ips:
+        if not ips or self.stop_event.is_set():
             return

         print(f"Processing {len(ips)} IP addresses concurrently")

-        with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
-            # Submit all IP processing tasks
-            future_to_ip = {
-                executor.submit(self._query_providers_for_ip, ip): ip
-                for ip in ips
-            }
-
-            # Collect results as they complete
-            for future in as_completed(future_to_ip):
-                if self.stop_requested:
-                    break
-
-                ip = future_to_ip[future]
-
-                try:
-                    future.result()  # Just wait for completion
-                    print(f"Completed processing IP: {ip}")
-
-                except Exception as e:
-                    print(f"Error processing IP {ip}: {e}")
-                    traceback.print_exc()
+        future_to_ip = {
+            self.executor.submit(self._query_providers_for_ip, ip): ip
+            for ip in ips
+        }
+        for future in as_completed(future_to_ip):
+            if self.stop_event.is_set():
+                future.cancel()
+                continue
+            ip = future_to_ip[future]
+            try:
+                future.result()  # Just wait for completion
+                print(f"Completed processing IP: {ip}")
+            except (Exception, CancelledError) as e:
+                print(f"Error processing IP {ip}: {e}")

     def _query_providers_for_domain(self, domain: str) -> Tuple[Set[str], Set[str]]:
         """
         Query all enabled providers for information about a domain.

-        Args:
-            domain: Domain to investigate
-
-        Returns:
-            Tuple of (discovered_domains, discovered_ips)
         """
         print(f"Querying {len(self.providers)} providers for domain: {domain}")
         discovered_domains = set()
         discovered_ips = set()

+        # Define a threshold for creating a "large entity" node
+        LARGE_ENTITY_THRESHOLD = 50

-        if not self.providers:
-            print("No providers available")
+        if not self.providers or self.stop_event.is_set():
             return discovered_domains, discovered_ips

-        # Query providers concurrently for better performance
-        with ThreadPoolExecutor(max_workers=len(self.providers)) as executor:
-            # Submit queries for all providers
+        with ThreadPoolExecutor(max_workers=len(self.providers)) as provider_executor:
             future_to_provider = {
-                executor.submit(self._safe_provider_query_domain, provider, domain): provider
+                provider_executor.submit(self._safe_provider_query_domain, provider, domain): provider
                 for provider in self.providers
             }

-            # Collect results as they complete
             for future in as_completed(future_to_provider):
-                if self.stop_requested:
-                    break
+                if self.stop_event.is_set():
+                    future.cancel()
+                    continue
                 provider = future_to_provider[future]

                 try:
                     relationships = future.result()
                     print(f"Provider {provider.get_name()} returned {len(relationships)} relationships")

+                    # Check if the number of relationships exceeds the threshold
+                    if len(relationships) > LARGE_ENTITY_THRESHOLD:
+                        # Create a single "large entity" node
+                        large_entity_id = f"large_entity_{provider.get_name()}_{domain}"
+                        self.graph.add_node(large_entity_id, NodeType.LARGE_ENTITY, metadata={'count': len(relationships), 'provider': provider.get_name()})
+                        self.graph.add_edge(domain, large_entity_id, RelationshipType.PASSIVE_DNS, 1.0, provider.get_name(), {})
+                        print(f"Created large entity node for {domain} from {provider.get_name()} with {len(relationships)} relationships")
+                        continue  # Skip adding individual nodes

                     for source, target, rel_type, confidence, raw_data in relationships:
-                        # Determine node type based on target
                         if self._is_valid_ip(target):
                             target_node_type = NodeType.IP
                             discovered_ips.add(target)
@@ -401,22 +372,13 @@ class Scanner:
                             target_node_type = NodeType.DOMAIN
                             discovered_domains.add(target)
                         else:
-                            # Could be ASN or certificate
                             target_node_type = NodeType.ASN if target.startswith('AS') else NodeType.CERTIFICATE

-                        # Add nodes and relationship to graph
                         self.graph.add_node(source, NodeType.DOMAIN)
                         self.graph.add_node(target, target_node_type)
-                        success = self.graph.add_edge(
-                            source, target, rel_type, confidence,
-                            provider.get_name(), raw_data
-                        )
-
-                        if success:
+                        if self.graph.add_edge(source, target, rel_type, confidence, provider.get_name(), raw_data):
                             print(f"Added relationship: {source} -> {target} ({rel_type.relationship_name})")

-                except Exception as e:
+                except (Exception, CancelledError) as e:
                     print(f"Provider {provider.get_name()} failed for {domain}: {e}")

         print(f"Domain {domain}: discovered {len(discovered_domains)} domains, {len(discovered_ips)} IPs")
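
Note on the `future.cancel()` calls added in these loops: `as_completed()` only yields futures that have already finished, so `cancel()` on a yielded future is effectively a no-op that returns False; cancellation only ever succeeds on futures that have not started yet. A minimal sketch demonstrating this, with illustrative names:

    # Sketch only: futures yielded by as_completed() are already done,
    # so cancel() returns False for every one of them.
    from concurrent.futures import ThreadPoolExecutor, as_completed
    import time

    with ThreadPoolExecutor(max_workers=2) as pool:
        futures = [pool.submit(time.sleep, 0.1) for _ in range(4)]
        for f in as_completed(futures):
            print(f.done(), f.cancel())   # prints: True False

The real teardown work in this patch is therefore done by the workers checking `stop_event` and by `shutdown(cancel_futures=True)` cancelling tasks still in the queue.
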
@@ -425,61 +387,43 @@ class Scanner:
     def _query_providers_for_ip(self, ip: str) -> None:
         """
         Query all enabled providers for information about an IP address.

-        Args:
-            ip: IP address to investigate
         """
         print(f"Querying {len(self.providers)} providers for IP: {ip}")
-
-        if not self.providers:
-            print("No providers available")
+        if not self.providers or self.stop_event.is_set():
             return

-        # Query providers concurrently
-        with ThreadPoolExecutor(max_workers=len(self.providers)) as executor:
-            # Submit queries for all providers
+        with ThreadPoolExecutor(max_workers=len(self.providers)) as provider_executor:
             future_to_provider = {
-                executor.submit(self._safe_provider_query_ip, provider, ip): provider
+                provider_executor.submit(self._safe_provider_query_ip, provider, ip): provider
                 for provider in self.providers
             }

-            # Collect results as they complete
             for future in as_completed(future_to_provider):
-                if self.stop_requested:
-                    break
+                if self.stop_event.is_set():
+                    future.cancel()
+                    continue
                 provider = future_to_provider[future]

                 try:
                     relationships = future.result()
                     print(f"Provider {provider.get_name()} returned {len(relationships)} relationships for IP {ip}")

                     for source, target, rel_type, confidence, raw_data in relationships:
-                        # Determine node type based on target
                         if self._is_valid_domain(target):
                             target_node_type = NodeType.DOMAIN
                         elif target.startswith('AS'):
                             target_node_type = NodeType.ASN
                         else:
                             target_node_type = NodeType.IP

-                        # Add nodes and relationship to graph
                         self.graph.add_node(source, NodeType.IP)
                         self.graph.add_node(target, target_node_type)
-                        success = self.graph.add_edge(
-                            source, target, rel_type, confidence,
-                            provider.get_name(), raw_data
-                        )
-
-                        if success:
+                        if self.graph.add_edge(source, target, rel_type, confidence, provider.get_name(), raw_data):
                             print(f"Added IP relationship: {source} -> {target} ({rel_type.relationship_name})")

-                except Exception as e:
+                except (Exception, CancelledError) as e:
                     print(f"Provider {provider.get_name()} failed for IP {ip}: {e}")

     def _safe_provider_query_domain(self, provider, domain: str):
         """Safely query provider for domain with error handling."""
+        if self.stop_event.is_set():
+            return []
         try:
             return provider.query_domain(domain)
         except Exception as e:
@@ -488,6 +432,8 @@ class Scanner:

     def _safe_provider_query_ip(self, provider, ip: str):
         """Safely query provider for IP with error handling."""
+        if self.stop_event.is_set():
+            return []
         try:
             return provider.query_ip(ip)
         except Exception as e:
@@ -497,13 +443,10 @@ class Scanner:
     def stop_scan(self) -> bool:
         """
         Request scan termination.

-        Returns:
-            bool: True if stop request was accepted
         """
         try:
             if self.status == ScanStatus.RUNNING:
-                self.stop_requested = True
+                self.stop_event.set()
                 print("Scan stop requested")
                 return True
             print("No active scan to stop")
@@ -516,9 +459,6 @@ class Scanner:
     def get_scan_status(self) -> Dict[str, Any]:
         """
         Get current scan status and progress.

-        Returns:
-            Dictionary containing scan status information
         """
         try:
             return {
@@ -558,31 +498,18 @@ class Scanner:
     def get_graph_data(self) -> Dict[str, Any]:
         """
         Get current graph data for visualization.

-        Returns:
-            Graph data formatted for frontend
         """
         return self.graph.get_graph_data()

     def export_results(self) -> Dict[str, Any]:
         """
         Export complete scan results including graph and audit trail.

-        Returns:
-            Dictionary containing complete scan results
         """
-        # Get graph data
         graph_data = self.graph.export_json()

-        # Get forensic audit trail
         audit_trail = self.logger.export_audit_trail()

-        # Get provider statistics
         provider_stats = {}
         for provider in self.providers:
             provider_stats[provider.get_name()] = provider.get_statistics()

-        # Combine all results
         export_data = {
             'scan_metadata': {
                 'target_domain': self.current_target,
@@ -596,18 +523,11 @@ class Scanner:
             'provider_statistics': provider_stats,
             'scan_summary': self.logger.get_forensic_summary()
         }

         return export_data

     def remove_provider(self, provider_name: str) -> bool:
         """
         Remove a provider from the scanner.

-        Args:
-            provider_name: Name of provider to remove
-
-        Returns:
-            bool: True if provider was removed
         """
         for i, provider in enumerate(self.providers):
             if provider.get_name() == provider_name:
@@ -618,63 +538,41 @@ class Scanner:
     def get_provider_statistics(self) -> Dict[str, Dict[str, Any]]:
         """
         Get statistics for all providers.

-        Returns:
-            Dictionary mapping provider names to their statistics
         """
         stats = {}
         for provider in self.providers:
             stats[provider.get_name()] = provider.get_statistics()
         return stats

     def _is_valid_domain(self, domain: str) -> bool:
         """
         Basic domain validation.

-        Args:
-            domain: Domain string to validate
-
-        Returns:
-            True if domain appears valid
         """
         if not domain or len(domain) > 253:
             return False

-        # Check for valid characters and structure
         parts = domain.split('.')
         if len(parts) < 2:
             return False

         for part in parts:
             if not part or len(part) > 63:
                 return False
             if not part.replace('-', '').replace('_', '').isalnum():
                 return False

         return True

     def _is_valid_ip(self, ip: str) -> bool:
         """
         Basic IP address validation.

-        Args:
-            ip: IP address string to validate
-
-        Returns:
-            True if IP appears valid
         """
         try:
             parts = ip.split('.')
             if len(parts) != 4:
                 return False

             for part in parts:
                 num = int(part)
                 if not 0 <= num <= 255:
                     return False

             return True

         except (ValueError, AttributeError):
             return False
providers/base_provider.py
@@ -1,11 +1,10 @@
-"""
-Abstract base provider class for DNSRecon data sources.
-Defines the interface and common functionality for all providers.
-"""
+# dnsrecon/providers/base_provider.py

 import time
 import requests
 import threading
+import os
+import json
 from abc import ABC, abstractmethod
 from typing import List, Dict, Any, Optional, Tuple
 from datetime import datetime
@@ -61,12 +60,18 @@ class BaseProvider(ABC):
         self._local = threading.local()
         self.logger = get_forensic_logger()

+        # Caching configuration
+        self.cache_dir = '.cache'
+        self.cache_expiry = 12 * 3600  # 12 hours in seconds
+        if not os.path.exists(self.cache_dir):
+            os.makedirs(self.cache_dir)
+
         # Statistics
         self.total_requests = 0
         self.successful_requests = 0
         self.failed_requests = 0
         self.total_relationships_found = 0

     @property
     def session(self):
         if not hasattr(self._local, 'session'):
@@ -131,6 +136,23 @@ class BaseProvider(ABC):
         Returns:
             Response object or None if request failed
         """
+        # Create a unique cache key
+        cache_key = f"{self.name}_{hash(f'{method}:{url}:{json.dumps(params, sort_keys=True)}')}.json"
+        cache_path = os.path.join(self.cache_dir, cache_key)
+
+        # Check cache
+        if os.path.exists(cache_path):
+            cache_age = time.time() - os.path.getmtime(cache_path)
+            if cache_age < self.cache_expiry:
+                print(f"Returning cached response for: {url}")
+                with open(cache_path, 'r') as f:
+                    cached_data = json.load(f)
+                response = requests.Response()
+                response.status_code = cached_data['status_code']
+                response._content = cached_data['content'].encode('utf-8')
+                response.headers = cached_data['headers']
+                return response
+
         for attempt in range(max_retries + 1):
             # Apply rate limiting
             self.rate_limiter.wait_if_needed()
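
Note on the cache key in this hunk: Python's built-in `hash()` is salted per process for strings (PYTHONHASHSEED), so these keys change between interpreter runs and the on-disk cache will silently miss after a restart. A stable digest avoids that; a minimal sketch, illustrative and not part of the patch:

    # Sketch only: a cache key stable across restarts, unlike built-in hash().
    import hashlib
    import json

    def stable_cache_key(name, method, url, params):
        payload = f"{method}:{url}:{json.dumps(params, sort_keys=True)}"
        digest = hashlib.sha256(payload.encode("utf-8")).hexdigest()
        return f"{name}_{digest}.json"

    print(stable_cache_key("crtsh", "GET", "https://crt.sh/", {"q": "example.com"}))
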
@@ -171,7 +193,7 @@ class BaseProvider(ABC):
                 response.raise_for_status()
                 self.successful_requests += 1

-                # Success - log and return
+                # Success - log, cache, and return
                 duration_ms = (time.time() - start_time) * 1000
                 self.logger.log_api_request(
                     provider=self.name,
@@ -183,6 +205,13 @@ class BaseProvider(ABC):
                     error=None,
                     target_indicator=target_indicator
                 )
+                # Cache the successful response to disk
+                with open(cache_path, 'w') as f:
+                    json.dump({
+                        'status_code': response.status_code,
+                        'content': response.text,
+                        'headers': dict(response.headers)
+                    }, f)
                 return response

             except requests.exceptions.RequestException as e:
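
Note on the cached-response round trip: the read path rebuilds a `requests.Response` by assigning the private `_content` attribute, and the headers go through a plain dict, which loses requests' case-insensitive lookup. A sketch of the same reconstruction with that restored; illustrative, not part of the patch:

    # Sketch only: rebuild a Response the way the cache-read path does,
    # but restore case-insensitive headers. Setting _content is a private API.
    import requests
    from requests.structures import CaseInsensitiveDict

    def response_from_cache(cached: dict) -> requests.Response:
        response = requests.Response()
        response.status_code = cached['status_code']
        response._content = cached['content'].encode('utf-8')
        response.headers = CaseInsensitiveDict(cached['headers'])  # keeps headers['content-type'] working
        return response
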
providers/dns_provider.py
@@ -1,7 +1,4 @@
-"""
-DNS resolution provider for DNSRecon.
-Discovers domain relationships through DNS record analysis.
-"""
+# dnsrecon/providers/dns_provider.py

 import socket
 import dns.resolver
@@ -87,8 +84,10 @@ class DNSProvider(BaseProvider):

         try:
             # Perform reverse DNS lookup
+            self.total_requests += 1
             reverse_name = dns.reversename.from_address(ip)
             response = self.resolver.resolve(reverse_name, 'PTR')
+            self.successful_requests += 1

             for ptr_record in response:
                 hostname = str(ptr_record).rstrip('.')
@@ -119,6 +118,7 @@ class DNSProvider(BaseProvider):
             )

         except Exception as e:
+            self.failed_requests += 1
             self.logger.logger.debug(f"Reverse DNS lookup failed for {ip}: {e}")

         return relationships
@@ -131,7 +131,9 @@ class DNSProvider(BaseProvider):
         # return relationships

         try:
+            self.total_requests += 1
             response = self.resolver.resolve(domain, 'A')
+            self.successful_requests += 1

             for a_record in response:
                 ip_address = str(a_record)
@@ -161,6 +163,7 @@ class DNSProvider(BaseProvider):
             )

         except Exception as e:
+            self.failed_requests += 1
             self.logger.logger.debug(f"A record query failed for {domain}: {e}")

         return relationships
@@ -173,7 +176,9 @@ class DNSProvider(BaseProvider):
         # return relationships

         try:
+            self.total_requests += 1
             response = self.resolver.resolve(domain, 'AAAA')
+            self.successful_requests += 1

             for aaaa_record in response:
                 ip_address = str(aaaa_record)
@@ -203,6 +208,7 @@ class DNSProvider(BaseProvider):
             )

         except Exception as e:
+            self.failed_requests += 1
             self.logger.logger.debug(f"AAAA record query failed for {domain}: {e}")

         return relationships
@@ -215,7 +221,9 @@ class DNSProvider(BaseProvider):
         # return relationships

         try:
+            self.total_requests += 1
             response = self.resolver.resolve(domain, 'CNAME')
+            self.successful_requests += 1

             for cname_record in response:
                 target_domain = str(cname_record).rstrip('.')
@@ -246,6 +254,7 @@ class DNSProvider(BaseProvider):
             )

         except Exception as e:
+            self.failed_requests += 1
             self.logger.logger.debug(f"CNAME record query failed for {domain}: {e}")

         return relationships
@@ -258,7 +267,9 @@ class DNSProvider(BaseProvider):
         # return relationships

         try:
+            self.total_requests += 1
             response = self.resolver.resolve(domain, 'MX')
+            self.successful_requests += 1

             for mx_record in response:
                 mx_host = str(mx_record.exchange).rstrip('.')
@@ -290,6 +301,7 @@ class DNSProvider(BaseProvider):
             )

         except Exception as e:
+            self.failed_requests += 1
             self.logger.logger.debug(f"MX record query failed for {domain}: {e}")

         return relationships
@@ -302,7 +314,9 @@ class DNSProvider(BaseProvider):
         # return relationships

         try:
+            self.total_requests += 1
             response = self.resolver.resolve(domain, 'NS')
+            self.successful_requests += 1

             for ns_record in response:
                 ns_host = str(ns_record).rstrip('.')
@@ -333,6 +347,7 @@ class DNSProvider(BaseProvider):
             )

         except Exception as e:
+            self.failed_requests += 1
             self.logger.logger.debug(f"NS record query failed for {domain}: {e}")

         return relationships
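
Note on this file: every record-type query now repeats the same `total_requests` / `successful_requests` / `failed_requests` bookkeeping around `resolver.resolve()`. A small helper could centralize it; a minimal sketch with a hypothetical `_resolve_counted` method that is not in the patch:

    # Sketch only: centralize the per-query counter bookkeeping.
    def _resolve_counted(self, name, record_type):
        self.total_requests += 1
        try:
            response = self.resolver.resolve(name, record_type)
            self.successful_requests += 1
            return response
        except Exception:
            self.failed_requests += 1
            raise
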
@@ -217,8 +217,12 @@ class GraphManager {
        this.network.on('click', (params) => {
            if (params.nodes.length > 0) {
                const nodeId = params.nodes[0];
-                this.showNodeDetails(nodeId);
-                this.highlightNodeConnections(nodeId);
+                if (this.network.isCluster(nodeId)) {
+                    this.network.openCluster(nodeId);
+                } else {
+                    this.showNodeDetails(nodeId);
+                    this.highlightNodeConnections(nodeId);
+                }
            } else {
                this.clearHighlights();
            }
@@ -454,11 +458,13 @@ class GraphManager {
            'domain': '#00ff41',       // Green
            'ip': '#ff9900',           // Amber
            'certificate': '#c7c7c7',  // Gray
-            'asn': '#00aaff'           // Blue
+            'asn': '#00aaff',          // Blue
+            'large_entity': '#ff6b6b'  // Red for large entities
        };
        return colors[nodeType] || '#ffffff';
    }


    /**
     * Get node border color based on type
     * @param {string} nodeType - Node type
@@ -900,24 +906,22 @@ class GraphManager {
     * Toggle node clustering
     */
    toggleClustering() {
-        // Simple clustering by node type
-        const clusterOptionsByType = {
-            joinCondition: (childOptions) => {
-                return childOptions.type === 'domain';
-            },
-            clusterNodeProperties: {
-                id: 'domain-cluster',
-                borderWidth: 3,
-                shape: 'database',
-                label: 'Domains',
-                color: '#00ff41'
-            }
-        };
-
-        if (this.network.clustering.isCluster('domain-cluster')) {
-            this.network.clustering.openCluster('domain-cluster');
+        if (this.network.isCluster('domain-cluster')) {
+            this.network.openCluster('domain-cluster');
        } else {
-            this.network.clustering.cluster(clusterOptionsByType);
+            const clusterOptions = {
+                joinCondition: (nodeOptions) => {
+                    return nodeOptions.type === 'domain';
+                },
+                clusterNodeProperties: {
+                    id: 'domain-cluster',
+                    label: 'Domains',
+                    shape: 'database',
+                    color: '#00ff41',
+                    borderWidth: 3,
+                }
+            };
+            this.network.cluster(clusterOptions);
        }
    }

@@ -360,6 +360,7 @@ class DNSReconApp {
            console.log('--- Polling tick ---');
            this.updateStatus();
            this.updateGraph();
+            this.loadProviders();
        }, 1000); // Poll every 1 second for debugging

        console.log('Polling started with 1 second interval');
@@ -542,6 +543,7 @@ class DNSReconApp {
                    this.stopPolling();
                    this.showSuccess('Scan completed successfully');
                    this.updateConnectionStatus('completed');
+                    this.loadProviders();
                    // Force a final graph update
                    console.log('Scan completed - forcing final graph update');
                    setTimeout(() => this.updateGraph(), 100);
@@ -552,6 +554,7 @@ class DNSReconApp {
                    this.stopPolling();
                    this.showError('Scan failed');
                    this.updateConnectionStatus('error');
+                    this.loadProviders();
                    break;

                case 'stopped':
@@ -559,6 +562,7 @@ class DNSReconApp {
                    this.stopPolling();
                    this.showSuccess('Scan stopped');
                    this.updateConnectionStatus('stopped');
+                    this.loadProviders();
                    break;

                case 'idle':
@@ -145,6 +145,10 @@
                <div class="legend-edge medium-confidence"></div>
                <span>Medium Confidence</span>
            </div>
+            <div class="legend-item">
+                <div class="legend-color" style="background-color: #ff6b6b;"></div>
+                <span>Large Entity</span>
+            </div>
        </div>
    </section>
