implement new data api

commit 97aa18f788
parent 15421dd4a5
core/graph_manager.py
@@ -1,8 +1,9 @@
-# core/graph_manager.py
+# dnsrecon-reduced/core/graph_manager.py
 
 """
 Graph data model for DNSRecon using NetworkX.
 Manages in-memory graph storage with confidence scoring and forensic metadata.
+Now fully compatible with the unified ProviderResult data model.
 """
 import re
 from datetime import datetime, timezone
@@ -28,6 +29,7 @@ class GraphManager:
     """
     Thread-safe graph manager for DNSRecon infrastructure mapping.
     Uses NetworkX for in-memory graph storage with confidence scoring.
+    Compatible with unified ProviderResult data model.
     """
 
     def __init__(self):
@@ -192,21 +194,36 @@ class GraphManager:
             })
         return all_correlations
 
-    def add_node(self, node_id: str, node_type: NodeType, attributes: Optional[Dict[str, Any]] = None,
+    def add_node(self, node_id: str, node_type: NodeType, attributes: Optional[List[Dict[str, Any]]] = None,
                  description: str = "", metadata: Optional[Dict[str, Any]] = None) -> bool:
-        """Add a node to the graph, update attributes, and process correlations."""
+        """
+        Add a node to the graph, update attributes, and process correlations.
+        Now compatible with unified data model - attributes are dictionaries from converted StandardAttribute objects.
+        """
         is_new_node = not self.graph.has_node(node_id)
         if is_new_node:
             self.graph.add_node(node_id, type=node_type.value,
                                 added_timestamp=datetime.now(timezone.utc).isoformat(),
-                                attributes=attributes or {},
+                                attributes=attributes or [],  # Store as a list from the start
                                 description=description,
                                 metadata=metadata or {})
         else:
-            # Safely merge new attributes into existing attributes
+            # Safely merge new attributes into the existing list of attributes
             if attributes:
-                existing_attributes = self.graph.nodes[node_id].get('attributes', {})
-                existing_attributes.update(attributes)
+                existing_attributes = self.graph.nodes[node_id].get('attributes', [])
+
+                # Handle cases where old data might still be in dictionary format
+                if not isinstance(existing_attributes, list):
+                    existing_attributes = []
+
+                # Create a set of existing attribute names for efficient duplicate checking
+                existing_attr_names = {attr['name'] for attr in existing_attributes}
+
+                for new_attr in attributes:
+                    if new_attr['name'] not in existing_attr_names:
+                        existing_attributes.append(new_attr)
+                        existing_attr_names.add(new_attr['name'])
+
                 self.graph.nodes[node_id]['attributes'] = existing_attributes
         if description:
             self.graph.nodes[node_id]['description'] = description
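Reviewer note: the node attribute payload changes here from a name-to-value dict to a list of attribute dicts, and merging now deduplicates by `name` (first writer wins). A standalone sketch of that behavior, with hypothetical values:

```python
# Illustration only; the dedup-by-'name' logic mirrors add_node() above.
existing_attributes = [
    {'name': 'dns_records', 'value': ['A: 93.184.216.34'], 'provider': 'dns'},
]
incoming = [
    {'name': 'dns_records', 'value': ['A: 93.184.216.34'], 'provider': 'dns'},  # duplicate name: skipped
    {'name': 'ports', 'value': [80, 443], 'provider': 'shodan'},                # new name: appended
]

existing_attr_names = {attr['name'] for attr in existing_attributes}
for new_attr in incoming:
    if new_attr['name'] not in existing_attr_names:
        existing_attributes.append(new_attr)
        existing_attr_names.add(new_attr['name'])

assert [a['name'] for a in existing_attributes] == ['dns_records', 'ports']
```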
@@ -485,18 +502,27 @@ class GraphManager:
                 if d.get('confidence_score', 0) >= min_confidence]
 
     def get_graph_data(self) -> Dict[str, Any]:
-        """Export graph data formatted for frontend visualization."""
+        """
+        Export graph data formatted for frontend visualization.
+        Compatible with unified data model - preserves all attribute information for frontend display.
+        """
         nodes = []
         for node_id, attrs in self.graph.nodes(data=True):
             node_data = {'id': node_id, 'label': node_id, 'type': attrs.get('type', 'unknown'),
-                         'attributes': attrs.get('attributes', {}),
+                         'attributes': attrs.get('attributes', []),  # Ensure attributes is a list
                          'description': attrs.get('description', ''),
                          'metadata': attrs.get('metadata', {}),
                          'added_timestamp': attrs.get('added_timestamp')}
 
             # Customize node appearance based on type and attributes
             node_type = node_data['type']
-            attributes = node_data['attributes']
-            if node_type == 'domain' and attributes.get('certificates', {}).get('has_valid_cert') is False:
+            attributes_list = node_data['attributes']
+
+            # CORRECTED LOGIC: Handle certificate validity styling
+            if node_type == 'domain' and isinstance(attributes_list, list):
+                # Find the certificates attribute in the list
+                cert_attr = next((attr for attr in attributes_list if attr.get('name') == 'certificates'), None)
+                if cert_attr and cert_attr.get('value', {}).get('has_valid_cert') is False:
                     node_data['color'] = {'background': '#c7c7c7', 'border': '#999'}  # Gray for invalid cert
 
             # Add incoming and outgoing edges to node data
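Because `attributes` is now a list, the frontend-facing payload and the certificate lookup work as in the sketch below (field values are invented for illustration; the lookup expression is the one from the diff above):

```python
# Hypothetical node payload shape under the new model
node_data = {
    'id': 'example.com',
    'type': 'domain',
    'attributes': [
        {'name': 'certificates', 'value': {'has_valid_cert': False},
         'type': 'certificate_data', 'provider': 'crtsh', 'confidence': 0.9, 'metadata': {}},
    ],
}

attributes_list = node_data['attributes']
cert_attr = next((attr for attr in attributes_list if attr.get('name') == 'certificates'), None)
if cert_attr and cert_attr.get('value', {}).get('has_valid_cert') is False:
    node_data['color'] = {'background': '#c7c7c7', 'border': '#999'}  # same styling rule as above

assert node_data['color']['background'] == '#c7c7c7'
```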
@@ -528,7 +554,7 @@ class GraphManager:
                 'last_modified': self.last_modified,
                 'total_nodes': self.get_node_count(),
                 'total_edges': self.get_edge_count(),
-                'graph_format': 'dnsrecon_v1_nodeling'
+                'graph_format': 'dnsrecon_v1_unified_model'
             },
             'graph': graph_data,
             'statistics': self.get_statistics()
core/provider_result.py (new file, 106 lines)
@@ -0,0 +1,106 @@
+# dnsrecon-reduced/core/provider_result.py
+
+"""
+Unified data model for DNSRecon passive reconnaissance.
+Standardizes the data structure across all providers to ensure consistent processing.
+"""
+
+from typing import Any, Optional, List, Dict
+from dataclasses import dataclass, field
+from datetime import datetime, timezone
+
+
+@dataclass
+class StandardAttribute:
+    """A unified data structure for a single piece of information about a node."""
+    target_node: str
+    name: str
+    value: Any
+    type: str
+    provider: str
+    confidence: float
+    timestamp: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
+    metadata: Optional[Dict[str, Any]] = field(default_factory=dict)
+
+    def __post_init__(self):
+        """Validate the attribute after initialization."""
+        if not isinstance(self.confidence, (int, float)) or not 0.0 <= self.confidence <= 1.0:
+            raise ValueError(f"Confidence must be between 0.0 and 1.0, got {self.confidence}")
+
+
+@dataclass
+class Relationship:
+    """A unified data structure for a directional link between two nodes."""
+    source_node: str
+    target_node: str
+    relationship_type: str
+    confidence: float
+    provider: str
+    timestamp: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
+    raw_data: Optional[Dict[str, Any]] = field(default_factory=dict)
+
+    def __post_init__(self):
+        """Validate the relationship after initialization."""
+        if not isinstance(self.confidence, (int, float)) or not 0.0 <= self.confidence <= 1.0:
+            raise ValueError(f"Confidence must be between 0.0 and 1.0, got {self.confidence}")
+
+
+@dataclass
+class ProviderResult:
+    """A container for all data returned by a provider from a single query."""
+    attributes: List[StandardAttribute] = field(default_factory=list)
+    relationships: List[Relationship] = field(default_factory=list)
+
+    def add_attribute(self, target_node: str, name: str, value: Any, attr_type: str,
+                      provider: str, confidence: float = 0.8,
+                      metadata: Optional[Dict[str, Any]] = None) -> None:
+        """Helper method to add an attribute to the result."""
+        self.attributes.append(StandardAttribute(
+            target_node=target_node,
+            name=name,
+            value=value,
+            type=attr_type,
+            provider=provider,
+            confidence=confidence,
+            metadata=metadata or {}
+        ))
+
+    def add_relationship(self, source_node: str, target_node: str, relationship_type: str,
+                         provider: str, confidence: float = 0.8,
+                         raw_data: Optional[Dict[str, Any]] = None) -> None:
+        """Helper method to add a relationship to the result."""
+        self.relationships.append(Relationship(
+            source_node=source_node,
+            target_node=target_node,
+            relationship_type=relationship_type,
+            confidence=confidence,
+            provider=provider,
+            raw_data=raw_data or {}
+        ))
+
+    def get_discovered_nodes(self) -> set:
+        """Get all unique node identifiers discovered in this result."""
+        nodes = set()
+
+        # Add nodes from relationships
+        for rel in self.relationships:
+            nodes.add(rel.source_node)
+            nodes.add(rel.target_node)
+
+        # Add nodes from attributes
+        for attr in self.attributes:
+            nodes.add(attr.target_node)
+
+        return nodes
+
+    def get_relationship_count(self) -> int:
+        """Get the total number of relationships in this result."""
+        return len(self.relationships)
+
+    def get_attribute_count(self) -> int:
+        """Get the total number of attributes in this result."""
+        return len(self.attributes)
+
+    def is_large_entity(self, threshold: int) -> bool:
+        """Check if this result qualifies as a large entity based on relationship count."""
+        return self.get_relationship_count() > threshold
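A quick usage sketch of the new container (the calls are exactly the helpers defined above; node names are hypothetical):

```python
from core.provider_result import ProviderResult

result = ProviderResult()
result.add_relationship(source_node='example.com', target_node='93.184.216.34',
                        relationship_type='a_record', provider='dns', confidence=0.8)
result.add_attribute(target_node='example.com', name='dns_records',
                     value=['A: 93.184.216.34'], attr_type='dns_record_list',
                     provider='dns')

assert result.get_relationship_count() == 1
assert result.get_attribute_count() == 1
# Endpoints from relationships plus attribute targets, deduplicated
assert result.get_discovered_nodes() == {'example.com', '93.184.216.34'}
assert not result.is_large_entity(threshold=100)
```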
core/rate_limiter.py
@@ -1,7 +1,6 @@
 # dnsrecon-reduced/core/rate_limiter.py
 
 import time
-import redis
 
 class GlobalRateLimiter:
     def __init__(self, redis_client):
core/scanner.py (529 lines changed)
@@ -2,18 +2,18 @@
 
 import threading
 import traceback
-import time
 import os
 import importlib
 import redis
 from typing import List, Set, Dict, Any, Tuple, Optional
-from concurrent.futures import ThreadPoolExecutor, as_completed, CancelledError, Future
+from concurrent.futures import ThreadPoolExecutor
 from collections import defaultdict
 from queue import PriorityQueue
 from datetime import datetime, timezone
 
 from core.graph_manager import GraphManager, NodeType
 from core.logger import get_forensic_logger, new_session
+from core.provider_result import ProviderResult
 from utils.helpers import _is_valid_ip, _is_valid_domain
 from providers.base_provider import BaseProvider
 from core.rate_limiter import GlobalRateLimiter
@@ -30,6 +30,7 @@ class ScanStatus:
 class Scanner:
     """
     Main scanning orchestrator for DNSRecon passive reconnaissance.
+    Now provider-agnostic, consuming standardized ProviderResult objects.
     """
 
     def __init__(self, session_config=None):
@@ -470,6 +471,10 @@ class Scanner:
         print(f" - Tasks processed: {len(processed_tasks)}")
 
     def _query_single_provider_for_target(self, provider: BaseProvider, target: str, depth: int) -> Tuple[Set[str], Set[str], bool]:
+        """
+        Query a single provider and process the unified ProviderResult.
+        Now provider-agnostic - handles any provider that returns ProviderResult.
+        """
         if self._is_stop_requested():
             print(f"Stop requested before querying {provider.get_name()} for {target}")
             return set(), set(), False
@@ -478,21 +483,24 @@ class Scanner:
         target_type = NodeType.IP if is_ip else NodeType.DOMAIN
         print(f"Querying {provider.get_name()} for {target_type.value}: {target} at depth {depth}")
 
+        # Ensure target node exists in graph
         self.graph.add_node(target, target_type)
         self._initialize_provider_states(target)
 
         new_targets = set()
         large_entity_members = set()
-        node_attributes = defaultdict(lambda: defaultdict(list))
         provider_successful = True
 
         try:
-            provider_results = self._query_single_provider_forensic(provider, target, is_ip, depth)
-            if provider_results is None:
+            # Query provider - now returns unified ProviderResult
+            provider_result = self._query_single_provider_unified(provider, target, is_ip, depth)
+
+            if provider_result is None:
                 provider_successful = False
             elif not self._is_stop_requested():
-                discovered, is_large_entity = self._process_provider_results(
-                    target, provider, provider_results, node_attributes, depth
+                # Process the unified result
+                discovered, is_large_entity = self._process_provider_result_unified(
+                    target, provider, provider_result, depth
                 )
                 if is_large_entity:
                     large_entity_members.update(discovered)
@@ -504,15 +512,177 @@ class Scanner:
             provider_successful = False
             self._log_provider_error(target, provider.get_name(), str(e))
 
-        if not self._is_stop_requested():
-            for node_id, attributes in node_attributes.items():
-                if self.graph.graph.has_node(node_id):
-                    node_is_ip = _is_valid_ip(node_id)
-                    node_type_to_add = NodeType.IP if node_is_ip else NodeType.DOMAIN
-                    self.graph.add_node(node_id, node_type_to_add, attributes=attributes)
-
         return new_targets, large_entity_members, provider_successful
 
+    def _query_single_provider_unified(self, provider: BaseProvider, target: str, is_ip: bool, current_depth: int) -> Optional[ProviderResult]:
+        """
+        Query a single provider with stop signal checking, now returns ProviderResult.
+        """
+        provider_name = provider.get_name()
+        start_time = datetime.now(timezone.utc)
+
+        if self._is_stop_requested():
+            print(f"Stop requested before querying {provider_name} for {target}")
+            return None
+
+        print(f"Querying {provider_name} for {target}")
+        self.logger.logger.info(f"Attempting {provider_name} query for {target} at depth {current_depth}")
+
+        try:
+            # Query the provider - returns unified ProviderResult
+            if is_ip:
+                result = provider.query_ip(target)
+            else:
+                result = provider.query_domain(target)
+
+            if self._is_stop_requested():
+                print(f"Stop requested after querying {provider_name} for {target}")
+                return None
+
+            # Update provider state with relationship count (more meaningful than raw result count)
+            relationship_count = result.get_relationship_count() if result else 0
+            self._update_provider_state(target, provider_name, 'success', relationship_count, None, start_time)
+
+            print(f"✓ {provider_name} returned {relationship_count} relationships for {target}")
+            return result
+
+        except Exception as e:
+            self._update_provider_state(target, provider_name, 'failed', 0, str(e), start_time)
+            print(f"✗ {provider_name} failed for {target}: {e}")
+            return None
+
+    def _process_provider_result_unified(self, target: str, provider: BaseProvider,
+                                         provider_result: ProviderResult, current_depth: int) -> Tuple[Set[str], bool]:
+        """
+        Process a unified ProviderResult object to update the graph.
+        Returns (discovered_targets, is_large_entity).
+        """
+        provider_name = provider.get_name()
+        discovered_targets = set()
+
+        if self._is_stop_requested():
+            print(f"Stop requested before processing results from {provider_name} for {target}")
+            return discovered_targets, False
+
+        # Check for large entity based on relationship count
+        if provider_result.get_relationship_count() > self.config.large_entity_threshold:
+            print(f"Large entity detected: {provider_name} returned {provider_result.get_relationship_count()} relationships for {target}")
+            members = self._create_large_entity_from_provider_result(target, provider_name, provider_result, current_depth)
+            return members, True
+
+        # Process relationships
+        for i, relationship in enumerate(provider_result.relationships):
+            if i % 5 == 0 and self._is_stop_requested():  # Check periodically for stop
+                print(f"Stop requested while processing relationships from {provider_name} for {target}")
+                break
+
+            # Add nodes for relationship endpoints
+            source_node = relationship.source_node
+            target_node = relationship.target_node
+
+            # Determine node types
+            source_type = NodeType.IP if _is_valid_ip(source_node) else NodeType.DOMAIN
+            if target_node.startswith('AS') and target_node[2:].isdigit():
+                target_type = NodeType.ASN
+            elif _is_valid_ip(target_node):
+                target_type = NodeType.IP
+            else:
+                target_type = NodeType.DOMAIN
+
+            # Add nodes to graph
+            self.graph.add_node(source_node, source_type)
+            self.graph.add_node(target_node, target_type)
+
+            # Add edge to graph
+            if self.graph.add_edge(
+                source_node, target_node,
+                relationship.relationship_type,
+                relationship.confidence,
+                provider_name,
+                relationship.raw_data
+            ):
+                print(f"Added relationship: {source_node} -> {target_node} ({relationship.relationship_type})")
+
+            # Track discovered targets for further processing
+            if _is_valid_domain(target_node) or _is_valid_ip(target_node):
+                discovered_targets.add(target_node)
+
+        # Process attributes, preserving them as a list of objects
+        attributes_by_node = defaultdict(list)
+        for attribute in provider_result.attributes:
+            # Convert the StandardAttribute object to a dictionary that the frontend can use
+            attr_dict = {
+                "name": attribute.name,
+                "value": attribute.value,
+                "type": attribute.type,
+                "provider": attribute.provider,
+                "confidence": attribute.confidence,
+                "metadata": attribute.metadata
+            }
+            attributes_by_node[attribute.target_node].append(attr_dict)
+
+        # Add attributes to nodes
+        for node_id, node_attributes_list in attributes_by_node.items():
+            if self.graph.graph.has_node(node_id):
+                # Determine node type
+                if _is_valid_ip(node_id):
+                    node_type = NodeType.IP
+                elif node_id.startswith('AS') and node_id[2:].isdigit():
+                    node_type = NodeType.ASN
+                else:
+                    node_type = NodeType.DOMAIN
+
+                # Add node with the list of attributes
+                self.graph.add_node(node_id, node_type, attributes=node_attributes_list)
+
+        return discovered_targets, False
+
+    def _create_large_entity_from_provider_result(self, source: str, provider_name: str,
+                                                  provider_result: ProviderResult, current_depth: int) -> Set[str]:
+        """
+        Create a large entity node from a ProviderResult and return the members for DNS processing.
+        """
+        entity_id = f"large_entity_{provider_name}_{hash(source) & 0x7FFFFFFF}"
+
+        # Extract target nodes from relationships
+        targets = [rel.target_node for rel in provider_result.relationships]
+        node_type = 'unknown'
+
+        if targets:
+            if _is_valid_domain(targets[0]):
+                node_type = 'domain'
+            elif _is_valid_ip(targets[0]):
+                node_type = 'ip'
+
+        # Create nodes in graph (they exist but are grouped)
+        for target in targets:
+            target_node_type = NodeType.DOMAIN if node_type == 'domain' else NodeType.IP
+            self.graph.add_node(target, target_node_type)
+
+        attributes = {
+            'count': len(targets),
+            'nodes': targets,
+            'node_type': node_type,
+            'source_provider': provider_name,
+            'discovery_depth': current_depth,
+            'threshold_exceeded': self.config.large_entity_threshold,
+        }
+        description = f'Large entity created due to {len(targets)} relationships from {provider_name}'
+
+        self.graph.add_node(entity_id, NodeType.LARGE_ENTITY, attributes=attributes, description=description)
+
+        # Create edge from source to large entity
+        if provider_result.relationships:
+            rel_type = provider_result.relationships[0].relationship_type
+            self.graph.add_edge(source, entity_id, rel_type, 0.9, provider_name,
+                                {'large_entity_info': f'Contains {len(targets)} {node_type}s'})
+
+        self.logger.logger.warning(f"Large entity created: {entity_id} contains {len(targets)} targets from {provider_name}")
+        print(f"Created large entity {entity_id} for {len(targets)} {node_type}s from {provider_name}")
+
+        return set(targets)
+
     def stop_scan(self) -> bool:
         """Request immediate scan termination with proper cleanup."""
         try:
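One observation for reviewers: `_process_provider_result_unified` compares `get_relationship_count()` against the threshold inline, and `ProviderResult.is_large_entity()` encodes the identical check. A self-contained illustration (threshold value hypothetical; the scanner reads `config.large_entity_threshold`):

```python
from core.provider_result import ProviderResult

result = ProviderResult()
for i in range(101):
    result.add_relationship(source_node='example.com', target_node=f'sub{i}.example.com',
                            relationship_type='san_certificate', provider='crtsh')

threshold = 100  # hypothetical value
# The two checks are equivalent by definition of is_large_entity()
assert (result.get_relationship_count() > threshold) == result.is_large_entity(threshold)
assert result.is_large_entity(threshold)  # 101 > 100 triggers large-entity handling
```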
@@ -558,6 +728,73 @@ class Scanner:
             traceback.print_exc()
             return False
 
+    def extract_node_from_large_entity(self, large_entity_id: str, node_id_to_extract: str) -> bool:
+        """
+        Extracts a node from a large entity, re-creates its original edge, and
+        re-queues it for full scanning.
+        """
+        if not self.graph.graph.has_node(large_entity_id):
+            print(f"ERROR: Large entity {large_entity_id} not found.")
+            return False
+
+        # 1. Get the original source node that discovered the large entity
+        predecessors = list(self.graph.graph.predecessors(large_entity_id))
+        if not predecessors:
+            print(f"ERROR: No source node found for large entity {large_entity_id}.")
+            return False
+        source_node_id = predecessors[0]
+
+        # Get the original edge data to replicate it for the extracted node
+        original_edge_data = self.graph.graph.get_edge_data(source_node_id, large_entity_id)
+        if not original_edge_data:
+            print(f"ERROR: Could not find original edge data from {source_node_id} to {large_entity_id}.")
+            return False
+
+        # 2. Modify the graph data structure first
+        success = self.graph.extract_node_from_large_entity(large_entity_id, node_id_to_extract)
+        if not success:
+            print(f"ERROR: Node {node_id_to_extract} could not be removed from {large_entity_id}'s attributes.")
+            return False
+
+        # 3. Create the direct edge from the original source to the newly extracted node
+        print(f"Re-creating direct edge from {source_node_id} to extracted node {node_id_to_extract}")
+        self.graph.add_edge(
+            source_id=source_node_id,
+            target_id=node_id_to_extract,
+            relationship_type=original_edge_data.get('relationship_type', 'extracted_from_large_entity'),
+            confidence_score=original_edge_data.get('confidence_score', 0.85),  # Slightly lower confidence
+            source_provider=original_edge_data.get('source_provider', 'unknown'),
+            raw_data={'context': f'Extracted from large entity {large_entity_id}'}
+        )
+
+        # 4. Re-queue the extracted node for full processing by all eligible providers
+        print(f"Re-queueing extracted node {node_id_to_extract} for full reconnaissance...")
+        is_ip = _is_valid_ip(node_id_to_extract)
+        current_depth = self.graph.graph.nodes[large_entity_id].get('attributes', {}).get('discovery_depth', 0)
+
+        eligible_providers = self._get_eligible_providers(node_id_to_extract, is_ip, False)
+        for provider in eligible_providers:
+            provider_name = provider.get_name()
+            self.task_queue.put((self._get_priority(provider_name), (provider_name, node_id_to_extract, current_depth)))
+            self.total_tasks_ever_enqueued += 1
+
+        # 5. If the scanner is not running, we need to kickstart it to process this one item.
+        if self.status != ScanStatus.RUNNING:
+            print("Scanner is idle. Starting a mini-scan to process the extracted node.")
+            self.status = ScanStatus.RUNNING
+            self._update_session_state()
+
+            if not self.scan_thread or not self.scan_thread.is_alive():
+                self.scan_thread = threading.Thread(
+                    target=self._execute_scan,
+                    args=(self.current_target, self.max_depth),
+                    daemon=True
+                )
+                self.scan_thread.start()
+
+        print(f"Successfully extracted and re-queued {node_id_to_extract} from {large_entity_id}.")
+        return True
+
     def _update_session_state(self) -> None:
         """
         Update the scanner state in Redis for GUI updates.
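The re-queue step above pushes `(priority, (provider_name, node_id, depth))` tuples onto the scanner's `PriorityQueue`. A standalone sketch of that shape (priorities hypothetical; the real values come from `_get_priority()`):

```python
from queue import PriorityQueue

task_queue = PriorityQueue()
for provider_name, priority in [('crtsh', 3), ('dns', 1), ('shodan', 2)]:
    task_queue.put((priority, (provider_name, 'extracted.example.com', 0)))

# Lowest priority number is dequeued first
priority, (provider_name, node_id, depth) = task_queue.get()
assert (priority, provider_name) == (1, 'dns')
```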
@@ -656,39 +893,6 @@ class Scanner:
         provider_state = provider_states.get(provider_name)
         return provider_state is not None and provider_state.get('status') == 'success'
 
-    def _query_single_provider_forensic(self, provider, target: str, is_ip: bool, current_depth: int) -> Optional[List]:
-        """Query a single provider with stop signal checking."""
-        provider_name = provider.get_name()
-        start_time = datetime.now(timezone.utc)
-
-        if self._is_stop_requested():
-            print(f"Stop requested before querying {provider_name} for {target}")
-            return None
-
-        print(f"Querying {provider_name} for {target}")
-        self.logger.logger.info(f"Attempting {provider_name} query for {target} at depth {current_depth}")
-
-        try:
-            if is_ip:
-                results = provider.query_ip(target)
-            else:
-                results = provider.query_domain(target)
-
-            if self._is_stop_requested():
-                print(f"Stop requested after querying {provider_name} for {target}")
-                return None
-
-            self._update_provider_state(target, provider_name, 'success', len(results), None, start_time)
-
-            print(f"✓ {provider_name} returned {len(results)} results for {target}")
-            return results
-
-        except Exception as e:
-            self._update_provider_state(target, provider_name, 'failed', 0, str(e), start_time)
-            print(f"✗ {provider_name} failed for {target}: {e}")
-            return None
-
     def _update_provider_state(self, target: str, provider_name: str, status: str,
                                results_count: int, error: Optional[str], start_time: datetime) -> None:
         """Update provider state in node metadata for forensic tracking."""
@@ -711,237 +915,6 @@ class Scanner:
 
         self.logger.logger.info(f"Provider state updated: {target} -> {provider_name} -> {status} ({results_count} results)")
 
-    def _process_provider_results(self, target: str, provider, results: List,
-                                  node_attributes: Dict, current_depth: int) -> Tuple[Set[str], bool]:
-        """Process provider results, returns (discovered_targets, is_large_entity)."""
-        provider_name = provider.get_name()
-        discovered_targets = set()
-
-        if self._is_stop_requested():
-            print(f"Stop requested before processing results from {provider_name} for {target}")
-            return discovered_targets, False
-
-        if len(results) > self.config.large_entity_threshold:
-            print(f"Large entity detected: {provider_name} returned {len(results)} results for {target}")
-            members = self._create_large_entity(target, provider_name, results, current_depth)
-            return members, True
-
-        for i, (source, rel_target, rel_type, confidence, raw_data) in enumerate(results):
-            if i % 5 == 0 and self._is_stop_requested():  # Check more frequently
-                print(f"Stop requested while processing results from {provider_name} for {target}")
-                break
-
-            self.logger.log_relationship_discovery(
-                source_node=source,
-                target_node=rel_target,
-                relationship_type=rel_type,
-                confidence_score=confidence,
-                provider=provider_name,
-                raw_data=raw_data,
-                discovery_method=f"{provider_name}_query_depth_{current_depth}"
-            )
-
-            # Collect attributes for the source node
-            self._collect_node_attributes(source, provider_name, rel_type, rel_target, raw_data, node_attributes[source])
-
-            # If the relationship is asn_membership, collect attributes for the target ASN node
-            if rel_type == 'asn_membership':
-                self._collect_node_attributes(rel_target, provider_name, rel_type, source, raw_data, node_attributes[rel_target])
-
-            if isinstance(rel_target, list):
-                # If the target is a list, iterate and process each item
-                for single_target in rel_target:
-                    if _is_valid_ip(single_target):
-                        self.graph.add_node(single_target, NodeType.IP)
-                        if self.graph.add_edge(source, single_target, rel_type, confidence, provider_name, raw_data):
-                            print(f"Added IP relationship: {source} -> {single_target} ({rel_type})")
-                        discovered_targets.add(single_target)
-                    elif _is_valid_domain(single_target):
-                        self.graph.add_node(single_target, NodeType.DOMAIN)
-                        if self.graph.add_edge(source, single_target, rel_type, confidence, provider_name, raw_data):
-                            print(f"Added domain relationship: {source} -> {single_target} ({rel_type})")
-                        discovered_targets.add(single_target)
-                        self._collect_node_attributes(single_target, provider_name, rel_type, source, raw_data, node_attributes[single_target])
-
-            elif _is_valid_ip(rel_target):
-                self.graph.add_node(rel_target, NodeType.IP)
-                if self.graph.add_edge(source, rel_target, rel_type, confidence, provider_name, raw_data):
-                    print(f"Added IP relationship: {source} -> {rel_target} ({rel_type})")
-                discovered_targets.add(rel_target)
-
-            elif rel_target.startswith('AS') and rel_target[2:].isdigit():
-                self.graph.add_node(rel_target, NodeType.ASN)
-                if self.graph.add_edge(source, rel_target, rel_type, confidence, provider_name, raw_data):
-                    print(f"Added ASN relationship: {source} -> {rel_target} ({rel_type})")
-
-            elif _is_valid_domain(rel_target):
-                self.graph.add_node(rel_target, NodeType.DOMAIN)
-                if self.graph.add_edge(source, rel_target, rel_type, confidence, provider_name, raw_data):
-                    print(f"Added domain relationship: {source} -> {rel_target} ({rel_type})")
-                discovered_targets.add(rel_target)
-                self._collect_node_attributes(rel_target, provider_name, rel_type, source, raw_data, node_attributes[rel_target])
-
-            else:
-                self._collect_node_attributes(source, provider_name, rel_type, rel_target, raw_data, node_attributes[source])
-
-        return discovered_targets, False
-
-    def _create_large_entity(self, source: str, provider_name: str, results: List, current_depth: int) -> Set[str]:
-        """Create a large entity node and returns the members for DNS processing."""
-        entity_id = f"large_entity_{provider_name}_{hash(source) & 0x7FFFFFFF}"
-
-        targets = [rel[1] for rel in results if len(rel) > 1]
-        node_type = 'unknown'
-
-        if targets:
-            if _is_valid_domain(targets[0]):
-                node_type = 'domain'
-            elif _is_valid_ip(targets[0]):
-                node_type = 'ip'
-
-        # We still create the nodes so they exist in the graph, they are just not processed for edges yet.
-        for target in targets:
-            self.graph.add_node(target, NodeType.DOMAIN if node_type == 'domain' else NodeType.IP)
-
-        attributes = {
-            'count': len(targets),
-            'nodes': targets,
-            'node_type': node_type,
-            'source_provider': provider_name,
-            'discovery_depth': current_depth,
-            'threshold_exceeded': self.config.large_entity_threshold,
-        }
-        description = f'Large entity created due to {len(targets)} results from {provider_name}'
-
-        self.graph.add_node(entity_id, NodeType.LARGE_ENTITY, attributes=attributes, description=description)
-
-        if results:
-            rel_type = results[0][2]
-            self.graph.add_edge(source, entity_id, rel_type, 0.9, provider_name,
-                                {'large_entity_info': f'Contains {len(targets)} {node_type}s'})
-
-        self.logger.logger.warning(f"Large entity created: {entity_id} contains {len(targets)} targets from {provider_name}")
-        print(f"Created large entity {entity_id} for {len(targets)} {node_type}s from {provider_name}")
-
-        return set(targets)
-
-    def extract_node_from_large_entity(self, large_entity_id: str, node_id_to_extract: str) -> bool:
-        """
-        Extracts a node from a large entity, re-creates its original edge, and
-        re-queues it for full scanning.
-        """
-        if not self.graph.graph.has_node(large_entity_id):
-            print(f"ERROR: Large entity {large_entity_id} not found.")
-            return False
-
-        # 1. Get the original source node that discovered the large entity
-        predecessors = list(self.graph.graph.predecessors(large_entity_id))
-        if not predecessors:
-            print(f"ERROR: No source node found for large entity {large_entity_id}.")
-            return False
-        source_node_id = predecessors[0]
-
-        # Get the original edge data to replicate it for the extracted node
-        original_edge_data = self.graph.graph.get_edge_data(source_node_id, large_entity_id)
-        if not original_edge_data:
-            print(f"ERROR: Could not find original edge data from {source_node_id} to {large_entity_id}.")
-            return False
-
-        # 2. Modify the graph data structure first
-        success = self.graph.extract_node_from_large_entity(large_entity_id, node_id_to_extract)
-        if not success:
-            print(f"ERROR: Node {node_id_to_extract} could not be removed from {large_entity_id}'s attributes.")
-            return False
-
-        # 3. Create the direct edge from the original source to the newly extracted node
-        print(f"Re-creating direct edge from {source_node_id} to extracted node {node_id_to_extract}")
-        self.graph.add_edge(
-            source_id=source_node_id,
-            target_id=node_id_to_extract,
-            relationship_type=original_edge_data.get('relationship_type', 'extracted_from_large_entity'),
-            confidence_score=original_edge_data.get('confidence_score', 0.85),  # Slightly lower confidence
-            source_provider=original_edge_data.get('source_provider', 'unknown'),
-            raw_data={'context': f'Extracted from large entity {large_entity_id}'}
-        )
-
-        # 4. Re-queue the extracted node for full processing by all eligible providers
-        print(f"Re-queueing extracted node {node_id_to_extract} for full reconnaissance...")
-        is_ip = _is_valid_ip(node_id_to_extract)
-        current_depth = self.graph.graph.nodes[large_entity_id].get('attributes', {}).get('discovery_depth', 0)
-
-        eligible_providers = self._get_eligible_providers(node_id_to_extract, is_ip, False)
-        for provider in eligible_providers:
-            provider_name = provider.get_name()
-            self.task_queue.put((self._get_priority(provider_name), (provider_name, node_id_to_extract, current_depth)))
-            self.total_tasks_ever_enqueued += 1
-
-        # 5. If the scanner is not running, we need to kickstart it to process this one item.
-        if self.status != ScanStatus.RUNNING:
-            print("Scanner is idle. Starting a mini-scan to process the extracted node.")
-            self.status = ScanStatus.RUNNING
-            self._update_session_state()
-
-            if not self.scan_thread or not self.scan_thread.is_alive():
-                self.scan_thread = threading.Thread(
-                    target=self._execute_scan,
-                    args=(self.current_target, self.max_depth),
-                    daemon=True
-                )
-                self.scan_thread.start()
-
-        print(f"Successfully extracted and re-queued {node_id_to_extract} from {large_entity_id}.")
-        return True
-
-    def _collect_node_attributes(self, node_id: str, provider_name: str, rel_type: str,
-                                 target: str, raw_data: Dict[str, Any], attributes: Dict[str, Any]) -> None:
-        """Collect and organize attributes for a node."""
-        self.logger.logger.debug(f"Collecting attributes for {node_id} from {provider_name}: {rel_type}")
-
-        if provider_name == 'dns':
-            record_type = raw_data.get('query_type', 'UNKNOWN')
-            value = raw_data.get('value', target)
-            dns_entry = f"{record_type}: {value}"
-            if dns_entry not in attributes.get('dns_records', []):
-                attributes.setdefault('dns_records', []).append(dns_entry)
-
-        elif provider_name == 'crtsh':
-            if rel_type == "san_certificate":
-                domain_certs = raw_data.get('domain_certificates', {})
-                if node_id in domain_certs:
-                    cert_summary = domain_certs[node_id]
-                    attributes['certificates'] = cert_summary
-                if target not in attributes.get('related_domains_san', []):
-                    attributes.setdefault('related_domains_san', []).append(target)
-
-        elif provider_name == 'shodan':
-            # This logic will now apply to the correct node (ASN or IP)
-            shodan_attributes = attributes.setdefault('shodan', {})
-            for key, value in raw_data.items():
-                if key not in shodan_attributes or not shodan_attributes.get(key):
-                    shodan_attributes[key] = value
-
-            if _is_valid_ip(node_id):
-                if 'ports' in raw_data:
-                    attributes['ports'] = raw_data['ports']
-                if 'os' in raw_data and raw_data['os']:
-                    attributes['os'] = raw_data['os']
-
-            if rel_type == "asn_membership":
-                # This is the key change: these attributes are for the target (the ASN),
-                # not the source (the IP). We will add them to the ASN node later.
-                pass
-
-        record_type_name = rel_type
-        if record_type_name not in attributes:
-            attributes[record_type_name] = []
-
-        if isinstance(target, list):
-            attributes[record_type_name].extend(target)
-        else:
-            if target not in attributes[record_type_name]:
-                attributes[record_type_name].append(target)
-
     def _log_target_processing_error(self, target: str, error: str) -> None:
         """Log target processing errors for forensic trail."""
         self.logger.logger.error(f"Target processing failed for {target}: {error}")
core/session_manager.py
@@ -5,15 +5,11 @@ import time
 import uuid
 import redis
 import pickle
-from typing import Dict, Optional, Any, List
+from typing import Dict, Optional, Any
 
 from core.scanner import Scanner
 from config import config
 
-# WARNING: Using pickle can be a security risk if the data source is not trusted.
-# In this case, we are only serializing/deserializing our own trusted Scanner objects,
-# which is generally safe. Do not unpickle data from untrusted sources.
-
 class SessionManager:
     """
     Manages multiple scanner instances for concurrent user sessions using Redis.
providers/base_provider.py
@@ -4,16 +4,17 @@ import time
 import requests
 import threading
 from abc import ABC, abstractmethod
-from typing import List, Dict, Any, Optional, Tuple
+from typing import Dict, Any, Optional
 
 from core.logger import get_forensic_logger
 from core.rate_limiter import GlobalRateLimiter
+from core.provider_result import ProviderResult
 
 
 class BaseProvider(ABC):
     """
     Abstract base class for all DNSRecon data providers.
-    Now supports session-specific configuration.
+    Now supports session-specific configuration and returns standardized ProviderResult objects.
     """
 
     def __init__(self, name: str, rate_limit: int = 60, timeout: int = 30, session_config=None):
@@ -101,7 +102,7 @@ class BaseProvider(ABC):
         pass
 
     @abstractmethod
-    def query_domain(self, domain: str) -> List[Tuple[str, str, str, float, Dict[str, Any]]]:
+    def query_domain(self, domain: str) -> ProviderResult:
         """
         Query the provider for information about a domain.
 
@@ -109,12 +110,12 @@ class BaseProvider(ABC):
             domain: Domain to investigate
 
         Returns:
-            List of tuples: (source_node, target_node, relationship_type, confidence, raw_data)
+            ProviderResult containing standardized attributes and relationships
         """
         pass
 
     @abstractmethod
-    def query_ip(self, ip: str) -> List[Tuple[str, str, str, float, Dict[str, Any]]]:
+    def query_ip(self, ip: str) -> ProviderResult:
         """
         Query the provider for information about an IP address.
 
@@ -122,7 +123,7 @@ class BaseProvider(ABC):
             ip: IP address to investigate
 
         Returns:
-            List of tuples: (source_node, target_node, relationship_type, confidence, raw_data)
+            ProviderResult containing standardized attributes and relationships
         """
         pass
 
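To make the new contract concrete, here is a minimal, hypothetical provider that satisfies the two abstract methods changed above. It is a sketch, not part of the commit; any other abstract methods on `BaseProvider` would also need implementations before it could be instantiated:

```python
from providers.base_provider import BaseProvider
from core.provider_result import ProviderResult

class NullProvider(BaseProvider):
    """Hypothetical no-op provider illustrating the ProviderResult contract."""

    def get_name(self) -> str:
        return 'null'

    def query_domain(self, domain: str) -> ProviderResult:
        result = ProviderResult()
        # A real provider would populate the result here, e.g.:
        # result.add_relationship(source_node=domain, target_node=..., ...)
        return result

    def query_ip(self, ip: str) -> ProviderResult:
        # Always return a ProviderResult, never a list of tuples
        return ProviderResult()
```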
(File diff suppressed because it is too large.)
providers/dns_provider.py
@@ -1,15 +1,16 @@
 # dnsrecon/providers/dns_provider.py
 
 from dns import resolver, reversename
-from typing import List, Dict, Any, Tuple
+from typing import Dict
 from .base_provider import BaseProvider
+from core.provider_result import ProviderResult
 from utils.helpers import _is_valid_ip, _is_valid_domain
 
 
 class DNSProvider(BaseProvider):
     """
     Provider for standard DNS resolution and reverse DNS lookups.
-    Now uses session-specific configuration.
+    Now returns standardized ProviderResult objects.
     """
 
     def __init__(self, name=None, session_config=None):
@@ -25,7 +26,6 @@ class DNSProvider(BaseProvider):
         self.resolver = resolver.Resolver()
         self.resolver.timeout = 5
         self.resolver.lifetime = 10
-        #self.resolver.nameservers = ['127.0.0.1']
 
     def get_name(self) -> str:
         """Return the provider name."""
@@ -47,31 +47,35 @@ class DNSProvider(BaseProvider):
         """DNS is always available - no API key required."""
         return True
 
-    def query_domain(self, domain: str) -> List[Tuple[str, str, str, float, Dict[str, Any]]]:
+    def query_domain(self, domain: str) -> ProviderResult:
         """
-        Query DNS records for the domain to discover relationships.
-        ...
+        Query DNS records for the domain to discover relationships and attributes.
+
+        Args:
+            domain: Domain to investigate
+
+        Returns:
+            ProviderResult containing discovered relationships and attributes
         """
         if not _is_valid_domain(domain):
-            return []
+            return ProviderResult()
 
-        relationships = []
+        result = ProviderResult()
 
         # Query all record types
         for record_type in ['A', 'AAAA', 'CNAME', 'MX', 'NS', 'SOA', 'TXT', 'SRV', 'CAA']:
             try:
-                relationships.extend(self._query_record(domain, record_type))
+                self._query_record(domain, record_type, result)
             except resolver.NoAnswer:
                 # This is not an error, just a confirmation that the record doesn't exist.
                 self.logger.logger.debug(f"No {record_type} record found for {domain}")
             except Exception as e:
                 self.failed_requests += 1
                 self.logger.logger.debug(f"{record_type} record query failed for {domain}: {e}")
-                # Optionally, you might want to re-raise other, more serious exceptions.
 
-        return relationships
+        return result
 
-    def query_ip(self, ip: str) -> List[Tuple[str, str, str, float, Dict[str, Any]]]:
+    def query_ip(self, ip: str) -> ProviderResult:
         """
         Query reverse DNS for the IP address.
 
@@ -79,12 +83,12 @@ class DNSProvider(BaseProvider):
             ip: IP address to investigate
 
         Returns:
-            List of relationships discovered from reverse DNS
+            ProviderResult containing discovered relationships and attributes
         """
         if not _is_valid_ip(ip):
-            return []
+            return ProviderResult()
 
-        relationships = []
+        result = ProviderResult()
 
         try:
             # Perform reverse DNS lookup
@@ -97,27 +101,44 @@ class DNSProvider(BaseProvider):
                 hostname = str(ptr_record).rstrip('.')
 
                 if _is_valid_domain(hostname):
-                    raw_data = {
-                        'query_type': 'PTR',
-                        'ip_address': ip,
-                        'hostname': hostname,
-                        'ttl': response.ttl
-                    }
-                    relationships.append((
-                        ip,
-                        hostname,
-                        'ptr_record',
-                        0.8,
-                        raw_data
-                    ))
+                    # Add the relationship
+                    result.add_relationship(
+                        source_node=ip,
+                        target_node=hostname,
+                        relationship_type='ptr_record',
+                        provider=self.name,
+                        confidence=0.8,
+                        raw_data={
+                            'query_type': 'PTR',
+                            'ip_address': ip,
+                            'hostname': hostname,
+                            'ttl': response.ttl
+                        }
+                    )
+
+                    # Add PTR record as attribute to the IP
+                    result.add_attribute(
+                        target_node=ip,
+                        name='ptr_record',
+                        value=hostname,
+                        attr_type='dns_record',
+                        provider=self.name,
+                        confidence=0.8,
+                        metadata={'ttl': response.ttl}
+                    )
+
+                    # Log the relationship discovery
                     self.log_relationship_discovery(
                         source_node=ip,
                         target_node=hostname,
                         relationship_type='ptr_record',
                         confidence_score=0.8,
-                        raw_data=raw_data,
+                        raw_data={
+                            'query_type': 'PTR',
+                            'ip_address': ip,
+                            'hostname': hostname,
+                            'ttl': response.ttl
+                        },
                         discovery_method="reverse_dns_lookup"
                     )
 
@@ -130,18 +151,24 @@ class DNSProvider(BaseProvider):
             # Re-raise the exception so the scanner can handle the failure
             raise e
 
-        return relationships
+        return result
 
-    def _query_record(self, domain: str, record_type: str) -> List[Tuple[str, str, str, float, Dict[str, Any]]]:
+    def _query_record(self, domain: str, record_type: str, result: ProviderResult) -> None:
         """
-        Query a specific type of DNS record for the domain.
+        Query a specific type of DNS record for the domain and add results to ProviderResult.
+
+        Args:
+            domain: Domain to query
+            record_type: DNS record type (A, AAAA, CNAME, etc.)
+            result: ProviderResult to populate
         """
-        relationships = []
         try:
             self.total_requests += 1
             response = self.resolver.resolve(domain, record_type)
             self.successful_requests += 1
 
+            dns_records = []
+
             for record in response:
                 target = ""
                 if record_type in ['A', 'AAAA']:
@@ -153,12 +180,16 @@ class DNSProvider(BaseProvider):
                 elif record_type == 'SOA':
                     target = str(record.mname).rstrip('.')
                 elif record_type in ['TXT']:
-                    # TXT records are treated as metadata, not relationships.
+                    # TXT records are treated as attributes, not relationships
+                    txt_value = str(record).strip('"')
+                    dns_records.append(f"TXT: {txt_value}")
                     continue
                 elif record_type == 'SRV':
                     target = str(record.target).rstrip('.')
                 elif record_type == 'CAA':
-                    target = f"{record.flags} {record.tag.decode('utf-8')} \"{record.value.decode('utf-8')}\""
+                    caa_value = f"{record.flags} {record.tag.decode('utf-8')} \"{record.value.decode('utf-8')}\""
+                    dns_records.append(f"CAA: {caa_value}")
+                    continue
                 else:
                     target = str(record)
 
@ -170,16 +201,22 @@ class DNSProvider(BaseProvider):
|
|||||||
'ttl': response.ttl
|
'ttl': response.ttl
|
||||||
}
|
}
|
||||||
relationship_type = f"{record_type.lower()}_record"
|
relationship_type = f"{record_type.lower()}_record"
|
||||||
confidence = 0.8 # Default confidence for DNS records
|
confidence = 0.8 # Standard confidence for DNS records
|
||||||
|
|
||||||
relationships.append((
|
# Add relationship
|
||||||
domain,
|
result.add_relationship(
|
||||||
target,
|
source_node=domain,
|
||||||
relationship_type,
|
target_node=target,
|
||||||
confidence,
|
relationship_type=relationship_type,
|
||||||
raw_data
|
provider=self.name,
|
||||||
))
|
confidence=confidence,
|
||||||
|
raw_data=raw_data
|
||||||
|
)
|
||||||
|
|
||||||
|
# Add DNS record as attribute to the source domain
|
||||||
|
dns_records.append(f"{record_type}: {target}")
|
||||||
|
|
||||||
|
# Log relationship discovery
|
||||||
self.log_relationship_discovery(
|
self.log_relationship_discovery(
|
||||||
source_node=domain,
|
source_node=domain,
|
||||||
target_node=target,
|
target_node=target,
|
||||||
@ -189,10 +226,20 @@ class DNSProvider(BaseProvider):
|
|||||||
discovery_method=f"dns_{record_type.lower()}_record"
|
discovery_method=f"dns_{record_type.lower()}_record"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
# Add DNS records as a consolidated attribute
|
||||||
|
if dns_records:
|
||||||
|
result.add_attribute(
|
||||||
|
target_node=domain,
|
||||||
|
name='dns_records',
|
||||||
|
value=dns_records,
|
||||||
|
attr_type='dns_record_list',
|
||||||
|
provider=self.name,
|
||||||
|
confidence=0.8,
|
||||||
|
metadata={'record_types': [record_type]}
|
||||||
|
)
|
||||||
|
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
self.failed_requests += 1
|
self.failed_requests += 1
|
||||||
self.logger.logger.debug(f"{record_type} record query failed for {domain}: {e}")
|
self.logger.logger.debug(f"{record_type} record query failed for {domain}: {e}")
|
||||||
# Re-raise the exception so the scanner can handle it
|
# Re-raise the exception so the scanner can handle it
|
||||||
raise e
|
raise e
|
||||||
|
|
||||||
return relationships
|
|
||||||
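The DNS provider hunks above, and every provider hunk that follows, lean on two `ProviderResult` helpers: `add_relationship()` and `add_attribute()`. The commit does not include `core/provider_result.py` itself, so the following is only an inferred sketch of its shape, reconstructed from the call sites and from the cache serialization in the shodan_provider.py hunks below (field names such as `rel.source_node` and `attr.type` come from that code); treat every name here as an assumption, not the actual implementation.

# Inferred sketch of core/provider_result.py - this file is not shown in
# the commit; names are reconstructed from call sites and cache serialization.
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional


@dataclass
class Relationship:
    source_node: str
    target_node: str
    relationship_type: str
    provider: str
    confidence: float
    raw_data: Dict[str, Any] = field(default_factory=dict)


@dataclass
class StandardAttribute:
    target_node: str
    name: str
    value: Any
    type: str
    provider: str
    confidence: float
    metadata: Dict[str, Any] = field(default_factory=dict)


class ProviderResult:
    """Unified container that providers return instead of ad-hoc tuple lists."""

    def __init__(self) -> None:
        self.relationships: List[Relationship] = []
        self.attributes: List[StandardAttribute] = []

    def add_relationship(self, source_node: str, target_node: str,
                         relationship_type: str, provider: str, confidence: float,
                         raw_data: Optional[Dict[str, Any]] = None) -> None:
        self.relationships.append(Relationship(
            source_node, target_node, relationship_type,
            provider, confidence, raw_data or {}))

    def add_attribute(self, target_node: str, name: str, value: Any,
                      attr_type: str, provider: str, confidence: float,
                      metadata: Optional[Dict[str, Any]] = None) -> None:
        # The keyword is attr_type at the call sites but it is stored as
        # `type`, matching "type": attr.type in the Shodan cache serialization.
        self.attributes.append(StandardAttribute(
            target_node, name, value, attr_type,
            provider, confidence, metadata or {}))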
@@ -1,20 +1,20 @@
 # dnsrecon/providers/shodan_provider.py

 import json
-import os
 from pathlib import Path
-from typing import List, Dict, Any, Tuple
+from typing import Dict, Any
 from datetime import datetime, timezone
 import requests

 from .base_provider import BaseProvider
+from core.provider_result import ProviderResult
 from utils.helpers import _is_valid_ip, _is_valid_domain


 class ShodanProvider(BaseProvider):
     """
     Provider for querying Shodan API for IP address information.
-    Now uses session-specific API keys, is limited to IP-only queries, and includes caching.
+    Now returns standardized ProviderResult objects with caching support.
     """

     def __init__(self, name=None, session_config=None):
@@ -85,28 +85,40 @@ class ShodanProvider(BaseProvider):
         except (json.JSONDecodeError, ValueError, KeyError):
             return "stale"

-    def query_domain(self, domain: str) -> List[Tuple[str, str, str, float, Dict[str, Any]]]:
+    def query_domain(self, domain: str) -> ProviderResult:
         """
         Domain queries are no longer supported for the Shodan provider.
+
+        Args:
+            domain: Domain to investigate
+
+        Returns:
+            Empty ProviderResult
         """
-        return []
+        return ProviderResult()

-    def query_ip(self, ip: str) -> List[Tuple[str, str, str, float, Dict[str, Any]]]:
+    def query_ip(self, ip: str) -> ProviderResult:
         """
-        Query Shodan for information about an IP address, with caching of processed relationships.
+        Query Shodan for information about an IP address, with caching of processed data.
+
+        Args:
+            ip: IP address to investigate
+
+        Returns:
+            ProviderResult containing discovered relationships and attributes
         """
         if not _is_valid_ip(ip) or not self.is_available():
-            return []
+            return ProviderResult()

         cache_file = self._get_cache_file_path(ip)
         cache_status = self._get_cache_status(cache_file)

-        relationships = []
+        result = ProviderResult()

         try:
             if cache_status == "fresh":
-                relationships = self._load_from_cache(cache_file)
-                self.logger.logger.info(f"Using cached Shodan relationships for {ip}")
+                result = self._load_from_cache(cache_file)
+                self.logger.logger.info(f"Using cached Shodan data for {ip}")
             else:  # "stale" or "not_found"
                 url = f"{self.base_url}/shodan/host/{ip}"
                 params = {'key': self.api_key}
@@ -114,59 +126,115 @@ class ShodanProvider(BaseProvider):

                 if response and response.status_code == 200:
                     data = response.json()
-                    # Process the data into relationships BEFORE caching
-                    relationships = self._process_shodan_data(ip, data)
-                    self._save_to_cache(cache_file, relationships)  # Save the processed relationships
+                    # Process the data into ProviderResult BEFORE caching
+                    result = self._process_shodan_data(ip, data)
+                    self._save_to_cache(cache_file, result, data)  # Save both result and raw data
                 elif cache_status == "stale":
                     # If API fails on a stale cache, use the old data
-                    relationships = self._load_from_cache(cache_file)
+                    result = self._load_from_cache(cache_file)

         except requests.exceptions.RequestException as e:
             self.logger.logger.error(f"Shodan API query failed for {ip}: {e}")
             if cache_status == "stale":
-                relationships = self._load_from_cache(cache_file)
+                result = self._load_from_cache(cache_file)

-        return relationships
+        return result

-    def _load_from_cache(self, cache_file_path: Path) -> List[Tuple[str, str, str, float, Dict[str, Any]]]:
-        """Load processed Shodan relationships from a cache file."""
+    def _load_from_cache(self, cache_file_path: Path) -> ProviderResult:
+        """Load processed Shodan data from a cache file."""
         try:
             with open(cache_file_path, 'r') as f:
                 cache_content = json.load(f)
-            # The entire file content is the list of relationships
-            return cache_content.get("relationships", [])
-        except (json.JSONDecodeError, FileNotFoundError, KeyError):
-            return []

-    def _save_to_cache(self, cache_file_path: Path, relationships: List[Tuple[str, str, str, float, Dict[str, Any]]]) -> None:
-        """Save processed Shodan relationships to a cache file."""
+            result = ProviderResult()
+
+            # Reconstruct relationships
+            for rel_data in cache_content.get("relationships", []):
+                result.add_relationship(
+                    source_node=rel_data["source_node"],
+                    target_node=rel_data["target_node"],
+                    relationship_type=rel_data["relationship_type"],
+                    provider=rel_data["provider"],
+                    confidence=rel_data["confidence"],
+                    raw_data=rel_data.get("raw_data", {})
+                )
+
+            # Reconstruct attributes
+            for attr_data in cache_content.get("attributes", []):
+                result.add_attribute(
+                    target_node=attr_data["target_node"],
+                    name=attr_data["name"],
+                    value=attr_data["value"],
+                    attr_type=attr_data["type"],
+                    provider=attr_data["provider"],
+                    confidence=attr_data["confidence"],
+                    metadata=attr_data.get("metadata", {})
+                )
+
+            return result
+
+        except (json.JSONDecodeError, FileNotFoundError, KeyError):
+            return ProviderResult()
+
+    def _save_to_cache(self, cache_file_path: Path, result: ProviderResult, raw_data: Dict[str, Any]) -> None:
+        """Save processed Shodan data to a cache file."""
         try:
             cache_data = {
                 "last_upstream_query": datetime.now(timezone.utc).isoformat(),
-                "relationships": relationships
+                "raw_data": raw_data,  # Preserve original for forensic purposes
+                "relationships": [
+                    {
+                        "source_node": rel.source_node,
+                        "target_node": rel.target_node,
+                        "relationship_type": rel.relationship_type,
+                        "confidence": rel.confidence,
+                        "provider": rel.provider,
+                        "raw_data": rel.raw_data
+                    } for rel in result.relationships
+                ],
+                "attributes": [
+                    {
+                        "target_node": attr.target_node,
+                        "name": attr.name,
+                        "value": attr.value,
+                        "type": attr.type,
+                        "provider": attr.provider,
+                        "confidence": attr.confidence,
+                        "metadata": attr.metadata
+                    } for attr in result.attributes
+                ]
             }
             with open(cache_file_path, 'w') as f:
-                json.dump(cache_data, f, separators=(',', ':'))
+                json.dump(cache_data, f, separators=(',', ':'), default=str)
         except Exception as e:
             self.logger.logger.warning(f"Failed to save Shodan cache for {cache_file_path.name}: {e}")

-    def _process_shodan_data(self, ip: str, data: Dict[str, Any]) -> List[Tuple[str, str, str, float, Dict[str, Any]]]:
+    def _process_shodan_data(self, ip: str, data: Dict[str, Any]) -> ProviderResult:
         """
-        Process Shodan data to extract relationships.
+        Process Shodan data to extract relationships and attributes.
+
+        Args:
+            ip: IP address queried
+            data: Raw Shodan response data
+
+        Returns:
+            ProviderResult with relationships and attributes
         """
-        relationships = []
+        result = ProviderResult()

         # Extract hostname relationships
         hostnames = data.get('hostnames', [])
         for hostname in hostnames:
             if _is_valid_domain(hostname):
-                relationships.append((
-                    ip,
-                    hostname,
-                    'a_record',
-                    0.8,
-                    data
-                ))
+                result.add_relationship(
+                    source_node=ip,
+                    target_node=hostname,
+                    relationship_type='a_record',
+                    provider=self.name,
+                    confidence=0.8,
+                    raw_data=data
+                )

                 self.log_relationship_discovery(
                     source_node=ip,
                     target_node=hostname,
@@ -180,13 +248,15 @@ class ShodanProvider(BaseProvider):
         asn = data.get('asn')
         if asn:
             asn_name = f"AS{asn[2:]}" if isinstance(asn, str) and asn.startswith('AS') else f"AS{asn}"
-            relationships.append((
-                ip,
-                asn_name,
-                'asn_membership',
-                0.7,
-                data
-            ))
+            result.add_relationship(
+                source_node=ip,
+                target_node=asn_name,
+                relationship_type='asn_membership',
+                provider=self.name,
+                confidence=0.7,
+                raw_data=data
+            )

             self.log_relationship_discovery(
                 source_node=ip,
                 target_node=asn_name,
@@ -196,4 +266,66 @@ class ShodanProvider(BaseProvider):
                 discovery_method="shodan_asn_lookup"
             )

-        return relationships
+        # Add comprehensive Shodan host information as attributes
+        if 'ports' in data:
+            result.add_attribute(
+                target_node=ip,
+                name='ports',
+                value=data['ports'],
+                attr_type='network_info',
+                provider=self.name,
+                confidence=0.9
+            )
+
+        if 'os' in data and data['os']:
+            result.add_attribute(
+                target_node=ip,
+                name='operating_system',
+                value=data['os'],
+                attr_type='system_info',
+                provider=self.name,
+                confidence=0.8
+            )
+
+        if 'org' in data:
+            result.add_attribute(
+                target_node=ip,
+                name='organization',
+                value=data['org'],
+                attr_type='network_info',
+                provider=self.name,
+                confidence=0.8
+            )
+
+        if 'country_name' in data:
+            result.add_attribute(
+                target_node=ip,
+                name='country',
+                value=data['country_name'],
+                attr_type='location_info',
+                provider=self.name,
+                confidence=0.9
+            )
+
+        if 'city' in data:
+            result.add_attribute(
+                target_node=ip,
+                name='city',
+                value=data['city'],
+                attr_type='location_info',
+                provider=self.name,
+                confidence=0.8
+            )
+
+        # Store complete Shodan data as a comprehensive attribute
+        result.add_attribute(
+            target_node=ip,
+            name='shodan_host_info',
+            value=data,  # Complete Shodan response for full forensic detail
+            attr_type='comprehensive_data',
+            provider=self.name,
+            confidence=0.9,
+            metadata={'data_source': 'shodan_api', 'query_type': 'host_lookup'}
+        )
+
+        return result
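Since `_save_to_cache()` and `_load_from_cache()` now round-trip an entire `ProviderResult` rather than a bare relationship list, a cached Shodan entry takes roughly the shape sketched below. The field names follow the serialization code above; the concrete values are invented for illustration. Note also the `default=str` added to `json.dump`: without it, any non-JSON-serializable value in Shodan's raw payload (e.g. a datetime-like field) would raise, be swallowed by the `except`, and silently skip the cache write.

# Illustrative cache entry only - values are made up; the shape follows
# _save_to_cache() above.
example_cache_entry = {
    "last_upstream_query": "2025-01-01T00:00:00+00:00",
    "raw_data": {"ports": [80, 443], "org": "Example Org"},  # forensic copy
    "relationships": [
        {
            "source_node": "203.0.113.10",
            "target_node": "www.example.com",
            "relationship_type": "a_record",
            "confidence": 0.8,
            "provider": "shodan",
            "raw_data": {"ports": [80, 443], "org": "Example Org"}
        }
    ],
    "attributes": [
        {
            "target_node": "203.0.113.10",
            "name": "ports",
            "value": [80, 443],
            "type": "network_info",
            "provider": "shodan",
            "confidence": 0.9,
            "metadata": {}
        }
    ]
}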
@@ -1,6 +1,7 @@
 /**
  * Graph visualization module for DNSRecon
  * Handles network graph rendering using vis.js with proper large entity node hiding
+ * UPDATED: Now compatible with unified data model (StandardAttribute objects)
  */
 const contextMenuCSS = `
 .graph-context-menu {
@@ -380,12 +381,16 @@ class GraphManager {
         const largeEntityMap = new Map();

         graphData.nodes.forEach(node => {
-            if (node.type === 'large_entity' && node.attributes && Array.isArray(node.attributes.nodes)) {
-                node.attributes.nodes.forEach(nodeId => {
+            if (node.type === 'large_entity' && node.attributes) {
+                // UPDATED: Handle unified data model - look for 'nodes' attribute in the attributes list
+                const nodesAttribute = this.findAttributeByName(node.attributes, 'nodes');
+                if (nodesAttribute && Array.isArray(nodesAttribute.value)) {
+                    nodesAttribute.value.forEach(nodeId => {
                         largeEntityMap.set(nodeId, node.id);
                         this.largeEntityMembers.add(nodeId);
                     });
                 }
+            }
         });

         const filteredNodes = graphData.nodes.filter(node => {
@@ -466,8 +471,21 @@ class GraphManager {
     }

     /**
-     * Process node data with styling and metadata
-     * @param {Object} node - Raw node data
+     * UPDATED: Helper method to find an attribute by name in the standardized attributes list
+     * @param {Array} attributes - List of StandardAttribute objects
+     * @param {string} name - Attribute name to find
+     * @returns {Object|null} The attribute object if found, null otherwise
+     */
+    findAttributeByName(attributes, name) {
+        if (!Array.isArray(attributes)) {
+            return null;
+        }
+        return attributes.find(attr => attr.name === name) || null;
+    }
+
+    /**
+     * UPDATED: Process node data with styling and metadata for unified data model
+     * @param {Object} node - Raw node data with standardized attributes
      * @returns {Object} Processed node data
      */
     processNode(node) {
@@ -478,7 +496,7 @@ class GraphManager {
             size: this.getNodeSize(node.type),
             borderColor: this.getNodeBorderColor(node.type),
             shape: this.getNodeShape(node.type),
-            attributes: node.attributes || {},
+            attributes: node.attributes || [], // Keep as standardized attributes list
             description: node.description || '',
             metadata: node.metadata || {},
             type: node.type,
@@ -491,9 +509,10 @@ class GraphManager {
             processedNode.borderWidth = Math.max(2, Math.floor(node.confidence * 5));
         }

-        // Style based on certificate validity
+        // UPDATED: Style based on certificate validity using unified data model
         if (node.type === 'domain') {
-            if (node.attributes && node.attributes.certificates && node.attributes.certificates.has_valid_cert === false) {
+            const certificatesAttr = this.findAttributeByName(node.attributes, 'certificates');
+            if (certificatesAttr && certificatesAttr.value && certificatesAttr.value.has_valid_cert === false) {
                 processedNode.color = { background: '#888888', border: '#666666' };
             }
         }
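The front-end changes (graph.js above, main.js next) all assume `node.attributes` arrives as a list of StandardAttribute-shaped objects rather than the old key/value map. A node in the graph API payload would therefore look roughly like the sketch below; the `name`/`value`/`type` fields are the ones the JavaScript actually reads, while the remaining keys and all concrete values are illustrative assumptions, not a documented schema.

# Illustrative graph-API node payload (values invented).
example_node = {
    "id": "www.example.com",
    "type": "domain",
    "attributes": [  # a list of StandardAttribute objects, no longer a dict
        {
            "name": "dns_records",
            "value": ["A: 203.0.113.10", "TXT: v=spf1 -all"],
            "type": "dns_record_list",
            "provider": "dns",
            "confidence": 0.8,
            "metadata": {"record_types": ["A"]}
        },
        {
            "name": "certificates",
            "value": {"has_valid_cert": True, "total_certificates": 2},
            "type": "certificate_data",  # assumed type label
            "provider": "crtsh",
            "confidence": 0.9,
            "metadata": {}
        }
    ],
    "description": "",
    "metadata": {}
}

Against this shape, the `findAttributeByName()` helper duplicated in both files is a linear scan; with a handful of attributes per node that is fine, though a Map keyed by name would avoid repeated `Array.find` calls if attribute counts grow.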
@@ -1,6 +1,7 @@
 /**
  * Main application logic for DNSRecon web interface
  * Handles UI interactions, API communication, and data flow
+ * UPDATED: Now compatible with unified data model (StandardAttribute objects)
  */

 class DNSReconApp {
@@ -808,10 +809,22 @@ class DNSReconApp {
     }

     /**
-     * Enhanced node details HTML generation with better visual hierarchy
-     * File: static/js/main.js (replace generateNodeDetailsHtml method)
+     * UPDATED: Helper method to find an attribute by name in the standardized attributes list
+     * @param {Array} attributes - List of StandardAttribute objects
+     * @param {string} name - Attribute name to find
+     * @returns {Object|null} The attribute object if found, null otherwise
+     */
+    findAttributeByName(attributes, name) {
+        if (!Array.isArray(attributes)) {
+            return null;
+        }
+        return attributes.find(attr => attr.name === name) || null;
+    }
+
+    /**
+     * UPDATED: Enhanced node details HTML generation for unified data model
+     * Now processes StandardAttribute objects instead of simple key-value pairs
      */
     generateNodeDetailsHtml(node) {
         if (!node) return '<div class="detail-row"><span class="detail-value">Details not available.</span></div>';

@@ -857,23 +870,28 @@ class DNSReconApp {
         return detailsHtml;
     }

+    /**
+     * UPDATED: Generate details for standard nodes using unified data model
+     */
     generateStandardNodeDetails(node) {
         let html = '';

         // Relationships sections
         html += this.generateRelationshipsSection(node);

-        // Enhanced attributes section with special certificate handling
-        if (node.attributes && Object.keys(node.attributes).length > 0) {
-            const { certificates, ...otherAttributes } = node.attributes;
+        // UPDATED: Enhanced attributes section with special certificate handling for unified model
+        if (node.attributes && Array.isArray(node.attributes) && node.attributes.length > 0) {
+            // Find certificate attribute separately
+            const certificatesAttr = this.findAttributeByName(node.attributes, 'certificates');

             // Handle certificates separately with enhanced display
-            if (certificates) {
-                html += this.generateCertificateSection({ certificates });
+            if (certificatesAttr) {
+                html += this.generateCertificateSection(certificatesAttr);
             }

-            // Handle other attributes normally
-            if (Object.keys(otherAttributes).length > 0) {
+            // Handle other attributes normally (excluding certificates to avoid duplication)
+            const otherAttributes = node.attributes.filter(attr => attr.name !== 'certificates');
+            if (otherAttributes.length > 0) {
                 html += this.generateAttributesSection(otherAttributes);
             }
         }
@@ -888,10 +906,10 @@ class DNSReconApp {
     }

     /**
-     * Enhanced certificate section generation using existing styles
+     * UPDATED: Enhanced certificate section generation for unified data model
     */
-    generateCertificateSection(attributes) {
-        const certificates = attributes.certificates;
+    generateCertificateSection(certificatesAttr) {
+        const certificates = certificatesAttr.value;
         if (!certificates || typeof certificates !== 'object') {
             return '';
         }
@@ -1094,10 +1112,22 @@ class DNSReconApp {
         return html;
     }

+    /**
+     * UPDATED: Generate large entity details using unified data model
+     */
     generateLargeEntityDetails(node) {
-        const attributes = node.attributes || {};
-        const nodes = attributes.nodes || [];
-        const nodeType = attributes.node_type || 'nodes';
+        // UPDATED: Look for attributes in the unified model structure
+        const nodesAttribute = this.findAttributeByName(node.attributes, 'nodes');
+        const countAttribute = this.findAttributeByName(node.attributes, 'count');
+        const nodeTypeAttribute = this.findAttributeByName(node.attributes, 'node_type');
+        const sourceProviderAttribute = this.findAttributeByName(node.attributes, 'source_provider');
+        const discoveryDepthAttribute = this.findAttributeByName(node.attributes, 'discovery_depth');
+
+        const nodes = nodesAttribute ? nodesAttribute.value : [];
+        const count = countAttribute ? countAttribute.value : 0;
+        const nodeType = nodeTypeAttribute ? nodeTypeAttribute.value : 'nodes';
+        const sourceProvider = sourceProviderAttribute ? sourceProviderAttribute.value : 'Unknown';
+        const discoveryDepth = discoveryDepthAttribute ? discoveryDepthAttribute.value : 'Unknown';

         let html = `
             <div class="modal-section">
@@ -1107,15 +1137,15 @@ class DNSReconApp {
                 <div class="attribute-list">
                     <div class="attribute-item-compact">
                         <span class="attribute-key-compact">Contains:</span>
-                        <span class="attribute-value-compact">${attributes.count} ${nodeType}s</span>
+                        <span class="attribute-value-compact">${count} ${nodeType}s</span>
                     </div>
                     <div class="attribute-item-compact">
                         <span class="attribute-key-compact">Provider:</span>
-                        <span class="attribute-value-compact">${attributes.source_provider || 'Unknown'}</span>
+                        <span class="attribute-value-compact">${sourceProvider}</span>
                     </div>
                     <div class="attribute-item-compact">
                         <span class="attribute-key-compact">Depth:</span>
-                        <span class="attribute-value-compact">${attributes.discovery_depth || 'Unknown'}</span>
+                        <span class="attribute-value-compact">${discoveryDepth}</span>
                     </div>
                 </div>
             </div>
@@ -1132,6 +1162,7 @@ class DNSReconApp {
         // Use node.id for the large_entity_id
         const largeEntityId = node.id;

+        if (Array.isArray(nodes)) {
         nodes.forEach(innerNodeId => {
             html += `
                 <div class="relationship-compact-item">
@@ -1143,6 +1174,7 @@ class DNSReconApp {
                 </div>
             `;
         });
+        }

         html += '</div></div></details></div>';

@@ -1255,151 +1287,6 @@ class DNSReconApp {
         return valueSourceMap;
     }

-    generateCorrelationObjectLayout(node) {
-        const metadata = node.metadata || {};
-        const values = metadata.values || [];
-        const mergeCount = metadata.merge_count || 1;
-
-        let html = '<div class="correlation-layout">';
-
-        if (mergeCount > 1) {
-            html += `
-                <div class="section-card correlation-summary">
-                    <div class="section-header">
-                        <h4><span class="section-icon">🔗</span>Merged Correlations</h4>
-                        <div class="merge-badge">${mergeCount} values</div>
-                    </div>
-                    <div class="correlation-grid">
-            `;
-
-            values.forEach((value, index) => {
-                const displayValue = typeof value === 'string' && value.length > 50 ?
-                    value.substring(0, 47) + '...' : value;
-
-                html += `
-                    <div class="correlation-item" data-index="${index}">
-                        <div class="correlation-preview">${displayValue}</div>
-                        <button class="expand-btn" onclick="this.parentElement.classList.toggle('expanded')">
-                            <span class="expand-icon">▼</span>
-                        </button>
-                        <div class="correlation-full hidden">${value}</div>
-                    </div>
-                `;
-            });
-
-            html += '</div></div>';
-        } else {
-            const singleValue = values.length > 0 ? values[0] : (metadata.value || 'Unknown');
-            html += `
-                <div class="section-card">
-                    <div class="section-header">
-                        <h4><span class="section-icon">🔗</span>Correlation Value</h4>
-                    </div>
-                    <div class="correlation-value-display">${singleValue}</div>
-                </div>
-            `;
-        }
-
-        // Show correlated nodes
-        const correlatedNodes = metadata.correlated_nodes || [];
-        if (correlatedNodes.length > 0) {
-            html += `
-                <div class="section-card">
-                    <div class="section-header">
-                        <h4><span class="section-icon">🌐</span>Correlated Nodes</h4>
-                        <div class="count-badge">${correlatedNodes.length}</div>
-                    </div>
-                    <div class="node-list">
-            `;
-
-            correlatedNodes.forEach(nodeId => {
-                html += `
-                    <div class="node-link-item" data-node-id="${nodeId}">
-                        <span class="node-icon">●</span>
-                        <span class="node-name">${nodeId}</span>
-                        <button class="navigate-btn" onclick="this.click()">→</button>
-                    </div>
-                `;
-            });
-
-            html += '</div></div>';
-        }
-
-        html += '</div>';
-        return html;
-    }
-
-    generateLargeEntityLayout(node) {
-        const attributes = node.attributes || {};
-        const nodes = attributes.nodes || [];
-        const nodeType = attributes.node_type || 'nodes';
-
-        let html = `
-            <div class="large-entity-layout">
-                <div class="section-card entity-summary">
-                    <div class="section-header">
-                        <h4><span class="section-icon">📦</span>Large Entity Container</h4>
-                        <div class="entity-badge">${attributes.count} ${nodeType}s</div>
-                    </div>
-                    <div class="entity-stats">
-                        <div class="stat-row">
-                            <span class="stat-label">Source Provider:</span>
-                            <span class="stat-value">${attributes.source_provider || 'Unknown'}</span>
-                        </div>
-                        <div class="stat-row">
-                            <span class="stat-label">Discovery Depth:</span>
-                            <span class="stat-value">${attributes.discovery_depth || 'Unknown'}</span>
-                        </div>
-                    </div>
-                </div>
-
-                <div class="section-card entity-contents">
-                    <div class="section-header">
-                        <h4><span class="section-icon">📋</span>Contained ${nodeType}s</h4>
-                        <button class="toggle-all-btn" onclick="this.toggleAllEntities()">Expand All</button>
-                    </div>
-                    <div class="entity-node-grid">
-        `;
-
-        nodes.forEach((innerNodeId, index) => {
-            const innerNode = this.graphManager.nodes.get(innerNodeId);
-            html += `
-                <div class="entity-node-card" data-node-id="${innerNodeId}">
-                    <div class="entity-node-header" onclick="this.parentElement.classList.toggle('expanded')">
-                        <span class="node-icon">●</span>
-                        <span class="node-name">${innerNodeId}</span>
-                        <span class="expand-indicator">▼</span>
-                    </div>
-                    <div class="entity-node-details">
-                        ${innerNode ? this.generateStandardNodeLayout(innerNode) : '<div class="no-details">No details available</div>'}
-                    </div>
-                </div>
-            `;
-        });
-
-        html += '</div></div></div>';
-        return html;
-    }
-
-    generateStandardNodeLayout(node) {
-        let html = '<div class="standard-node-layout">';
-
-        // Relationships section
-        html += this.generateRelationshipsSection(node);
-
-        // Attributes section with smart categorization
-        html += this.generateAttributesSection(node);
-
-        // Description section
-        html += this.generateDescriptionSection(node);
-
-        // Metadata section (collapsed by default)
-        html += this.generateMetadataSection(node);
-
-        html += '</div>';
-        return html;
-    }
-
     generateRelationshipsSection(node) {
         let html = '';

@@ -1468,12 +1355,20 @@ class DNSReconApp {
         return html;
     }

+    /**
+     * UPDATED: Generate attributes section for unified data model
+     * Now processes StandardAttribute objects instead of key-value pairs
+     */
     generateAttributesSection(attributes) {
-        const categorized = this.categorizeAttributes(attributes);
+        if (!Array.isArray(attributes) || attributes.length === 0) {
+            return '';
+        }
+
+        const categorized = this.categorizeStandardAttributes(attributes);
         let html = '';

         Object.entries(categorized).forEach(([category, attrs]) => {
-            if (Object.keys(attrs).length === 0) return;
+            if (attrs.length === 0) return;

             html += `
                 <div class="modal-section">
@@ -1482,20 +1377,16 @@ class DNSReconApp {
                     <div class="modal-section-content">
             `;

-            if (category === 'Certificates' && attrs.certificates) {
-                html += this.formatCertificateData(attrs.certificates);
-            } else {
             html += '<div class="attribute-list">';
-            Object.entries(attrs).forEach(([key, value]) => {
+            attrs.forEach(attr => {
                 html += `
                     <div class="attribute-item-compact">
-                        <span class="attribute-key-compact">${this.formatLabel(key)}</span>
-                        <span class="attribute-value-compact">${this.formatAttributeValue(value)}</span>
+                        <span class="attribute-key-compact">${this.formatLabel(attr.name)}</span>
+                        <span class="attribute-value-compact">${this.formatStandardAttributeValue(attr)}</span>
                     </div>
                 `;
             });
             html += '</div>';
-            }

             html += '</div></details></div>';
         });
@@ -1503,47 +1394,41 @@ class DNSReconApp {
         return html;
     }

-    formatCertificateData(certData) {
-        if (!certData || typeof certData !== 'object') {
-            return '<p>No certificate data available</p>';
-        }
-
-        let html = '<div class="certificate-list">';
-
-        // Handle certificate summary
-        if (certData.total_certificates) {
-            html += `
-                <div class="certificate-item">
-                    <div class="certificate-summary">
-                        <span>Total Certificates: ${certData.total_certificates}</span>
-                        <span class="certificate-status ${certData.has_valid_cert ? 'valid' : 'invalid'}">
-                            ${certData.has_valid_cert ? 'Valid' : 'Invalid'}
-                        </span>
-                    </div>
-                </div>
-            `;
-        }
-
-        // Handle unique issuers
-        if (certData.unique_issuers && Array.isArray(certData.unique_issuers)) {
-            html += `
-                <div class="certificate-item">
-                    <div class="certificate-summary">
-                        <span>Issuers:</span>
-                    </div>
-                    <div class="array-display">
-            `;
-            certData.unique_issuers.forEach(issuer => {
-                html += `<div class="array-display-item">${this.escapeHtml(String(issuer))}</div>`;
-            });
-            html += '</div></div>';
-        }
-
-        html += '</div>';
-        return html;
-    }
+    /**
+     * UPDATED: Categorize StandardAttribute objects by type and content
+     */
+    categorizeStandardAttributes(attributes) {
+        const categories = {
+            'DNS Records': [],
+            'Network Info': [],
+            'Provider Data': [],
+            'Other': []
+        };
+
+        attributes.forEach(attr => {
+            const lowerName = attr.name.toLowerCase();
+            const attrType = attr.type ? attr.type.toLowerCase() : '';
+
+            if (lowerName.includes('dns') || lowerName.includes('record') || attrType.includes('dns')) {
+                categories['DNS Records'].push(attr);
+            } else if (lowerName.includes('ip') || lowerName.includes('asn') || lowerName.includes('network') || attrType.includes('network')) {
+                categories['Network Info'].push(attr);
+            } else if (lowerName.includes('shodan') || lowerName.includes('crtsh') || lowerName.includes('provider') || attrType.includes('provider')) {
+                categories['Provider Data'].push(attr);
+            } else {
+                categories['Other'].push(attr);
+            }
+        });
+
+        return categories;
+    }

-    formatAttributeValue(value) {
+    /**
+     * UPDATED: Format StandardAttribute value for display
+     */
+    formatStandardAttributeValue(attr) {
+        const value = attr.value;
+
         if (value === null || value === undefined) {
             return '<em>None</em>';
         }
@@ -1567,35 +1452,6 @@ class DNSReconApp {
         return this.escapeHtml(String(value));
     }

-    categorizeAttributes(attributes) {
-        const categories = {
-            'DNS Records': {},
-            'Certificates': {},
-            'Network Info': {},
-            'Provider Data': {},
-            'Other': {}
-        };
-
-        for (const [key, value] of Object.entries(attributes)) {
-            const lowerKey = key.toLowerCase();
-
-            if (lowerKey.includes('dns') || lowerKey.includes('record') || key.endsWith('_record')) {
-                categories['DNS Records'][key] = value;
-            } else if (lowerKey.includes('cert') || lowerKey.includes('ssl') || lowerKey.includes('tls')) {
-                categories['Certificates'][key] = value;
-            } else if (lowerKey.includes('ip') || lowerKey.includes('asn') || lowerKey.includes('network')) {
-                categories['Network Info'][key] = value;
-            } else if (lowerKey.includes('shodan') || lowerKey.includes('crtsh') || lowerKey.includes('provider')) {
-                categories['Provider Data'][key] = value;
-            } else {
-                categories['Other'][key] = value;
-            }
-        }
-
-        return categories;
-    }
-
     formatObjectCompact(obj) {
         if (!obj || typeof obj !== 'object') return '';

@@ -1625,7 +1481,7 @@ class DNSReconApp {
         return `
             <div class="section-card description-section">
                 <div class="section-header">
-                    <h4><span class="section-icon">📝</span>Description</h4>
+                    <h4><span class="section-icon">📄</span>Description</h4>
                 </div>
                 <div class="description-content">
                     ${this.escapeHtml(node.description)}
@@ -1827,7 +1683,7 @@ class DNSReconApp {
     getNodeTypeIcon(nodeType) {
         const icons = {
             'domain': '🌐',
-            'ip': '📍',
+            'ip': '🔍',
             'asn': '🏢',
             'large_entity': '📦',
             'correlation_object': '🔗'