fix large entity handling: process contained targets with the DNS provider and merge duplicate edges into the large-entity node in the graph view

This commit is contained in:
overcuriousity 2025-09-13 16:09:10 +02:00
parent 612f414d2a
commit 717f103596
3 changed files with 47 additions and 18 deletions

1
.gitignore vendored
View File

@ -168,3 +168,4 @@ cython_debug/
# option (not recommended) you can uncomment the following to ignore the entire idea folder. # option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/ #.idea/
dump.rdb

View File

@ -753,13 +753,14 @@ class Scanner:
return discovered_targets return discovered_targets
def _create_large_entity(self, source: str, provider_name: str, results: List, current_depth: int) -> None: def _create_large_entity(self, source: str, provider_name: str, results: List, current_depth: int) -> None:
"""Create a large entity node for forensic tracking.""" """Create a large entity node and process its contents with the DNS provider."""
entity_id = f"Large Entity: {provider_name}" entity_id = f"Large Collection from {provider_name}"
# Extract targets from results
targets = [] targets = []
node_type = 'unknown' node_type = 'unknown'
dns_provider = next((p for p in self.providers if p.get_name() == 'dns'), None)
for rel in results: for rel in results:
if len(rel) > 1: if len(rel) > 1:
target = rel[1] target = rel[1]
@ -769,11 +770,16 @@ class Scanner:
if _is_valid_domain(target): if _is_valid_domain(target):
node_type = 'domain' node_type = 'domain'
self.graph.add_node(target, NodeType.DOMAIN) self.graph.add_node(target, NodeType.DOMAIN)
if dns_provider:
dns_results = dns_provider.query_domain(target)
self._process_provider_results_forensic(target, dns_provider, dns_results, defaultdict(lambda: defaultdict(list)), current_depth)
elif _is_valid_ip(target): elif _is_valid_ip(target):
node_type = 'ip' node_type = 'ip'
self.graph.add_node(target, NodeType.IP) self.graph.add_node(target, NodeType.IP)
if dns_provider:
dns_results = dns_provider.query_ip(target)
self._process_provider_results_forensic(target, dns_provider, dns_results, defaultdict(lambda: defaultdict(list)), current_depth)
# Create large entity metadata
metadata = { metadata = {
'count': len(targets), 'count': len(targets),
'nodes': targets, 'nodes': targets,
@ -784,18 +790,14 @@ class Scanner:
'forensic_note': f'Large entity created due to {len(targets)} results from {provider_name}' 'forensic_note': f'Large entity created due to {len(targets)} results from {provider_name}'
} }
# Create the node and edge
self.graph.add_node(entity_id, NodeType.LARGE_ENTITY, metadata=metadata) self.graph.add_node(entity_id, NodeType.LARGE_ENTITY, metadata=metadata)
# Use first result's relationship type for the edge
if results: if results:
rel_type = results[0][2] rel_type = results[0][2]
self.graph.add_edge(source, entity_id, rel_type, 0.9, provider_name, self.graph.add_edge(source, entity_id, rel_type, 0.9, provider_name,
{'large_entity_info': f'Contains {len(targets)} {node_type}s'}) {'large_entity_info': f'Contains {len(targets)} {node_type}s'})
# Forensic logging
self.logger.logger.warning(f"Large entity created: {entity_id} contains {len(targets)} targets from {provider_name}") self.logger.logger.warning(f"Large entity created: {entity_id} contains {len(targets)} targets from {provider_name}")
print(f"Created large entity {entity_id} for {len(targets)} {node_type}s from {provider_name}") print(f"Created large entity {entity_id} for {len(targets)} {node_type}s from {provider_name}")
def _collect_node_metadata_forensic(self, node_id: str, provider_name: str, rel_type: RelationshipType, def _collect_node_metadata_forensic(self, node_id: str, provider_name: str, rel_type: RelationshipType,

View File

@ -270,24 +270,50 @@ class GraphManager {
this.initialize(); this.initialize();
} }
// Find all aggregated node IDs first const largeEntityMap = new Map();
const aggregatedNodeIds = new Set();
graphData.nodes.forEach(node => { graphData.nodes.forEach(node => {
if (node.type === 'large_entity' && node.metadata && Array.isArray(node.metadata.nodes)) { if (node.type === 'large_entity' && node.metadata && Array.isArray(node.metadata.nodes)) {
node.metadata.nodes.forEach(nodeId => aggregatedNodeIds.add(nodeId)); node.metadata.nodes.forEach(nodeId => {
largeEntityMap.set(nodeId, node.id);
});
} }
}); });
// Process nodes, hiding the ones that are aggregated
const processedNodes = graphData.nodes.map(node => { const processedNodes = graphData.nodes.map(node => {
const processed = this.processNode(node); const processed = this.processNode(node);
if (aggregatedNodeIds.has(node.id)) { if (largeEntityMap.has(node.id)) {
processed.hidden = true; // Mark node as hidden processed.hidden = true;
} }
return processed; return processed;
}); });
const processedEdges = graphData.edges.map(edge => this.processEdge(edge)); const mergedEdges = {};
graphData.edges.forEach(edge => {
const fromNode = largeEntityMap.has(edge.from) ? largeEntityMap.get(edge.from) : edge.from;
const mergeKey = `${fromNode}-${edge.to}-${edge.label}`;
if (!mergedEdges[mergeKey]) {
mergedEdges[mergeKey] = {
...edge,
from: fromNode,
count: 0,
confidence_score: 0
};
}
mergedEdges[mergeKey].count++;
if (edge.confidence_score > mergedEdges[mergeKey].confidence_score) {
mergedEdges[mergeKey].confidence_score = edge.confidence_score;
}
});
const processedEdges = Object.values(mergedEdges).map(edge => {
const processed = this.processEdge(edge);
if (edge.count > 1) {
processed.label = `${edge.label} (${edge.count})`;
}
return processed;
});
// Update datasets with animation // Update datasets with animation
const existingNodeIds = this.nodes.getIds(); const existingNodeIds = this.nodes.getIds();
@ -457,7 +483,7 @@ class GraphManager {
'ip': 14, 'ip': 14,
'asn': 16, 'asn': 16,
'correlation_object': 8, 'correlation_object': 8,
'large_entity': 12 'large_entity': 5
}; };
return sizes[nodeType] || 12; return sizes[nodeType] || 12;
} }