82 Commits

Author SHA1 Message Date
overcuriousity
c4e6a8998a iteration on ws implementation 2025-09-20 16:52:05 +02:00
overcuriousity
75a595c9cb try to implement websockets 2025-09-20 14:17:17 +02:00
3ee23c9d05 Merge pull request 'remove-large-entity-temporarily' (#3) from remove-large-entity-temporarily into main
Reviewed-on: mstoeck3/dnsrecon#3
2025-09-19 12:29:26 +00:00
overcuriousity
8d402ab4b1 postgres 2025-09-19 14:28:37 +02:00
overcuriousity
7472e6f416 fixes to hint for incomplete data 2025-09-19 12:35:28 +02:00
overcuriousity
eabb532557 almost fixed 2025-09-19 01:10:07 +02:00
overcuriousity
0a6d12de9a large entity recreation 2025-09-19 00:38:26 +02:00
overcuriousity
332805709d remove 2025-09-18 23:44:24 +02:00
overcuriousity
1558731c1c attempt fix large entity 2025-09-18 23:22:49 +02:00
overcuriousity
95cebbf935 bug fixes, improvements 2025-09-18 22:39:12 +02:00
overcuriousity
4c48917993 fixes for scheduler 2025-09-18 21:32:26 +02:00
overcuriousity
9d9afa6a08 fixes 2025-09-18 21:04:29 +02:00
overcuriousity
12f834bb65 correlation engine 2025-09-18 20:51:13 +02:00
overcuriousity
cbfd40ee98 adjustments to shodan & export manager 2025-09-18 19:22:58 +02:00
overcuriousity
d4081e1a32 export manager modularized 2025-09-18 17:42:39 +02:00
overcuriousity
15227b392d readme file & some ux improvements 2025-09-18 00:24:35 +02:00
overcuriousity
fdc26dcf15 executive summary 2025-09-18 00:13:37 +02:00
140ef54674 Merge pull request 'data-model' (#2) from data-model into main
Reviewed-on: mstoeck3/dnsrecon#2
2025-09-17 21:56:17 +00:00
overcuriousity
aae459446c update requirements, fix some bugs 2025-09-17 23:55:41 +02:00
overcuriousity
98e1b2280b new node types 2025-09-17 22:42:08 +02:00
overcuriousity
cd14198452 smaller css 2025-09-17 22:39:26 +02:00
overcuriousity
284660ab8c new node types 2025-09-17 22:09:39 +02:00
overcuriousity
ecfb27e02a new scheduling, removed many debug prints 2025-09-17 21:47:03 +02:00
overcuriousity
39b4242200 fix cli last task started 2025-09-17 21:35:54 +02:00
overcuriousity
a56755320c initial targets managed in backend 2025-09-17 21:29:18 +02:00
overcuriousity
b985f1e5f0 potential bugfix for the right click hiding 2025-09-17 21:15:52 +02:00
overcuriousity
8ae4fdbf80 UX improvements 2025-09-17 21:12:11 +02:00
overcuriousity
d0ee415f0d enhancements 2025-09-17 19:42:14 +02:00
overcuriousity
173c3dcf92 some adjustments for clarity 2025-09-17 17:10:11 +02:00
overcuriousity
ec755b17ad remove many unnecessary debug print, improve large entity handling 2025-09-17 13:31:35 +02:00
overcuriousity
469c133f1b fix session handling 2025-09-17 11:18:06 +02:00
overcuriousity
f775c61731 iterating on fixes 2025-09-17 11:08:50 +02:00
overcuriousity
b984189e08 scheduler fixes 2025-09-17 00:31:12 +02:00
overcuriousity
f2db739fa1 attempt to fix some logic 2025-09-17 00:05:48 +02:00
overcuriousity
47ce7ff883 format keys reduction 2025-09-16 23:17:23 +02:00
overcuriousity
229746e1ec improving the display 2025-09-16 22:25:46 +02:00
overcuriousity
733e1da640 new data model refinement 2025-09-16 21:23:02 +02:00
overcuriousity
97aa18f788 implement new data api 2025-09-16 20:21:08 +02:00
overcuriousity
15421dd4a5 update caching logic 2025-09-16 15:36:29 +02:00
overcuriousity
ad4086b156 fix root computation 2025-09-16 15:25:39 +02:00
overcuriousity
0e92ec6e9a readme 2025-09-16 01:00:21 +02:00
overcuriousity
baa57bfac2 update the shodan to use only ip 2025-09-16 00:57:24 +02:00
overcuriousity
f0f80be955 finalize pop-out 2025-09-16 00:32:46 +02:00
overcuriousity
ecc143ddbb extraction feature works 2025-09-16 00:08:27 +02:00
overcuriousity
2c48316477 extract from node feature 2025-09-16 00:01:24 +02:00
overcuriousity
fc098aed28 remove cache 2025-09-15 22:48:49 +02:00
9285226cbc Merge pull request 'new-scheduler' (#1) from new-scheduler into main
Reviewed-on: mstoeck3/dnsrecon#1
2025-09-15 20:45:47 +00:00
overcuriousity
350055fcec successfully implemented scheduler 2025-09-15 22:44:38 +02:00
overcuriousity
4a5ecf7a37 new highest-priority-first scheduler 2025-09-15 22:21:17 +02:00
overcuriousity
71b2855d01 fixes to iteration context menu 2025-09-15 21:37:19 +02:00
overcuriousity
93a258170a context menu option 2025-09-15 21:27:21 +02:00
overcuriousity
e2d4e12057 also allow ip lookups in scan 2025-09-15 21:00:57 +02:00
overcuriousity
c076ee028f main page refinement 2025-09-15 20:44:45 +02:00
overcuriousity
cbfac0922a fix node modals 2025-09-15 20:27:43 +02:00
overcuriousity
881f7b74e5 fix graph delete&revert 2025-09-15 20:20:15 +02:00
overcuriousity
c347581a6c fix graph trueRoot 2025-09-15 20:11:40 +02:00
overcuriousity
30ee21f087 revert graph.js refactor 2025-09-15 18:06:11 +02:00
overcuriousity
2496ca26a5 small fixes 2025-09-15 17:52:09 +02:00
overcuriousity
8aa3c4933e fix large entity 2025-09-15 14:12:02 +02:00
overcuriousity
fc326a66c8 fix large entity 2025-09-15 13:58:30 +02:00
overcuriousity
51902e3155 it 2025-09-15 13:35:58 +02:00
overcuriousity
a261d706c8 update style 2025-09-15 01:10:28 +02:00
overcuriousity
2410e689b8 visual enhancements 2025-09-15 00:25:27 +02:00
overcuriousity
62470673fe integrate checkbox filters 2025-09-14 23:54:27 +02:00
overcuriousity
2658bd148b context menu 2025-09-14 23:42:45 +02:00
overcuriousity
f02381910d Merge branch 'main' of https://git.cc24.dev/mstoeck3/dnsrecon 2025-09-14 23:10:16 +02:00
overcuriousity
674ac59c98 iteration 2025-09-14 23:09:38 +02:00
434d1f4803 dump.rdb deleted 2025-09-14 20:55:33 +00:00
overcuriousity
eb9eea127b it 2025-09-14 22:37:23 +02:00
overcuriousity
ae07635ab6 update edge labels 2025-09-14 20:50:09 +02:00
overcuriousity
d7adf9ad8b it 2025-09-14 20:22:09 +02:00
overcuriousity
39ce0e9d11 great progress 2025-09-14 19:12:12 +02:00
overcuriousity
926f9e1096 fixes 2025-09-14 19:06:20 +02:00
overcuriousity
9499e62ccc it 2025-09-14 18:45:02 +02:00
overcuriousity
89ae06482e it 2025-09-14 18:02:15 +02:00
overcuriousity
7fe7ca41ba it 2025-09-14 17:40:18 +02:00
overcuriousity
949fbdbb45 iteration 2025-09-14 17:18:56 +02:00
overcuriousity
689e8c00d4 unify config 2025-09-14 16:17:26 +02:00
overcuriousity
3511f18f9a it 2025-09-14 16:07:58 +02:00
overcuriousity
72f7056bc7 it 2025-09-14 15:31:18 +02:00
overcuriousity
2ae33bc5ba it 2025-09-14 15:00:00 +02:00
overcuriousity
c91913fa13 it 2025-09-14 14:28:04 +02:00
29 changed files with 8962 additions and 5407 deletions

34
.env.example Normal file

@@ -0,0 +1,34 @@
# ===============================================
# DNSRecon Environment Variables
# ===============================================
# Copy this file to .env and fill in your values.
# --- API Keys ---
# Add your Shodan API key for the Shodan provider to be enabled.
SHODAN_API_KEY=
# --- Flask & Session Settings ---
# A strong, random secret key is crucial for session security.
FLASK_SECRET_KEY=your-very-secret-and-random-key-here
FLASK_HOST=127.0.0.1
FLASK_PORT=5000
FLASK_DEBUG=True
# How long a user's session in the browser lasts (in hours).
FLASK_PERMANENT_SESSION_LIFETIME_HOURS=2
# How long inactive scanner data is stored in Redis (in minutes).
SESSION_TIMEOUT_MINUTES=60
# --- Application Core Settings ---
# The default number of levels to recurse when scanning.
DEFAULT_RECURSION_DEPTH=2
# Default timeout for provider API requests in seconds.
DEFAULT_TIMEOUT=30
# The number of concurrent provider requests to make.
MAX_CONCURRENT_REQUESTS=1
# The number of results from a provider that triggers the "large entity" grouping.
LARGE_ENTITY_THRESHOLD=100
# The number of times to retry a target if a provider fails.
MAX_RETRIES_PER_TARGET=8
# How long cached provider responses are stored (in hours).
CACHE_TIMEOUT_HOURS=12

2
.gitignore vendored

@@ -169,4 +169,4 @@ cython_debug/
#.idea/
dump.rdb
.vscode
cache/

307
README.md

@@ -1,36 +1,66 @@
# DNSRecon - Passive Infrastructure Reconnaissance Tool
DNSRecon is an interactive, passive reconnaissance tool designed to map adversary infrastructure. It operates on a "free-by-default" model, ensuring core functionality without subscriptions, while allowing power users to enhance its capabilities with paid API keys. It is aimed at cybersecurity researchers, pentesters, and administrators who want to understand the public footprint of a target domain.
**Current Status: Phase 2 Implementation**
**Repo Link:** [https://git.cc24.dev/mstoeck3/dnsrecon](https://git.cc24.dev/mstoeck3/dnsrecon)
- ✅ Core infrastructure and graph engine
- ✅ Multi-provider support (crt.sh, DNS, Shodan)
- ✅ Session-based multi-user support
- ✅ Real-time web interface with interactive visualization
- ✅ Forensic logging system and JSON export
-----
## Concept and Philosophy
The core philosophy of DNSRecon is to provide a comprehensive and accurate map of a target's infrastructure using only **passive data sources** by default. This means that, out of the box, DNSRecon will not send any traffic to the target's servers. Instead, it queries public and historical data sources to build a picture of the target's online presence. This approach is ideal for researchers and pentesters who want to gather intelligence without alerting the target, and for administrators who want to see what information about their own infrastructure is publicly available.
For power users who require more in-depth information, DNSRecon can be configured to use API keys for services like Shodan, which provides a wealth of information about internet-connected devices. However, this is an optional feature, and the core functionality of the tool will always remain free and passive.
-----
## Features
* **Passive Reconnaissance**: Gathers data without direct contact with target infrastructure.
* **In-Memory Graph Analysis**: Uses NetworkX for efficient relationship mapping.
* **Real-Time Visualization**: The graph updates dynamically as the scan progresses.
* **Forensic Logging**: A complete audit trail of all reconnaissance activities is maintained.
* **Confidence Scoring**: Relationships are weighted based on the reliability of the data source.
* **Session Management**: Supports concurrent user sessions with isolated scanner instances.
* **Extensible Provider Architecture**: Easily add new data sources to expand the tool's capabilities.
* **Web-Based UI**: An intuitive and interactive web interface for managing scans and visualizing results.
-----
## Technical Architecture
DNSRecon is a web-based application built with a modern technology stack:
* **Backend**: The backend is a **Flask** application that provides a REST API for the frontend and manages the scanning process.
* **Scanning Engine**: The core scanning engine is a multi-threaded Python application that uses a provider-based architecture to query different data sources.
* **Session Management**: **Redis** is used for session management, allowing for concurrent user sessions with isolated scanner instances.
* **Data Storage**: The application uses an in-memory graph to store and analyze the relationships between different pieces of information. The graph is built using the **NetworkX** library.
* **Frontend**: The frontend is a single-page application that uses JavaScript to interact with the backend API and visualize the graph.
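As an illustration of the in-memory model described above (a minimal sketch, not the tool's exact internals), relationships can be held in a NetworkX directed graph whose edges carry a confidence score and the provider that discovered them:
```python
import networkx as nx

# Illustrative sketch of the relationship model: nodes are indicators,
# edges carry a confidence score and the discovering provider's name.
g = nx.DiGraph()
g.add_node('example.com', type='domain')
g.add_node('93.184.216.34', type='ip')
g.add_edge('example.com', '93.184.216.34',
           relationship_type='dns_a_record',
           confidence_score=0.9,
           source_provider='dns')

for u, v, data in g.edges(data=True):
    print(f"{u} -> {v} [{data['relationship_type']}] "
          f"confidence={data['confidence_score']}")
```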
-----
## Data Sources
DNSRecon queries the following data sources:
* **DNS**: Standard DNS lookups (A, AAAA, CNAME, MX, NS, SOA, TXT).
* **crt.sh**: A certificate transparency log that provides information about SSL/TLS certificates.
* **Shodan**: A search engine for internet-connected devices (requires an API key).
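For example, the standard DNS lookups listed above can be reproduced with `dnspython` (a hedged sketch; DNSRecon's own `DNSProvider` wraps this logic differently):
```python
import dns.resolver

# Query each record type listed above for a single name.
resolver = dns.resolver.Resolver()
for rtype in ('A', 'AAAA', 'CNAME', 'MX', 'NS', 'SOA', 'TXT'):
    try:
        for rdata in resolver.resolve('example.com', rtype):
            print(rtype, rdata.to_text())
    except (dns.resolver.NoAnswer, dns.resolver.NXDOMAIN,
            dns.resolver.NoNameservers):
        pass  # record type not present (or name does not resolve)
```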
-----
## Installation and Setup
### Prerequisites
* Python 3.8 or higher
* A modern web browser with JavaScript enabled
* A Linux host for running the application
### 1\. Clone the Project
```bash
git clone https://git.cc24.dev/mstoeck3/dnsrecon
cd dnsrecon
```
@@ -44,156 +74,64 @@ source venv/bin/activate
pip install -r requirements.txt
```
The `requirements.txt` file contains the following dependencies:
* Flask
* networkx
* requests
* python-dateutil
* Werkzeug
* urllib3
* dnspython
* gunicorn
* redis
* python-dotenv
### 3\. Configure the Application
DNSRecon is configured using a `.env` file. You can copy the provided example file and edit it to suit your needs:
```bash
cp .env.example .env
```
The following environment variables are available for configuration:
| Variable | Description | Default |
| :--- | :--- | :--- |
| `SHODAN_API_KEY` | Your Shodan API key. | |
| `FLASK_SECRET_KEY`| A strong, random secret key for session security. | `your-very-secret-and-random-key-here` |
| `FLASK_HOST` | The host address for the Flask application. | `127.0.0.1` |
| `FLASK_PORT` | The port for the Flask application. | `5000` |
| `FLASK_DEBUG` | Enable or disable Flask's debug mode. | `True` |
| `FLASK_PERMANENT_SESSION_LIFETIME_HOURS`| How long a user's session in the browser lasts (in hours). | `2` |
| `SESSION_TIMEOUT_MINUTES` | How long inactive scanner data is stored in Redis (in minutes). | `60` |
| `DEFAULT_RECURSION_DEPTH` | The default number of levels to recurse when scanning. | `2` |
| `DEFAULT_TIMEOUT` | Default timeout for provider API requests in seconds. | `30` |
| `MAX_CONCURRENT_REQUESTS`| The number of concurrent provider requests to make. | `1` |
| `LARGE_ENTITY_THRESHOLD`| The number of results from a provider that triggers the "large entity" grouping. | `100` |
| `MAX_RETRIES_PER_TARGET`| The number of times to retry a target if a provider fails. | `8` |
| `CACHE_TIMEOUT_HOURS`| How long cached provider responses are stored (in hours). | `12` |
-----
### 4\. (Optional but Recommended) Set up a Local DNS Caching Resolver
Running a local DNS caching resolver can significantly speed up DNS queries and reduce your network footprint. Here's how to set up `unbound` on a Debian-based Linux distribution (like Ubuntu).
**a. Install Unbound:**
```bash
sudo apt update
sudo apt install unbound -y
```
**b. Configure Unbound:**
Create a new configuration file for DNSRecon:
```bash
sudo nano /etc/unbound/unbound.conf.d/dnsrecon.conf
```
Add the following content to the file:
```
server:
    # Listen on localhost for all users
    interface: 127.0.0.1
    access-control: 0.0.0.0/0 refuse
    access-control: 127.0.0.0/8 allow

    # Enable prefetching of popular items
    prefetch: yes
```
**c. Restart Unbound and set it as the default resolver:**
```bash
sudo systemctl restart unbound
sudo systemctl enable unbound
```
To use this resolver for your system, you may need to update your network settings to point to `127.0.0.1` as your DNS server.
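You can quickly verify that the local resolver is answering; a second run of the same query should be served from cache and report a much lower query time:
```bash
dig @127.0.0.1 example.com A
```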
**d. Update DNSProvider to use the local resolver:**
In `dnsrecon/providers/dns_provider.py`, you can explicitly set the resolver's nameservers in the `__init__` method:
```python
# dnsrecon/providers/dns_provider.py
import dns.resolver

class DNSProvider(BaseProvider):
    def __init__(self, session_config=None):
        """Initialize DNS provider with session-specific configuration."""
        super().__init__(...)
        # Configure the DNS resolver to use the local caching resolver
        self.resolver = dns.resolver.Resolver()
        self.resolver.nameservers = ['127.0.0.1']
        self.resolver.timeout = 5
        self.resolver.lifetime = 10
```
## Running the Application
### 1\. Start the Application
For development, you can run the application using the following command:
```bash
python app.py
```
### 2\. Open Your Browser
Navigate to `http://127.0.0.1:5000`.
### 3\. Basic Reconnaissance Workflow
1. **Enter Target Domain**: Input a domain like `example.com`.
2. **Select Recursion Depth**: Depth 2 is recommended for most investigations.
3. **Start Reconnaissance**: Click "Start Reconnaissance" to begin.
4. **Monitor Progress**: Watch the real-time graph build as relationships are discovered.
5. **Analyze and Export**: Interact with the graph and download the results when the scan is complete.
## Production Deployment
To deploy DNSRecon in a production environment, follow these steps:
### 1\. Use a Production WSGI Server
Do not use the built-in Flask development server for production. Use a WSGI server like **Gunicorn**:
```bash
pip install gunicorn
gunicorn --workers 4 --bind 0.0.0.0:5000 app:app
```
### 2\. Configure Environment Variables
Set the following environment variables for a secure and configurable deployment:
```bash
# Generate a strong, random secret key
export FLASK_SECRET_KEY='your-super-secret-and-random-key'
# Set Flask to production mode
export FLASK_ENV='production'
export FLASK_DEBUG=False
# API keys (optional, but recommended for full functionality)
export SHODAN_API_KEY="your_shodan_key"
```
### 3\. Use a Reverse Proxy
Set up a reverse proxy like **Nginx** to sit in front of the Gunicorn server. This provides several benefits, including:
- **TLS/SSL Termination**: Securely handle HTTPS traffic.
- **Load Balancing**: Distribute traffic across multiple application instances.
- **Serving Static Files**: Efficiently serve CSS and JavaScript files.
**Example Nginx Configuration:**
```nginx
server {
    listen 80;
    server_name your_domain.com;

    location / {
        return 301 https://$host$request_uri;
    }
}

server {
    listen 443 ssl;
    server_name your_domain.com;

    # SSL cert configuration
    ssl_certificate /etc/letsencrypt/live/your_domain.com/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/your_domain.com/privkey.pem;

    location / {
        proxy_pass http://127.0.0.1:5000;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    }

    location /static {
        alias /path/to/your/dnsrecon/static;
        expires 30d;
    }
}
```
## Systemd Service
To run DNSRecon as a service that starts automatically on boot, you can use `systemd`.
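The unit file itself is elided from this diff; a minimal sketch follows. The user, paths, and the Redis unit name are assumptions to adapt to your installation:
```ini
# /etc/systemd/system/dnsrecon.service -- illustrative sketch only;
# adjust User, paths, and dependencies to match your installation.
[Unit]
Description=DNSRecon passive reconnaissance service
After=network.target redis-server.service

[Service]
User=dnsrecon
WorkingDirectory=/opt/dnsrecon
EnvironmentFile=/opt/dnsrecon/.env
ExecStart=/opt/dnsrecon/venv/bin/gunicorn --workers 4 --bind 127.0.0.1:5000 app:app
Restart=on-failure

[Install]
WantedBy=multi-user.target
```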
@@ -245,12 +183,77 @@ You can check the status of the service at any time with:
```bash
sudo systemctl status dnsrecon.service
```
-----
## Security Considerations
- **API Keys**: API keys are stored in memory for the duration of a user session and are not written to disk.
- **Rate Limiting**: DNSRecon includes built-in rate limiting to be respectful to data sources.
- **Local Use**: The application is designed for local or trusted network use and does not have built-in authentication. **Do not expose it directly to the internet without proper security controls.**
## Updating the Application
To update the application, you should first pull the latest changes from the git repository. Then, you will need to wipe the Redis database and the local cache to ensure that you are using the latest data.
### 1\. Update the Code
```bash
git pull
```
### 2\. Wipe the Redis Database
```bash
redis-cli FLUSHALL
```
### 3\. Wipe the Local Cache
```bash
rm -rf cache/*
```
### 4\. Restart the Service
```bash
sudo systemctl restart dnsrecon.service
```
-----
## Extensibility
DNSRecon is designed to be extensible, and adding a new provider is straightforward. Create a new Python file in the `providers` directory with a class that inherits from `BaseProvider` and implements the following methods (a minimal skeleton is sketched after the list):
* `get_name()`: Return the name of the provider.
* `get_display_name()`: Return a display-friendly name for the provider.
* `requires_api_key()`: Return `True` if the provider requires an API key.
* `get_eligibility()`: Return a dictionary indicating whether the provider can query domains and/or IPs.
* `is_available()`: Return `True` if the provider is available (e.g., if an API key is configured).
* `query_domain(domain)`: Query the provider for information about a domain.
* `query_ip(ip)`: Query the provider for information about an IP address.
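A minimal skeleton might look like this. It is a sketch only: the import paths, the constructor, the shape of the eligibility dictionary, and the `ProviderResult` return type (inferred from `core/provider_result.py` below) are assumptions, not confirmed API:
```python
# providers/example_provider.py -- hypothetical provider sketch.
from providers.base_provider import BaseProvider  # assumed import path
from core.provider_result import ProviderResult


class ExampleProvider(BaseProvider):
    """Sketch of a new provider; method names follow the list above."""

    def get_name(self) -> str:
        return "example"

    def get_display_name(self) -> str:
        return "Example Provider"

    def requires_api_key(self) -> bool:
        return False

    def get_eligibility(self) -> dict:
        return {'domains': True, 'ips': False}  # shape assumed

    def is_available(self) -> bool:
        return True  # e.g. check for a configured API key here

    def query_domain(self, domain: str) -> ProviderResult:
        result = ProviderResult()
        # Query your data source and record findings, e.g.:
        # result.add_attribute(target_node=domain, name='example_field',
        #                      value='...', attr_type='string',
        #                      provider=self.get_name())
        return result

    def query_ip(self, ip: str) -> ProviderResult:
        return ProviderResult()  # this provider does not handle IPs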
-----
## Unique Capabilities and Limitations
### Unique Capabilities
* **Graph-Based Analysis**: The use of a graph-based data model allows for a more intuitive and powerful analysis of the relationships between different pieces of information.
* **Real-Time Visualization**: The real-time visualization of the graph provides immediate feedback and allows for a more interactive and engaging analysis experience.
* **Session Management**: The session management feature allows multiple users to use the application concurrently without interfering with each other's work.
### Limitations
* **Passive-Only by Default**: While the passive-only approach is a key feature of the tool, it also means that the information it can gather is limited to what is publicly available.
* **No Active Scanning**: The tool does not perform any active scanning, such as port scanning or vulnerability scanning.
-----
## License
This project is licensed under the terms of the **BSD-3-Clause** license.
Copyright (c) 2025 mstoeck3.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

844
app.py

File diff suppressed because it is too large

191
config.py

@@ -1,3 +1,5 @@
# dnsrecon-reduced/config.py
"""
Configuration management for DNSRecon tool.
Handles API key storage, rate limiting, and default settings.
@@ -5,110 +7,151 @@ Handles API key storage, rate limiting, and default settings.
import os
from typing import Dict, Optional
from dotenv import load_dotenv
# Load environment variables from .env file
load_dotenv()
class Config:
"""Configuration manager for DNSRecon application."""
def __init__(self):
"""Initialize configuration with default values."""
self.api_keys: Dict[str, Optional[str]] = {
'shodan': None
}
self.api_keys: Dict[str, Optional[str]] = {}
# Default settings
# --- General Settings ---
self.default_recursion_depth = 2
self.default_timeout = 10
self.max_concurrent_requests = 5
self.default_timeout = 60
self.max_concurrent_requests = 1
self.large_entity_threshold = 100
self.max_retries_per_target = 8
# Rate limiting settings (requests per minute)
# --- Provider Caching Settings ---
self.cache_timeout_hours = 6 # Provider-specific cache timeout
# --- Rate Limiting (requests per minute) ---
self.rate_limits = {
'crtsh': 60, # Free service, be respectful
'shodan': 60, # API dependent
'dns': 100 # Local DNS queries
'crtsh': 5,
'shodan': 60,
'dns': 100,
'correlation': 0 # Set to 0 to make sure correlations run last
}
# Provider settings
# --- Provider Settings ---
self.enabled_providers = {
'crtsh': True, # Always enabled (free)
'dns': True, # Always enabled (free)
'shodan': False # Requires API key
'crtsh': True,
'dns': True,
'shodan': False,
'correlation': True # Enable the new provider by default
}
# Logging configuration
# --- Logging ---
self.log_level = 'INFO'
self.log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
# Flask configuration
# --- Flask & Session Settings ---
self.flask_host = '127.0.0.1'
self.flask_port = 5000
self.flask_debug = True
self.flask_secret_key = 'default-secret-key-change-me'
self.flask_permanent_session_lifetime_hours = 2
self.session_timeout_minutes = 60
def set_api_key(self, provider: str, api_key: str) -> bool:
"""
Set API key for a provider.
Args:
provider: Provider name (shodan, etc)
api_key: API key string
Returns:
bool: True if key was set successfully
"""
if provider in self.api_keys:
self.api_keys[provider] = api_key
self.enabled_providers[provider] = True if api_key else False
return True
return False
def get_api_key(self, provider: str) -> Optional[str]:
"""
Get API key for a provider.
Args:
provider: Provider name
Returns:
API key or None if not set
"""
return self.api_keys.get(provider)
def is_provider_enabled(self, provider: str) -> bool:
"""
Check if a provider is enabled.
Args:
provider: Provider name
Returns:
bool: True if provider is enabled
"""
return self.enabled_providers.get(provider, False)
def get_rate_limit(self, provider: str) -> int:
"""
Get rate limit for a provider.
Args:
provider: Provider name
Returns:
Rate limit in requests per minute
"""
return self.rate_limits.get(provider, 60)
# Load environment variables to override defaults
self.load_from_env()
def load_from_env(self):
"""Load configuration from environment variables."""
if os.getenv('SHODAN_API_KEY'):
self.set_api_key('shodan', os.getenv('SHODAN_API_KEY'))
# Override default settings from environment
self.default_recursion_depth = int(os.getenv('DEFAULT_RECURSION_DEPTH', '2'))
self.flask_debug = os.getenv('FLASK_DEBUG', 'True').lower() == 'true'
self.default_timeout = 30
self.max_concurrent_requests = 5
# Override settings from environment
self.default_recursion_depth = int(os.getenv('DEFAULT_RECURSION_DEPTH', self.default_recursion_depth))
self.default_timeout = int(os.getenv('DEFAULT_TIMEOUT', self.default_timeout))
self.max_concurrent_requests = int(os.getenv('MAX_CONCURRENT_REQUESTS', self.max_concurrent_requests))
self.large_entity_threshold = int(os.getenv('LARGE_ENTITY_THRESHOLD', self.large_entity_threshold))
self.max_retries_per_target = int(os.getenv('MAX_RETRIES_PER_TARGET', self.max_retries_per_target))
self.cache_timeout_hours = int(os.getenv('CACHE_TIMEOUT_HOURS', self.cache_timeout_hours))
# Override Flask and session settings
self.flask_host = os.getenv('FLASK_HOST', self.flask_host)
self.flask_port = int(os.getenv('FLASK_PORT', self.flask_port))
self.flask_debug = os.getenv('FLASK_DEBUG', str(self.flask_debug)).lower() == 'true'
self.flask_secret_key = os.getenv('FLASK_SECRET_KEY', self.flask_secret_key)
self.flask_permanent_session_lifetime_hours = int(os.getenv('FLASK_PERMANENT_SESSION_LIFETIME_HOURS', self.flask_permanent_session_lifetime_hours))
self.session_timeout_minutes = int(os.getenv('SESSION_TIMEOUT_MINUTES', self.session_timeout_minutes))
def set_api_key(self, provider: str, api_key: Optional[str]) -> bool:
"""Set API key for a provider."""
self.api_keys[provider] = api_key
if api_key:
self.enabled_providers[provider] = True
return True
def set_provider_enabled(self, provider: str, enabled: bool) -> bool:
"""
Set provider enabled status for the session.
Args:
provider: Provider name
enabled: Whether the provider should be enabled
Returns:
True if the setting was applied successfully
"""
provider_key = provider.lower()
self.enabled_providers[provider_key] = enabled
return True
def get_provider_enabled(self, provider: str) -> bool:
"""
Get provider enabled status.
Args:
provider: Provider name
Returns:
True if the provider is enabled
"""
provider_key = provider.lower()
return self.enabled_providers.get(provider_key, True) # Default to enabled
def bulk_set_provider_settings(self, provider_settings: dict) -> dict:
"""
Set multiple provider settings at once.
Args:
provider_settings: Dict of provider_name -> {'enabled': bool, ...}
Returns:
Dict with results for each provider
"""
results = {}
for provider_name, settings in provider_settings.items():
provider_key = provider_name.lower()
try:
if 'enabled' in settings:
self.enabled_providers[provider_key] = settings['enabled']
results[provider_key] = {'success': True, 'enabled': settings['enabled']}
else:
results[provider_key] = {'success': False, 'error': 'No enabled setting provided'}
except Exception as e:
results[provider_key] = {'success': False, 'error': str(e)}
return results
def get_api_key(self, provider: str) -> Optional[str]:
"""Get API key for a provider."""
return self.api_keys.get(provider)
def is_provider_enabled(self, provider: str) -> bool:
"""Check if a provider is enabled."""
return self.enabled_providers.get(provider, False)
def get_rate_limit(self, provider: str) -> int:
"""Get rate limit for a provider."""
return self.rate_limits.get(provider, 60)
# Global configuration instance
config = Config()
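A brief usage sketch: because `load_from_env()` runs inside `Config.__init__`, environment overrides must be in place before the module is first imported.
```python
# Hedged usage sketch for the Config class above.
import os

# Must be set before `config` is imported, since Config() reads the
# environment in its constructor via load_from_env().
os.environ['DEFAULT_RECURSION_DEPTH'] = '3'
os.environ['MAX_CONCURRENT_REQUESTS'] = '2'

from config import config

assert config.default_recursion_depth == 3
assert config.max_concurrent_requests == 2
```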

core/__init__.py

@@ -8,7 +8,6 @@ from .scanner import Scanner, ScanStatus
from .logger import ForensicLogger, get_forensic_logger, new_session
from .session_manager import session_manager
from .session_config import SessionConfig, create_session_config
from .task_manager import TaskManager, TaskType, ReconTask
__all__ = [
'GraphManager',
@@ -20,10 +19,7 @@ __all__ = [
'new_session',
'session_manager',
'SessionConfig',
'create_session_config',
'TaskManager',
'TaskType',
'ReconTask'
'create_session_config'
]
__version__ = "1.0.0-phase2"

core/graph_manager.py

@@ -1,6 +1,10 @@
# dnsrecon-reduced/core/graph_manager.py
"""
Graph data model for DNSRecon using NetworkX.
Manages in-memory graph storage with confidence scoring and forensic metadata.
Now fully compatible with the unified ProviderResult data model.
FIXED: Added proper pickle support to prevent weakref serialization errors.
"""
import re
from datetime import datetime, timezone
@@ -14,7 +18,8 @@ class NodeType(Enum):
"""Enumeration of supported node types."""
DOMAIN = "domain"
IP = "ip"
ASN = "asn"
ISP = "isp"
CA = "ca"
LARGE_ENTITY = "large_entity"
CORRELATION_OBJECT = "correlation_object"
@@ -26,6 +31,8 @@ class GraphManager:
"""
Thread-safe graph manager for DNSRecon infrastructure mapping.
Uses NetworkX for in-memory graph storage with confidence scoring.
Compatible with unified ProviderResult data model.
FIXED: Added proper pickle support to handle NetworkX graph serialization.
"""
def __init__(self):
@@ -33,118 +40,88 @@ class GraphManager:
self.graph = nx.DiGraph()
self.creation_time = datetime.now(timezone.utc).isoformat()
self.last_modified = self.creation_time
self.correlation_index = {}
# Compile regex for date filtering for efficiency
self.date_pattern = re.compile(r'^\d{4}-\d{2}-\d{2}[ T]\d{2}:\d{2}:\d{2}')
def __getstate__(self):
"""Prepare GraphManager for pickling, excluding compiled regex."""
"""Prepare GraphManager for pickling by converting NetworkX graph to serializable format."""
state = self.__dict__.copy()
# Compiled regex patterns are not always picklable
if 'date_pattern' in state:
del state['date_pattern']
# Convert NetworkX graph to a serializable format
if hasattr(self, 'graph') and self.graph:
# Extract all nodes with their data
nodes_data = {}
for node_id, attrs in self.graph.nodes(data=True):
nodes_data[node_id] = dict(attrs)
# Extract all edges with their data
edges_data = []
for source, target, attrs in self.graph.edges(data=True):
edges_data.append({
'source': source,
'target': target,
'attributes': dict(attrs)
})
# Replace the NetworkX graph with serializable data
state['_graph_nodes'] = nodes_data
state['_graph_edges'] = edges_data
del state['graph']
return state
def __setstate__(self, state):
"""Restore GraphManager state and recompile regex."""
"""Restore GraphManager after unpickling by reconstructing NetworkX graph."""
# Restore basic attributes
self.__dict__.update(state)
self.date_pattern = re.compile(r'^\d{4}-\d{2}-\d{2}[ T]\d{2}:\d{2}:\d{2}')
def _update_correlation_index(self, node_id: str, data: Any, path: List[str] = None):
"""Recursively traverse metadata and add hashable values to the index."""
if path is None:
path = []
# Reconstruct NetworkX graph from serializable data
self.graph = nx.DiGraph()
if isinstance(data, dict):
for key, value in data.items():
self._update_correlation_index(node_id, value, path + [key])
elif isinstance(data, list):
for i, item in enumerate(data):
self._update_correlation_index(node_id, item, path + [f"[{i}]"])
else:
self._add_to_correlation_index(node_id, data, ".".join(path))
# Restore nodes
if hasattr(self, '_graph_nodes'):
for node_id, attrs in self._graph_nodes.items():
self.graph.add_node(node_id, **attrs)
del self._graph_nodes
def _add_to_correlation_index(self, node_id: str, value: Any, path_str: str):
"""Add a hashable value to the correlation index, filtering out noise."""
if not isinstance(value, (str, int, float, bool)) or value is None:
return
# Restore edges
if hasattr(self, '_graph_edges'):
for edge_data in self._graph_edges:
self.graph.add_edge(
edge_data['source'],
edge_data['target'],
**edge_data['attributes']
)
del self._graph_edges
# Ignore certain paths that contain noisy, non-unique identifiers
if any(keyword in path_str.lower() for keyword in ['count', 'total', 'timestamp', 'date']):
return
# Filter out common low-entropy values and date-like strings
if isinstance(value, str):
# FIXED: Prevent correlation on date/time strings.
if self.date_pattern.match(value):
return
if len(value) < 4 or value.lower() in ['true', 'false', 'unknown', 'none', 'crt.sh']:
return
elif isinstance(value, int) and abs(value) < 9999:
return # Ignore small integers
elif isinstance(value, bool):
return # Ignore boolean values
# Add the valuable correlation data to the index
if value not in self.correlation_index:
self.correlation_index[value] = {}
if node_id not in self.correlation_index[value]:
self.correlation_index[value][node_id] = []
if path_str not in self.correlation_index[value][node_id]:
self.correlation_index[value][node_id].append(path_str)
def _check_for_correlations(self, new_node_id: str, data: Any, path: List[str] = None) -> List[Dict]:
"""Recursively traverse metadata to find correlations with existing data."""
if path is None:
path = []
all_correlations = []
if isinstance(data, dict):
for key, value in data.items():
if key == 'source': # Avoid correlating on the provider name
continue
all_correlations.extend(self._check_for_correlations(new_node_id, value, path + [key]))
elif isinstance(data, list):
for i, item in enumerate(data):
all_correlations.extend(self._check_for_correlations(new_node_id, item, path + [f"[{i}]"]))
else:
value = data
if value in self.correlation_index:
existing_nodes_with_paths = self.correlation_index[value]
unique_nodes = set(existing_nodes_with_paths.keys())
unique_nodes.add(new_node_id)
if len(unique_nodes) < 2:
return all_correlations # Correlation must involve at least two distinct nodes
new_source = {'node_id': new_node_id, 'path': ".".join(path)}
all_sources = [new_source]
for node_id, paths in existing_nodes_with_paths.items():
for p_str in paths:
all_sources.append({'node_id': node_id, 'path': p_str})
all_correlations.append({
'value': value,
'sources': all_sources,
'nodes': list(unique_nodes)
})
return all_correlations
def add_node(self, node_id: str, node_type: NodeType, attributes: Optional[Dict[str, Any]] = None,
def add_node(self, node_id: str, node_type: NodeType, attributes: Optional[List[Dict[str, Any]]] = None,
description: str = "", metadata: Optional[Dict[str, Any]] = None) -> bool:
"""Add a node to the graph, update attributes, and process correlations."""
"""
Add a node to the graph, update attributes, and process correlations.
Now compatible with unified data model - attributes are dictionaries from converted StandardAttribute objects.
"""
is_new_node = not self.graph.has_node(node_id)
if is_new_node:
self.graph.add_node(node_id, type=node_type.value,
added_timestamp=datetime.now(timezone.utc).isoformat(),
attributes=attributes or {},
attributes=attributes or [], # Store as a list from the start
description=description,
metadata=metadata or {})
else:
# Safely merge new attributes into existing attributes
# Safely merge new attributes into the existing list of attributes
if attributes:
existing_attributes = self.graph.nodes[node_id].get('attributes', {})
existing_attributes.update(attributes)
existing_attributes = self.graph.nodes[node_id].get('attributes', [])
# Handle cases where old data might still be in dictionary format
if not isinstance(existing_attributes, list):
existing_attributes = []
# Create a set of existing attribute names for efficient duplicate checking
existing_attr_names = {attr['name'] for attr in existing_attributes}
for new_attr in attributes:
if new_attr['name'] not in existing_attr_names:
existing_attributes.append(new_attr)
existing_attr_names.add(new_attr['name'])
self.graph.nodes[node_id]['attributes'] = existing_attributes
if description:
self.graph.nodes[node_id]['description'] = description
@@ -153,179 +130,22 @@ class GraphManager:
existing_metadata.update(metadata)
self.graph.nodes[node_id]['metadata'] = existing_metadata
if attributes and node_type != NodeType.CORRELATION_OBJECT:
correlations = self._check_for_correlations(node_id, attributes)
for corr in correlations:
value = corr['value']
# STEP 1: Substring check against all existing nodes
if self._correlation_value_matches_existing_node(value):
# Skip creating correlation node - would be redundant
continue
# STEP 2: Filter out node pairs that already have direct edges
eligible_nodes = self._filter_nodes_without_direct_edges(set(corr['nodes']))
if len(eligible_nodes) < 2:
# Need at least 2 nodes to create a correlation
continue
# STEP 3: Check for existing correlation node with same connection pattern
correlation_nodes_with_pattern = self._find_correlation_nodes_with_same_pattern(eligible_nodes)
if correlation_nodes_with_pattern:
# STEP 4: Merge with existing correlation node
target_correlation_node = correlation_nodes_with_pattern[0]
self._merge_correlation_values(target_correlation_node, value, corr)
else:
# STEP 5: Create new correlation node for eligible nodes only
correlation_node_id = f"corr_{abs(hash(str(sorted(eligible_nodes))))}"
self.add_node(correlation_node_id, NodeType.CORRELATION_OBJECT,
metadata={'values': [value], 'sources': corr['sources'],
'correlated_nodes': list(eligible_nodes)})
# Create edges from eligible nodes to this correlation node
for c_node_id in eligible_nodes:
if self.graph.has_node(c_node_id):
attribute = corr['sources'][0]['path'].split('.')[-1]
relationship_type = f"c_{attribute}"
self.add_edge(c_node_id, correlation_node_id, relationship_type, confidence_score=0.9)
self._update_correlation_index(node_id, attributes)
self.last_modified = datetime.now(timezone.utc).isoformat()
return is_new_node
def _filter_nodes_without_direct_edges(self, node_set: set) -> set:
"""
Filter out nodes that already have direct edges between them.
Returns set of nodes that should be included in correlation.
"""
nodes_list = list(node_set)
eligible_nodes = set(node_set) # Start with all nodes
# Check all pairs of nodes
for i in range(len(nodes_list)):
for j in range(i + 1, len(nodes_list)):
node_a = nodes_list[i]
node_b = nodes_list[j]
# Check if direct edge exists in either direction
if self._has_direct_edge_bidirectional(node_a, node_b):
# Remove both nodes from eligible set since they're already connected
eligible_nodes.discard(node_a)
eligible_nodes.discard(node_b)
return eligible_nodes
def _has_direct_edge_bidirectional(self, node_a: str, node_b: str) -> bool:
"""
Check if there's a direct edge between two nodes in either direction.
Returns True if node_a→node_b OR node_b→node_a exists.
"""
return (self.graph.has_edge(node_a, node_b) or
self.graph.has_edge(node_b, node_a))
def _correlation_value_matches_existing_node(self, correlation_value: str) -> bool:
"""
Check if correlation value contains any existing node ID as substring.
Returns True if match found (correlation node should NOT be created).
"""
correlation_str = str(correlation_value).lower()
# Check against all existing nodes
for existing_node_id in self.graph.nodes():
if existing_node_id.lower() in correlation_str:
return True
return False
def _find_correlation_nodes_with_same_pattern(self, node_set: set) -> List[str]:
"""
Find existing correlation nodes that have the exact same pattern of connected nodes.
Returns list of correlation node IDs with matching patterns.
"""
correlation_nodes = self.get_nodes_by_type(NodeType.CORRELATION_OBJECT)
matching_nodes = []
for corr_node_id in correlation_nodes:
# Get all nodes connected to this correlation node
connected_nodes = set()
# Add all predecessors (nodes pointing TO the correlation node)
connected_nodes.update(self.graph.predecessors(corr_node_id))
# Add all successors (nodes pointed TO by the correlation node)
connected_nodes.update(self.graph.successors(corr_node_id))
# Check if the pattern matches exactly
if connected_nodes == node_set:
matching_nodes.append(corr_node_id)
return matching_nodes
def _merge_correlation_values(self, target_node_id: str, new_value: Any, corr_data: Dict) -> None:
"""
Merge a new correlation value into an existing correlation node.
Uses same logic as large entity merging.
"""
if not self.graph.has_node(target_node_id):
return
target_metadata = self.graph.nodes[target_node_id]['metadata']
# Get existing values (ensure it's a list)
existing_values = target_metadata.get('values', [])
if not isinstance(existing_values, list):
existing_values = [existing_values]
# Add new value if not already present
if new_value not in existing_values:
existing_values.append(new_value)
# Merge sources
existing_sources = target_metadata.get('sources', [])
new_sources = corr_data.get('sources', [])
# Create set of unique sources based on (node_id, path) tuples
source_set = set()
for source in existing_sources + new_sources:
source_tuple = (source['node_id'], source['path'])
source_set.add(source_tuple)
# Convert back to list of dictionaries
merged_sources = [{'node_id': nid, 'path': path} for nid, path in source_set]
# Update metadata
target_metadata.update({
'values': existing_values,
'sources': merged_sources,
'correlated_nodes': list(set(target_metadata.get('correlated_nodes', []) + corr_data.get('nodes', []))),
'merge_count': len(existing_values),
'last_merge_timestamp': datetime.now(timezone.utc).isoformat()
})
# Update description to reflect merged nature
value_count = len(existing_values)
node_count = len(target_metadata['correlated_nodes'])
self.graph.nodes[target_node_id]['description'] = (
f"Correlation container with {value_count} merged values "
f"across {node_count} nodes"
)
def add_edge(self, source_id: str, target_id: str, relationship_type: str,
confidence_score: float = 0.5, source_provider: str = "unknown",
raw_data: Optional[Dict[str, Any]] = None) -> bool:
"""Add or update an edge between two nodes, ensuring nodes exist."""
"""
UPDATED: Add or update an edge between two nodes with raw relationship labels.
"""
if not self.graph.has_node(source_id) or not self.graph.has_node(target_id):
return False
new_confidence = confidence_score
if relationship_type.startswith("c_"):
# UPDATED: Use raw relationship type - no formatting
edge_label = relationship_type
else:
edge_label = f"{source_provider}_{relationship_type}"
if self.graph.has_edge(source_id, target_id):
# If edge exists, update confidence if the new score is higher.
@@ -335,7 +155,7 @@ class GraphManager:
self.graph.edges[source_id, target_id]['updated_by'] = source_provider
return False
# Add a new edge with all attributes.
# Add a new edge with raw attributes
self.graph.add_edge(source_id, target_id,
relationship_type=edge_label,
confidence_score=new_confidence,
@@ -345,6 +165,17 @@ class GraphManager:
self.last_modified = datetime.now(timezone.utc).isoformat()
return True
def remove_node(self, node_id: str) -> bool:
"""Remove a node and its connected edges from the graph."""
if not self.graph.has_node(node_id):
return False
# Remove node from the graph (NetworkX handles removing connected edges)
self.graph.remove_node(node_id)
self.last_modified = datetime.now(timezone.utc).isoformat()
return True
def get_node_count(self) -> int:
"""Get total number of nodes in the graph."""
return self.graph.number_of_nodes()
@@ -357,97 +188,116 @@ class GraphManager:
"""Get all nodes of a specific type."""
return [n for n, d in self.graph.nodes(data=True) if d.get('type') == node_type.value]
def get_neighbors(self, node_id: str) -> List[str]:
"""Get all unique neighbors (predecessors and successors) for a node."""
if not self.graph.has_node(node_id):
return []
return list(set(self.graph.predecessors(node_id)) | set(self.graph.successors(node_id)))
def get_high_confidence_edges(self, min_confidence: float = 0.8) -> List[Tuple[str, str, Dict]]:
"""Get edges with confidence score above a given threshold."""
return [(u, v, d) for u, v, d in self.graph.edges(data=True)
if d.get('confidence_score', 0) >= min_confidence]
def get_graph_data(self) -> Dict[str, Any]:
"""Export graph data formatted for frontend visualization."""
"""
Export graph data formatted for frontend visualization.
SIMPLIFIED: No certificate styling - frontend handles all visual styling.
"""
nodes = []
for node_id, attrs in self.graph.nodes(data=True):
node_data = {'id': node_id, 'label': node_id, 'type': attrs.get('type', 'unknown'),
'attributes': attrs.get('attributes', {}),
node_data = {
'id': node_id,
'label': node_id,
'type': attrs.get('type', 'unknown'),
'attributes': attrs.get('attributes', []), # Raw attributes list
'description': attrs.get('description', ''),
'metadata': attrs.get('metadata', {}),
'added_timestamp': attrs.get('added_timestamp')}
# Customize node appearance based on type and attributes
node_type = node_data['type']
attributes = node_data['attributes']
if node_type == 'domain' and attributes.get('certificates', {}).get('has_valid_cert') is False:
node_data['color'] = {'background': '#c7c7c7', 'border': '#999'} # Gray for invalid cert
'added_timestamp': attrs.get('added_timestamp'),
'max_depth_reached': attrs.get('metadata', {}).get('max_depth_reached', False)
}
# Add incoming and outgoing edges to node data
if self.graph.has_node(node_id):
node_data['incoming_edges'] = [{'from': u, 'data': d} for u, _, d in self.graph.in_edges(node_id, data=True)]
node_data['outgoing_edges'] = [{'to': v, 'data': d} for _, v, d in self.graph.out_edges(node_id, data=True)]
node_data['incoming_edges'] = [
{'from': u, 'data': d} for u, _, d in self.graph.in_edges(node_id, data=True)
]
node_data['outgoing_edges'] = [
{'to': v, 'data': d} for _, v, d in self.graph.out_edges(node_id, data=True)
]
nodes.append(node_data)
edges = []
for source, target, attrs in self.graph.edges(data=True):
edges.append({'from': source, 'to': target,
edges.append({
'from': source,
'to': target,
'label': attrs.get('relationship_type', ''),
'confidence_score': attrs.get('confidence_score', 0),
'source_provider': attrs.get('source_provider', ''),
'discovery_timestamp': attrs.get('discovery_timestamp')})
'discovery_timestamp': attrs.get('discovery_timestamp')
})
return {
'nodes': nodes, 'edges': edges,
'nodes': nodes,
'edges': edges,
'statistics': self.get_statistics()['basic_metrics']
}
def export_json(self) -> Dict[str, Any]:
"""Export complete graph data as a JSON-serializable dictionary."""
graph_data = nx.node_link_data(self.graph) # Use NetworkX's built-in robust serializer
return {
'export_metadata': {
'export_timestamp': datetime.now(timezone.utc).isoformat(),
'graph_creation_time': self.creation_time,
'last_modified': self.last_modified,
'total_nodes': self.get_node_count(),
'total_edges': self.get_edge_count(),
'graph_format': 'dnsrecon_v1_nodeling'
},
'graph': graph_data,
'statistics': self.get_statistics()
}
def _get_confidence_distribution(self) -> Dict[str, int]:
"""Get distribution of edge confidence scores."""
"""Get distribution of edge confidence scores with empty graph handling."""
distribution = {'high': 0, 'medium': 0, 'low': 0}
for _, _, confidence in self.graph.edges(data='confidence_score', default=0):
if confidence >= 0.8: distribution['high'] += 1
elif confidence >= 0.6: distribution['medium'] += 1
else: distribution['low'] += 1
# FIXED: Handle empty graph case
if self.get_edge_count() == 0:
return distribution
for _, _, data in self.graph.edges(data=True):
confidence = data.get('confidence_score', 0)
if confidence >= 0.8:
distribution['high'] += 1
elif confidence >= 0.6:
distribution['medium'] += 1
else:
distribution['low'] += 1
return distribution
def get_statistics(self) -> Dict[str, Any]:
"""Get comprehensive statistics about the graph."""
stats = {'basic_metrics': {'total_nodes': self.get_node_count(),
'total_edges': self.get_edge_count(),
"""Get comprehensive statistics about the graph with proper empty graph handling."""
# FIXED: Handle empty graph case properly
node_count = self.get_node_count()
edge_count = self.get_edge_count()
stats = {
'basic_metrics': {
'total_nodes': node_count,
'total_edges': edge_count,
'creation_time': self.creation_time,
'last_modified': self.last_modified},
'node_type_distribution': {}, 'relationship_type_distribution': {},
'last_modified': self.last_modified
},
'node_type_distribution': {},
'relationship_type_distribution': {},
'confidence_distribution': self._get_confidence_distribution(),
'provider_distribution': {}}
# Calculate distributions
'provider_distribution': {}
}
# FIXED: Only calculate distributions if we have data
if node_count > 0:
# Calculate node type distributions
for node_type in NodeType:
stats['node_type_distribution'][node_type.value] = self.get_nodes_by_type(node_type).__len__()
for _, _, rel_type in self.graph.edges(data='relationship_type', default='unknown'):
count = len(self.get_nodes_by_type(node_type))
if count > 0: # Only include types that exist
stats['node_type_distribution'][node_type.value] = count
if edge_count > 0:
# Calculate edge distributions
for _, _, data in self.graph.edges(data=True):
rel_type = data.get('relationship_type', 'unknown')
stats['relationship_type_distribution'][rel_type] = stats['relationship_type_distribution'].get(rel_type, 0) + 1
for _, _, provider in self.graph.edges(data='source_provider', default='unknown'):
provider = data.get('source_provider', 'unknown')
stats['provider_distribution'][provider] = stats['provider_distribution'].get(provider, 0) + 1
return stats
def clear(self) -> None:
"""Clear all nodes, edges, and indices from the graph."""
"""Clear all nodes and edges from the graph."""
self.graph.clear()
self.correlation_index.clear()
self.creation_time = datetime.now(timezone.utc).isoformat()
self.last_modified = self.creation_time
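A short usage sketch of the pickle support added above (e.g. for persisting scanner state in Redis); the node and edge values are illustrative:
```python
import pickle

from core.graph_manager import GraphManager, NodeType

gm = GraphManager()
gm.add_node('example.com', NodeType.DOMAIN)
gm.add_node('93.184.216.34', NodeType.IP)
gm.add_edge('example.com', '93.184.216.34', 'a_record',
            confidence_score=0.9, source_provider='dns')

# __getstate__ converts the DiGraph to plain dicts/lists; __setstate__
# rebuilds it, so the round-trip preserves nodes and edges.
restored = pickle.loads(pickle.dumps(gm))
assert restored.get_node_count() == 2
assert restored.get_edge_count() == 1
```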

core/logger.py

@@ -40,9 +40,10 @@ class ForensicLogger:
"""
Thread-safe forensic logging system for DNSRecon.
Maintains detailed audit trail of all reconnaissance activities.
FIXED: Enhanced pickle support to prevent weakref issues in logging handlers.
"""
def __init__(self, session_id: str = None):
def __init__(self, session_id: str = ""):
"""
Initialize forensic logger.
@@ -50,7 +51,7 @@ class ForensicLogger:
session_id: Unique identifier for this reconnaissance session
"""
self.session_id = session_id or self._generate_session_id()
#self.lock = threading.Lock()
self.lock = threading.Lock()
# Initialize audit trail storage
self.api_requests: List[APIRequest] = []
@@ -65,43 +66,75 @@ class ForensicLogger:
'target_domains': set()
}
# Configure standard logger
# Configure standard logger with simple setup to avoid weakrefs
self.logger = logging.getLogger(f'dnsrecon.{self.session_id}')
self.logger.setLevel(logging.INFO)
# Create formatter for structured logging
# Create minimal formatter
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
# Add console handler if not already present
# Add console handler only if not already present (avoid duplicate handlers)
if not self.logger.handlers:
console_handler = logging.StreamHandler()
console_handler.setFormatter(formatter)
self.logger.addHandler(console_handler)
def __getstate__(self):
"""Prepare ForensicLogger for pickling by excluding unpicklable objects."""
"""
FIXED: Prepare ForensicLogger for pickling by excluding problematic objects.
"""
state = self.__dict__.copy()
# Remove the unpickleable 'logger' attribute
if 'logger' in state:
del state['logger']
# Remove potentially unpickleable attributes that may contain weakrefs
unpicklable_attrs = ['logger', 'lock']
for attr in unpicklable_attrs:
if attr in state:
del state[attr]
# Convert sets to lists for JSON serialization compatibility
if 'session_metadata' in state:
metadata = state['session_metadata'].copy()
if 'providers_used' in metadata and isinstance(metadata['providers_used'], set):
metadata['providers_used'] = list(metadata['providers_used'])
if 'target_domains' in metadata and isinstance(metadata['target_domains'], set):
metadata['target_domains'] = list(metadata['target_domains'])
state['session_metadata'] = metadata
return state
def __setstate__(self, state):
"""Restore ForensicLogger after unpickling by reconstructing logger."""
"""
FIXED: Restore ForensicLogger after unpickling by reconstructing components.
"""
self.__dict__.update(state)
# Re-initialize the 'logger' attribute
# Re-initialize threading lock
self.lock = threading.Lock()
# Re-initialize logger with minimal setup
self.logger = logging.getLogger(f'dnsrecon.{self.session_id}')
self.logger.setLevel(logging.INFO)
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
# Only add handler if not already present
if not self.logger.handlers:
console_handler = logging.StreamHandler()
console_handler.setFormatter(formatter)
self.logger.addHandler(console_handler)
# Convert lists back to sets if needed
if 'session_metadata' in self.__dict__:
metadata = self.session_metadata
if 'providers_used' in metadata and isinstance(metadata['providers_used'], list):
metadata['providers_used'] = set(metadata['providers_used'])
if 'target_domains' in metadata and isinstance(metadata['target_domains'], list):
metadata['target_domains'] = set(metadata['target_domains'])
def _generate_session_id(self) -> str:
"""Generate unique session identifier."""
return f"dnsrecon_{datetime.now(timezone.utc).strftime('%Y%m%d_%H%M%S')}"
@@ -140,6 +173,7 @@ class ForensicLogger:
discovery_context=discovery_context
)
with self.lock:
self.api_requests.append(api_request)
self.session_metadata['total_requests'] += 1
self.session_metadata['providers_used'].add(provider)
@@ -147,11 +181,15 @@ class ForensicLogger:
if target_indicator:
self.session_metadata['target_domains'].add(target_indicator)
# Log to standard logger
# Log to standard logger with error handling
try:
if error:
self.logger.error(f"API Request Failed - {provider}: {url} - {error}")
self.logger.error(f"API Request Failed - {provider}: {url}")
else:
self.logger.info(f"API Request - {provider}: {url} - Status: {status_code}")
except Exception:
# If logging fails, continue without breaking the application
pass
def log_relationship_discovery(self, source_node: str, target_node: str,
relationship_type: str, confidence_score: float,
@@ -180,31 +218,44 @@ class ForensicLogger:
discovery_method=discovery_method
)
with self.lock:
self.relationships.append(relationship)
self.session_metadata['total_relationships'] += 1
# Log to standard logger with error handling
try:
self.logger.info(
f"Relationship Discovered - {source_node} -> {target_node} "
f"({relationship_type}) - Confidence: {confidence_score:.2f} - Provider: {provider}"
)
except Exception:
# If logging fails, continue without breaking the application
pass
def log_scan_start(self, target_domain: str, recursion_depth: int,
enabled_providers: List[str]) -> None:
"""Log the start of a reconnaissance scan."""
try:
self.logger.info(f"Scan Started - Target: {target_domain}, Depth: {recursion_depth}")
self.logger.info(f"Enabled Providers: {', '.join(enabled_providers)}")
with self.lock:
self.session_metadata['target_domains'].add(target_domain)
except Exception:
pass
def log_scan_complete(self) -> None:
"""Log the completion of a reconnaissance scan."""
with self.lock:
self.session_metadata['end_time'] = datetime.now(timezone.utc).isoformat()
# Convert sets to lists for serialization
self.session_metadata['providers_used'] = list(self.session_metadata['providers_used'])
self.session_metadata['target_domains'] = list(self.session_metadata['target_domains'])
try:
self.logger.info(f"Scan Complete - Session: {self.session_id}")
self.logger.info(f"Total API Requests: {self.session_metadata['total_requests']}")
self.logger.info(f"Total Relationships: {self.session_metadata['total_relationships']}")
except Exception:
pass
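A minimal usage sketch of the logging flow above; get_forensic_logger is assumed to return a ForensicLogger instance (it is imported that way elsewhere in this changeset), and the target domain is only an example:
from core.logger import get_forensic_logger

logger = get_forensic_logger()
logger.log_scan_start('example.com', recursion_depth=2, enabled_providers=['dns', 'crtsh'])
# ... providers call log_api_request() / log_relationship_discovery() during the scan ...
logger.log_scan_complete()
audit = logger.export_audit_trail()
print(audit['session_metadata']['total_requests'])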
def export_audit_trail(self) -> Dict[str, Any]:
"""
@@ -213,6 +264,7 @@ class ForensicLogger:
Returns:
Dictionary containing complete session audit trail
"""
with self.lock:
return {
'session_metadata': self.session_metadata.copy(),
'api_requests': [asdict(req) for req in self.api_requests],
@@ -228,7 +280,13 @@ class ForensicLogger:
Dictionary containing summary statistics
"""
provider_stats = {}
# Ensure providers_used is a set for iteration
providers_used = self.session_metadata['providers_used']
if isinstance(providers_used, list):
providers_used = set(providers_used)
for provider in providers_used:
provider_requests = [req for req in self.api_requests if req.provider == provider]
provider_relationships = [rel for rel in self.relationships if rel.provider == provider]

core/provider_result.py (new file)

@@ -0,0 +1,107 @@
# dnsrecon-reduced/core/provider_result.py
"""
Unified data model for DNSRecon passive reconnaissance.
Standardizes the data structure across all providers to ensure consistent processing.
"""
from typing import Any, Optional, List, Dict
from dataclasses import dataclass, field
from datetime import datetime, timezone
@dataclass
class StandardAttribute:
"""A unified data structure for a single piece of information about a node."""
target_node: str
name: str
value: Any
type: str
provider: str
confidence: float
timestamp: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
metadata: Optional[Dict[str, Any]] = field(default_factory=dict)
def __post_init__(self):
"""Validate the attribute after initialization."""
if not isinstance(self.confidence, (int, float)) or not 0.0 <= self.confidence <= 1.0:
raise ValueError(f"Confidence must be between 0.0 and 1.0, got {self.confidence}")
@dataclass
class Relationship:
"""A unified data structure for a directional link between two nodes."""
source_node: str
target_node: str
relationship_type: str
confidence: float
provider: str
timestamp: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
raw_data: Optional[Dict[str, Any]] = field(default_factory=dict)
def __post_init__(self):
"""Validate the relationship after initialization."""
if not isinstance(self.confidence, (int, float)) or not 0.0 <= self.confidence <= 1.0:
raise ValueError(f"Confidence must be between 0.0 and 1.0, got {self.confidence}")
@dataclass
class ProviderResult:
"""A container for all data returned by a provider from a single query."""
attributes: List[StandardAttribute] = field(default_factory=list)
relationships: List[Relationship] = field(default_factory=list)
def add_attribute(self, target_node: str, name: str, value: Any, attr_type: str,
provider: str, confidence: float = 0.8,
metadata: Optional[Dict[str, Any]] = None) -> None:
"""Helper method to add an attribute to the result."""
self.attributes.append(StandardAttribute(
target_node=target_node,
name=name,
value=value,
type=attr_type,
provider=provider,
confidence=confidence,
metadata=metadata or {}
))
def add_relationship(self, source_node: str, target_node: str, relationship_type: str,
provider: str, confidence: float = 0.8,
raw_data: Optional[Dict[str, Any]] = None) -> None:
"""Helper method to add a relationship to the result."""
self.relationships.append(Relationship(
source_node=source_node,
target_node=target_node,
relationship_type=relationship_type,
confidence=confidence,
provider=provider,
raw_data=raw_data or {}
))
def get_discovered_nodes(self) -> set:
"""Get all unique node identifiers discovered in this result."""
nodes = set()
# Add nodes from relationships
for rel in self.relationships:
nodes.add(rel.source_node)
nodes.add(rel.target_node)
# Add nodes from attributes
for attr in self.attributes:
nodes.add(attr.target_node)
return nodes
def get_relationship_count(self) -> int:
"""Get the total number of relationships in this result."""
return len(self.relationships)
def get_attribute_count(self) -> int:
"""Get the total number of attributes in this result."""
return len(self.attributes)
##TODO
#def is_large_entity(self, threshold: int) -> bool:
# """Check if this result qualifies as a large entity based on relationship count."""
# return self.get_relationship_count() > threshold
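For illustration, a minimal sketch of how a provider might populate a ProviderResult; the node names and relationship type are hypothetical:
result = ProviderResult()
result.add_attribute(target_node='example.com', name='asn', value='AS64496', attr_type='string', provider='shodan', confidence=0.9)
result.add_relationship(source_node='example.com', target_node='93.184.216.34', relationship_type='resolves_to', provider='dns', confidence=1.0)
print(result.get_discovered_nodes())    # {'example.com', '93.184.216.34'}
print(result.get_relationship_count())  # 1
# Confidence values outside [0.0, 1.0] raise ValueError in __post_init__.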

core/rate_limiter.py (new file)

@@ -0,0 +1,28 @@
# dnsrecon-reduced/core/rate_limiter.py
import time
class GlobalRateLimiter:
def __init__(self, redis_client):
self.redis = redis_client
def is_rate_limited(self, key, limit, period):
"""
Check if a key is rate-limited.
"""
now = time.time()
key = f"rate_limit:{key}"
# Remove old timestamps
self.redis.zremrangebyscore(key, 0, now - period)
# Check the count
count = self.redis.zcard(key)
if count >= limit:
return True
# Add new timestamp
self.redis.zadd(key, {now: now})
self.redis.expire(key, period)
return False
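A minimal usage sketch of the sliding-window limiter above, assuming a local Redis instance and an arbitrary budget of 60 requests per 60 seconds for the 'crtsh' key:
import redis

limiter = GlobalRateLimiter(redis.StrictRedis(db=0))
if limiter.is_rate_limited('crtsh', limit=60, period=60):
    print('crtsh budget exhausted, backing off')
else:
    print('request allowed')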

File diff suppressed because it is too large

@@ -1,372 +1,20 @@
"""
Per-session configuration management for DNSRecon.
Provides isolated configuration instances for each user session.
"""
import os
from typing import Dict, Optional
from config import Config
class SessionConfig(Config):
"""
Session-specific configuration that inherits from global config
but maintains isolated API keys and provider settings.
"""
def __init__(self):
"""Initialize enhanced session config with global cache support."""
# Copy all attributes from global config
self.api_keys: Dict[str, Optional[str]] = {
'shodan': None
}
"""Initialize session config with global defaults."""
super().__init__()
# Default settings (copied from global config)
self.default_recursion_depth = 2
self.default_timeout = 30
self.max_concurrent_requests = 5
self.large_entity_threshold = 100
# Enhanced rate limiting settings (per session)
self.rate_limits = {
'crtsh': 60,
'shodan': 60,
'dns': 100
}
# Enhanced provider settings (per session)
self.enabled_providers = {
'crtsh': True,
'dns': True,
'shodan': False
}
# Task-based execution settings
self.task_retry_settings = {
'max_retries': 3,
'base_backoff_seconds': 1.0,
'max_backoff_seconds': 60.0,
'retry_on_rate_limit': True,
'retry_on_connection_error': True,
'retry_on_timeout': True
}
# Cache settings (global across all sessions)
self.cache_settings = {
'enabled': True,
'expiry_hours': 12,
'cache_base_dir': '.cache',
'per_provider_directories': True,
'thread_safe_operations': True
}
# Logging configuration
self.log_level = 'INFO'
self.log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
# Flask configuration (shared)
self.flask_host = '127.0.0.1'
self.flask_port = 5000
self.flask_debug = True
# Session isolation settings
self.session_isolation = {
'enforce_single_session_per_user': True,
'consolidate_session_data_on_replacement': True,
'user_fingerprinting_enabled': True,
'session_timeout_minutes': 60
}
# Circuit breaker settings for provider reliability
self.circuit_breaker = {
'enabled': True,
'failure_threshold': 5, # Failures before opening circuit
'recovery_timeout_seconds': 300, # 5 minutes before trying again
'half_open_max_calls': 3 # Test calls when recovering
}
def set_api_key(self, provider: str, api_key: str) -> bool:
"""
Set API key for a provider in this session.
Args:
provider: Provider name (shodan, etc)
api_key: API key string (empty string to clear)
Returns:
bool: True if key was set successfully
"""
if provider in self.api_keys:
# Handle clearing of API keys
if api_key and api_key.strip():
self.api_keys[provider] = api_key.strip()
self.enabled_providers[provider] = True
else:
self.api_keys[provider] = None
self.enabled_providers[provider] = False
return True
return False
def get_api_key(self, provider: str) -> Optional[str]:
"""
Get API key for a provider in this session.
Args:
provider: Provider name
Returns:
API key or None if not set
"""
return self.api_keys.get(provider)
def is_provider_enabled(self, provider: str) -> bool:
"""
Check if a provider is enabled in this session.
Args:
provider: Provider name
Returns:
bool: True if provider is enabled
"""
return self.enabled_providers.get(provider, False)
def get_rate_limit(self, provider: str) -> int:
"""
Get rate limit for a provider in this session.
Args:
provider: Provider name
Returns:
Rate limit in requests per minute
"""
return self.rate_limits.get(provider, 60)
def get_task_retry_config(self) -> Dict[str, any]:
"""
Get task retry configuration for this session.
Returns:
Dictionary with retry settings
"""
return self.task_retry_settings.copy()
def get_cache_config(self) -> Dict[str, any]:
"""
Get cache configuration (global settings).
Returns:
Dictionary with cache settings
"""
return self.cache_settings.copy()
def is_circuit_breaker_enabled(self) -> bool:
"""Check if circuit breaker is enabled for provider reliability."""
return self.circuit_breaker.get('enabled', True)
def get_circuit_breaker_config(self) -> Dict[str, any]:
"""Get circuit breaker configuration."""
return self.circuit_breaker.copy()
def update_provider_settings(self, provider_updates: Dict[str, Dict[str, any]]) -> bool:
"""
Update provider-specific settings in bulk.
Args:
provider_updates: Dictionary of provider -> settings updates
Returns:
bool: True if updates were applied successfully
"""
try:
for provider_name, updates in provider_updates.items():
# Update rate limits
if 'rate_limit' in updates:
self.rate_limits[provider_name] = updates['rate_limit']
# Update enabled status
if 'enabled' in updates:
self.enabled_providers[provider_name] = updates['enabled']
# Update API key
if 'api_key' in updates:
self.set_api_key(provider_name, updates['api_key'])
return True
except Exception as e:
print(f"Error updating provider settings: {e}")
return False
def validate_configuration(self) -> Dict[str, any]:
"""
Validate the current configuration and return validation results.
Returns:
Dictionary with validation results and any issues found
"""
validation_result = {
'valid': True,
'warnings': [],
'errors': [],
'provider_status': {}
}
# Validate provider configurations
for provider_name, enabled in self.enabled_providers.items():
provider_status = {
'enabled': enabled,
'has_api_key': bool(self.api_keys.get(provider_name)),
'rate_limit': self.rate_limits.get(provider_name, 60)
}
# Check for potential issues
if enabled and provider_name in ['shodan'] and not provider_status['has_api_key']:
validation_result['warnings'].append(
f"Provider '{provider_name}' is enabled but missing API key"
)
validation_result['provider_status'][provider_name] = provider_status
# Validate task settings
if self.task_retry_settings['max_retries'] > 10:
validation_result['warnings'].append(
f"High retry count ({self.task_retry_settings['max_retries']}) may cause long delays"
)
# Validate concurrent settings
if self.max_concurrent_requests > 10:
validation_result['warnings'].append(
f"High concurrency ({self.max_concurrent_requests}) may overwhelm providers"
)
# Validate cache settings
if not os.path.exists(self.cache_settings['cache_base_dir']):
try:
os.makedirs(self.cache_settings['cache_base_dir'], exist_ok=True)
except Exception as e:
validation_result['errors'].append(f"Cannot create cache directory: {e}")
validation_result['valid'] = False
return validation_result
def load_from_env(self):
"""Load configuration from environment variables with enhanced validation."""
# Load API keys from environment
if os.getenv('SHODAN_API_KEY') and not self.api_keys['shodan']:
self.set_api_key('shodan', os.getenv('SHODAN_API_KEY'))
print("Loaded Shodan API key from environment")
# Override default settings from environment
self.default_recursion_depth = int(os.getenv('DEFAULT_RECURSION_DEPTH', '2'))
self.default_timeout = int(os.getenv('DEFAULT_TIMEOUT', '30'))
self.max_concurrent_requests = int(os.getenv('MAX_CONCURRENT_REQUESTS', '5'))
# Load task retry settings from environment
if os.getenv('TASK_MAX_RETRIES'):
self.task_retry_settings['max_retries'] = int(os.getenv('TASK_MAX_RETRIES'))
if os.getenv('TASK_BASE_BACKOFF'):
self.task_retry_settings['base_backoff_seconds'] = float(os.getenv('TASK_BASE_BACKOFF'))
# Load cache settings from environment
if os.getenv('CACHE_EXPIRY_HOURS'):
self.cache_settings['expiry_hours'] = int(os.getenv('CACHE_EXPIRY_HOURS'))
if os.getenv('CACHE_DISABLED'):
self.cache_settings['enabled'] = os.getenv('CACHE_DISABLED').lower() != 'true'
# Load circuit breaker settings
if os.getenv('CIRCUIT_BREAKER_DISABLED'):
self.circuit_breaker['enabled'] = os.getenv('CIRCUIT_BREAKER_DISABLED').lower() != 'true'
# Flask settings
self.flask_debug = os.getenv('FLASK_DEBUG', 'True').lower() == 'true'
print("Enhanced configuration loaded from environment")
def export_config_summary(self) -> Dict[str, any]:
"""
Export a summary of the current configuration for debugging/logging.
Returns:
Dictionary with configuration summary (API keys redacted)
"""
return {
'providers': {
provider: {
'enabled': self.enabled_providers.get(provider, False),
'has_api_key': bool(self.api_keys.get(provider)),
'rate_limit': self.rate_limits.get(provider, 60)
}
for provider in self.enabled_providers.keys()
},
'task_settings': {
'max_retries': self.task_retry_settings['max_retries'],
'max_concurrent_requests': self.max_concurrent_requests,
'large_entity_threshold': self.large_entity_threshold
},
'cache_settings': {
'enabled': self.cache_settings['enabled'],
'expiry_hours': self.cache_settings['expiry_hours'],
'base_directory': self.cache_settings['cache_base_dir']
},
'session_settings': {
'isolation_enabled': self.session_isolation['enforce_single_session_per_user'],
'consolidation_enabled': self.session_isolation['consolidate_session_data_on_replacement'],
'timeout_minutes': self.session_isolation['session_timeout_minutes']
},
'circuit_breaker': {
'enabled': self.circuit_breaker['enabled'],
'failure_threshold': self.circuit_breaker['failure_threshold'],
'recovery_timeout': self.circuit_breaker['recovery_timeout_seconds']
}
}
def create_session_config() -> SessionConfig:
"""
Create a new enhanced session configuration instance.
Returns:
Configured SessionConfig instance
"""
session_config = SessionConfig()
session_config.load_from_env()
# Validate configuration and log any issues
validation = session_config.validate_configuration()
if validation['warnings']:
print("Configuration warnings:")
for warning in validation['warnings']:
print(f" WARNING: {warning}")
if validation['errors']:
print("Configuration errors:")
for error in validation['errors']:
print(f" ERROR: {error}")
if not validation['valid']:
raise ValueError("Configuration validation failed - see errors above")
print(f"Enhanced session configuration created successfully")
return session_config
def create_test_config() -> SessionConfig:
"""
Create a test configuration with safe defaults for testing.
Returns:
Test-safe SessionConfig instance
"""
test_config = SessionConfig()
# Override settings for testing
test_config.max_concurrent_requests = 2
test_config.task_retry_settings['max_retries'] = 1
test_config.task_retry_settings['base_backoff_seconds'] = 0.1
test_config.cache_settings['expiry_hours'] = 1
test_config.session_isolation['session_timeout_minutes'] = 10
print("Test configuration created")
return test_config
def create_session_config() -> 'SessionConfig':
"""Create a new session configuration instance."""
return SessionConfig()
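For illustration, a rough sketch of per-session configuration use; it assumes the API-key and provider helpers shown in the older version above remain available via the Config base class:
config = create_session_config()
config.set_api_key('shodan', 'YOUR_SHODAN_KEY')  # placeholder key, not a real credential
print(config.is_provider_enabled('shodan'))      # True once a key is set
print(config.get_rate_limit('crtsh'))            # falls back to 60 requests/minute if unset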


@@ -5,154 +5,46 @@ import time
import uuid
import redis
import pickle
import hashlib
from typing import Dict, Optional, Any, List, Tuple
from typing import Dict, Optional, Any
import copy
from core.scanner import Scanner
class UserIdentifier:
"""Handles user identification for session management."""
@staticmethod
def generate_user_fingerprint(client_ip: str, user_agent: str) -> str:
"""
Generate a unique fingerprint for a user based on IP and User-Agent.
Args:
client_ip: Client IP address
user_agent: User-Agent header value
Returns:
Unique user fingerprint hash
"""
# Create deterministic user identifier
user_data = f"{client_ip}:{user_agent[:100]}" # Limit UA to 100 chars
fingerprint = hashlib.sha256(user_data.encode()).hexdigest()[:16] # 16 char fingerprint
return f"user_{fingerprint}"
@staticmethod
def extract_request_info(request) -> Tuple[str, str]:
"""
Extract client IP and User-Agent from Flask request.
Args:
request: Flask request object
Returns:
Tuple of (client_ip, user_agent)
"""
# Handle proxy headers for real IP
client_ip = request.headers.get('X-Forwarded-For', '').split(',')[0].strip()
if not client_ip:
client_ip = request.headers.get('X-Real-IP', '')
if not client_ip:
client_ip = request.remote_addr or 'unknown'
user_agent = request.headers.get('User-Agent', 'unknown')
return client_ip, user_agent
class SessionConsolidator:
"""Handles consolidation of session data when replacing sessions."""
@staticmethod
def consolidate_scanner_data(old_scanner: 'Scanner', new_scanner: 'Scanner') -> 'Scanner':
"""
Consolidate useful data from old scanner into new scanner.
Args:
old_scanner: Scanner from terminated session
new_scanner: New scanner instance
Returns:
Enhanced new scanner with consolidated data
"""
try:
# Consolidate graph data if old scanner has valuable data
if old_scanner and hasattr(old_scanner, 'graph') and old_scanner.graph:
old_stats = old_scanner.graph.get_statistics()
if old_stats['basic_metrics']['total_nodes'] > 0:
print(f"Consolidating graph data: {old_stats['basic_metrics']['total_nodes']} nodes, {old_stats['basic_metrics']['total_edges']} edges")
# Transfer nodes and edges to new scanner's graph
for node_id, node_data in old_scanner.graph.graph.nodes(data=True):
# Add node to new graph with all attributes
new_scanner.graph.graph.add_node(node_id, **node_data)
for source, target, edge_data in old_scanner.graph.graph.edges(data=True):
# Add edge to new graph with all attributes
new_scanner.graph.graph.add_edge(source, target, **edge_data)
# Update correlation index
if hasattr(old_scanner.graph, 'correlation_index'):
new_scanner.graph.correlation_index = old_scanner.graph.correlation_index.copy()
# Update timestamps
new_scanner.graph.creation_time = old_scanner.graph.creation_time
new_scanner.graph.last_modified = old_scanner.graph.last_modified
# Consolidate provider statistics
if old_scanner and hasattr(old_scanner, 'providers') and old_scanner.providers:
for old_provider in old_scanner.providers:
# Find matching provider in new scanner
matching_new_provider = None
for new_provider in new_scanner.providers:
if new_provider.get_name() == old_provider.get_name():
matching_new_provider = new_provider
break
if matching_new_provider:
# Transfer cumulative statistics
matching_new_provider.total_requests += old_provider.total_requests
matching_new_provider.successful_requests += old_provider.successful_requests
matching_new_provider.failed_requests += old_provider.failed_requests
matching_new_provider.total_relationships_found += old_provider.total_relationships_found
# Transfer cache statistics if available
if hasattr(old_provider, 'cache_hits'):
matching_new_provider.cache_hits += getattr(old_provider, 'cache_hits', 0)
matching_new_provider.cache_misses += getattr(old_provider, 'cache_misses', 0)
print(f"Consolidated {old_provider.get_name()} provider stats: {old_provider.total_requests} requests")
return new_scanner
except Exception as e:
print(f"Warning: Error during session consolidation: {e}")
return new_scanner
from config import config
class SessionManager:
"""
Manages multiple scanner instances for concurrent user sessions using Redis.
Maintains WebSocket connections throughout the scan lifecycle.
"""
def __init__(self, session_timeout_minutes: int = 0):
"""
Initialize session manager with a Redis backend.
"""
if session_timeout_minutes is None:
session_timeout_minutes = config.session_timeout_minutes
self.redis_client = redis.StrictRedis(db=0, decode_responses=False)
self.session_timeout = session_timeout_minutes * 60 # Convert to seconds
self.lock = threading.Lock()
# User identification helper
self.user_identifier = UserIdentifier()
self.consolidator = SessionConsolidator()
# FIXED: Add a creation lock to prevent race conditions
self.creation_lock = threading.Lock()
# Track active socketio connections per session
self.active_socketio_connections = {}
# Start cleanup thread
self.cleanup_thread = threading.Thread(target=self._cleanup_loop, daemon=True)
self.cleanup_thread.start()
print(f"SessionManager initialized with Redis backend, user tracking, and {session_timeout_minutes}min timeout")
print(f"SessionManager initialized with Redis backend and {session_timeout_minutes}min timeout")
def __getstate__(self):
"""Prepare SessionManager for pickling."""
state = self.__dict__.copy()
# Exclude unpicklable attributes - Redis client and threading objects
unpicklable_attrs = ['lock', 'cleanup_thread', 'redis_client', 'creation_lock', 'active_socketio_connections']
for attr in unpicklable_attrs:
if attr in state:
del state[attr]
@@ -162,115 +54,140 @@ class SessionManager:
"""Restore SessionManager after unpickling."""
self.__dict__.update(state)
# Re-initialize unpickleable attributes
import redis
self.redis_client = redis.StrictRedis(db=0, decode_responses=False)
self.lock = threading.Lock()
self.creation_lock = threading.Lock()
self.active_socketio_connections = {}
self.cleanup_thread = threading.Thread(target=self._cleanup_loop, daemon=True)
self.cleanup_thread.start()
def _get_session_key(self, session_id: str) -> str:
"""Generate Redis key for a session."""
"""Generates the Redis key for a session."""
return f"dnsrecon:session:{session_id}"
def _get_user_session_key(self, user_fingerprint: str) -> str:
"""Generate Redis key for user -> session mapping."""
return f"dnsrecon:user:{user_fingerprint}"
def _get_stop_signal_key(self, session_id: str) -> str:
"""Generate Redis key for session stop signal."""
"""Generates the Redis key for a session's stop signal."""
return f"dnsrecon:stop:{session_id}"
def create_or_replace_user_session(self, client_ip: str, user_agent: str) -> str:
def register_socketio_connection(self, session_id: str, socketio) -> None:
"""
Create new session for user, replacing any existing session.
Consolidates data from previous session if it exists.
Args:
client_ip: Client IP address
user_agent: User-Agent header
Returns:
New session ID
FIXED: Register a socketio connection for a session.
This ensures the connection is maintained throughout the session lifecycle.
"""
user_fingerprint = self.user_identifier.generate_user_fingerprint(client_ip, user_agent)
new_session_id = str(uuid.uuid4())
with self.lock:
self.active_socketio_connections[session_id] = socketio
print(f"Registered socketio connection for session {session_id}")
print(f"=== CREATING/REPLACING SESSION FOR USER {user_fingerprint} ===")
def get_socketio_connection(self, session_id: str):
"""
FIXED: Get the active socketio connection for a session.
"""
with self.lock:
return self.active_socketio_connections.get(session_id)
def _prepare_scanner_for_storage(self, scanner: Scanner, session_id: str) -> Scanner:
"""
FIXED: Prepare scanner for storage by ensuring proper cleanup of unpicklable objects.
Now preserves socketio connection info for restoration.
"""
# Set the session ID on the scanner for cross-process stop signal management
scanner.session_id = session_id
# FIXED: Don't set socketio to None if we want to preserve real-time updates
# Instead, we'll restore it when loading the scanner
scanner.socketio = None
# Force cleanup of any threading objects that might cause issues
if hasattr(scanner, 'stop_event'):
scanner.stop_event = None
if hasattr(scanner, 'scan_thread'):
scanner.scan_thread = None
if hasattr(scanner, 'executor'):
scanner.executor = None
if hasattr(scanner, 'status_logger_thread'):
scanner.status_logger_thread = None
if hasattr(scanner, 'status_logger_stop_event'):
scanner.status_logger_stop_event = None
return scanner
def create_session(self, socketio=None) -> str:
"""
FIXED: Create a new user session with enhanced WebSocket management.
"""
# FIXED: Use creation lock to prevent race conditions
with self.creation_lock:
session_id = str(uuid.uuid4())
print(f"=== CREATING SESSION {session_id} IN REDIS ===")
# FIXED: Register socketio connection first
if socketio:
self.register_socketio_connection(session_id, socketio)
try:
# Check for existing user session
existing_session_id = self._get_user_current_session(user_fingerprint)
old_scanner = None
if existing_session_id:
print(f"Found existing session {existing_session_id} for user {user_fingerprint}")
# Get old scanner data for consolidation
old_scanner = self.get_session(existing_session_id)
# Terminate old session
self._terminate_session_internal(existing_session_id, cleanup_user_mapping=False)
print(f"Terminated old session {existing_session_id}")
# Create new session config and scanner
from core.session_config import create_session_config
session_config = create_session_config()
new_scanner = Scanner(session_config=session_config)
# Set session ID on scanner for cross-process operations
new_scanner.session_id = new_session_id
# Create scanner WITHOUT socketio to avoid weakref issues
scanner_instance = Scanner(session_config=session_config, socketio=None)
# Consolidate data from old session if available
if old_scanner:
new_scanner = self.consolidator.consolidate_scanner_data(old_scanner, new_scanner)
print(f"Consolidated data from previous session")
# Prepare scanner for storage (removes problematic objects)
scanner_instance = self._prepare_scanner_for_storage(scanner_instance, session_id)
# Create session data
session_data = {
'scanner': new_scanner,
'scanner': scanner_instance,
'config': session_config,
'created_at': time.time(),
'last_activity': time.time(),
'status': 'active',
'user_fingerprint': user_fingerprint,
'client_ip': client_ip,
'user_agent': user_agent[:200] # Truncate for storage
'status': 'active'
}
# Store session in Redis
session_key = self._get_session_key(new_session_id)
serialized_data = pickle.dumps(session_data)
self.redis_client.setex(session_key, self.session_timeout, serialized_data)
# Test serialization before storing to catch issues early
try:
test_serialization = pickle.dumps(session_data)
print(f"Session serialization test successful ({len(test_serialization)} bytes)")
except Exception as pickle_error:
print(f"PICKLE TEST FAILED: {pickle_error}")
# Try to identify the problematic object
for key, value in session_data.items():
try:
pickle.dumps(value)
print(f" {key}: OK")
except Exception as item_error:
print(f" {key}: FAILED - {item_error}")
raise pickle_error
# Update user -> session mapping
user_session_key = self._get_user_session_key(user_fingerprint)
self.redis_client.setex(user_session_key, self.session_timeout, new_session_id.encode('utf-8'))
# Store in Redis
session_key = self._get_session_key(session_id)
self.redis_client.setex(session_key, self.session_timeout, test_serialization)
# Initialize stop signal
stop_key = self._get_stop_signal_key(new_session_id)
# Initialize stop signal as False
stop_key = self._get_stop_signal_key(session_id)
self.redis_client.setex(stop_key, self.session_timeout, b'0')
print(f"Created new session {new_session_id} for user {user_fingerprint}")
return new_session_id
print(f"Session {session_id} stored in Redis with stop signal initialized")
print(f"Session has {len(scanner_instance.providers)} providers: {[p.get_name() for p in scanner_instance.providers]}")
return session_id
except Exception as e:
print(f"ERROR: Failed to create session for user {user_fingerprint}: {e}")
print(f"ERROR: Failed to create session {session_id}: {e}")
import traceback
traceback.print_exc()
raise
def _get_user_current_session(self, user_fingerprint: str) -> Optional[str]:
"""Get current session ID for a user."""
try:
user_session_key = self._get_user_session_key(user_fingerprint)
session_id_bytes = self.redis_client.get(user_session_key)
if session_id_bytes:
return session_id_bytes.decode('utf-8')
return None
except Exception as e:
print(f"Error getting user session: {e}")
return None
def set_stop_signal(self, session_id: str) -> bool:
"""Set stop signal for session (cross-process safe)."""
"""
Set the stop signal for a session (cross-process safe).
Args:
session_id: Session identifier
Returns:
bool: True if signal was set successfully
"""
try:
stop_key = self._get_stop_signal_key(session_id)
# Set stop signal to '1' with the same TTL as the session
self.redis_client.setex(stop_key, self.session_timeout, b'1')
print(f"Stop signal set for session {session_id}")
return True
@@ -279,7 +196,15 @@ class SessionManager:
return False
def is_stop_requested(self, session_id: str) -> bool:
"""Check if stop is requested for session (cross-process safe)."""
"""
Check if stop is requested for a session (cross-process safe).
Args:
session_id: Session identifier
Returns:
bool: True if stop is requested
"""
try:
stop_key = self._get_stop_signal_key(session_id)
value = self.redis_client.get(stop_key)
@@ -289,7 +214,15 @@ class SessionManager:
return False
def clear_stop_signal(self, session_id: str) -> bool:
"""Clear stop signal for session."""
"""
Clear the stop signal for a session.
Args:
session_id: Session identifier
Returns:
bool: True if signal was cleared successfully
"""
try:
stop_key = self._get_stop_signal_key(session_id)
self.redis_client.setex(stop_key, self.session_timeout, b'0')
@@ -300,51 +233,101 @@ class SessionManager:
return False
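A minimal sketch of how the stop signal is meant to be used across processes; session_manager refers to the module-level instance created at the bottom of this file, and the worker loop body is illustrative only:
# Web process: request cancellation of a running scan
session_manager.set_stop_signal(session_id)

# Scan worker: poll the shared flag between provider calls
while not session_manager.is_stop_requested(session_id):
    process_next_target()  # hypothetical helper standing in for the real scan step
session_manager.clear_stop_signal(session_id)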
def _get_session_data(self, session_id: str) -> Optional[Dict[str, Any]]:
"""Retrieve and deserialize session data from Redis."""
"""Retrieves and deserializes session data from Redis."""
try:
session_key = self._get_session_key(session_id)
serialized_data = self.redis_client.get(session_key)
if serialized_data:
session_data = pickle.loads(serialized_data)
# Ensure the scanner has the correct session ID for stop signal checking
if 'scanner' in session_data and session_data['scanner']:
session_data['scanner'].session_id = session_id
# FIXED: Restore socketio connection from our registry
socketio_conn = self.get_socketio_connection(session_id)
if socketio_conn:
session_data['scanner'].socketio = socketio_conn
print(f"Restored socketio connection for session {session_id}")
else:
print(f"No socketio connection found for session {session_id}")
session_data['scanner'].socketio = None
return session_data
return None
except Exception as e:
print(f"ERROR: Failed to get session data for {session_id}: {e}")
import traceback
traceback.print_exc()
return None
def _save_session_data(self, session_id: str, session_data: Dict[str, Any]) -> bool:
"""Serialize and save session data to Redis with updated TTL."""
"""
Serializes and saves session data back to Redis with updated TTL.
FIXED: Now preserves socketio connection during storage.
Returns:
bool: True if save was successful
"""
try:
session_key = self._get_session_key(session_id)
serialized_data = pickle.dumps(session_data)
# Create a deep copy to avoid modifying the original scanner object
session_data_to_save = copy.deepcopy(session_data)
# Prepare scanner for storage if it exists
if 'scanner' in session_data_to_save and session_data_to_save['scanner']:
# FIXED: Preserve the original socketio connection before preparing for storage
original_socketio = session_data_to_save['scanner'].socketio
session_data_to_save['scanner'] = self._prepare_scanner_for_storage(
session_data_to_save['scanner'],
session_id
)
# FIXED: If we had a socketio connection, make sure it's registered
if original_socketio and session_id not in self.active_socketio_connections:
self.register_socketio_connection(session_id, original_socketio)
serialized_data = pickle.dumps(session_data_to_save)
result = self.redis_client.setex(session_key, self.session_timeout, serialized_data)
# Also refresh user mapping TTL if available
if 'user_fingerprint' in session_data:
user_session_key = self._get_user_session_key(session_data['user_fingerprint'])
self.redis_client.setex(user_session_key, self.session_timeout, session_id.encode('utf-8'))
return result
except Exception as e:
print(f"ERROR: Failed to save session data for {session_id}: {e}")
import traceback
traceback.print_exc()
return False
def update_session_scanner(self, session_id: str, scanner: 'Scanner') -> bool:
"""Update scanner object in session with immediate persistence."""
"""
FIXED: Updates just the scanner object in a session with immediate persistence.
Now maintains socketio connection throughout the update process.
Returns:
bool: True if update was successful
"""
try:
session_data = self._get_session_data(session_id)
if session_data:
# Ensure scanner has session ID
scanner.session_id = session_id
# FIXED: Preserve socketio connection before preparing for storage
original_socketio = scanner.socketio
# Prepare scanner for storage
scanner = self._prepare_scanner_for_storage(scanner, session_id)
session_data['scanner'] = scanner
session_data['last_activity'] = time.time()
# FIXED: Restore socketio connection after preparation
if original_socketio:
self.register_socketio_connection(session_id, original_socketio)
session_data['scanner'].socketio = original_socketio
# Immediately save to Redis for GUI updates
success = self._save_session_data(session_id, session_data)
if success:
print(f"Scanner state updated for session {session_id} (status: {scanner.status})")
# Only log occasionally to reduce noise
if hasattr(self, '_last_update_log'):
if time.time() - self._last_update_log > 5: # Log every 5 seconds max
self._last_update_log = time.time()
else:
self._last_update_log = time.time()
else:
print(f"WARNING: Failed to save scanner state for session {session_id}")
return success
@@ -353,10 +336,21 @@ class SessionManager:
return False
except Exception as e:
print(f"ERROR: Failed to update scanner for session {session_id}: {e}")
import traceback
traceback.print_exc()
return False
def update_scanner_status(self, session_id: str, status: str) -> bool:
"""Quickly update scanner status for immediate GUI feedback."""
"""
Quickly update just the scanner status for immediate GUI feedback.
Args:
session_id: Session identifier
status: New scanner status
Returns:
bool: True if update was successful
"""
try:
session_data = self._get_session_data(session_id)
if session_data and 'scanner' in session_data:
@@ -375,7 +369,9 @@ class SessionManager:
return False
def get_session(self, session_id: str) -> Optional[Scanner]:
"""Get scanner instance for session with session ID management."""
"""
FIXED: Get scanner instance for a session from Redis with proper socketio restoration.
"""
if not session_id:
return None
@@ -390,13 +386,30 @@ class SessionManager:
scanner = session_data.get('scanner')
if scanner:
# Ensure the scanner can check the Redis-based stop signal
scanner.session_id = session_id
# FIXED: Restore socketio connection from our registry
socketio_conn = self.get_socketio_connection(session_id)
if socketio_conn:
scanner.socketio = socketio_conn
print(f"✓ Restored socketio connection for session {session_id}")
else:
scanner.socketio = None
print(f"⚠️ No socketio connection found for session {session_id}")
return scanner
def get_session_status_only(self, session_id: str) -> Optional[str]:
"""Get scanner status without full session retrieval (for performance)."""
"""
Get just the scanner status without full session retrieval (for performance).
Args:
session_id: Session identifier
Returns:
Scanner status string or None if not found
"""
try:
session_data = self._get_session_data(session_id)
if session_data and 'scanner' in session_data:
@@ -407,18 +420,16 @@ class SessionManager:
return None
def terminate_session(self, session_id: str) -> bool:
"""Terminate specific session with reliable stop signal and immediate status update."""
return self._terminate_session_internal(session_id, cleanup_user_mapping=True)
def _terminate_session_internal(self, session_id: str, cleanup_user_mapping: bool = True) -> bool:
"""Internal session termination with configurable user mapping cleanup."""
"""
Terminate a specific session in Redis with reliable stop signal and immediate status update.
"""
print(f"=== TERMINATING SESSION {session_id} ===")
try:
# Set stop signal first
# First, set the stop signal
self.set_stop_signal(session_id)
# Update scanner status to stopped immediately for GUI feedback
self.update_scanner_status(session_id, 'stopped')
session_data = self._get_session_data(session_id)
@@ -429,19 +440,22 @@ class SessionManager:
scanner = session_data.get('scanner')
if scanner and scanner.status == 'running':
print(f"Stopping scan for session: {session_id}")
# The scanner will check the Redis stop signal
scanner.stop_scan()
# Update the scanner state immediately
self.update_session_scanner(session_id, scanner)
# Wait a moment for graceful shutdown
time.sleep(0.5)
# Clean up user mapping if requested
if cleanup_user_mapping and 'user_fingerprint' in session_data:
user_session_key = self._get_user_session_key(session_data['user_fingerprint'])
self.redis_client.delete(user_session_key)
print(f"Cleaned up user mapping for {session_data['user_fingerprint']}")
# FIXED: Clean up socketio connection
with self.lock:
if session_id in self.active_socketio_connections:
del self.active_socketio_connections[session_id]
print(f"Cleaned up socketio connection for session {session_id}")
# Delete session data and stop signal from Redis
session_key = self._get_session_key(session_id)
stop_key = self._get_stop_signal_key(session_id)
self.redis_client.delete(session_key)
@@ -452,75 +466,46 @@ class SessionManager:
except Exception as e:
print(f"ERROR: Failed to terminate session {session_id}: {e}")
import traceback
traceback.print_exc()
return False
def _cleanup_loop(self) -> None:
"""Background thread to cleanup inactive sessions and orphaned signals."""
"""
Background thread to cleanup inactive sessions and orphaned stop signals.
"""
while True:
try:
# Clean up orphaned stop signals
stop_keys = self.redis_client.keys("dnsrecon:stop:*")
for stop_key in stop_keys:
# Extract session ID from stop key
session_id = stop_key.decode('utf-8').split(':')[-1]
session_key = self._get_session_key(session_id)
# If session doesn't exist but stop signal does, clean it up
if not self.redis_client.exists(session_key):
self.redis_client.delete(stop_key)
print(f"Cleaned up orphaned stop signal for session {session_id}")
# Clean up orphaned user mappings
user_keys = self.redis_client.keys("dnsrecon:user:*")
for user_key in user_keys:
session_id_bytes = self.redis_client.get(user_key)
if session_id_bytes:
session_id = session_id_bytes.decode('utf-8')
session_key = self._get_session_key(session_id)
if not self.redis_client.exists(session_key):
self.redis_client.delete(user_key)
print(f"Cleaned up orphaned user mapping for session {session_id}")
# Also clean up socketio connection
with self.lock:
if session_id in self.active_socketio_connections:
del self.active_socketio_connections[session_id]
print(f"Cleaned up orphaned socketio for session {session_id}")
except Exception as e:
print(f"Error in cleanup loop: {e}")
time.sleep(300) # Sleep for 5 minutes
def list_active_sessions(self) -> List[Dict[str, Any]]:
"""List all active sessions for admin purposes."""
try:
session_keys = self.redis_client.keys("dnsrecon:session:*")
sessions = []
for session_key in session_keys:
session_id = session_key.decode('utf-8').split(':')[-1]
session_data = self._get_session_data(session_id)
if session_data:
scanner = session_data.get('scanner')
sessions.append({
'session_id': session_id,
'user_fingerprint': session_data.get('user_fingerprint', 'unknown'),
'client_ip': session_data.get('client_ip', 'unknown'),
'created_at': session_data.get('created_at'),
'last_activity': session_data.get('last_activity'),
'scanner_status': scanner.status if scanner else 'unknown',
'current_target': scanner.current_target if scanner else None
})
return sessions
except Exception as e:
print(f"ERROR: Failed to list active sessions: {e}")
return []
def get_statistics(self) -> Dict[str, Any]:
"""Get session manager statistics."""
try:
session_keys = self.redis_client.keys("dnsrecon:session:*")
user_keys = self.redis_client.keys("dnsrecon:user:*")
stop_keys = self.redis_client.keys("dnsrecon:stop:*")
active_sessions = len(session_keys)
unique_users = len(user_keys)
running_scans = 0
for session_key in session_keys:
@@ -531,46 +516,18 @@ class SessionManager:
return {
'total_active_sessions': active_sessions,
'unique_users': unique_users,
'running_scans': running_scans,
'total_stop_signals': len(stop_keys),
'average_sessions_per_user': round(active_sessions / unique_users, 2) if unique_users > 0 else 0,
'active_socketio_connections': len(self.active_socketio_connections)
}
except Exception as e:
print(f"ERROR: Failed to get statistics: {e}")
return {
'total_active_sessions': 0,
'unique_users': 0,
'running_scans': 0,
'total_stop_signals': 0,
'average_sessions_per_user': 0,
'active_socketio_connections': 0
}
def get_session_info(self, session_id: str) -> Dict[str, Any]:
"""Get detailed information about a specific session."""
try:
session_data = self._get_session_data(session_id)
if not session_data:
return {'error': 'Session not found'}
scanner = session_data.get('scanner')
return {
'session_id': session_id,
'user_fingerprint': session_data.get('user_fingerprint', 'unknown'),
'client_ip': session_data.get('client_ip', 'unknown'),
'user_agent': session_data.get('user_agent', 'unknown'),
'created_at': session_data.get('created_at'),
'last_activity': session_data.get('last_activity'),
'status': session_data.get('status'),
'scanner_status': scanner.status if scanner else 'unknown',
'current_target': scanner.current_target if scanner else None,
'session_age_minutes': round((time.time() - session_data.get('created_at', time.time())) / 60, 1)
}
except Exception as e:
print(f"ERROR: Failed to get session info for {session_id}: {e}")
return {'error': f'Failed to get session info: {str(e)}'}
# Global session manager instance
session_manager = SessionManager(session_timeout_minutes=60)
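For illustration, a rough end-to-end flow using the module-level instance; the scan step in the middle is assumed rather than shown:
session_id = session_manager.create_session(socketio=None)
scanner = session_manager.get_session(session_id)
if scanner:
    # ... run a scan, then persist the mutated scanner back to Redis ...
    session_manager.update_session_scanner(session_id, scanner)
print(session_manager.get_session_status_only(session_id))
session_manager.terminate_session(session_id)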


@@ -1,564 +0,0 @@
# dnsrecon/core/task_manager.py
import threading
import time
import uuid
from enum import Enum
from dataclasses import dataclass, field
from typing import Dict, List, Optional, Any, Set
from datetime import datetime, timezone, timedelta
from collections import deque
from utils.helpers import _is_valid_ip, _is_valid_domain
class TaskStatus(Enum):
"""Enumeration of task execution statuses."""
PENDING = "pending"
RUNNING = "running"
SUCCEEDED = "succeeded"
FAILED_RETRYING = "failed_retrying"
FAILED_PERMANENT = "failed_permanent"
CANCELLED = "cancelled"
class TaskType(Enum):
"""Enumeration of task types for provider queries."""
DOMAIN_QUERY = "domain_query"
IP_QUERY = "ip_query"
GRAPH_UPDATE = "graph_update"
@dataclass
class TaskResult:
"""Result of a task execution."""
success: bool
data: Optional[Any] = None
error: Optional[str] = None
metadata: Dict[str, Any] = field(default_factory=dict)
@dataclass
class ReconTask:
"""Represents a single reconnaissance task with retry logic."""
task_id: str
task_type: TaskType
target: str
provider_name: str
depth: int
status: TaskStatus = TaskStatus.PENDING
created_at: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
# Retry configuration
max_retries: int = 3
current_retry: int = 0
base_backoff_seconds: float = 1.0
max_backoff_seconds: float = 60.0
# Execution tracking
last_attempt_at: Optional[datetime] = None
next_retry_at: Optional[datetime] = None
execution_history: List[Dict[str, Any]] = field(default_factory=list)
# Results
result: Optional[TaskResult] = None
def __post_init__(self):
"""Initialize additional fields after creation."""
if not self.task_id:
self.task_id = str(uuid.uuid4())[:8]
def calculate_next_retry_time(self) -> datetime:
"""Calculate next retry time with exponential backoff and jitter."""
if self.current_retry >= self.max_retries:
return None
# Exponential backoff with jitter
backoff_time = min(
self.max_backoff_seconds,
self.base_backoff_seconds * (2 ** self.current_retry)
)
# Add jitter (±25%)
jitter = backoff_time * 0.25 * (0.5 - hash(self.task_id) % 1000 / 1000.0)
final_backoff = max(self.base_backoff_seconds, backoff_time + jitter)
return datetime.now(timezone.utc) + timedelta(seconds=final_backoff)
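As a rough illustration of the schedule this produces before jitter, with the default base of 1.0s and cap of 60.0s:
base, cap = 1.0, 60.0
for retry in range(6):
    print(retry, min(cap, base * (2 ** retry)))  # 1.0, 2.0, 4.0, 8.0, 16.0, 32.0 seconds
# Each value is then nudged by up to ±25% jitter and never drops below the base backoff.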
def should_retry(self) -> bool:
"""Determine if task should be retried based on status and retry count."""
if self.status != TaskStatus.FAILED_RETRYING:
return False
if self.current_retry >= self.max_retries:
return False
if self.next_retry_at and datetime.now(timezone.utc) < self.next_retry_at:
return False
return True
def mark_failed(self, error: str, metadata: Dict[str, Any] = None):
"""Mark task as failed and prepare for retry or permanent failure."""
self.current_retry += 1
self.last_attempt_at = datetime.now(timezone.utc)
# Record execution history
execution_record = {
'attempt': self.current_retry,
'timestamp': self.last_attempt_at.isoformat(),
'error': error,
'metadata': metadata or {}
}
self.execution_history.append(execution_record)
if self.current_retry >= self.max_retries:
self.status = TaskStatus.FAILED_PERMANENT
self.result = TaskResult(success=False, error=f"Permanent failure after {self.max_retries} attempts: {error}")
else:
self.status = TaskStatus.FAILED_RETRYING
self.next_retry_at = self.calculate_next_retry_time()
def mark_succeeded(self, data: Any = None, metadata: Dict[str, Any] = None):
"""Mark task as successfully completed."""
self.status = TaskStatus.SUCCEEDED
self.last_attempt_at = datetime.now(timezone.utc)
self.result = TaskResult(success=True, data=data, metadata=metadata or {})
# Record successful execution
execution_record = {
'attempt': self.current_retry + 1,
'timestamp': self.last_attempt_at.isoformat(),
'success': True,
'metadata': metadata or {}
}
self.execution_history.append(execution_record)
def get_summary(self) -> Dict[str, Any]:
"""Get task summary for progress reporting."""
return {
'task_id': self.task_id,
'task_type': self.task_type.value,
'target': self.target,
'provider': self.provider_name,
'status': self.status.value,
'current_retry': self.current_retry,
'max_retries': self.max_retries,
'created_at': self.created_at.isoformat(),
'last_attempt_at': self.last_attempt_at.isoformat() if self.last_attempt_at else None,
'next_retry_at': self.next_retry_at.isoformat() if self.next_retry_at else None,
'total_attempts': len(self.execution_history),
'has_result': self.result is not None
}
class TaskQueue:
"""Thread-safe task queue with retry logic and priority handling."""
def __init__(self, max_concurrent_tasks: int = 5):
"""Initialize task queue."""
self.max_concurrent_tasks = max_concurrent_tasks
self.tasks: Dict[str, ReconTask] = {}
self.pending_queue = deque()
self.retry_queue = deque()
self.running_tasks: Set[str] = set()
self._lock = threading.Lock()
self._stop_event = threading.Event()
def __getstate__(self):
"""Prepare TaskQueue for pickling by excluding unpicklable objects."""
state = self.__dict__.copy()
# Exclude the unpickleable '_lock' and '_stop_event' attributes
if '_lock' in state:
del state['_lock']
if '_stop_event' in state:
del state['_stop_event']
return state
def __setstate__(self, state):
"""Restore TaskQueue after unpickling by reconstructing threading objects."""
self.__dict__.update(state)
# Re-initialize the '_lock' and '_stop_event' attributes
self._lock = threading.Lock()
self._stop_event = threading.Event()
def add_task(self, task: ReconTask) -> str:
"""Add task to queue."""
with self._lock:
self.tasks[task.task_id] = task
self.pending_queue.append(task.task_id)
print(f"Added task {task.task_id}: {task.provider_name} query for {task.target}")
return task.task_id
def get_next_ready_task(self) -> Optional[ReconTask]:
"""Get next task ready for execution."""
with self._lock:
# Check if we have room for more concurrent tasks
if len(self.running_tasks) >= self.max_concurrent_tasks:
return None
# First priority: retry queue (tasks ready for retry)
while self.retry_queue:
task_id = self.retry_queue.popleft()
if task_id in self.tasks:
task = self.tasks[task_id]
if task.should_retry():
task.status = TaskStatus.RUNNING
self.running_tasks.add(task_id)
print(f"Retrying task {task_id} (attempt {task.current_retry + 1})")
return task
# Second priority: pending queue (new tasks)
while self.pending_queue:
task_id = self.pending_queue.popleft()
if task_id in self.tasks:
task = self.tasks[task_id]
if task.status == TaskStatus.PENDING:
task.status = TaskStatus.RUNNING
self.running_tasks.add(task_id)
print(f"Starting task {task_id}")
return task
return None
def complete_task(self, task_id: str, success: bool, data: Any = None,
error: str = None, metadata: Dict[str, Any] = None):
"""Mark task as completed (success or failure)."""
with self._lock:
if task_id not in self.tasks:
return
task = self.tasks[task_id]
self.running_tasks.discard(task_id)
if success:
task.mark_succeeded(data=data, metadata=metadata)
print(f"Task {task_id} succeeded")
else:
task.mark_failed(error or "Unknown error", metadata=metadata)
if task.status == TaskStatus.FAILED_RETRYING:
self.retry_queue.append(task_id)
print(f"Task {task_id} failed, scheduled for retry at {task.next_retry_at}")
else:
print(f"Task {task_id} permanently failed after {task.current_retry} attempts")
def cancel_all_tasks(self):
"""Cancel all pending and running tasks."""
with self._lock:
self._stop_event.set()
for task in self.tasks.values():
if task.status in [TaskStatus.PENDING, TaskStatus.RUNNING, TaskStatus.FAILED_RETRYING]:
task.status = TaskStatus.CANCELLED
self.pending_queue.clear()
self.retry_queue.clear()
self.running_tasks.clear()
print("All tasks cancelled")
def is_complete(self) -> bool:
"""Check if all tasks are complete (succeeded, permanently failed, or cancelled)."""
with self._lock:
for task in self.tasks.values():
if task.status in [TaskStatus.PENDING, TaskStatus.RUNNING, TaskStatus.FAILED_RETRYING]:
return False
return True
def get_statistics(self) -> Dict[str, Any]:
"""Get queue statistics."""
with self._lock:
stats = {
'total_tasks': len(self.tasks),
'pending': len(self.pending_queue),
'running': len(self.running_tasks),
'retry_queue': len(self.retry_queue),
'succeeded': 0,
'failed_permanent': 0,
'cancelled': 0,
'failed_retrying': 0
}
for task in self.tasks.values():
if task.status == TaskStatus.SUCCEEDED:
stats['succeeded'] += 1
elif task.status == TaskStatus.FAILED_PERMANENT:
stats['failed_permanent'] += 1
elif task.status == TaskStatus.CANCELLED:
stats['cancelled'] += 1
elif task.status == TaskStatus.FAILED_RETRYING:
stats['failed_retrying'] += 1
stats['completion_rate'] = (stats['succeeded'] / stats['total_tasks'] * 100) if stats['total_tasks'] > 0 else 0
stats['is_complete'] = self.is_complete()
return stats
def get_task_summaries(self) -> List[Dict[str, Any]]:
"""Get summaries of all tasks for detailed progress reporting."""
with self._lock:
return [task.get_summary() for task in self.tasks.values()]
def get_failed_tasks(self) -> List[ReconTask]:
"""Get all permanently failed tasks for analysis."""
with self._lock:
return [task for task in self.tasks.values() if task.status == TaskStatus.FAILED_PERMANENT]
class TaskExecutor:
"""Executes reconnaissance tasks using providers."""
def __init__(self, providers: List, graph_manager, logger):
"""Initialize task executor."""
self.providers = {provider.get_name(): provider for provider in providers}
self.graph = graph_manager
self.logger = logger
def execute_task(self, task: ReconTask) -> TaskResult:
"""
Execute a single reconnaissance task.
Args:
task: Task to execute
Returns:
TaskResult with success/failure information
"""
try:
print(f"Executing task {task.task_id}: {task.provider_name} query for {task.target}")
provider = self.providers.get(task.provider_name)
if not provider:
return TaskResult(
success=False,
error=f"Provider {task.provider_name} not available"
)
if not provider.is_available():
return TaskResult(
success=False,
error=f"Provider {task.provider_name} is not available (missing API key or configuration)"
)
# Execute provider query based on task type
if task.task_type == TaskType.DOMAIN_QUERY:
if not _is_valid_domain(task.target):
return TaskResult(success=False, error=f"Invalid domain: {task.target}")
relationships = provider.query_domain(task.target)
elif task.task_type == TaskType.IP_QUERY:
if not _is_valid_ip(task.target):
return TaskResult(success=False, error=f"Invalid IP: {task.target}")
relationships = provider.query_ip(task.target)
else:
return TaskResult(success=False, error=f"Unsupported task type: {task.task_type}")
# Process results and update graph
new_targets = set()
relationships_added = 0
for source, target, rel_type, confidence, raw_data in relationships:
# Add nodes to graph
from core.graph_manager import NodeType
if _is_valid_ip(target):
self.graph.add_node(target, NodeType.IP)
new_targets.add(target)
elif target.startswith('AS') and target[2:].isdigit():
self.graph.add_node(target, NodeType.ASN)
elif _is_valid_domain(target):
self.graph.add_node(target, NodeType.DOMAIN)
new_targets.add(target)
# Add edge to graph
if self.graph.add_edge(source, target, rel_type, confidence, task.provider_name, raw_data):
relationships_added += 1
# Log forensic information
self.logger.logger.info(
f"Task {task.task_id} completed: {len(relationships)} relationships found, "
f"{relationships_added} added to graph, {len(new_targets)} new targets"
)
return TaskResult(
success=True,
data={
'relationships': relationships,
'new_targets': list(new_targets),
'relationships_added': relationships_added
},
metadata={
'provider': task.provider_name,
'target': task.target,
'depth': task.depth,
'execution_time': datetime.now(timezone.utc).isoformat()
}
)
except Exception as e:
error_msg = f"Task execution failed: {str(e)}"
print(f"ERROR: {error_msg} for task {task.task_id}")
self.logger.logger.error(error_msg)
return TaskResult(
success=False,
error=error_msg,
metadata={
'provider': task.provider_name,
'target': task.target,
'exception_type': type(e).__name__
}
)
class TaskManager:
"""High-level task management for reconnaissance scans."""
def __init__(self, providers: List, graph_manager, logger, max_concurrent_tasks: int = 5):
"""Initialize task manager."""
self.task_queue = TaskQueue(max_concurrent_tasks)
self.task_executor = TaskExecutor(providers, graph_manager, logger)
self.logger = logger
# Execution control
self._stop_event = threading.Event()
self._execution_threads: List[threading.Thread] = []
self._is_running = False
def create_provider_tasks(self, target: str, depth: int, providers: List) -> List[str]:
"""
Create tasks for querying all eligible providers for a target.
Args:
target: Domain or IP to query
depth: Current recursion depth
providers: List of available providers
Returns:
List of created task IDs
"""
task_ids = []
is_ip = _is_valid_ip(target)
target_key = 'ips' if is_ip else 'domains'
task_type = TaskType.IP_QUERY if is_ip else TaskType.DOMAIN_QUERY
for provider in providers:
if provider.get_eligibility().get(target_key) and provider.is_available():
task = ReconTask(
task_id=str(uuid.uuid4())[:8],
task_type=task_type,
target=target,
provider_name=provider.get_name(),
depth=depth,
max_retries=3 # Configure retries per task type/provider
)
task_id = self.task_queue.add_task(task)
task_ids.append(task_id)
return task_ids
def start_execution(self, max_workers: int = 3):
"""Start task execution with specified number of worker threads."""
if self._is_running:
print("Task execution already running")
return
self._is_running = True
self._stop_event.clear()
print(f"Starting task execution with {max_workers} workers")
for i in range(max_workers):
worker_thread = threading.Thread(
target=self._worker_loop,
name=f"TaskWorker-{i+1}",
daemon=True
)
worker_thread.start()
self._execution_threads.append(worker_thread)
def stop_execution(self):
"""Stop task execution and cancel all tasks."""
print("Stopping task execution")
self._stop_event.set()
self.task_queue.cancel_all_tasks()
self._is_running = False
# Wait for worker threads to finish
for thread in self._execution_threads:
thread.join(timeout=5.0)
self._execution_threads.clear()
print("Task execution stopped")
def _worker_loop(self):
"""Worker thread loop for executing tasks."""
thread_name = threading.current_thread().name
print(f"{thread_name} started")
while not self._stop_event.is_set():
try:
# Get next task to execute
task = self.task_queue.get_next_ready_task()
if task is None:
# No tasks ready, check if we should exit
if self.task_queue.is_complete() or self._stop_event.is_set():
break
time.sleep(0.1) # Brief sleep before checking again
continue
# Execute the task
result = self.task_executor.execute_task(task)
# Complete the task in queue
self.task_queue.complete_task(
task.task_id,
success=result.success,
data=result.data,
error=result.error,
metadata=result.metadata
)
except Exception as e:
print(f"ERROR: Worker {thread_name} encountered error: {e}")
# Continue running even if individual task fails
continue
print(f"{thread_name} finished")
def wait_for_completion(self, timeout_seconds: int = 300) -> bool:
"""
Wait for all tasks to complete.
Args:
timeout_seconds: Maximum time to wait
Returns:
True if all tasks completed, False if timeout
"""
start_time = time.time()
while time.time() - start_time < timeout_seconds:
if self.task_queue.is_complete():
return True
if self._stop_event.is_set():
return False
time.sleep(1.0) # Check every second
print(f"Timeout waiting for task completion after {timeout_seconds} seconds")
return False
def get_progress_report(self) -> Dict[str, Any]:
"""Get detailed progress report for UI updates."""
stats = self.task_queue.get_statistics()
failed_tasks = self.task_queue.get_failed_tasks()
return {
'statistics': stats,
'failed_tasks': [task.get_summary() for task in failed_tasks],
'is_running': self._is_running,
'worker_count': len(self._execution_threads),
'detailed_tasks': self.task_queue.get_task_summaries() if stats['total_tasks'] < 50 else [] # Limit detail for performance
}
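A hedged sketch of how the pieces above are meant to be wired together; the providers, graph_manager, and logger objects come from the rest of the application and are assumed here:

# Hypothetical wiring, not part of the diff:
manager = TaskManager(providers, graph_manager, logger, max_concurrent_tasks=5)
manager.create_provider_tasks('example.com', depth=0, providers=providers)
manager.start_execution(max_workers=3)
if manager.wait_for_completion(timeout_seconds=300):
    print(manager.get_progress_report()['statistics'])
manager.stop_execution()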

dump.rdb (binary file, not shown)

dnsrecon/providers/__init__.py

@@ -3,17 +3,20 @@ Data provider modules for DNSRecon.
Contains implementations for various reconnaissance data sources.
"""
from .base_provider import BaseProvider, RateLimiter
from .base_provider import BaseProvider
from .crtsh_provider import CrtShProvider
from .dns_provider import DNSProvider
from .shodan_provider import ShodanProvider
from .correlation_provider import CorrelationProvider
from core.rate_limiter import GlobalRateLimiter
__all__ = [
'BaseProvider',
'RateLimiter',
'GlobalRateLimiter',
'CrtShProvider',
'DNSProvider',
'ShodanProvider'
'ShodanProvider',
'CorrelationProvider'
]
__version__ = "0.0.0-rc"

dnsrecon/providers/base_provider.py

@@ -3,175 +3,24 @@
import time
import requests
import threading
import os
import json
import hashlib
from abc import ABC, abstractmethod
from typing import List, Dict, Any, Optional, Tuple
from datetime import datetime, timezone
from typing import Dict, Any, Optional
from core.logger import get_forensic_logger
class RateLimiter:
"""Thread-safe rate limiter for API calls."""
def __init__(self, requests_per_minute: int):
"""
Initialize rate limiter.
Args:
requests_per_minute: Maximum requests allowed per minute
"""
self.requests_per_minute = requests_per_minute
self.min_interval = 60.0 / requests_per_minute
self.last_request_time = 0
self._lock = threading.Lock()
def __getstate__(self):
"""RateLimiter is fully picklable, return full state."""
state = self.__dict__.copy()
# Exclude unpickleable lock
if '_lock' in state:
del state['_lock']
return state
def __setstate__(self, state):
"""Restore RateLimiter state."""
self.__dict__.update(state)
self._lock = threading.Lock()
def wait_if_needed(self) -> None:
"""Wait if necessary to respect rate limits."""
with self._lock:
current_time = time.time()
time_since_last = current_time - self.last_request_time
if time_since_last < self.min_interval:
sleep_time = self.min_interval - time_since_last
time.sleep(sleep_time)
self.last_request_time = time.time()
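A small usage sketch of the limiter above; at 30 requests per minute the enforced gap is 60/30 = 2 seconds:

limiter = RateLimiter(requests_per_minute=30)
limiter.wait_if_needed()  # first call returns immediately
limiter.wait_if_needed()  # second call sleeps roughly 2 seconds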
class ProviderCache:
"""Thread-safe global cache for provider queries."""
def __init__(self, provider_name: str, cache_expiry_hours: int = 12):
"""
Initialize provider-specific cache.
Args:
provider_name: Name of the provider for cache directory
cache_expiry_hours: Cache expiry time in hours
"""
self.provider_name = provider_name
self.cache_expiry = cache_expiry_hours * 3600 # Convert to seconds
self.cache_dir = os.path.join('.cache', provider_name)
self._lock = threading.Lock()
# Ensure cache directory exists with thread-safe creation
os.makedirs(self.cache_dir, exist_ok=True)
def _generate_cache_key(self, method: str, url: str, params: Optional[Dict[str, Any]]) -> str:
"""Generate unique cache key for request."""
cache_data = f"{method}:{url}:{json.dumps(params or {}, sort_keys=True)}"
return hashlib.md5(cache_data.encode()).hexdigest() + ".json"
def get_cached_response(self, method: str, url: str, params: Optional[Dict[str, Any]]) -> Optional[requests.Response]:
"""
Retrieve cached response if available and not expired.
Returns:
Cached Response object or None if cache miss/expired
"""
cache_key = self._generate_cache_key(method, url, params)
cache_path = os.path.join(self.cache_dir, cache_key)
with self._lock:
if not os.path.exists(cache_path):
return None
# Check if cache is expired
cache_age = time.time() - os.path.getmtime(cache_path)
if cache_age >= self.cache_expiry:
try:
os.remove(cache_path)
except OSError:
pass # File might have been removed by another thread
return None
try:
with open(cache_path, 'r', encoding='utf-8') as f:
cached_data = json.load(f)
# Reconstruct Response object
response = requests.Response()
response.status_code = cached_data['status_code']
response._content = cached_data['content'].encode('utf-8')
response.headers.update(cached_data['headers'])
return response
except (json.JSONDecodeError, KeyError, IOError) as e:
# Cache file corrupted, remove it
try:
os.remove(cache_path)
except OSError:
pass
return None
def cache_response(self, method: str, url: str, params: Optional[Dict[str, Any]],
response: requests.Response) -> bool:
"""
Cache successful response to disk.
Returns:
True if cached successfully, False otherwise
"""
if response.status_code != 200:
return False
cache_key = self._generate_cache_key(method, url, params)
cache_path = os.path.join(self.cache_dir, cache_key)
with self._lock:
try:
cache_data = {
'status_code': response.status_code,
'content': response.text,
'headers': dict(response.headers),
'cached_at': datetime.now(timezone.utc).isoformat()
}
# Write to temporary file first, then rename for atomic operation
temp_path = cache_path + '.tmp'
with open(temp_path, 'w', encoding='utf-8') as f:
json.dump(cache_data, f)
# Atomic rename to prevent partial cache files
os.rename(temp_path, cache_path)
return True
except (IOError, OSError) as e:
# Clean up temp file if it exists
try:
if os.path.exists(temp_path):
os.remove(temp_path)
except OSError:
pass
return False
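A hedged round-trip sketch for the cache above, reusing the module's requests import; the URL is illustrative only:

cache = ProviderCache('crtsh', cache_expiry_hours=12)
live = requests.get('https://crt.sh/?q=example.com&output=json')
if cache.cache_response('GET', 'https://crt.sh/?q=example.com&output=json', None, live):
    hit = cache.get_cached_response('GET', 'https://crt.sh/?q=example.com&output=json', None)
    assert hit is not None and hit.status_code == 200  # only 200s are cached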
from core.rate_limiter import GlobalRateLimiter
from core.provider_result import ProviderResult
class BaseProvider(ABC):
"""
Abstract base class for all DNSRecon data providers.
Now supports global provider-specific caching and session-specific configuration.
Now supports session-specific configuration and returns standardized ProviderResult objects.
FIXED: Enhanced pickle support to prevent weakref serialization errors.
"""
def __init__(self, name: str, rate_limit: int = 60, timeout: int = 30, session_config=None):
"""
Initialize base provider with global caching and session-specific configuration.
Initialize base provider with session-specific configuration.
Args:
name: Provider name for logging
@@ -188,50 +37,78 @@ class BaseProvider(ABC):
# Fallback to global config for backwards compatibility
from config import config as global_config
self.config = global_config
actual_rate_limit = rate_limit
actual_timeout = timeout
self.name = name
self.rate_limiter = RateLimiter(actual_rate_limit)
self.timeout = actual_timeout
self._local = threading.local()
self.logger = get_forensic_logger()
self._stop_event = None
# GLOBAL provider-specific caching (not session-based)
self.cache = ProviderCache(name, cache_expiry_hours=12)
# Statistics (per provider instance)
self.total_requests = 0
self.successful_requests = 0
self.failed_requests = 0
self.total_relationships_found = 0
self.cache_hits = 0
self.cache_misses = 0
print(f"Initialized {name} provider with global cache and session config (rate: {actual_rate_limit}/min)")
def __getstate__(self):
"""Prepare BaseProvider for pickling by excluding unpicklable objects."""
state = self.__dict__.copy()
# Exclude the unpickleable '_local' attribute and stop event
state['_local'] = None
state['_stop_event'] = None
# Exclude unpickleable attributes that may contain weakrefs
unpicklable_attrs = [
'_local', # Thread-local storage (contains requests.Session)
'_stop_event', # Threading event
'logger', # Logger may contain weakrefs in handlers
]
for attr in unpicklable_attrs:
if attr in state:
del state[attr]
# Also handle any potential weakrefs in the config object
if 'config' in state and hasattr(state['config'], '__getstate__'):
# If config has its own pickle support, let it handle itself
pass
elif 'config' in state:
# Otherwise, ensure config doesn't contain unpicklable objects
try:
# Test if config can be pickled
import pickle
pickle.dumps(state['config'])
except (TypeError, AttributeError):
# If config can't be pickled, we'll recreate it during unpickling
state['_config_class'] = type(state['config']).__name__
del state['config']
return state
def __setstate__(self, state):
"""Restore BaseProvider after unpickling by reconstructing threading objects."""
self.__dict__.update(state)
# Re-initialize the '_local' attribute and stop event
# Re-initialize unpickleable attributes
self._local = threading.local()
self._stop_event = None
self.logger = get_forensic_logger()
# Recreate config if it was removed during pickling
if not hasattr(self, 'config') and hasattr(self, '_config_class'):
if self._config_class == 'Config':
from config import config as global_config
self.config = global_config
elif self._config_class == 'SessionConfig':
from core.session_config import create_session_config
self.config = create_session_config()
del self._config_class
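A minimal sketch of the round trip this enables, assuming provider is any concrete BaseProvider subclass instance (not defined in this diff):

import pickle
blob = pickle.dumps(provider)      # __getstate__ drops _local, _stop_event, logger
restored = pickle.loads(blob)      # __setstate__ rebuilds them
restored.session                   # thread-local session recreated lazily on access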
@property
def session(self):
"""Get or create thread-local requests session."""
if not hasattr(self._local, 'session'):
self._local.session = requests.Session()
self._local.session.headers.update({
'User-Agent': 'DNSRecon/2.0 (Passive Reconnaissance Tool)'
'User-Agent': 'DNSRecon/1.0 (Passive Reconnaissance Tool)'
})
return self._local.session
@@ -261,7 +138,7 @@ class BaseProvider(ABC):
pass
@abstractmethod
def query_domain(self, domain: str) -> List[Tuple[str, str, str, float, Dict[str, Any]]]:
def query_domain(self, domain: str) -> ProviderResult:
"""
Query the provider for information about a domain.
@@ -269,12 +146,12 @@ class BaseProvider(ABC):
domain: Domain to investigate
Returns:
List of tuples: (source_node, target_node, relationship_type, confidence, raw_data)
ProviderResult containing standardized attributes and relationships
"""
pass
@abstractmethod
def query_ip(self, ip: str) -> List[Tuple[str, str, str, float, Dict[str, Any]]]:
def query_ip(self, ip: str) -> ProviderResult:
"""
Query the provider for information about an IP address.
@@ -282,52 +159,23 @@ class BaseProvider(ABC):
ip: IP address to investigate
Returns:
List of tuples: (source_node, target_node, relationship_type, confidence, raw_data)
ProviderResult containing standardized attributes and relationships
"""
pass
def make_request(self, url: str, method: str = "GET",
params: Optional[Dict[str, Any]] = None,
headers: Optional[Dict[str, str]] = None,
target_indicator: str = "",
max_retries: int = 3) -> Optional[requests.Response]:
target_indicator: str = "") -> Optional[requests.Response]:
"""
Make a rate-limited HTTP request with global caching and aggressive stop signal handling.
Make a rate-limited HTTP request.
FIXED: Returns response without automatically raising HTTPError exceptions.
Individual providers should handle status codes appropriately.
"""
# Check for cancellation before starting
if self._is_stop_requested():
print(f"Request cancelled before start: {url}")
return None
# Check global cache first
cached_response = self.cache.get_cached_response(method, url, params)
if cached_response is not None:
print(f"Cache hit for {self.name}: {url}")
self.cache_hits += 1
return cached_response
self.cache_misses += 1
# Determine effective max_retries based on stop signal
effective_max_retries = 0 if self._is_stop_requested() else max_retries
last_exception = None
for attempt in range(effective_max_retries + 1):
# Check for cancellation before each attempt
if self._is_stop_requested():
print(f"Request cancelled during attempt {attempt + 1}: {url}")
return None
# Apply rate limiting with cancellation awareness
if not self._wait_with_cancellation_check():
print(f"Request cancelled during rate limiting: {url}")
return None
# Final check before making HTTP request
if self._is_stop_requested():
print(f"Request cancelled before HTTP call: {url}")
return None
start_time = time.time()
response = None
error = None
@@ -335,39 +183,39 @@ class BaseProvider(ABC):
try:
self.total_requests += 1
# Prepare request
request_headers = self.session.headers.copy()
request_headers = dict(self.session.headers).copy()
if headers:
request_headers.update(headers)
print(f"Making {method} request to: {url} (attempt {attempt + 1})")
print(f"Making {method} request to: {url}")
# Use shorter timeout if termination is requested
request_timeout = 2 if self._is_stop_requested() else self.timeout
# Make request
if method.upper() == "GET":
response = self.session.get(
url,
params=params,
headers=request_headers,
timeout=request_timeout
timeout=self.timeout
)
elif method.upper() == "POST":
response = self.session.post(
url,
json=params,
headers=request_headers,
timeout=request_timeout
timeout=self.timeout
)
else:
raise ValueError(f"Unsupported HTTP method: {method}")
print(f"Response status: {response.status_code}")
response.raise_for_status()
self.successful_requests += 1
# Success - log, cache, and return
# FIXED: Don't automatically raise for HTTP error status codes
# Let individual providers handle status codes appropriately
# Only count 2xx responses as successful
if 200 <= response.status_code < 300:
self.successful_requests += 1
else:
self.failed_requests += 1
duration_ms = (time.time() - start_time) * 1000
self.logger.log_api_request(
provider=self.name,
@@ -380,46 +228,11 @@ class BaseProvider(ABC):
target_indicator=target_indicator
)
# Cache the successful response globally
self.cache.cache_response(method, url, params, response)
return response
except requests.exceptions.RequestException as e:
error = str(e)
self.failed_requests += 1
print(f"Request failed (attempt {attempt + 1}): {error}")
last_exception = e
# Immediately abort retries if stop requested
if self._is_stop_requested():
print(f"Stop requested - aborting retries for: {url}")
break
# Check if we should retry
if attempt < effective_max_retries and self._should_retry(e):
# Exponential backoff with jitter for 429 errors
if isinstance(e, requests.exceptions.HTTPError) and e.response and e.response.status_code == 429:
backoff_time = min(60, 10 * (2 ** attempt))
print(f"Rate limit hit. Retrying in {backoff_time} seconds...")
else:
backoff_time = min(2.0, (2 ** attempt) * 0.5)
print(f"Retrying in {backoff_time} seconds...")
if not self._sleep_with_cancellation_check(backoff_time):
print(f"Stop requested during backoff - aborting: {url}")
return None
continue
else:
break
except Exception as e:
error = f"Unexpected error: {str(e)}"
self.failed_requests += 1
print(f"Unexpected error: {error}")
last_exception = e
break
# All attempts failed - log and return None
duration_ms = (time.time() - start_time) * 1000
self.logger.log_api_request(
provider=self.name,
@@ -431,11 +244,7 @@ class BaseProvider(ABC):
error=error,
target_indicator=target_indicator
)
if error and last_exception:
raise last_exception
return None
raise e
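The removed retry path computed two different backoff schedules; for reference, the arithmetic works out as follows (a standalone sketch, not part of the diff):

for attempt in range(4):
    backoff_429 = min(60, 10 * (2 ** attempt))      # 429 rate limit: 10, 20, 40, 60 s
    backoff_other = min(2.0, (2 ** attempt) * 0.5)  # other transient errors: 0.5, 1.0, 2.0, 2.0 s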
def _is_stop_requested(self) -> bool:
"""
@@ -445,43 +254,6 @@ class BaseProvider(ABC):
return True
return False
def _wait_with_cancellation_check(self) -> bool:
"""
Wait for rate limiting while aggressively checking for cancellation.
Returns False if cancelled during wait.
"""
current_time = time.time()
time_since_last = current_time - self.rate_limiter.last_request_time
if time_since_last < self.rate_limiter.min_interval:
sleep_time = self.rate_limiter.min_interval - time_since_last
if not self._sleep_with_cancellation_check(sleep_time):
return False
self.rate_limiter.last_request_time = time.time()
return True
def _sleep_with_cancellation_check(self, sleep_time: float) -> bool:
"""
Sleep for the specified time while aggressively checking for cancellation.
Args:
sleep_time: Time to sleep in seconds
Returns:
bool: True if sleep completed, False if cancelled
"""
sleep_start = time.time()
check_interval = 0.05 # Check every 50ms for aggressive responsiveness
while time.time() - sleep_start < sleep_time:
if self._is_stop_requested():
return False
remaining_time = sleep_time - (time.time() - sleep_start)
time.sleep(min(check_interval, remaining_time))
return True
def set_stop_event(self, stop_event: threading.Event) -> None:
"""
Set the stop event for this provider to enable cancellation.
@@ -491,28 +263,6 @@ class BaseProvider(ABC):
"""
self._stop_event = stop_event
def _should_retry(self, exception: requests.exceptions.RequestException) -> bool:
"""
Determine if a request should be retried based on the exception.
Args:
exception: The request exception that occurred
Returns:
True if the request should be retried
"""
# Retry on connection errors and timeouts
if isinstance(exception, (requests.exceptions.ConnectionError,
requests.exceptions.Timeout)):
return True
if isinstance(exception, requests.exceptions.HTTPError):
if hasattr(exception, 'response') and exception.response:
# Retry on server errors (5xx) AND on rate-limiting errors (429)
return exception.response.status_code >= 500 or exception.response.status_code == 429
return False
def log_relationship_discovery(self, source_node: str, target_node: str,
relationship_type: str,
confidence_score: float,
@@ -543,7 +293,7 @@ class BaseProvider(ABC):
def get_statistics(self) -> Dict[str, Any]:
"""
Get provider statistics including cache performance.
Get provider statistics.
Returns:
Dictionary containing provider performance metrics
@@ -555,8 +305,5 @@ class BaseProvider(ABC):
'failed_requests': self.failed_requests,
'success_rate': (self.successful_requests / self.total_requests * 100) if self.total_requests > 0 else 0,
'relationships_found': self.total_relationships_found,
'rate_limit': self.rate_limiter.requests_per_minute,
'cache_hits': self.cache_hits,
'cache_misses': self.cache_misses,
'cache_hit_rate': (self.cache_hits / (self.cache_hits + self.cache_misses) * 100) if (self.cache_hits + self.cache_misses) > 0 else 0
'rate_limit': self.config.get_rate_limit(self.name)
}

dnsrecon/providers/correlation_provider.py

@@ -0,0 +1,220 @@
# dnsrecon/providers/correlation_provider.py
import re
from typing import Dict, Any, List
from .base_provider import BaseProvider
from core.provider_result import ProviderResult
from core.graph_manager import NodeType, GraphManager
class CorrelationProvider(BaseProvider):
"""
A provider that finds correlations between nodes in the graph.
FIXED: Enhanced pickle support to prevent weakref issues with graph references.
"""
def __init__(self, name: str = "correlation", session_config=None):
"""
Initialize the correlation provider.
"""
super().__init__(name, session_config=session_config)
self.graph: GraphManager | None = None
self.correlation_index = {}
self.date_pattern = re.compile(r'^\d{4}-\d{2}-\d{2}[ T]\d{2}:\d{2}:\d{2}')
self.EXCLUDED_KEYS = [
'cert_source',
'cert_issuer_ca_id',
'cert_common_name',
'cert_validity_period_days',
'cert_issuer_name',
'cert_serial_number',
'cert_entry_timestamp',
'cert_not_before',
'cert_not_after',
'dns_ttl',
'timestamp',
'last_update',
'updated_timestamp',
'discovery_timestamp',
'query_timestamp',
]
def __getstate__(self):
"""
FIXED: Prepare CorrelationProvider for pickling by excluding graph reference.
"""
state = super().__getstate__()
# Remove graph reference to prevent circular dependencies and weakrefs
if 'graph' in state:
del state['graph']
# Also handle correlation_index which might contain complex objects
if 'correlation_index' in state:
# Clear correlation index as it will be rebuilt when needed
state['correlation_index'] = {}
return state
def __setstate__(self, state):
"""
FIXED: Restore CorrelationProvider after unpickling.
"""
super().__setstate__(state)
# Re-initialize graph reference (will be set by scanner)
self.graph = None
# Re-initialize correlation index
self.correlation_index = {}
# Re-compile regex pattern
self.date_pattern = re.compile(r'^\d{4}-\d{2}-\d{2}[ T]\d{2}:\d{2}:\d{2}')
def get_name(self) -> str:
"""Return the provider name."""
return "correlation"
def get_display_name(self) -> str:
"""Return the provider display name for the UI."""
return "Correlation Engine"
def requires_api_key(self) -> bool:
"""Return True if the provider requires an API key."""
return False
def get_eligibility(self) -> Dict[str, bool]:
"""Return a dictionary indicating if the provider can query domains and/or IPs."""
return {'domains': True, 'ips': True}
def is_available(self) -> bool:
"""Check if the provider is available and properly configured."""
return True
def query_domain(self, domain: str) -> ProviderResult:
"""
Query the provider for information about a domain.
"""
return self._find_correlations(domain)
def query_ip(self, ip: str) -> ProviderResult:
"""
Query the provider for information about an IP address.
"""
return self._find_correlations(ip)
def set_graph_manager(self, graph_manager: GraphManager):
"""
Set the graph manager for the provider to use.
"""
self.graph = graph_manager
def _find_correlations(self, node_id: str) -> ProviderResult:
"""
Find correlations for a given node.
FIXED: Added safety checks to prevent issues when graph is None.
"""
result = ProviderResult()
# FIXED: Ensure self.graph is not None before proceeding
if not self.graph or not self.graph.graph.has_node(node_id):
return result
try:
node_attributes = self.graph.graph.nodes[node_id].get('attributes', [])
except Exception as e:
# If there's any issue accessing the graph, return empty result
print(f"Warning: Could not access graph for correlation analysis: {e}")
return result
for attr in node_attributes:
attr_name = attr.get('name')
attr_value = attr.get('value')
attr_provider = attr.get('provider', 'unknown')
should_exclude = (
any(excluded_key in attr_name or attr_name == excluded_key for excluded_key in self.EXCLUDED_KEYS) or
not isinstance(attr_value, (str, int, float, bool)) or
attr_value is None or
isinstance(attr_value, bool) or
(isinstance(attr_value, str) and (
len(attr_value) < 4 or
self.date_pattern.match(attr_value) or
attr_value.lower() in ['unknown', 'none', 'null', 'n/a', 'true', 'false', '0', '1']
)) or
(isinstance(attr_value, (int, float)) and (
attr_value == 0 or
attr_value == 1 or
abs(attr_value) > 1000000
))
)
if should_exclude:
continue
if attr_value not in self.correlation_index:
self.correlation_index[attr_value] = {
'nodes': set(),
'sources': []
}
self.correlation_index[attr_value]['nodes'].add(node_id)
source_info = {
'node_id': node_id,
'provider': attr_provider,
'attribute': attr_name,
'path': f"{attr_provider}_{attr_name}"
}
existing_sources = [s for s in self.correlation_index[attr_value]['sources']
if s['node_id'] == node_id and s['path'] == source_info['path']]
if not existing_sources:
self.correlation_index[attr_value]['sources'].append(source_info)
if len(self.correlation_index[attr_value]['nodes']) > 1:
self._create_correlation_relationships(attr_value, self.correlation_index[attr_value], result)
return result
def _create_correlation_relationships(self, value: Any, correlation_data: Dict[str, Any], result: ProviderResult):
"""
Create correlation relationships and add them to the provider result.
"""
correlation_node_id = f"corr_{hash(str(value)) & 0x7FFFFFFF}"
nodes = correlation_data['nodes']
sources = correlation_data['sources']
# Add the correlation node as an attribute to the result
result.add_attribute(
target_node=correlation_node_id,
name="correlation_value",
value=value,
attr_type=str(type(value)),
provider=self.name,
confidence=0.9,
metadata={
'correlated_nodes': list(nodes),
'sources': sources,
}
)
for source in sources:
node_id = source['node_id']
provider = source['provider']
attribute = source['attribute']
relationship_label = f"corr_{provider}_{attribute}"
# Add the relationship to the result
result.add_relationship(
source_node=node_id,
target_node=correlation_node_id,
relationship_type=relationship_label,
provider=self.name,
confidence=0.9,
raw_data={
'correlation_value': value,
'original_attribute': attribute,
'correlation_type': 'attribute_matching'
}
)
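A hedged sketch of the intended flow, assuming a GraphManager already populated elsewhere:

provider = CorrelationProvider()
provider.set_graph_manager(graph_manager)
provider.query_domain('a.example.com')           # first sighting only indexes values
result = provider.query_domain('b.example.com')  # a shared value now yields a corr_* node
print(result.get_relationship_count())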

dnsrecon/providers/crtsh_provider.py

@@ -1,27 +1,27 @@
"""
Certificate Transparency provider using crt.sh.
Discovers domain relationships through certificate SAN analysis with comprehensive certificate tracking.
Stores certificates as metadata on domain nodes rather than creating certificate nodes.
"""
# dnsrecon/providers/crtsh_provider.py
import json
import re
from typing import List, Dict, Any, Tuple, Set
from pathlib import Path
from typing import List, Dict, Any, Set, Optional
from urllib.parse import quote
from datetime import datetime, timezone
import requests
from .base_provider import BaseProvider
from core.provider_result import ProviderResult
from utils.helpers import _is_valid_domain
from core.logger import get_forensic_logger
class CrtShProvider(BaseProvider):
"""
Provider for querying crt.sh certificate transparency database.
Now uses session-specific configuration and caching.
FIXED: Now properly creates domain and CA nodes instead of large entities.
Returns standardized ProviderResult objects with caching support.
"""
def __init__(self, session_config=None):
def __init__(self, name=None, session_config=None):
"""Initialize CrtSh provider with session-specific configuration."""
super().__init__(
name="crtsh",
@@ -32,6 +32,13 @@ class CrtShProvider(BaseProvider):
self.base_url = "https://crt.sh/"
self._stop_event = None
# Initialize cache directory (separate from BaseProvider's HTTP cache)
self.domain_cache_dir = Path('cache') / 'crtsh'
self.domain_cache_dir.mkdir(parents=True, exist_ok=True)
# Compile regex for date filtering for efficiency
self.date_pattern = re.compile(r'^\d{4}-\d{2}-\d{2}[ T]\d{2}:\d{2}:\d{2}')
def get_name(self) -> str:
"""Return the provider name."""
return "crtsh"
@@ -49,100 +56,361 @@ class CrtShProvider(BaseProvider):
return {'domains': True, 'ips': False}
def is_available(self) -> bool:
"""
Check if the provider is configured to be used.
This method is intentionally simple and does not perform a network request
to avoid blocking application startup.
"""
"""Check if the provider is configured to be used."""
return True
def _parse_certificate_date(self, date_string: str) -> datetime:
    """
    Parse certificate date from crt.sh format.

    Args:
        date_string: Date string from crt.sh API

    Returns:
        Parsed datetime object in UTC
    """
    if not date_string:
        raise ValueError("Empty date string")
    try:
        # Handle various possible formats from crt.sh
        if date_string.endswith('Z'):
            return datetime.fromisoformat(date_string[:-1]).replace(tzinfo=timezone.utc)
        elif '+' in date_string or date_string.endswith('UTC'):
            # Handle timezone-aware strings
            date_string = date_string.replace('UTC', '').strip()
            if '+' in date_string:
                date_string = date_string.split('+')[0]
            return datetime.fromisoformat(date_string).replace(tzinfo=timezone.utc)
        else:
            # Assume UTC if no timezone specified
            return datetime.fromisoformat(date_string).replace(tzinfo=timezone.utc)
    except Exception as e:
        # Fallback: try parsing without timezone info and assume UTC
        try:
            return datetime.strptime(date_string[:19], "%Y-%m-%dT%H:%M:%S").replace(tzinfo=timezone.utc)
        except Exception:
            raise ValueError(f"Unable to parse date: {date_string}") from e

def _is_cert_valid(self, cert_data: Dict[str, Any]) -> bool:
    """
    Check if a certificate is currently valid based on its expiry date.

    Args:
        cert_data: Certificate data from crt.sh

    Returns:
        True if certificate is currently valid (not expired)
    """
    try:
        not_after_str = cert_data.get('not_after')
        if not not_after_str:
            return False
        not_after_date = self._parse_certificate_date(not_after_str)
        not_before_str = cert_data.get('not_before')
        now = datetime.now(timezone.utc)
        # Check if certificate is within valid date range
        is_not_expired = not_after_date > now
        if not_before_str:
            not_before_date = self._parse_certificate_date(not_before_str)
            is_not_before_valid = not_before_date <= now
            return is_not_expired and is_not_before_valid
        return is_not_expired
    except Exception as e:
        self.logger.logger.debug(f"Certificate validity check failed: {e}")
        return False

def _get_cache_file_path(self, domain: str) -> Path:
    """Generate cache file path for a domain."""
    safe_domain = domain.replace('.', '_').replace('/', '_').replace('\\', '_')
    return self.domain_cache_dir / f"{safe_domain}.json"

def _get_cache_status(self, cache_file_path: Path) -> str:
    """
    Check cache status for a domain.
    Returns: 'not_found', 'fresh', or 'stale'
    """
    if not cache_file_path.exists():
        return "not_found"
    try:
        with open(cache_file_path, 'r') as f:
            cache_data = json.load(f)
        last_query_str = cache_data.get("last_upstream_query")
        if not last_query_str:
            return "stale"
        last_query = datetime.fromisoformat(last_query_str.replace('Z', '+00:00'))
        hours_since_query = (datetime.now(timezone.utc) - last_query).total_seconds() / 3600
        cache_timeout = self.config.cache_timeout_hours
        if hours_since_query < cache_timeout:
            return "fresh"
        else:
            return "stale"
    except (json.JSONDecodeError, ValueError, KeyError) as e:
        self.logger.logger.warning(f"Invalid cache file format for {cache_file_path}: {e}")
        return "stale"

def query_domain(self, domain: str) -> ProviderResult:
    """
    FIXED: Query crt.sh for certificates containing the domain.
    Now properly creates domain and CA nodes instead of large entities.

    Args:
        domain: Domain to investigate

    Returns:
        ProviderResult containing discovered relationships and attributes
    """
if not _is_valid_domain(domain):
return ProviderResult()
if self._stop_event and self._stop_event.is_set():
return ProviderResult()
cache_file = self._get_cache_file_path(domain)
cache_status = self._get_cache_status(cache_file)
result = ProviderResult()
if cache_status == "fresh":
result = self._load_from_cache(cache_file)
self.logger.logger.info(f"Using fresh cached crt.sh data for {domain}")
else: # "stale" or "not_found"
# Query the API for the latest certificates
new_raw_certs = self._query_crtsh_api(domain)
if self._stop_event and self._stop_event.is_set():
return ProviderResult()
# Combine with old data if cache is stale
if cache_status == "stale":
old_raw_certs = self._load_raw_data_from_cache(cache_file)
combined_certs = old_raw_certs + new_raw_certs
# Deduplicate the combined list
seen_ids = set()
unique_certs = []
for cert in combined_certs:
cert_id = cert.get('id')
if cert_id not in seen_ids:
unique_certs.append(cert)
seen_ids.add(cert_id)
raw_certificates_to_process = unique_certs
self.logger.logger.info(f"Refreshed and merged cache for {domain}. Total unique certs: {len(raw_certificates_to_process)}")
else: # "not_found"
raw_certificates_to_process = new_raw_certs
# FIXED: Process certificates to create proper domain and CA nodes
result = self._process_certificates_to_result_fixed(domain, raw_certificates_to_process)
self.logger.logger.info(f"Created fresh result for {domain} ({result.get_relationship_count()} relationships)")
# Save the new result and the raw data to the cache
self._save_result_to_cache(cache_file, result, raw_certificates_to_process, domain)
return result
def query_ip(self, ip: str) -> ProviderResult:
"""
Query crt.sh for certificates containing the IP address.
Note: crt.sh doesn't typically index by IP, so this returns empty results.
Args:
ip: IP address to investigate
Returns:
Empty ProviderResult (crt.sh doesn't support IP-based certificate queries effectively)
"""
return ProviderResult()
def _load_from_cache(self, cache_file_path: Path) -> ProviderResult:
    """Load processed crt.sh data from a cache file."""
    try:
        with open(cache_file_path, 'r') as f:
            cache_content = json.load(f)
        result = ProviderResult()
        # Reconstruct relationships
        for rel_data in cache_content.get("relationships", []):
            result.add_relationship(
                source_node=rel_data["source_node"],
                target_node=rel_data["target_node"],
                relationship_type=rel_data["relationship_type"],
                provider=rel_data["provider"],
                confidence=rel_data["confidence"],
                raw_data=rel_data.get("raw_data", {})
            )
        # Reconstruct attributes
        for attr_data in cache_content.get("attributes", []):
            result.add_attribute(
                target_node=attr_data["target_node"],
                name=attr_data["name"],
                value=attr_data["value"],
                attr_type=attr_data["type"],
                provider=attr_data["provider"],
                confidence=attr_data["confidence"],
                metadata=attr_data.get("metadata", {})
            )
        return result
    except (json.JSONDecodeError, FileNotFoundError, KeyError) as e:
        self.logger.logger.error(f"Failed to load cached certificates from {cache_file_path}: {e}")
        return ProviderResult()
def _load_raw_data_from_cache(self, cache_file_path: Path) -> List[Dict[str, Any]]:
"""Load only the raw certificate data from a cache file."""
try:
with open(cache_file_path, 'r') as f:
cache_content = json.load(f)
return cache_content.get("raw_certificates", [])
except (json.JSONDecodeError, FileNotFoundError):
return []
def _save_result_to_cache(self, cache_file_path: Path, result: ProviderResult, raw_certificates: List[Dict[str, Any]], domain: str) -> None:
"""Save processed crt.sh result and raw data to a cache file."""
try:
cache_data = {
"domain": domain,
"last_upstream_query": datetime.now(timezone.utc).isoformat(),
"raw_certificates": raw_certificates, # Store the raw data for deduplication
"relationships": [
{
"source_node": rel.source_node,
"target_node": rel.target_node,
"relationship_type": rel.relationship_type,
"confidence": rel.confidence,
"provider": rel.provider,
"raw_data": rel.raw_data
} for rel in result.relationships
],
"attributes": [
{
"target_node": attr.target_node,
"name": attr.name,
"value": attr.value,
"type": attr.type,
"provider": attr.provider,
"confidence": attr.confidence,
"metadata": attr.metadata
} for attr in result.attributes
]
}
cache_file_path.parent.mkdir(parents=True, exist_ok=True)
with open(cache_file_path, 'w') as f:
json.dump(cache_data, f, separators=(',', ':'), default=str)
except Exception as e:
self.logger.logger.warning(f"Failed to save cache file for {domain}: {e}")
def _query_crtsh_api(self, domain: str) -> List[Dict[str, Any]]:
"""Query crt.sh API for raw certificate data."""
url = f"{self.base_url}?q={quote(domain)}&output=json"
response = self.make_request(url, target_indicator=domain)
if not response or response.status_code != 200:
raise requests.exceptions.RequestException(f"crt.sh API returned status {response.status_code if response else 'None'}")
try:
certificates = response.json()
except json.JSONDecodeError:
self.logger.logger.error(f"crt.sh returned invalid JSON for {domain}")
return []
if not certificates:
return []
return certificates
def _process_certificates_to_result_fixed(self, query_domain: str, certificates: List[Dict[str, Any]]) -> ProviderResult:
"""
FIXED: Process certificates to create proper domain and CA nodes.
Now creates individual domain nodes instead of large entities.
"""
result = ProviderResult()
if self._stop_event and self._stop_event.is_set():
self.logger.logger.info(f"CrtSh processing cancelled before processing for domain: {query_domain}")
return result
incompleteness_warning = self._check_for_incomplete_data(query_domain, certificates)
if incompleteness_warning:
result.add_attribute(
target_node=query_domain,
name="crtsh_data_warning",
value=incompleteness_warning,
attr_type='metadata',
provider=self.name,
confidence=1.0
)
all_discovered_domains = set()
processed_issuers = set()
for i, cert_data in enumerate(certificates):
if i % 10 == 0 and self._stop_event and self._stop_event.is_set():
self.logger.logger.info(f"CrtSh processing cancelled at certificate {i} for domain: {query_domain}")
break
# Extract all domains from this certificate
cert_domains = self._extract_domains_from_certificate(cert_data)
all_discovered_domains.update(cert_domains)
# FIXED: Create CA nodes for certificate issuers (not as domain metadata)
issuer_name = self._parse_issuer_organization(cert_data.get('issuer_name', ''))
if issuer_name and issuer_name not in processed_issuers:
# Create relationship from query domain to CA
result.add_relationship(
source_node=query_domain,
target_node=issuer_name,
relationship_type='crtsh_cert_issuer',
provider=self.name,
confidence=0.95,
raw_data={'issuer_dn': cert_data.get('issuer_name', '')}
)
processed_issuers.add(issuer_name)
# Add certificate metadata to each domain in this certificate
cert_metadata = self._extract_certificate_metadata(cert_data)
for cert_domain in cert_domains:
if not _is_valid_domain(cert_domain):
continue
# Add certificate attributes to the domain
for key, value in cert_metadata.items():
if value is not None:
result.add_attribute(
target_node=cert_domain,
name=f"cert_{key}",
value=value,
attr_type='certificate_data',
provider=self.name,
confidence=0.9,
metadata={'certificate_id': cert_data.get('id')}
)
if self._stop_event and self._stop_event.is_set():
self.logger.logger.info(f"CrtSh query cancelled before relationship creation for domain: {query_domain}")
return result
# FIXED: Create selective relationships to avoid large entities
# Only create relationships to domains that are closely related
for discovered_domain in all_discovered_domains:
if discovered_domain == query_domain:
continue
if not _is_valid_domain(discovered_domain):
continue
# FIXED: Only create relationships for domains that share a meaningful connection
# This prevents creating too many relationships that trigger large entity creation
if self._should_create_relationship(query_domain, discovered_domain):
confidence = self._calculate_domain_relationship_confidence(
query_domain, discovered_domain, [], all_discovered_domains
)
result.add_relationship(
source_node=query_domain,
target_node=discovered_domain,
relationship_type='crtsh_san_certificate',
provider=self.name,
confidence=confidence,
raw_data={'relationship_type': 'certificate_discovery'}
)
self.log_relationship_discovery(
source_node=query_domain,
target_node=discovered_domain,
relationship_type='crtsh_san_certificate',
confidence_score=confidence,
raw_data={'relationship_type': 'certificate_discovery'},
discovery_method="certificate_transparency_analysis"
)
self.logger.logger.info(f"CrtSh processing completed for {query_domain}: {len(all_discovered_domains)} domains, {result.get_relationship_count()} relationships")
return result
def _should_create_relationship(self, source_domain: str, target_domain: str) -> bool:
"""
FIXED: Determine if a relationship should be created between two domains.
This helps avoid creating too many relationships that trigger large entity creation.
"""
# Always create relationships for subdomains
if target_domain.endswith(f'.{source_domain}') or source_domain.endswith(f'.{target_domain}'):
return True
# Create relationships for domains that share a common parent (up to 2 levels)
source_parts = source_domain.split('.')
target_parts = target_domain.split('.')
# Check if they share the same root domain (last 2 parts)
if len(source_parts) >= 2 and len(target_parts) >= 2:
source_root = '.'.join(source_parts[-2:])
target_root = '.'.join(target_parts[-2:])
return source_root == target_root
return False
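Concretely, the filter above behaves as follows; the values are illustrative, and note that the naive last-two-labels split treats multi-label public suffixes such as co.uk as a shared root:

# p is a CrtShProvider instance (illustrative)
assert p._should_create_relationship('example.com', 'api.example.com')   # subdomain
assert p._should_create_relationship('a.example.com', 'b.example.com')   # shared root 'example.com'
assert not p._should_create_relationship('example.com', 'other.net')     # unrelated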
def _extract_certificate_metadata(self, cert_data: Dict[str, Any]) -> Dict[str, Any]:
"""
Extract comprehensive metadata from certificate data.
"""Extract comprehensive metadata from certificate data."""
raw_issuer_name = cert_data.get('issuer_name', '')
parsed_issuer_name = self._parse_issuer_organization(raw_issuer_name)
Args:
cert_data: Raw certificate data from crt.sh
Returns:
Comprehensive certificate metadata dictionary
"""
metadata = {
'certificate_id': cert_data.get('id'),
'serial_number': cert_data.get('serial_number'),
'issuer_name': cert_data.get('issuer_name'),
'issuer_name': parsed_issuer_name,
'issuer_ca_id': cert_data.get('issuer_ca_id'),
'common_name': cert_data.get('common_name'),
'not_before': cert_data.get('not_before'),
'not_after': cert_data.get('not_after'),
'entry_timestamp': cert_data.get('entry_timestamp'),
'source': 'crt.sh'
'source': 'crtsh'
}
try:
@@ -154,9 +422,9 @@ class CrtShProvider(BaseProvider):
metadata['is_currently_valid'] = self._is_cert_valid(cert_data)
metadata['expires_soon'] = (not_after - datetime.now(timezone.utc)).days <= 30
# Add human-readable dates
metadata['not_before'] = not_before.strftime('%Y-%m-%d %H:%M:%S UTC')
metadata['not_after'] = not_after.strftime('%Y-%m-%d %H:%M:%S UTC')
# Keep raw date format or convert to standard format
metadata['not_before'] = not_before.isoformat()
metadata['not_after'] = not_after.isoformat()
except Exception as e:
self.logger.logger.debug(f"Error computing certificate metadata: {e}")
@@ -165,328 +433,75 @@ class CrtShProvider(BaseProvider):
return metadata
def query_domain(self, domain: str) -> List[Tuple[str, str, str, float, Dict[str, Any]]]:
    """
    Query crt.sh for certificates containing the domain.
    """
    if not _is_valid_domain(domain):
        return []
    # Check for cancellation before starting
    if self._stop_event and self._stop_event.is_set():
        print(f"CrtSh query cancelled before start for domain: {domain}")
        return []
    relationships = []
    try:
        # Query crt.sh for certificates
        url = f"{self.base_url}?q={quote(domain)}&output=json"
        response = self.make_request(url, target_indicator=domain, max_retries=3)
        if not response or response.status_code != 200:
            return []
        # Check for cancellation after request
        if self._stop_event and self._stop_event.is_set():
            print(f"CrtSh query cancelled after request for domain: {domain}")
            return []
        certificates = response.json()
        if not certificates:
            return []
        # Check for cancellation before processing
        if self._stop_event and self._stop_event.is_set():
            print(f"CrtSh query cancelled before processing for domain: {domain}")
            return []
        # Aggregate certificate data by domain
        domain_certificates = {}
        all_discovered_domains = set()
        # Process certificates with cancellation checking
        for i, cert_data in enumerate(certificates):
            # Check for cancellation every 5 certificates instead of 10 for faster response
            if i % 5 == 0 and self._stop_event and self._stop_event.is_set():
                print(f"CrtSh processing cancelled at certificate {i} for domain: {domain}")
                break
            cert_metadata = self._extract_certificate_metadata(cert_data)
            cert_domains = self._extract_domains_from_certificate(cert_data)
            # Add all domains from this certificate to our tracking
            for cert_domain in cert_domains:
                # Additional stop check during domain processing
                if i % 20 == 0 and self._stop_event and self._stop_event.is_set():
                    print(f"CrtSh domain processing cancelled for domain: {domain}")
                    break
                if not _is_valid_domain(cert_domain):
                    continue
                all_discovered_domains.add(cert_domain)
                # Initialize domain certificate list if needed
                if cert_domain not in domain_certificates:
                    domain_certificates[cert_domain] = []
                # Add this certificate to the domain's certificate list
                domain_certificates[cert_domain].append(cert_metadata)
        # Final cancellation check before creating relationships
        if self._stop_event and self._stop_event.is_set():
            print(f"CrtSh query cancelled before relationship creation for domain: {domain}")
            return []
        # Create relationships from query domain to ALL discovered domains with stop checking
        for i, discovered_domain in enumerate(all_discovered_domains):
            if discovered_domain == domain:
                continue  # Skip self-relationships
            # Check for cancellation every 10 relationships
            if i % 10 == 0 and self._stop_event and self._stop_event.is_set():
                print(f"CrtSh relationship creation cancelled for domain: {domain}")
                break
            if not _is_valid_domain(discovered_domain):
                continue
            # Get certificates for both domains
            query_domain_certs = domain_certificates.get(domain, [])
            discovered_domain_certs = domain_certificates.get(discovered_domain, [])
            # Find shared certificates (for metadata purposes)
            shared_certificates = self._find_shared_certificates(query_domain_certs, discovered_domain_certs)
            # Calculate confidence based on relationship type and shared certificates
            confidence = self._calculate_domain_relationship_confidence(
                domain, discovered_domain, shared_certificates, all_discovered_domains
            )
            # Create comprehensive raw data for the relationship
            relationship_raw_data = {
                'relationship_type': 'certificate_discovery',
                'shared_certificates': shared_certificates,
                'total_shared_certs': len(shared_certificates),
                'discovery_context': self._determine_relationship_context(discovered_domain, domain),
                'domain_certificates': {
                    domain: self._summarize_certificates(query_domain_certs),
                    discovered_domain: self._summarize_certificates(discovered_domain_certs)
                }
            }
            # Create domain -> domain relationship
            relationships.append((
                domain,
                discovered_domain,
                'san_certificate',
                confidence,
                relationship_raw_data
            ))
            # Log the relationship discovery
            self.log_relationship_discovery(
                source_node=domain,
                target_node=discovered_domain,
                relationship_type='san_certificate',
                confidence_score=confidence,
                raw_data=relationship_raw_data,
                discovery_method="certificate_transparency_analysis"
            )
    except json.JSONDecodeError as e:
        self.logger.logger.error(f"Failed to parse JSON response from crt.sh: {e}")
    except requests.exceptions.RequestException as e:
        self.logger.logger.error(f"HTTP request to crt.sh failed: {e}")
    return relationships

def _find_shared_certificates(self, certs1: List[Dict[str, Any]], certs2: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """
    Find certificates that are shared between two domain certificate lists.

    Args:
        certs1: First domain's certificates
        certs2: Second domain's certificates

    Returns:
        List of shared certificate metadata
    """
    shared = []
    # Create a set of certificate IDs from the first list for quick lookup
    cert1_ids = {cert.get('certificate_id') for cert in certs1 if cert.get('certificate_id')}
    # Find certificates in the second list that match
    for cert in certs2:
        if cert.get('certificate_id') in cert1_ids:
            shared.append(cert)
    return shared

def _summarize_certificates(self, certificates: List[Dict[str, Any]]) -> Dict[str, Any]:
    """
    Create a summary of certificates for a domain.

    Args:
        certificates: List of certificate metadata

    Returns:
        Summary dictionary with aggregate statistics
    """
    if not certificates:
        return {
            'total_certificates': 0,
            'valid_certificates': 0,
            'expired_certificates': 0,
            'expires_soon_count': 0,
            'unique_issuers': [],
            'latest_certificate': None,
            'has_valid_cert': False
        }
    valid_count = sum(1 for cert in certificates if cert.get('is_currently_valid'))
    expired_count = len(certificates) - valid_count
    expires_soon_count = sum(1 for cert in certificates if cert.get('expires_soon'))
    # Get unique issuers
    unique_issuers = list(set(cert.get('issuer_name') for cert in certificates if cert.get('issuer_name')))
    # Find the most recent certificate
    latest_cert = None
    latest_date = None
    for cert in certificates:
        try:
            if cert.get('not_before'):
                cert_date = self._parse_certificate_date(cert['not_before'])
                if latest_date is None or cert_date > latest_date:
                    latest_date = cert_date
                    latest_cert = cert
        except Exception:
            continue
    return {
        'total_certificates': len(certificates),
        'valid_certificates': valid_count,
        'expired_certificates': expired_count,
        'expires_soon_count': expires_soon_count,
        'unique_issuers': unique_issuers,
        'latest_certificate': latest_cert,
        'has_valid_cert': valid_count > 0,
        'certificate_details': certificates  # Full details for forensic analysis
    }

def _calculate_domain_relationship_confidence(self, domain1: str, domain2: str,
                                              shared_certificates: List[Dict[str, Any]],
                                              all_discovered_domains: Set[str]) -> float:
    """
    Calculate confidence score for domain relationship based on various factors.

    Args:
        domain1: Source domain (query domain)
        domain2: Target domain (discovered domain)
        shared_certificates: List of shared certificate metadata
        all_discovered_domains: All domains discovered in this query

    Returns:
        Confidence score between 0.0 and 1.0
    """
    base_confidence = 0.9
    # Adjust confidence based on domain relationship context
    relationship_context = self._determine_relationship_context(domain2, domain1)
    if relationship_context == 'exact_match':
        context_bonus = 0.0  # This shouldn't happen, but just in case
    elif relationship_context == 'subdomain':
        context_bonus = 0.1  # High confidence for subdomains
    elif relationship_context == 'parent_domain':
        context_bonus = 0.05  # Medium confidence for parent domains
    else:
        context_bonus = 0.0  # Related domains get base confidence
    # Adjust confidence based on shared certificates
    if shared_certificates:
        shared_count = len(shared_certificates)
        if shared_count >= 3:
            shared_bonus = 0.1
        elif shared_count >= 2:
            shared_bonus = 0.05
        else:
            shared_bonus = 0.02
        # Additional bonus for valid shared certificates
        valid_shared = sum(1 for cert in shared_certificates if cert.get('is_currently_valid'))
        if valid_shared > 0:
            validity_bonus = 0.05
        else:
            validity_bonus = 0.0
    else:
        # Even without shared certificates, domains found in the same query have some relationship
        shared_bonus = 0.0
        validity_bonus = 0.0
    # Adjust confidence based on certificate issuer reputation (if shared certificates exist)
    issuer_bonus = 0.0
    if shared_certificates:
        for cert in shared_certificates:
            issuer = cert.get('issuer_name', '').lower()
            if any(trusted_ca in issuer for trusted_ca in ['let\'s encrypt', 'digicert', 'sectigo', 'globalsign']):
                issuer_bonus = max(issuer_bonus, 0.03)
                break
    # Calculate final confidence
    final_confidence = base_confidence + context_bonus + shared_bonus + validity_bonus + issuer_bonus
    return max(0.1, min(1.0, final_confidence))  # Clamp between 0.1 and 1.0

def _determine_relationship_context(self, cert_domain: str, query_domain: str) -> str:
    """
    Determine the context of the relationship between certificate domain and query domain.

    Args:
        cert_domain: Domain found in certificate
        query_domain: Original query domain

    Returns:
        String describing the relationship context
    """
    if cert_domain == query_domain:
        return 'exact_match'
    elif cert_domain.endswith(f'.{query_domain}'):
        return 'subdomain'
    elif query_domain.endswith(f'.{cert_domain}'):
        return 'parent_domain'
    else:
        return 'related_domain'

def query_ip(self, ip: str) -> List[Tuple[str, str, str, float, Dict[str, Any]]]:
    """
    Query crt.sh for certificates containing the IP address.
    Note: crt.sh doesn't typically index by IP, so this returns empty results.

    Args:
        ip: IP address to investigate

    Returns:
        Empty list (crt.sh doesn't support IP-based certificate queries effectively)
    """
    # crt.sh doesn't effectively support IP-based certificate queries
    return []

def _parse_issuer_organization(self, issuer_dn: str) -> str:
    """Parse the issuer Distinguished Name to extract just the organization name."""
    if not issuer_dn:
        return issuer_dn
    try:
        components = [comp.strip() for comp in issuer_dn.split(',')]
        for component in components:
            if component.startswith('O='):
                org_name = component[2:].strip()
                if org_name.startswith('"') and org_name.endswith('"'):
                    org_name = org_name[1:-1]
                return org_name
        return issuer_dn
    except Exception as e:
        self.logger.logger.debug(f"Failed to parse issuer DN '{issuer_dn}': {e}")
        return issuer_dn

def _parse_certificate_date(self, date_string: str) -> datetime:
    """Parse certificate date from crt.sh format."""
    if not date_string:
        raise ValueError("Empty date string")
    try:
        if isinstance(date_string, datetime):
            return date_string.replace(tzinfo=timezone.utc)
        if date_string.endswith('Z'):
            return datetime.fromisoformat(date_string[:-1]).replace(tzinfo=timezone.utc)
        elif '+' in date_string or date_string.endswith('UTC'):
            date_string = date_string.replace('UTC', '').strip()
            if '+' in date_string:
                date_string = date_string.split('+')[0]
            return datetime.fromisoformat(date_string).replace(tzinfo=timezone.utc)
        else:
            return datetime.fromisoformat(date_string).replace(tzinfo=timezone.utc)
    except Exception as e:
        try:
            return datetime.strptime(date_string[:19], "%Y-%m-%dT%H:%M:%S").replace(tzinfo=timezone.utc)
        except Exception:
            raise ValueError(f"Unable to parse date: {date_string}") from e

def _is_cert_valid(self, cert_data: Dict[str, Any]) -> bool:
    """Check if a certificate is currently valid based on its expiry date."""
    try:
        not_after_str = cert_data.get('not_after')
        if not not_after_str:
            return False
        not_after_date = self._parse_certificate_date(not_after_str)
        not_before_str = cert_data.get('not_before')
        now = datetime.now(timezone.utc)
        is_not_expired = not_after_date > now
        if not_before_str:
            not_before_date = self._parse_certificate_date(not_before_str)
            is_not_before_valid = not_before_date <= now
            return is_not_expired and is_not_before_valid
        return is_not_expired
    except Exception as e:
        return False
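Illustrative inputs and outputs for the issuer DN parsing above (example values only):

# 'C=US, O=DigiCert Inc, CN=DigiCert SHA2'  -> 'DigiCert Inc'
# 'C=US, O="Example CA", CN=Example R1'     -> 'Example CA'   (quotes stripped)
# 'CN=NoOrgHere'                            -> 'CN=NoOrgHere' (no O= component, returned unchanged)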
def _extract_domains_from_certificate(self, cert_data: Dict[str, Any]) -> Set[str]:
"""
Extract all domains from certificate data.
Args:
cert_data: Certificate data from crt.sh API
Returns:
Set of unique domain names found in the certificate
"""
"""Extract all domains from certificate data."""
domains = set()
# Extract from common name
@@ -499,7 +514,6 @@ class CrtShProvider(BaseProvider):
# Extract from name_value field (contains SANs)
name_value = cert_data.get('name_value', '')
if name_value:
# Split by newlines and clean each domain
for line in name_value.split('\n'):
cleaned_domains = self._clean_domain_name(line.strip())
if cleaned_domains:
@@ -508,37 +522,28 @@ class CrtShProvider(BaseProvider):
return domains
def _clean_domain_name(self, domain_name: str) -> List[str]:
"""
Clean and normalize domain name from certificate data.
Now returns a list to handle wildcards correctly.
"""
"""Clean and normalize domain name from certificate data."""
if not domain_name:
return []
domain = domain_name.strip().lower()
# Remove protocol if present
if domain.startswith(('http://', 'https://')):
domain = domain.split('://', 1)[1]
# Remove path if present
if '/' in domain:
domain = domain.split('/', 1)[0]
# Remove port if present
if ':' in domain and not domain.count(':') > 1: # Avoid breaking IPv6
if ':' in domain and not domain.count(':') > 1:
domain = domain.split(':', 1)[0]
# Handle wildcard domains
cleaned_domains = []
if domain.startswith('*.'):
# Add both the wildcard and the base domain
cleaned_domains.append(domain)
cleaned_domains.append(domain[2:])
else:
cleaned_domains.append(domain)
# Remove any remaining invalid characters and validate
final_domains = []
for d in cleaned_domains:
d = re.sub(r'[^\w\-\.]', '', d)
@@ -546,3 +551,61 @@ class CrtShProvider(BaseProvider):
final_domains.append(d)
return [d for d in final_domains if _is_valid_domain(d)]
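Illustrative behaviour of _clean_domain_name, assuming _is_valid_domain accepts plain hostnames and rejects dot-prefixed remnants:

# 'https://www.example.com/path' -> ['www.example.com']
# 'mail.example.com:443'         -> ['mail.example.com']
# '*.example.com'                -> wildcard plus base domain; after the character
#     filter strips '*', only 'example.com' is expected to survive validation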
def _calculate_domain_relationship_confidence(self, domain1: str, domain2: str,
shared_certificates: List[Dict[str, Any]],
all_discovered_domains: Set[str]) -> float:
"""Calculate confidence score for domain relationship based on various factors."""
base_confidence = 0.9
# Adjust confidence based on domain relationship context
relationship_context = self._determine_relationship_context(domain2, domain1)
if relationship_context == 'exact_match':
context_bonus = 0.0
elif relationship_context == 'subdomain':
context_bonus = 0.1
elif relationship_context == 'parent_domain':
context_bonus = 0.05
else:
context_bonus = 0.0
final_confidence = base_confidence + context_bonus
return max(0.1, min(1.0, final_confidence))
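Worked examples of the confidence arithmetic above:

# subdomain:     0.9 + 0.10 = 1.0
# parent domain: 0.9 + 0.05 = 0.95
# related:       0.9 + 0.00 = 0.9
# (clamped to the [0.1, 1.0] range in all cases)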
def _determine_relationship_context(self, cert_domain: str, query_domain: str) -> str:
"""Determine the context of the relationship between certificate domain and query domain."""
if cert_domain == query_domain:
return 'exact_match'
elif cert_domain.endswith(f'.{query_domain}'):
return 'subdomain'
elif query_domain.endswith(f'.{cert_domain}'):
return 'parent_domain'
else:
return 'related_domain'
def _check_for_incomplete_data(self, domain: str, certificates: List[Dict[str, Any]]) -> Optional[str]:
"""
Analyzes the certificate list to heuristically detect if the data from crt.sh is incomplete.
"""
cert_count = len(certificates)
# Heuristic 1: Check if the number of certs hits a known hard limit.
if cert_count >= 10000:
return f"Result likely truncated; received {cert_count} certificates, which may be the maximum limit."
# Heuristic 2: Check if all returned certificates are old.
if cert_count > 1000: # Only apply this for a reasonable number of certs
latest_expiry = None
for cert in certificates:
try:
not_after = self._parse_certificate_date(cert.get('not_after'))
if latest_expiry is None or not_after > latest_expiry:
latest_expiry = not_after
except (ValueError, TypeError):
continue
if latest_expiry and (datetime.now(timezone.utc) - latest_expiry).days > 365:
return f"Incomplete data suspected: The latest certificate expired more than a year ago ({latest_expiry.strftime('%Y-%m-%d')})."
return None
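A short sketch of when each heuristic fires (illustrative numbers):
# 12000 certificates                        -> heuristic 1: "Result likely truncated; ..."
# 2500 certificates, newest expired in 2022 -> heuristic 2: "Incomplete data suspected: ..."
# 300 certificates                          -> None (neither heuristic applies)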

View File

@@ -1,19 +1,20 @@
# dnsrecon/providers/dns_provider.py
from dns import resolver, reversename
from typing import Dict
from .base_provider import BaseProvider
from core.provider_result import ProviderResult
from utils.helpers import _is_valid_ip, _is_valid_domain, get_ip_version
class DNSProvider(BaseProvider):
"""
Provider for standard DNS resolution and reverse DNS lookups.
Now returns standardized ProviderResult objects with IPv4 and IPv6 support.
FIXED: Enhanced pickle support to prevent resolver serialization issues.
"""
def __init__(self, name=None, session_config=None):
"""Initialize DNS provider with session-specific configuration."""
super().__init__(
name="dns",
@@ -23,10 +24,25 @@ class DNSProvider(BaseProvider):
)
# Configure DNS resolver
self.resolver = resolver.Resolver()
self.resolver.timeout = 5
self.resolver.lifetime = 10
def __getstate__(self):
"""Prepare the object for pickling by excluding resolver."""
state = super().__getstate__()
# Remove the unpickleable 'resolver' attribute
if 'resolver' in state:
del state['resolver']
return state
def __setstate__(self, state):
"""Restore the object after unpickling by reconstructing resolver."""
super().__setstate__(state)
# Re-initialize the 'resolver' attribute
self.resolver = resolver.Resolver()
self.resolver.timeout = 5
self.resolver.lifetime = 10
#self.resolver.nameservers = ['127.0.0.1']
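A minimal round-trip sketch of the pickle support above, assuming a default-constructed provider (the resolver object is treated as unpicklable, hence the __getstate__/__setstate__ pair):
import pickle
from providers.dns_provider import DNSProvider  # module path assumed from the file header

p = DNSProvider()
blob = pickle.dumps(p)          # __getstate__ drops the resolver first
restored = pickle.loads(blob)   # __setstate__ rebuilds a fresh resolver
assert restored.resolver.timeout == 5 and restored.resolver.lifetime == 10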
def get_name(self) -> str:
"""Return the provider name."""
@@ -48,97 +64,149 @@ class DNSProvider(BaseProvider):
"""DNS is always available - no API key required."""
return True
def query_domain(self, domain: str) -> ProviderResult:
"""
Query DNS records for the domain to discover relationships and attributes.
FIXED: Now creates separate attributes for each DNS record type.
Args:
domain: Domain to investigate
Returns:
ProviderResult containing discovered relationships and attributes
"""
if not _is_valid_domain(domain):
return ProviderResult()
result = ProviderResult()
# Query all record types - each gets its own attribute
for record_type in ['A', 'AAAA', 'CNAME', 'MX', 'NS', 'SOA', 'TXT', 'SRV', 'CAA']:
try:
self._query_record(domain, record_type, result)
#except resolver.NoAnswer:
# This is not an error, just a confirmation that the record doesn't exist.
#self.logger.logger.debug(f"No {record_type} record found for {domain}")
except Exception as e:
self.failed_requests += 1
self.logger.logger.debug(f"{record_type} record query failed for {domain}: {e}")
return result
def query_ip(self, ip: str) -> ProviderResult:
"""
Query reverse DNS for the IP address (supports both IPv4 and IPv6).
Args:
ip: IP address to investigate (IPv4 or IPv6)
Returns:
ProviderResult containing discovered relationships and attributes
"""
if not _is_valid_ip(ip):
return ProviderResult()
result = ProviderResult()
ip_version = get_ip_version(ip)
try:
# Perform reverse DNS lookup (works for both IPv4 and IPv6)
self.total_requests += 1
reverse_name = reversename.from_address(ip)
response = self.resolver.resolve(reverse_name, 'PTR')
self.successful_requests += 1
ptr_records = []
for ptr_record in response:
hostname = str(ptr_record).rstrip('.')
if _is_valid_domain(hostname):
# PTR relationships use 'dns_ptr_record' for both IPv4 and IPv6;
# the IP version is recorded in raw_data.
result.add_relationship(
source_node=ip,
target_node=hostname,
relationship_type='dns_ptr_record',
provider=self.name,
confidence=0.8,
raw_data={
'query_type': 'PTR',
'ip_address': ip,
'ip_version': ip_version,
'hostname': hostname,
'ttl': response.ttl
}
)
# Add to PTR records list
ptr_records.append(f"PTR: {hostname}")
# Log the relationship discovery
self.log_relationship_discovery(
source_node=ip,
target_node=hostname,
relationship_type='dns_ptr_record',
confidence_score=0.8,
raw_data={
'query_type': 'PTR',
'ip_address': ip,
'ip_version': ip_version,
'hostname': hostname,
'ttl': response.ttl
},
discovery_method=f"reverse_dns_lookup_ipv{ip_version}"
)
# Add PTR records as separate attribute
if ptr_records:
result.add_attribute(
target_node=ip,
name='ptr_records', # Specific name for PTR records
value=ptr_records,
attr_type='dns_record',
provider=self.name,
confidence=0.8,
metadata={'ttl': response.ttl, 'ip_version': ip_version}
)
except resolver.NXDOMAIN:
self.failed_requests += 1
self.logger.logger.debug(f"Reverse DNS lookup failed for {ip}: NXDOMAIN")
except Exception as e:
self.failed_requests += 1
self.logger.logger.debug(f"Reverse DNS lookup failed for {ip}: {e}")
# Re-raise the exception so the scanner can handle the failure
raise
return result
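For orientation, a hedged sketch of what a successful reverse lookup produces (field names follow the calls above; the ProviderResult internals are assumed):
# result = DNSProvider().query_ip('8.8.8.8')
# result.relationships -> one 'dns_ptr_record' edge, e.g. 8.8.8.8 -> dns.google (confidence 0.8)
# result.attributes    -> one 'ptr_records' attribute on the IP node: ['PTR: dns.google']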
def _query_record(self, domain: str, record_type: str, result: ProviderResult) -> None:
"""
Query a specific type of DNS record for the domain.
FIXED: Unique attribute names for each record type; enhanced to better handle IPv6 AAAA records.
"""
try:
self.total_requests += 1
response = self.resolver.resolve(domain, record_type)
self.successful_requests += 1
dns_records = []
for record in response:
target = ""
if record_type in ['A', 'AAAA']:
target = str(record)
# Validate that the IP address is properly formed
if not _is_valid_ip(target):
self.logger.logger.debug(f"Invalid IP address in {record_type} record: {target}")
continue
elif record_type in ['CNAME', 'NS', 'PTR']:
target = str(record.target).rstrip('.')
elif record_type == 'MX':
@@ -146,32 +214,56 @@ class DNSProvider(BaseProvider):
elif record_type == 'SOA':
target = str(record.mname).rstrip('.')
elif record_type in ['TXT']:
# TXT records are treated as metadata, not relationships.
# Keep raw TXT record value
txt_value = str(record).strip('"')
dns_records.append(txt_value) # Just the value for TXT
continue
elif record_type == 'SRV':
target = str(record.target).rstrip('.')
elif record_type == 'CAA':
target = f"{record.flags} {record.tag.decode('utf-8')} \"{record.value.decode('utf-8')}\""
# Keep raw CAA record format
caa_value = f"{record.flags} {record.tag.decode('utf-8')} \"{record.value.decode('utf-8')}\""
dns_records.append(caa_value) # Just the value for CAA
continue
else:
target = str(record)
if target:
# Determine IP version for metadata if this is an IP record
ip_version = None
if record_type in ['A', 'AAAA'] and _is_valid_ip(target):
ip_version = get_ip_version(target)
raw_data = {
'query_type': record_type,
'domain': domain,
'value': target,
'ttl': response.ttl
}
relationship_type = f"{record_type.lower()}_record"
confidence = 0.8 # Default confidence for DNS records
relationships.append((
domain,
target,
relationship_type,
confidence,
raw_data
))
if ip_version:
raw_data['ip_version'] = ip_version
relationship_type = f"dns_{record_type.lower()}_record"
confidence = 0.8
# Add relationship
result.add_relationship(
source_node=domain,
target_node=target,
relationship_type=relationship_type,
provider=self.name,
confidence=confidence,
raw_data=raw_data
)
# Add target to records list
dns_records.append(target)
# Log relationship discovery with IP version info
discovery_method = f"dns_{record_type.lower()}_record"
if ip_version:
discovery_method += f"_ipv{ip_version}"
self.log_relationship_discovery(
source_node=domain,
@@ -179,11 +271,33 @@ class DNSProvider(BaseProvider):
relationship_type=relationship_type,
confidence_score=confidence,
raw_data=raw_data,
discovery_method=f"dns_{record_type.lower()}_record"
discovery_method=discovery_method
)
# FIXED: Create attribute with specific name for each record type
if dns_records:
# Use record type specific attribute name (e.g., 'a_records', 'mx_records', etc.)
attribute_name = f"{record_type.lower()}_records"
metadata = {'record_type': record_type, 'ttl': response.ttl}
# Add IP version info for A/AAAA records
if record_type in ['A', 'AAAA'] and dns_records:
first_ip_version = get_ip_version(dns_records[0])
if first_ip_version:
metadata['ip_version'] = first_ip_version
result.add_attribute(
target_node=domain,
name=attribute_name, # UNIQUE name for each record type!
value=dns_records,
attr_type='dns_record_list',
provider=self.name,
confidence=0.8,
metadata=metadata
)
except Exception as e:
self.failed_requests += 1
self.logger.logger.debug(f"{record_type} record query failed for {domain}: {e}")
raise
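Because each record type gets its own attribute name, one domain node can carry several independent lists; an illustrative shape (values hypothetical):
# Attributes attached to the 'example.com' node:
#   a_records    = ['93.184.216.34']
#   aaaa_records = ['2606:2800:21f:cb07:6820:80da:af6b:8b2c']
#   mx_records   = ['mail.example.com']
#   txt_records  = ['v=spf1 -all']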

View File

@@ -1,21 +1,23 @@
"""
Shodan provider for DNSRecon.
Discovers IP relationships and infrastructure context through Shodan API.
"""
# dnsrecon/providers/shodan_provider.py
import json
from typing import List, Dict, Any, Tuple
from pathlib import Path
from typing import Dict, Any
from datetime import datetime, timezone
import requests
from .base_provider import BaseProvider
from utils.helpers import _is_valid_ip, _is_valid_domain
from core.provider_result import ProviderResult
from utils.helpers import _is_valid_ip, _is_valid_domain, get_ip_version, normalize_ip
class ShodanProvider(BaseProvider):
"""
Provider for querying Shodan API for IP address information.
Now returns standardized ProviderResult objects with caching support for IPv4 and IPv6.
"""
def __init__(self, name=None, session_config=None):
"""Initialize Shodan provider with session-specific configuration."""
super().__init__(
name="shodan",
@@ -26,9 +28,61 @@ class ShodanProvider(BaseProvider):
self.base_url = "https://api.shodan.io"
self.api_key = self.config.get_api_key('shodan')
# FIXED: Don't fail initialization on connection issues - defer to actual usage
self._connection_tested = False
self._connection_works = False
# Initialize cache directory
self.cache_dir = Path('cache') / 'shodan'
self.cache_dir.mkdir(parents=True, exist_ok=True)
def __getstate__(self):
"""Prepare the object for pickling."""
state = super().__getstate__()
return state
def __setstate__(self, state):
"""Restore the object after unpickling."""
super().__setstate__(state)
def _check_api_connection(self) -> bool:
"""
FIXED: Lazy connection checking - only test when actually needed.
Don't block provider initialization on network issues.
"""
if self._connection_tested:
return self._connection_works
if not self.api_key:
self._connection_tested = True
self._connection_works = False
return False
try:
print(f"Testing Shodan API connection with key: {self.api_key[:8]}...")
response = self.session.get(f"{self.base_url}/api-info?key={self.api_key}", timeout=5)
self._connection_works = response.status_code == 200
print(f"Shodan API test result: {response.status_code} - {'Success' if self._connection_works else 'Failed'}")
except requests.exceptions.RequestException as e:
print(f"Shodan API connection test failed: {e}")
self._connection_works = False
finally:
self._connection_tested = True
return self._connection_works
def is_available(self) -> bool:
"""Check if Shodan provider is available (has valid API key in this session)."""
return self.api_key is not None and len(self.api_key.strip()) > 0
"""
FIXED: Check if Shodan provider is available based on API key presence.
Don't require successful connection test during initialization.
"""
has_api_key = self.api_key is not None and len(self.api_key.strip()) > 0
if not has_api_key:
return False
# FIXED: Only test connection on first actual usage, not during initialization
return True
def get_name(self) -> str:
"""Return the provider name."""
@@ -36,7 +90,7 @@ class ShodanProvider(BaseProvider):
def get_display_name(self) -> str:
"""Return the provider display name for the UI."""
return "shodan"
return "Shodan"
def requires_api_key(self) -> bool:
"""Return True if the provider requires an API key."""
@@ -44,267 +98,371 @@ class ShodanProvider(BaseProvider):
def get_eligibility(self) -> Dict[str, bool]:
"""Return a dictionary indicating if the provider can query domains and/or IPs."""
return {'domains': False, 'ips': True}
def _get_cache_file_path(self, ip: str) -> Path:
"""
Generate cache file path for an IP address (IPv4 or IPv6).
IPv6 addresses contain colons, which are replaced with underscores for filesystem safety.
"""
# Normalize the IP address first to ensure consistent caching
normalized_ip = normalize_ip(ip)
if not normalized_ip:
# Fallback for invalid IPs
safe_ip = ip.replace('.', '_').replace(':', '_')
else:
# Replace problematic characters for both IPv4 and IPv6
safe_ip = normalized_ip.replace('.', '_').replace(':', '_')
return self.cache_dir / f"{safe_ip}.json"
def _get_cache_status(self, cache_file_path: Path) -> str:
"""
Check cache status for an IP.
Returns: 'not_found', 'fresh', or 'stale'
"""
if not cache_file_path.exists():
return "not_found"
try:
with open(cache_file_path, 'r') as f:
cache_data = json.load(f)
last_query_str = cache_data.get("last_upstream_query")
if not last_query_str:
return "stale"
last_query = datetime.fromisoformat(last_query_str.replace('Z', '+00:00'))
hours_since_query = (datetime.now(timezone.utc) - last_query).total_seconds() / 3600
cache_timeout = self.config.cache_timeout_hours
if hours_since_query < cache_timeout:
return "fresh"
else:
return "stale"
except (json.JSONDecodeError, ValueError, KeyError):
return "stale"
def query_domain(self, domain: str) -> ProviderResult:
"""
Shodan does not support domain queries. This method returns an empty result.
"""
return ProviderResult()
def query_ip(self, ip: str) -> ProviderResult:
"""
Query Shodan for information about an IP address (IPv4 or IPv6), with caching of processed data.
FIXED: Proper 404 handling to prevent unnecessary retries.
Args:
ip: IP address to investigate (IPv4 or IPv6)
Returns:
ProviderResult containing discovered relationships and attributes
Raises:
Exception: For temporary failures that should be retried (timeouts, 502/503 errors, connection issues)
"""
if not _is_valid_ip(ip):
return ProviderResult()
# Test connection only when actually making requests
if not self._check_api_connection():
print(f"Shodan API not available for {ip} - API key: {'present' if self.api_key else 'missing'}")
return ProviderResult()
# Normalize IP address for consistent processing
normalized_ip = normalize_ip(ip)
if not normalized_ip:
return ProviderResult()
cache_file = self._get_cache_file_path(normalized_ip)
cache_status = self._get_cache_status(cache_file)
if cache_status == "fresh":
self.logger.logger.debug(f"Using fresh cache for Shodan query: {normalized_ip}")
return self._load_from_cache(cache_file)
# Need to query API
self.logger.logger.debug(f"Querying Shodan API for: {normalized_ip}")
url = f"{self.base_url}/shodan/host/{normalized_ip}"
params = {'key': self.api_key}
response = self.make_request(url, method="GET", params=params, target_indicator=ip)
try:
response = self.make_request(url, method="GET", params=params, target_indicator=normalized_ip)
if response and response.status_code == 200:
if not response:
self.logger.logger.warning(f"Shodan API unreachable for {normalized_ip} - network failure")
if cache_status == "stale":
self.logger.logger.info(f"Using stale cache for {normalized_ip} due to network failure")
return self._load_from_cache(cache_file)
else:
# FIXED: Treat network failures as "no information" rather than retryable errors
self.logger.logger.info(f"No Shodan data available for {normalized_ip} due to network failure")
result = ProviderResult() # Empty result
network_failure_data = {'shodan_status': 'network_unreachable', 'error': 'API unreachable'}
self._save_to_cache(cache_file, result, network_failure_data)
return result
# FIXED: Handle different status codes more precisely
if response.status_code == 200:
self.logger.logger.debug(f"Shodan returned data for {normalized_ip}")
try:
data = response.json()
result = self._process_shodan_data(normalized_ip, data)
self._save_to_cache(cache_file, result, data)
return result
except json.JSONDecodeError as e:
self.logger.logger.error(f"Invalid JSON response from Shodan for {normalized_ip}: {e}")
if cache_status == "stale":
return self._load_from_cache(cache_file)
else:
raise requests.exceptions.RequestException("Invalid JSON response from Shodan - should retry")
elif response.status_code == 404:
# FIXED: 404 = "no information available" - successful but empty result, don't retry
self.logger.logger.debug(f"Shodan has no information for {normalized_ip} (404)")
result = ProviderResult() # Empty but successful result
# Cache the empty result to avoid repeated queries
empty_data = {'shodan_status': 'no_information', 'status_code': 404}
self._save_to_cache(cache_file, result, empty_data)
return result
elif response.status_code in [401, 403]:
# Authentication/authorization errors - permanent failures, don't retry
self.logger.logger.error(f"Shodan API authentication failed for {normalized_ip} (HTTP {response.status_code})")
return ProviderResult() # Empty result, don't retry
elif response.status_code == 429:
# Rate limiting - should be handled by rate limiter, but if we get here, retry
self.logger.logger.warning(f"Shodan API rate limited for {normalized_ip} (HTTP {response.status_code})")
if cache_status == "stale":
self.logger.logger.info(f"Using stale cache for {normalized_ip} due to rate limiting")
return self._load_from_cache(cache_file)
else:
raise requests.exceptions.RequestException(f"Shodan API rate limited (HTTP {response.status_code}) - should retry")
elif response.status_code in [500, 502, 503, 504]:
# Server errors - temporary failures that should be retried
self.logger.logger.warning(f"Shodan API server error for {normalized_ip} (HTTP {response.status_code})")
if cache_status == "stale":
self.logger.logger.info(f"Using stale cache for {normalized_ip} due to server error")
return self._load_from_cache(cache_file)
else:
raise requests.exceptions.RequestException(f"Shodan API server error (HTTP {response.status_code}) - should retry")
else:
# FIXED: Other HTTP status codes - treat as no information available, don't retry
self.logger.logger.info(f"Shodan returned status {response.status_code} for {normalized_ip} - treating as no information")
result = ProviderResult() # Empty result
no_info_data = {'shodan_status': 'no_information', 'status_code': response.status_code}
self._save_to_cache(cache_file, result, no_info_data)
return result
except requests.exceptions.Timeout:
# Timeout errors - should be retried
self.logger.logger.warning(f"Shodan API timeout for {normalized_ip}")
if cache_status == "stale":
self.logger.logger.info(f"Using stale cache for {normalized_ip} due to timeout")
return self._load_from_cache(cache_file)
else:
raise # Re-raise timeout for retry
except requests.exceptions.ConnectionError:
# Connection errors - should be retried
self.logger.logger.warning(f"Shodan API connection error for {normalized_ip}")
if cache_status == "stale":
self.logger.logger.info(f"Using stale cache for {normalized_ip} due to connection error")
return self._load_from_cache(cache_file)
else:
raise # Re-raise connection error for retry
except json.JSONDecodeError:
# JSON parsing error - treat as temporary failure
self.logger.logger.error(f"Invalid JSON response from Shodan for {normalized_ip}")
if cache_status == "stale":
self.logger.logger.info(f"Using stale cache for {normalized_ip} due to JSON parsing error")
return self._load_from_cache(cache_file)
else:
raise requests.exceptions.RequestException("Invalid JSON response from Shodan - should retry")
# FIXED: Remove the generic RequestException handler that was causing 404s to retry
# Now only specific exceptions that should be retried are re-raised
except Exception as e:
self.logger.logger.error(f"Error getting Shodan services for IP {ip}: {e}")
# FIXED: Unexpected exceptions - log but treat as no information available, don't retry
self.logger.logger.warning(f"Unexpected exception in Shodan query for {normalized_ip}: {e}")
result = ProviderResult() # Empty result
error_data = {'shodan_status': 'error', 'error': str(e)}
self._save_to_cache(cache_file, result, error_data)
return result
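To summarize the retry policy encoded above (the decisions live in the status-code branches of query_ip):
# HTTP status  -> action
# 200          -> process data, write cache
# 404          -> cache empty result, never retry
# 401/403      -> permanent auth failure, never retry
# 429          -> stale cache if present, else raise (retry)
# 5xx          -> stale cache if present, else raise (retry)
# timeout/conn -> stale cache if present, else re-raise (retry)
# other        -> cache empty result, no retry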
def _load_from_cache(self, cache_file_path: Path) -> ProviderResult:
"""Load processed Shodan data from a cache file."""
try:
with open(cache_file_path, 'r') as f:
cache_content = json.load(f)
result = ProviderResult()
# Reconstruct relationships
for rel_data in cache_content.get("relationships", []):
result.add_relationship(
source_node=rel_data["source_node"],
target_node=rel_data["target_node"],
relationship_type=rel_data["relationship_type"],
provider=rel_data["provider"],
confidence=rel_data["confidence"],
raw_data=rel_data.get("raw_data", {})
)
# Reconstruct attributes
for attr_data in cache_content.get("attributes", []):
result.add_attribute(
target_node=attr_data["target_node"],
name=attr_data["name"],
value=attr_data["value"],
attr_type=attr_data["type"],
provider=attr_data["provider"],
confidence=attr_data["confidence"],
metadata=attr_data.get("metadata", {})
)
return result
except (json.JSONDecodeError, FileNotFoundError, KeyError):
return ProviderResult()
def _save_to_cache(self, cache_file_path: Path, result: ProviderResult, raw_data: Dict[str, Any]) -> None:
"""Save processed Shodan data to a cache file."""
try:
cache_data = {
"last_upstream_query": datetime.now(timezone.utc).isoformat(),
"raw_data": raw_data, # Preserve original for forensic purposes
"relationships": [
{
"source_node": rel.source_node,
"target_node": rel.target_node,
"relationship_type": rel.relationship_type,
"confidence": rel.confidence,
"provider": rel.provider,
"raw_data": rel.raw_data
} for rel in result.relationships
],
"attributes": [
{
"target_node": attr.target_node,
"name": attr.name,
"value": attr.value,
"type": attr.type,
"provider": attr.provider,
"confidence": attr.confidence,
"metadata": attr.metadata
} for attr in result.attributes
]
}
with open(cache_file_path, 'w') as f:
json.dump(cache_data, f, separators=(',', ':'), default=str)
except Exception as e:
self.logger.logger.warning(f"Failed to save Shodan cache for {cache_file_path.name}: {e}")
def _process_shodan_data(self, ip: str, data: Dict[str, Any]) -> ProviderResult:
"""
VERIFIED: Process Shodan data creating ISP nodes with ASN attributes and proper relationships.
Enhanced to include IP version information for IPv6 addresses.
"""
result = ProviderResult()
# Determine IP version for metadata
ip_version = get_ip_version(ip)
# VERIFIED: Extract ISP information and create proper ISP node with ASN
isp_name = data.get('org')
asn_value = data.get('asn')
if isp_name and asn_value:
# Create relationship from IP to ISP
result.add_relationship(
source_node=ip,
target_node=isp_name,
relationship_type='shodan_isp',
provider=self.name,
confidence=0.9,
raw_data={'asn': asn_value, 'shodan_org': isp_name, 'ip_version': ip_version}
)
# Add ASN as attribute to the ISP node
result.add_attribute(
target_node=isp_name,
name='asn',
value=asn_value,
attr_type='isp_info',
provider=self.name,
confidence=0.9,
metadata={'description': 'Autonomous System Number from Shodan', 'ip_version': ip_version}
)
# Also add organization name as attribute to ISP node for completeness
result.add_attribute(
target_node=isp_name,
name='organization_name',
value=isp_name,
attr_type='isp_info',
provider=self.name,
confidence=0.9,
metadata={'description': 'Organization name from Shodan', 'ip_version': ip_version}
)
# Process hostnames (reverse DNS)
for key, value in data.items():
if key == 'hostnames':
for hostname in value:
if _is_valid_domain(hostname):
# Use appropriate relationship type based on IP version
if ip_version == 6:
relationship_type = 'shodan_aaaa_record'
else:
relationship_type = 'shodan_a_record'
result.add_relationship(
source_node=ip,
target_node=hostname,
relationship_type=relationship_type,
provider=self.name,
confidence=0.8,
raw_data={**data, 'ip_version': ip_version}
)
self.log_relationship_discovery(
source_node=ip,
target_node=hostname,
relationship_type=relationship_type,
confidence_score=0.8,
raw_data={**data, 'ip_version': ip_version},
discovery_method=f"shodan_host_lookup_ipv{ip_version}"
)
elif key == 'ports':
# Add open ports as attributes to the IP
for port in value:
result.add_attribute(
target_node=ip,
name='shodan_open_port',
value=port,
attr_type='shodan_network_info',
provider=self.name,
confidence=0.9,
metadata={'ip_version': ip_version}
)
elif isinstance(value, (str, int, float, bool)) and value is not None:
# Add other Shodan fields as IP attributes (keep raw field names)
result.add_attribute(
target_node=ip,
name=key, # Raw field name from Shodan API
value=value,
attr_type='shodan_info',
provider=self.name,
confidence=0.9,
metadata={'ip_version': ip_version}
)
return result

View File

@@ -1,9 +1,13 @@
Flask>=2.3.3
networkx>=3.1
requests>=2.31.0
python-dateutil>=2.8.2
Werkzeug>=2.3.7
urllib3>=2.0.0
dnspython>=2.4.2
Flask
networkx
requests
python-dateutil
Werkzeug
urllib3
dnspython
gunicorn
redis
python-dotenv
psycopg2-binary
Flask-SocketIO
eventlet

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -1,14 +1,19 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>DNSRecon - Infrastructure Reconnaissance</title>
<link rel="stylesheet" href="{{ url_for('static', filename='css/main.css') }}">
<script src="https://cdnjs.cloudflare.com/ajax/libs/vis/4.21.0/vis.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/socket.io/4.7.2/socket.io.js"></script>
<link href="https://cdnjs.cloudflare.com/ajax/libs/vis/4.21.0/vis.min.css" rel="stylesheet" type="text/css">
<link href="https://fonts.googleapis.com/css2?family=Roboto+Mono:wght@300;400;500;700&family=Special+Elite&display=swap" rel="stylesheet">
<link
href="https://fonts.googleapis.com/css2?family=Roboto+Mono:wght@300;400;500;700&family=Special+Elite&display=swap"
rel="stylesheet">
</head>
<body>
<div class="container">
<header class="header">
@@ -32,19 +37,8 @@
<div class="form-container">
<div class="input-group">
<label for="target-domain">Target Domain</label>
<input type="text" id="target-domain" placeholder="example.com" autocomplete="off">
</div>
<div class="input-group">
<label for="max-depth">Recursion Depth</label>
<select id="max-depth">
<option value="1">Depth 1 - Direct relationships</option>
<option value="2" selected>Depth 2 - Recommended</option>
<option value="3">Depth 3 - Extended analysis</option>
<option value="4">Depth 4 - Deep reconnaissance</option>
<option value="5">Depth 5 - Maximum depth</option>
</select>
<label for="target-input">Target Domain or IP</label>
<input type="text" id="target-input" placeholder="example.com or 8.8.8.8" autocomplete="off">
</div>
<div class="button-group">
@@ -60,13 +54,13 @@
<span class="btn-icon">[STOP]</span>
<span>Terminate Scan</span>
</button>
<button id="export-results" class="btn btn-secondary">
<button id="export-options" class="btn btn-secondary">
<span class="btn-icon">[EXPORT]</span>
<span>Download Results</span>
<span>Export Options</span>
</button>
<button id="configure-api-keys" class="btn btn-secondary">
<button id="configure-settings" class="btn btn-secondary">
<span class="btn-icon">[API]</span>
<span>Configure API Keys</span>
<span>Settings</span>
</button>
</div>
</div>
@@ -90,54 +84,47 @@
<span class="status-label">Depth:</span>
<span id="depth-display" class="status-value">0/0</span>
</div>
<div class="status-row">
<span class="status-label">Progress:</span>
<span id="progress-display" class="status-value">0%</span>
</div>
<div class="status-row">
<span class="status-label">Indicators:</span>
<span id="indicators-display" class="status-value">0</span>
</div>
<div class="status-row">
<span class="status-label">Relationships:</span>
<span id="relationships-display" class="status-value">0</span>
</div>
</div>
<div class="progress-container">
<div class="progress-info">
<span id="progress-label">Progress:</span>
<span id="progress-compact">0/0</span>
</div>
<div class="progress-bar">
<div id="progress-fill" class="progress-fill"></div>
</div>
<div class="progress-placeholder">
<span class="status-label">
⚠️ <strong>Important:</strong> Scanning large public services (e.g., Google, Cloudflare,
AWS) is
<strong>discouraged</strong> due to rate limits (e.g., crt.sh).
<br><br>
Our task scheduler operates on a <strong>priority-based queue</strong>:
Short, targeted tasks like DNS are processed first, while resource-intensive requests (e.g.,
crt.sh)
are <strong>automatically deprioritized</strong> and may be processed later.
</span>
</div>
</div>
</section>
<section class="visualization-panel">
<div class="panel-header">
<h2>Infrastructure Map</h2>
<div class="view-controls">
<div class="filter-group">
<label for="node-type-filter">Node Type:</label>
<select id="node-type-filter">
<option value="all">All</option>
<option value="domain">Domain</option>
<option value="ip">IP</option>
<option value="asn">ASN</option>
<option value="correlation_object">Correlation Object</option>
<option value="large_entity">Large Entity</option>
</select>
</div>
<div class="filter-group">
<label for="confidence-filter">Min Confidence:</label>
<input type="range" id="confidence-filter" min="0" max="1" step="0.1" value="0">
<span id="confidence-value">0</span>
</div>
</div>
</div>
<div id="network-graph" class="graph-container">
<div class="graph-placeholder">
<div class="placeholder-content">
<div class="placeholder-icon">[]</div>
<div class="placeholder-icon">[]</div>
<div class="placeholder-text">Infrastructure map will appear here</div>
<div class="placeholder-subtext">Start a reconnaissance scan to visualize relationships</div>
<div class="placeholder-subtext">Start a reconnaissance scan to visualize relationships
</div>
</div>
</div>
</div>
@@ -147,29 +134,30 @@
<div class="legend-color" style="background-color: #00ff41;"></div>
<span>Domains</span>
</div>
<div class="legend-item">
<div class="legend-color" style="background-color: #c92f2f;"></div>
<span>Domain (no valid cert)</span>
</div>
<div class="legend-item">
<div class="legend-color" style="background-color: #c7c7c7;"></div>
<span>Domain (never had cert)</span>
</div>
<div class="legend-item">
<div class="legend-color" style="background-color: #ff9900;"></div>
<span>IP Addresses</span>
</div>
<div class="legend-item">
<div class="legend-color" style="background-color: #c7c7c7;"></div>
<span>Domain (invalid cert)</span>
</div>
<div class="legend-item">
<div class="legend-color" style="background-color: #9d4edd;"></div>
<span>Correlation Objects</span>
</div>
<div class="legend-item">
<div class="legend-edge high-confidence"></div>
<span>High Confidence</span>
</div>
<div class="legend-item">
<div class="legend-edge medium-confidence"></div>
<span>Medium Confidence</span>
<div class="legend-color" style="background-color: #00aaff;"></div>
<span>ISPs</span>
</div>
<div class="legend-item">
<div class="legend-color" style="background-color: #ff6b6b;"></div>
<span>Large Entity</span>
<span>Certificate Authorities</span>
</div>
<div class="legend-item">
<div class="legend-color" style="background-color: #9d4edd;"></div>
<span>Correlation Objects</span>
</div>
</div>
</section>
@@ -207,24 +195,72 @@
</div>
</div>
<div id="api-key-modal" class="modal">
<div id="settings-modal" class="modal">
<div class="modal-content">
<div class="modal-header">
<h3>Configure API Keys</h3>
<button id="api-key-modal-close" class="modal-close">[×]</button>
<h3>Scanner Configuration</h3>
<button id="settings-modal-close" class="modal-close">[×]</button>
</div>
<div class="modal-body">
<p class="modal-description">
Enter your API keys for enhanced data providers. Keys are stored in memory for the current session only and are never saved to disk.
<div class="modal-details">
<section class="modal-section">
<details open>
<summary>
<span>⚙️ Scan Settings</span>
</summary>
<div class="modal-section-content">
<div class="input-group">
<label for="max-depth">Recursion Depth</label>
<select id="max-depth">
<option value="1">Depth 1 - Direct relationships</option>
<option value="2" selected>Depth 2 - Recommended</option>
<option value="3">Depth 3 - Extended analysis</option>
<option value="4">Depth 4 - Deep reconnaissance</option>
<option value="5">Depth 5 - Maximum depth</option>
</select>
</div>
</div>
</details>
</section>
<section class="modal-section">
<details open>
<summary>
<span>🔧 Provider Configuration</span>
<span class="merge-badge" id="provider-count">0</span>
</summary>
<div class="modal-section-content">
<div id="provider-config-list">
</div>
</div>
</details>
</section>
<section class="modal-section">
<details>
<summary>
<span>🔑 API Keys</span>
<span class="merge-badge" id="api-key-count">0</span>
</summary>
<div class="modal-section-content">
<p class="placeholder-subtext" style="margin-bottom: 1rem;">
⚠️ API keys are stored in memory for the current session only.
Only provide API keys you don't use for anything else.
</p>
<div id="api-key-inputs">
</div>
<div class="button-group" style="flex-direction: row; justify-content: flex-end;">
<button id="reset-api-keys" class="btn btn-secondary">
<span>Reset</span>
</div>
</details>
</section>
<div class="button-group" style="margin-top: 1.5rem;">
<button id="save-settings" class="btn btn-primary">
<span class="btn-icon">[SAVE]</span>
<span>Save Configuration</span>
</button>
<button id="save-api-keys" class="btn btn-primary">
<span>Save Keys</span>
<button id="reset-settings" class="btn btn-secondary">
<span class="btn-icon">[RESET]</span>
<span>Reset to Defaults</span>
</button>
</div>
</div>
@@ -232,19 +268,55 @@
</div>
</div>
<script>
function copyToClipboard(elementId) {
const element = document.getElementById(elementId);
const textToCopy = element.innerText;
navigator.clipboard.writeText(textToCopy).then(() => {
// Optional: Show a success message
console.log('Copied to clipboard');
}).catch(err => {
console.error('Failed to copy: ', err);
});
}
</script>
<div id="export-modal" class="modal">
<div class="modal-content">
<div class="modal-header">
<h3>Export Options</h3>
<button id="export-modal-close" class="modal-close">[×]</button>
</div>
<div class="modal-body">
<div class="modal-details">
<section class="modal-section">
<details open>
<summary>
<span>📊 Available Exports</span>
</summary>
<div class="modal-section-content">
<div class="button-group" style="margin-top: 1rem;">
<button id="export-graph-json" class="btn btn-primary">
<span class="btn-icon">[JSON]</span>
<span>Export Graph Data</span>
</button>
<div class="status-row" style="margin-top: 0.5rem;">
<span class="status-label">Complete graph data with forensic audit trail,
provider statistics, and scan metadata in JSON format for analysis and
archival.</span>
</div>
<button id="export-targets-txt" class="btn btn-primary" style="margin-top: 1rem;">
<span class="btn-icon">[TXT]</span>
<span>Export Targets</span>
</button>
<div class="status-row" style="margin-top: 0.5rem;">
<span class="status-label">A simple text file containing all discovered domains and IP addresses.</span>
</div>
<button id="export-executive-summary" class="btn btn-primary" style="margin-top: 1rem;">
<span class="btn-icon">[TXT]</span>
<span>Export Executive Summary</span>
</button>
<div class="status-row" style="margin-top: 0.5rem;">
<span class="status-label">A natural-language summary of the scan findings.</span>
</div>
</div>
</div>
</details>
</section>
</div>
</div>
</div>
</div>
</div>
<script src="{{ url_for('static', filename='js/graph.js') }}"></script>
<script src="{{ url_for('static', filename='js/main.js') }}"></script>
</body>
</html>

View File

@@ -0,0 +1,22 @@
# dnsrecon-reduced/utils/__init__.py
"""
Utility modules for DNSRecon.
Contains helper functions, export management, and supporting utilities.
"""
from .helpers import is_valid_target, _is_valid_domain, _is_valid_ip, get_ip_version, normalize_ip
from .export_manager import export_manager, ExportManager, CustomJSONEncoder
__all__ = [
'is_valid_target',
'_is_valid_domain',
'_is_valid_ip',
'get_ip_version',
'normalize_ip',
'export_manager',
'ExportManager',
'CustomJSONEncoder'
]
__version__ = "1.0.0"

849
utils/export_manager.py Normal file
View File

@@ -0,0 +1,849 @@
# dnsrecon-reduced/utils/export_manager.py
"""
Centralized export functionality for DNSRecon.
Handles all data export operations with forensic integrity and proper formatting.
ENHANCED: Professional forensic executive summary generation for court-ready documentation.
"""
import json
from datetime import datetime, timezone
from typing import Dict, Any, List, Optional, Set, Tuple
from decimal import Decimal
from collections import defaultdict, Counter
import networkx as nx
from utils.helpers import _is_valid_domain, _is_valid_ip
class ExportManager:
"""
Centralized manager for all DNSRecon export operations.
Maintains forensic integrity and provides consistent export formats.
ENHANCED: Advanced forensic analysis and professional reporting capabilities.
"""
def __init__(self):
"""Initialize export manager."""
pass
def export_scan_results(self, scanner) -> Dict[str, Any]:
"""
Export complete scan results with forensic metadata.
Args:
scanner: Scanner instance with completed scan data
Returns:
Complete scan results dictionary
"""
graph_data = self.export_graph_json(scanner.graph)
audit_trail = scanner.logger.export_audit_trail()
provider_stats = {}
for provider in scanner.providers:
provider_stats[provider.get_name()] = provider.get_statistics()
results = {
'scan_metadata': {
'target_domain': scanner.current_target,
'max_depth': scanner.max_depth,
'final_status': scanner.status,
'total_indicators_processed': scanner.indicators_processed,
'enabled_providers': list(provider_stats.keys()),
'session_id': scanner.session_id
},
'graph_data': graph_data,
'forensic_audit': audit_trail,
'provider_statistics': provider_stats,
'scan_summary': scanner.logger.get_forensic_summary()
}
# Add export metadata
results['export_metadata'] = {
'export_timestamp': datetime.now(timezone.utc).isoformat(),
'export_version': '1.0.0',
'forensic_integrity': 'maintained'
}
return results
def export_targets_list(self, scanner) -> str:
"""
Export all discovered domains and IPs as a text file.
Args:
scanner: Scanner instance with graph data
Returns:
Newline-separated list of targets
"""
nodes = scanner.graph.get_graph_data().get('nodes', [])
targets = {
node['id'] for node in nodes
if _is_valid_domain(node['id']) or _is_valid_ip(node['id'])
}
return "\n".join(sorted(list(targets)))
def generate_executive_summary(self, scanner) -> str:
"""
ENHANCED: Generate a comprehensive, court-ready forensic executive summary.
Args:
scanner: Scanner instance with completed scan data
Returns:
Professional forensic summary formatted for investigative use
"""
report = []
now = datetime.now(timezone.utc).strftime('%Y-%m-%d %H:%M:%S UTC')
# Get comprehensive data for analysis
graph_data = scanner.graph.get_graph_data()
nodes = graph_data.get('nodes', [])
edges = graph_data.get('edges', [])
audit_trail = scanner.logger.export_audit_trail()
# Perform advanced analysis
infrastructure_analysis = self._analyze_infrastructure_patterns(nodes, edges)
# === HEADER AND METADATA ===
report.extend([
"=" * 80,
"DIGITAL INFRASTRUCTURE RECONNAISSANCE REPORT",
"=" * 80,
"",
f"Report Generated: {now}",
f"Investigation Target: {scanner.current_target}",
f"Analysis Session: {scanner.session_id}",
f"Scan Depth: {scanner.max_depth} levels",
f"Final Status: {scanner.status.upper()}",
""
])
# === EXECUTIVE SUMMARY ===
report.extend([
"EXECUTIVE SUMMARY",
"-" * 40,
"",
f"This report presents the findings of a comprehensive passive reconnaissance analysis "
f"conducted against the target '{scanner.current_target}'. The investigation employed "
f"multiple intelligence sources and discovered {len(nodes)} distinct digital entities "
f"connected through {len(edges)} verified relationships.",
"",
f"The analysis reveals a digital infrastructure comprising {infrastructure_analysis['domains']} "
f"domain names, {infrastructure_analysis['ips']} IP addresses, and {infrastructure_analysis['isps']} "
f"infrastructure service providers. Certificate transparency analysis identified "
f"{infrastructure_analysis['cas']} certificate authorities managing the cryptographic "
f"infrastructure for the investigated entities.",
"",
])
# === METHODOLOGY ===
report.extend([
"INVESTIGATIVE METHODOLOGY",
"-" * 40,
"",
"This analysis employed passive reconnaissance techniques using the following verified data sources:",
""
])
provider_info = {
'dns': 'Standard DNS resolution and reverse DNS lookups',
'crtsh': 'Certificate Transparency database analysis via crt.sh',
'shodan': 'Internet-connected device intelligence via Shodan API'
}
for provider in scanner.providers:
provider_name = provider.get_name()
stats = provider.get_statistics()
description = provider_info.get(provider_name, f'{provider_name} data provider')
report.extend([
f"{provider.get_display_name()}: {description}",
f" - Total Requests: {stats['total_requests']}",
f" - Success Rate: {stats['success_rate']:.1f}%",
f" - Relationships Discovered: {stats['relationships_found']}",
""
])
# === INFRASTRUCTURE ANALYSIS ===
report.extend([
"INFRASTRUCTURE ANALYSIS",
"-" * 40,
""
])
# Domain Analysis
if infrastructure_analysis['domains'] > 0:
report.extend([
f"Domain Name Infrastructure ({infrastructure_analysis['domains']} entities):",
""
])
domain_details = self._get_detailed_domain_analysis(nodes, edges)
for domain_info in domain_details[:10]: # Top 10 domains
report.extend([
f"{domain_info['domain']}",
f" - Type: {domain_info['classification']}",
f" - Connected IPs: {len(domain_info['ips'])}",
f" - Certificate Status: {domain_info['cert_status']}",
f" - Relationship Confidence: {domain_info['avg_confidence']:.2f}",
])
if domain_info['security_notes']:
report.extend([
f" - Security Notes: {', '.join(domain_info['security_notes'])}",
])
report.append("")
# IP Address Analysis
if infrastructure_analysis['ips'] > 0:
report.extend([
f"IP Address Infrastructure ({infrastructure_analysis['ips']} entities):",
""
])
ip_details = self._get_detailed_ip_analysis(nodes, edges)
for ip_info in ip_details[:8]: # Top 8 IPs
report.extend([
f"{ip_info['ip']} ({ip_info['version']})",
f" - Associated Domains: {len(ip_info['domains'])}",
f" - ISP: {ip_info['isp'] or 'Unknown'}",
f" - Geographic Location: {ip_info['location'] or 'Not determined'}",
])
if ip_info['open_ports']:
report.extend([
f" - Exposed Services: {', '.join(map(str, ip_info['open_ports'][:5]))}"
+ (f" (and {len(ip_info['open_ports']) - 5} more)" if len(ip_info['open_ports']) > 5 else ""),
])
report.append("")
# === RELATIONSHIP ANALYSIS ===
report.extend([
"ENTITY RELATIONSHIP ANALYSIS",
"-" * 40,
""
])
# Network topology insights
topology = self._analyze_network_topology(nodes, edges)
report.extend([
f"Network Topology Assessment:",
f"• Central Hubs: {len(topology['hubs'])} entities serve as primary connection points",
f"• Isolated Clusters: {len(topology['clusters'])} distinct groupings identified",
f"• Relationship Density: {topology['density']:.3f} (0=sparse, 1=fully connected)",
f"• Average Path Length: {topology['avg_path_length']:.2f} degrees of separation",
""
])
# Key relationships
key_relationships = self._identify_key_relationships(edges)
if key_relationships:
report.extend([
"Critical Infrastructure Relationships:",
""
])
for rel in key_relationships[:8]: # Top 8 relationships
confidence_desc = self._describe_confidence(rel['confidence'])
report.extend([
f"{rel['source']}{rel['target']}",
f" - Relationship: {self._humanize_relationship_type(rel['type'])}",
f" - Evidence Strength: {confidence_desc} ({rel['confidence']:.2f})",
f" - Discovery Method: {rel['provider']}",
""
])
# === CERTIFICATE ANALYSIS ===
cert_analysis = self._analyze_certificate_infrastructure(nodes)
if cert_analysis['total_certs'] > 0:
report.extend([
"CERTIFICATE INFRASTRUCTURE ANALYSIS",
"-" * 40,
"",
f"Certificate Status Overview:",
f"• Total Certificates Analyzed: {cert_analysis['total_certs']}",
f"• Valid Certificates: {cert_analysis['valid']}",
f"• Expired/Invalid: {cert_analysis['expired']}",
f"• Certificate Authorities: {len(cert_analysis['cas'])}",
""
])
if cert_analysis['cas']:
report.extend([
"Certificate Authority Distribution:",
""
])
for ca, count in cert_analysis['cas'].most_common(5):
report.extend([
f"{ca}: {count} certificate(s)",
])
report.append("")
# === TECHNICAL APPENDIX ===
report.extend([
"TECHNICAL APPENDIX",
"-" * 40,
"",
"Data Quality Assessment:",
f"• Total API Requests: {audit_trail.get('session_metadata', {}).get('total_requests', 0)}",
f"• Data Providers Used: {len(audit_trail.get('session_metadata', {}).get('providers_used', []))}",
f"• Relationship Confidence Distribution:",
])
# Confidence distribution
confidence_dist = self._calculate_confidence_distribution(edges)
for level, count in confidence_dist.items():
percentage = (count / len(edges) * 100) if edges else 0
report.extend([
f" - {level.title()} Confidence (≥{self._get_confidence_threshold(level)}): {count} ({percentage:.1f}%)",
])
report.extend([
"",
"Correlation Analysis:",
f"• Entity Correlations Identified: {len(scanner.graph.correlation_index)}",
f"• Cross-Reference Validation: {self._count_cross_validated_relationships(edges)} relationships verified by multiple sources",
""
])
# === CONCLUSION ===
report.extend([
"CONCLUSION",
"-" * 40,
"",
self._generate_conclusion(scanner.current_target, infrastructure_analysis,
len(edges)),
"",
"This analysis was conducted using passive reconnaissance techniques and represents "
"the digital infrastructure observable through public data sources at the time of investigation. "
"All findings are supported by verifiable technical evidence and documented through "
"a complete audit trail maintained for forensic integrity.",
"",
f"Investigation completed: {now}",
f"Report authenticated by: DNSRecon v{self._get_version()}",
"",
"=" * 80,
"END OF REPORT",
"=" * 80
])
return "\n".join(report)
def _analyze_infrastructure_patterns(self, nodes: List[Dict], edges: List[Dict]) -> Dict[str, Any]:
"""Analyze infrastructure patterns and classify entities."""
analysis = {
'domains': len([n for n in nodes if n['type'] == 'domain']),
'ips': len([n for n in nodes if n['type'] == 'ip']),
'isps': len([n for n in nodes if n['type'] == 'isp']),
'cas': len([n for n in nodes if n['type'] == 'ca']),
'correlations': len([n for n in nodes if n['type'] == 'correlation_object'])
}
return analysis
def _get_detailed_domain_analysis(self, nodes: List[Dict], edges: List[Dict]) -> List[Dict[str, Any]]:
"""Generate detailed analysis for each domain."""
domain_nodes = [n for n in nodes if n['type'] == 'domain']
domain_analysis = []
for domain in domain_nodes:
# Find connected IPs
connected_ips = [e['to'] for e in edges
if e['from'] == domain['id'] and _is_valid_ip(e['to'])]
# Determine classification
classification = "Primary Domain"
if domain['id'].startswith('www.'):
classification = "Web Interface"
elif any(subdomain in domain['id'] for subdomain in ['api.', 'mail.', 'smtp.']):
classification = "Service Endpoint"
elif domain['id'].count('.') > 1:
classification = "Subdomain"
# Certificate status
cert_status = self._determine_certificate_status(domain)
# Security notes
security_notes = []
if cert_status == "Expired/Invalid":
security_notes.append("Certificate validation issues")
if len(connected_ips) == 0:
security_notes.append("No IP resolution found")
if len(connected_ips) > 5:
security_notes.append("Multiple IP endpoints")
# Average confidence
domain_edges = [e for e in edges if e['from'] == domain['id']]
avg_confidence = sum(e['confidence_score'] for e in domain_edges) / len(domain_edges) if domain_edges else 0
domain_analysis.append({
'domain': domain['id'],
'classification': classification,
'ips': connected_ips,
'cert_status': cert_status,
'security_notes': security_notes,
'avg_confidence': avg_confidence
})
# Sort by number of connections (most connected first)
return sorted(domain_analysis, key=lambda x: len(x['ips']), reverse=True)
def _get_detailed_ip_analysis(self, nodes: List[Dict], edges: List[Dict]) -> List[Dict[str, Any]]:
"""Generate detailed analysis for each IP address."""
ip_nodes = [n for n in nodes if n['type'] == 'ip']
ip_analysis = []
for ip in ip_nodes:
# Find connected domains
connected_domains = [e['from'] for e in edges
if e['to'] == ip['id'] and _is_valid_domain(e['from'])]
# Extract metadata from attributes
ip_version = "IPv4"
location = None
isp = None
open_ports = []
for attr in ip.get('attributes', []):
if attr.get('name') == 'country':
location = attr.get('value')
elif attr.get('name') == 'org':
isp = attr.get('value')
elif attr.get('name') == 'shodan_open_port':
open_ports.append(attr.get('value'))
elif 'ipv6' in str(attr.get('metadata', {})).lower():
ip_version = "IPv6"
# Find ISP from relationships
if not isp:
isp_edges = [e for e in edges if e['from'] == ip['id'] and e['label'].endswith('_isp')]
isp = isp_edges[0]['to'] if isp_edges else None
ip_analysis.append({
'ip': ip['id'],
'version': ip_version,
'domains': connected_domains,
'isp': isp,
'location': location,
'open_ports': open_ports
})
# Sort by number of connected domains
return sorted(ip_analysis, key=lambda x: len(x['domains']), reverse=True)
def _analyze_network_topology(self, nodes: List[Dict], edges: List[Dict]) -> Dict[str, Any]:
"""Analyze network topology and identify key structural patterns."""
if not nodes or not edges:
return {'hubs': [], 'clusters': [], 'density': 0, 'avg_path_length': 0}
# Create NetworkX graph
G = nx.DiGraph()
for node in nodes:
G.add_node(node['id'])
for edge in edges:
G.add_edge(edge['from'], edge['to'])
# Convert to undirected for certain analyses
G_undirected = G.to_undirected()
# Identify hubs (nodes with high degree centrality)
centrality = nx.degree_centrality(G_undirected)
hub_threshold = max(centrality.values()) * 0.7 if centrality else 0
hubs = [node for node, cent in centrality.items() if cent >= hub_threshold]
# Find connected components (clusters)
clusters = list(nx.connected_components(G_undirected))
# Calculate density
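# (nx.density on an undirected graph is 2m / (n * (n - 1)) for n nodes and m edges)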
density = nx.density(G_undirected)
# Calculate average path length (for largest component)
if G_undirected.number_of_nodes() > 1:
largest_cc = max(nx.connected_components(G_undirected), key=len)
subgraph = G_undirected.subgraph(largest_cc)
try:
avg_path_length = nx.average_shortest_path_length(subgraph)
except nx.NetworkXException:
avg_path_length = 0
else:
avg_path_length = 0
return {
'hubs': hubs,
'clusters': clusters,
'density': density,
'avg_path_length': avg_path_length
}
def _identify_key_relationships(self, edges: List[Dict]) -> List[Dict[str, Any]]:
"""Identify the most significant relationships in the infrastructure."""
# Score relationships by confidence and type importance
relationship_importance = {
'dns_a_record': 0.9,
'dns_aaaa_record': 0.9,
'crtsh_cert_issuer': 0.8,
'shodan_isp': 0.8,
'crtsh_san_certificate': 0.7,
'dns_mx_record': 0.7,
'dns_ns_record': 0.7
}
scored_edges = []
for edge in edges:
base_confidence = edge.get('confidence_score', 0)
type_weight = relationship_importance.get(edge.get('label', ''), 0.5)
combined_score = (base_confidence * 0.7) + (type_weight * 0.3)
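# e.g. a dns_a_record edge (type weight 0.9) with confidence 0.8 scores (0.8 * 0.7) + (0.9 * 0.3) = 0.83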
scored_edges.append({
'source': edge['from'],
'target': edge['to'],
'type': edge.get('label', ''),
'confidence': base_confidence,
'provider': edge.get('source_provider', ''),
'score': combined_score
})
# Return all relationships sorted by combined score, most significant first
return sorted(scored_edges, key=lambda x: x['score'], reverse=True)
def _analyze_certificate_infrastructure(self, nodes: List[Dict]) -> Dict[str, Any]:
"""Analyze certificate infrastructure across all domains."""
domain_nodes = [n for n in nodes if n['type'] == 'domain']
ca_nodes = [n for n in nodes if n['type'] == 'ca']
valid_certs = 0
expired_certs = 0
total_certs = 0
cas = Counter()
for domain in domain_nodes:
for attr in domain.get('attributes', []):
if attr.get('name') == 'cert_is_currently_valid':
total_certs += 1
if attr.get('value') is True:
valid_certs += 1
else:
expired_certs += 1
elif attr.get('name') == 'cert_issuer_name':
issuer = attr.get('value')
if issuer:
cas[issuer] += 1
return {
'total_certs': total_certs,
'valid': valid_certs,
'expired': expired_certs,
'cas': cas
}
def _has_expired_certificates(self, domain_node: Dict) -> bool:
"""Check if domain has expired certificates."""
for attr in domain_node.get('attributes', []):
if (attr.get('name') == 'cert_is_currently_valid' and
attr.get('value') is False):
return True
return False
def _determine_certificate_status(self, domain_node: Dict) -> str:
"""Determine the certificate status for a domain."""
has_valid = False
has_expired = False
has_any = False
for attr in domain_node.get('attributes', []):
if attr.get('name') == 'cert_is_currently_valid':
has_any = True
if attr.get('value') is True:
has_valid = True
else:
has_expired = True
if not has_any:
return "No Certificate Data"
elif has_valid and not has_expired:
return "Valid"
elif has_expired and not has_valid:
return "Expired/Invalid"
else:
return "Mixed Status"
def _describe_confidence(self, confidence: float) -> str:
"""Convert confidence score to descriptive text."""
if confidence >= 0.9:
return "Very High"
elif confidence >= 0.8:
return "High"
elif confidence >= 0.6:
return "Medium"
elif confidence >= 0.4:
return "Low"
else:
return "Very Low"
def _humanize_relationship_type(self, rel_type: str) -> str:
"""Convert technical relationship types to human-readable descriptions."""
type_map = {
'dns_a_record': 'DNS A Record Resolution',
'dns_aaaa_record': 'DNS AAAA Record (IPv6) Resolution',
'dns_mx_record': 'Email Server (MX) Configuration',
'dns_ns_record': 'Name Server Delegation',
'dns_cname_record': 'DNS Alias (CNAME) Resolution',
'crtsh_cert_issuer': 'SSL Certificate Issuer Relationship',
'crtsh_san_certificate': 'Shared SSL Certificate',
'shodan_isp': 'Internet Service Provider Assignment',
'shodan_a_record': 'IP-to-Domain Resolution (Shodan)',
'dns_ptr_record': 'Reverse DNS Resolution'
}
return type_map.get(rel_type, rel_type.replace('_', ' ').title())
def _calculate_confidence_distribution(self, edges: List[Dict]) -> Dict[str, int]:
"""Calculate confidence score distribution."""
distribution = {'high': 0, 'medium': 0, 'low': 0}
for edge in edges:
confidence = edge.get('confidence_score', 0)
if confidence >= 0.8:
distribution['high'] += 1
elif confidence >= 0.6:
distribution['medium'] += 1
else:
distribution['low'] += 1
return distribution
def _get_confidence_threshold(self, level: str) -> str:
"""Get confidence threshold for a level."""
thresholds = {'high': '0.80', 'medium': '0.60', 'low': '0.00'}
return thresholds.get(level, '0.00')
def _count_cross_validated_relationships(self, edges: List[Dict]) -> int:
"""Count relationships verified by multiple providers."""
# Group edges by source-target pair
edge_pairs = defaultdict(list)
for edge in edges:
pair_key = f"{edge['from']}->{edge['to']}"
edge_pairs[pair_key].append(edge.get('source_provider', ''))
# Count pairs reported by more than one unique provider
cross_validated = 0
for providers in edge_pairs.values():
if len(set(providers)) > 1: # Multiple unique providers
cross_validated += 1
return cross_validated
def _generate_security_recommendations(self, infrastructure_analysis: Dict) -> List[str]:
"""Generate actionable security recommendations."""
recommendations = []
# Check for complex infrastructure
if infrastructure_analysis['ips'] > 10:
recommendations.append(
"Document and validate the necessity of extensive IP address infrastructure"
)
if infrastructure_analysis['correlations'] > 5:
recommendations.append(
"Investigate shared infrastructure components for operational security implications"
)
if not recommendations:
recommendations.append(
"Continue monitoring for changes in the identified digital infrastructure"
)
return recommendations
def _generate_conclusion(self, target: str, infrastructure_analysis: Dict, total_relationships: int) -> str:
"""Generate a professional conclusion for the report."""
conclusion_parts = [
f"The passive reconnaissance analysis of '{target}' has successfully mapped "
f"a digital infrastructure ecosystem consisting of {infrastructure_analysis['domains']} "
f"domain names, {infrastructure_analysis['ips']} IP addresses, and "
f"{total_relationships} verified inter-entity relationships."
]
conclusion_parts.append(
"All findings in this report are based on publicly available information and "
"passive reconnaissance techniques. The analysis maintains full forensic integrity "
"with complete audit trails for all data collection activities."
)
return " ".join(conclusion_parts)
def _count_bidirectional_relationships(self, graph) -> int:
"""Count bidirectional relationships in the graph."""
count = 0
for u, v in graph.edges():
if graph.has_edge(v, u):
count += 1
return count // 2 # Each pair counted twice
def _identify_hub_nodes(self, graph, nodes: List[Dict]) -> List[str]:
"""Identify nodes that serve as major hubs in the network."""
if not graph.nodes():
return []
degree_centrality = nx.degree_centrality(graph.to_undirected())
threshold = max(degree_centrality.values()) * 0.8 if degree_centrality else 0
return [node for node, centrality in degree_centrality.items()
if centrality >= threshold]
def _get_version(self) -> str:
"""Get DNSRecon version for report authentication."""
return "1.0.0-forensic"
def export_graph_json(self, graph_manager) -> Dict[str, Any]:
"""
Export complete graph data as a JSON-serializable dictionary.
Moved from GraphManager to centralize export functionality.
Args:
graph_manager: GraphManager instance with graph data
Returns:
Complete graph data with export metadata
"""
graph_data = nx.node_link_data(graph_manager.graph, edges="edges")
return {
'export_metadata': {
'export_timestamp': datetime.now(timezone.utc).isoformat(),
'graph_creation_time': graph_manager.creation_time,
'last_modified': graph_manager.last_modified,
'total_nodes': graph_manager.get_node_count(),
'total_edges': graph_manager.get_edge_count(),
'graph_format': 'dnsrecon_v1_unified_model'
},
'graph': graph_data,
'statistics': graph_manager.get_statistics()
}
def serialize_to_json(self, data: Dict[str, Any], indent: int = 2) -> str:
"""
Serialize data to JSON with custom handling for non-serializable objects.
Args:
data: Data to serialize
indent: JSON indentation level
Returns:
JSON string representation
"""
try:
return json.dumps(data, indent=indent, cls=CustomJSONEncoder, ensure_ascii=False)
except Exception:
# Fallback to aggressive cleaning
cleaned_data = self._clean_for_json(data)
return json.dumps(cleaned_data, indent=indent, ensure_ascii=False)
def _clean_for_json(self, obj, max_depth: int = 10, current_depth: int = 0) -> Any:
"""
Recursively clean an object to make it JSON serializable.
Handles circular references and problematic object types.
Args:
obj: Object to clean
max_depth: Maximum recursion depth
current_depth: Current recursion depth
Returns:
JSON-serializable object
"""
if current_depth > max_depth:
return f"<max_depth_exceeded_{type(obj).__name__}>"
if obj is None or isinstance(obj, (bool, int, float, str)):
return obj
elif isinstance(obj, datetime):
return obj.isoformat()
elif isinstance(obj, (set, frozenset)):
return list(obj)
elif isinstance(obj, dict):
cleaned = {}
for key, value in obj.items():
try:
# Ensure key is string
clean_key = str(key) if not isinstance(key, str) else key
cleaned[clean_key] = self._clean_for_json(value, max_depth, current_depth + 1)
except Exception:
cleaned[str(key)] = f"<serialization_error_{type(value).__name__}>"
return cleaned
elif isinstance(obj, (list, tuple)):
cleaned = []
for item in obj:
try:
cleaned.append(self._clean_for_json(item, max_depth, current_depth + 1))
except Exception:
cleaned.append(f"<serialization_error_{type(item).__name__}>")
return cleaned
elif hasattr(obj, '__dict__'):
try:
return self._clean_for_json(obj.__dict__, max_depth, current_depth + 1)
except Exception:
return str(obj)
elif hasattr(obj, 'value'):
# For enum-like objects
return obj.value
else:
return str(obj)
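# e.g. _clean_for_json({'when': datetime(2025, 9, 20), 'tags': {'a'}})
#      -> {'when': '2025-09-20T00:00:00', 'tags': ['a']}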
def generate_filename(self, target: str, export_type: str, timestamp: Optional[datetime] = None) -> str:
"""
Generate standardized filename for exports.
Args:
target: Target domain/IP being scanned
export_type: Type of export (json, txt, summary)
timestamp: Optional timestamp (defaults to now)
Returns:
Formatted filename with forensic naming convention
"""
if timestamp is None:
timestamp = datetime.now(timezone.utc)
timestamp_str = timestamp.strftime('%Y%m%d_%H%M%S')
safe_target = "".join(c for c in target if c.isalnum() or c in ('-', '_', '.')).rstrip()
extension_map = {
'json': 'json',
'txt': 'txt',
'summary': 'txt',
'targets': 'txt'
}
extension = extension_map.get(export_type, 'txt')
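# e.g. generate_filename('example.com', 'json') -> 'dnsrecon_json_example.com_20250920_143000.json' (timestamp illustrative)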
return f"dnsrecon_{export_type}_{safe_target}_{timestamp_str}.{extension}"
class CustomJSONEncoder(json.JSONEncoder):
"""Custom JSON encoder to handle non-serializable objects."""
def default(self, obj):
if isinstance(obj, datetime):
return obj.isoformat()
elif isinstance(obj, set):
return list(obj)
elif isinstance(obj, Decimal):
return float(obj)
elif hasattr(obj, '__dict__'):
# For custom objects, try to serialize their dict representation
try:
return obj.__dict__
except Exception:
return str(obj)
elif hasattr(obj, 'value') and hasattr(obj, 'name'):
# For enum objects
return obj.value
else:
# For any other non-serializable object, convert to string
return str(obj)
# Global export manager instance
export_manager = ExportManager()
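A minimal usage sketch of the module-level instance (the import path and the populated `graph_manager` are assumptions; the method names match the definitions above):

    from utils.export_manager import export_manager

    data = export_manager.export_graph_json(graph_manager)  # graph data plus export metadata
    json_text = export_manager.serialize_to_json(data)      # JSON with fallback cleaning
    filename = export_manager.generate_filename('example.com', 'json')
    with open(filename, 'w', encoding='utf-8') as f:
        f.write(json_text)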

utils/helpers.py

@@ -1,3 +1,8 @@
 # dnsrecon-reduced/utils/helpers.py
 
+import ipaddress
+from typing import Union
+
 def _is_valid_domain(domain: str) -> bool:
     """
     Basic domain validation.
@@ -26,25 +31,64 @@ def _is_valid_domain(domain: str) -> bool:
 def _is_valid_ip(ip: str) -> bool:
     """
-    Basic IP address validation.
+    IP address validation supporting both IPv4 and IPv6.
 
     Args:
         ip: IP address string to validate
 
     Returns:
-        True if IP appears valid
+        True if IP appears valid (IPv4 or IPv6)
     """
     if not ip:
         return False
 
     try:
-        parts = ip.split('.')
-        if len(parts) != 4:
-            return False
-        for part in parts:
-            num = int(part)
-            if not 0 <= num <= 255:
-                return False
+        # This handles both IPv4 and IPv6 validation
+        ipaddress.ip_address(ip.strip())
         return True
     except (ValueError, AttributeError):
         return False
+
+
+def is_valid_target(target: str) -> bool:
+    """
+    Checks if the target is a valid domain or IP address (IPv4/IPv6).
+
+    Args:
+        target: The target string to validate.
+
+    Returns:
+        True if the target is a valid domain or IP, False otherwise.
+    """
+    return _is_valid_domain(target) or _is_valid_ip(target)
+
+
+def get_ip_version(ip: str) -> Union[int, None]:
+    """
+    Get the IP version (4 or 6) of a valid IP address.
+
+    Args:
+        ip: IP address string
+
+    Returns:
+        4 for IPv4, 6 for IPv6, None if invalid
+    """
+    try:
+        addr = ipaddress.ip_address(ip.strip())
+        return addr.version
+    except (ValueError, AttributeError):
+        return None
+
+
+def normalize_ip(ip: str) -> Union[str, None]:
+    """
+    Normalize an IP address to its canonical form.
+
+    Args:
+        ip: IP address string
+
+    Returns:
+        Normalized IP address string, None if invalid
+    """
+    try:
+        addr = ipaddress.ip_address(ip.strip())
+        return str(addr)
+    except (ValueError, AttributeError):
+        return None
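A quick behavioral sketch of the new helpers (import path assumed from the header comment):

    from utils.helpers import is_valid_target, get_ip_version, normalize_ip

    is_valid_target('example.com')    # True (valid domain)
    is_valid_target('2001:db8::1')    # True (valid IPv6)
    get_ip_version('192.0.2.1')       # 4
    get_ip_version('not-an-ip')       # None
    normalize_ip('2001:0DB8::0001')   # '2001:db8::1' (compressed, lowercased)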