Compare commits
51 Commits
database_c ... 140ef54674

Commits in this comparison (SHA1 only):
140ef54674, aae459446c, 98e1b2280b, cd14198452, 284660ab8c, ecfb27e02a, 39b4242200, a56755320c, b985f1e5f0, 8ae4fdbf80, d0ee415f0d, 173c3dcf92, ec755b17ad, 469c133f1b, f775c61731, b984189e08, f2db739fa1, 47ce7ff883, 229746e1ec, 733e1da640, 97aa18f788, 15421dd4a5, ad4086b156, 0e92ec6e9a, baa57bfac2, f0f80be955, ecc143ddbb, 2c48316477, fc098aed28, 9285226cbc, 350055fcec, 4a5ecf7a37, 71b2855d01, 93a258170a, e2d4e12057, c076ee028f, cbfac0922a, 881f7b74e5, c347581a6c, 30ee21f087, 2496ca26a5, 8aa3c4933e, fc326a66c8, 51902e3155, a261d706c8, 2410e689b8, 62470673fe, 2658bd148b, f02381910d, 674ac59c98, 434d1f4803
@@ -25,10 +25,10 @@ DEFAULT_RECURSION_DEPTH=2
 # Default timeout for provider API requests in seconds.
 DEFAULT_TIMEOUT=30
 # The number of concurrent provider requests to make.
-MAX_CONCURRENT_REQUESTS=5
+MAX_CONCURRENT_REQUESTS=1
 # The number of results from a provider that triggers the "large entity" grouping.
 LARGE_ENTITY_THRESHOLD=100
 # The number of times to retry a target if a provider fails.
-MAX_RETRIES_PER_TARGET=3
+MAX_RETRIES_PER_TARGET=8
 # How long cached provider responses are stored (in hours).
-CACHE_EXPIRY_HOURS=12
+CACHE_TIMEOUT_HOURS=12
1
.gitignore (vendored)
@@ -169,3 +169,4 @@ cython_debug/
#.idea/

dump.rdb
cache/
226
README.md
@@ -4,28 +4,32 @@ DNSRecon is an interactive, passive reconnaissance tool designed to map adversar
|
||||
|
||||
**Current Status: Phase 2 Implementation**
|
||||
|
||||
- ✅ Core infrastructure and graph engine
|
||||
- ✅ Multi-provider support (crt.sh, DNS, Shodan)
|
||||
- ✅ Session-based multi-user support
|
||||
- ✅ Real-time web interface with interactive visualization
|
||||
- ✅ Forensic logging system and JSON export
|
||||
* ✅ Core infrastructure and graph engine
|
||||
* ✅ Multi-provider support (crt.sh, DNS, Shodan)
|
||||
* ✅ Session-based multi-user support
|
||||
* ✅ Real-time web interface with interactive visualization
|
||||
* ✅ Forensic logging system and JSON export
|
||||
|
||||
-----
|
||||
|
||||
## Features
|
||||
|
||||
- **Passive Reconnaissance**: Gathers data without direct contact with target infrastructure.
|
||||
- **In-Memory Graph Analysis**: Uses NetworkX for efficient relationship mapping.
|
||||
- **Real-Time Visualization**: The graph updates dynamically as the scan progresses.
|
||||
- **Forensic Logging**: A complete audit trail of all reconnaissance activities is maintained.
|
||||
- **Confidence Scoring**: Relationships are weighted based on the reliability of the data source.
|
||||
- **Session Management**: Supports concurrent user sessions with isolated scanner instances.
|
||||
* **Passive Reconnaissance**: Gathers data without direct contact with target infrastructure.
|
||||
* **In-Memory Graph Analysis**: Uses NetworkX for efficient relationship mapping.
|
||||
* **Real-Time Visualization**: The graph updates dynamically as the scan progresses.
|
||||
* **Forensic Logging**: A complete audit trail of all reconnaissance activities is maintained.
|
||||
* **Confidence Scoring**: Relationships are weighted based on the reliability of the data source.
|
||||
* **Session Management**: Supports concurrent user sessions with isolated scanner instances.
|
||||
|
||||
-----
|
||||
|
||||
## Installation
|
||||
|
||||
### Prerequisites
|
||||
|
||||
- Python 3.8 or higher
|
||||
- A modern web browser with JavaScript enabled
|
||||
- (Recommended) A Linux host for running the application and the optional DNS cache.
|
||||
* Python 3.8 or higher
|
||||
* A modern web browser with JavaScript enabled
|
||||
* (Recommended) A Linux host for running the application and the optional DNS cache.
|
||||
|
||||
### 1. Clone the Project
|
||||
|
||||
@@ -44,156 +48,50 @@ source venv/bin/activate
|
||||
pip install -r requirements.txt
|
||||
```
|
||||
|
||||
### 3. (Optional but Recommended) Set up a Local DNS Caching Resolver
|
||||
The `requirements.txt` file contains the following dependencies:
|
||||
|
||||
Running a local DNS caching resolver can significantly speed up DNS queries and reduce your network footprint. Here’s how to set up `unbound` on a Debian-based Linux distribution (like Ubuntu).
|
||||
* Flask>=2.3.3
* networkx>=3.1
* requests>=2.31.0
* python-dateutil>=2.8.2
* Werkzeug>=2.3.7
* urllib3>=2.0.0
* dnspython>=2.4.2
* gunicorn
* redis
* python-dotenv
|
||||
|
||||
**a. Install Unbound:**
|
||||
-----
|
||||
|
||||
## Configuration
|
||||
|
||||
DNSRecon is configured using a `.env` file. You can copy the provided example file and edit it to suit your needs:
|
||||
|
||||
```bash
|
||||
sudo apt update
|
||||
sudo apt install unbound -y
|
||||
cp .env.example .env
|
||||
```
|
||||
|
||||
**b. Configure Unbound:**
|
||||
Create a new configuration file for DNSRecon:
|
||||
The following environment variables are available for configuration:
|
||||
|
||||
```bash
|
||||
sudo nano /etc/unbound/unbound.conf.d/dnsrecon.conf
|
||||
```
|
||||
| Variable | Description | Default |
| :--- | :--- | :--- |
| `SHODAN_API_KEY` | Your Shodan API key. | |
| `FLASK_SECRET_KEY` | A strong, random secret key for session security. | `your-very-secret-and-random-key-here` |
| `FLASK_HOST` | The host address for the Flask application. | `127.0.0.1` |
| `FLASK_PORT` | The port for the Flask application. | `5000` |
| `FLASK_DEBUG` | Enable or disable Flask's debug mode. | `True` |
| `FLASK_PERMANENT_SESSION_LIFETIME_HOURS` | How long a user's session in the browser lasts (in hours). | `2` |
| `SESSION_TIMEOUT_MINUTES` | How long inactive scanner data is stored in Redis (in minutes). | `60` |
| `DEFAULT_RECURSION_DEPTH` | The default number of levels to recurse when scanning. | `2` |
| `DEFAULT_TIMEOUT` | Default timeout for provider API requests in seconds. | `30` |
| `MAX_CONCURRENT_REQUESTS` | The number of concurrent provider requests to make. | `5` |
| `LARGE_ENTITY_THRESHOLD` | The number of results from a provider that triggers the "large entity" grouping. | `100` |
| `MAX_RETRIES_PER_TARGET` | The number of times to retry a target if a provider fails. | `8` |
| `CACHE_EXPIRY_HOURS` | How long cached provider responses are stored (in hours). | `12` |
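For orientation, a `.env` populated with these variables might look like the following sketch (the Shodan key is a placeholder; the other values simply restate the documented defaults):

```
SHODAN_API_KEY=your_shodan_api_key_here
FLASK_SECRET_KEY=change-me-to-a-long-random-string
FLASK_HOST=127.0.0.1
FLASK_PORT=5000
FLASK_DEBUG=True
FLASK_PERMANENT_SESSION_LIFETIME_HOURS=2
SESSION_TIMEOUT_MINUTES=60
DEFAULT_RECURSION_DEPTH=2
DEFAULT_TIMEOUT=30
MAX_CONCURRENT_REQUESTS=5
LARGE_ENTITY_THRESHOLD=100
MAX_RETRIES_PER_TARGET=8
CACHE_EXPIRY_HOURS=12
```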
|
||||
|
||||
Add the following content to the file:
|
||||
-----
|
||||
|
||||
```
|
||||
server:
|
||||
# Listen on localhost for all users
|
||||
interface: 127.0.0.1
|
||||
access-control: 0.0.0.0/0 refuse
|
||||
access-control: 127.0.0.0/8 allow
|
||||
|
||||
# Enable prefetching of popular items
|
||||
prefetch: yes
|
||||
```
|
||||
|
||||
**c. Restart Unbound and set it as the default resolver:**
|
||||
|
||||
```bash
|
||||
sudo systemctl restart unbound
|
||||
sudo systemctl enable unbound
|
||||
```
|
||||
|
||||
To use this resolver for your system, you may need to update your network settings to point to `127.0.0.1` as your DNS server.
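One distribution-specific way to do this (an assumption; adjust for your setup) is to point `systemd-resolved` at the local Unbound instance:

```bash
# Forward system DNS lookups to the local Unbound instance on 127.0.0.1
sudo mkdir -p /etc/systemd/resolved.conf.d
printf '[Resolve]\nDNS=127.0.0.1\n' | sudo tee /etc/systemd/resolved.conf.d/dnsrecon.conf
sudo systemctl restart systemd-resolved
```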
|
||||
|
||||
**d. Update DNSProvider to use the local resolver:**
|
||||
In `dnsrecon/providers/dns_provider.py`, you can explicitly set the resolver's nameservers in the `__init__` method:
|
||||
|
||||
```python
|
||||
# dnsrecon/providers/dns_provider.py
|
||||
|
||||
class DNSProvider(BaseProvider):
|
||||
def __init__(self, session_config=None):
|
||||
"""Initialize DNS provider with session-specific configuration."""
|
||||
super().__init__(...)
|
||||
|
||||
# Configure DNS resolver
|
||||
self.resolver = dns.resolver.Resolver()
|
||||
self.resolver.nameservers = ['127.0.0.1'] # Use local caching resolver
|
||||
self.resolver.timeout = 5
|
||||
self.resolver.lifetime = 10
|
||||
```
|
||||
|
||||
## Usage (Development)
|
||||
|
||||
### 1. Start the Application
|
||||
|
||||
```bash
|
||||
python app.py
|
||||
```
|
||||
|
||||
### 2. Open Your Browser
|
||||
|
||||
Navigate to `http://127.0.0.1:5000`.
|
||||
|
||||
### 3. Basic Reconnaissance Workflow
|
||||
|
||||
1. **Enter Target Domain**: Input a domain like `example.com`.
|
||||
2. **Select Recursion Depth**: Depth 2 is recommended for most investigations.
|
||||
3. **Start Reconnaissance**: Click "Start Reconnaissance" to begin.
|
||||
4. **Monitor Progress**: Watch the real-time graph build as relationships are discovered.
|
||||
5. **Analyze and Export**: Interact with the graph and download the results when the scan is complete.
|
||||
|
||||
## Production Deployment
|
||||
|
||||
To deploy DNSRecon in a production environment, follow these steps:
|
||||
|
||||
### 1. Use a Production WSGI Server
|
||||
|
||||
Do not use the built-in Flask development server for production. Use a WSGI server like **Gunicorn**:
|
||||
|
||||
```bash
|
||||
pip install gunicorn
|
||||
gunicorn --workers 4 --bind 0.0.0.0:5000 app:app
|
||||
```
|
||||
|
||||
### 2. Configure Environment Variables
|
||||
|
||||
Set the following environment variables for a secure and configurable deployment:
|
||||
|
||||
```bash
|
||||
# Generate a strong, random secret key
|
||||
export SECRET_KEY='your-super-secret-and-random-key'
|
||||
|
||||
# Set Flask to production mode
|
||||
export FLASK_ENV='production'
|
||||
export FLASK_DEBUG=False
|
||||
|
||||
# API keys (optional, but recommended for full functionality)
|
||||
export SHODAN_API_KEY="your_shodan_key"
|
||||
```
|
||||
|
||||
### 3. Use a Reverse Proxy
|
||||
|
||||
Set up a reverse proxy like **Nginx** to sit in front of the Gunicorn server. This provides several benefits, including:
|
||||
|
||||
- **TLS/SSL Termination**: Securely handle HTTPS traffic.
|
||||
- **Load Balancing**: Distribute traffic across multiple application instances.
|
||||
- **Serving Static Files**: Efficiently serve CSS and JavaScript files.
|
||||
|
||||
**Example Nginx Configuration:**
|
||||
|
||||
```nginx
|
||||
server {
|
||||
listen 80;
|
||||
server_name your_domain.com;
|
||||
|
||||
location / {
|
||||
return 301 https://$host$request_uri;
|
||||
}
|
||||
}
|
||||
|
||||
server {
|
||||
listen 443 ssl;
|
||||
server_name your_domain.com;
|
||||
|
||||
# SSL cert configuration
|
||||
ssl_certificate /etc/letsencrypt/live/your_domain.com/fullchain.pem;
|
||||
ssl_certificate_key /etc/letsencrypt/live/your_domain.com/privkey.pem;
|
||||
|
||||
location / {
|
||||
proxy_pass http://127.0.0.1:5000;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
}
|
||||
|
||||
location /static {
|
||||
alias /path/to/your/dnsrecon/static;
|
||||
expires 30d;
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Autostart with systemd
|
||||
## Systemd Service
|
||||
|
||||
To run DNSRecon as a service that starts automatically on boot, you can use `systemd`.
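A minimal unit file for this might look like the following sketch; the install path, service user, and virtualenv location are assumptions you will need to adapt:

```ini
[Unit]
Description=DNSRecon web application
After=network.target

[Service]
User=dnsrecon
WorkingDirectory=/opt/dnsrecon
# Gunicorn serves the Flask app, fronted by the reverse proxy described above.
ExecStart=/opt/dnsrecon/venv/bin/gunicorn --workers 4 --bind 127.0.0.1:5000 app:app
Restart=on-failure

[Install]
WantedBy=multi-user.target
```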
|
||||
|
||||
@@ -245,12 +143,18 @@ You can check the status of the service at any time with:
|
||||
sudo systemctl status dnsrecon.service
|
||||
```
|
||||
|
||||
## Security Considerations
|
||||
|
||||
- **API Keys**: API keys are stored in memory for the duration of a user session and are not written to disk.
|
||||
- **Rate Limiting**: DNSRecon includes built-in rate limiting to be respectful to data sources.
|
||||
- **Local Use**: The application is designed for local or trusted network use and does not have built-in authentication. **Do not expose it directly to the internet without proper security controls.**
|
||||
-----
|
||||
|
||||
## License
|
||||
|
||||
This project is licensed under the terms of the license agreement found in the `LICENSE` file.
|
||||
This project is licensed under the terms of the **BSD-3-Clause** license.
|
||||
|
||||
Copyright (c) 2025 mstoeck3.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
|
||||
|
||||
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
|
||||
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
|
||||
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
559
app.py
@@ -10,44 +10,63 @@ import traceback
|
||||
from flask import Flask, render_template, request, jsonify, send_file, session
|
||||
from datetime import datetime, timezone, timedelta
|
||||
import io
|
||||
import os
|
||||
|
||||
from core.session_manager import session_manager
|
||||
from config import config
|
||||
from core.graph_manager import NodeType
|
||||
from utils.helpers import is_valid_target
|
||||
from decimal import Decimal
|
||||
|
||||
|
||||
app = Flask(__name__)
|
||||
# Use centralized configuration for Flask settings
|
||||
app.config['SECRET_KEY'] = config.flask_secret_key
|
||||
app.config['PERMANENT_SESSION_LIFETIME'] = timedelta(hours=config.flask_permanent_session_lifetime_hours)
|
||||
|
||||
def get_user_scanner():
|
||||
"""
|
||||
Retrieves the scanner for the current session, or creates a new
|
||||
session and scanner if one doesn't exist.
|
||||
Retrieves the scanner for the current session, or creates a new one if none exists.
|
||||
"""
|
||||
# Get current Flask session info for debugging
|
||||
current_flask_session_id = session.get('dnsrecon_session_id')
|
||||
|
||||
# Try to get existing session
|
||||
if current_flask_session_id:
|
||||
existing_scanner = session_manager.get_session(current_flask_session_id)
|
||||
if existing_scanner:
|
||||
return current_flask_session_id, existing_scanner
|
||||
|
||||
# Create new session if none exists
|
||||
print("Creating new session as none was found...")
|
||||
new_session_id = session_manager.create_session()
|
||||
new_scanner = session_manager.get_session(new_session_id)
|
||||
|
||||
if not new_scanner:
|
||||
raise Exception("Failed to create new scanner session")
|
||||
|
||||
# Store in Flask session
|
||||
session['dnsrecon_session_id'] = new_session_id
|
||||
session.permanent = True
|
||||
|
||||
return new_session_id, new_scanner
|
||||
|
||||
class CustomJSONEncoder(json.JSONEncoder):
|
||||
"""Custom JSON encoder to handle non-serializable objects."""
|
||||
|
||||
def default(self, obj):
|
||||
if isinstance(obj, datetime):
|
||||
return obj.isoformat()
|
||||
elif isinstance(obj, set):
|
||||
return list(obj)
|
||||
elif isinstance(obj, Decimal):
|
||||
return float(obj)
|
||||
elif hasattr(obj, '__dict__'):
|
||||
# For custom objects, try to serialize their dict representation
|
||||
try:
|
||||
return obj.__dict__
|
||||
except:
|
||||
return str(obj)
|
||||
elif hasattr(obj, 'value') and hasattr(obj, 'name'):
|
||||
# For enum objects
|
||||
return obj.value
|
||||
else:
|
||||
# For any other non-serializable object, convert to string
|
||||
return str(obj)
|
||||
@app.route('/')
|
||||
def index():
|
||||
"""Serve the main web interface."""
|
||||
@@ -57,57 +76,38 @@ def index():
|
||||
@app.route('/api/scan/start', methods=['POST'])
|
||||
def start_scan():
|
||||
"""
|
||||
Start a new reconnaissance scan. Creates a new isolated scanner if
|
||||
clear_graph is true, otherwise adds to the existing one.
|
||||
Starts a new reconnaissance scan.
|
||||
"""
|
||||
print("=== API: /api/scan/start called ===")
|
||||
|
||||
try:
|
||||
data = request.get_json()
|
||||
if not data or 'target_domain' not in data:
|
||||
return jsonify({'success': False, 'error': 'Missing target_domain in request'}), 400
|
||||
if not data or 'target' not in data:
|
||||
return jsonify({'success': False, 'error': 'Missing target in request'}), 400
|
||||
|
||||
target_domain = data['target_domain'].strip()
|
||||
target = data['target'].strip()
|
||||
max_depth = data.get('max_depth', config.default_recursion_depth)
|
||||
clear_graph = data.get('clear_graph', True)
|
||||
force_rescan_target = data.get('force_rescan_target', None)
|
||||
|
||||
print(f"Parsed - target_domain: '{target_domain}', max_depth: {max_depth}, clear_graph: {clear_graph}")
|
||||
|
||||
# Validation
|
||||
if not target_domain:
|
||||
return jsonify({'success': False, 'error': 'Target domain cannot be empty'}), 400
|
||||
if not target:
|
||||
return jsonify({'success': False, 'error': 'Target cannot be empty'}), 400
|
||||
if not is_valid_target(target):
|
||||
return jsonify({'success': False, 'error': 'Invalid target format.'}), 400
|
||||
if not isinstance(max_depth, int) or not 1 <= max_depth <= 5:
|
||||
return jsonify({'success': False, 'error': 'Max depth must be an integer between 1 and 5'}), 400
|
||||
|
||||
user_session_id, scanner = None, None
|
||||
|
||||
if clear_graph:
|
||||
print("Clear graph requested: Creating a new, isolated scanner session.")
|
||||
old_session_id = session.get('dnsrecon_session_id')
|
||||
if old_session_id:
|
||||
session_manager.terminate_session(old_session_id)
|
||||
|
||||
user_session_id = session_manager.create_session()
|
||||
session['dnsrecon_session_id'] = user_session_id
|
||||
session.permanent = True
|
||||
scanner = session_manager.get_session(user_session_id)
|
||||
else:
|
||||
print("Adding to existing graph: Reusing the current scanner session.")
|
||||
user_session_id, scanner = get_user_scanner()
|
||||
|
||||
if not scanner:
|
||||
return jsonify({'success': False, 'error': 'Failed to get or create a scanner instance.'}), 500
|
||||
return jsonify({'success': False, 'error': 'Failed to get scanner instance.'}), 500
|
||||
|
||||
print(f"Using scanner {id(scanner)} in session {user_session_id}")
|
||||
|
||||
success = scanner.start_scan(target_domain, max_depth, clear_graph=clear_graph)
|
||||
success = scanner.start_scan(target, max_depth, clear_graph=clear_graph, force_rescan_target=force_rescan_target)
|
||||
|
||||
if success:
|
||||
return jsonify({
|
||||
'success': True,
|
||||
'message': 'Scan started successfully',
|
||||
'scan_id': scanner.logger.session_id,
|
||||
'user_session_id': user_session_id,
|
||||
'user_session_id': user_session_id
|
||||
})
|
||||
else:
|
||||
return jsonify({
|
||||
@@ -116,193 +116,236 @@ def start_scan():
|
||||
}), 409
|
||||
|
||||
except Exception as e:
|
||||
print(f"ERROR: Exception in start_scan endpoint: {e}")
|
||||
traceback.print_exc()
|
||||
return jsonify({'success': False, 'error': f'Internal server error: {str(e)}'}), 500
|
||||
|
||||
@app.route('/api/scan/stop', methods=['POST'])
|
||||
def stop_scan():
|
||||
"""Stop the current scan with immediate GUI feedback."""
|
||||
print("=== API: /api/scan/stop called ===")
|
||||
|
||||
"""Stop the current scan."""
|
||||
try:
|
||||
# Get user-specific scanner
|
||||
user_session_id, scanner = get_user_scanner()
|
||||
print(f"Stopping scan for session: {user_session_id}")
|
||||
|
||||
if not scanner:
|
||||
return jsonify({
|
||||
'success': False,
|
||||
'error': 'No scanner found for session'
|
||||
}), 404
|
||||
return jsonify({'success': False, 'error': 'No scanner found for session'}), 404
|
||||
|
||||
# Ensure session ID is set
|
||||
if not scanner.session_id:
|
||||
scanner.session_id = user_session_id
|
||||
|
||||
# Use the stop mechanism
|
||||
success = scanner.stop_scan()
|
||||
|
||||
# Also set the Redis stop signal directly for extra reliability
|
||||
scanner.stop_scan()
|
||||
session_manager.set_stop_signal(user_session_id)
|
||||
|
||||
# Force immediate status update
|
||||
session_manager.update_scanner_status(user_session_id, 'stopped')
|
||||
|
||||
# Update the full scanner state
|
||||
session_manager.update_session_scanner(user_session_id, scanner)
|
||||
|
||||
print(f"Stop scan completed. Success: {success}, Scanner status: {scanner.status}")
|
||||
|
||||
return jsonify({
|
||||
'success': True,
|
||||
'message': 'Scan stop requested - termination initiated',
|
||||
'user_session_id': user_session_id,
|
||||
'scanner_status': scanner.status,
|
||||
'stop_method': 'cross_process'
|
||||
'message': 'Scan stop requested',
|
||||
'user_session_id': user_session_id
|
||||
})
|
||||
|
||||
except Exception as e:
|
||||
print(f"ERROR: Exception in stop_scan endpoint: {e}")
|
||||
traceback.print_exc()
|
||||
return jsonify({
|
||||
'success': False,
|
||||
'error': f'Internal server error: {str(e)}'
|
||||
}), 500
|
||||
return jsonify({'success': False, 'error': f'Internal server error: {str(e)}'}), 500
|
||||
|
||||
|
||||
@app.route('/api/scan/status', methods=['GET'])
|
||||
def get_scan_status():
|
||||
"""Get current scan status with error handling."""
|
||||
"""Get current scan status."""
|
||||
try:
|
||||
# Get user-specific scanner
|
||||
user_session_id, scanner = get_user_scanner()
|
||||
|
||||
if not scanner:
|
||||
# Return default idle status if no scanner
|
||||
return jsonify({
|
||||
'success': True,
|
||||
'status': {
|
||||
'status': 'idle',
|
||||
'target_domain': None,
|
||||
'current_depth': 0,
|
||||
'max_depth': 0,
|
||||
'current_indicator': '',
|
||||
'total_indicators_found': 0,
|
||||
'indicators_processed': 0,
|
||||
'progress_percentage': 0.0,
|
||||
'enabled_providers': [],
|
||||
'graph_statistics': {},
|
||||
'status': 'idle', 'target_domain': None, 'current_depth': 0,
|
||||
'max_depth': 0, 'progress_percentage': 0.0,
|
||||
'user_session_id': user_session_id
|
||||
}
|
||||
})
|
||||
|
||||
# Ensure session ID is set
|
||||
if not scanner.session_id:
|
||||
scanner.session_id = user_session_id
|
||||
|
||||
status = scanner.get_scan_status()
|
||||
status['user_session_id'] = user_session_id
|
||||
|
||||
# Additional debug info
|
||||
status['debug_info'] = {
|
||||
'scanner_object_id': id(scanner),
|
||||
'session_id_set': bool(scanner.session_id),
|
||||
'has_scan_thread': bool(scanner.scan_thread and scanner.scan_thread.is_alive())
|
||||
}
|
||||
|
||||
return jsonify({
|
||||
'success': True,
|
||||
'status': status
|
||||
})
|
||||
return jsonify({'success': True, 'status': status})
|
||||
|
||||
except Exception as e:
|
||||
print(f"ERROR: Exception in get_scan_status endpoint: {e}")
|
||||
traceback.print_exc()
|
||||
return jsonify({
|
||||
'success': False,
|
||||
'error': f'Internal server error: {str(e)}',
|
||||
'fallback_status': {
|
||||
'status': 'error',
|
||||
'target_domain': None,
|
||||
'current_depth': 0,
|
||||
'max_depth': 0,
|
||||
'progress_percentage': 0.0
|
||||
}
|
||||
'success': False, 'error': f'Internal server error: {str(e)}',
|
||||
'fallback_status': {'status': 'error', 'progress_percentage': 0.0}
|
||||
}), 500
|
||||
|
||||
|
||||
|
||||
@app.route('/api/graph', methods=['GET'])
|
||||
def get_graph_data():
|
||||
"""Get current graph data with error handling."""
|
||||
"""Get current graph data."""
|
||||
try:
|
||||
# Get user-specific scanner
|
||||
user_session_id, scanner = get_user_scanner()
|
||||
|
||||
if not scanner:
|
||||
# Return empty graph if no scanner
|
||||
return jsonify({
|
||||
'success': True,
|
||||
'graph': {
|
||||
'nodes': [],
|
||||
'edges': [],
|
||||
'statistics': {
|
||||
'node_count': 0,
|
||||
'edge_count': 0,
|
||||
'creation_time': datetime.now(timezone.utc).isoformat(),
|
||||
'last_modified': datetime.now(timezone.utc).isoformat()
|
||||
}
|
||||
},
|
||||
'user_session_id': user_session_id
|
||||
})
|
||||
|
||||
graph_data = scanner.get_graph_data()
|
||||
return jsonify({
|
||||
'success': True,
|
||||
'graph': graph_data,
|
||||
'user_session_id': user_session_id
|
||||
})
|
||||
|
||||
except Exception as e:
|
||||
print(f"ERROR: Exception in get_graph_data endpoint: {e}")
|
||||
traceback.print_exc()
|
||||
return jsonify({
|
||||
'success': False,
|
||||
'error': f'Internal server error: {str(e)}',
|
||||
'fallback_graph': {
|
||||
'nodes': [],
|
||||
'edges': [],
|
||||
empty_graph = {
|
||||
'nodes': [], 'edges': [],
|
||||
'statistics': {'node_count': 0, 'edge_count': 0}
|
||||
}
|
||||
|
||||
if not scanner:
|
||||
return jsonify({'success': True, 'graph': empty_graph, 'user_session_id': user_session_id})
|
||||
|
||||
graph_data = scanner.get_graph_data() or empty_graph
|
||||
|
||||
return jsonify({'success': True, 'graph': graph_data, 'user_session_id': user_session_id})
|
||||
|
||||
except Exception as e:
|
||||
traceback.print_exc()
|
||||
return jsonify({
|
||||
'success': False, 'error': f'Internal server error: {str(e)}',
|
||||
'fallback_graph': {'nodes': [], 'edges': [], 'statistics': {}}
|
||||
}), 500
|
||||
|
||||
@app.route('/api/graph/large-entity/extract', methods=['POST'])
|
||||
def extract_from_large_entity():
|
||||
"""Extract a node from a large entity."""
|
||||
try:
|
||||
data = request.get_json()
|
||||
large_entity_id = data.get('large_entity_id')
|
||||
node_id = data.get('node_id')
|
||||
|
||||
if not large_entity_id or not node_id:
|
||||
return jsonify({'success': False, 'error': 'Missing required parameters'}), 400
|
||||
|
||||
user_session_id, scanner = get_user_scanner()
|
||||
if not scanner:
|
||||
return jsonify({'success': False, 'error': 'No active session found'}), 404
|
||||
|
||||
success = scanner.extract_node_from_large_entity(large_entity_id, node_id)
|
||||
|
||||
if success:
|
||||
session_manager.update_session_scanner(user_session_id, scanner)
|
||||
return jsonify({'success': True, 'message': f'Node {node_id} extracted successfully.'})
|
||||
else:
|
||||
return jsonify({'success': False, 'error': f'Failed to extract node {node_id}.'}), 500
|
||||
|
||||
except Exception as e:
|
||||
traceback.print_exc()
|
||||
return jsonify({'success': False, 'error': f'Internal server error: {str(e)}'}), 500
|
||||
|
||||
@app.route('/api/graph/node/<node_id>', methods=['DELETE'])
|
||||
def delete_graph_node(node_id):
|
||||
"""Delete a node from the graph."""
|
||||
try:
|
||||
user_session_id, scanner = get_user_scanner()
|
||||
if not scanner:
|
||||
return jsonify({'success': False, 'error': 'No active session found'}), 404
|
||||
|
||||
success = scanner.graph.remove_node(node_id)
|
||||
|
||||
if success:
|
||||
session_manager.update_session_scanner(user_session_id, scanner)
|
||||
return jsonify({'success': True, 'message': f'Node {node_id} deleted successfully.'})
|
||||
else:
|
||||
return jsonify({'success': False, 'error': f'Node {node_id} not found.'}), 404
|
||||
|
||||
except Exception as e:
|
||||
traceback.print_exc()
|
||||
return jsonify({'success': False, 'error': f'Internal server error: {str(e)}'}), 500
|
||||
|
||||
|
||||
@app.route('/api/graph/revert', methods=['POST'])
|
||||
def revert_graph_action():
|
||||
"""Reverts a graph action, such as re-adding a deleted node."""
|
||||
try:
|
||||
data = request.get_json()
|
||||
if not data or 'type' not in data or 'data' not in data:
|
||||
return jsonify({'success': False, 'error': 'Invalid revert request format'}), 400
|
||||
|
||||
user_session_id, scanner = get_user_scanner()
|
||||
if not scanner:
|
||||
return jsonify({'success': False, 'error': 'No active session found'}), 404
|
||||
|
||||
action_type = data['type']
|
||||
action_data = data['data']
|
||||
|
||||
if action_type == 'delete':
|
||||
node_to_add = action_data.get('node')
|
||||
if node_to_add:
|
||||
scanner.graph.add_node(
|
||||
node_id=node_to_add['id'],
|
||||
node_type=NodeType(node_to_add['type']),
|
||||
attributes=node_to_add.get('attributes'),
|
||||
description=node_to_add.get('description'),
|
||||
metadata=node_to_add.get('metadata')
|
||||
)
|
||||
|
||||
edges_to_add = action_data.get('edges', [])
|
||||
for edge in edges_to_add:
|
||||
if scanner.graph.graph.has_node(edge['from']) and scanner.graph.graph.has_node(edge['to']):
|
||||
scanner.graph.add_edge(
|
||||
source_id=edge['from'], target_id=edge['to'],
|
||||
relationship_type=edge['metadata']['relationship_type'],
|
||||
confidence_score=edge['metadata']['confidence_score'],
|
||||
source_provider=edge['metadata']['source_provider'],
|
||||
raw_data=edge.get('raw_data', {})
|
||||
)
|
||||
|
||||
session_manager.update_session_scanner(user_session_id, scanner)
|
||||
return jsonify({'success': True, 'message': 'Delete action reverted successfully.'})
|
||||
|
||||
return jsonify({'success': False, 'error': f'Unknown revert action type: {action_type}'}), 400
|
||||
|
||||
except Exception as e:
|
||||
traceback.print_exc()
|
||||
return jsonify({'success': False, 'error': f'Internal server error: {str(e)}'}), 500
|
||||
|
||||
|
||||
@app.route('/api/export', methods=['GET'])
|
||||
def export_results():
|
||||
"""Export complete scan results as downloadable JSON for the user session."""
|
||||
"""Export scan results as a JSON file with improved error handling."""
|
||||
try:
|
||||
# Get user-specific scanner
|
||||
user_session_id, scanner = get_user_scanner()
|
||||
|
||||
# Get complete results
|
||||
results = scanner.export_results()
|
||||
if not scanner:
|
||||
return jsonify({'success': False, 'error': 'No active scanner session found'}), 404
|
||||
|
||||
# Add session information to export
|
||||
# Get export data with error handling
|
||||
try:
|
||||
results = scanner.export_results()
|
||||
except Exception as e:
|
||||
return jsonify({'success': False, 'error': f'Failed to gather export data: {str(e)}'}), 500
|
||||
|
||||
# Add export metadata
|
||||
results['export_metadata'] = {
|
||||
'user_session_id': user_session_id,
|
||||
'export_timestamp': datetime.now(timezone.utc).isoformat(),
|
||||
'export_type': 'user_session_results'
|
||||
'export_version': '1.0.0',
|
||||
'forensic_integrity': 'maintained'
|
||||
}
|
||||
|
||||
# Create filename with timestamp
|
||||
# Generate filename with forensic naming convention
|
||||
timestamp = datetime.now(timezone.utc).strftime('%Y%m%d_%H%M%S')
|
||||
target = scanner.current_target or 'unknown'
|
||||
filename = f"dnsrecon_{target}_{timestamp}_{user_session_id[:8]}.json"
|
||||
# Sanitize target for filename
|
||||
safe_target = "".join(c for c in target if c.isalnum() or c in ('-', '_', '.')).rstrip()
|
||||
filename = f"dnsrecon_{safe_target}_{timestamp}.json"
|
||||
|
||||
# Create in-memory file
|
||||
json_data = json.dumps(results, indent=2, ensure_ascii=False)
|
||||
# Serialize with custom encoder and error handling
|
||||
try:
|
||||
json_data = json.dumps(results, indent=2, cls=CustomJSONEncoder, ensure_ascii=False)
|
||||
except Exception as e:
|
||||
# If custom encoder fails, try a more aggressive approach
|
||||
try:
|
||||
# Convert problematic objects to strings recursively
|
||||
cleaned_results = _clean_for_json(results)
|
||||
json_data = json.dumps(cleaned_results, indent=2, ensure_ascii=False)
|
||||
except Exception as e2:
|
||||
return jsonify({
|
||||
'success': False,
|
||||
'error': f'JSON serialization failed: {str(e2)}'
|
||||
}), 500
|
||||
|
||||
# Create file object
|
||||
file_obj = io.BytesIO(json_data.encode('utf-8'))
|
||||
|
||||
return send_file(
|
||||
@@ -313,70 +356,70 @@ def export_results():
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
print(f"ERROR: Exception in export_results endpoint: {e}")
|
||||
traceback.print_exc()
|
||||
return jsonify({
|
||||
'success': False,
|
||||
'error': f'Export failed: {str(e)}'
|
||||
'error': f'Export failed: {str(e)}',
|
||||
'error_type': type(e).__name__
|
||||
}), 500
|
||||
|
||||
def _clean_for_json(obj, max_depth=10, current_depth=0):
|
||||
"""
|
||||
Recursively clean an object to make it JSON serializable.
|
||||
Handles circular references and problematic object types.
|
||||
"""
|
||||
if current_depth > max_depth:
|
||||
return f"<max_depth_exceeded_{type(obj).__name__}>"
|
||||
|
||||
@app.route('/api/providers', methods=['GET'])
|
||||
def get_providers():
|
||||
"""Get information about available providers for the user session."""
|
||||
|
||||
if obj is None or isinstance(obj, (bool, int, float, str)):
|
||||
return obj
|
||||
elif isinstance(obj, datetime):
|
||||
return obj.isoformat()
|
||||
elif isinstance(obj, (set, frozenset)):
|
||||
return list(obj)
|
||||
elif isinstance(obj, dict):
|
||||
cleaned = {}
|
||||
for key, value in obj.items():
|
||||
try:
|
||||
# Get user-specific scanner
|
||||
user_session_id, scanner = get_user_scanner()
|
||||
|
||||
if scanner:
|
||||
completed_tasks = scanner.indicators_completed
|
||||
enqueued_tasks = len(scanner.task_queue)
|
||||
print(f"DEBUG: Tasks - Completed: {completed_tasks}, Enqueued: {enqueued_tasks}")
|
||||
# Ensure key is string
|
||||
clean_key = str(key) if not isinstance(key, str) else key
|
||||
cleaned[clean_key] = _clean_for_json(value, max_depth, current_depth + 1)
|
||||
except Exception:
|
||||
cleaned[str(key)] = f"<serialization_error_{type(value).__name__}>"
|
||||
return cleaned
|
||||
elif isinstance(obj, (list, tuple)):
|
||||
cleaned = []
|
||||
for item in obj:
|
||||
try:
|
||||
cleaned.append(_clean_for_json(item, max_depth, current_depth + 1))
|
||||
except Exception:
|
||||
cleaned.append(f"<serialization_error_{type(item).__name__}>")
|
||||
return cleaned
|
||||
elif hasattr(obj, '__dict__'):
|
||||
try:
|
||||
return _clean_for_json(obj.__dict__, max_depth, current_depth + 1)
|
||||
except Exception:
|
||||
return str(obj)
|
||||
elif hasattr(obj, 'value'):
|
||||
# For enum-like objects
|
||||
return obj.value
|
||||
else:
|
||||
print("DEBUG: No active scanner session found.")
|
||||
|
||||
provider_info = scanner.get_provider_info()
|
||||
|
||||
return jsonify({
|
||||
'success': True,
|
||||
'providers': provider_info,
|
||||
'user_session_id': user_session_id
|
||||
})
|
||||
|
||||
except Exception as e:
|
||||
print(f"ERROR: Exception in get_providers endpoint: {e}")
|
||||
traceback.print_exc()
|
||||
return jsonify({
|
||||
'success': False,
|
||||
'error': f'Internal server error: {str(e)}'
|
||||
}), 500
|
||||
|
||||
return str(obj)
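As a quick, hypothetical illustration of what `_clean_for_json` produces for values that `json.dumps` cannot handle directly:

```python
from datetime import datetime, timezone

raw = {
    "hostnames": {"a.example.com", "b.example.com"},          # a set
    "first_seen": datetime(2025, 1, 1, tzinfo=timezone.utc),  # a datetime
}
print(_clean_for_json(raw))
# e.g. {'hostnames': ['a.example.com', 'b.example.com'],
#       'first_seen': '2025-01-01T00:00:00+00:00'}
# (list order may vary because sets are unordered)
```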
|
||||
|
||||
@app.route('/api/config/api-keys', methods=['POST'])
|
||||
def set_api_keys():
|
||||
"""
|
||||
Set API keys for providers for the user session only.
|
||||
"""
|
||||
"""Set API keys for the current session."""
|
||||
try:
|
||||
data = request.get_json()
|
||||
|
||||
if data is None:
|
||||
return jsonify({
|
||||
'success': False,
|
||||
'error': 'No API keys provided'
|
||||
}), 400
|
||||
return jsonify({'success': False, 'error': 'No API keys provided'}), 400
|
||||
|
||||
# Get user-specific scanner and config
|
||||
user_session_id, scanner = get_user_scanner()
|
||||
session_config = scanner.config
|
||||
|
||||
updated_providers = []
|
||||
|
||||
# Iterate over the API keys provided in the request data
|
||||
for provider_name, api_key in data.items():
|
||||
# This allows us to both set and clear keys. The config
|
||||
# handles enabling/disabling based on if the key is empty.
|
||||
api_key_value = str(api_key or '').strip()
|
||||
success = session_config.set_api_key(provider_name.lower(), api_key_value)
|
||||
|
||||
@@ -384,60 +427,136 @@ def set_api_keys():
|
||||
updated_providers.append(provider_name)
|
||||
|
||||
if updated_providers:
|
||||
# Reinitialize scanner providers to apply the new keys
|
||||
scanner._initialize_providers()
|
||||
|
||||
# Persist the updated scanner object back to the user's session
|
||||
session_manager.update_session_scanner(user_session_id, scanner)
|
||||
|
||||
return jsonify({
|
||||
'success': True,
|
||||
'message': f'API keys updated for session {user_session_id}: {", ".join(updated_providers)}',
|
||||
'updated_providers': updated_providers,
|
||||
'message': f'API keys updated for: {", ".join(updated_providers)}',
|
||||
'user_session_id': user_session_id
|
||||
})
|
||||
else:
|
||||
return jsonify({
|
||||
'success': False,
|
||||
'error': 'No valid API keys were provided or provider names were incorrect.'
|
||||
}), 400
|
||||
return jsonify({'success': False, 'error': 'No valid API keys were provided.'}), 400
|
||||
|
||||
except Exception as e:
|
||||
print(f"ERROR: Exception in set_api_keys endpoint: {e}")
|
||||
traceback.print_exc()
|
||||
return jsonify({'success': False, 'error': f'Internal server error: {str(e)}'}), 500
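For example, a session-scoped Shodan key could be supplied like this (illustrative request; provider names are lowercased server-side):

```bash
curl -X POST http://127.0.0.1:5000/api/config/api-keys \
  -H "Content-Type: application/json" \
  -d '{"shodan": "your_shodan_api_key_here"}'
```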
|
||||
|
||||
@app.route('/api/providers', methods=['GET'])
|
||||
def get_providers():
|
||||
"""Get enhanced information about available providers including API key sources."""
|
||||
try:
|
||||
user_session_id, scanner = get_user_scanner()
|
||||
base_provider_info = scanner.get_provider_info()
|
||||
|
||||
# Enhance provider info with API key source information
|
||||
enhanced_provider_info = {}
|
||||
|
||||
for provider_name, info in base_provider_info.items():
|
||||
enhanced_info = dict(info) # Copy base info
|
||||
|
||||
if info['requires_api_key']:
|
||||
# Determine API key source and configuration status
|
||||
api_key = scanner.config.get_api_key(provider_name)
|
||||
backend_api_key = os.getenv(f'{provider_name.upper()}_API_KEY')
|
||||
|
||||
if backend_api_key:
|
||||
# API key configured via backend/environment
|
||||
enhanced_info.update({
|
||||
'api_key_configured': True,
|
||||
'api_key_source': 'backend',
|
||||
'api_key_help': f'API key configured via environment variable {provider_name.upper()}_API_KEY'
|
||||
})
|
||||
elif api_key:
|
||||
# API key configured via web interface
|
||||
enhanced_info.update({
|
||||
'api_key_configured': True,
|
||||
'api_key_source': 'frontend',
|
||||
'api_key_help': f'API key set via web interface (session-only)'
|
||||
})
|
||||
else:
|
||||
# No API key configured
|
||||
enhanced_info.update({
|
||||
'api_key_configured': False,
|
||||
'api_key_source': None,
|
||||
'api_key_help': f'Requires API key to enable {info["display_name"]} integration'
|
||||
})
|
||||
else:
|
||||
# Provider doesn't require API key
|
||||
enhanced_info.update({
|
||||
'api_key_configured': True, # Always "configured" for non-API providers
|
||||
'api_key_source': None,
|
||||
'api_key_help': None
|
||||
})
|
||||
|
||||
enhanced_provider_info[provider_name] = enhanced_info
|
||||
|
||||
return jsonify({
|
||||
'success': False,
|
||||
'error': f'Internal server error: {str(e)}'
|
||||
}), 500
|
||||
'success': True,
|
||||
'providers': enhanced_provider_info,
|
||||
'user_session_id': user_session_id
|
||||
})
|
||||
|
||||
except Exception as e:
|
||||
traceback.print_exc()
|
||||
return jsonify({'success': False, 'error': f'Internal server error: {str(e)}'}), 500
|
||||
|
||||
|
||||
@app.route('/api/config/providers', methods=['POST'])
|
||||
def configure_providers():
|
||||
"""Configure provider settings (enable/disable)."""
|
||||
try:
|
||||
data = request.get_json()
|
||||
if data is None:
|
||||
return jsonify({'success': False, 'error': 'No provider settings provided'}), 400
|
||||
|
||||
user_session_id, scanner = get_user_scanner()
|
||||
session_config = scanner.config
|
||||
|
||||
updated_providers = []
|
||||
|
||||
for provider_name, settings in data.items():
|
||||
provider_name_clean = provider_name.lower().strip()
|
||||
|
||||
if 'enabled' in settings:
|
||||
# Update the enabled state in session config
|
||||
session_config.enabled_providers[provider_name_clean] = settings['enabled']
|
||||
updated_providers.append(provider_name_clean)
|
||||
|
||||
if updated_providers:
|
||||
# Reinitialize providers with new settings
|
||||
scanner._initialize_providers()
|
||||
session_manager.update_session_scanner(user_session_id, scanner)
|
||||
|
||||
return jsonify({
|
||||
'success': True,
|
||||
'message': f'Provider settings updated for: {", ".join(updated_providers)}',
|
||||
'user_session_id': user_session_id
|
||||
})
|
||||
else:
|
||||
return jsonify({'success': False, 'error': 'No valid provider settings were provided.'}), 400
|
||||
|
||||
except Exception as e:
|
||||
traceback.print_exc()
|
||||
return jsonify({'success': False, 'error': f'Internal server error: {str(e)}'}), 500
|
||||
|
||||
|
||||
|
||||
@app.errorhandler(404)
|
||||
def not_found(error):
|
||||
"""Handle 404 errors."""
|
||||
return jsonify({
|
||||
'success': False,
|
||||
'error': 'Endpoint not found'
|
||||
}), 404
|
||||
return jsonify({'success': False, 'error': 'Endpoint not found'}), 404
|
||||
|
||||
|
||||
@app.errorhandler(500)
|
||||
def internal_error(error):
|
||||
"""Handle 500 errors."""
|
||||
print(f"ERROR: 500 Internal Server Error: {error}")
|
||||
traceback.print_exc()
|
||||
return jsonify({
|
||||
'success': False,
|
||||
'error': 'Internal server error'
|
||||
}), 500
|
||||
return jsonify({'success': False, 'error': 'Internal server error'}), 500
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
print("Starting DNSRecon Flask application with user session support...")
|
||||
|
||||
# Load configuration from environment
|
||||
config.load_from_env()
|
||||
|
||||
# Start Flask application
|
||||
print(f"Starting server on {config.flask_host}:{config.flask_port}")
|
||||
app.run(
|
||||
host=config.flask_host,
|
||||
port=config.flask_port,
|
||||
|
||||
66
config.py
@@ -1,3 +1,5 @@
|
||||
# dnsrecon-reduced/config.py
|
||||
|
||||
"""
|
||||
Configuration management for DNSRecon tool.
|
||||
Handles API key storage, rate limiting, and default settings.
|
||||
@@ -19,18 +21,17 @@ class Config:
|
||||
|
||||
# --- General Settings ---
|
||||
self.default_recursion_depth = 2
|
||||
self.default_timeout = 15
|
||||
self.max_concurrent_requests = 5
|
||||
self.default_timeout = 60
|
||||
self.max_concurrent_requests = 1
|
||||
self.large_entity_threshold = 100
|
||||
self.max_retries_per_target = 3
|
||||
self.cache_expiry_hours = 12
|
||||
self.max_retries_per_target = 8
|
||||
|
||||
# --- Provider Caching Settings ---
|
||||
self.cache_timeout_hours = 6 # Provider-specific cache timeout
|
||||
|
||||
# --- Rate Limiting (requests per minute) ---
|
||||
self.rate_limits = {
|
||||
'crtsh': 30,
|
||||
'crtsh': 5,
|
||||
'shodan': 60,
|
||||
'dns': 100
|
||||
}
|
||||
@@ -67,7 +68,6 @@ class Config:
|
||||
self.max_concurrent_requests = int(os.getenv('MAX_CONCURRENT_REQUESTS', self.max_concurrent_requests))
|
||||
self.large_entity_threshold = int(os.getenv('LARGE_ENTITY_THRESHOLD', self.large_entity_threshold))
|
||||
self.max_retries_per_target = int(os.getenv('MAX_RETRIES_PER_TARGET', self.max_retries_per_target))
|
||||
self.cache_expiry_hours = int(os.getenv('CACHE_EXPIRY_HOURS', self.cache_expiry_hours))
|
||||
self.cache_timeout_hours = int(os.getenv('CACHE_TIMEOUT_HOURS', self.cache_timeout_hours))
|
||||
|
||||
# Override Flask and session settings
|
||||
@@ -85,6 +85,60 @@ class Config:
|
||||
self.enabled_providers[provider] = True
|
||||
return True
|
||||
|
||||
def set_provider_enabled(self, provider: str, enabled: bool) -> bool:
|
||||
"""
|
||||
Set provider enabled status for the session.
|
||||
|
||||
Args:
|
||||
provider: Provider name
|
||||
enabled: Whether the provider should be enabled
|
||||
|
||||
Returns:
|
||||
True if the setting was applied successfully
|
||||
"""
|
||||
provider_key = provider.lower()
|
||||
self.enabled_providers[provider_key] = enabled
|
||||
return True
|
||||
|
||||
def get_provider_enabled(self, provider: str) -> bool:
|
||||
"""
|
||||
Get provider enabled status.
|
||||
|
||||
Args:
|
||||
provider: Provider name
|
||||
|
||||
Returns:
|
||||
True if the provider is enabled
|
||||
"""
|
||||
provider_key = provider.lower()
|
||||
return self.enabled_providers.get(provider_key, True) # Default to enabled
|
||||
|
||||
def bulk_set_provider_settings(self, provider_settings: dict) -> dict:
|
||||
"""
|
||||
Set multiple provider settings at once.
|
||||
|
||||
Args:
|
||||
provider_settings: Dict of provider_name -> {'enabled': bool, ...}
|
||||
|
||||
Returns:
|
||||
Dict with results for each provider
|
||||
"""
|
||||
results = {}
|
||||
|
||||
for provider_name, settings in provider_settings.items():
|
||||
provider_key = provider_name.lower()
|
||||
|
||||
try:
|
||||
if 'enabled' in settings:
|
||||
self.enabled_providers[provider_key] = settings['enabled']
|
||||
results[provider_key] = {'success': True, 'enabled': settings['enabled']}
|
||||
else:
|
||||
results[provider_key] = {'success': False, 'error': 'No enabled setting provided'}
|
||||
except Exception as e:
|
||||
results[provider_key] = {'success': False, 'error': str(e)}
|
||||
|
||||
return results
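A hypothetical usage sketch of these session-level provider toggles (assuming `session_config` is a `Config` instance, as in the Flask endpoints):

```python
session_config.set_provider_enabled('shodan', False)
print(session_config.get_provider_enabled('shodan'))  # False

results = session_config.bulk_set_provider_settings({
    'dns': {'enabled': True},
    'crtsh': {},  # missing 'enabled' key is reported as an error
})
print(results)
# {'dns': {'success': True, 'enabled': True},
#  'crtsh': {'success': False, 'error': 'No enabled setting provided'}}
```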
|
||||
|
||||
def get_api_key(self, provider: str) -> Optional[str]:
|
||||
"""Get API key for a provider."""
|
||||
return self.api_keys.get(provider)
|
||||
|
||||
@@ -1,8 +1,10 @@
|
||||
# core/graph_manager.py
|
||||
# dnsrecon-reduced/core/graph_manager.py
|
||||
|
||||
"""
|
||||
Graph data model for DNSRecon using NetworkX.
|
||||
Manages in-memory graph storage with confidence scoring and forensic metadata.
|
||||
Now fully compatible with the unified ProviderResult data model.
|
||||
UPDATED: Fixed correlation exclusion keys to match actual attribute names.
|
||||
"""
|
||||
import re
|
||||
from datetime import datetime, timezone
|
||||
@@ -16,7 +18,8 @@ class NodeType(Enum):
|
||||
"""Enumeration of supported node types."""
|
||||
DOMAIN = "domain"
|
||||
IP = "ip"
|
||||
ASN = "asn"
|
||||
ISP = "isp"
|
||||
CA = "ca"
|
||||
LARGE_ENTITY = "large_entity"
|
||||
CORRELATION_OBJECT = "correlation_object"
|
||||
|
||||
@@ -28,6 +31,7 @@ class GraphManager:
|
||||
"""
|
||||
Thread-safe graph manager for DNSRecon infrastructure mapping.
|
||||
Uses NetworkX for in-memory graph storage with confidence scoring.
|
||||
Compatible with unified ProviderResult data model.
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
@@ -39,6 +43,31 @@ class GraphManager:
|
||||
# Compile regex for date filtering for efficiency
|
||||
self.date_pattern = re.compile(r'^\d{4}-\d{2}-\d{2}[ T]\d{2}:\d{2}:\d{2}')
|
||||
|
||||
# FIXED: Exclude cert_issuer_name since we already create proper CA relationships
|
||||
self.EXCLUDED_KEYS = [
|
||||
# Certificate metadata that creates noise or has dedicated node types
|
||||
'cert_source', # Always 'crtsh' for crtsh provider
|
||||
'cert_common_name',
|
||||
'cert_validity_period_days', # Numerical, not useful for correlation
|
||||
'cert_issuer_name', # FIXED: Has dedicated CA nodes, don't correlate
|
||||
#'cert_certificate_id', # Unique per certificate
|
||||
#'cert_serial_number', # Unique per certificate
|
||||
'cert_entry_timestamp', # Timestamp, filtered by date regex anyway
|
||||
'cert_not_before', # Date, filtered by date regex anyway
|
||||
'cert_not_after', # Date, filtered by date regex anyway
|
||||
# DNS metadata that creates noise
|
||||
'dns_ttl', # TTL values are not meaningful for correlation
|
||||
# Shodan metadata that might create noise
|
||||
'timestamp', # Generic timestamp fields
|
||||
'last_update', # Generic timestamp fields
|
||||
#'org', # Too generic, causes false correlations
|
||||
#'isp', # Too generic, causes false correlations
|
||||
# Generic noisy attributes
|
||||
'updated_timestamp', # Any timestamp field
|
||||
'discovery_timestamp', # Any timestamp field
|
||||
'query_timestamp', # Any timestamp field
|
||||
]
|
||||
|
||||
def __getstate__(self):
|
||||
"""Prepare GraphManager for pickling, excluding compiled regex."""
|
||||
state = self.__dict__.copy()
|
||||
@@ -52,245 +81,138 @@ class GraphManager:
|
||||
self.__dict__.update(state)
|
||||
self.date_pattern = re.compile(r'^\d{4}-\d{2}-\d{2}[ T]\d{2}:\d{2}:\d{2}')
|
||||
|
||||
def _update_correlation_index(self, node_id: str, data: Any, path: List[str] = [], parent_attr: str = ""):
|
||||
"""Recursively traverse metadata and add hashable values to the index with better path tracking."""
|
||||
if path is None:
|
||||
path = []
|
||||
|
||||
if isinstance(data, dict):
|
||||
for key, value in data.items():
|
||||
self._update_correlation_index(node_id, value, path + [key], key)
|
||||
elif isinstance(data, list):
|
||||
for i, item in enumerate(data):
|
||||
# Instead of just using [i], include the parent attribute context
|
||||
list_path_component = f"[{i}]" if not parent_attr else f"{parent_attr}[{i}]"
|
||||
self._update_correlation_index(node_id, item, path + [list_path_component], parent_attr)
|
||||
else:
|
||||
self._add_to_correlation_index(node_id, data, ".".join(path), parent_attr)
|
||||
|
||||
def _add_to_correlation_index(self, node_id: str, value: Any, path_str: str, parent_attr: str = ""):
|
||||
"""Add a hashable value to the correlation index, filtering out noise."""
|
||||
if not isinstance(value, (str, int, float, bool)) or value is None:
|
||||
def process_correlations_for_node(self, node_id: str):
|
||||
"""
|
||||
UPDATED: Process correlations for a given node with enhanced tracking.
|
||||
Now properly tracks which attribute/provider created each correlation.
|
||||
"""
|
||||
if not self.graph.has_node(node_id):
|
||||
return
|
||||
|
||||
# Ignore certain paths that contain noisy, non-unique identifiers
|
||||
if any(keyword in path_str.lower() for keyword in ['count', 'total', 'timestamp', 'date']):
|
||||
return
|
||||
node_attributes = self.graph.nodes[node_id].get('attributes', [])
|
||||
|
||||
# Filter out common low-entropy values and date-like strings
|
||||
if isinstance(value, str):
|
||||
# FIXED: Prevent correlation on date/time strings.
|
||||
if self.date_pattern.match(value):
|
||||
return
|
||||
if len(value) < 4 or value.lower() in ['true', 'false', 'unknown', 'none', 'crt.sh']:
|
||||
return
|
||||
elif isinstance(value, int) and (abs(value) < 1024 or abs(value) > 65535):
|
||||
return # Ignore small integers and common port numbers
|
||||
elif isinstance(value, bool):
|
||||
return # Ignore boolean values
|
||||
# Process each attribute for potential correlations
|
||||
for attr in node_attributes:
|
||||
attr_name = attr.get('name')
|
||||
attr_value = attr.get('value')
|
||||
attr_provider = attr.get('provider', 'unknown')
|
||||
|
||||
# Add the valuable correlation data to the index
|
||||
if value not in self.correlation_index:
|
||||
self.correlation_index[value] = {}
|
||||
if node_id not in self.correlation_index[value]:
|
||||
self.correlation_index[value][node_id] = []
|
||||
# IMPROVED: More comprehensive exclusion logic
|
||||
should_exclude = (
|
||||
# Check against excluded keys (exact match or substring)
|
||||
any(excluded_key in attr_name or attr_name == excluded_key for excluded_key in self.EXCLUDED_KEYS) or
|
||||
# Invalid value types
|
||||
not isinstance(attr_value, (str, int, float, bool)) or
|
||||
attr_value is None or
|
||||
# Boolean values are not useful for correlation
|
||||
isinstance(attr_value, bool) or
|
||||
# String values that are too short or are dates
|
||||
(isinstance(attr_value, str) and (
|
||||
len(attr_value) < 4 or
|
||||
self.date_pattern.match(attr_value) or
|
||||
# Exclude common generic values that create noise
|
||||
attr_value.lower() in ['unknown', 'none', 'null', 'n/a', 'true', 'false', '0', '1']
|
||||
)) or
|
||||
# Numerical values that are likely to be unique identifiers
|
||||
(isinstance(attr_value, (int, float)) and (
|
||||
attr_value == 0 or # Zero values are not meaningful
|
||||
attr_value == 1 or # One values are too common
|
||||
abs(attr_value) > 1000000 # Very large numbers are likely IDs
|
||||
))
|
||||
)
|
||||
|
||||
# Store both the full path and the parent attribute for better edge labeling
|
||||
correlation_entry = {
|
||||
'path': path_str,
|
||||
'parent_attr': parent_attr,
|
||||
'meaningful_attr': self._extract_meaningful_attribute(path_str, parent_attr)
|
||||
}
|
||||
|
||||
if correlation_entry not in self.correlation_index[value][node_id]:
|
||||
self.correlation_index[value][node_id].append(correlation_entry)
|
||||
|
||||
def _extract_meaningful_attribute(self, path_str: str, parent_attr: str = "") -> str:
|
||||
"""Extract the most meaningful attribute name from a path string."""
|
||||
if not path_str:
|
||||
return "unknown"
|
||||
|
||||
path_parts = path_str.split('.')
|
||||
|
||||
# Look for the last non-array-index part
|
||||
for part in reversed(path_parts):
|
||||
# Skip array indices like [0], [1], etc.
|
||||
if not (part.startswith('[') and part.endswith(']') and part[1:-1].isdigit()):
|
||||
# Clean up compound names like "hostnames[0]" to just "hostnames"
|
||||
clean_part = re.sub(r'\[\d+\]$', '', part)
|
||||
if clean_part:
|
||||
return clean_part
|
||||
|
||||
# Fallback to parent attribute if available
|
||||
if parent_attr:
|
||||
return parent_attr
|
||||
|
||||
# Last resort - use the first meaningful part
|
||||
for part in path_parts:
|
||||
if not (part.startswith('[') and part.endswith(']') and part[1:-1].isdigit()):
|
||||
clean_part = re.sub(r'\[\d+\]$', '', part)
|
||||
if clean_part:
|
||||
return clean_part
|
||||
|
||||
return "correlation"
|
||||
|
||||
def _check_for_correlations(self, new_node_id: str, data: Any, path: List[str] = [], parent_attr: str = "") -> List[Dict]:
|
||||
"""Recursively traverse metadata to find correlations with existing data."""
|
||||
if path is None:
|
||||
path = []
|
||||
|
||||
all_correlations = []
|
||||
if isinstance(data, dict):
|
||||
for key, value in data.items():
|
||||
if key == 'source': # Avoid correlating on the provider name
|
||||
if should_exclude:
|
||||
continue
|
||||
all_correlations.extend(self._check_for_correlations(new_node_id, value, path + [key], key))
|
||||
elif isinstance(data, list):
|
||||
for i, item in enumerate(data):
|
||||
list_path_component = f"[{i}]" if not parent_attr else f"{parent_attr}[{i}]"
|
||||
all_correlations.extend(self._check_for_correlations(new_node_id, item, path + [list_path_component], parent_attr))
|
||||
else:
|
||||
value = data
|
||||
if value in self.correlation_index:
|
||||
existing_nodes_with_paths = self.correlation_index[value]
|
||||
unique_nodes = set(existing_nodes_with_paths.keys())
|
||||
unique_nodes.add(new_node_id)
|
||||
|
||||
if len(unique_nodes) < 2:
|
||||
return all_correlations # Correlation must involve at least two distinct nodes
|
||||
|
||||
new_source = {
|
||||
'node_id': new_node_id,
|
||||
'path': ".".join(path),
|
||||
'parent_attr': parent_attr,
|
||||
'meaningful_attr': self._extract_meaningful_attribute(".".join(path), parent_attr)
|
||||
# Initialize correlation tracking for this value
|
||||
if attr_value not in self.correlation_index:
|
||||
self.correlation_index[attr_value] = {
|
||||
'nodes': set(),
|
||||
'sources': [] # Track which provider/attribute combinations contributed
|
||||
}
|
||||
all_sources = [new_source]
|
||||
|
||||
for node_id, path_entries in existing_nodes_with_paths.items():
|
||||
for entry in path_entries:
|
||||
if isinstance(entry, dict):
|
||||
all_sources.append({
|
||||
'node_id': node_id,
|
||||
'path': entry['path'],
|
||||
'parent_attr': entry.get('parent_attr', ''),
|
||||
'meaningful_attr': entry.get('meaningful_attr', self._extract_meaningful_attribute(entry['path'], entry.get('parent_attr', '')))
|
||||
})
|
||||
else:
|
||||
# Handle legacy string-only entries
|
||||
all_sources.append({
|
||||
'node_id': node_id,
|
||||
'path': str(entry),
|
||||
'parent_attr': '',
|
||||
'meaningful_attr': self._extract_meaningful_attribute(str(entry))
|
||||
})
|
||||
# Add this node and source information
|
||||
self.correlation_index[attr_value]['nodes'].add(node_id)
|
||||
|
||||
all_correlations.append({
|
||||
# Track the source of this correlation value
|
||||
source_info = {
|
||||
'node_id': node_id,
|
||||
'provider': attr_provider,
|
||||
'attribute': attr_name,
|
||||
'path': f"{attr_provider}_{attr_name}"
|
||||
}
|
||||
|
||||
# Add source if not already present (avoid duplicates)
|
||||
existing_sources = [s for s in self.correlation_index[attr_value]['sources']
|
||||
if s['node_id'] == node_id and s['path'] == source_info['path']]
|
||||
if not existing_sources:
|
||||
self.correlation_index[attr_value]['sources'].append(source_info)
|
||||
|
||||
# Create correlation node if we have multiple nodes with this value
|
||||
if len(self.correlation_index[attr_value]['nodes']) > 1:
|
||||
self._create_enhanced_correlation_node_and_edges(attr_value, self.correlation_index[attr_value])
|
||||
|
||||
def _create_enhanced_correlation_node_and_edges(self, value, correlation_data):
|
||||
"""
|
||||
UPDATED: Create correlation node and edges with raw provider data (no formatting).
|
||||
"""
|
||||
correlation_node_id = f"corr_{hash(str(value)) & 0x7FFFFFFF}"
|
||||
nodes = correlation_data['nodes']
|
||||
sources = correlation_data['sources']
|
||||
|
||||
# Create or update correlation node
|
||||
if not self.graph.has_node(correlation_node_id):
|
||||
# Use raw provider/attribute data - no formatting
|
||||
provider_counts = {}
|
||||
for source in sources:
|
||||
# Keep original provider and attribute names
|
||||
key = f"{source['provider']}_{source['attribute']}"
|
||||
provider_counts[key] = provider_counts.get(key, 0) + 1
|
||||
|
||||
# Use the most common provider/attribute as the primary label (raw)
|
||||
primary_source = max(provider_counts.items(), key=lambda x: x[1])[0] if provider_counts else "unknown_correlation"
|
||||
|
||||
metadata = {
|
||||
'value': value,
|
||||
'sources': all_sources,
|
||||
'nodes': list(unique_nodes)
|
||||
})
|
||||
return all_correlations
|
||||
'correlated_nodes': list(nodes),
|
||||
'sources': sources,
|
||||
'primary_source': primary_source,
|
||||
'correlation_count': len(nodes)
|
||||
}
|
||||
|
||||
def add_node(self, node_id: str, node_type: NodeType, attributes: Optional[Dict[str, Any]] = None,
|
||||
description: str = "", metadata: Optional[Dict[str, Any]] = None) -> bool:
|
||||
"""Add a node to the graph, update attributes, and process correlations."""
|
||||
is_new_node = not self.graph.has_node(node_id)
|
||||
if is_new_node:
|
||||
self.graph.add_node(node_id, type=node_type.value,
|
||||
added_timestamp=datetime.now(timezone.utc).isoformat(),
|
||||
attributes=attributes or {},
|
||||
description=description,
|
||||
metadata=metadata or {})
|
||||
else:
|
||||
# Safely merge new attributes into existing attributes
|
||||
if attributes:
|
||||
existing_attributes = self.graph.nodes[node_id].get('attributes', {})
|
||||
existing_attributes.update(attributes)
|
||||
self.graph.nodes[node_id]['attributes'] = existing_attributes
|
||||
if description:
|
||||
self.graph.nodes[node_id]['description'] = description
|
||||
if metadata:
|
||||
existing_metadata = self.graph.nodes[node_id].get('metadata', {})
|
||||
existing_metadata.update(metadata)
|
||||
self.graph.nodes[node_id]['metadata'] = existing_metadata
|
||||
self.add_node(correlation_node_id, NodeType.CORRELATION_OBJECT, metadata=metadata)
|
||||
#print(f"Created correlation node {correlation_node_id} for value '{value}' with {len(nodes)} nodes")
|
||||
|
||||
if attributes and node_type != NodeType.CORRELATION_OBJECT:
|
||||
correlations = self._check_for_correlations(node_id, attributes)
|
||||
for corr in correlations:
|
||||
value = corr['value']
|
||||
# Create edges from each node to the correlation node
|
||||
for source in sources:
|
||||
node_id = source['node_id']
|
||||
provider = source['provider']
|
||||
attribute = source['attribute']
|
||||
|
||||
# STEP 1: Substring check against all existing nodes
|
||||
if self._correlation_value_matches_existing_node(value):
|
||||
# Skip creating correlation node - would be redundant
|
||||
continue
|
||||
if self.graph.has_node(node_id) and not self.graph.has_edge(node_id, correlation_node_id):
|
||||
# Format relationship label as "corr_provider_attribute"
|
||||
relationship_label = f"corr_{provider}_{attribute}"
|
||||
|
||||
eligible_nodes = set(corr['nodes'])
|
||||
self.add_edge(
|
||||
source_id=node_id,
|
||||
target_id=correlation_node_id,
|
||||
relationship_type=relationship_label,
|
||||
confidence_score=0.9,
|
||||
source_provider=provider,
|
||||
raw_data={
|
||||
'correlation_value': value,
|
||||
'original_attribute': attribute,
|
||||
'correlation_type': 'attribute_matching'
|
||||
}
|
||||
)
|
||||
|
||||
if len(eligible_nodes) < 2:
|
||||
# Need at least 2 nodes to create a correlation
|
||||
continue
|
||||
#print(f"Added correlation edge: {node_id} -> {correlation_node_id} ({relationship_label})")
|
||||
|
||||
# STEP 3: Check for existing correlation node with same connection pattern
|
||||
correlation_nodes_with_pattern = self._find_correlation_nodes_with_same_pattern(eligible_nodes)
|
||||
|
||||
if correlation_nodes_with_pattern:
|
||||
# STEP 4: Merge with existing correlation node
|
||||
target_correlation_node = correlation_nodes_with_pattern[0]
|
||||
self._merge_correlation_values(target_correlation_node, value, corr)
|
||||
else:
|
||||
# STEP 5: Create new correlation node for eligible nodes only
|
||||
correlation_node_id = f"corr_{abs(hash(str(sorted(eligible_nodes))))}"
|
||||
self.add_node(correlation_node_id, NodeType.CORRELATION_OBJECT,
|
||||
metadata={'values': [value], 'sources': corr['sources'],
|
||||
'correlated_nodes': list(eligible_nodes)})
|
||||
|
||||
# Create edges from eligible nodes to this correlation node with better labeling
|
||||
for c_node_id in eligible_nodes:
|
||||
if self.graph.has_node(c_node_id):
|
||||
# Find the best attribute name for this node
|
||||
meaningful_attr = self._find_best_attribute_name_for_node(c_node_id, corr['sources'])
|
||||
relationship_type = f"c_{meaningful_attr}"
|
||||
self.add_edge(c_node_id, correlation_node_id, relationship_type, confidence_score=0.9)
|
||||
|
||||
self._update_correlation_index(node_id, attributes)
|
||||
|
||||
self.last_modified = datetime.now(timezone.utc).isoformat()
|
||||
return is_new_node
|
||||
|
||||
    def _find_best_attribute_name_for_node(self, node_id: str, sources: List[Dict]) -> str:
        """Find the best attribute name for a correlation edge by looking at the sources."""
        node_sources = [s for s in sources if s['node_id'] == node_id]

        if not node_sources:
            return "correlation"

        # Use the meaningful_attr if available
        for source in node_sources:
            meaningful_attr = source.get('meaningful_attr')
            if meaningful_attr and meaningful_attr != "unknown":
                return meaningful_attr

        # Fallback to parent_attr
        for source in node_sources:
            parent_attr = source.get('parent_attr')
            if parent_attr:
                return parent_attr

        # Last resort - extract from path
        for source in node_sources:
            path = source.get('path', '')
            if path:
                extracted = self._extract_meaningful_attribute(path)
                if extracted != "unknown":
                    return extracted

        return "correlation"

    def _has_direct_edge_bidirectional(self, node_a: str, node_b: str) -> bool:
        """
        Check if there's a direct edge between two nodes in either direction.
        Returns True if node_a→node_b OR node_b→node_a exists.
        """
        return (self.graph.has_edge(node_a, node_b) or
                self.graph.has_edge(node_b, node_a))

@@ -382,19 +304,60 @@ class GraphManager:
|
||||
f"across {node_count} nodes"
|
||||
)
|
||||
|
||||
    def add_node(self, node_id: str, node_type: NodeType, attributes: Optional[List[Dict[str, Any]]] = None,
                 description: str = "", metadata: Optional[Dict[str, Any]] = None) -> bool:
        """
        Add a node to the graph, update attributes, and process correlations.
        Now compatible with unified data model - attributes are dictionaries from converted StandardAttribute objects.
        """
        is_new_node = not self.graph.has_node(node_id)
        if is_new_node:
            self.graph.add_node(node_id, type=node_type.value,
                                added_timestamp=datetime.now(timezone.utc).isoformat(),
                                attributes=attributes or [],  # Store as a list from the start
                                description=description,
                                metadata=metadata or {})
        else:
            # Safely merge new attributes into the existing list of attributes
            if attributes:
                existing_attributes = self.graph.nodes[node_id].get('attributes', [])

                # Handle cases where old data might still be in dictionary format
                if not isinstance(existing_attributes, list):
                    existing_attributes = []

                # Create a set of existing attribute names for efficient duplicate checking
                existing_attr_names = {attr['name'] for attr in existing_attributes}

                for new_attr in attributes:
                    if new_attr['name'] not in existing_attr_names:
                        existing_attributes.append(new_attr)
                        existing_attr_names.add(new_attr['name'])

                self.graph.nodes[node_id]['attributes'] = existing_attributes
            if description:
                self.graph.nodes[node_id]['description'] = description
            if metadata:
                existing_metadata = self.graph.nodes[node_id].get('metadata', {})
                existing_metadata.update(metadata)
                self.graph.nodes[node_id]['metadata'] = existing_metadata

        self.last_modified = datetime.now(timezone.utc).isoformat()
        return is_new_node

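For orientation, here is a minimal sketch of how a caller might feed this unified `add_node`: each attribute dictionary carries at least a `name` key (used for the duplicate check above) alongside the fields of a converted StandardAttribute. The import path, the `NodeType.DOMAIN` member, and all values below are assumptions for illustration only, not taken from this diff.

```python
# Minimal sketch - import path and NodeType.DOMAIN are assumed, values are invented.
from core.graph_manager import GraphManager, NodeType

gm = GraphManager()
gm.add_node(
    "example.com",
    NodeType.DOMAIN,
    attributes=[
        {"name": "cert_issuer_name", "value": "Example CA", "type": "string",
         "provider": "crtsh", "confidence": 0.9, "metadata": {}},
    ],
    description="Seed domain for the scan",
)

# Re-adding the same node merges attributes by name instead of duplicating them.
gm.add_node("example.com", NodeType.DOMAIN,
            attributes=[{"name": "a_record", "value": "93.184.216.34", "type": "string",
                         "provider": "dns", "confidence": 0.8, "metadata": {}}])
```
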
def add_edge(self, source_id: str, target_id: str, relationship_type: str,
|
||||
confidence_score: float = 0.5, source_provider: str = "unknown",
|
||||
raw_data: Optional[Dict[str, Any]] = None) -> bool:
|
||||
"""Add or update an edge between two nodes, ensuring nodes exist."""
|
||||
"""
|
||||
UPDATED: Add or update an edge between two nodes with raw relationship labels.
|
||||
"""
|
||||
if not self.graph.has_node(source_id) or not self.graph.has_node(target_id):
|
||||
return False
|
||||
|
||||
new_confidence = confidence_score
|
||||
|
||||
if relationship_type.startswith("c_"):
|
||||
# UPDATED: Use raw relationship type - no formatting
|
||||
edge_label = relationship_type
|
||||
else:
|
||||
edge_label = f"{source_provider}_{relationship_type}"
|
||||
|
||||
if self.graph.has_edge(source_id, target_id):
|
||||
# If edge exists, update confidence if the new score is higher.
|
||||
@@ -404,7 +367,7 @@ class GraphManager:
|
||||
self.graph.edges[source_id, target_id]['updated_by'] = source_provider
|
||||
return False
|
||||
|
||||
# Add a new edge with all attributes.
|
||||
# Add a new edge with raw attributes
|
||||
self.graph.add_edge(source_id, target_id,
|
||||
relationship_type=edge_label,
|
||||
confidence_score=new_confidence,
|
||||
@@ -414,6 +377,69 @@ class GraphManager:
|
||||
self.last_modified = datetime.now(timezone.utc).isoformat()
|
||||
return True
|
||||
|
||||
    def extract_node_from_large_entity(self, large_entity_id: str, node_id_to_extract: str) -> bool:
        """
        Removes a node from a large entity's internal lists and updates its count.
        This prepares the large entity for the node's promotion to a regular node.
        """
        if not self.graph.has_node(large_entity_id):
            return False

        node_data = self.graph.nodes[large_entity_id]
        attributes = node_data.get('attributes', [])

        # Find the 'nodes' attribute dictionary in the list
        nodes_attr = next((attr for attr in attributes if attr.get('name') == 'nodes'), None)

        # Remove from the list of member nodes
        if nodes_attr and 'value' in nodes_attr and isinstance(nodes_attr['value'], list) and node_id_to_extract in nodes_attr['value']:
            nodes_attr['value'].remove(node_id_to_extract)

            # Find the 'count' attribute and update it
            count_attr = next((attr for attr in attributes if attr.get('name') == 'count'), None)
            if count_attr:
                count_attr['value'] = len(nodes_attr['value'])
        else:
            # This can happen if the node was already extracted, which is not an error.
            print(f"Warning: Node {node_id_to_extract} not found in the 'nodes' list of {large_entity_id}.")
            return True  # Proceed as if successful

        self.last_modified = datetime.now(timezone.utc).isoformat()
        return True

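As a rough illustration of the data this method touches, a large-entity node is assumed to carry a `nodes` attribute (the member list) and a `count` attribute; extracting a member shrinks both. The IDs below are invented.

```python
# Hypothetical large-entity attribute list, shape inferred from the lookups above.
large_entity_attributes = [
    {"name": "nodes", "value": ["a.example.com", "b.example.com", "c.example.com"], "type": "list"},
    {"name": "count", "value": 3, "type": "number"},
]

# After gm.extract_node_from_large_entity("large_entity_example", "b.example.com")
# the 'nodes' value would no longer contain "b.example.com" and 'count' would be 2,
# leaving the extracted member free to be promoted to a regular graph node.
```
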
def remove_node(self, node_id: str) -> bool:
|
||||
"""Remove a node and its connected edges from the graph."""
|
||||
if not self.graph.has_node(node_id):
|
||||
return False
|
||||
|
||||
# Remove node from the graph (NetworkX handles removing connected edges)
|
||||
self.graph.remove_node(node_id)
|
||||
|
||||
# Clean up the correlation index
|
||||
keys_to_delete = []
|
||||
for value, data in self.correlation_index.items():
|
||||
if isinstance(data, dict) and 'nodes' in data:
|
||||
# Updated correlation structure
|
||||
if node_id in data['nodes']:
|
||||
data['nodes'].discard(node_id)
|
||||
# Remove sources for this node
|
||||
data['sources'] = [s for s in data['sources'] if s['node_id'] != node_id]
|
||||
if not data['nodes']: # If no other nodes are associated, remove it
|
||||
keys_to_delete.append(value)
|
||||
else:
|
||||
# Legacy correlation structure (fallback)
|
||||
if isinstance(data, set) and node_id in data:
|
||||
data.discard(node_id)
|
||||
if not data:
|
||||
keys_to_delete.append(value)
|
||||
|
||||
for key in keys_to_delete:
|
||||
if key in self.correlation_index:
|
||||
del self.correlation_index[key]
|
||||
|
||||
self.last_modified = datetime.now(timezone.utc).isoformat()
|
||||
return True
|
||||
|
||||
def get_node_count(self) -> int:
|
||||
"""Get total number of nodes in the graph."""
|
||||
return self.graph.number_of_nodes()
|
||||
@@ -426,54 +452,59 @@ class GraphManager:
|
||||
"""Get all nodes of a specific type."""
|
||||
return [n for n, d in self.graph.nodes(data=True) if d.get('type') == node_type.value]
|
||||
|
||||
def get_neighbors(self, node_id: str) -> List[str]:
|
||||
"""Get all unique neighbors (predecessors and successors) for a node."""
|
||||
if not self.graph.has_node(node_id):
|
||||
return []
|
||||
return list(set(self.graph.predecessors(node_id)) | set(self.graph.successors(node_id)))
|
||||
|
||||
def get_high_confidence_edges(self, min_confidence: float = 0.8) -> List[Tuple[str, str, Dict]]:
|
||||
"""Get edges with confidence score above a given threshold."""
|
||||
return [(u, v, d) for u, v, d in self.graph.edges(data=True)
|
||||
if d.get('confidence_score', 0) >= min_confidence]
|
||||
|
||||
def get_graph_data(self) -> Dict[str, Any]:
|
||||
"""Export graph data formatted for frontend visualization."""
|
||||
"""
|
||||
Export graph data formatted for frontend visualization.
|
||||
SIMPLIFIED: No certificate styling - frontend handles all visual styling.
|
||||
"""
|
||||
nodes = []
|
||||
for node_id, attrs in self.graph.nodes(data=True):
|
||||
node_data = {'id': node_id, 'label': node_id, 'type': attrs.get('type', 'unknown'),
|
||||
'attributes': attrs.get('attributes', {}),
|
||||
node_data = {
|
||||
'id': node_id,
|
||||
'label': node_id,
|
||||
'type': attrs.get('type', 'unknown'),
|
||||
'attributes': attrs.get('attributes', []), # Raw attributes list
|
||||
'description': attrs.get('description', ''),
|
||||
'metadata': attrs.get('metadata', {}),
|
||||
'added_timestamp': attrs.get('added_timestamp')}
|
||||
# Customize node appearance based on type and attributes
|
||||
node_type = node_data['type']
|
||||
attributes = node_data['attributes']
|
||||
if node_type == 'domain' and attributes.get('certificates', {}).get('has_valid_cert') is False:
|
||||
node_data['color'] = {'background': '#c7c7c7', 'border': '#999'} # Gray for invalid cert
|
||||
'added_timestamp': attrs.get('added_timestamp')
|
||||
}
|
||||
|
||||
# Add incoming and outgoing edges to node data
|
||||
if self.graph.has_node(node_id):
|
||||
node_data['incoming_edges'] = [{'from': u, 'data': d} for u, _, d in self.graph.in_edges(node_id, data=True)]
|
||||
node_data['outgoing_edges'] = [{'to': v, 'data': d} for _, v, d in self.graph.out_edges(node_id, data=True)]
|
||||
node_data['incoming_edges'] = [
|
||||
{'from': u, 'data': d} for u, _, d in self.graph.in_edges(node_id, data=True)
|
||||
]
|
||||
node_data['outgoing_edges'] = [
|
||||
{'to': v, 'data': d} for _, v, d in self.graph.out_edges(node_id, data=True)
|
||||
]
|
||||
|
||||
nodes.append(node_data)
|
||||
|
||||
edges = []
|
||||
for source, target, attrs in self.graph.edges(data=True):
|
||||
edges.append({'from': source, 'to': target,
|
||||
edges.append({
|
||||
'from': source,
|
||||
'to': target,
|
||||
'label': attrs.get('relationship_type', ''),
|
||||
'confidence_score': attrs.get('confidence_score', 0),
|
||||
'source_provider': attrs.get('source_provider', ''),
|
||||
'discovery_timestamp': attrs.get('discovery_timestamp')})
|
||||
'discovery_timestamp': attrs.get('discovery_timestamp')
|
||||
})
|
||||
|
||||
return {
|
||||
'nodes': nodes, 'edges': edges,
|
||||
'nodes': nodes,
|
||||
'edges': edges,
|
||||
'statistics': self.get_statistics()['basic_metrics']
|
||||
}
|
||||
|
||||
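The payload returned to the frontend then looks roughly like the sketch below; only the key names come from the code above, all values are invented.

```python
# Illustrative shape of get_graph_data() output - values are made up.
example_graph_data = {
    "nodes": [{
        "id": "example.com",
        "label": "example.com",
        "type": "domain",
        "attributes": [],        # raw attributes list from the unified model
        "description": "",
        "metadata": {},
        "added_timestamp": "2024-01-01T00:00:00+00:00",
        "incoming_edges": [],
        "outgoing_edges": [],
    }],
    "edges": [{
        "from": "example.com",
        "to": "93.184.216.34",
        "label": "dns_a_record",
        "confidence_score": 0.8,
        "source_provider": "dns",
        "discovery_timestamp": "2024-01-01T00:00:01+00:00",
    }],
    "statistics": {"total_nodes": 2, "total_edges": 1},
}
```
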
def export_json(self) -> Dict[str, Any]:
|
||||
"""Export complete graph data as a JSON-serializable dictionary."""
|
||||
graph_data = nx.node_link_data(self.graph) # Use NetworkX's built-in robust serializer
|
||||
graph_data = nx.node_link_data(self.graph, edges="edges")
|
||||
return {
|
||||
'export_metadata': {
|
||||
'export_timestamp': datetime.now(timezone.utc).isoformat(),
|
||||
@@ -481,15 +512,20 @@ class GraphManager:
|
||||
'last_modified': self.last_modified,
|
||||
'total_nodes': self.get_node_count(),
|
||||
'total_edges': self.get_edge_count(),
|
||||
'graph_format': 'dnsrecon_v1_nodeling'
|
||||
'graph_format': 'dnsrecon_v1_unified_model'
|
||||
},
|
||||
'graph': graph_data,
|
||||
'statistics': self.get_statistics()
|
||||
}
|
||||
|
||||
def _get_confidence_distribution(self) -> Dict[str, int]:
|
||||
"""Get distribution of edge confidence scores."""
|
||||
"""Get distribution of edge confidence scores with empty graph handling."""
|
||||
distribution = {'high': 0, 'medium': 0, 'low': 0}
|
||||
|
||||
# FIXED: Handle empty graph case
|
||||
if self.get_edge_count() == 0:
|
||||
return distribution
|
||||
|
||||
for _, _, data in self.graph.edges(data=True):
|
||||
confidence = data.get('confidence_score', 0)
|
||||
if confidence >= 0.8:
|
||||
@@ -501,22 +537,42 @@ class GraphManager:
|
||||
return distribution
|
||||
|
||||
def get_statistics(self) -> Dict[str, Any]:
|
||||
"""Get comprehensive statistics about the graph."""
|
||||
stats = {'basic_metrics': {'total_nodes': self.get_node_count(),
|
||||
'total_edges': self.get_edge_count(),
|
||||
"""Get comprehensive statistics about the graph with proper empty graph handling."""
|
||||
|
||||
# FIXED: Handle empty graph case properly
|
||||
node_count = self.get_node_count()
|
||||
edge_count = self.get_edge_count()
|
||||
|
||||
stats = {
|
||||
'basic_metrics': {
|
||||
'total_nodes': node_count,
|
||||
'total_edges': edge_count,
|
||||
'creation_time': self.creation_time,
|
||||
'last_modified': self.last_modified},
|
||||
'node_type_distribution': {}, 'relationship_type_distribution': {},
|
||||
'last_modified': self.last_modified
|
||||
},
|
||||
'node_type_distribution': {},
|
||||
'relationship_type_distribution': {},
|
||||
'confidence_distribution': self._get_confidence_distribution(),
|
||||
'provider_distribution': {}}
|
||||
# Calculate distributions
|
||||
'provider_distribution': {}
|
||||
}
|
||||
|
||||
# FIXED: Only calculate distributions if we have data
|
||||
if node_count > 0:
|
||||
# Calculate node type distributions
|
||||
for node_type in NodeType:
|
||||
stats['node_type_distribution'][node_type.value] = self.get_nodes_by_type(node_type).__len__()
|
||||
count = len(self.get_nodes_by_type(node_type))
|
||||
if count > 0: # Only include types that exist
|
||||
stats['node_type_distribution'][node_type.value] = count
|
||||
|
||||
if edge_count > 0:
|
||||
# Calculate edge distributions
|
||||
for _, _, data in self.graph.edges(data=True):
|
||||
rel_type = data.get('relationship_type', 'unknown')
|
||||
stats['relationship_type_distribution'][rel_type] = stats['relationship_type_distribution'].get(rel_type, 0) + 1
|
||||
|
||||
provider = data.get('source_provider', 'unknown')
|
||||
stats['provider_distribution'][provider] = stats['provider_distribution'].get(provider, 0) + 1
|
||||
|
||||
return stats
|
||||
|
||||
def clear(self) -> None:
|
||||
|
||||
@@ -50,7 +50,7 @@ class ForensicLogger:
|
||||
session_id: Unique identifier for this reconnaissance session
|
||||
"""
|
||||
self.session_id = session_id or self._generate_session_id()
|
||||
#self.lock = threading.Lock()
|
||||
self.lock = threading.Lock()
|
||||
|
||||
# Initialize audit trail storage
|
||||
self.api_requests: List[APIRequest] = []
|
||||
@@ -86,6 +86,8 @@ class ForensicLogger:
|
||||
# Remove the unpickleable 'logger' attribute
|
||||
if 'logger' in state:
|
||||
del state['logger']
|
||||
if 'lock' in state:
|
||||
del state['lock']
|
||||
return state
|
||||
|
||||
def __setstate__(self, state):
|
||||
@@ -101,6 +103,7 @@ class ForensicLogger:
|
||||
console_handler = logging.StreamHandler()
|
||||
console_handler.setFormatter(formatter)
|
||||
self.logger.addHandler(console_handler)
|
||||
self.lock = threading.Lock()
|
||||
|
||||
def _generate_session_id(self) -> str:
|
||||
"""Generate unique session identifier."""
|
||||
@@ -149,7 +152,7 @@ class ForensicLogger:
|
||||
|
||||
# Log to standard logger
|
||||
if error:
|
||||
self.logger.error(f"API Request Failed - {provider}: {url} - {error}")
|
||||
self.logger.error(f"API Request Failed.")
|
||||
else:
|
||||
self.logger.info(f"API Request - {provider}: {url} - Status: {status_code}")
|
||||
|
||||
@@ -194,7 +197,7 @@ class ForensicLogger:
|
||||
self.logger.info(f"Scan Started - Target: {target_domain}, Depth: {recursion_depth}")
|
||||
self.logger.info(f"Enabled Providers: {', '.join(enabled_providers)}")
|
||||
|
||||
self.session_metadata['target_domains'].add(target_domain)
|
||||
self.session_metadata['target_domains'].update(target_domain)
|
||||
|
||||
def log_scan_complete(self) -> None:
|
||||
"""Log the completion of a reconnaissance scan."""
|
||||
|
||||
core/provider_result.py (new file, 107 lines)
@@ -0,0 +1,107 @@
# dnsrecon-reduced/core/provider_result.py

"""
Unified data model for DNSRecon passive reconnaissance.
Standardizes the data structure across all providers to ensure consistent processing.
"""

from typing import Any, Optional, List, Dict
from dataclasses import dataclass, field
from datetime import datetime, timezone


@dataclass
class StandardAttribute:
    """A unified data structure for a single piece of information about a node."""
    target_node: str
    name: str
    value: Any
    type: str
    provider: str
    confidence: float
    timestamp: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
    metadata: Optional[Dict[str, Any]] = field(default_factory=dict)

    def __post_init__(self):
        """Validate the attribute after initialization."""
        if not isinstance(self.confidence, (int, float)) or not 0.0 <= self.confidence <= 1.0:
            raise ValueError(f"Confidence must be between 0.0 and 1.0, got {self.confidence}")


@dataclass
class Relationship:
    """A unified data structure for a directional link between two nodes."""
    source_node: str
    target_node: str
    relationship_type: str
    confidence: float
    provider: str
    timestamp: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
    raw_data: Optional[Dict[str, Any]] = field(default_factory=dict)

    def __post_init__(self):
        """Validate the relationship after initialization."""
        if not isinstance(self.confidence, (int, float)) or not 0.0 <= self.confidence <= 1.0:
            raise ValueError(f"Confidence must be between 0.0 and 1.0, got {self.confidence}")


@dataclass
class ProviderResult:
    """A container for all data returned by a provider from a single query."""
    attributes: List[StandardAttribute] = field(default_factory=list)
    relationships: List[Relationship] = field(default_factory=list)

    def add_attribute(self, target_node: str, name: str, value: Any, attr_type: str,
                      provider: str, confidence: float = 0.8,
                      metadata: Optional[Dict[str, Any]] = None) -> None:
        """Helper method to add an attribute to the result."""
        self.attributes.append(StandardAttribute(
            target_node=target_node,
            name=name,
            value=value,
            type=attr_type,
            provider=provider,
            confidence=confidence,
            metadata=metadata or {}
        ))

    def add_relationship(self, source_node: str, target_node: str, relationship_type: str,
                         provider: str, confidence: float = 0.8,
                         raw_data: Optional[Dict[str, Any]] = None) -> None:
        """Helper method to add a relationship to the result."""
        self.relationships.append(Relationship(
            source_node=source_node,
            target_node=target_node,
            relationship_type=relationship_type,
            confidence=confidence,
            provider=provider,
            raw_data=raw_data or {}
        ))

    def get_discovered_nodes(self) -> set:
        """Get all unique node identifiers discovered in this result."""
        nodes = set()

        # Add nodes from relationships
        for rel in self.relationships:
            nodes.add(rel.source_node)
            nodes.add(rel.target_node)

        # Add nodes from attributes
        for attr in self.attributes:
            nodes.add(attr.target_node)

        return nodes

    def get_relationship_count(self) -> int:
        """Get the total number of relationships in this result."""
        return len(self.relationships)

    def get_attribute_count(self) -> int:
        """Get the total number of attributes in this result."""
        return len(self.attributes)

    ##TODO
    #def is_large_entity(self, threshold: int) -> bool:
    #    """Check if this result qualifies as a large entity based on relationship count."""
    #    return self.get_relationship_count() > threshold
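A short sketch of how a provider might populate and consume a ProviderResult; the domain, IP, relationship label, and raw data are placeholders, not output from a real query.

```python
# Sketch: building a ProviderResult inside a hypothetical provider query.
from core.provider_result import ProviderResult

result = ProviderResult()
result.add_relationship(
    source_node="example.com",
    target_node="93.184.216.34",
    relationship_type="a_record",
    provider="dns",
    confidence=0.8,
    raw_data={"ttl": 300},
)
result.add_attribute(
    target_node="example.com",
    name="cert_issuer_name",
    value="Example CA",
    attr_type="string",
    provider="crtsh",
    confidence=0.9,
)

assert result.get_relationship_count() == 1
assert result.get_attribute_count() == 1
assert result.get_discovered_nodes() == {"example.com", "93.184.216.34"}
```
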
core/rate_limiter.py (new file, 28 lines)
@@ -0,0 +1,28 @@
# dnsrecon-reduced/core/rate_limiter.py

import time

class GlobalRateLimiter:
    def __init__(self, redis_client):
        self.redis = redis_client

    def is_rate_limited(self, key, limit, period):
        """
        Check if a key is rate-limited.
        """
        now = time.time()
        key = f"rate_limit:{key}"

        # Remove old timestamps
        self.redis.zremrangebyscore(key, 0, now - period)

        # Check the count
        count = self.redis.zcard(key)
        if count >= limit:
            return True

        # Add new timestamp
        self.redis.zadd(key, {now: now})
        self.redis.expire(key, period)

        return False
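A minimal sketch of how this sliding-window limiter might be wired up, assuming a local Redis instance; the key name and limits below are illustrative.

```python
import redis

from core.rate_limiter import GlobalRateLimiter

# Assumes a Redis server on localhost; db and limits are illustrative.
limiter = GlobalRateLimiter(redis.StrictRedis(db=0, decode_responses=False))

def can_query_provider(provider_name: str) -> bool:
    # Allow at most 60 requests per provider in any 60-second window.
    return not limiter.is_rate_limited(provider_name, limit=60, period=60)

if can_query_provider("crtsh"):
    pass  # safe to issue the request
else:
    pass  # back off and retry later
```
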
core/scanner.py (949 lines changed; diff suppressed because it is too large)
@@ -5,18 +5,15 @@ import time
|
||||
import uuid
|
||||
import redis
|
||||
import pickle
|
||||
from typing import Dict, Optional, Any, List
|
||||
from typing import Dict, Optional, Any
|
||||
|
||||
from core.scanner import Scanner
|
||||
from config import config
|
||||
|
||||
# WARNING: Using pickle can be a security risk if the data source is not trusted.
|
||||
# In this case, we are only serializing/deserializing our own trusted Scanner objects,
|
||||
# which is generally safe. Do not unpickle data from untrusted sources.
|
||||
|
||||
class SessionManager:
|
||||
"""
|
||||
Manages multiple scanner instances for concurrent user sessions using Redis.
|
||||
FIXED: Manages multiple scanner instances for concurrent user sessions using Redis.
|
||||
Now more conservative about session creation to preserve API keys and configuration.
|
||||
"""
|
||||
|
||||
def __init__(self, session_timeout_minutes: int = 0):
|
||||
@@ -28,7 +25,10 @@ class SessionManager:
|
||||
|
||||
self.redis_client = redis.StrictRedis(db=0, decode_responses=False)
|
||||
self.session_timeout = session_timeout_minutes * 60 # Convert to seconds
|
||||
self.lock = threading.Lock() # Lock for local operations, Redis handles atomic ops
|
||||
self.lock = threading.Lock()
|
||||
|
||||
# FIXED: Add a creation lock to prevent race conditions
|
||||
self.creation_lock = threading.Lock()
|
||||
|
||||
# Start cleanup thread
|
||||
self.cleanup_thread = threading.Thread(target=self._cleanup_loop, daemon=True)
|
||||
@@ -40,7 +40,7 @@ class SessionManager:
|
||||
"""Prepare SessionManager for pickling."""
|
||||
state = self.__dict__.copy()
|
||||
# Exclude unpickleable attributes - Redis client and threading objects
|
||||
unpicklable_attrs = ['lock', 'cleanup_thread', 'redis_client']
|
||||
unpicklable_attrs = ['lock', 'cleanup_thread', 'redis_client', 'creation_lock']
|
||||
for attr in unpicklable_attrs:
|
||||
if attr in state:
|
||||
del state[attr]
|
||||
@@ -50,9 +50,9 @@ class SessionManager:
|
||||
"""Restore SessionManager after unpickling."""
|
||||
self.__dict__.update(state)
|
||||
# Re-initialize unpickleable attributes
|
||||
import redis
|
||||
self.redis_client = redis.StrictRedis(db=0, decode_responses=False)
|
||||
self.lock = threading.Lock()
|
||||
self.creation_lock = threading.Lock()
|
||||
self.cleanup_thread = threading.Thread(target=self._cleanup_loop, daemon=True)
|
||||
self.cleanup_thread.start()
|
||||
|
||||
@@ -66,8 +66,10 @@ class SessionManager:
|
||||
|
||||
def create_session(self) -> str:
|
||||
"""
|
||||
Create a new user session and store it in Redis.
|
||||
FIXED: Create a new user session with thread-safe creation to prevent duplicates.
|
||||
"""
|
||||
# FIXED: Use creation lock to prevent race conditions
|
||||
with self.creation_lock:
|
||||
session_id = str(uuid.uuid4())
|
||||
print(f"=== CREATING SESSION {session_id} IN REDIS ===")
|
||||
|
||||
@@ -99,6 +101,7 @@ class SessionManager:
|
||||
self.redis_client.setex(stop_key, self.session_timeout, b'0')
|
||||
|
||||
print(f"Session {session_id} stored in Redis with stop signal initialized")
|
||||
print(f"Session has {len(scanner_instance.providers)} providers: {[p.get_name() for p in scanner_instance.providers]}")
|
||||
return session_id
|
||||
|
||||
except Exception as e:
|
||||
@@ -212,7 +215,14 @@ class SessionManager:
|
||||
# Immediately save to Redis for GUI updates
|
||||
success = self._save_session_data(session_id, session_data)
|
||||
if success:
|
||||
print(f"Scanner state updated for session {session_id} (status: {scanner.status})")
|
||||
# Only log occasionally to reduce noise
|
||||
if hasattr(self, '_last_update_log'):
|
||||
if time.time() - self._last_update_log > 5: # Log every 5 seconds max
|
||||
#print(f"Scanner state updated for session {session_id} (status: {scanner.status})")
|
||||
self._last_update_log = time.time()
|
||||
else:
|
||||
#print(f"Scanner state updated for session {session_id} (status: {scanner.status})")
|
||||
self._last_update_log = time.time()
|
||||
else:
|
||||
print(f"WARNING: Failed to save scanner state for session {session_id}")
|
||||
return success
|
||||
|
||||
@@ -3,14 +3,15 @@ Data provider modules for DNSRecon.
|
||||
Contains implementations for various reconnaissance data sources.
|
||||
"""
|
||||
|
||||
from .base_provider import BaseProvider, RateLimiter
|
||||
from .base_provider import BaseProvider
|
||||
from .crtsh_provider import CrtShProvider
|
||||
from .dns_provider import DNSProvider
|
||||
from .shodan_provider import ShodanProvider
|
||||
from core.rate_limiter import GlobalRateLimiter
|
||||
|
||||
__all__ = [
|
||||
'BaseProvider',
|
||||
'RateLimiter',
|
||||
'GlobalRateLimiter',
|
||||
'CrtShProvider',
|
||||
'DNSProvider',
|
||||
'ShodanProvider'
|
||||
|
||||
@@ -4,49 +4,17 @@ import time
|
||||
import requests
|
||||
import threading
|
||||
from abc import ABC, abstractmethod
|
||||
from typing import List, Dict, Any, Optional, Tuple
|
||||
from typing import Dict, Any, Optional
|
||||
|
||||
from core.logger import get_forensic_logger
|
||||
|
||||
|
||||
class RateLimiter:
|
||||
"""Simple rate limiter for API calls."""
|
||||
|
||||
def __init__(self, requests_per_minute: int):
|
||||
"""
|
||||
Initialize rate limiter.
|
||||
|
||||
Args:
|
||||
requests_per_minute: Maximum requests allowed per minute
|
||||
"""
|
||||
self.requests_per_minute = requests_per_minute
|
||||
self.min_interval = 60.0 / requests_per_minute
|
||||
self.last_request_time = 0
|
||||
|
||||
def __getstate__(self):
|
||||
"""RateLimiter is fully picklable, return full state."""
|
||||
return self.__dict__.copy()
|
||||
|
||||
def __setstate__(self, state):
|
||||
"""Restore RateLimiter state."""
|
||||
self.__dict__.update(state)
|
||||
|
||||
def wait_if_needed(self) -> None:
|
||||
"""Wait if necessary to respect rate limits."""
|
||||
current_time = time.time()
|
||||
time_since_last = current_time - self.last_request_time
|
||||
|
||||
if time_since_last < self.min_interval:
|
||||
sleep_time = self.min_interval - time_since_last
|
||||
time.sleep(sleep_time)
|
||||
|
||||
self.last_request_time = time.time()
|
||||
from core.rate_limiter import GlobalRateLimiter
|
||||
from core.provider_result import ProviderResult
|
||||
|
||||
|
||||
class BaseProvider(ABC):
|
||||
"""
|
||||
Abstract base class for all DNSRecon data providers.
|
||||
Now supports session-specific configuration.
|
||||
Now supports session-specific configuration and returns standardized ProviderResult objects.
|
||||
"""
|
||||
|
||||
def __init__(self, name: str, rate_limit: int = 60, timeout: int = 30, session_config=None):
|
||||
@@ -68,11 +36,9 @@ class BaseProvider(ABC):
|
||||
# Fallback to global config for backwards compatibility
|
||||
from config import config as global_config
|
||||
self.config = global_config
|
||||
actual_rate_limit = rate_limit
|
||||
actual_timeout = timeout
|
||||
|
||||
self.name = name
|
||||
self.rate_limiter = RateLimiter(actual_rate_limit)
|
||||
self.timeout = actual_timeout
|
||||
self._local = threading.local()
|
||||
self.logger = get_forensic_logger()
|
||||
@@ -136,7 +102,7 @@ class BaseProvider(ABC):
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def query_domain(self, domain: str) -> List[Tuple[str, str, str, float, Dict[str, Any]]]:
|
||||
def query_domain(self, domain: str) -> ProviderResult:
|
||||
"""
|
||||
Query the provider for information about a domain.
|
||||
|
||||
@@ -144,12 +110,12 @@ class BaseProvider(ABC):
|
||||
domain: Domain to investigate
|
||||
|
||||
Returns:
|
||||
List of tuples: (source_node, target_node, relationship_type, confidence, raw_data)
|
||||
ProviderResult containing standardized attributes and relationships
|
||||
"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def query_ip(self, ip: str) -> List[Tuple[str, str, str, float, Dict[str, Any]]]:
|
||||
def query_ip(self, ip: str) -> ProviderResult:
|
||||
"""
|
||||
Query the provider for information about an IP address.
|
||||
|
||||
@@ -157,7 +123,7 @@ class BaseProvider(ABC):
|
||||
ip: IP address to investigate
|
||||
|
||||
Returns:
|
||||
List of tuples: (source_node, target_node, relationship_type, confidence, raw_data)
|
||||
ProviderResult containing standardized attributes and relationships
|
||||
"""
|
||||
pass
|
||||
|
||||
@@ -167,13 +133,13 @@ class BaseProvider(ABC):
|
||||
target_indicator: str = "") -> Optional[requests.Response]:
|
||||
"""
|
||||
Make a rate-limited HTTP request.
|
||||
FIXED: Returns response without automatically raising HTTPError exceptions.
|
||||
Individual providers should handle status codes appropriately.
|
||||
"""
|
||||
if self._is_stop_requested():
|
||||
print(f"Request cancelled before start: {url}")
|
||||
return None
|
||||
|
||||
self.rate_limiter.wait_if_needed()
|
||||
|
||||
start_time = time.time()
|
||||
response = None
|
||||
error = None
|
||||
@@ -205,8 +171,14 @@ class BaseProvider(ABC):
|
||||
raise ValueError(f"Unsupported HTTP method: {method}")
|
||||
|
||||
print(f"Response status: {response.status_code}")
|
||||
response.raise_for_status()
|
||||
|
||||
# FIXED: Don't automatically raise for HTTP error status codes
|
||||
# Let individual providers handle status codes appropriately
|
||||
# Only count 2xx responses as successful
|
||||
if 200 <= response.status_code < 300:
|
||||
self.successful_requests += 1
|
||||
else:
|
||||
self.failed_requests += 1
|
||||
|
||||
duration_ms = (time.time() - start_time) * 1000
|
||||
self.logger.log_api_request(
|
||||
@@ -297,5 +269,5 @@ class BaseProvider(ABC):
|
||||
'failed_requests': self.failed_requests,
|
||||
'success_rate': (self.successful_requests / self.total_requests * 100) if self.total_requests > 0 else 0,
|
||||
'relationships_found': self.total_relationships_found,
|
||||
'rate_limit': self.rate_limiter.requests_per_minute
|
||||
'rate_limit': self.config.get_rate_limit(self.name)
|
||||
}
|
||||
@@ -2,51 +2,41 @@
|
||||
|
||||
import json
|
||||
import re
|
||||
import os
|
||||
from pathlib import Path
|
||||
from typing import List, Dict, Any, Tuple, Set
|
||||
from typing import List, Dict, Any, Set
|
||||
from urllib.parse import quote
|
||||
from datetime import datetime, timezone
|
||||
|
||||
# New dependency required for this provider
|
||||
try:
|
||||
import psycopg2
|
||||
import psycopg2.extras
|
||||
PSYCOPG2_AVAILABLE = True
|
||||
except ImportError:
|
||||
PSYCOPG2_AVAILABLE = False
|
||||
import requests
|
||||
|
||||
from .base_provider import BaseProvider
|
||||
from core.provider_result import ProviderResult
|
||||
from utils.helpers import _is_valid_domain
|
||||
|
||||
# We use requests only to raise the same exception type for compatibility with core retry logic
|
||||
import requests
|
||||
|
||||
|
||||
class CrtShProvider(BaseProvider):
|
||||
"""
|
||||
Provider for querying crt.sh certificate transparency database via its public PostgreSQL endpoint.
|
||||
This version is designed to be a drop-in, high-performance replacement for the API-based provider.
|
||||
It preserves the same caching and data processing logic.
|
||||
Provider for querying crt.sh certificate transparency database.
|
||||
FIXED: Now properly creates domain and CA nodes instead of large entities.
|
||||
Returns standardized ProviderResult objects with caching support.
|
||||
"""
|
||||
|
||||
def __init__(self, name=None, session_config=None):
|
||||
"""Initialize CrtShDB provider with session-specific configuration."""
|
||||
"""Initialize CrtSh provider with session-specific configuration."""
|
||||
super().__init__(
|
||||
name="crtsh",
|
||||
rate_limit=0, # No rate limit for direct DB access
|
||||
timeout=60, # Increased timeout for potentially long DB queries
|
||||
rate_limit=60,
|
||||
timeout=15,
|
||||
session_config=session_config
|
||||
)
|
||||
# Database connection details
|
||||
self.db_host = "crt.sh"
|
||||
self.db_port = 5432
|
||||
self.db_name = "certwatch"
|
||||
self.db_user = "guest"
|
||||
self.base_url = "https://crt.sh/"
|
||||
self._stop_event = None
|
||||
|
||||
# Initialize cache directory (same as original provider)
|
||||
self.cache_dir = Path('cache') / 'crtsh'
|
||||
self.cache_dir.mkdir(parents=True, exist_ok=True)
|
||||
# Initialize cache directory (separate from BaseProvider's HTTP cache)
|
||||
self.domain_cache_dir = Path('cache') / 'crtsh'
|
||||
self.domain_cache_dir.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
# Compile regex for date filtering for efficiency
|
||||
self.date_pattern = re.compile(r'^\d{4}-\d{2}-\d{2}[ T]\d{2}:\d{2}:\d{2}')
|
||||
|
||||
def get_name(self) -> str:
|
||||
"""Return the provider name."""
|
||||
@@ -54,7 +44,7 @@ class CrtShProvider(BaseProvider):
|
||||
|
||||
def get_display_name(self) -> str:
|
||||
"""Return the provider display name for the UI."""
|
||||
return "crt.sh (DB)"
|
||||
return "crt.sh"
|
||||
|
||||
def requires_api_key(self) -> bool:
|
||||
"""Return True if the provider requires an API key."""
|
||||
@@ -65,162 +55,19 @@ class CrtShProvider(BaseProvider):
|
||||
return {'domains': True, 'ips': False}
|
||||
|
||||
def is_available(self) -> bool:
|
||||
"""
|
||||
Check if the provider can be used. Requires the psycopg2 library.
|
||||
"""
|
||||
if not PSYCOPG2_AVAILABLE:
|
||||
self.logger.logger.warning("psycopg2 library not found. CrtShDBProvider is unavailable. "
|
||||
"Please run 'pip install psycopg2-binary'.")
|
||||
return False
|
||||
"""Check if the provider is configured to be used."""
|
||||
return True
|
||||
|
||||
def _query_crtsh(self, domain: str) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
Query the crt.sh PostgreSQL database for raw certificate data.
|
||||
Raises exceptions for DB/network errors to allow core logic to retry.
|
||||
"""
|
||||
conn = None
|
||||
certificates = []
|
||||
|
||||
# SQL Query to find all certificate IDs related to the domain (including subdomains),
|
||||
# then retrieve comprehensive details for each certificate, mimicking the JSON API structure.
|
||||
sql_query = """
|
||||
WITH certificates_of_interest AS (
|
||||
SELECT DISTINCT ci.certificate_id
|
||||
FROM certificate_identity ci
|
||||
WHERE ci.name_value ILIKE %(domain_wildcard)s OR ci.name_value = %(domain)s
|
||||
)
|
||||
SELECT
|
||||
c.id,
|
||||
c.serial_number,
|
||||
c.not_before,
|
||||
c.not_after,
|
||||
(SELECT min(entry_timestamp) FROM ct_log_entry cle WHERE cle.certificate_id = c.id) as entry_timestamp,
|
||||
ca.id as issuer_ca_id,
|
||||
ca.name as issuer_name,
|
||||
(SELECT array_to_string(array_agg(DISTINCT ci.name_value), E'\n') FROM certificate_identity ci WHERE ci.certificate_id = c.id) as name_value,
|
||||
(SELECT name_value FROM certificate_identity ci WHERE ci.certificate_id = c.id AND ci.name_type = 'commonName' LIMIT 1) as common_name
|
||||
FROM
|
||||
certificate c
|
||||
JOIN ca ON c.issuer_ca_id = ca.id
|
||||
WHERE c.id IN (SELECT certificate_id FROM certificates_of_interest);
|
||||
"""
|
||||
|
||||
try:
|
||||
conn = psycopg2.connect(
|
||||
dbname=self.db_name,
|
||||
user=self.db_user,
|
||||
host=self.db_host,
|
||||
port=self.db_port,
|
||||
connect_timeout=self.timeout
|
||||
)
|
||||
|
||||
with conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor) as cursor:
|
||||
cursor.execute(sql_query, {'domain': domain, 'domain_wildcard': f'%.{domain}'})
|
||||
results = cursor.fetchall()
|
||||
certificates = [dict(row) for row in results]
|
||||
|
||||
self.logger.logger.info(f"crt.sh DB query for '{domain}' returned {len(certificates)} certificates.")
|
||||
|
||||
except psycopg2.Error as e:
|
||||
self.logger.logger.error(f"PostgreSQL query failed for {domain}: {e}")
|
||||
# Raise a RequestException to be compatible with the existing retry logic in the core application
|
||||
raise requests.exceptions.RequestException(f"PostgreSQL query failed: {e}") from e
|
||||
finally:
|
||||
if conn:
|
||||
conn.close()
|
||||
|
||||
return certificates
|
||||
|
||||
def query_domain(self, domain: str) -> List[Tuple[str, str, str, float, Dict[str, Any]]]:
|
||||
"""
|
||||
Query crt.sh for certificates containing the domain with caching support.
|
||||
Properly raises exceptions for network errors to allow core logic retries.
|
||||
"""
|
||||
if not _is_valid_domain(domain):
|
||||
return []
|
||||
|
||||
if self._stop_event and self._stop_event.is_set():
|
||||
return []
|
||||
|
||||
cache_file = self._get_cache_file_path(domain)
|
||||
cache_status = self._get_cache_status(cache_file)
|
||||
|
||||
certificates = []
|
||||
|
||||
try:
|
||||
if cache_status == "fresh":
|
||||
certificates = self._load_cached_certificates(cache_file)
|
||||
self.logger.logger.info(f"Using cached data for {domain} ({len(certificates)} certificates)")
|
||||
|
||||
elif cache_status == "not_found":
|
||||
# Fresh query from DB, create new cache
|
||||
certificates = self._query_crtsh(domain)
|
||||
if certificates:
|
||||
self._create_cache_file(cache_file, domain, self._serialize_certs_for_cache(certificates))
|
||||
else:
|
||||
self.logger.logger.info(f"No certificates found for {domain}, not caching")
|
||||
|
||||
elif cache_status == "stale":
|
||||
try:
|
||||
new_certificates = self._query_crtsh(domain)
|
||||
if new_certificates:
|
||||
certificates = self._append_to_cache(cache_file, self._serialize_certs_for_cache(new_certificates))
|
||||
else:
|
||||
certificates = self._load_cached_certificates(cache_file)
|
||||
except requests.exceptions.RequestException:
|
||||
certificates = self._load_cached_certificates(cache_file)
|
||||
if certificates:
|
||||
self.logger.logger.warning(f"DB query failed for {domain}, using stale cache data.")
|
||||
else:
|
||||
raise
|
||||
|
||||
except requests.exceptions.RequestException as e:
|
||||
# Re-raise so core logic can retry
|
||||
self.logger.logger.error(f"DB query failed for {domain}: {e}")
|
||||
raise e
|
||||
except json.JSONDecodeError as e:
|
||||
# JSON parsing errors from cache should also be handled
|
||||
self.logger.logger.error(f"Failed to parse JSON from cache for {domain}: {e}")
|
||||
raise e
|
||||
|
||||
if self._stop_event and self._stop_event.is_set():
|
||||
return []
|
||||
|
||||
if not certificates:
|
||||
return []
|
||||
|
||||
return self._process_certificates_to_relationships(domain, certificates)
|
||||
|
||||
def _serialize_certs_for_cache(self, certificates: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
Serialize certificate data for JSON caching, converting datetime objects to ISO strings.
|
||||
"""
|
||||
serialized_certs = []
|
||||
for cert in certificates:
|
||||
serialized_cert = cert.copy()
|
||||
for key in ['not_before', 'not_after', 'entry_timestamp']:
|
||||
if isinstance(serialized_cert.get(key), datetime):
|
||||
# Ensure datetime is timezone-aware before converting
|
||||
dt_obj = serialized_cert[key]
|
||||
if dt_obj.tzinfo is None:
|
||||
dt_obj = dt_obj.replace(tzinfo=timezone.utc)
|
||||
serialized_cert[key] = dt_obj.isoformat()
|
||||
serialized_certs.append(serialized_cert)
|
||||
return serialized_certs
|
||||
|
||||
# --- All methods below are copied directly from the original CrtShProvider ---
|
||||
# They are compatible because _query_crtsh returns data in the same format
|
||||
# as the original _query_crtsh_api method. A small adjustment is made to
|
||||
# _parse_certificate_date to handle datetime objects directly from the DB.
|
||||
|
||||
def _get_cache_file_path(self, domain: str) -> Path:
|
||||
"""Generate cache file path for a domain."""
|
||||
safe_domain = domain.replace('.', '_').replace('/', '_').replace('\\', '_')
|
||||
return self.cache_dir / f"{safe_domain}.json"
|
||||
return self.domain_cache_dir / f"{safe_domain}.json"
|
||||
|
||||
def _get_cache_status(self, cache_file_path: Path) -> str:
|
||||
"""Check cache status for a domain."""
|
||||
"""
|
||||
Check cache status for a domain.
|
||||
Returns: 'not_found', 'fresh', or 'stale'
|
||||
"""
|
||||
if not cache_file_path.exists():
|
||||
return "not_found"
|
||||
|
||||
@@ -245,130 +92,312 @@ class CrtShProvider(BaseProvider):
|
||||
self.logger.logger.warning(f"Invalid cache file format for {cache_file_path}: {e}")
|
||||
return "stale"
|
||||
|
||||
def _load_cached_certificates(self, cache_file_path: Path) -> List[Dict[str, Any]]:
|
||||
"""Load certificates from cache file."""
|
||||
def query_domain(self, domain: str) -> ProviderResult:
|
||||
"""
|
||||
FIXED: Query crt.sh for certificates containing the domain.
|
||||
Now properly creates domain and CA nodes instead of large entities.
|
||||
|
||||
Args:
|
||||
domain: Domain to investigate
|
||||
|
||||
Returns:
|
||||
ProviderResult containing discovered relationships and attributes
|
||||
"""
|
||||
if not _is_valid_domain(domain):
|
||||
return ProviderResult()
|
||||
|
||||
if self._stop_event and self._stop_event.is_set():
|
||||
return ProviderResult()
|
||||
|
||||
cache_file = self._get_cache_file_path(domain)
|
||||
cache_status = self._get_cache_status(cache_file)
|
||||
|
||||
result = ProviderResult()
|
||||
|
||||
try:
|
||||
if cache_status == "fresh":
|
||||
result = self._load_from_cache(cache_file)
|
||||
self.logger.logger.info(f"Using fresh cached crt.sh data for {domain}")
|
||||
|
||||
else: # "stale" or "not_found"
|
||||
# Query the API for the latest certificates
|
||||
new_raw_certs = self._query_crtsh_api(domain)
|
||||
|
||||
if self._stop_event and self._stop_event.is_set():
|
||||
return ProviderResult()
|
||||
|
||||
# Combine with old data if cache is stale
|
||||
if cache_status == "stale":
|
||||
old_raw_certs = self._load_raw_data_from_cache(cache_file)
|
||||
combined_certs = old_raw_certs + new_raw_certs
|
||||
|
||||
# Deduplicate the combined list
|
||||
seen_ids = set()
|
||||
unique_certs = []
|
||||
for cert in combined_certs:
|
||||
cert_id = cert.get('id')
|
||||
if cert_id not in seen_ids:
|
||||
unique_certs.append(cert)
|
||||
seen_ids.add(cert_id)
|
||||
|
||||
raw_certificates_to_process = unique_certs
|
||||
self.logger.logger.info(f"Refreshed and merged cache for {domain}. Total unique certs: {len(raw_certificates_to_process)}")
|
||||
else: # "not_found"
|
||||
raw_certificates_to_process = new_raw_certs
|
||||
|
||||
# FIXED: Process certificates to create proper domain and CA nodes
|
||||
result = self._process_certificates_to_result_fixed(domain, raw_certificates_to_process)
|
||||
self.logger.logger.info(f"Created fresh result for {domain} ({result.get_relationship_count()} relationships)")
|
||||
|
||||
# Save the new result and the raw data to the cache
|
||||
self._save_result_to_cache(cache_file, result, raw_certificates_to_process, domain)
|
||||
|
||||
except requests.exceptions.RequestException as e:
|
||||
self.logger.logger.error(f"API query failed for {domain}: {e}")
|
||||
if cache_status != "not_found":
|
||||
result = self._load_from_cache(cache_file)
|
||||
self.logger.logger.warning(f"Using stale cache for {domain} due to API failure.")
|
||||
else:
|
||||
raise e # Re-raise if there's no cache to fall back on
|
||||
|
||||
return result
|
||||
|
||||
def query_ip(self, ip: str) -> ProviderResult:
|
||||
"""
|
||||
Query crt.sh for certificates containing the IP address.
|
||||
Note: crt.sh doesn't typically index by IP, so this returns empty results.
|
||||
|
||||
Args:
|
||||
ip: IP address to investigate
|
||||
|
||||
Returns:
|
||||
Empty ProviderResult (crt.sh doesn't support IP-based certificate queries effectively)
|
||||
"""
|
||||
return ProviderResult()
|
||||
|
||||
def _load_from_cache(self, cache_file_path: Path) -> ProviderResult:
|
||||
"""Load processed crt.sh data from a cache file."""
|
||||
try:
|
||||
with open(cache_file_path, 'r') as f:
|
||||
cache_data = json.load(f)
|
||||
return cache_data.get('certificates', [])
|
||||
cache_content = json.load(f)
|
||||
|
||||
result = ProviderResult()
|
||||
|
||||
# Reconstruct relationships
|
||||
for rel_data in cache_content.get("relationships", []):
|
||||
result.add_relationship(
|
||||
source_node=rel_data["source_node"],
|
||||
target_node=rel_data["target_node"],
|
||||
relationship_type=rel_data["relationship_type"],
|
||||
provider=rel_data["provider"],
|
||||
confidence=rel_data["confidence"],
|
||||
raw_data=rel_data.get("raw_data", {})
|
||||
)
|
||||
|
||||
# Reconstruct attributes
|
||||
for attr_data in cache_content.get("attributes", []):
|
||||
result.add_attribute(
|
||||
target_node=attr_data["target_node"],
|
||||
name=attr_data["name"],
|
||||
value=attr_data["value"],
|
||||
attr_type=attr_data["type"],
|
||||
provider=attr_data["provider"],
|
||||
confidence=attr_data["confidence"],
|
||||
metadata=attr_data.get("metadata", {})
|
||||
)
|
||||
|
||||
return result
|
||||
|
||||
except (json.JSONDecodeError, FileNotFoundError, KeyError) as e:
|
||||
self.logger.logger.error(f"Failed to load cached certificates from {cache_file_path}: {e}")
|
||||
return ProviderResult()
|
||||
|
||||
def _load_raw_data_from_cache(self, cache_file_path: Path) -> List[Dict[str, Any]]:
|
||||
"""Load only the raw certificate data from a cache file."""
|
||||
try:
|
||||
with open(cache_file_path, 'r') as f:
|
||||
cache_content = json.load(f)
|
||||
return cache_content.get("raw_certificates", [])
|
||||
except (json.JSONDecodeError, FileNotFoundError):
|
||||
return []
|
||||
|
||||
def _create_cache_file(self, cache_file_path: Path, domain: str, certificates: List[Dict[str, Any]]) -> None:
|
||||
"""Create new cache file with certificates."""
|
||||
def _save_result_to_cache(self, cache_file_path: Path, result: ProviderResult, raw_certificates: List[Dict[str, Any]], domain: str) -> None:
|
||||
"""Save processed crt.sh result and raw data to a cache file."""
|
||||
try:
|
||||
cache_data = {
|
||||
"domain": domain,
|
||||
"first_cached": datetime.now(timezone.utc).isoformat(),
|
||||
"last_upstream_query": datetime.now(timezone.utc).isoformat(),
|
||||
"upstream_query_count": 1,
|
||||
"certificates": certificates
|
||||
"raw_certificates": raw_certificates, # Store the raw data for deduplication
|
||||
"relationships": [
|
||||
{
|
||||
"source_node": rel.source_node,
|
||||
"target_node": rel.target_node,
|
||||
"relationship_type": rel.relationship_type,
|
||||
"confidence": rel.confidence,
|
||||
"provider": rel.provider,
|
||||
"raw_data": rel.raw_data
|
||||
} for rel in result.relationships
|
||||
],
|
||||
"attributes": [
|
||||
{
|
||||
"target_node": attr.target_node,
|
||||
"name": attr.name,
|
||||
"value": attr.value,
|
||||
"type": attr.type,
|
||||
"provider": attr.provider,
|
||||
"confidence": attr.confidence,
|
||||
"metadata": attr.metadata
|
||||
} for attr in result.attributes
|
||||
]
|
||||
}
|
||||
cache_file_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
with open(cache_file_path, 'w') as f:
|
||||
json.dump(cache_data, f, separators=(',', ':'))
|
||||
self.logger.logger.info(f"Created cache file for {domain} with {len(certificates)} certificates")
|
||||
json.dump(cache_data, f, separators=(',', ':'), default=str)
|
||||
except Exception as e:
|
||||
self.logger.logger.warning(f"Failed to create cache file for {domain}: {e}")
|
||||
self.logger.logger.warning(f"Failed to save cache file for {domain}: {e}")
|
||||
|
||||
def _query_crtsh_api(self, domain: str) -> List[Dict[str, Any]]:
|
||||
"""Query crt.sh API for raw certificate data."""
|
||||
url = f"{self.base_url}?q={quote(domain)}&output=json"
|
||||
response = self.make_request(url, target_indicator=domain)
|
||||
|
||||
if not response or response.status_code != 200:
|
||||
raise requests.exceptions.RequestException(f"crt.sh API returned status {response.status_code if response else 'None'}")
|
||||
|
||||
def _append_to_cache(self, cache_file_path: Path, new_certificates: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
|
||||
"""Append new certificates to existing cache and return all certificates."""
|
||||
try:
|
||||
with open(cache_file_path, 'r') as f:
|
||||
cache_data = json.load(f)
|
||||
certificates = response.json()
|
||||
except json.JSONDecodeError:
|
||||
self.logger.logger.error(f"crt.sh returned invalid JSON for {domain}")
|
||||
return []
|
||||
|
||||
existing_ids = {cert.get('id') for cert in cache_data.get('certificates', [])}
|
||||
added_count = 0
|
||||
for cert in new_certificates:
|
||||
cert_id = cert.get('id')
|
||||
if cert_id and cert_id not in existing_ids:
|
||||
cache_data['certificates'].append(cert)
|
||||
existing_ids.add(cert_id)
|
||||
added_count += 1
|
||||
if not certificates:
|
||||
return []
|
||||
|
||||
cache_data['last_upstream_query'] = datetime.now(timezone.utc).isoformat()
|
||||
cache_data['upstream_query_count'] = cache_data.get('upstream_query_count', 0) + 1
|
||||
return certificates
|
||||
|
||||
with open(cache_file_path, 'w') as f:
|
||||
json.dump(cache_data, f, separators=(',', ':'))
|
||||
|
||||
total_certs = len(cache_data['certificates'])
|
||||
self.logger.logger.info(f"Appended {added_count} new certificates to cache. Total: {total_certs}")
|
||||
return cache_data['certificates']
|
||||
except Exception as e:
|
||||
self.logger.logger.warning(f"Failed to append to cache: {e}")
|
||||
return new_certificates
|
||||
|
||||
    def _parse_issuer_organization(self, issuer_dn: str) -> str:
        """Parse the issuer Distinguished Name to extract just the organization name."""
        if not issuer_dn: return issuer_dn
        try:
            components = [comp.strip() for comp in issuer_dn.split(',')]
            for component in components:
                if component.startswith('O='):
                    org_name = component[2:].strip()
                    if org_name.startswith('"') and org_name.endswith('"'):
                        org_name = org_name[1:-1]
                    return org_name
            return issuer_dn
        except Exception as e:
            self.logger.logger.debug(f"Failed to parse issuer DN '{issuer_dn}': {e}")
            return issuer_dn

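As a quick illustration of what `_parse_issuer_organization` extracts, the snippet below runs the same split-and-strip logic on a fabricated issuer DN; the DN string is an example, not output from the tool.

```python
# Illustrative only: a made-up issuer DN of the shape crt.sh returns.
issuer_dn = 'C=US, O="Let\'s Encrypt", CN=R3'

# Same idea as the method above: split on commas, take the O= component,
# strip surrounding quotes, and fall back to the full DN if no O= is present.
org = next(
    (c.strip()[2:].strip().strip('"') for c in issuer_dn.split(',') if c.strip().startswith('O=')),
    issuer_dn,
)
print(org)  # Let's Encrypt
```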
def _parse_certificate_date(self, date_input: Any) -> datetime:
|
||||
def _process_certificates_to_result_fixed(self, query_domain: str, certificates: List[Dict[str, Any]]) -> ProviderResult:
|
||||
"""
|
||||
Parse certificate date from various formats (string from cache, datetime from DB).
|
||||
FIXED: Process certificates to create proper domain and CA nodes.
|
||||
Now creates individual domain nodes instead of large entities.
|
||||
"""
|
||||
if isinstance(date_input, datetime):
|
||||
# If it's already a datetime object from the DB, just ensure it's UTC
|
||||
if date_input.tzinfo is None:
|
||||
return date_input.replace(tzinfo=timezone.utc)
|
||||
return date_input
|
||||
result = ProviderResult()
|
||||
|
||||
date_string = str(date_input)
|
||||
if not date_string:
|
||||
raise ValueError("Empty date string")
|
||||
if self._stop_event and self._stop_event.is_set():
|
||||
self.logger.logger.info(f"CrtSh processing cancelled before processing for domain: {query_domain}")
|
||||
return result
|
||||
|
||||
try:
|
||||
if 'Z' in date_string:
|
||||
return datetime.fromisoformat(date_string.replace('Z', '+00:00'))
|
||||
# Handle standard ISO format with or without timezone
|
||||
dt = datetime.fromisoformat(date_string)
|
||||
if dt.tzinfo is None:
|
||||
return dt.replace(tzinfo=timezone.utc)
|
||||
return dt
|
||||
except ValueError as e:
|
||||
try:
|
||||
# Fallback for other formats
|
||||
return datetime.strptime(date_string[:19], "%Y-%m-%dT%H:%M:%S").replace(tzinfo=timezone.utc)
|
||||
except Exception:
|
||||
raise ValueError(f"Unable to parse date: {date_string}") from e
|
||||
all_discovered_domains = set()
|
||||
processed_issuers = set()
|
||||
|
||||
def _is_cert_valid(self, cert_data: Dict[str, Any]) -> bool:
|
||||
"""Check if a certificate is currently valid based on its expiry date."""
|
||||
try:
|
||||
not_after_str = cert_data.get('not_after')
|
||||
if not not_after_str: return False
|
||||
for i, cert_data in enumerate(certificates):
|
||||
if i % 10 == 0 and self._stop_event and self._stop_event.is_set():
|
||||
self.logger.logger.info(f"CrtSh processing cancelled at certificate {i} for domain: {query_domain}")
|
||||
break
|
||||
|
||||
not_after_date = self._parse_certificate_date(not_after_str)
|
||||
not_before_str = cert_data.get('not_before')
|
||||
now = datetime.now(timezone.utc)
|
||||
is_not_expired = not_after_date > now
|
||||
# Extract all domains from this certificate
|
||||
cert_domains = self._extract_domains_from_certificate(cert_data)
|
||||
all_discovered_domains.update(cert_domains)
|
||||
|
||||
# FIXED: Create CA nodes for certificate issuers (not as domain metadata)
|
||||
issuer_name = self._parse_issuer_organization(cert_data.get('issuer_name', ''))
|
||||
if issuer_name and issuer_name not in processed_issuers:
|
||||
# Create relationship from query domain to CA
|
||||
result.add_relationship(
|
||||
source_node=query_domain,
|
||||
target_node=issuer_name,
|
||||
relationship_type='crtsh_cert_issuer',
|
||||
provider=self.name,
|
||||
confidence=0.95,
|
||||
raw_data={'issuer_dn': cert_data.get('issuer_name', '')}
|
||||
)
|
||||
processed_issuers.add(issuer_name)
|
||||
|
||||
# Add certificate metadata to each domain in this certificate
|
||||
cert_metadata = self._extract_certificate_metadata(cert_data)
|
||||
for cert_domain in cert_domains:
|
||||
if not _is_valid_domain(cert_domain):
|
||||
continue
|
||||
|
||||
# Add certificate attributes to the domain
|
||||
for key, value in cert_metadata.items():
|
||||
if value is not None:
|
||||
result.add_attribute(
|
||||
target_node=cert_domain,
|
||||
name=f"cert_{key}",
|
||||
value=value,
|
||||
attr_type='certificate_data',
|
||||
provider=self.name,
|
||||
confidence=0.9,
|
||||
metadata={'certificate_id': cert_data.get('id')}
|
||||
)
|
||||
|
||||
if self._stop_event and self._stop_event.is_set():
|
||||
self.logger.logger.info(f"CrtSh query cancelled before relationship creation for domain: {query_domain}")
|
||||
return result
|
||||
|
||||
# FIXED: Create selective relationships to avoid large entities
|
||||
# Only create relationships to domains that are closely related
|
||||
for discovered_domain in all_discovered_domains:
|
||||
if discovered_domain == query_domain:
|
||||
continue
|
||||
|
||||
if not _is_valid_domain(discovered_domain):
|
||||
continue
|
||||
|
||||
# FIXED: Only create relationships for domains that share a meaningful connection
|
||||
# This prevents creating too many relationships that trigger large entity creation
|
||||
if self._should_create_relationship(query_domain, discovered_domain):
|
||||
confidence = self._calculate_domain_relationship_confidence(
|
||||
query_domain, discovered_domain, [], all_discovered_domains
|
||||
)
|
||||
|
||||
result.add_relationship(
|
||||
source_node=query_domain,
|
||||
target_node=discovered_domain,
|
||||
relationship_type='crtsh_san_certificate',
|
||||
provider=self.name,
|
||||
confidence=confidence,
|
||||
raw_data={'relationship_type': 'certificate_discovery'}
|
||||
)
|
||||
|
||||
self.log_relationship_discovery(
|
||||
source_node=query_domain,
|
||||
target_node=discovered_domain,
|
||||
relationship_type='crtsh_san_certificate',
|
||||
confidence_score=confidence,
|
||||
raw_data={'relationship_type': 'certificate_discovery'},
|
||||
discovery_method="certificate_transparency_analysis"
|
||||
)
|
||||
|
||||
self.logger.logger.info(f"CrtSh processing completed for {query_domain}: {len(all_discovered_domains)} domains, {result.get_relationship_count()} relationships")
|
||||
return result
|
||||
|
||||
def _should_create_relationship(self, source_domain: str, target_domain: str) -> bool:
|
||||
"""
|
||||
FIXED: Determine if a relationship should be created between two domains.
|
||||
This helps avoid creating too many relationships that trigger large entity creation.
|
||||
"""
|
||||
# Always create relationships for subdomains
|
||||
if target_domain.endswith(f'.{source_domain}') or source_domain.endswith(f'.{target_domain}'):
|
||||
return True
|
||||
|
||||
# Create relationships for domains that share a common parent (up to 2 levels)
|
||||
source_parts = source_domain.split('.')
|
||||
target_parts = target_domain.split('.')
|
||||
|
||||
# Check if they share the same root domain (last 2 parts)
|
||||
if len(source_parts) >= 2 and len(target_parts) >= 2:
|
||||
source_root = '.'.join(source_parts[-2:])
|
||||
target_root = '.'.join(target_parts[-2:])
|
||||
return source_root == target_root
|
||||
|
||||
if not_before_str:
|
||||
not_before_date = self._parse_certificate_date(not_before_str)
|
||||
is_not_before_valid = not_before_date <= now
|
||||
return is_not_expired and is_not_before_valid
|
||||
return is_not_expired
|
||||
except Exception as e:
|
||||
self.logger.logger.debug(f"Certificate validity check failed: {e}")
|
||||
return False
|
||||
|
||||
def _extract_certificate_metadata(self, cert_data: Dict[str, Any]) -> Dict[str, Any]:
|
||||
# This method works as-is.
|
||||
"""Extract comprehensive metadata from certificate data."""
|
||||
raw_issuer_name = cert_data.get('issuer_name', '')
|
||||
parsed_issuer_name = self._parse_issuer_organization(raw_issuer_name)
|
||||
|
||||
metadata = {
|
||||
'certificate_id': cert_data.get('id'),
|
||||
'serial_number': cert_data.get('serial_number'),
|
||||
@@ -378,136 +407,174 @@ class CrtShProvider(BaseProvider):
|
||||
'not_before': cert_data.get('not_before'),
|
||||
'not_after': cert_data.get('not_after'),
|
||||
'entry_timestamp': cert_data.get('entry_timestamp'),
|
||||
'source': 'crt.sh (DB)'
|
||||
'source': 'crtsh'
|
||||
}
|
||||
|
||||
try:
|
||||
if metadata['not_before'] and metadata['not_after']:
|
||||
not_before = self._parse_certificate_date(metadata['not_before'])
|
||||
not_after = self._parse_certificate_date(metadata['not_after'])
|
||||
|
||||
metadata['validity_period_days'] = (not_after - not_before).days
|
||||
metadata['is_currently_valid'] = self._is_cert_valid(cert_data)
|
||||
metadata['expires_soon'] = (not_after - datetime.now(timezone.utc)).days <= 30
|
||||
metadata['not_before'] = not_before.strftime('%Y-%m-%d %H:%M:%S UTC')
|
||||
metadata['not_after'] = not_after.strftime('%Y-%m-%d %H:%M:%S UTC')
|
||||
|
||||
# Keep raw date format or convert to standard format
|
||||
metadata['not_before'] = not_before.isoformat()
|
||||
metadata['not_after'] = not_after.isoformat()
|
||||
|
||||
except Exception as e:
|
||||
self.logger.logger.debug(f"Error computing certificate metadata: {e}")
|
||||
metadata['is_currently_valid'] = False
|
||||
metadata['expires_soon'] = False
|
||||
|
||||
return metadata
|
||||
|
||||
def _process_certificates_to_relationships(self, domain: str, certificates: List[Dict[str, Any]]) -> List[Tuple[str, str, str, float, Dict[str, Any]]]:
|
||||
# This method works as-is.
|
||||
relationships = []
|
||||
if self._stop_event and self._stop_event.is_set(): return []
|
||||
domain_certificates = {}
|
||||
all_discovered_domains = set()
|
||||
for i, cert_data in enumerate(certificates):
|
||||
if i % 5 == 0 and self._stop_event and self._stop_event.is_set(): break
|
||||
cert_metadata = self._extract_certificate_metadata(cert_data)
|
||||
cert_domains = self._extract_domains_from_certificate(cert_data)
|
||||
all_discovered_domains.update(cert_domains)
|
||||
for cert_domain in cert_domains:
|
||||
if not _is_valid_domain(cert_domain): continue
|
||||
if cert_domain not in domain_certificates:
|
||||
domain_certificates[cert_domain] = []
|
||||
domain_certificates[cert_domain].append(cert_metadata)
|
||||
if self._stop_event and self._stop_event.is_set(): return []
|
||||
for i, discovered_domain in enumerate(all_discovered_domains):
|
||||
if discovered_domain == domain: continue
|
||||
if i % 10 == 0 and self._stop_event and self._stop_event.is_set(): break
|
||||
if not _is_valid_domain(discovered_domain): continue
|
||||
query_domain_certs = domain_certificates.get(domain, [])
|
||||
discovered_domain_certs = domain_certificates.get(discovered_domain, [])
|
||||
shared_certificates = self._find_shared_certificates(query_domain_certs, discovered_domain_certs)
|
||||
confidence = self._calculate_domain_relationship_confidence(
|
||||
domain, discovered_domain, shared_certificates, all_discovered_domains
|
||||
)
|
||||
relationship_raw_data = {
|
||||
'relationship_type': 'certificate_discovery',
|
||||
'shared_certificates': shared_certificates,
|
||||
'total_shared_certs': len(shared_certificates),
|
||||
'discovery_context': self._determine_relationship_context(discovered_domain, domain),
|
||||
'domain_certificates': {
|
||||
domain: self._summarize_certificates(query_domain_certs),
|
||||
discovered_domain: self._summarize_certificates(discovered_domain_certs)
|
||||
}
|
||||
}
|
||||
relationships.append((
|
||||
domain, discovered_domain, 'san_certificate', confidence, relationship_raw_data
|
||||
))
|
||||
self.log_relationship_discovery(
|
||||
source_node=domain, target_node=discovered_domain, relationship_type='san_certificate',
|
||||
confidence_score=confidence, raw_data=relationship_raw_data,
|
||||
discovery_method="certificate_transparency_analysis"
|
||||
)
|
||||
return relationships
|
||||
def _parse_issuer_organization(self, issuer_dn: str) -> str:
|
||||
"""Parse the issuer Distinguished Name to extract just the organization name."""
|
||||
if not issuer_dn:
|
||||
return issuer_dn
|
||||
|
||||
# --- All remaining helper methods are identical to the original and fully compatible ---
|
||||
# They are included here for completeness.
|
||||
|
||||
def _find_shared_certificates(self, certs1: List[Dict[str, Any]], certs2: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
|
||||
cert1_ids = {cert.get('certificate_id') for cert in certs1 if cert.get('certificate_id')}
|
||||
return [cert for cert in certs2 if cert.get('certificate_id') in cert1_ids]
|
||||
|
||||
def _summarize_certificates(self, certificates: List[Dict[str, Any]]) -> Dict[str, Any]:
|
||||
if not certificates: return {'total_certificates': 0, 'valid_certificates': 0, 'expired_certificates': 0, 'expires_soon_count': 0, 'unique_issuers': [], 'latest_certificate': None, 'has_valid_cert': False}
|
||||
valid_count = sum(1 for cert in certificates if cert.get('is_currently_valid'))
|
||||
expires_soon_count = sum(1 for cert in certificates if cert.get('expires_soon'))
|
||||
unique_issuers = list(set(cert.get('issuer_name') for cert in certificates if cert.get('issuer_name')))
|
||||
latest_cert, latest_date = None, None
|
||||
for cert in certificates:
|
||||
try:
|
||||
if cert.get('not_before'):
|
||||
cert_date = self._parse_certificate_date(cert['not_before'])
|
||||
if latest_date is None or cert_date > latest_date:
|
||||
latest_date, latest_cert = cert_date, cert
|
||||
except Exception: continue
|
||||
return {'total_certificates': len(certificates), 'valid_certificates': valid_count, 'expired_certificates': len(certificates) - valid_count, 'expires_soon_count': expires_soon_count, 'unique_issuers': unique_issuers, 'latest_certificate': latest_cert, 'has_valid_cert': valid_count > 0, 'certificate_details': certificates}
|
||||
components = [comp.strip() for comp in issuer_dn.split(',')]
|
||||
|
||||
def _calculate_domain_relationship_confidence(self, domain1: str, domain2: str, shared_certificates: List[Dict[str, Any]], all_discovered_domains: Set[str]) -> float:
|
||||
base_confidence, context_bonus, shared_bonus, validity_bonus, issuer_bonus = 0.9, 0.0, 0.0, 0.0, 0.0
|
||||
relationship_context = self._determine_relationship_context(domain2, domain1)
|
||||
if relationship_context == 'subdomain': context_bonus = 0.1
|
||||
elif relationship_context == 'parent_domain': context_bonus = 0.05
|
||||
if shared_certificates:
|
||||
if len(shared_certificates) >= 3: shared_bonus = 0.1
|
||||
elif len(shared_certificates) >= 2: shared_bonus = 0.05
|
||||
else: shared_bonus = 0.02
|
||||
if any(cert.get('is_currently_valid') for cert in shared_certificates): validity_bonus = 0.05
|
||||
for cert in shared_certificates:
|
||||
if any(ca in cert.get('issuer_name', '').lower() for ca in ['let\'s encrypt', 'digicert', 'sectigo', 'globalsign']):
|
||||
issuer_bonus = max(issuer_bonus, 0.03)
|
||||
break
|
||||
return max(0.1, min(1.0, base_confidence + context_bonus + shared_bonus + validity_bonus + issuer_bonus))
|
||||
for component in components:
|
||||
if component.startswith('O='):
|
||||
org_name = component[2:].strip()
|
||||
if org_name.startswith('"') and org_name.endswith('"'):
|
||||
org_name = org_name[1:-1]
|
||||
return org_name
|
||||
|
||||
def _determine_relationship_context(self, cert_domain: str, query_domain: str) -> str:
|
||||
if cert_domain == query_domain: return 'exact_match'
|
||||
if cert_domain.endswith(f'.{query_domain}'): return 'subdomain'
|
||||
if query_domain.endswith(f'.{cert_domain}'): return 'parent_domain'
|
||||
return 'related_domain'
|
||||
return issuer_dn
|
||||
|
||||
def query_ip(self, ip: str) -> List[Tuple[str, str, str, float, Dict[str, Any]]]:
|
||||
return []
|
||||
except Exception as e:
|
||||
self.logger.logger.debug(f"Failed to parse issuer DN '{issuer_dn}': {e}")
|
||||
return issuer_dn
|
||||
|
||||
def _parse_certificate_date(self, date_string: str) -> datetime:
|
||||
"""Parse certificate date from crt.sh format."""
|
||||
if not date_string:
|
||||
raise ValueError("Empty date string")
|
||||
|
||||
try:
|
||||
if date_string.endswith('Z'):
|
||||
return datetime.fromisoformat(date_string[:-1]).replace(tzinfo=timezone.utc)
|
||||
elif '+' in date_string or date_string.endswith('UTC'):
|
||||
date_string = date_string.replace('UTC', '').strip()
|
||||
if '+' in date_string:
|
||||
date_string = date_string.split('+')[0]
|
||||
return datetime.fromisoformat(date_string).replace(tzinfo=timezone.utc)
|
||||
else:
|
||||
return datetime.fromisoformat(date_string).replace(tzinfo=timezone.utc)
|
||||
except Exception as e:
|
||||
try:
|
||||
return datetime.strptime(date_string[:19], "%Y-%m-%dT%H:%M:%S").replace(tzinfo=timezone.utc)
|
||||
except Exception:
|
||||
raise ValueError(f"Unable to parse date: {date_string}") from e
|
||||
|
||||
def _is_cert_valid(self, cert_data: Dict[str, Any]) -> bool:
|
||||
"""Check if a certificate is currently valid based on its expiry date."""
|
||||
try:
|
||||
not_after_str = cert_data.get('not_after')
|
||||
if not not_after_str:
|
||||
return False
|
||||
|
||||
not_after_date = self._parse_certificate_date(not_after_str)
|
||||
not_before_str = cert_data.get('not_before')
|
||||
|
||||
now = datetime.now(timezone.utc)
|
||||
is_not_expired = not_after_date > now
|
||||
|
||||
if not_before_str:
|
||||
not_before_date = self._parse_certificate_date(not_before_str)
|
||||
is_not_before_valid = not_before_date <= now
|
||||
return is_not_expired and is_not_before_valid
|
||||
|
||||
return is_not_expired
|
||||
|
||||
except Exception as e:
|
||||
return False
|
||||
|
||||
def _extract_domains_from_certificate(self, cert_data: Dict[str, Any]) -> Set[str]:
|
||||
"""Extract all domains from certificate data."""
|
||||
domains = set()
|
||||
if cn := cert_data.get('common_name'):
|
||||
if cleaned := self._clean_domain_name(cn):
|
||||
domains.update(cleaned)
|
||||
if nv := cert_data.get('name_value'):
|
||||
for line in nv.split('\n'):
|
||||
if cleaned := self._clean_domain_name(line.strip()):
|
||||
domains.update(cleaned)
|
||||
|
||||
# Extract from common name
|
||||
common_name = cert_data.get('common_name', '')
|
||||
if common_name:
|
||||
cleaned_cn = self._clean_domain_name(common_name)
|
||||
if cleaned_cn:
|
||||
domains.update(cleaned_cn)
|
||||
|
||||
# Extract from name_value field (contains SANs)
|
||||
name_value = cert_data.get('name_value', '')
|
||||
if name_value:
|
||||
for line in name_value.split('\n'):
|
||||
cleaned_domains = self._clean_domain_name(line.strip())
|
||||
if cleaned_domains:
|
||||
domains.update(cleaned_domains)
|
||||
|
||||
return domains
|
||||
|
||||
def _clean_domain_name(self, domain_name: str) -> List[str]:
|
||||
if not domain_name: return []
|
||||
domain = domain_name.strip().lower().split('://', 1)[-1].split('/', 1)[0]
|
||||
if ':' in domain and not domain.count(':') > 1: domain = domain.split(':', 1)[0]
|
||||
cleaned_domains = [domain, domain[2:]] if domain.startswith('*.') else [domain]
|
||||
"""Clean and normalize domain name from certificate data."""
|
||||
if not domain_name:
|
||||
return []
|
||||
|
||||
domain = domain_name.strip().lower()
|
||||
|
||||
if domain.startswith(('http://', 'https://')):
|
||||
domain = domain.split('://', 1)[1]
|
||||
|
||||
if '/' in domain:
|
||||
domain = domain.split('/', 1)[0]
|
||||
|
||||
if ':' in domain and not domain.count(':') > 1:
|
||||
domain = domain.split(':', 1)[0]
|
||||
|
||||
cleaned_domains = []
|
||||
if domain.startswith('*.'):
|
||||
cleaned_domains.append(domain)
|
||||
cleaned_domains.append(domain[2:])
|
||||
else:
|
||||
cleaned_domains.append(domain)
|
||||
|
||||
final_domains = []
|
||||
for d in cleaned_domains:
|
||||
d = re.sub(r'[^\w\-\.]', '', d)
|
||||
if d and not d.startswith(('.', '-')) and not d.endswith(('.', '-')):
|
||||
final_domains.append(d)
|
||||
|
||||
return [d for d in final_domains if _is_valid_domain(d)]
|
||||
|
||||
def _calculate_domain_relationship_confidence(self, domain1: str, domain2: str,
|
||||
shared_certificates: List[Dict[str, Any]],
|
||||
all_discovered_domains: Set[str]) -> float:
|
||||
"""Calculate confidence score for domain relationship based on various factors."""
|
||||
base_confidence = 0.9
|
||||
|
||||
# Adjust confidence based on domain relationship context
|
||||
relationship_context = self._determine_relationship_context(domain2, domain1)
|
||||
|
||||
if relationship_context == 'exact_match':
|
||||
context_bonus = 0.0
|
||||
elif relationship_context == 'subdomain':
|
||||
context_bonus = 0.1
|
||||
elif relationship_context == 'parent_domain':
|
||||
context_bonus = 0.05
|
||||
else:
|
||||
context_bonus = 0.0
|
||||
|
||||
final_confidence = base_confidence + context_bonus
|
||||
return max(0.1, min(1.0, final_confidence))
|
||||
|
||||
def _determine_relationship_context(self, cert_domain: str, query_domain: str) -> str:
|
||||
"""Determine the context of the relationship between certificate domain and query domain."""
|
||||
if cert_domain == query_domain:
|
||||
return 'exact_match'
|
||||
elif cert_domain.endswith(f'.{query_domain}'):
|
||||
return 'subdomain'
|
||||
elif query_domain.endswith(f'.{cert_domain}'):
|
||||
return 'parent_domain'
|
||||
else:
|
||||
return 'related_domain'
|
||||
@@ -1,15 +1,16 @@
# dnsrecon/providers/dns_provider.py

from dns import resolver, reversename
from typing import List, Dict, Any, Tuple
from typing import Dict
from .base_provider import BaseProvider
from utils.helpers import _is_valid_ip, _is_valid_domain
from core.provider_result import ProviderResult
from utils.helpers import _is_valid_ip, _is_valid_domain, get_ip_version


class DNSProvider(BaseProvider):
    """
    Provider for standard DNS resolution and reverse DNS lookups.
    Now uses session-specific configuration.
    Now returns standardized ProviderResult objects with IPv4 and IPv6 support.
    """

    def __init__(self, name=None, session_config=None):
@@ -25,7 +26,6 @@ class DNSProvider(BaseProvider):
        self.resolver = resolver.Resolver()
        self.resolver.timeout = 5
        self.resolver.lifetime = 10
        #self.resolver.nameservers = ['127.0.0.1']

    def get_name(self) -> str:
        """Return the provider name."""
@@ -47,78 +47,116 @@ class DNSProvider(BaseProvider):
        """DNS is always available - no API key required."""
        return True

    def query_domain(self, domain: str) -> List[Tuple[str, str, str, float, Dict[str, Any]]]:
    def query_domain(self, domain: str) -> ProviderResult:
        """
        Query DNS records for the domain to discover relationships.
        ...
        Query DNS records for the domain to discover relationships and attributes.
        FIXED: Now creates separate attributes for each DNS record type.

        Args:
            domain: Domain to investigate

        Returns:
            ProviderResult containing discovered relationships and attributes
        """
        if not _is_valid_domain(domain):
            return []
            return ProviderResult()

        relationships = []
        result = ProviderResult()

        # Query all record types
        # Query all record types - each gets its own attribute
        for record_type in ['A', 'AAAA', 'CNAME', 'MX', 'NS', 'SOA', 'TXT', 'SRV', 'CAA']:
            try:
                relationships.extend(self._query_record(domain, record_type))
            except resolver.NoAnswer:
                self._query_record(domain, record_type, result)
            #except resolver.NoAnswer:
                # This is not an error, just a confirmation that the record doesn't exist.
                self.logger.logger.debug(f"No {record_type} record found for {domain}")
                #self.logger.logger.debug(f"No {record_type} record found for {domain}")
            except Exception as e:
                self.failed_requests += 1
                self.logger.logger.debug(f"{record_type} record query failed for {domain}: {e}")
                # Optionally, you might want to re-raise other, more serious exceptions.

        return relationships
        return result

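The hunk above changes `query_domain` from returning a list of tuples to returning a `ProviderResult`. As a rough sketch of what a caller sees under the new API (field names taken from the serialization code earlier in this diff; the provider instantiation and target are illustrative):

```python
# Hedged sketch, not part of the diff: consuming the new ProviderResult API.
provider = DNSProvider()                       # assumes default session configuration
result = provider.query_domain("example.com")  # illustrative target

for rel in result.relationships:               # e.g. dns_a_record, dns_mx_record, ...
    print(rel.source_node, "->", rel.target_node, rel.relationship_type, rel.confidence)

for attr in result.attributes:                 # e.g. a_records, mx_records, txt_records
    print(attr.target_node, attr.name, attr.value)
```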
    def query_ip(self, ip: str) -> List[Tuple[str, str, str, float, Dict[str, Any]]]:
    def query_ip(self, ip: str) -> ProviderResult:
        """
        Query reverse DNS for the IP address.
        Query reverse DNS for the IP address (supports both IPv4 and IPv6).

        Args:
            ip: IP address to investigate
            ip: IP address to investigate (IPv4 or IPv6)

        Returns:
            List of relationships discovered from reverse DNS
            ProviderResult containing discovered relationships and attributes
        """
        if not _is_valid_ip(ip):
            return []
            return ProviderResult()

        relationships = []
        result = ProviderResult()
        ip_version = get_ip_version(ip)

        try:
            # Perform reverse DNS lookup
            # Perform reverse DNS lookup (works for both IPv4 and IPv6)
            self.total_requests += 1
            reverse_name = reversename.from_address(ip)
            response = self.resolver.resolve(reverse_name, 'PTR')
            self.successful_requests += 1

            ptr_records = []
            for ptr_record in response:
                hostname = str(ptr_record).rstrip('.')

                if _is_valid_domain(hostname):
                    # Determine appropriate forward relationship type based on IP version
                    if ip_version == 6:
                        relationship_type = 'dns_aaaa_record'
                        record_prefix = 'AAAA'
                    else:
                        relationship_type = 'dns_a_record'
                        record_prefix = 'A'

                    # Add the relationship
                    result.add_relationship(
                        source_node=ip,
                        target_node=hostname,
                        relationship_type='dns_ptr_record',
                        provider=self.name,
                        confidence=0.8,
                        raw_data={
                            'query_type': 'PTR',
                            'ip_address': ip,
                            'ip_version': ip_version,
                            'hostname': hostname,
                            'ttl': response.ttl
                        }
                    )

                    relationships.append((
                        ip,
                        hostname,
                        'ptr_record',
                        0.8,
                        raw_data
                    ))
                    # Add to PTR records list
                    ptr_records.append(f"PTR: {hostname}")

                    # Log the relationship discovery
                    self.log_relationship_discovery(
                        source_node=ip,
                        target_node=hostname,
                        relationship_type='ptr_record',
                        relationship_type='dns_ptr_record',
                        confidence_score=0.8,
                        raw_data=raw_data,
                        discovery_method="reverse_dns_lookup"
                        raw_data={
                            'query_type': 'PTR',
                            'ip_address': ip,
                            'ip_version': ip_version,
                            'hostname': hostname,
                            'ttl': response.ttl
                        },
                        discovery_method=f"reverse_dns_lookup_ipv{ip_version}"
                    )

            # Add PTR records as separate attribute
            if ptr_records:
                result.add_attribute(
                    target_node=ip,
                    name='ptr_records',  # Specific name for PTR records
                    value=ptr_records,
                    attr_type='dns_record',
                    provider=self.name,
                    confidence=0.8,
                    metadata={'ttl': response.ttl, 'ip_version': ip_version}
                )

        except resolver.NXDOMAIN:
@@ -130,22 +168,28 @@ class DNSProvider(BaseProvider):
            # Re-raise the exception so the scanner can handle the failure
            raise e

        return relationships
        return result

def _query_record(self, domain: str, record_type: str) -> List[Tuple[str, str, str, float, Dict[str, Any]]]:
|
||||
def _query_record(self, domain: str, record_type: str, result: ProviderResult) -> None:
|
||||
"""
|
||||
Query a specific type of DNS record for the domain.
|
||||
FIXED: Query DNS records with unique attribute names for each record type.
|
||||
Enhanced to better handle IPv6 AAAA records.
|
||||
"""
|
||||
relationships = []
|
||||
try:
|
||||
self.total_requests += 1
|
||||
response = self.resolver.resolve(domain, record_type)
|
||||
self.successful_requests += 1
|
||||
|
||||
dns_records = []
|
||||
|
||||
for record in response:
|
||||
target = ""
|
||||
if record_type in ['A', 'AAAA']:
|
||||
target = str(record)
|
||||
# Validate that the IP address is properly formed
|
||||
if not _is_valid_ip(target):
|
||||
self.logger.logger.debug(f"Invalid IP address in {record_type} record: {target}")
|
||||
continue
|
||||
elif record_type in ['CNAME', 'NS', 'PTR']:
|
||||
target = str(record.target).rstrip('.')
|
||||
elif record_type == 'MX':
|
||||
@@ -153,32 +197,56 @@ class DNSProvider(BaseProvider):
|
||||
elif record_type == 'SOA':
|
||||
target = str(record.mname).rstrip('.')
|
||||
elif record_type in ['TXT']:
|
||||
# TXT records are treated as metadata, not relationships.
|
||||
# Keep raw TXT record value
|
||||
txt_value = str(record).strip('"')
|
||||
dns_records.append(txt_value) # Just the value for TXT
|
||||
continue
|
||||
elif record_type == 'SRV':
|
||||
target = str(record.target).rstrip('.')
|
||||
elif record_type == 'CAA':
|
||||
target = f"{record.flags} {record.tag.decode('utf-8')} \"{record.value.decode('utf-8')}\""
|
||||
# Keep raw CAA record format
|
||||
caa_value = f"{record.flags} {record.tag.decode('utf-8')} \"{record.value.decode('utf-8')}\""
|
||||
dns_records.append(caa_value) # Just the value for CAA
|
||||
continue
|
||||
else:
|
||||
target = str(record)
|
||||
|
||||
if target:
|
||||
# Determine IP version for metadata if this is an IP record
|
||||
ip_version = None
|
||||
if record_type in ['A', 'AAAA'] and _is_valid_ip(target):
|
||||
ip_version = get_ip_version(target)
|
||||
|
||||
raw_data = {
|
||||
'query_type': record_type,
|
||||
'domain': domain,
|
||||
'value': target,
|
||||
'ttl': response.ttl
|
||||
}
|
||||
relationship_type = f"{record_type.lower()}_record"
|
||||
confidence = 0.8 # Default confidence for DNS records
|
||||
|
||||
relationships.append((
|
||||
domain,
|
||||
target,
|
||||
relationship_type,
|
||||
confidence,
|
||||
raw_data
|
||||
))
|
||||
if ip_version:
|
||||
raw_data['ip_version'] = ip_version
|
||||
|
||||
relationship_type = f"dns_{record_type.lower()}_record"
|
||||
confidence = 0.8
|
||||
|
||||
# Add relationship
|
||||
result.add_relationship(
|
||||
source_node=domain,
|
||||
target_node=target,
|
||||
relationship_type=relationship_type,
|
||||
provider=self.name,
|
||||
confidence=confidence,
|
||||
raw_data=raw_data
|
||||
)
|
||||
|
||||
# Add target to records list
|
||||
dns_records.append(target)
|
||||
|
||||
# Log relationship discovery with IP version info
|
||||
discovery_method = f"dns_{record_type.lower()}_record"
|
||||
if ip_version:
|
||||
discovery_method += f"_ipv{ip_version}"
|
||||
|
||||
self.log_relationship_discovery(
|
||||
source_node=domain,
|
||||
@@ -186,13 +254,33 @@ class DNSProvider(BaseProvider):
|
||||
relationship_type=relationship_type,
|
||||
confidence_score=confidence,
|
||||
raw_data=raw_data,
|
||||
discovery_method=f"dns_{record_type.lower()}_record"
|
||||
discovery_method=discovery_method
|
||||
)
|
||||
|
||||
# FIXED: Create attribute with specific name for each record type
|
||||
if dns_records:
|
||||
# Use record type specific attribute name (e.g., 'a_records', 'mx_records', etc.)
|
||||
attribute_name = f"{record_type.lower()}_records"
|
||||
|
||||
metadata = {'record_type': record_type, 'ttl': response.ttl}
|
||||
|
||||
# Add IP version info for A/AAAA records
|
||||
if record_type in ['A', 'AAAA'] and dns_records:
|
||||
first_ip_version = get_ip_version(dns_records[0])
|
||||
if first_ip_version:
|
||||
metadata['ip_version'] = first_ip_version
|
||||
|
||||
result.add_attribute(
|
||||
target_node=domain,
|
||||
name=attribute_name, # UNIQUE name for each record type!
|
||||
value=dns_records,
|
||||
attr_type='dns_record_list',
|
||||
provider=self.name,
|
||||
confidence=0.8,
|
||||
metadata=metadata
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
self.failed_requests += 1
|
||||
self.logger.logger.debug(f"{record_type} record query failed for {domain}: {e}")
|
||||
# Re-raise the exception so the scanner can handle it
|
||||
raise e
|
||||
|
||||
return relationships
|
||||
@@ -1,15 +1,20 @@
|
||||
# dnsrecon/providers/shodan_provider.py
|
||||
|
||||
import json
|
||||
from typing import List, Dict, Any, Tuple
|
||||
from pathlib import Path
|
||||
from typing import Dict, Any
|
||||
from datetime import datetime, timezone
|
||||
import requests
|
||||
|
||||
from .base_provider import BaseProvider
|
||||
from utils.helpers import _is_valid_ip, _is_valid_domain
|
||||
from core.provider_result import ProviderResult
|
||||
from utils.helpers import _is_valid_ip, _is_valid_domain, get_ip_version, normalize_ip
|
||||
|
||||
|
||||
class ShodanProvider(BaseProvider):
|
||||
"""
|
||||
Provider for querying Shodan API for IP address and hostname information.
|
||||
Now uses session-specific API keys.
|
||||
Provider for querying Shodan API for IP address information.
|
||||
Now returns standardized ProviderResult objects with caching support for IPv4 and IPv6.
|
||||
"""
|
||||
|
||||
def __init__(self, name=None, session_config=None):
|
||||
@@ -23,6 +28,10 @@ class ShodanProvider(BaseProvider):
|
||||
self.base_url = "https://api.shodan.io"
|
||||
self.api_key = self.config.get_api_key('shodan')
|
||||
|
||||
# Initialize cache directory
|
||||
self.cache_dir = Path('cache') / 'shodan'
|
||||
self.cache_dir.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
def is_available(self) -> bool:
|
||||
"""Check if Shodan provider is available (has valid API key in this session)."""
|
||||
return self.api_key is not None and len(self.api_key.strip()) > 0
|
||||
@@ -33,7 +42,7 @@ class ShodanProvider(BaseProvider):
|
||||
|
||||
def get_display_name(self) -> str:
|
||||
"""Return the provider display name for the UI."""
|
||||
return "shodan"
|
||||
return "Shodan"
|
||||
|
||||
def requires_api_key(self) -> bool:
|
||||
"""Return True if the provider requires an API key."""
|
||||
@@ -41,267 +50,300 @@ class ShodanProvider(BaseProvider):
|
||||
|
||||
def get_eligibility(self) -> Dict[str, bool]:
|
||||
"""Return a dictionary indicating if the provider can query domains and/or IPs."""
|
||||
return {'domains': True, 'ips': True}
|
||||
return {'domains': False, 'ips': True}
|
||||
|
||||
    def query_domain(self, domain: str) -> List[Tuple[str, str, str, float, Dict[str, Any]]]:
    def _get_cache_file_path(self, ip: str) -> Path:
        """
        Query Shodan for information about a domain.
        Uses Shodan's hostname search to find associated IPs.
        Generate cache file path for an IP address (IPv4 or IPv6).
        IPv6 addresses contain colons which are replaced with underscores for filesystem safety.
        """
        # Normalize the IP address first to ensure consistent caching
        normalized_ip = normalize_ip(ip)
        if not normalized_ip:
            # Fallback for invalid IPs
            safe_ip = ip.replace('.', '_').replace(':', '_')
        else:
            # Replace problematic characters for both IPv4 and IPv6
            safe_ip = normalized_ip.replace('.', '_').replace(':', '_')

        return self.cache_dir / f"{safe_ip}.json"

    def _get_cache_status(self, cache_file_path: Path) -> str:
        """
        Check cache status for an IP.
        Returns: 'not_found', 'fresh', or 'stale'
        """
        if not cache_file_path.exists():
            return "not_found"

        try:
            with open(cache_file_path, 'r') as f:
                cache_data = json.load(f)

            last_query_str = cache_data.get("last_upstream_query")
            if not last_query_str:
                return "stale"

            last_query = datetime.fromisoformat(last_query_str.replace('Z', '+00:00'))
            hours_since_query = (datetime.now(timezone.utc) - last_query).total_seconds() / 3600

            cache_timeout = self.config.cache_timeout_hours
            if hours_since_query < cache_timeout:
                return "fresh"
            else:
                return "stale"

        except (json.JSONDecodeError, ValueError, KeyError):
            return "stale"

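A short sketch of how these two helpers combine in `query_ip` further down in this diff: the status decides whether the cached `ProviderResult` is reused or the Shodan API is queried again. The IP and the printed path are illustrative.

```python
# Hedged sketch, not part of the diff: cache lookup flow for a single IP.
provider = ShodanProvider()                           # assumes a session-scoped Shodan API key
cache_file = provider._get_cache_file_path("8.8.8.8")
print(cache_file)                                     # cache/shodan/8_8_8_8.json

status = provider._get_cache_status(cache_file)
# 'not_found' -> query the Shodan API and write a new cache file
# 'fresh'     -> younger than config.cache_timeout_hours, reuse the cached result
# 'stale'     -> re-query the API, falling back to the cached data if the request fails
```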
def query_domain(self, domain: str) -> ProviderResult:
|
||||
"""
|
||||
Domain queries are no longer supported for the Shodan provider.
|
||||
|
||||
Args:
|
||||
domain: Domain to investigate
|
||||
|
||||
Returns:
|
||||
List of relationships discovered from Shodan data
|
||||
Empty ProviderResult
|
||||
"""
|
||||
if not _is_valid_domain(domain) or not self.is_available():
|
||||
return []
|
||||
return ProviderResult()
|
||||
|
||||
relationships = []
|
||||
|
||||
try:
|
||||
# Search for hostname in Shodan
|
||||
search_query = f"hostname:{domain}"
|
||||
url = f"{self.base_url}/shodan/host/search"
|
||||
params = {
|
||||
'key': self.api_key,
|
||||
'query': search_query,
|
||||
'minify': True # Get minimal data to reduce bandwidth
|
||||
}
|
||||
|
||||
response = self.make_request(url, method="GET", params=params, target_indicator=domain)
|
||||
|
||||
if not response or response.status_code != 200:
|
||||
return []
|
||||
|
||||
data = response.json()
|
||||
|
||||
if 'matches' not in data:
|
||||
return []
|
||||
|
||||
# Process search results
|
||||
for match in data['matches']:
|
||||
ip_address = match.get('ip_str')
|
||||
hostnames = match.get('hostnames', [])
|
||||
|
||||
if ip_address and domain in hostnames:
|
||||
raw_data = {
|
||||
'ip_address': ip_address,
|
||||
'hostnames': hostnames,
|
||||
'country': match.get('location', {}).get('country_name', ''),
|
||||
'city': match.get('location', {}).get('city', ''),
|
||||
'isp': match.get('isp', ''),
|
||||
'org': match.get('org', ''),
|
||||
'ports': match.get('ports', []),
|
||||
'last_update': match.get('last_update', '')
|
||||
}
|
||||
|
||||
relationships.append((
|
||||
domain,
|
||||
ip_address,
|
||||
'a_record', # Domain resolves to IP
|
||||
0.8,
|
||||
raw_data
|
||||
))
|
||||
|
||||
self.log_relationship_discovery(
|
||||
source_node=domain,
|
||||
target_node=ip_address,
|
||||
relationship_type='a_record',
|
||||
confidence_score=0.8,
|
||||
raw_data=raw_data,
|
||||
discovery_method="shodan_hostname_search"
|
||||
)
|
||||
|
||||
# Also create relationships to other hostnames on the same IP
|
||||
for hostname in hostnames:
|
||||
if hostname != domain and _is_valid_domain(hostname):
|
||||
hostname_raw_data = {
|
||||
'shared_ip': ip_address,
|
||||
'all_hostnames': hostnames,
|
||||
'discovery_context': 'shared_hosting'
|
||||
}
|
||||
|
||||
relationships.append((
|
||||
domain,
|
||||
hostname,
|
||||
'passive_dns', # Shared hosting relationship
|
||||
0.6, # Lower confidence for shared hosting
|
||||
hostname_raw_data
|
||||
))
|
||||
|
||||
self.log_relationship_discovery(
|
||||
source_node=domain,
|
||||
target_node=hostname,
|
||||
relationship_type='passive_dns',
|
||||
confidence_score=0.6,
|
||||
raw_data=hostname_raw_data,
|
||||
discovery_method="shodan_shared_hosting"
|
||||
)
|
||||
|
||||
except json.JSONDecodeError as e:
|
||||
self.logger.logger.error(f"Failed to parse JSON response from Shodan: {e}")
|
||||
|
||||
return relationships
|
||||
|
||||
def query_ip(self, ip: str) -> List[Tuple[str, str, str, float, Dict[str, Any]]]:
|
||||
def query_ip(self, ip: str) -> ProviderResult:
|
||||
"""
|
||||
Query Shodan for information about an IP address.
|
||||
Query Shodan for information about an IP address (IPv4 or IPv6), with caching of processed data.
|
||||
|
||||
Args:
|
||||
ip: IP address to investigate
|
||||
ip: IP address to investigate (IPv4 or IPv6)
|
||||
|
||||
Returns:
|
||||
List of relationships discovered from Shodan IP data
|
||||
ProviderResult containing discovered relationships and attributes
|
||||
"""
|
||||
if not _is_valid_ip(ip) or not self.is_available():
|
||||
return []
|
||||
return ProviderResult()
|
||||
|
||||
relationships = []
|
||||
# Normalize IP address for consistent processing
|
||||
normalized_ip = normalize_ip(ip)
|
||||
if not normalized_ip:
|
||||
return ProviderResult()
|
||||
|
||||
cache_file = self._get_cache_file_path(normalized_ip)
|
||||
cache_status = self._get_cache_status(cache_file)
|
||||
|
||||
result = ProviderResult()
|
||||
|
||||
try:
|
||||
# Query Shodan host information
|
||||
url = f"{self.base_url}/shodan/host/{ip}"
|
||||
if cache_status == "fresh":
|
||||
result = self._load_from_cache(cache_file)
|
||||
self.logger.logger.info(f"Using cached Shodan data for {normalized_ip}")
|
||||
else: # "stale" or "not_found"
|
||||
url = f"{self.base_url}/shodan/host/{normalized_ip}"
|
||||
params = {'key': self.api_key}
|
||||
response = self.make_request(url, method="GET", params=params, target_indicator=normalized_ip)
|
||||
|
||||
response = self.make_request(url, method="GET", params=params, target_indicator=ip)
|
||||
|
||||
if not response or response.status_code != 200:
|
||||
return []
|
||||
|
||||
if response and response.status_code == 200:
|
||||
data = response.json()
|
||||
|
||||
# Extract hostname relationships
|
||||
hostnames = data.get('hostnames', [])
|
||||
for hostname in hostnames:
|
||||
if _is_valid_domain(hostname):
|
||||
raw_data = {
|
||||
'ip_address': ip,
|
||||
'hostname': hostname,
|
||||
'country': data.get('country_name', ''),
|
||||
'city': data.get('city', ''),
|
||||
'isp': data.get('isp', ''),
|
||||
'org': data.get('org', ''),
|
||||
'asn': data.get('asn', ''),
|
||||
'ports': data.get('ports', []),
|
||||
'last_update': data.get('last_update', ''),
|
||||
'os': data.get('os', '')
|
||||
}
|
||||
|
||||
relationships.append((
|
||||
ip,
|
||||
hostname,
|
||||
'a_record', # IP resolves to hostname
|
||||
0.8,
|
||||
raw_data
|
||||
))
|
||||
|
||||
self.log_relationship_discovery(
|
||||
source_node=ip,
|
||||
target_node=hostname,
|
||||
relationship_type='a_record',
|
||||
confidence_score=0.8,
|
||||
raw_data=raw_data,
|
||||
discovery_method="shodan_host_lookup"
|
||||
)
|
||||
|
||||
# Extract ASN relationship if available
|
||||
asn = data.get('asn')
|
||||
if asn:
|
||||
# Ensure the ASN starts with "AS"
|
||||
if isinstance(asn, str) and asn.startswith('AS'):
|
||||
asn_name = asn
|
||||
asn_number = asn[2:]
|
||||
# Process the data into ProviderResult BEFORE caching
|
||||
result = self._process_shodan_data(normalized_ip, data)
|
||||
self._save_to_cache(cache_file, result, data) # Save both result and raw data
|
||||
elif response and response.status_code == 404:
|
||||
# Handle 404 "No information available" as successful empty result
|
||||
try:
|
||||
error_data = response.json()
|
||||
if "No information available" in error_data.get('error', ''):
|
||||
# This is a successful query - Shodan just has no data
|
||||
self.logger.logger.debug(f"Shodan has no information for {normalized_ip}")
|
||||
result = ProviderResult() # Empty but successful result
|
||||
# Cache the empty result to avoid repeated queries
|
||||
self._save_to_cache(cache_file, result, {'error': 'No information available'})
|
||||
else:
|
||||
asn_name = f"AS{asn}"
|
||||
asn_number = str(asn)
|
||||
# Some other 404 error - treat as failure
|
||||
raise requests.exceptions.RequestException(f"Shodan API returned 404: {error_data}")
|
||||
except (ValueError, KeyError):
|
||||
# Could not parse JSON response - treat as failure
|
||||
raise requests.exceptions.RequestException(f"Shodan API returned 404 with unparseable response")
|
||||
elif cache_status == "stale":
|
||||
# If API fails on a stale cache, use the old data
|
||||
result = self._load_from_cache(cache_file)
|
||||
else:
|
||||
# Other HTTP error codes should be treated as failures
|
||||
status_code = response.status_code if response else "No response"
|
||||
raise requests.exceptions.RequestException(f"Shodan API returned HTTP {status_code}")
|
||||
|
||||
asn_raw_data = {
|
||||
'ip_address': ip,
|
||||
'asn': asn_number,
|
||||
'isp': data.get('isp', ''),
|
||||
'org': data.get('org', '')
|
||||
}
|
||||
except requests.exceptions.RequestException as e:
|
||||
self.logger.logger.info(f"Shodan API query returned no info for {normalized_ip}: {e}")
|
||||
if cache_status == "stale":
|
||||
result = self._load_from_cache(cache_file)
|
||||
else:
|
||||
# Re-raise for retry scheduling - but only for actual failures
|
||||
raise e
|
||||
|
||||
relationships.append((
|
||||
ip,
|
||||
asn_name,
|
||||
'asn_membership',
|
||||
0.7,
|
||||
asn_raw_data
|
||||
))
|
||||
return result
|
||||
|
||||
self.log_relationship_discovery(
|
||||
source_node=ip,
|
||||
target_node=asn_name,
|
||||
relationship_type='asn_membership',
|
||||
confidence_score=0.7,
|
||||
raw_data=asn_raw_data,
|
||||
discovery_method="shodan_asn_lookup"
|
||||
def _load_from_cache(self, cache_file_path: Path) -> ProviderResult:
|
||||
"""Load processed Shodan data from a cache file."""
|
||||
try:
|
||||
with open(cache_file_path, 'r') as f:
|
||||
cache_content = json.load(f)
|
||||
|
||||
result = ProviderResult()
|
||||
|
||||
# Reconstruct relationships
|
||||
for rel_data in cache_content.get("relationships", []):
|
||||
result.add_relationship(
|
||||
source_node=rel_data["source_node"],
|
||||
target_node=rel_data["target_node"],
|
||||
relationship_type=rel_data["relationship_type"],
|
||||
provider=rel_data["provider"],
|
||||
confidence=rel_data["confidence"],
|
||||
raw_data=rel_data.get("raw_data", {})
|
||||
)
|
||||
|
||||
except json.JSONDecodeError as e:
|
||||
self.logger.logger.error(f"Failed to parse JSON response from Shodan: {e}")
|
||||
# Reconstruct attributes
|
||||
for attr_data in cache_content.get("attributes", []):
|
||||
result.add_attribute(
|
||||
target_node=attr_data["target_node"],
|
||||
name=attr_data["name"],
|
||||
value=attr_data["value"],
|
||||
attr_type=attr_data["type"],
|
||||
provider=attr_data["provider"],
|
||||
confidence=attr_data["confidence"],
|
||||
metadata=attr_data.get("metadata", {})
|
||||
)
|
||||
|
||||
return relationships
|
||||
return result
|
||||
|
||||
def search_by_organization(self, org_name: str) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
Search Shodan for hosts belonging to a specific organization.
|
||||
|
||||
Args:
|
||||
org_name: Organization name to search for
|
||||
|
||||
Returns:
|
||||
List of host information dictionaries
|
||||
"""
|
||||
if not self.is_available():
|
||||
return []
|
||||
except (json.JSONDecodeError, FileNotFoundError, KeyError):
|
||||
return ProviderResult()
|
||||
|
||||
def _save_to_cache(self, cache_file_path: Path, result: ProviderResult, raw_data: Dict[str, Any]) -> None:
|
||||
"""Save processed Shodan data to a cache file."""
|
||||
try:
|
||||
search_query = f"org:\"{org_name}\""
|
||||
url = f"{self.base_url}/shodan/host/search"
|
||||
params = {
|
||||
'key': self.api_key,
|
||||
'query': search_query,
|
||||
'minify': True
|
||||
cache_data = {
|
||||
"last_upstream_query": datetime.now(timezone.utc).isoformat(),
|
||||
"raw_data": raw_data, # Preserve original for forensic purposes
|
||||
"relationships": [
|
||||
{
|
||||
"source_node": rel.source_node,
|
||||
"target_node": rel.target_node,
|
||||
"relationship_type": rel.relationship_type,
|
||||
"confidence": rel.confidence,
|
||||
"provider": rel.provider,
|
||||
"raw_data": rel.raw_data
|
||||
} for rel in result.relationships
|
||||
],
|
||||
"attributes": [
|
||||
{
|
||||
"target_node": attr.target_node,
|
||||
"name": attr.name,
|
||||
"value": attr.value,
|
||||
"type": attr.type,
|
||||
"provider": attr.provider,
|
||||
"confidence": attr.confidence,
|
||||
"metadata": attr.metadata
|
||||
} for attr in result.attributes
|
||||
]
|
||||
}
|
||||
|
||||
response = self.make_request(url, method="GET", params=params, target_indicator=org_name)
|
||||
|
||||
if response and response.status_code == 200:
|
||||
data = response.json()
|
||||
return data.get('matches', [])
|
||||
|
||||
with open(cache_file_path, 'w') as f:
|
||||
json.dump(cache_data, f, separators=(',', ':'), default=str)
|
||||
except Exception as e:
|
||||
self.logger.logger.error(f"Error searching Shodan by organization {org_name}: {e}")
|
||||
self.logger.logger.warning(f"Failed to save Shodan cache for {cache_file_path.name}: {e}")
|
||||
|
||||
return []
|
||||
|
||||
def get_host_services(self, ip: str) -> List[Dict[str, Any]]:
|
||||
def _process_shodan_data(self, ip: str, data: Dict[str, Any]) -> ProviderResult:
|
||||
"""
|
||||
Get service information for a specific IP address.
|
||||
|
||||
Args:
|
||||
ip: IP address to query
|
||||
|
||||
Returns:
|
||||
List of service information dictionaries
|
||||
VERIFIED: Process Shodan data creating ISP nodes with ASN attributes and proper relationships.
|
||||
Enhanced to include IP version information for IPv6 addresses.
|
||||
"""
|
||||
if not _is_valid_ip(ip) or not self.is_available():
|
||||
return []
|
||||
result = ProviderResult()
|
||||
|
||||
try:
|
||||
url = f"{self.base_url}/shodan/host/{ip}"
|
||||
params = {'key': self.api_key}
|
||||
# Determine IP version for metadata
|
||||
ip_version = get_ip_version(ip)
|
||||
|
||||
response = self.make_request(url, method="GET", params=params, target_indicator=ip)
|
||||
# VERIFIED: Extract ISP information and create proper ISP node with ASN
|
||||
isp_name = data.get('org')
|
||||
asn_value = data.get('asn')
|
||||
|
||||
if response and response.status_code == 200:
|
||||
data = response.json()
|
||||
return data.get('data', []) # Service banners
|
||||
if isp_name and asn_value:
|
||||
# Create relationship from IP to ISP
|
||||
result.add_relationship(
|
||||
source_node=ip,
|
||||
target_node=isp_name,
|
||||
relationship_type='shodan_isp',
|
||||
provider=self.name,
|
||||
confidence=0.9,
|
||||
raw_data={'asn': asn_value, 'shodan_org': isp_name, 'ip_version': ip_version}
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
self.logger.logger.error(f"Error getting Shodan services for IP {ip}: {e}")
|
||||
# Add ASN as attribute to the ISP node
|
||||
result.add_attribute(
|
||||
target_node=isp_name,
|
||||
name='asn',
|
||||
value=asn_value,
|
||||
attr_type='isp_info',
|
||||
provider=self.name,
|
||||
confidence=0.9,
|
||||
metadata={'description': 'Autonomous System Number from Shodan', 'ip_version': ip_version}
|
||||
)
|
||||
|
||||
return []
|
||||
# Also add organization name as attribute to ISP node for completeness
|
||||
result.add_attribute(
|
||||
target_node=isp_name,
|
||||
name='organization_name',
|
||||
value=isp_name,
|
||||
attr_type='isp_info',
|
||||
provider=self.name,
|
||||
confidence=0.9,
|
||||
metadata={'description': 'Organization name from Shodan', 'ip_version': ip_version}
|
||||
)
|
||||
|
||||
# Process hostnames (reverse DNS)
|
||||
for key, value in data.items():
|
||||
if key == 'hostnames':
|
||||
for hostname in value:
|
||||
if _is_valid_domain(hostname):
|
||||
# Use appropriate relationship type based on IP version
|
||||
if ip_version == 6:
|
||||
relationship_type = 'shodan_aaaa_record'
|
||||
else:
|
||||
relationship_type = 'shodan_a_record'
|
||||
|
||||
result.add_relationship(
|
||||
source_node=ip,
|
||||
target_node=hostname,
|
||||
relationship_type=relationship_type,
|
||||
provider=self.name,
|
||||
confidence=0.8,
|
||||
raw_data={**data, 'ip_version': ip_version}
|
||||
)
|
||||
self.log_relationship_discovery(
|
||||
source_node=ip,
|
||||
target_node=hostname,
|
||||
relationship_type=relationship_type,
|
||||
confidence_score=0.8,
|
||||
raw_data={**data, 'ip_version': ip_version},
|
||||
discovery_method=f"shodan_host_lookup_ipv{ip_version}"
|
||||
)
|
||||
elif key == 'ports':
|
||||
# Add open ports as attributes to the IP
|
||||
for port in value:
|
||||
result.add_attribute(
|
||||
target_node=ip,
|
||||
name='shodan_open_port',
|
||||
value=port,
|
||||
attr_type='shodan_network_info',
|
||||
provider=self.name,
|
||||
confidence=0.9,
|
||||
metadata={'ip_version': ip_version}
|
||||
)
|
||||
elif isinstance(value, (str, int, float, bool)) and value is not None:
|
||||
# Add other Shodan fields as IP attributes (keep raw field names)
|
||||
result.add_attribute(
|
||||
target_node=ip,
|
||||
name=key, # Raw field name from Shodan API
|
||||
value=value,
|
||||
attr_type='shodan_info',
|
||||
provider=self.name,
|
||||
confidence=0.9,
|
||||
metadata={'ip_version': ip_version}
|
||||
)
|
||||
|
||||
return result
|
||||
@@ -1,11 +1,10 @@
Flask>=2.3.3
networkx>=3.1
requests>=2.31.0
python-dateutil>=2.8.2
Werkzeug>=2.3.7
urllib3>=2.0.0
dnspython>=2.4.2
Flask
networkx
requests
python-dateutil
Werkzeug
urllib3
dnspython
gunicorn
redis
python-dotenv
psycopg2-binary
static/css/main.css (1641 lines): file diff suppressed because it is too large
static/js/main.js (1903 lines): file diff suppressed because it is too large
@@ -1,5 +1,6 @@
<!DOCTYPE html>
<html lang="en">

<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
@@ -7,8 +8,11 @@
    <link rel="stylesheet" href="{{ url_for('static', filename='css/main.css') }}">
    <script src="https://cdnjs.cloudflare.com/ajax/libs/vis/4.21.0/vis.min.js"></script>
    <link href="https://cdnjs.cloudflare.com/ajax/libs/vis/4.21.0/vis.min.css" rel="stylesheet" type="text/css">
    <link href="https://fonts.googleapis.com/css2?family=Roboto+Mono:wght@300;400;500;700&family=Special+Elite&display=swap" rel="stylesheet">
    <link
        href="https://fonts.googleapis.com/css2?family=Roboto+Mono:wght@300;400;500;700&family=Special+Elite&display=swap"
        rel="stylesheet">
</head>

<body>
    <div class="container">
        <header class="header">
@@ -32,19 +36,8 @@

            <div class="form-container">
                <div class="input-group">
                    <label for="target-domain">Target Domain</label>
                    <input type="text" id="target-domain" placeholder="example.com" autocomplete="off">
                </div>

                <div class="input-group">
                    <label for="max-depth">Recursion Depth</label>
                    <select id="max-depth">
                        <option value="1">Depth 1 - Direct relationships</option>
                        <option value="2" selected>Depth 2 - Recommended</option>
                        <option value="3">Depth 3 - Extended analysis</option>
                        <option value="4">Depth 4 - Deep reconnaissance</option>
                        <option value="5">Depth 5 - Maximum depth</option>
                    </select>
                    <label for="target-input">Target Domain or IP</label>
                    <input type="text" id="target-input" placeholder="example.com or 8.8.8.8" autocomplete="off">
                </div>

                <div class="button-group">
@@ -60,13 +53,13 @@
                        <span class="btn-icon">[STOP]</span>
                        <span>Terminate Scan</span>
                    </button>
                    <button id="export-results" class="btn btn-secondary">
                    <button id="export-options" class="btn btn-secondary">
                        <span class="btn-icon">[EXPORT]</span>
                        <span>Download Results</span>
                        <span>Export Options</span>
                    </button>
                    <button id="configure-api-keys" class="btn btn-secondary">
                    <button id="configure-settings" class="btn btn-secondary">
                        <span class="btn-icon">[API]</span>
                        <span>Configure API Keys</span>
                        <span>Settings</span>
                    </button>
                </div>
            </div>
@@ -104,38 +97,33 @@
                <div class="progress-bar">
                    <div id="progress-fill" class="progress-fill"></div>
                </div>
                <div class="progress-placeholder">
                    <span class="status-label">
                        ⚠️ <strong>Important:</strong> Scanning large public services (e.g., Google, Cloudflare, AWS) is
                        <strong>discouraged</strong> due to rate limits (e.g., crt.sh).
                        <br><br>
                        Our task scheduler operates on a <strong>priority-based queue</strong>:
                        Short, targeted tasks like DNS are processed first, while resource-intensive requests (e.g., crt.sh)
                        are <strong>automatically deprioritized</strong> and may be processed later.
                    </span>
                </div>
            </div>
        </section>

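The status banner above describes the scheduler's priority-based queue in prose. A minimal sketch of that behaviour, assuming heapq-style ordering, is shown below; the class, task names, and priority values are illustrative and are not taken from the DNSRecon scheduler.

```python
# Hedged sketch only: tasks with a lower priority number are processed first,
# so cheap DNS lookups drain ahead of expensive crt.sh requests.
import heapq
import itertools

class PriorityTaskQueue:
    def __init__(self):
        self._heap = []
        self._counter = itertools.count()  # tie-breaker keeps FIFO order per priority

    def put(self, priority: int, task: str) -> None:
        heapq.heappush(self._heap, (priority, next(self._counter), task))

    def get(self) -> str:
        return heapq.heappop(self._heap)[2]

queue = PriorityTaskQueue()
queue.put(10, "crtsh:example.com")   # resource-intensive, deprioritized
queue.put(1, "dns:example.com")      # short, targeted, processed first
queue.put(1, "dns:www.example.com")

print(queue.get())  # dns:example.com
print(queue.get())  # dns:www.example.com
print(queue.get())  # crtsh:example.com
```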
<section class="visualization-panel">
|
||||
<div class="panel-header">
|
||||
<h2>Infrastructure Map</h2>
|
||||
<div class="view-controls">
|
||||
<div class="filter-group">
|
||||
<label for="node-type-filter">Node Type:</label>
|
||||
<select id="node-type-filter">
|
||||
<option value="all">All</option>
|
||||
<option value="domain">Domain</option>
|
||||
<option value="ip">IP</option>
|
||||
<option value="asn">ASN</option>
|
||||
<option value="correlation_object">Correlation Object</option>
|
||||
<option value="large_entity">Large Entity</option>
|
||||
</select>
|
||||
</div>
|
||||
<div class="filter-group">
|
||||
<label for="confidence-filter">Min Confidence:</label>
|
||||
<input type="range" id="confidence-filter" min="0" max="1" step="0.1" value="0">
|
||||
<span id="confidence-value">0</span>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div id="network-graph" class="graph-container">
|
||||
<div class="graph-placeholder">
|
||||
<div class="placeholder-content">
|
||||
<div class="placeholder-icon">[○]</div>
|
||||
<div class="placeholder-icon">[◯]</div>
|
||||
<div class="placeholder-text">Infrastructure map will appear here</div>
|
||||
<div class="placeholder-subtext">Start a reconnaissance scan to visualize relationships</div>
|
||||
<div class="placeholder-subtext">Start a reconnaissance scan to visualize relationships
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
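Note: the status text above describes the scheduler as a priority-based queue in which short DNS lookups are served before heavier providers such as crt.sh. The sketch below only illustrates that idea with Python's standard heapq; the class name, provider keys, and priority values are hypothetical and are not taken from DNSRecon's actual scheduler.

```python
# Minimal sketch of a priority-based provider queue (hypothetical, not DNSRecon's scheduler).
# Lower numbers are dequeued first, so cheap DNS tasks run before crt.sh requests.
import heapq
import itertools

PROVIDER_PRIORITY = {"dns": 0, "shodan": 1, "crtsh": 2}  # assumed ordering

class ProviderTaskQueue:
    def __init__(self) -> None:
        self._heap = []
        self._counter = itertools.count()  # tie-breaker keeps FIFO order within a priority

    def push(self, provider: str, target: str) -> None:
        priority = PROVIDER_PRIORITY.get(provider, 1)
        heapq.heappush(self._heap, (priority, next(self._counter), provider, target))

    def pop(self):
        _, _, provider, target = heapq.heappop(self._heap)
        return provider, target

queue = ProviderTaskQueue()
queue.push("crtsh", "example.com")   # enqueued first...
queue.push("dns", "example.com")
print(queue.pop())                   # ...but ('dns', 'example.com') is dequeued first
```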
@@ -145,29 +133,30 @@
<div class="legend-color" style="background-color: #00ff41;"></div>
<span>Domains</span>
</div>
<div class="legend-item">
<div class="legend-color" style="background-color: #c92f2f;"></div>
<span>Domain (no valid cert)</span>
</div>
<div class="legend-item">
<div class="legend-color" style="background-color: #c7c7c7;"></div>
<span>Domain (never had cert)</span>
</div>
<div class="legend-item">
<div class="legend-color" style="background-color: #ff9900;"></div>
<span>IP Addresses</span>
</div>
<div class="legend-item">
<div class="legend-color" style="background-color: #c7c7c7;"></div>
<span>Domain (invalid cert)</span>
</div>
<div class="legend-item">
<div class="legend-color" style="background-color: #9d4edd;"></div>
<span>Correlation Objects</span>
</div>
<div class="legend-item">
<div class="legend-edge high-confidence"></div>
<span>High Confidence</span>
</div>
<div class="legend-item">
<div class="legend-edge medium-confidence"></div>
<span>Medium Confidence</span>
<div class="legend-color" style="background-color: #00aaff;"></div>
<span>ISPs</span>
</div>
<div class="legend-item">
<div class="legend-color" style="background-color: #ff6b6b;"></div>
<span>Large Entity</span>
<span>Certificate Authorities</span>
</div>

<div class="legend-item">
<div class="legend-color" style="background-color: #9d4edd;"></div>
<span>Correlation Objects</span>
</div>
</div>
</section>
@@ -205,24 +194,79 @@
</div>
</div>

<div id="api-key-modal" class="modal">
<!-- Settings Modal -->
<div id="settings-modal" class="modal">
<div class="modal-content">
<div class="modal-header">
<h3>Configure API Keys</h3>
<button id="api-key-modal-close" class="modal-close">[×]</button>
<h3>Scanner Configuration</h3>
<button id="settings-modal-close" class="modal-close">[×]</button>
</div>
<div class="modal-body">
<p class="modal-description">
Enter your API keys for enhanced data providers. Keys are stored in memory for the current session only and are never saved to disk.
<div class="modal-details">
<!-- Scan Settings Section -->
<section class="modal-section">
<details open>
<summary>
<span>⚙️ Scan Settings</span>
</summary>
<div class="modal-section-content">
<div class="input-group">
<label for="max-depth">Recursion Depth</label>
<select id="max-depth">
<option value="1">Depth 1 - Direct relationships</option>
<option value="2" selected>Depth 2 - Recommended</option>
<option value="3">Depth 3 - Extended analysis</option>
<option value="4">Depth 4 - Deep reconnaissance</option>
<option value="5">Depth 5 - Maximum depth</option>
</select>
</div>
</div>
</details>
</section>

<!-- Provider Configuration Section -->
<section class="modal-section">
<details open>
<summary>
<span>🔧 Provider Configuration</span>
<span class="merge-badge" id="provider-count">0</span>
</summary>
<div class="modal-section-content">
<div id="provider-config-list">
<!-- Dynamically populated -->
</div>
</div>
</details>
</section>

<!-- API Keys Section -->
<section class="modal-section">
<details>
<summary>
<span>🔑 API Keys</span>
<span class="merge-badge" id="api-key-count">0</span>
</summary>
<div class="modal-section-content">
<p class="placeholder-subtext" style="margin-bottom: 1rem;">
⚠️ API keys are stored in memory for the current session only.
Only provide API keys you don't use for anything else.
</p>
<div id="api-key-inputs">
<!-- Dynamically populated -->
</div>
<div class="button-group" style="flex-direction: row; justify-content: flex-end;">
<button id="reset-api-keys" class="btn btn-secondary">
<span>Reset</span>
</div>
</details>
</section>

<!-- Action Buttons -->
<div class="button-group" style="margin-top: 1.5rem;">
<button id="save-settings" class="btn btn-primary">
<span class="btn-icon">[SAVE]</span>
<span>Save Configuration</span>
</button>
<button id="save-api-keys" class="btn btn-primary">
<span>Save Keys</span>
<button id="reset-settings" class="btn btn-secondary">
<span class="btn-icon">[RESET]</span>
<span>Reset to Defaults</span>
</button>
</div>
</div>
@@ -230,19 +274,42 @@
</div>
</div>

<script>
function copyToClipboard(elementId) {
const element = document.getElementById(elementId);
const textToCopy = element.innerText;
navigator.clipboard.writeText(textToCopy).then(() => {
// Optional: Show a success message
console.log('Copied to clipboard');
}).catch(err => {
console.error('Failed to copy: ', err);
});
}
</script>
<!-- Export Modal -->
<div id="export-modal" class="modal">
<div class="modal-content">
<div class="modal-header">
<h3>Export Options</h3>
<button id="export-modal-close" class="modal-close">[×]</button>
</div>
<div class="modal-body">
<div class="modal-details">
<section class="modal-section">
<details open>
<summary>
<span>📊 Available Exports</span>
</summary>
<div class="modal-section-content">
<div class="button-group" style="margin-top: 1rem;">
<button id="export-graph-json" class="btn btn-primary">
<span class="btn-icon">[JSON]</span>
<span>Export Graph Data</span>
</button>
<div class="status-row" style="margin-top: 0.5rem;">
<span class="status-label">Complete graph data with forensic audit trail,
provider statistics, and scan metadata in JSON format for analysis and
archival.</span>
</div>
</div>
</div>
</details>
</section>
</div>
</div>
</div>
</div>
</div>
<script src="{{ url_for('static', filename='js/graph.js') }}"></script>
<script src="{{ url_for('static', filename='js/main.js') }}"></script>
</body>

</html>
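For orientation, the "Export Graph Data" action described above downloads a single JSON document combining the graph, the forensic audit trail, provider statistics, and scan metadata. The structure below is purely illustrative; every field name is a hypothetical placeholder, not DNSRecon's actual export schema.

```python
# Hypothetical shape of the exported JSON (illustrative only; not the real schema).
import json

export = {
    "scan_metadata": {"target": "example.com", "max_depth": 2},
    "graph": {
        "nodes": [{"id": "example.com", "type": "domain"}],
        "edges": [{"from": "example.com", "to": "93.184.216.34",
                   "provider": "dns", "confidence": 0.8}],
    },
    "provider_statistics": {"dns": {"requests": 12}, "crtsh": {"requests": 1}},
    "audit_trail": [{"timestamp": "2025-01-01T00:00:00Z", "action": "scan_started"}],
}

print(json.dumps(export, indent=2))  # roughly what a downloaded file might look like
```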
@@ -1,3 +1,8 @@
# dnsrecon-reduced/utils/helpers.py

import ipaddress
from typing import Union

def _is_valid_domain(domain: str) -> bool:
    """
    Basic domain validation.
@@ -26,25 +31,64 @@ def _is_valid_domain(domain: str) -> bool:

def _is_valid_ip(ip: str) -> bool:
    """
    Basic IP address validation.
    IP address validation supporting both IPv4 and IPv6.

    Args:
        ip: IP address string to validate

    Returns:
        True if IP appears valid
        True if IP appears valid (IPv4 or IPv6)
    """
    if not ip:
        return False

    try:
        parts = ip.split('.')
        if len(parts) != 4:
            return False

        for part in parts:
            num = int(part)
            if not 0 <= num <= 255:
                return False

        # This handles both IPv4 and IPv6 validation
        ipaddress.ip_address(ip.strip())
        return True

    except (ValueError, AttributeError):
        return False

def is_valid_target(target: str) -> bool:
    """
    Checks if the target is a valid domain or IP address (IPv4/IPv6).

    Args:
        target: The target string to validate.

    Returns:
        True if the target is a valid domain or IP, False otherwise.
    """
    return _is_valid_domain(target) or _is_valid_ip(target)

def get_ip_version(ip: str) -> Union[int, None]:
    """
    Get the IP version (4 or 6) of a valid IP address.

    Args:
        ip: IP address string

    Returns:
        4 for IPv4, 6 for IPv6, None if invalid
    """
    try:
        addr = ipaddress.ip_address(ip.strip())
        return addr.version
    except (ValueError, AttributeError):
        return None

def normalize_ip(ip: str) -> Union[str, None]:
    """
    Normalize an IP address to its canonical form.

    Args:
        ip: IP address string

    Returns:
        Normalized IP address string, None if invalid
    """
    try:
        addr = ipaddress.ip_address(ip.strip())
        return str(addr)
    except (ValueError, AttributeError):
        return None
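The rewritten helpers above delegate all address parsing to the standard-library ipaddress module, so IPv4 and IPv6 share one code path. A short usage sketch follows; the import path assumes the package layout shown in the file header, and the example inputs are arbitrary.

```python
# Exercising the validators from utils/helpers.py; inputs are arbitrary examples.
from utils.helpers import is_valid_target, get_ip_version, normalize_ip

print(is_valid_target("example.com"))    # expected True (valid domain)
print(is_valid_target("2001:db8::1"))    # True via ipaddress (valid IPv6)
print(get_ip_version("8.8.8.8"))         # 4
print(get_ip_version("2001:db8::1"))     # 6
print(normalize_ip("2001:0DB8::0001"))   # '2001:db8::1' – canonical form
print(normalize_ip("not-an-ip"))         # None
```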