109 Commits

Author SHA1 Message Date
overcuriousity
c4e6a8998a iteration on ws implementation 2025-09-20 16:52:05 +02:00
overcuriousity
75a595c9cb try to implement websockets 2025-09-20 14:17:17 +02:00
3ee23c9d05 Merge pull request 'remove-large-entity-temporarily' (#3) from remove-large-entity-temporarily into main
Reviewed-on: mstoeck3/dnsrecon#3
2025-09-19 12:29:26 +00:00
overcuriousity
8d402ab4b1 postgres 2025-09-19 14:28:37 +02:00
overcuriousity
7472e6f416 fixes to hint for incomplete data 2025-09-19 12:35:28 +02:00
overcuriousity
eabb532557 almost fixed 2025-09-19 01:10:07 +02:00
overcuriousity
0a6d12de9a large entity recreation 2025-09-19 00:38:26 +02:00
overcuriousity
332805709d remove 2025-09-18 23:44:24 +02:00
overcuriousity
1558731c1c attempt fix large entity 2025-09-18 23:22:49 +02:00
overcuriousity
95cebbf935 bug fixes, improvements 2025-09-18 22:39:12 +02:00
overcuriousity
4c48917993 fixes for scheduler 2025-09-18 21:32:26 +02:00
overcuriousity
9d9afa6a08 fixes 2025-09-18 21:04:29 +02:00
overcuriousity
12f834bb65 correlation engine 2025-09-18 20:51:13 +02:00
overcuriousity
cbfd40ee98 adjustments to shodan & export manager 2025-09-18 19:22:58 +02:00
overcuriousity
d4081e1a32 export manager modularized 2025-09-18 17:42:39 +02:00
overcuriousity
15227b392d readme file & some ux improvements 2025-09-18 00:24:35 +02:00
overcuriousity
fdc26dcf15 executive summary 2025-09-18 00:13:37 +02:00
140ef54674 Merge pull request 'data-model' (#2) from data-model into main
Reviewed-on: mstoeck3/dnsrecon#2
2025-09-17 21:56:17 +00:00
overcuriousity
aae459446c update requirements, fix some bugs 2025-09-17 23:55:41 +02:00
overcuriousity
98e1b2280b new node types 2025-09-17 22:42:08 +02:00
overcuriousity
cd14198452 smaller css 2025-09-17 22:39:26 +02:00
overcuriousity
284660ab8c new node types 2025-09-17 22:09:39 +02:00
overcuriousity
ecfb27e02a new scheduling, removed many debug prints 2025-09-17 21:47:03 +02:00
overcuriousity
39b4242200 fix cli last task started 2025-09-17 21:35:54 +02:00
overcuriousity
a56755320c initial targets managed in backend 2025-09-17 21:29:18 +02:00
overcuriousity
b985f1e5f0 potential bugfix for the right click hiding 2025-09-17 21:15:52 +02:00
overcuriousity
8ae4fdbf80 UX improvements 2025-09-17 21:12:11 +02:00
overcuriousity
d0ee415f0d enhancements 2025-09-17 19:42:14 +02:00
overcuriousity
173c3dcf92 some adjustments for clarity 2025-09-17 17:10:11 +02:00
overcuriousity
ec755b17ad remove many unnecessary debug print, improve large entity handling 2025-09-17 13:31:35 +02:00
overcuriousity
469c133f1b fix session handling 2025-09-17 11:18:06 +02:00
overcuriousity
f775c61731 iterating on fixes 2025-09-17 11:08:50 +02:00
overcuriousity
b984189e08 scheduler fixes 2025-09-17 00:31:12 +02:00
overcuriousity
f2db739fa1 attempt to fix some logic 2025-09-17 00:05:48 +02:00
overcuriousity
47ce7ff883 format keys reduction 2025-09-16 23:17:23 +02:00
overcuriousity
229746e1ec improving the display 2025-09-16 22:25:46 +02:00
overcuriousity
733e1da640 new data model refinement 2025-09-16 21:23:02 +02:00
overcuriousity
97aa18f788 implement new data api 2025-09-16 20:21:08 +02:00
overcuriousity
15421dd4a5 update caching logic 2025-09-16 15:36:29 +02:00
overcuriousity
ad4086b156 fix root computation 2025-09-16 15:25:39 +02:00
overcuriousity
0e92ec6e9a readme 2025-09-16 01:00:21 +02:00
overcuriousity
baa57bfac2 update the shodan to use only ip 2025-09-16 00:57:24 +02:00
overcuriousity
f0f80be955 finalize pop-out 2025-09-16 00:32:46 +02:00
overcuriousity
ecc143ddbb extraction feature works 2025-09-16 00:08:27 +02:00
overcuriousity
2c48316477 extract from node feature 2025-09-16 00:01:24 +02:00
overcuriousity
fc098aed28 remove cache 2025-09-15 22:48:49 +02:00
9285226cbc Merge pull request 'new-scheduler' (#1) from new-scheduler into main
Reviewed-on: mstoeck3/dnsrecon#1
2025-09-15 20:45:47 +00:00
overcuriousity
350055fcec successfully implemented scheduler 2025-09-15 22:44:38 +02:00
overcuriousity
4a5ecf7a37 new highest-priority-first scheduler 2025-09-15 22:21:17 +02:00
overcuriousity
71b2855d01 fixes to iteration context menu 2025-09-15 21:37:19 +02:00
overcuriousity
93a258170a context menu option 2025-09-15 21:27:21 +02:00
overcuriousity
e2d4e12057 also allow ip lookups in scan 2025-09-15 21:00:57 +02:00
overcuriousity
c076ee028f main page refinement 2025-09-15 20:44:45 +02:00
overcuriousity
cbfac0922a fix node modals 2025-09-15 20:27:43 +02:00
overcuriousity
881f7b74e5 fix graph delete&revert 2025-09-15 20:20:15 +02:00
overcuriousity
c347581a6c fix graph trueRoot 2025-09-15 20:11:40 +02:00
overcuriousity
30ee21f087 revert graph.js refactor 2025-09-15 18:06:11 +02:00
overcuriousity
2496ca26a5 small fixes 2025-09-15 17:52:09 +02:00
overcuriousity
8aa3c4933e fix large entity 2025-09-15 14:12:02 +02:00
overcuriousity
fc326a66c8 fix large entity 2025-09-15 13:58:30 +02:00
overcuriousity
51902e3155 it 2025-09-15 13:35:58 +02:00
overcuriousity
a261d706c8 update style 2025-09-15 01:10:28 +02:00
overcuriousity
2410e689b8 visual enhancements 2025-09-15 00:25:27 +02:00
overcuriousity
62470673fe integrate checkbox filters 2025-09-14 23:54:27 +02:00
overcuriousity
2658bd148b context menu 2025-09-14 23:42:45 +02:00
overcuriousity
f02381910d Merge branch 'main' of https://git.cc24.dev/mstoeck3/dnsrecon 2025-09-14 23:10:16 +02:00
overcuriousity
674ac59c98 iteration 2025-09-14 23:09:38 +02:00
434d1f4803 dump.rdb deleted 2025-09-14 20:55:33 +00:00
overcuriousity
eb9eea127b it 2025-09-14 22:37:23 +02:00
overcuriousity
ae07635ab6 update edge labels 2025-09-14 20:50:09 +02:00
overcuriousity
d7adf9ad8b it 2025-09-14 20:22:09 +02:00
overcuriousity
39ce0e9d11 great progress 2025-09-14 19:12:12 +02:00
overcuriousity
926f9e1096 fixes 2025-09-14 19:06:20 +02:00
overcuriousity
9499e62ccc it 2025-09-14 18:45:02 +02:00
overcuriousity
89ae06482e it 2025-09-14 18:02:15 +02:00
overcuriousity
7fe7ca41ba it 2025-09-14 17:40:18 +02:00
overcuriousity
949fbdbb45 itteration 2025-09-14 17:18:56 +02:00
overcuriousity
689e8c00d4 unify config 2025-09-14 16:17:26 +02:00
overcuriousity
3511f18f9a it 2025-09-14 16:07:58 +02:00
overcuriousity
72f7056bc7 it 2025-09-14 15:31:18 +02:00
overcuriousity
2ae33bc5ba it 2025-09-14 15:00:00 +02:00
overcuriousity
c91913fa13 it 2025-09-14 14:28:04 +02:00
overcuriousity
2185177a84 it 2025-09-14 01:21:38 +02:00
overcuriousity
b7a57f1552 it 2025-09-13 23:45:36 +02:00
overcuriousity
41d556e2ce node src dest display 2025-09-13 21:17:04 +02:00
overcuriousity
2974312278 data model refinement 2025-09-13 21:10:27 +02:00
overcuriousity
930fdca500 modularize, shodan qs 2025-09-13 17:14:16 +02:00
overcuriousity
2925512a4d it 2025-09-13 16:27:31 +02:00
overcuriousity
717f103596 fix large entity 2025-09-13 16:09:10 +02:00
overcuriousity
612f414d2a fix large entity 2025-09-13 15:38:05 +02:00
overcuriousity
53baf2e291 it 2025-09-13 11:52:22 +02:00
overcuriousity
84810cdbb0 retreived scanner 2025-09-13 00:42:12 +02:00
overcuriousity
d36fb7d814 fix? 2025-09-13 00:39:00 +02:00
overcuriousity
c0b820c96c fix attempt 2025-09-13 00:03:21 +02:00
overcuriousity
03c52abd1b it 2025-09-12 23:54:06 +02:00
overcuriousity
2d62191aa0 fix attempt 2025-09-12 14:57:09 +02:00
overcuriousity
d2e4c6ee49 fix attempt 2025-09-12 14:47:12 +02:00
overcuriousity
9e66fd0785 fix attempt 2025-09-12 14:42:13 +02:00
overcuriousity
b250109736 fix attempt 2025-09-12 14:37:10 +02:00
overcuriousity
a535d25714 fix attempt 2025-09-12 14:26:48 +02:00
overcuriousity
4f69cabd41 other fixes for redis 2025-09-12 14:23:33 +02:00
overcuriousity
8b7a0656bb fix session manager 2025-09-12 14:18:55 +02:00
overcuriousity
007ebbfd73 fix for redis 2025-09-12 14:17:11 +02:00
overcuriousity
3ecfca95e6 it 2025-09-12 14:11:09 +02:00
overcuriousity
7e2473b521 prod staging 2025-09-12 11:41:50 +02:00
overcuriousity
f445187025 it 2025-09-12 10:08:03 +02:00
overcuriousity
df4e1703c4 it 2025-09-11 22:15:08 +02:00
overcuriousity
646b569ced it 2025-09-11 21:38:04 +02:00
overcuriousity
b47e679992 it 2025-09-11 20:37:43 +02:00
28 changed files with 9558 additions and 4604 deletions

.env.example (new file, 34 lines)

@@ -0,0 +1,34 @@
# ===============================================
# DNSRecon Environment Variables
# ===============================================
# Copy this file to .env and fill in your values.
# --- API Keys ---
# Add your Shodan API key for the Shodan provider to be enabled.
SHODAN_API_KEY=
# --- Flask & Session Settings ---
# A strong, random secret key is crucial for session security.
FLASK_SECRET_KEY=your-very-secret-and-random-key-here
FLASK_HOST=127.0.0.1
FLASK_PORT=5000
FLASK_DEBUG=True
# How long a user's session in the browser lasts (in hours).
FLASK_PERMANENT_SESSION_LIFETIME_HOURS=2
# How long inactive scanner data is stored in Redis (in minutes).
SESSION_TIMEOUT_MINUTES=60
# --- Application Core Settings ---
# The default number of levels to recurse when scanning.
DEFAULT_RECURSION_DEPTH=2
# Default timeout for provider API requests in seconds.
DEFAULT_TIMEOUT=30
# The number of concurrent provider requests to make.
MAX_CONCURRENT_REQUESTS=1
# The number of results from a provider that triggers the "large entity" grouping.
LARGE_ENTITY_THRESHOLD=100
# The number of times to retry a target if a provider fails.
MAX_RETRIES_PER_TARGET=8
# How long cached provider responses are stored (in hours).
CACHE_TIMEOUT_HOURS=12
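
The values above are plain environment variables; a minimal sketch of how they might be read at startup with `python-dotenv` (illustrative only — the real handling lives in `config.py` further below):

```python
# Illustrative sketch: loading .env values with python-dotenv.
import os
from dotenv import load_dotenv

load_dotenv()  # read .env from the working directory

shodan_key = os.getenv('SHODAN_API_KEY') or None
recursion_depth = int(os.getenv('DEFAULT_RECURSION_DEPTH', '2'))
flask_debug = os.getenv('FLASK_DEBUG', 'True').lower() == 'true'
```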

.gitignore (vendored, +2 lines)

@@ -168,3 +168,5 @@ cython_debug/
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
dump.rdb
cache/

README.md (458 lines changed)

@@ -1,273 +1,259 @@
# DNSRecon - Passive Infrastructure Reconnaissance Tool
DNSRecon is an interactive, passive reconnaissance tool designed to map adversary infrastructure. It operates on a "free-by-default" model, ensuring core functionality without subscriptions, while allowing power users to enhance its capabilities with paid API keys. It is aimed at cybersecurity researchers, pentesters, and administrators who want to understand the public footprint of a target domain.
**Repo Link:** [https://git.cc24.dev/mstoeck3/dnsrecon](https://git.cc24.dev/mstoeck3/dnsrecon)
**Current Status: Phase 1 Implementation**
- ✅ Core infrastructure and graph engine
- ✅ Certificate transparency data provider (crt.sh)
- ✅ Basic web interface with real-time visualization
- ✅ Forensic logging system
- ✅ JSON export functionality
-----
## Concept and Philosophy
The core philosophy of DNSRecon is to provide a comprehensive and accurate map of a target's infrastructure using only **passive data sources** by default. This means that, out of the box, DNSRecon will not send any traffic to the target's servers. Instead, it queries public and historical data sources to build a picture of the target's online presence. This approach is ideal for researchers and pentesters who want to gather intelligence without alerting the target, and for administrators who want to see what information about their own infrastructure is publicly available.
For power users who require more in-depth information, DNSRecon can be configured to use API keys for services like Shodan, which provides a wealth of information about internet-connected devices. However, this is an optional feature, and the core functionality of the tool will always remain free and passive.
-----
## Features
* **Passive Reconnaissance**: Gathers data without direct contact with target infrastructure.
* **In-Memory Graph Analysis**: Uses NetworkX for efficient relationship mapping.
* **Real-Time Visualization**: The graph updates dynamically as the scan progresses.
* **Forensic Logging**: A complete audit trail of all reconnaissance activities is maintained.
* **Confidence Scoring**: Relationships are weighted based on the reliability of the data source.
* **Session Management**: Supports concurrent user sessions with isolated scanner instances.
* **Extensible Provider Architecture**: Easily add new data sources to expand the tool's capabilities.
* **Web-Based UI**: An intuitive and interactive web interface for managing scans and visualizing results.
-----
### Data Sources (Phase 1)
- **Certificate Transparency (crt.sh)**: Discovers domain relationships through SSL certificate SAN analysis
- **Basic DNS Resolution**: A/AAAA record lookups for IP relationships
### Visualization
- **Interactive Network Graph**: Powered by vis.js with cybersecurity theme
- **Node Types**: Domains, IP addresses, certificates, ASNs
- **Confidence-Based Styling**: Visual indicators for relationship strength
- **Real-Time Updates**: Graph builds dynamically as relationships are discovered
-----
## Technical Architecture
DNSRecon is a web-based application built with a modern technology stack:
* **Backend**: The backend is a **Flask** application that provides a REST API for the frontend and manages the scanning process.
* **Scanning Engine**: The core scanning engine is a multi-threaded Python application that uses a provider-based architecture to query different data sources.
* **Session Management**: **Redis** is used for session management, allowing for concurrent user sessions with isolated scanner instances.
* **Data Storage**: The application uses an in-memory graph to store and analyze the relationships between different pieces of information. The graph is built using the **NetworkX** library.
* **Frontend**: The frontend is a single-page application that uses JavaScript to interact with the backend API and visualize the graph.
-----
## Data Sources
DNSRecon queries the following data sources:
* **DNS**: Standard DNS lookups (A, AAAA, CNAME, MX, NS, SOA, TXT).
* **crt.sh**: A certificate transparency log that provides information about SSL/TLS certificates.
* **Shodan**: A search engine for internet-connected devices (requires an API key).
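
As an illustration of what a passive lookup against one of these sources can look like, the sketch below queries crt.sh's public JSON endpoint for a domain. It is not the shipped provider code, and the field handling is a simplification:

```python
# Illustrative sketch of a passive crt.sh lookup (not the shipped provider).
from typing import Set

import requests


def crtsh_names(domain: str, timeout: int = 30) -> Set[str]:
    """Collect every name seen on certificates logged for the given domain."""
    response = requests.get(
        "https://crt.sh/",
        params={"q": domain, "output": "json"},
        timeout=timeout,
    )
    response.raise_for_status()
    names: Set[str] = set()
    for cert in response.json():
        # name_value can contain several SAN entries separated by newlines
        names.update(cert.get("name_value", "").splitlines())
    return names


print(sorted(crtsh_names("example.com")))
```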
-----
## Installation and Setup
### Prerequisites
- Python 3.8 or higher
- A modern web browser with JavaScript enabled
- A Linux host for running the application
### Setup
1. **Clone or create the project directory**:
```bash
mkdir dnsrecon
cd dnsrecon
```
2. **Install Python dependencies**:
```bash
pip install -r requirements.txt
```
3. **Verify the directory structure**:
```
dnsrecon/
├── app.py
├── config.py
├── requirements.txt
├── core/
│ ├── __init__.py
│ ├── graph_manager.py
│ ├── scanner.py
│ └── logger.py
├── providers/
│ ├── __init__.py
│ ├── base_provider.py
│ └── crtsh_provider.py
├── static/
│ ├── css/
│ │ └── main.css
│ └── js/
│ ├── graph.js
│ └── main.js
└── templates/
└── index.html
```
## Usage
### Starting the Application
1. **Run the Flask application**:
```bash
python app.py
```
2. **Open your web browser** and navigate to:
```
http://127.0.0.1:5000
```
### Basic Reconnaissance Workflow
1. **Enter Target Domain**: Input the domain you want to investigate (e.g., `example.com`)
2. **Select Recursion Depth**:
- **Depth 1**: Direct relationships only
- **Depth 2**: Recommended for most investigations
- **Depth 3+**: Extended analysis for comprehensive mapping
3. **Start Reconnaissance**: Click "Start Reconnaissance" to begin passive data gathering
4. **Monitor Progress**: Watch the real-time graph build as relationships are discovered
5. **Analyze Results**: Interact with the graph to explore relationships and click nodes for detailed information
6. **Export Data**: Download complete results including graph data and forensic audit trail
### Understanding the Visualization
#### Node Types
- 🟢 **Green Circles**: Domain names
- 🟠 **Orange Squares**: IP addresses
- ⚪ **Gray Diamonds**: SSL certificates
- 🔵 **Blue Triangles**: ASN (Autonomous System) information
#### Edge Confidence
- **Thick Green Lines**: High confidence (≥80%) - Certificate SAN relationships
- **Medium Orange Lines**: Medium confidence (60-79%) - DNS record relationships
- **Thin Gray Lines**: Lower confidence (<60%) - Passive DNS or uncertain relationships
### Example Investigation
Let's investigate `github.com`:
1. Enter `github.com` as the target domain
2. Set recursion depth to 2
3. Start the scan
4. Observe relationships to other GitHub domains discovered through certificate analysis
5. Export results for further analysis
Expected discoveries might include:
- `*.github.com` domains through certificate SANs
- `github.io` and related domains
- Associated IP addresses
- Certificate authority relationships
## Configuration
### Environment Variables
You can configure DNSRecon using environment variables:
```bash
# API keys for future providers (Phase 2)
export VIRUSTOTAL_API_KEY="your_api_key_here"
export SHODAN_API_KEY="your_api_key_here"
# Application settings
export DEFAULT_RECURSION_DEPTH=2
export FLASK_DEBUG=False
```
### Rate Limiting
DNSRecon includes built-in rate limiting to be respectful to data sources:
- **crt.sh**: 60 requests per minute
- **DNS queries**: 100 requests per minute
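
A per-provider limit like this is typically enforced with a simple minimum-interval check; the following sketch shows one way it could look and is not taken from the DNSRecon source:

```python
# Minimal per-provider rate limiter sketch (requests per minute).
import time


class RateLimiter:
    def __init__(self, requests_per_minute: int):
        self.min_interval = 60.0 / max(requests_per_minute, 1)
        self.last_request = 0.0

    def wait(self) -> None:
        """Sleep just long enough to stay under the configured rate."""
        elapsed = time.monotonic() - self.last_request
        if elapsed < self.min_interval:
            time.sleep(self.min_interval - elapsed)
        self.last_request = time.monotonic()


crtsh_limiter = RateLimiter(60)   # crt.sh: 60 requests per minute
dns_limiter = RateLimiter(100)    # DNS: 100 requests per minute
```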
-----
### 1\. Clone the Project
```bash
git clone https://git.cc24.dev/mstoeck3/dnsrecon
cd dnsrecon
```
### 2\. Install Python Dependencies
It is highly recommended to use a virtual environment:
```bash
python3 -m venv venv
source venv/bin/activate
pip install -r requirements.txt
```
The `requirements.txt` file contains the following dependencies:
* Flask
* networkx
* requests
* python-dateutil
* Werkzeug
* urllib3
* dnspython
* gunicorn
* redis
* python-dotenv
### 3\. Configure the Application
DNSRecon is configured using a `.env` file. You can copy the provided example file and edit it to suit your needs:
```bash
cp .env.example .env
```
The following environment variables are available for configuration:
| Variable | Description | Default |
| :--- | :--- | :--- |
| `SHODAN_API_KEY` | Your Shodan API key. | |
| `FLASK_SECRET_KEY`| A strong, random secret key for session security. | `your-very-secret-and-random-key-here` |
| `FLASK_HOST` | The host address for the Flask application. | `127.0.0.1` |
| `FLASK_PORT` | The port for the Flask application. | `5000` |
| `FLASK_DEBUG` | Enable or disable Flask's debug mode. | `True` |
| `FLASK_PERMANENT_SESSION_LIFETIME_HOURS`| How long a user's session in the browser lasts (in hours). | `2` |
| `SESSION_TIMEOUT_MINUTES` | How long inactive scanner data is stored in Redis (in minutes). | `60` |
| `DEFAULT_RECURSION_DEPTH` | The default number of levels to recurse when scanning. | `2` |
| `DEFAULT_TIMEOUT` | Default timeout for provider API requests in seconds. | `30` |
| `MAX_CONCURRENT_REQUESTS`| The number of concurrent provider requests to make. | `1` |
| `LARGE_ENTITY_THRESHOLD`| The number of results from a provider that triggers the "large entity" grouping. | `100` |
| `MAX_RETRIES_PER_TARGET`| The number of times to retry a target if a provider fails. | `8` |
| `CACHE_TIMEOUT_HOURS`| How long cached provider responses are stored (in hours). | `12` |
-----
## Running the Application
For development, you can run the application using the following command:
```bash
python app.py
```
For production, it is recommended to use a more robust server, such as Gunicorn:
```bash
gunicorn --workers 4 --bind 0.0.0.0:5000 app:app
```
-----
## Data Export Format
Results are exported as JSON with the following structure:
```json
{
"scan_metadata": {
"target_domain": "example.com",
"max_depth": 2,
"final_status": "completed"
},
"graph_data": {
"nodes": [...],
"edges": [...]
},
"forensic_audit": {
"session_metadata": {...},
"api_requests": [...],
"relationships": [...]
},
"provider_statistics": {...}
}
```
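
A small post-processing sketch for such an export (the field names follow the structure shown above; the file name is only an example):

```python
# Illustrative: summarize an exported scan.
import json

with open("dnsrecon_export.json") as fh:  # example file name
    export = json.load(fh)

meta = export["scan_metadata"]
nodes = export["graph_data"]["nodes"]
edges = export["graph_data"]["edges"]

print(f"{meta['target_domain']} (depth {meta['max_depth']}, {meta['final_status']}): "
      f"{len(nodes)} nodes, {len(edges)} edges")
```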
## Forensic Integrity
DNSRecon maintains complete forensic integrity:
- **API Request Logging**: Every external request is logged with timestamps, URLs, and responses
- **Relationship Provenance**: Each discovered relationship includes source provider and discovery method
- **Session Tracking**: Unique session IDs for investigation continuity
- **Confidence Metadata**: Scoring rationale for all relationships
- **Export Integrity**: Complete audit trail included in all exports
## Architecture Overview
### Core Components
- **GraphManager**: NetworkX-based in-memory graph with confidence scoring
- **Scanner**: Multi-provider orchestration with depth-limited BFS exploration
- **ForensicLogger**: Thread-safe audit trail with structured logging
- **BaseProvider**: Abstract interface for data source plugins
### Data Flow
1. User initiates scan via web interface
2. Scanner coordinates multiple data providers
3. Relationships discovered and added to in-memory graph
4. Real-time updates sent to web interface
5. Graph visualization updates dynamically
6. Complete audit trail maintained throughout
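
In outline, this flow can be pictured as a depth-limited breadth-first walk over discovered targets; the sketch below is a simplification with assumed provider and graph signatures, not the actual Scanner implementation:

```python
# Simplified sketch of depth-limited BFS over providers (not the real Scanner).
from collections import deque


def explore(root: str, providers, graph, max_depth: int = 2) -> None:
    """Walk outward from the root target, bounded by recursion depth."""
    queue = deque([(root, 0)])
    seen = {root}
    while queue:
        target, depth = queue.popleft()
        if depth >= max_depth:
            continue
        for provider in providers:
            # Assumed shape: each provider yields (relationship, discovered) pairs.
            for relationship, discovered in provider.query_domain(target):
                graph.add_node(discovered)
                graph.add_edge(target, discovered, relationship)
                if discovered not in seen:
                    seen.add(discovered)
                    queue.append((discovered, depth + 1))
```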
## Troubleshooting
### Common Issues
**Graph not displaying**:
- Ensure JavaScript is enabled in your browser
- Check browser console for errors
- Verify vis.js library is loading correctly
**Scan fails to start**:
- Check target domain is valid
- Ensure crt.sh is accessible from your network
- Review Flask console output for errors
**No relationships discovered**:
- Some domains may have limited certificate transparency data
- Try a well-known domain like `google.com` to verify functionality
- Check provider status in the interface
### Debug Mode
Enable debug mode for verbose logging:
```bash
export FLASK_DEBUG=True
python app.py
```
## Development Roadmap
### Phase 2 (Planned)
- Multi-provider system with Shodan and VirusTotal integration
- Real-time scanning with enhanced visualization
- Provider health monitoring and failure recovery
### Phase 3 (Planned)
- Advanced correlation algorithms
- Enhanced forensic reporting
- Performance optimization for large investigations
## Security Considerations
- **No Persistent Storage**: All data stored in memory only
- **API Keys**: Stored in memory only, never written to disk
- **Rate Limiting**: Prevents abuse of external services
- **Local Use Only**: No authentication required (designed for local use)
## Contributing
DNSRecon follows a phased development approach. Currently in Phase 1 with core infrastructure completed.
### Code Quality Standards
- Follow PEP 8 for Python code
- Comprehensive docstrings for all functions
- Type hints where appropriate
- Forensic logging for all external interactions
-----
## Systemd Service
To run DNSRecon as a service that starts automatically on boot, you can use `systemd`.
### 1\. Create a `.service` file
Create a new service file in `/etc/systemd/system/`:
```bash
sudo nano /etc/systemd/system/dnsrecon.service
```
### 2\. Add the Service Configuration
Paste the following configuration into the file. **Remember to replace `/path/to/your/dnsrecon` and `your_user` with your actual project path and username.**
```ini
[Unit]
Description=DNSRecon Application
After=network.target
[Service]
User=your_user
Group=your_user
WorkingDirectory=/path/to/your/dnsrecon
ExecStart=/path/to/your/dnsrecon/venv/bin/gunicorn --workers 4 --bind 0.0.0.0:5000 app:app
Restart=always
Environment="SECRET_KEY=your-super-secret-and-random-key"
Environment="FLASK_ENV=production"
Environment="FLASK_DEBUG=False"
Environment="SHODAN_API_KEY=your_shodan_key"
[Install]
WantedBy=multi-user.target
```
### 3\. Enable and Start the Service
Reload the `systemd` daemon, enable the service to start on boot, and then start it immediately:
```bash
sudo systemctl daemon-reload
sudo systemctl enable dnsrecon.service
sudo systemctl start dnsrecon.service
```
You can check the status of the service at any time with:
```bash
sudo systemctl status dnsrecon.service
```
-----
## Updating the Application
To update the application, you should first pull the latest changes from the git repository. Then, you will need to wipe the Redis database and the local cache to ensure that you are using the latest data.
### 1\. Update the Code
```bash
git pull
```
### 2\. Wipe the Redis Database
```bash
redis-cli FLUSHALL
```
### 3\. Wipe the Local Cache
```bash
rm -rf cache/*
```
### 4\. Restart the Service
```bash
sudo systemctl restart dnsrecon.service
```
-----
## Extensibility
DNSRecon is designed to be extensible, and adding new providers is a straightforward process. To add a new provider, you will need to create a new Python file in the `providers` directory that inherits from the `BaseProvider` class. The new provider will need to implement the following methods:
* `get_name()`: Return the name of the provider.
* `get_display_name()`: Return a display-friendly name for the provider.
* `requires_api_key()`: Return `True` if the provider requires an API key.
* `get_eligibility()`: Return a dictionary indicating whether the provider can query domains and/or IPs.
* `is_available()`: Return `True` if the provider is available (e.g., if an API key is configured).
* `query_domain(domain)`: Query the provider for information about a domain.
* `query_ip(ip)`: Query the provider for information about an IP address.
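
A skeleton of such a provider might look like the following; the import path, return shapes, and exact signatures are assumptions based on the method list above rather than the shipped `BaseProvider` interface:

```python
# providers/example_provider.py -- illustrative skeleton, not a shipped provider.
from providers.base_provider import BaseProvider  # import path assumed


class ExampleProvider(BaseProvider):
    def get_name(self) -> str:
        return "example"

    def get_display_name(self) -> str:
        return "Example Provider"

    def requires_api_key(self) -> bool:
        return False

    def get_eligibility(self) -> dict:
        # Assumed key names: can query domains but not IPs.
        return {"domains": True, "ips": False}

    def is_available(self) -> bool:
        return True  # no API key required

    def query_domain(self, domain: str) -> list:
        # Query the passive data source and return discovered relationships.
        return []

    def query_ip(self, ip: str) -> list:
        return []
```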
-----
## Unique Capabilities and Limitations
### Unique Capabilities
* **Graph-Based Analysis**: The use of a graph-based data model allows for a more intuitive and powerful analysis of the relationships between different pieces of information.
* **Real-Time Visualization**: The real-time visualization of the graph provides immediate feedback and allows for a more interactive and engaging analysis experience.
* **Session Management**: The session management feature allows multiple users to use the application concurrently without interfering with each other's work.
### Limitations
* **Passive-Only by Default**: While the passive-only approach is a key feature of the tool, it also means that the information it can gather is limited to what is publicly available.
* **No Active Scanning**: The tool does not perform any active scanning, such as port scanning or vulnerability scanning.
-----
## License
This project is licensed under the terms of the **BSD-3-Clause** license. It is intended for legitimate security research and infrastructure analysis; users are responsible for compliance with applicable laws and regulations.
Copyright (c) 2025 mstoeck3.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
## Support
For issues and questions:
1. Check the troubleshooting section above
2. Review the Flask console output for error details
3. Ensure all dependencies are properly installed
---
**DNSRecon v1.0 - Phase 1 Implementation**
*Passive Infrastructure Reconnaissance for Security Professionals*

app.py (869 lines changed; diff suppressed because it is too large)

config.py (169 lines changed)

@@ -1,3 +1,5 @@
# dnsrecon-reduced/config.py
""" """
Configuration management for DNSRecon tool. Configuration management for DNSRecon tool.
Handles API key storage, rate limiting, and default settings. Handles API key storage, rate limiting, and default settings.
@@ -5,116 +7,151 @@ Handles API key storage, rate limiting, and default settings.
import os
from typing import Dict, Optional
from dotenv import load_dotenv
# Load environment variables from .env file
load_dotenv()
class Config:
"""Configuration manager for DNSRecon application."""
def __init__(self):
"""Initialize configuration with default values."""
self.api_keys: Dict[str, Optional[str]] = { self.api_keys: Dict[str, Optional[str]] = {}
'shodan': None,
'virustotal': None
}
# Default settings # --- General Settings ---
self.default_recursion_depth = 2
self.default_timeout = 30 self.default_timeout = 60
self.max_concurrent_requests = 5 self.max_concurrent_requests = 1
self.large_entity_threshold = 100
self.max_retries_per_target = 8
# Rate limiting settings (requests per minute) # --- Provider Caching Settings ---
self.cache_timeout_hours = 6 # Provider-specific cache timeout
# --- Rate Limiting (requests per minute) ---
self.rate_limits = {
'crtsh': 60, # Free service, be respectful 'crtsh': 5,
'virustotal': 4, # Free tier limit 'shodan': 60,
'shodan': 60, # API dependent 'dns': 100,
'dns': 100 # Local DNS queries 'correlation': 0 # Set to 0 to make sure correlations run last
}
# Provider settings # --- Provider Settings ---
self.enabled_providers = {
'crtsh': True, # Always enabled (free) 'crtsh': True,
'dns': True, # Always enabled (free) 'dns': True,
'virustotal': False, # Requires API key 'shodan': False,
'shodan': False # Requires API key 'correlation': True # Enable the new provider by default
}
# Logging configuration # --- Logging ---
self.log_level = 'INFO'
self.log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
# Flask configuration # --- Flask & Session Settings ---
self.flask_host = '127.0.0.1'
self.flask_port = 5000
self.flask_debug = True
self.flask_secret_key = 'default-secret-key-change-me'
self.flask_permanent_session_lifetime_hours = 2
self.session_timeout_minutes = 60
def set_api_key(self, provider: str, api_key: str) -> bool: # Load environment variables to override defaults
self.load_from_env()
def load_from_env(self):
"""Load configuration from environment variables."""
self.set_api_key('shodan', os.getenv('SHODAN_API_KEY'))
# Override settings from environment
self.default_recursion_depth = int(os.getenv('DEFAULT_RECURSION_DEPTH', self.default_recursion_depth))
self.default_timeout = int(os.getenv('DEFAULT_TIMEOUT', self.default_timeout))
self.max_concurrent_requests = int(os.getenv('MAX_CONCURRENT_REQUESTS', self.max_concurrent_requests))
self.large_entity_threshold = int(os.getenv('LARGE_ENTITY_THRESHOLD', self.large_entity_threshold))
self.max_retries_per_target = int(os.getenv('MAX_RETRIES_PER_TARGET', self.max_retries_per_target))
self.cache_timeout_hours = int(os.getenv('CACHE_TIMEOUT_HOURS', self.cache_timeout_hours))
# Override Flask and session settings
self.flask_host = os.getenv('FLASK_HOST', self.flask_host)
self.flask_port = int(os.getenv('FLASK_PORT', self.flask_port))
self.flask_debug = os.getenv('FLASK_DEBUG', str(self.flask_debug)).lower() == 'true'
self.flask_secret_key = os.getenv('FLASK_SECRET_KEY', self.flask_secret_key)
self.flask_permanent_session_lifetime_hours = int(os.getenv('FLASK_PERMANENT_SESSION_LIFETIME_HOURS', self.flask_permanent_session_lifetime_hours))
self.session_timeout_minutes = int(os.getenv('SESSION_TIMEOUT_MINUTES', self.session_timeout_minutes))
def set_api_key(self, provider: str, api_key: Optional[str]) -> bool:
"""Set API key for a provider."""
self.api_keys[provider] = api_key
if api_key:
self.enabled_providers[provider] = True
return True
def set_provider_enabled(self, provider: str, enabled: bool) -> bool:
""" """
Set API key for a provider. Set provider enabled status for the session.
Args: Args:
provider: Provider name (shodan, virustotal) provider: Provider name
api_key: API key string enabled: Whether the provider should be enabled
Returns: Returns:
bool: True if key was set successfully True if the setting was applied successfully
""" """
if provider in self.api_keys: provider_key = provider.lower()
self.api_keys[provider] = api_key self.enabled_providers[provider_key] = enabled
self.enabled_providers[provider] = True if api_key else False return True
return True
return False def get_provider_enabled(self, provider: str) -> bool:
def get_api_key(self, provider: str) -> Optional[str]:
""" """
Get API key for a provider. Get provider enabled status.
Args: Args:
provider: Provider name provider: Provider name
Returns: Returns:
API key or None if not set True if the provider is enabled
""" """
provider_key = provider.lower()
return self.enabled_providers.get(provider_key, True) # Default to enabled
def bulk_set_provider_settings(self, provider_settings: dict) -> dict:
"""
Set multiple provider settings at once.
Args:
provider_settings: Dict of provider_name -> {'enabled': bool, ...}
Returns:
Dict with results for each provider
"""
results = {}
for provider_name, settings in provider_settings.items():
provider_key = provider_name.lower()
try:
if 'enabled' in settings:
self.enabled_providers[provider_key] = settings['enabled']
results[provider_key] = {'success': True, 'enabled': settings['enabled']}
else:
results[provider_key] = {'success': False, 'error': 'No enabled setting provided'}
except Exception as e:
results[provider_key] = {'success': False, 'error': str(e)}
return results
def get_api_key(self, provider: str) -> Optional[str]:
"""Get API key for a provider."""
return self.api_keys.get(provider)
def is_provider_enabled(self, provider: str) -> bool:
"""Check if a provider is enabled."""
return self.enabled_providers.get(provider, False)
def get_rate_limit(self, provider: str) -> int:
"""Get rate limit for a provider."""
return self.rate_limits.get(provider, 60)
def load_from_env(self):
"""Load configuration from environment variables."""
if os.getenv('VIRUSTOTAL_API_KEY'):
self.set_api_key('virustotal', os.getenv('VIRUSTOTAL_API_KEY'))
if os.getenv('SHODAN_API_KEY'):
self.set_api_key('shodan', os.getenv('SHODAN_API_KEY'))
# Override default settings from environment
self.default_recursion_depth = int(os.getenv('DEFAULT_RECURSION_DEPTH', '2'))
self.flask_debug = os.getenv('FLASK_DEBUG', 'True').lower() == 'true'
self.default_timeout = 30
self.max_concurrent_requests = 5
# Global configuration instance
config = Config()

core/__init__.py

@@ -1,28 +1,25 @@
""" """
Core modules for DNSRecon passive reconnaissance tool. Core modules for DNSRecon passive reconnaissance tool.
Contains graph management, scanning orchestration, and forensic logging. Contains graph management, scanning orchestration, and forensic logging.
Phase 2: Enhanced with concurrent processing and real-time capabilities.
""" """
from .graph_manager import GraphManager, NodeType, RelationshipType from .graph_manager import GraphManager, NodeType
from .scanner import Scanner, ScanStatus # Remove 'scanner' global instance from .scanner import Scanner, ScanStatus
from .logger import ForensicLogger, get_forensic_logger, new_session from .logger import ForensicLogger, get_forensic_logger, new_session
from .session_manager import session_manager # Add session manager from .session_manager import session_manager
from .session_config import SessionConfig, create_session_config # Add session config from .session_config import SessionConfig, create_session_config
__all__ = [ __all__ = [
'GraphManager', 'GraphManager',
'NodeType', 'NodeType',
'RelationshipType',
'Scanner', 'Scanner',
'ScanStatus', 'ScanStatus',
# 'scanner', # Remove this - no more global scanner
'ForensicLogger', 'ForensicLogger',
'get_forensic_logger', 'get_forensic_logger',
'new_session', 'new_session',
'session_manager', # Add this 'session_manager',
'SessionConfig', # Add this 'SessionConfig',
'create_session_config' # Add this 'create_session_config'
] ]
__version__ = "1.0.0-phase2" __version__ = "1.0.0-phase2"

core/graph_manager.py

@@ -1,12 +1,15 @@
# dnsrecon-reduced/core/graph_manager.py
""" """
Graph data model for DNSRecon using NetworkX. Graph data model for DNSRecon using NetworkX.
Manages in-memory graph storage with confidence scoring and forensic metadata. Manages in-memory graph storage with confidence scoring and forensic metadata.
Now fully compatible with the unified ProviderResult data model.
FIXED: Added proper pickle support to prevent weakref serialization errors.
""" """
import re
from datetime import datetime from datetime import datetime, timezone
from typing import Dict, List, Any, Optional, Tuple
from enum import Enum from enum import Enum
from datetime import timezone from typing import Dict, List, Any, Optional, Tuple
import networkx as nx import networkx as nx
@@ -15,132 +18,161 @@ class NodeType(Enum):
"""Enumeration of supported node types.""" """Enumeration of supported node types."""
DOMAIN = "domain" DOMAIN = "domain"
IP = "ip" IP = "ip"
ASN = "asn" ISP = "isp"
DNS_RECORD = "dns_record" CA = "ca"
LARGE_ENTITY = "large_entity" LARGE_ENTITY = "large_entity"
CORRELATION_OBJECT = "correlation_object"
def __repr__(self):
class RelationshipType(Enum): return self.value
"""Enumeration of supported relationship types with confidence scores."""
SAN_CERTIFICATE = ("san", 0.9)
A_RECORD = ("a_record", 0.8)
AAAA_RECORD = ("aaaa_record", 0.8)
CNAME_RECORD = ("cname", 0.8)
MX_RECORD = ("mx_record", 0.7)
NS_RECORD = ("ns_record", 0.7)
PTR_RECORD = ("ptr_record", 0.8)
SOA_RECORD = ("soa_record", 0.7)
TXT_RECORD = ("txt_record", 0.7)
SRV_RECORD = ("srv_record", 0.7)
CAA_RECORD = ("caa_record", 0.7)
DNSKEY_RECORD = ("dnskey_record", 0.7)
DS_RECORD = ("ds_record", 0.7)
RRSIG_RECORD = ("rrsig_record", 0.7)
SSHFP_RECORD = ("sshfp_record", 0.7)
TLSA_RECORD = ("tlsa_record", 0.7)
NAPTR_RECORD = ("naptr_record", 0.7)
SPF_RECORD = ("spf_record", 0.7)
DNS_RECORD = ("dns_record", 0.8)
PASSIVE_DNS = ("passive_dns", 0.6)
ASN_MEMBERSHIP = ("asn", 0.7)
def __init__(self, relationship_name: str, default_confidence: float):
self.relationship_name = relationship_name
self.default_confidence = default_confidence
class GraphManager:
"""
Thread-safe graph manager for DNSRecon infrastructure mapping.
Uses NetworkX for in-memory graph storage with confidence scoring.
Compatible with unified ProviderResult data model.
FIXED: Added proper pickle support to handle NetworkX graph serialization.
"""
def __init__(self):
"""Initialize empty directed graph."""
self.graph = nx.DiGraph()
self.creation_time = datetime.now(timezone.utc).isoformat()
self.last_modified = self.creation_time
def add_node(self, node_id: str, node_type: NodeType, def __getstate__(self):
metadata: Optional[Dict[str, Any]] = None) -> bool: """Prepare GraphManager for pickling by converting NetworkX graph to serializable format."""
""" state = self.__dict__.copy()
Add a node to the graph.
# Convert NetworkX graph to a serializable format
if hasattr(self, 'graph') and self.graph:
# Extract all nodes with their data
nodes_data = {}
for node_id, attrs in self.graph.nodes(data=True):
nodes_data[node_id] = dict(attrs)
# Extract all edges with their data
edges_data = []
for source, target, attrs in self.graph.edges(data=True):
edges_data.append({
'source': source,
'target': target,
'attributes': dict(attrs)
})
# Replace the NetworkX graph with serializable data
state['_graph_nodes'] = nodes_data
state['_graph_edges'] = edges_data
del state['graph']
return state
Args: def __setstate__(self, state):
node_id: Unique identifier for the node """Restore GraphManager after unpickling by reconstructing NetworkX graph."""
node_type: Type of the node (Domain, IP, Certificate, ASN) # Restore basic attributes
metadata: Additional metadata for the node self.__dict__.update(state)
Returns: # Reconstruct NetworkX graph from serializable data
bool: True if node was added, False if it already exists self.graph = nx.DiGraph()
# Restore nodes
if hasattr(self, '_graph_nodes'):
for node_id, attrs in self._graph_nodes.items():
self.graph.add_node(node_id, **attrs)
del self._graph_nodes
# Restore edges
if hasattr(self, '_graph_edges'):
for edge_data in self._graph_edges:
self.graph.add_edge(
edge_data['source'],
edge_data['target'],
**edge_data['attributes']
)
del self._graph_edges
def add_node(self, node_id: str, node_type: NodeType, attributes: Optional[List[Dict[str, Any]]] = None,
description: str = "", metadata: Optional[Dict[str, Any]] = None) -> bool:
""" """
if self.graph.has_node(node_id): Add a node to the graph, update attributes, and process correlations.
# Update metadata if node exists Now compatible with unified data model - attributes are dictionaries from converted StandardAttribute objects.
existing_metadata = self.graph.nodes[node_id].get('metadata', {}) """
is_new_node = not self.graph.has_node(node_id)
if is_new_node:
self.graph.add_node(node_id, type=node_type.value,
added_timestamp=datetime.now(timezone.utc).isoformat(),
attributes=attributes or [], # Store as a list from the start
description=description,
metadata=metadata or {})
else:
# Safely merge new attributes into the existing list of attributes
if attributes:
existing_attributes = self.graph.nodes[node_id].get('attributes', [])
# Handle cases where old data might still be in dictionary format
if not isinstance(existing_attributes, list):
existing_attributes = []
# Create a set of existing attribute names for efficient duplicate checking
existing_attr_names = {attr['name'] for attr in existing_attributes}
for new_attr in attributes:
if new_attr['name'] not in existing_attr_names:
existing_attributes.append(new_attr)
existing_attr_names.add(new_attr['name'])
self.graph.nodes[node_id]['attributes'] = existing_attributes
if description:
self.graph.nodes[node_id]['description'] = description
if metadata:
existing_metadata = self.graph.nodes[node_id].get('metadata', {})
existing_metadata.update(metadata)
self.graph.nodes[node_id]['metadata'] = existing_metadata
self.last_modified = datetime.now(timezone.utc).isoformat()
return is_new_node
def add_edge(self, source_id: str, target_id: str, relationship_type: str,
confidence_score: float = 0.5, source_provider: str = "unknown",
raw_data: Optional[Dict[str, Any]] = None) -> bool:
"""
UPDATED: Add or update an edge between two nodes with raw relationship labels.
"""
if not self.graph.has_node(source_id) or not self.graph.has_node(target_id):
return False return False
node_attributes = { new_confidence = confidence_score
'type': node_type.value,
'added_timestamp': datetime.now(timezone.utc).isoformat(), # UPDATED: Use raw relationship type - no formatting
'metadata': metadata or {} edge_label = relationship_type
}
self.graph.add_node(node_id, **node_attributes)
self.last_modified = datetime.now(timezone.utc).isoformat()
return True
def add_edge(self, source_id: str, target_id: str,
relationship_type: RelationshipType,
confidence_score: Optional[float] = None,
source_provider: str = "unknown",
raw_data: Optional[Dict[str, Any]] = None) -> bool:
"""
Add an edge between two nodes.
Args:
source_id: Source node identifier
target_id: Target node identifier
relationship_type: Type of relationship
confidence_score: Custom confidence score (overrides default)
source_provider: Provider that discovered this relationship
raw_data: Raw data from provider response
Returns:
bool: True if edge was added, False if it already exists
"""
if not self.graph.has_node(source_id) or not self.graph.has_node(target_id):
# If the target node is a subdomain, it should be added.
# The scanner will handle this logic.
pass
# Check if edge already exists
if self.graph.has_edge(source_id, target_id): if self.graph.has_edge(source_id, target_id):
# Update confidence score if new score is higher # If edge exists, update confidence if the new score is higher.
existing_confidence = self.graph.edges[source_id, target_id]['confidence_score'] if new_confidence > self.graph.edges[source_id, target_id].get('confidence_score', 0):
new_confidence = confidence_score or relationship_type.default_confidence
if new_confidence > existing_confidence:
self.graph.edges[source_id, target_id]['confidence_score'] = new_confidence self.graph.edges[source_id, target_id]['confidence_score'] = new_confidence
self.graph.edges[source_id, target_id]['updated_timestamp'] = datetime.now(timezone.utc).isoformat() self.graph.edges[source_id, target_id]['updated_timestamp'] = datetime.now(timezone.utc).isoformat()
self.graph.edges[source_id, target_id]['updated_by'] = source_provider self.graph.edges[source_id, target_id]['updated_by'] = source_provider
return False return False
edge_attributes = { # Add a new edge with raw attributes
'relationship_type': relationship_type.relationship_name, self.graph.add_edge(source_id, target_id,
'confidence_score': confidence_score or relationship_type.default_confidence, relationship_type=edge_label,
'source_provider': source_provider, confidence_score=new_confidence,
'discovery_timestamp': datetime.now(timezone.utc).isoformat(), source_provider=source_provider,
'raw_data': raw_data or {} discovery_timestamp=datetime.now(timezone.utc).isoformat(),
} raw_data=raw_data or {})
self.last_modified = datetime.now(timezone.utc).isoformat()
return True
def remove_node(self, node_id: str) -> bool:
"""Remove a node and its connected edges from the graph."""
if not self.graph.has_node(node_id):
return False
# Remove node from the graph (NetworkX handles removing connected edges)
self.graph.remove_node(node_id)
self.last_modified = datetime.now(timezone.utc).isoformat()
return True
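For orientation, a brief usage sketch of the node/edge API in this hunk; the import path and the relationship label are assumptions (labels are free-form strings in the new model):

```python
# Illustrative usage of the GraphManager API above.
from core.graph_manager import GraphManager, NodeType  # import path assumed

gm = GraphManager()
gm.add_node("example.com", NodeType.DOMAIN, description="scan root")
gm.add_node("93.184.216.34", NodeType.IP)
gm.add_edge("example.com", "93.184.216.34", "dns_a_record",  # raw label
            confidence_score=0.8, source_provider="dns")
print(gm.get_node_count(), gm.get_edge_count())  # -> 2 1
```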
@@ -153,266 +185,115 @@ class GraphManager:
return self.graph.number_of_edges()
def get_nodes_by_type(self, node_type: NodeType) -> List[str]: def get_nodes_by_type(self, node_type: NodeType) -> List[str]:
""" """Get all nodes of a specific type."""
Get all nodes of a specific type. return [n for n, d in self.graph.nodes(data=True) if d.get('type') == node_type.value]
Args:
node_type: Type of nodes to retrieve
Returns:
List of node identifiers
"""
return [
node_id for node_id, attributes in self.graph.nodes(data=True)
if attributes.get('type') == node_type.value
]
def get_neighbors(self, node_id: str) -> List[str]:
"""
Get all neighboring nodes (both incoming and outgoing).
Args:
node_id: Node identifier
Returns:
List of neighboring node identifiers
"""
if not self.graph.has_node(node_id):
return []
predecessors = list(self.graph.predecessors(node_id))
successors = list(self.graph.successors(node_id))
return list(set(predecessors + successors))
def get_high_confidence_edges(self, min_confidence: float = 0.8) -> List[Tuple[str, str, Dict]]: def get_high_confidence_edges(self, min_confidence: float = 0.8) -> List[Tuple[str, str, Dict]]:
""" """Get edges with confidence score above a given threshold."""
Get edges with confidence score above threshold. return [(u, v, d) for u, v, d in self.graph.edges(data=True)
if d.get('confidence_score', 0) >= min_confidence]
Args:
min_confidence: Minimum confidence threshold
Returns:
List of tuples (source, target, attributes)
"""
return [
(source, target, attributes)
for source, target, attributes in self.graph.edges(data=True)
if attributes.get('confidence_score', 0) >= min_confidence
]
def get_graph_data(self) -> Dict[str, Any]: def get_graph_data(self) -> Dict[str, Any]:
""" """
Export graph data for visualization. Export graph data formatted for frontend visualization.
Uses comprehensive metadata collected during scanning. SIMPLIFIED: No certificate styling - frontend handles all visual styling.
""" """
nodes = [] nodes = []
edges = [] for node_id, attrs in self.graph.nodes(data=True):
# Create nodes with the comprehensive metadata already collected
for node_id, attributes in self.graph.nodes(data=True):
node_data = { node_data = {
'id': node_id, 'id': node_id,
'label': node_id, 'label': node_id,
'type': attributes.get('type', 'unknown'), 'type': attrs.get('type', 'unknown'),
'metadata': attributes.get('metadata', {}), 'attributes': attrs.get('attributes', []), # Raw attributes list
'added_timestamp': attributes.get('added_timestamp') 'description': attrs.get('description', ''),
'metadata': attrs.get('metadata', {}),
'added_timestamp': attrs.get('added_timestamp'),
'max_depth_reached': attrs.get('metadata', {}).get('max_depth_reached', False)
} }
# Handle certificate node labeling # Add incoming and outgoing edges to node data
if node_id.startswith('cert_'): if self.graph.has_node(node_id):
# For certificate nodes, create a more informative label node_data['incoming_edges'] = [
cert_metadata = node_data['metadata'] {'from': u, 'data': d} for u, _, d in self.graph.in_edges(node_id, data=True)
issuer = cert_metadata.get('issuer_name', 'Unknown') ]
valid_status = "" if cert_metadata.get('is_currently_valid') else "" node_data['outgoing_edges'] = [
node_data['label'] = f"Certificate {valid_status}\n{issuer[:30]}..." {'to': v, 'data': d} for _, v, d in self.graph.out_edges(node_id, data=True)
]
# Color coding by type
type_colors = {
'domain': {
'background': '#00ff41',
'border': '#00aa2e',
'highlight': {'background': '#44ff75', 'border': '#00ff41'},
'hover': {'background': '#22ff63', 'border': '#00cc35'}
},
'ip': {
'background': '#ff9900',
'border': '#cc7700',
'highlight': {'background': '#ffbb44', 'border': '#ff9900'},
'hover': {'background': '#ffaa22', 'border': '#dd8800'}
},
'asn': {
'background': '#00aaff',
'border': '#0088cc',
'highlight': {'background': '#44ccff', 'border': '#00aaff'},
'hover': {'background': '#22bbff', 'border': '#0099dd'}
},
'dns_record': {
'background': '#9d4edd',
'border': '#7b2cbf',
'highlight': {'background': '#c77dff', 'border': '#9d4edd'},
'hover': {'background': '#b392f0', 'border': '#8b5cf6'}
},
'large_entity': {
'background': '#ff6b6b',
'border': '#cc3a3a',
'highlight': {'background': '#ff8c8c', 'border': '#ff6b6b'},
'hover': {'background': '#ff7a7a', 'border': '#dd4a4a'}
}
}
node_color_config = type_colors.get(attributes.get('type', 'unknown'), type_colors['domain'])
node_data['color'] = node_color_config
# Add certificate validity indicator if available
metadata = node_data['metadata']
if 'certificate_data' in metadata and 'has_valid_cert' in metadata['certificate_data']:
node_data['has_valid_cert'] = metadata['certificate_data']['has_valid_cert']
nodes.append(node_data) nodes.append(node_data)
# Create edges (unchanged from original) edges = []
for source, target, attributes in self.graph.edges(data=True): for source, target, attrs in self.graph.edges(data=True):
edge_data = { edges.append({
'from': source, 'from': source,
'to': target, 'to': target,
'label': attributes.get('relationship_type', ''), 'label': attrs.get('relationship_type', ''),
'confidence_score': attributes.get('confidence_score', 0), 'confidence_score': attrs.get('confidence_score', 0),
'source_provider': attributes.get('source_provider', ''), 'source_provider': attrs.get('source_provider', ''),
'discovery_timestamp': attributes.get('discovery_timestamp') 'discovery_timestamp': attrs.get('discovery_timestamp')
} })
# Enhanced edge styling based on confidence
confidence = attributes.get('confidence_score', 0)
if confidence >= 0.8:
edge_data['color'] = {
'color': '#00ff41',
'highlight': '#44ff75',
'hover': '#22ff63',
'inherit': False
}
edge_data['width'] = 4
elif confidence >= 0.6:
edge_data['color'] = {
'color': '#ff9900',
'highlight': '#ffbb44',
'hover': '#ffaa22',
'inherit': False
}
edge_data['width'] = 3
else:
edge_data['color'] = {
'color': '#666666',
'highlight': '#888888',
'hover': '#777777',
'inherit': False
}
edge_data['width'] = 2
# Add dashed line for low confidence
if confidence < 0.6:
edge_data['dashes'] = [5, 5]
edges.append(edge_data)
return { return {
'nodes': nodes, 'nodes': nodes,
'edges': edges, 'edges': edges,
'statistics': { 'statistics': self.get_statistics()['basic_metrics']
'node_count': len(nodes),
'edge_count': len(edges),
'creation_time': self.creation_time,
'last_modified': self.last_modified
}
} }
def export_json(self) -> Dict[str, Any]:
"""
Export complete graph data as JSON for download.
Returns:
Dictionary containing complete graph data with metadata
"""
# Get basic graph data
graph_data = self.get_graph_data()
# Add comprehensive metadata
export_data = {
'export_metadata': {
'export_timestamp': datetime.now(timezone.utc).isoformat(),
'graph_creation_time': self.creation_time,
'last_modified': self.last_modified,
'total_nodes': self.graph.number_of_nodes(),
'total_edges': self.graph.number_of_edges(),
'graph_format': 'dnsrecon_v1'
},
'nodes': graph_data['nodes'],
'edges': graph_data['edges'],
'node_types': [node_type.value for node_type in NodeType],
'relationship_types': [
{
'name': rel_type.relationship_name,
'default_confidence': rel_type.default_confidence
}
for rel_type in RelationshipType
],
'confidence_distribution': self._get_confidence_distribution()
}
return export_data
def _get_confidence_distribution(self) -> Dict[str, int]:
"""Get distribution of edge confidence scores with empty graph handling."""
distribution = {'high': 0, 'medium': 0, 'low': 0}
# FIXED: Handle empty graph case
if self.get_edge_count() == 0:
return distribution
for _, _, data in self.graph.edges(data=True):
confidence = data.get('confidence_score', 0)
if confidence >= 0.8:
distribution['high'] += 1
elif confidence >= 0.6:
distribution['medium'] += 1
else:
distribution['low'] += 1
return distribution
def get_statistics(self) -> Dict[str, Any]: def get_statistics(self) -> Dict[str, Any]:
""" """Get comprehensive statistics about the graph with proper empty graph handling."""
Get comprehensive graph statistics.
# FIXED: Handle empty graph case properly
Returns: node_count = self.get_node_count()
Dictionary containing various graph metrics edge_count = self.get_edge_count()
"""
stats = { stats = {
'basic_metrics': { 'basic_metrics': {
'total_nodes': self.graph.number_of_nodes(), 'total_nodes': node_count,
'total_edges': self.graph.number_of_edges(), 'total_edges': edge_count,
'creation_time': self.creation_time, 'creation_time': self.creation_time,
'last_modified': self.last_modified 'last_modified': self.last_modified
}, },
'node_type_distribution': {}, 'node_type_distribution': {},
'relationship_type_distribution': {}, 'relationship_type_distribution': {},
'confidence_distribution': self._get_confidence_distribution(), 'confidence_distribution': self._get_confidence_distribution(),
'provider_distribution': {} 'provider_distribution': {}
} }
# Node type distribution # FIXED: Only calculate distributions if we have data
for node_type in NodeType: if node_count > 0:
count = len(self.get_nodes_by_type(node_type)) # Calculate node type distributions
stats['node_type_distribution'][node_type.value] = count for node_type in NodeType:
count = len(self.get_nodes_by_type(node_type))
# Relationship type distribution if count > 0: # Only include types that exist
for _, _, attributes in self.graph.edges(data=True): stats['node_type_distribution'][node_type.value] = count
rel_type = attributes.get('relationship_type', 'unknown')
stats['relationship_type_distribution'][rel_type] = \ if edge_count > 0:
stats['relationship_type_distribution'].get(rel_type, 0) + 1 # Calculate edge distributions
for _, _, data in self.graph.edges(data=True):
# Provider distribution rel_type = data.get('relationship_type', 'unknown')
for _, _, attributes in self.graph.edges(data=True): stats['relationship_type_distribution'][rel_type] = stats['relationship_type_distribution'].get(rel_type, 0) + 1
provider = attributes.get('source_provider', 'unknown')
stats['provider_distribution'][provider] = \ provider = data.get('source_provider', 'unknown')
stats['provider_distribution'].get(provider, 0) + 1 stats['provider_distribution'][provider] = stats['provider_distribution'].get(provider, 0) + 1
return stats return stats
def clear(self) -> None: def clear(self) -> None:

core/logger.py

@@ -1,7 +1,4 @@
""" # dnsrecon/core/logger.py
Forensic logging system for DNSRecon tool.
Provides structured audit trail for all reconnaissance activities.
"""
import logging import logging
import threading import threading
@@ -43,9 +40,10 @@ class ForensicLogger:
""" """
Thread-safe forensic logging system for DNSRecon. Thread-safe forensic logging system for DNSRecon.
Maintains detailed audit trail of all reconnaissance activities. Maintains detailed audit trail of all reconnaissance activities.
FIXED: Enhanced pickle support to prevent weakref issues in logging handlers.
""" """
def __init__(self, session_id: str = None): def __init__(self, session_id: str = ""):
""" """
Initialize forensic logger. Initialize forensic logger.
@@ -53,7 +51,7 @@ class ForensicLogger:
session_id: Unique identifier for this reconnaissance session session_id: Unique identifier for this reconnaissance session
""" """
self.session_id = session_id or self._generate_session_id() self.session_id = session_id or self._generate_session_id()
#self.lock = threading.Lock() self.lock = threading.Lock()
# Initialize audit trail storage # Initialize audit trail storage
self.api_requests: List[APIRequest] = [] self.api_requests: List[APIRequest] = []
@@ -68,21 +66,75 @@ class ForensicLogger:
'target_domains': set() 'target_domains': set()
} }
# Configure standard logger # Configure standard logger with simple setup to avoid weakrefs
self.logger = logging.getLogger(f'dnsrecon.{self.session_id}') self.logger = logging.getLogger(f'dnsrecon.{self.session_id}')
self.logger.setLevel(logging.INFO) self.logger.setLevel(logging.INFO)
# Create formatter for structured logging # Create minimal formatter
formatter = logging.Formatter( formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s' '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
) )
# Add console handler if not already present # Add console handler only if not already present (avoid duplicate handlers)
if not self.logger.handlers: if not self.logger.handlers:
console_handler = logging.StreamHandler() console_handler = logging.StreamHandler()
console_handler.setFormatter(formatter) console_handler.setFormatter(formatter)
self.logger.addHandler(console_handler) self.logger.addHandler(console_handler)
def __getstate__(self):
"""
FIXED: Prepare ForensicLogger for pickling by excluding problematic objects.
"""
state = self.__dict__.copy()
# Remove potentially unpickleable attributes that may contain weakrefs
unpicklable_attrs = ['logger', 'lock']
for attr in unpicklable_attrs:
if attr in state:
del state[attr]
# Convert sets to lists for JSON serialization compatibility
if 'session_metadata' in state:
metadata = state['session_metadata'].copy()
if 'providers_used' in metadata and isinstance(metadata['providers_used'], set):
metadata['providers_used'] = list(metadata['providers_used'])
if 'target_domains' in metadata and isinstance(metadata['target_domains'], set):
metadata['target_domains'] = list(metadata['target_domains'])
state['session_metadata'] = metadata
return state
def __setstate__(self, state):
"""
FIXED: Restore ForensicLogger after unpickling by reconstructing components.
"""
self.__dict__.update(state)
# Re-initialize threading lock
self.lock = threading.Lock()
# Re-initialize logger with minimal setup
self.logger = logging.getLogger(f'dnsrecon.{self.session_id}')
self.logger.setLevel(logging.INFO)
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
# Only add handler if not already present
if not self.logger.handlers:
console_handler = logging.StreamHandler()
console_handler.setFormatter(formatter)
self.logger.addHandler(console_handler)
# Convert lists back to sets if needed
if 'session_metadata' in self.__dict__:
metadata = self.session_metadata
if 'providers_used' in metadata and isinstance(metadata['providers_used'], list):
metadata['providers_used'] = set(metadata['providers_used'])
if 'target_domains' in metadata and isinstance(metadata['target_domains'], list):
metadata['target_domains'] = set(metadata['target_domains'])
def _generate_session_id(self) -> str:
"""Generate unique session identifier."""
return f"dnsrecon_{datetime.now(timezone.utc).strftime('%Y%m%d_%H%M%S')}"
@@ -121,18 +173,23 @@ class ForensicLogger:
discovery_context=discovery_context
)
with self.lock:
self.api_requests.append(api_request)
self.session_metadata['total_requests'] += 1
self.session_metadata['providers_used'].add(provider)
if target_indicator:
self.session_metadata['target_domains'].add(target_indicator)
# Log to standard logger with error handling
try:
if error:
self.logger.error(f"API Request Failed - {provider}: {url}")
else:
self.logger.info(f"API Request - {provider}: {url} - Status: {status_code}")
except Exception:
# If logging fails, continue without breaking the application
pass
def log_relationship_discovery(self, source_node: str, target_node: str,
relationship_type: str, confidence_score: float,
@@ -161,31 +218,44 @@ class ForensicLogger:
discovery_method=discovery_method
)
with self.lock:
self.relationships.append(relationship)
self.session_metadata['total_relationships'] += 1
# Log to standard logger with error handling
try:
self.logger.info(
f"Relationship Discovered - {source_node} -> {target_node} "
f"({relationship_type}) - Confidence: {confidence_score:.2f} - Provider: {provider}"
)
except Exception:
# If logging fails, continue without breaking the application
pass
def log_scan_start(self, target_domain: str, recursion_depth: int,
enabled_providers: List[str]) -> None:
"""Log the start of a reconnaissance scan."""
try:
self.logger.info(f"Scan Started - Target: {target_domain}, Depth: {recursion_depth}")
self.logger.info(f"Enabled Providers: {', '.join(enabled_providers)}")
with self.lock:
self.session_metadata['target_domains'].add(target_domain)
except Exception:
pass
def log_scan_complete(self) -> None:
"""Log the completion of a reconnaissance scan."""
with self.lock:
self.session_metadata['end_time'] = datetime.now(timezone.utc).isoformat()
# Convert sets to lists for serialization
self.session_metadata['providers_used'] = list(self.session_metadata['providers_used'])
self.session_metadata['target_domains'] = list(self.session_metadata['target_domains'])
try:
self.logger.info(f"Scan Complete - Session: {self.session_id}")
except Exception:
pass
def export_audit_trail(self) -> Dict[str, Any]:
"""
@@ -194,12 +264,13 @@ class ForensicLogger:
Returns:
Dictionary containing complete session audit trail
"""
with self.lock:
return {
'session_metadata': self.session_metadata.copy(),
'api_requests': [asdict(req) for req in self.api_requests],
'relationships': [asdict(rel) for rel in self.relationships],
'export_timestamp': datetime.now(timezone.utc).isoformat()
}
def get_forensic_summary(self) -> Dict[str, Any]:
"""
@@ -209,7 +280,13 @@ class ForensicLogger:
Dictionary containing summary statistics
"""
provider_stats = {}
# Ensure providers_used is a set for iteration
providers_used = self.session_metadata['providers_used']
if isinstance(providers_used, list):
providers_used = set(providers_used)
for provider in providers_used:
provider_requests = [req for req in self.api_requests if req.provider == provider]
provider_relationships = [rel for rel in self.relationships if rel.provider == provider]
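The __getstate__/__setstate__ pair added above is what lets a logger survive the Redis pickling round trip used elsewhere in this change set. A minimal sketch of the intended behaviour (not part of the diff), assuming the ForensicLogger class exactly as shown here:

import pickle
from core.logger import ForensicLogger

logger = ForensicLogger(session_id="dnsrecon_demo")
logger.log_scan_start("example.com", recursion_depth=2, enabled_providers=["dns", "crtsh"])

# Pickling drops the stdlib logger and the lock; sets become lists.
blob = pickle.dumps(logger)

# Unpickling rebuilds the lock and console handler and restores the sets.
restored = pickle.loads(blob)
assert isinstance(restored.session_metadata["target_domains"], set)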

core/provider_result.py (new file, 107 lines)

@@ -0,0 +1,107 @@
# dnsrecon-reduced/core/provider_result.py
"""
Unified data model for DNSRecon passive reconnaissance.
Standardizes the data structure across all providers to ensure consistent processing.
"""
from typing import Any, Optional, List, Dict
from dataclasses import dataclass, field
from datetime import datetime, timezone
@dataclass
class StandardAttribute:
"""A unified data structure for a single piece of information about a node."""
target_node: str
name: str
value: Any
type: str
provider: str
confidence: float
timestamp: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
metadata: Optional[Dict[str, Any]] = field(default_factory=dict)
def __post_init__(self):
"""Validate the attribute after initialization."""
if not isinstance(self.confidence, (int, float)) or not 0.0 <= self.confidence <= 1.0:
raise ValueError(f"Confidence must be between 0.0 and 1.0, got {self.confidence}")
@dataclass
class Relationship:
"""A unified data structure for a directional link between two nodes."""
source_node: str
target_node: str
relationship_type: str
confidence: float
provider: str
timestamp: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
raw_data: Optional[Dict[str, Any]] = field(default_factory=dict)
def __post_init__(self):
"""Validate the relationship after initialization."""
if not isinstance(self.confidence, (int, float)) or not 0.0 <= self.confidence <= 1.0:
raise ValueError(f"Confidence must be between 0.0 and 1.0, got {self.confidence}")
@dataclass
class ProviderResult:
"""A container for all data returned by a provider from a single query."""
attributes: List[StandardAttribute] = field(default_factory=list)
relationships: List[Relationship] = field(default_factory=list)
def add_attribute(self, target_node: str, name: str, value: Any, attr_type: str,
provider: str, confidence: float = 0.8,
metadata: Optional[Dict[str, Any]] = None) -> None:
"""Helper method to add an attribute to the result."""
self.attributes.append(StandardAttribute(
target_node=target_node,
name=name,
value=value,
type=attr_type,
provider=provider,
confidence=confidence,
metadata=metadata or {}
))
def add_relationship(self, source_node: str, target_node: str, relationship_type: str,
provider: str, confidence: float = 0.8,
raw_data: Optional[Dict[str, Any]] = None) -> None:
"""Helper method to add a relationship to the result."""
self.relationships.append(Relationship(
source_node=source_node,
target_node=target_node,
relationship_type=relationship_type,
confidence=confidence,
provider=provider,
raw_data=raw_data or {}
))
def get_discovered_nodes(self) -> set:
"""Get all unique node identifiers discovered in this result."""
nodes = set()
# Add nodes from relationships
for rel in self.relationships:
nodes.add(rel.source_node)
nodes.add(rel.target_node)
# Add nodes from attributes
for attr in self.attributes:
nodes.add(attr.target_node)
return nodes
def get_relationship_count(self) -> int:
"""Get the total number of relationships in this result."""
return len(self.relationships)
def get_attribute_count(self) -> int:
"""Get the total number of attributes in this result."""
return len(self.attributes)
##TODO
#def is_large_entity(self, threshold: int) -> bool:
# """Check if this result qualifies as a large entity based on relationship count."""
# return self.get_relationship_count() > threshold
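A minimal usage sketch (not part of the diff) showing how a provider is expected to populate and consume ProviderResult using only the helpers defined above; the relationship name and values are illustrative:

result = ProviderResult()
result.add_relationship(
    source_node="example.com",
    target_node="93.184.216.34",
    relationship_type="dns_a_record",  # illustrative name
    provider="dns",
    confidence=0.9,
)
result.add_attribute(
    target_node="93.184.216.34",
    name="asn",
    value="AS15133",
    attr_type="string",
    provider="shodan",
)
print(result.get_discovered_nodes())    # {'example.com', '93.184.216.34'}
print(result.get_relationship_count())  # 1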

core/rate_limiter.py (new file, 28 lines)

@@ -0,0 +1,28 @@
# dnsrecon-reduced/core/rate_limiter.py
import time
class GlobalRateLimiter:
def __init__(self, redis_client):
self.redis = redis_client
def is_rate_limited(self, key, limit, period):
"""
Check if a key is rate-limited.
"""
now = time.time()
key = f"rate_limit:{key}"
# Remove old timestamps
self.redis.zremrangebyscore(key, 0, now - period)
# Check the count
count = self.redis.zcard(key)
if count >= limit:
return True
# Add new timestamp
self.redis.zadd(key, {now: now})
self.redis.expire(key, period)
return False
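The limiter above implements a sliding window in a Redis sorted set: stale timestamps are trimmed, the remaining members are counted against the limit, and the current timestamp is recorded only when the request is allowed. A minimal usage sketch (not part of the diff), assuming a local Redis instance on the default port:

import time
import redis
from core.rate_limiter import GlobalRateLimiter

limiter = GlobalRateLimiter(redis.StrictRedis(db=0))

# Allow at most 60 crt.sh requests per 60-second window, shared across processes.
if not limiter.is_rate_limited("crtsh", limit=60, period=60):
    pass  # safe to issue the request here
else:
    time.sleep(1)  # back off and try again later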

(File diff suppressed because it is too large.)

core/session_config.py

@@ -3,11 +3,9 @@ Per-session configuration management for DNSRecon.
Provides isolated configuration instances for each user session.
"""
from config import Config


class SessionConfig(Config):
"""
Session-specific configuration that inherits from global config
but maintains isolated API keys and provider settings.
@@ -15,112 +13,8 @@ class SessionConfig:
def __init__(self):
"""Initialize session config with global defaults."""
super().__init__()
self.api_keys: Dict[str, Optional[str]] = {
'shodan': None,
'virustotal': None
}
# Default settings (copied from global config)
self.default_recursion_depth = 2
self.default_timeout = 30
self.max_concurrent_requests = 5
self.large_entity_threshold = 100
# Rate limiting settings (per session)
self.rate_limits = {
'crtsh': 60,
'virustotal': 4,
'shodan': 60,
'dns': 100
}
# Provider settings (per session)
self.enabled_providers = {
'crtsh': True,
'dns': True,
'virustotal': False,
'shodan': False
}
# Logging configuration
self.log_level = 'INFO'
self.log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
# Flask configuration (shared)
self.flask_host = '127.0.0.1'
self.flask_port = 5000
self.flask_debug = True
def set_api_key(self, provider: str, api_key: str) -> bool:
"""
Set API key for a provider in this session.
Args:
provider: Provider name (shodan, virustotal)
api_key: API key string
Returns:
bool: True if key was set successfully
"""
if provider in self.api_keys:
self.api_keys[provider] = api_key
self.enabled_providers[provider] = True if api_key else False
return True
return False
def get_api_key(self, provider: str) -> Optional[str]:
"""
Get API key for a provider in this session.
Args:
provider: Provider name
Returns:
API key or None if not set
"""
return self.api_keys.get(provider)
def is_provider_enabled(self, provider: str) -> bool:
"""
Check if a provider is enabled in this session.
Args:
provider: Provider name
Returns:
bool: True if provider is enabled
"""
return self.enabled_providers.get(provider, False)
def get_rate_limit(self, provider: str) -> int:
"""
Get rate limit for a provider in this session.
Args:
provider: Provider name
Returns:
Rate limit in requests per minute
"""
return self.rate_limits.get(provider, 60)
def load_from_env(self):
"""Load configuration from environment variables (only if not already set)."""
if os.getenv('VIRUSTOTAL_API_KEY') and not self.api_keys['virustotal']:
self.set_api_key('virustotal', os.getenv('VIRUSTOTAL_API_KEY'))
if os.getenv('SHODAN_API_KEY') and not self.api_keys['shodan']:
self.set_api_key('shodan', os.getenv('SHODAN_API_KEY'))
# Override default settings from environment
self.default_recursion_depth = int(os.getenv('DEFAULT_RECURSION_DEPTH', '2'))
self.default_timeout = 30
self.max_concurrent_requests = 5
def create_session_config() -> SessionConfig:
"""Create a new session configuration instance."""
return SessionConfig()
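With the refactor above, SessionConfig no longer duplicates every setting; it inherits whatever defaults the global Config class defines and stays mutable per session. A short sketch (not part of the diff; the attribute name is illustrative and depends on what Config actually defines):

from core.session_config import create_session_config

cfg_a = create_session_config()
cfg_b = create_session_config()

# Each call returns its own instance, so per-session changes do not leak.
cfg_a.default_recursion_depth = 4  # illustrative attribute inherited from Config
assert cfg_a is not cfg_b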

core/session_manager.py

@@ -1,281 +1,533 @@
""" # dnsrecon/core/session_manager.py
Session manager for DNSRecon multi-user support.
Manages individual scanner instances per user session with automatic cleanup.
"""
import threading import threading
import time import time
import uuid import uuid
import redis
import pickle
from typing import Dict, Optional, Any from typing import Dict, Optional, Any
from datetime import datetime, timezone import copy
from core.scanner import Scanner from core.scanner import Scanner
from config import config
class SessionManager:
"""
FIXED: Manages multiple scanner instances for concurrent user sessions using Redis.
Enhanced to properly maintain WebSocket connections throughout scan lifecycle.
"""
def __init__(self, session_timeout_minutes: int = 0):
"""
Initialize session manager with a Redis backend.
"""
if session_timeout_minutes is None:
session_timeout_minutes = config.session_timeout_minutes
self.redis_client = redis.StrictRedis(db=0, decode_responses=False)
self.session_timeout = session_timeout_minutes * 60  # Convert to seconds
self.lock = threading.Lock()
# FIXED: Add a creation lock to prevent race conditions
self.creation_lock = threading.Lock()
# Track active socketio connections per session
self.active_socketio_connections = {}
# Start cleanup thread
self.cleanup_thread = threading.Thread(target=self._cleanup_loop, daemon=True)
self.cleanup_thread.start()
print(f"SessionManager initialized with Redis backend and {session_timeout_minutes}min timeout")
def create_session(self) -> str: def __getstate__(self):
"""Prepare SessionManager for pickling."""
state = self.__dict__.copy()
# Exclude unpickleable attributes - Redis client and threading objects
unpicklable_attrs = ['lock', 'cleanup_thread', 'redis_client', 'creation_lock', 'active_socketio_connections']
for attr in unpicklable_attrs:
if attr in state:
del state[attr]
return state
def __setstate__(self, state):
"""Restore SessionManager after unpickling."""
self.__dict__.update(state)
# Re-initialize unpickleable attributes
self.redis_client = redis.StrictRedis(db=0, decode_responses=False)
self.lock = threading.Lock()
self.creation_lock = threading.Lock()
self.active_socketio_connections = {}
self.cleanup_thread = threading.Thread(target=self._cleanup_loop, daemon=True)
self.cleanup_thread.start()
def _get_session_key(self, session_id: str) -> str:
"""Generates the Redis key for a session."""
return f"dnsrecon:session:{session_id}"
def _get_stop_signal_key(self, session_id: str) -> str:
"""Generates the Redis key for a session's stop signal."""
return f"dnsrecon:stop:{session_id}"
def register_socketio_connection(self, session_id: str, socketio) -> None:
""" """
Create a new user session with dedicated scanner instance and configuration. FIXED: Register a socketio connection for a session.
Enhanced with better debugging and race condition protection. This ensures the connection is maintained throughout the session lifecycle.
Returns:
Unique session ID
""" """
session_id = str(uuid.uuid4()) with self.lock:
self.active_socketio_connections[session_id] = socketio
print(f"Registered socketio connection for session {session_id}")
def get_socketio_connection(self, session_id: str):
"""
FIXED: Get the active socketio connection for a session.
"""
with self.lock:
return self.active_socketio_connections.get(session_id)
def _prepare_scanner_for_storage(self, scanner: Scanner, session_id: str) -> Scanner:
"""
FIXED: Prepare scanner for storage by ensuring proper cleanup of unpicklable objects.
Now preserves socketio connection info for restoration.
"""
# Set the session ID on the scanner for cross-process stop signal management
scanner.session_id = session_id
print(f"=== CREATING SESSION {session_id} ===") # FIXED: Don't set socketio to None if we want to preserve real-time updates
# Instead, we'll restore it when loading the scanner
scanner.socketio = None
try: # Force cleanup of any threading objects that might cause issues
# Create session-specific configuration if hasattr(scanner, 'stop_event'):
from core.session_config import create_session_config scanner.stop_event = None
session_config = create_session_config() if hasattr(scanner, 'scan_thread'):
scanner.scan_thread = None
if hasattr(scanner, 'executor'):
scanner.executor = None
if hasattr(scanner, 'status_logger_thread'):
scanner.status_logger_thread = None
if hasattr(scanner, 'status_logger_stop_event'):
scanner.status_logger_stop_event = None
return scanner
def create_session(self, socketio=None) -> str:
"""
FIXED: Create a new user session with enhanced WebSocket management.
"""
# FIXED: Use creation lock to prevent race conditions
with self.creation_lock:
session_id = str(uuid.uuid4())
print(f"=== CREATING SESSION {session_id} IN REDIS ===")
print(f"Created session config for {session_id}") # FIXED: Register socketio connection first
if socketio:
self.register_socketio_connection(session_id, socketio)
# Create scanner with session config try:
from core.scanner import Scanner from core.session_config import create_session_config
scanner_instance = Scanner(session_config=session_config) session_config = create_session_config()
print(f"Created scanner instance {id(scanner_instance)} for session {session_id}") # Create scanner WITHOUT socketio to avoid weakref issues
print(f"Initial scanner status: {scanner_instance.status}") scanner_instance = Scanner(session_config=session_config, socketio=None)
with self.lock: # Prepare scanner for storage (removes problematic objects)
self.sessions[session_id] = { scanner_instance = self._prepare_scanner_for_storage(scanner_instance, session_id)
session_data = {
'scanner': scanner_instance,
'config': session_config,
'created_at': time.time(),
'last_activity': time.time(),
'user_agent': '',
'status': 'active'
}
print(f"Session {session_id} stored in session manager") # Test serialization before storing to catch issues early
print(f"Total active sessions: {len([s for s in self.sessions.values() if s['status'] == 'active'])}") try:
print(f"=== SESSION {session_id} CREATED SUCCESSFULLY ===") test_serialization = pickle.dumps(session_data)
print(f"Session serialization test successful ({len(test_serialization)} bytes)")
return session_id except Exception as pickle_error:
print(f"PICKLE TEST FAILED: {pickle_error}")
except Exception as e: # Try to identify the problematic object
print(f"ERROR: Failed to create session {session_id}: {e}") for key, value in session_data.items():
raise try:
pickle.dumps(value)
print(f" {key}: OK")
except Exception as item_error:
print(f" {key}: FAILED - {item_error}")
raise pickle_error
# Store in Redis
session_key = self._get_session_key(session_id)
self.redis_client.setex(session_key, self.session_timeout, test_serialization)
# Initialize stop signal as False
stop_key = self._get_stop_signal_key(session_id)
self.redis_client.setex(stop_key, self.session_timeout, b'0')
print(f"Session {session_id} stored in Redis with stop signal initialized")
print(f"Session has {len(scanner_instance.providers)} providers: {[p.get_name() for p in scanner_instance.providers]}")
return session_id
except Exception as e:
print(f"ERROR: Failed to create session {session_id}: {e}")
import traceback
traceback.print_exc()
raise
def get_session(self, session_id: str) -> Optional[object]: def set_stop_signal(self, session_id: str) -> bool:
""" """
Get scanner instance for a session with enhanced debugging. Set the stop signal for a session (cross-process safe).
Args: Args:
session_id: Session identifier session_id: Session identifier
Returns: Returns:
Scanner instance or None if session doesn't exist bool: True if signal was set successfully
"""
try:
stop_key = self._get_stop_signal_key(session_id)
# Set stop signal to '1' with the same TTL as the session
self.redis_client.setex(stop_key, self.session_timeout, b'1')
print(f"Stop signal set for session {session_id}")
return True
except Exception as e:
print(f"ERROR: Failed to set stop signal for session {session_id}: {e}")
return False
def is_stop_requested(self, session_id: str) -> bool:
"""
Check if stop is requested for a session (cross-process safe).
Args:
session_id: Session identifier
Returns:
bool: True if stop is requested
"""
try:
stop_key = self._get_stop_signal_key(session_id)
value = self.redis_client.get(stop_key)
return value == b'1' if value is not None else False
except Exception as e:
print(f"ERROR: Failed to check stop signal for session {session_id}: {e}")
return False
def clear_stop_signal(self, session_id: str) -> bool:
"""
Clear the stop signal for a session.
Args:
session_id: Session identifier
Returns:
bool: True if signal was cleared successfully
"""
try:
stop_key = self._get_stop_signal_key(session_id)
self.redis_client.setex(stop_key, self.session_timeout, b'0')
print(f"Stop signal cleared for session {session_id}")
return True
except Exception as e:
print(f"ERROR: Failed to clear stop signal for session {session_id}: {e}")
return False
def _get_session_data(self, session_id: str) -> Optional[Dict[str, Any]]:
"""Retrieves and deserializes session data from Redis."""
try:
session_key = self._get_session_key(session_id)
serialized_data = self.redis_client.get(session_key)
if serialized_data:
session_data = pickle.loads(serialized_data)
# Ensure the scanner has the correct session ID for stop signal checking
if 'scanner' in session_data and session_data['scanner']:
session_data['scanner'].session_id = session_id
# FIXED: Restore socketio connection from our registry
socketio_conn = self.get_socketio_connection(session_id)
if socketio_conn:
session_data['scanner'].socketio = socketio_conn
print(f"Restored socketio connection for session {session_id}")
else:
print(f"No socketio connection found for session {session_id}")
session_data['scanner'].socketio = None
return session_data
return None
except Exception as e:
print(f"ERROR: Failed to get session data for {session_id}: {e}")
import traceback
traceback.print_exc()
return None
def _save_session_data(self, session_id: str, session_data: Dict[str, Any]) -> bool:
"""
Serializes and saves session data back to Redis with updated TTL.
FIXED: Now preserves socketio connection during storage.
Returns:
bool: True if save was successful
"""
try:
session_key = self._get_session_key(session_id)
# Create a deep copy to avoid modifying the original scanner object
session_data_to_save = copy.deepcopy(session_data)
# Prepare scanner for storage if it exists
if 'scanner' in session_data_to_save and session_data_to_save['scanner']:
# FIXED: Preserve the original socketio connection before preparing for storage
original_socketio = session_data_to_save['scanner'].socketio
session_data_to_save['scanner'] = self._prepare_scanner_for_storage(
session_data_to_save['scanner'],
session_id
)
# FIXED: If we had a socketio connection, make sure it's registered
if original_socketio and session_id not in self.active_socketio_connections:
self.register_socketio_connection(session_id, original_socketio)
serialized_data = pickle.dumps(session_data_to_save)
result = self.redis_client.setex(session_key, self.session_timeout, serialized_data)
return result
except Exception as e:
print(f"ERROR: Failed to save session data for {session_id}: {e}")
import traceback
traceback.print_exc()
return False
def update_session_scanner(self, session_id: str, scanner: 'Scanner') -> bool:
"""
FIXED: Updates just the scanner object in a session with immediate persistence.
Now maintains socketio connection throughout the update process.
Returns:
bool: True if update was successful
"""
try:
session_data = self._get_session_data(session_id)
if session_data:
# FIXED: Preserve socketio connection before preparing for storage
original_socketio = scanner.socketio
# Prepare scanner for storage
scanner = self._prepare_scanner_for_storage(scanner, session_id)
session_data['scanner'] = scanner
session_data['last_activity'] = time.time()
# FIXED: Restore socketio connection after preparation
if original_socketio:
self.register_socketio_connection(session_id, original_socketio)
session_data['scanner'].socketio = original_socketio
# Immediately save to Redis for GUI updates
success = self._save_session_data(session_id, session_data)
if success:
# Only log occasionally to reduce noise
if hasattr(self, '_last_update_log'):
if time.time() - self._last_update_log > 5: # Log every 5 seconds max
self._last_update_log = time.time()
else:
self._last_update_log = time.time()
else:
print(f"WARNING: Failed to save scanner state for session {session_id}")
return success
else:
print(f"WARNING: Session {session_id} not found for scanner update")
return False
except Exception as e:
print(f"ERROR: Failed to update scanner for session {session_id}: {e}")
import traceback
traceback.print_exc()
return False
def update_scanner_status(self, session_id: str, status: str) -> bool:
"""
Quickly update just the scanner status for immediate GUI feedback.
Args:
session_id: Session identifier
status: New scanner status
Returns:
bool: True if update was successful
"""
try:
session_data = self._get_session_data(session_id)
if session_data and 'scanner' in session_data:
session_data['scanner'].status = status
session_data['last_activity'] = time.time()
success = self._save_session_data(session_id, session_data)
if success:
print(f"Scanner status updated to '{status}' for session {session_id}")
else:
print(f"WARNING: Failed to save status update for session {session_id}")
return success
return False
except Exception as e:
print(f"ERROR: Failed to update scanner status for session {session_id}: {e}")
return False
def get_session(self, session_id: str) -> Optional[Scanner]:
"""
FIXED: Get scanner instance for a session from Redis with proper socketio restoration.
""" """
if not session_id:
print("get_session called with empty session_id")
return None
with self.lock: session_data = self._get_session_data(session_id)
if session_id not in self.sessions:
print(f"Session {session_id} not found in session manager")
print(f"Available sessions: {list(self.sessions.keys())}")
return None
session_data = self.sessions[session_id]
# Check if session is still active
if session_data['status'] != 'active':
print(f"Session {session_id} is not active (status: {session_data['status']})")
return None
# Update last activity
session_data['last_activity'] = time.time()
scanner = session_data['scanner']
print(f"Retrieved scanner {id(scanner)} for session {session_id}")
print(f"Scanner status: {scanner.status}")
return scanner
def get_or_create_session(self, session_id: Optional[str] = None) -> tuple[str, Scanner]:
"""
Get existing session or create new one.
Args: if not session_data or session_data.get('status') != 'active':
session_id: Optional existing session ID return None
Returns:
Tuple of (session_id, scanner_instance)
"""
if session_id and self.get_session(session_id):
return session_id, self.get_session(session_id)
else:
new_session_id = self.create_session()
return new_session_id, self.get_session(new_session_id)
def terminate_session(self, session_id: str) -> bool:
"""
Terminate a specific session and cleanup resources.
Args: # Update last activity and save back to Redis
session_id: Session to terminate session_data['last_activity'] = time.time()
self._save_session_data(session_id, session_data)
scanner = session_data.get('scanner')
if scanner:
# Ensure the scanner can check the Redis-based stop signal
scanner.session_id = session_id
Returns: # FIXED: Restore socketio connection from our registry
True if session was terminated successfully socketio_conn = self.get_socketio_connection(session_id)
if socketio_conn:
scanner.socketio = socketio_conn
print(f"✓ Restored socketio connection for session {session_id}")
else:
scanner.socketio = None
print(f"⚠️ No socketio connection found for session {session_id}")
return scanner
def get_session_status_only(self, session_id: str) -> Optional[str]:
""" """
with self.lock: Get just the scanner status without full session retrieval (for performance).
if session_id not in self.sessions:
return False
session_data = self.sessions[session_id]
scanner = session_data['scanner']
# Stop any running scan
try:
if scanner.status == 'running':
scanner.stop_scan()
print(f"Stopped scan for session: {session_id}")
except Exception as e:
print(f"Error stopping scan for session {session_id}: {e}")
# Mark as terminated
session_data['status'] = 'terminated'
session_data['terminated_at'] = time.time()
# Remove from active sessions after a brief delay to allow cleanup
threading.Timer(5.0, lambda: self._remove_session(session_id)).start()
print(f"Terminated session: {session_id}")
return True
def _remove_session(self, session_id: str) -> None:
"""Remove session from memory."""
with self.lock:
if session_id in self.sessions:
del self.sessions[session_id]
print(f"Removed session from memory: {session_id}")
def get_session_info(self, session_id: str) -> Optional[Dict[str, Any]]:
"""
Get session information without updating activity.
Args: Args:
session_id: Session identifier session_id: Session identifier
Returns: Returns:
Session information dictionary or None Scanner status string or None if not found
""" """
with self.lock: try:
if session_id not in self.sessions: session_data = self._get_session_data(session_id)
return None if session_data and 'scanner' in session_data:
return session_data['scanner'].status
return None
except Exception as e:
print(f"ERROR: Failed to get session status for {session_id}: {e}")
return None
def terminate_session(self, session_id: str) -> bool:
"""
Terminate a specific session in Redis with reliable stop signal and immediate status update.
"""
print(f"=== TERMINATING SESSION {session_id} ===")
try:
# First, set the stop signal
self.set_stop_signal(session_id)
session_data = self.sessions[session_id] # Update scanner status to stopped immediately for GUI feedback
scanner = session_data['scanner'] self.update_scanner_status(session_id, 'stopped')
return { session_data = self._get_session_data(session_id)
'session_id': session_id, if not session_data:
'created_at': datetime.fromtimestamp(session_data['created_at'], timezone.utc).isoformat(), print(f"Session {session_id} not found")
'last_activity': datetime.fromtimestamp(session_data['last_activity'], timezone.utc).isoformat(), return False
'status': session_data['status'],
'scan_status': scanner.status, scanner = session_data.get('scanner')
'current_target': scanner.current_target, if scanner and scanner.status == 'running':
'uptime_seconds': time.time() - session_data['created_at'] print(f"Stopping scan for session: {session_id}")
} # The scanner will check the Redis stop signal
scanner.stop_scan()
def list_active_sessions(self) -> Dict[str, Dict[str, Any]]:
""" # Update the scanner state immediately
List all active sessions with enhanced debugging info. self.update_session_scanner(session_id, scanner)
Returns: # Wait a moment for graceful shutdown
Dictionary of session information time.sleep(0.5)
"""
active_sessions = {} # FIXED: Clean up socketio connection
with self.lock:
with self.lock: if session_id in self.active_socketio_connections:
for session_id, session_data in self.sessions.items(): del self.active_socketio_connections[session_id]
if session_data['status'] == 'active': print(f"Cleaned up socketio connection for session {session_id}")
scanner = session_data['scanner']
active_sessions[session_id] = { # Delete session data and stop signal from Redis
'session_id': session_id, session_key = self._get_session_key(session_id)
'created_at': datetime.fromtimestamp(session_data['created_at'], timezone.utc).isoformat(), stop_key = self._get_stop_signal_key(session_id)
'last_activity': datetime.fromtimestamp(session_data['last_activity'], timezone.utc).isoformat(), self.redis_client.delete(session_key)
'status': session_data['status'], self.redis_client.delete(stop_key)
'scan_status': scanner.status,
'current_target': scanner.current_target, print(f"Terminated and removed session from Redis: {session_id}")
'uptime_seconds': time.time() - session_data['created_at'], return True
'scanner_object_id': id(scanner)
} except Exception as e:
print(f"ERROR: Failed to terminate session {session_id}: {e}")
return active_sessions import traceback
traceback.print_exc()
return False
def _cleanup_loop(self) -> None:
"""
Background thread to cleanup inactive sessions and orphaned stop signals.
"""
while True:
try:
current_time = time.time() # Clean up orphaned stop signals
sessions_to_cleanup = [] stop_keys = self.redis_client.keys("dnsrecon:stop:*")
for stop_key in stop_keys:
with self.lock: # Extract session ID from stop key
for session_id, session_data in self.sessions.items(): session_id = stop_key.decode('utf-8').split(':')[-1]
if session_data['status'] != 'active': session_key = self._get_session_key(session_id)
continue
# If session doesn't exist but stop signal does, clean it up
inactive_time = current_time - session_data['last_activity'] if not self.redis_client.exists(session_key):
self.redis_client.delete(stop_key)
print(f"Cleaned up orphaned stop signal for session {session_id}")
# Also clean up socketio connection
with self.lock:
if session_id in self.active_socketio_connections:
del self.active_socketio_connections[session_id]
print(f"Cleaned up orphaned socketio for session {session_id}")
if inactive_time > self.session_timeout:
sessions_to_cleanup.append(session_id)
# Cleanup outside of lock to avoid deadlock
for session_id in sessions_to_cleanup:
print(f"Cleaning up inactive session: {session_id}")
self.terminate_session(session_id)
# Sleep for 5 minutes between cleanup cycles
time.sleep(300)
except Exception as e: except Exception as e:
print(f"Error in session cleanup loop: {e}") print(f"Error in cleanup loop: {e}")
time.sleep(60) # Sleep for 1 minute on error
time.sleep(300) # Sleep for 5 minutes
def get_statistics(self) -> Dict[str, Any]:
""" """Get session manager statistics."""
Get session manager statistics. try:
session_keys = self.redis_client.keys("dnsrecon:session:*")
Returns: stop_keys = self.redis_client.keys("dnsrecon:stop:*")
Statistics dictionary
""" active_sessions = len(session_keys)
with self.lock: running_scans = 0
active_count = sum(1 for s in self.sessions.values() if s['status'] == 'active')
running_scans = sum(1 for s in self.sessions.values() for session_key in session_keys:
if s['status'] == 'active' and s['scanner'].status == 'running') session_id = session_key.decode('utf-8').split(':')[-1]
status = self.get_session_status_only(session_id)
if status == 'running':
running_scans += 1
return { return {
'total_sessions': len(self.sessions), 'total_active_sessions': active_sessions,
'active_sessions': active_count,
'running_scans': running_scans, 'running_scans': running_scans,
'session_timeout_minutes': self.session_timeout / 60 'total_stop_signals': len(stop_keys),
'active_socketio_connections': len(self.active_socketio_connections)
}
except Exception as e:
print(f"ERROR: Failed to get statistics: {e}")
return {
'total_active_sessions': 0,
'running_scans': 0,
'total_stop_signals': 0,
'active_socketio_connections': 0
} }
# Global session manager instance
session_manager = SessionManager(session_timeout_minutes=60)
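The stop-signal helpers above provide a cross-process cancellation channel: the web process flips dnsrecon:stop:<session_id> to b'1' and the scan worker polls it. A minimal sketch of that flow (not part of the diff), using the SessionManager API as defined in this file:

from core.session_manager import session_manager

session_id = session_manager.create_session()

# ... a scan is started elsewhere for this session ...

# Web process: the user clicks "stop".
session_manager.set_stop_signal(session_id)

# Worker process: checked periodically inside the scan loop.
if session_manager.is_stop_requested(session_id):
    print("stop requested, shutting down scan")

# Once handled, the signal can be cleared if the session will be reused.
session_manager.clear_stop_signal(session_id)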


@@ -3,19 +3,20 @@ Data provider modules for DNSRecon.
Contains implementations for various reconnaissance data sources.
"""
from .base_provider import BaseProvider
from .crtsh_provider import CrtShProvider
from .dns_provider import DNSProvider
from .shodan_provider import ShodanProvider
from .correlation_provider import CorrelationProvider
from core.rate_limiter import GlobalRateLimiter

__all__ = [
'BaseProvider',
'GlobalRateLimiter',
'CrtShProvider',
'DNSProvider',
'ShodanProvider',
'CorrelationProvider'
]

__version__ = "0.0.0-rc"


@@ -3,45 +3,19 @@
import time
import requests
import threading
import os
import json
from abc import ABC, abstractmethod
from typing import Dict, Any, Optional
from core.logger import get_forensic_logger
from core.rate_limiter import GlobalRateLimiter
from core.provider_result import ProviderResult
class RateLimiter:
"""Simple rate limiter for API calls."""
def __init__(self, requests_per_minute: int):
"""
Initialize rate limiter.
Args:
requests_per_minute: Maximum requests allowed per minute
"""
self.requests_per_minute = requests_per_minute
self.min_interval = 60.0 / requests_per_minute
self.last_request_time = 0
def wait_if_needed(self) -> None:
"""Wait if necessary to respect rate limits."""
current_time = time.time()
time_since_last = current_time - self.last_request_time
if time_since_last < self.min_interval:
sleep_time = self.min_interval - time_since_last
time.sleep(sleep_time)
self.last_request_time = time.time()
class BaseProvider(ABC):
"""
Abstract base class for all DNSRecon data providers.
Now supports session-specific configuration and returns standardized ProviderResult objects.
FIXED: Enhanced pickle support to prevent weakref serialization errors.
"""
def __init__(self, name: str, rate_limit: int = 60, timeout: int = 30, session_config=None):
@@ -63,32 +37,74 @@ class BaseProvider(ABC):
# Fallback to global config for backwards compatibility
from config import config as global_config
self.config = global_config
actual_rate_limit = rate_limit
actual_timeout = timeout
self.name = name
self.rate_limiter = RateLimiter(actual_rate_limit)
self.timeout = actual_timeout
self._local = threading.local()
self.logger = get_forensic_logger()
self._stop_event = None
# Caching configuration (per session)
self.cache_dir = f'.cache/{id(self.config)}' # Unique cache per session config
self.cache_expiry = 12 * 3600 # 12 hours in seconds
if not os.path.exists(self.cache_dir):
os.makedirs(self.cache_dir)
# Statistics (per provider instance)
self.total_requests = 0
self.successful_requests = 0
self.failed_requests = 0
self.total_relationships_found = 0

def __getstate__(self):
"""Prepare BaseProvider for pickling by excluding unpicklable objects."""
state = self.__dict__.copy()
# Exclude unpickleable attributes that may contain weakrefs
unpicklable_attrs = [
'_local', # Thread-local storage (contains requests.Session)
'_stop_event', # Threading event
'logger', # Logger may contain weakrefs in handlers
]
for attr in unpicklable_attrs:
if attr in state:
del state[attr]
# Also handle any potential weakrefs in the config object
if 'config' in state and hasattr(state['config'], '__getstate__'):
# If config has its own pickle support, let it handle itself
pass
elif 'config' in state:
# Otherwise, ensure config doesn't contain unpicklable objects
try:
# Test if config can be pickled
import pickle
pickle.dumps(state['config'])
except (TypeError, AttributeError):
# If config can't be pickled, we'll recreate it during unpickling
state['_config_class'] = type(state['config']).__name__
del state['config']
return state
def __setstate__(self, state):
"""Restore BaseProvider after unpickling by reconstructing threading objects."""
self.__dict__.update(state)
# Re-initialize unpickleable attributes
self._local = threading.local()
self._stop_event = None
self.logger = get_forensic_logger()
# Recreate config if it was removed during pickling
if not hasattr(self, 'config') and hasattr(self, '_config_class'):
if self._config_class == 'Config':
from config import config as global_config
self.config = global_config
elif self._config_class == 'SessionConfig':
from core.session_config import create_session_config
self.config = create_session_config()
del self._config_class
@property
def session(self):
"""Get or create thread-local requests session."""
if not hasattr(self._local, 'session'):
self._local.session = requests.Session()
self._local.session.headers.update({
@@ -101,13 +117,28 @@ class BaseProvider(ABC):
"""Return the provider name.""" """Return the provider name."""
pass pass
@abstractmethod
def get_display_name(self) -> str:
"""Return the provider display name for the UI."""
pass
@abstractmethod
def requires_api_key(self) -> bool:
"""Return True if the provider requires an API key."""
pass
@abstractmethod
def get_eligibility(self) -> Dict[str, bool]:
"""Return a dictionary indicating if the provider can query domains and/or IPs."""
pass
@abstractmethod
def is_available(self) -> bool:
"""Check if the provider is available and properly configured."""
pass

@abstractmethod
def query_domain(self, domain: str) -> ProviderResult:
"""
Query the provider for information about a domain.
@@ -115,12 +146,12 @@ class BaseProvider(ABC):
domain: Domain to investigate

Returns:
ProviderResult containing standardized attributes and relationships
"""
pass
@abstractmethod
def query_ip(self, ip: str) -> ProviderResult:
"""
Query the provider for information about an IP address.
@@ -128,170 +159,100 @@ class BaseProvider(ABC):
ip: IP address to investigate

Returns:
ProviderResult containing standardized attributes and relationships
"""
pass
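Under the reworked interface a concrete provider implements the metadata hooks plus query_domain/query_ip and returns a ProviderResult instead of a list of tuples. A minimal, illustrative sketch (not part of the diff; the module path and eligibility keys are assumptions, and the relationship name is made up):

from typing import Dict

from core.provider_result import ProviderResult
from providers.base_provider import BaseProvider  # assumed module path


class ExampleProvider(BaseProvider):
    def get_name(self) -> str:
        return "example"

    def get_display_name(self) -> str:
        return "Example Provider"

    def requires_api_key(self) -> bool:
        return False

    def get_eligibility(self) -> Dict[str, bool]:
        return {'domains': True, 'ips': False}  # assumed key names

    def is_available(self) -> bool:
        return True

    def query_domain(self, domain: str) -> ProviderResult:
        result = ProviderResult()
        result.add_relationship(domain, f"www.{domain}", "example_subdomain", self.name)
        return result

    def query_ip(self, ip: str) -> ProviderResult:
        return ProviderResult()  # this sketch does not handle IPs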
def make_request(self, url: str, method: str = "GET", def make_request(self, url: str, method: str = "GET",
params: Optional[Dict[str, Any]] = None, params: Optional[Dict[str, Any]] = None,
headers: Optional[Dict[str, str]] = None, headers: Optional[Dict[str, str]] = None,
target_indicator: str = "", target_indicator: str = "") -> Optional[requests.Response]:
max_retries: int = 3) -> Optional[requests.Response]:
""" """
Make a rate-limited HTTP request with forensic logging and retry logic. Make a rate-limited HTTP request.
Now supports cancellation via stop_event from scanner. FIXED: Returns response without automatically raising HTTPError exceptions.
Individual providers should handle status codes appropriately.
""" """
if self._is_stop_requested():
print(f"Request cancelled before start: {url}")
return None
# Create a unique cache key start_time = time.time()
cache_key = f"{self.name}_{hash(f'{method}:{url}:{json.dumps(params, sort_keys=True)}')}.json" response = None
cache_path = os.path.join(self.cache_dir, cache_key) error = None
# Check cache try:
if os.path.exists(cache_path): self.total_requests += 1
cache_age = time.time() - os.path.getmtime(cache_path)
if cache_age < self.cache_expiry:
print(f"Returning cached response for: {url}")
with open(cache_path, 'r') as f:
cached_data = json.load(f)
response = requests.Response()
response.status_code = cached_data['status_code']
response._content = cached_data['content'].encode('utf-8')
response.headers = cached_data['headers']
return response
for attempt in range(max_retries + 1): request_headers = dict(self.session.headers).copy()
if headers:
request_headers.update(headers)
print(f"Making {method} request to: {url}")
if method.upper() == "GET":
response = self.session.get(
url,
params=params,
headers=request_headers,
timeout=self.timeout
)
elif method.upper() == "POST":
response = self.session.post(
url,
json=params,
headers=request_headers,
timeout=self.timeout
)
else:
raise ValueError(f"Unsupported HTTP method: {method}")
print(f"Response status: {response.status_code}")
# FIXED: Don't automatically raise for HTTP error status codes
# Let individual providers handle status codes appropriately
# Only count 2xx responses as successful
if 200 <= response.status_code < 300:
self.successful_requests += 1
else:
self.failed_requests += 1
duration_ms = (time.time() - start_time) * 1000
self.logger.log_api_request(
provider=self.name,
url=url,
method=method.upper(),
status_code=response.status_code,
response_size=len(response.content),
duration_ms=duration_ms,
error=None,
target_indicator=target_indicator
)
return response
except requests.exceptions.RequestException as e:
error = str(e)
self.failed_requests += 1
duration_ms = (time.time() - start_time) * 1000
self.logger.log_api_request(
provider=self.name,
url=url,
method=method.upper(),
status_code=response.status_code if response else None,
response_size=len(response.content) if response else None,
duration_ms=duration_ms,
error=error,
target_indicator=target_indicator
)
raise e
def _is_stop_requested(self) -> bool:
"""
Enhanced stop signal checking that handles both local and Redis-based signals.
"""
if hasattr(self, '_stop_event') and self._stop_event and self._stop_event.is_set():
return True
return False
def set_stop_event(self, stop_event: threading.Event) -> None:
"""
@@ -302,30 +263,8 @@ class BaseProvider(ABC):
"""
self._stop_event = stop_event
def _should_retry(self, exception: requests.exceptions.RequestException) -> bool:
"""
Determine if a request should be retried based on the exception.
Args:
exception: The request exception that occurred
Returns:
True if the request should be retried
"""
# Retry on connection errors, timeouts, and 5xx server errors
if isinstance(exception, (requests.exceptions.ConnectionError,
requests.exceptions.Timeout)):
return True
if isinstance(exception, requests.exceptions.HTTPError):
if hasattr(exception, 'response') and exception.response:
# Retry on server errors (5xx) but not client errors (4xx)
return exception.response.status_code >= 500
return False
def log_relationship_discovery(self, source_node: str, target_node: str,
relationship_type: str,
confidence_score: float,
raw_data: Dict[str, Any],
discovery_method: str) -> None:
@@ -345,7 +284,7 @@ class BaseProvider(ABC):
self.logger.log_relationship_discovery(
source_node=source_node,
target_node=target_node,
relationship_type=relationship_type,
confidence_score=confidence_score,
provider=self.name,
raw_data=raw_data,
@@ -366,5 +305,5 @@ class BaseProvider(ABC):
'failed_requests': self.failed_requests,
'success_rate': (self.successful_requests / self.total_requests * 100) if self.total_requests > 0 else 0,
'relationships_found': self.total_relationships_found,
'rate_limit': self.config.get_rate_limit(self.name)
}
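Since make_request now returns non-2xx responses instead of raising, each provider decides what a given status code means. A minimal sketch of that pattern, assuming a hypothetical ExampleProvider placed in the providers package and an illustrative endpoint that is not part of this repository:

# Hypothetical provider sketch: interpret status codes locally instead of relying
# on make_request() to raise. ExampleProvider and the /lookup endpoint are illustrative.
from .base_provider import BaseProvider
from core.provider_result import ProviderResult

class ExampleProvider(BaseProvider):
    def query_domain(self, domain: str) -> ProviderResult:
        result = ProviderResult()
        response = self.make_request(
            "https://api.example.com/lookup", method="GET",
            params={"q": domain}, target_indicator=domain
        )
        if response is None:
            return result              # network failure: treat as no data
        if response.status_code == 404:
            return result              # "no information" is not an error
        if 200 <= response.status_code < 300:
            data = response.json()
            # ...translate data into relationships/attributes on result...
        return result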

View File

@@ -0,0 +1,220 @@
# dnsrecon/providers/correlation_provider.py
import re
from typing import Dict, Any, List
from .base_provider import BaseProvider
from core.provider_result import ProviderResult
from core.graph_manager import NodeType, GraphManager
class CorrelationProvider(BaseProvider):
"""
A provider that finds correlations between nodes in the graph.
FIXED: Enhanced pickle support to prevent weakref issues with graph references.
"""
def __init__(self, name: str = "correlation", session_config=None):
"""
Initialize the correlation provider.
"""
super().__init__(name, session_config=session_config)
self.graph: GraphManager | None = None
self.correlation_index = {}
self.date_pattern = re.compile(r'^\d{4}-\d{2}-\d{2}[ T]\d{2}:\d{2}:\d{2}')
self.EXCLUDED_KEYS = [
'cert_source',
'cert_issuer_ca_id',
'cert_common_name',
'cert_validity_period_days',
'cert_issuer_name',
'cert_serial_number',
'cert_entry_timestamp',
'cert_not_before',
'cert_not_after',
'dns_ttl',
'timestamp',
'last_update',
'updated_timestamp',
'discovery_timestamp',
'query_timestamp',
]
def __getstate__(self):
"""
FIXED: Prepare CorrelationProvider for pickling by excluding graph reference.
"""
state = super().__getstate__()
# Remove graph reference to prevent circular dependencies and weakrefs
if 'graph' in state:
del state['graph']
# Also handle correlation_index which might contain complex objects
if 'correlation_index' in state:
# Clear correlation index as it will be rebuilt when needed
state['correlation_index'] = {}
return state
def __setstate__(self, state):
"""
FIXED: Restore CorrelationProvider after unpickling.
"""
super().__setstate__(state)
# Re-initialize graph reference (will be set by scanner)
self.graph = None
# Re-initialize correlation index
self.correlation_index = {}
# Re-compile regex pattern
self.date_pattern = re.compile(r'^\d{4}-\d{2}-\d{2}[ T]\d{2}:\d{2}:\d{2}')
def get_name(self) -> str:
"""Return the provider name."""
return "correlation"
def get_display_name(self) -> str:
"""Return the provider display name for the UI."""
return "Correlation Engine"
def requires_api_key(self) -> bool:
"""Return True if the provider requires an API key."""
return False
def get_eligibility(self) -> Dict[str, bool]:
"""Return a dictionary indicating if the provider can query domains and/or IPs."""
return {'domains': True, 'ips': True}
def is_available(self) -> bool:
"""Check if the provider is available and properly configured."""
return True
def query_domain(self, domain: str) -> ProviderResult:
"""
Query the provider for information about a domain.
"""
return self._find_correlations(domain)
def query_ip(self, ip: str) -> ProviderResult:
"""
Query the provider for information about an IP address.
"""
return self._find_correlations(ip)
def set_graph_manager(self, graph_manager: GraphManager):
"""
Set the graph manager for the provider to use.
"""
self.graph = graph_manager
def _find_correlations(self, node_id: str) -> ProviderResult:
"""
Find correlations for a given node.
FIXED: Added safety checks to prevent issues when graph is None.
"""
result = ProviderResult()
# FIXED: Ensure self.graph is not None before proceeding
if not self.graph or not self.graph.graph.has_node(node_id):
return result
try:
node_attributes = self.graph.graph.nodes[node_id].get('attributes', [])
except Exception as e:
# If there's any issue accessing the graph, return empty result
print(f"Warning: Could not access graph for correlation analysis: {e}")
return result
for attr in node_attributes:
attr_name = attr.get('name')
attr_value = attr.get('value')
attr_provider = attr.get('provider', 'unknown')
should_exclude = (
any(excluded_key in attr_name or attr_name == excluded_key for excluded_key in self.EXCLUDED_KEYS) or
not isinstance(attr_value, (str, int, float, bool)) or
attr_value is None or
isinstance(attr_value, bool) or
(isinstance(attr_value, str) and (
len(attr_value) < 4 or
self.date_pattern.match(attr_value) or
attr_value.lower() in ['unknown', 'none', 'null', 'n/a', 'true', 'false', '0', '1']
)) or
(isinstance(attr_value, (int, float)) and (
attr_value == 0 or
attr_value == 1 or
abs(attr_value) > 1000000
))
)
if should_exclude:
continue
if attr_value not in self.correlation_index:
self.correlation_index[attr_value] = {
'nodes': set(),
'sources': []
}
self.correlation_index[attr_value]['nodes'].add(node_id)
source_info = {
'node_id': node_id,
'provider': attr_provider,
'attribute': attr_name,
'path': f"{attr_provider}_{attr_name}"
}
existing_sources = [s for s in self.correlation_index[attr_value]['sources']
if s['node_id'] == node_id and s['path'] == source_info['path']]
if not existing_sources:
self.correlation_index[attr_value]['sources'].append(source_info)
if len(self.correlation_index[attr_value]['nodes']) > 1:
self._create_correlation_relationships(attr_value, self.correlation_index[attr_value], result)
return result
def _create_correlation_relationships(self, value: Any, correlation_data: Dict[str, Any], result: ProviderResult):
"""
Create correlation relationships and add them to the provider result.
"""
correlation_node_id = f"corr_{hash(str(value)) & 0x7FFFFFFF}"
nodes = correlation_data['nodes']
sources = correlation_data['sources']
# Add the correlation node as an attribute to the result
result.add_attribute(
target_node=correlation_node_id,
name="correlation_value",
value=value,
attr_type=str(type(value)),
provider=self.name,
confidence=0.9,
metadata={
'correlated_nodes': list(nodes),
'sources': sources,
}
)
for source in sources:
node_id = source['node_id']
provider = source['provider']
attribute = source['attribute']
relationship_label = f"corr_{provider}_{attribute}"
# Add the relationship to the result
result.add_relationship(
source_node=node_id,
target_node=correlation_node_id,
relationship_type=relationship_label,
provider=self.name,
confidence=0.9,
raw_data={
'correlation_value': value,
'original_attribute': attribute,
'correlation_type': 'attribute_matching'
}
)
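The correlation index above keys node IDs by shared attribute values and only materialises a correlation node once a value appears on more than one node. A standalone sketch of that idea with made-up node data, not the project's graph model:

# Standalone illustration of the correlation-index idea used above.
# The nodes dict below is hypothetical sample data.
from collections import defaultdict

nodes = {
    "example.com":  [{"name": "whois_org", "value": "Acme Ltd"}],
    "example.org":  [{"name": "whois_org", "value": "Acme Ltd"}],
    "198.51.100.7": [{"name": "org",       "value": "Acme Ltd"}],
}

index = defaultdict(set)
for node_id, attributes in nodes.items():
    for attr in attributes:
        index[attr["value"]].add(node_id)

# Values shared by more than one node would become correlation nodes.
for value, members in index.items():
    if len(members) > 1:
        print(f"corr_{hash(value) & 0x7FFFFFFF}: {value} -> {sorted(members)}")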

File diff suppressed because it is too large

View File

@@ -1,20 +1,20 @@
# dnsrecon/providers/dns_provider.py
from dns import resolver, reversename
from typing import Dict
from .base_provider import BaseProvider
from core.provider_result import ProviderResult
from utils.helpers import _is_valid_ip, _is_valid_domain, get_ip_version
class DNSProvider(BaseProvider):
"""
Provider for standard DNS resolution and reverse DNS lookups.
Now returns standardized ProviderResult objects with IPv4 and IPv6 support.
FIXED: Enhanced pickle support to prevent resolver serialization issues.
"""
def __init__(self, name=None, session_config=None):
"""Initialize DNS provider with session-specific configuration."""
super().__init__(
name="dns",
@@ -24,7 +24,23 @@ class DNSProvider(BaseProvider):
)
# Configure DNS resolver
self.resolver = resolver.Resolver()
self.resolver.timeout = 5
self.resolver.lifetime = 10
def __getstate__(self):
"""Prepare the object for pickling by excluding resolver."""
state = super().__getstate__()
# Remove the unpickleable 'resolver' attribute
if 'resolver' in state:
del state['resolver']
return state
def __setstate__(self, state):
"""Restore the object after unpickling by reconstructing resolver."""
super().__setstate__(state)
# Re-initialize the 'resolver' attribute
self.resolver = resolver.Resolver()
self.resolver.timeout = 5
self.resolver.lifetime = 10
@@ -32,147 +48,256 @@ class DNSProvider(BaseProvider):
"""Return the provider name."""
return "dns"
def get_display_name(self) -> str:
"""Return the provider display name for the UI."""
return "DNS"
def requires_api_key(self) -> bool:
"""Return True if the provider requires an API key."""
return False
def get_eligibility(self) -> Dict[str, bool]:
"""Return a dictionary indicating if the provider can query domains and/or IPs."""
return {'domains': True, 'ips': True}
def is_available(self) -> bool:
"""DNS is always available - no API key required."""
return True
def query_domain(self, domain: str) -> ProviderResult:
"""
Query DNS records for the domain to discover relationships and attributes.
FIXED: Now creates separate attributes for each DNS record type.
Args:
domain: Domain to investigate
Returns:
ProviderResult containing discovered relationships and attributes
"""
if not _is_valid_domain(domain):
return ProviderResult()
result = ProviderResult()
# Query all record types - each gets its own attribute
for record_type in ['A', 'AAAA', 'CNAME', 'MX', 'NS', 'SOA', 'TXT', 'SRV', 'CAA']:
try:
self._query_record(domain, record_type, result)
#except resolver.NoAnswer:
# This is not an error, just a confirmation that the record doesn't exist.
#self.logger.logger.debug(f"No {record_type} record found for {domain}")
except Exception as e:
self.failed_requests += 1
self.logger.logger.debug(f"{record_type} record query failed for {domain}: {e}")
return result
def query_ip(self, ip: str) -> ProviderResult:
"""
Query reverse DNS for the IP address (supports both IPv4 and IPv6).
Args:
ip: IP address to investigate (IPv4 or IPv6)
Returns:
ProviderResult containing discovered relationships and attributes
"""
if not _is_valid_ip(ip):
return ProviderResult()
result = ProviderResult()
ip_version = get_ip_version(ip)
try:
# Perform reverse DNS lookup (works for both IPv4 and IPv6)
self.total_requests += 1
reverse_name = reversename.from_address(ip)
response = self.resolver.resolve(reverse_name, 'PTR')
self.successful_requests += 1
ptr_records = []
for ptr_record in response:
hostname = str(ptr_record).rstrip('.')
if _is_valid_domain(hostname):
# Determine appropriate forward relationship type based on IP version
if ip_version == 6:
relationship_type = 'shodan_aaaa_record'
record_prefix = 'AAAA'
else:
relationship_type = 'shodan_a_record'
record_prefix = 'A'
# Add the relationship
result.add_relationship(
source_node=ip,
target_node=hostname,
relationship_type='dns_ptr_record',
provider=self.name,
confidence=0.8,
raw_data={
'query_type': 'PTR',
'ip_address': ip,
'ip_version': ip_version,
'hostname': hostname,
'ttl': response.ttl
}
)
# Add to PTR records list
ptr_records.append(f"PTR: {hostname}")
self.log_relationship_discovery(
source_node=ip,
target_node=hostname,
relationship_type='dns_ptr_record',
confidence_score=0.8,
raw_data={
'query_type': 'PTR',
'ip_address': ip,
'ip_version': ip_version,
'hostname': hostname,
'ttl': response.ttl
},
discovery_method=f"reverse_dns_lookup_ipv{ip_version}"
)
# Add PTR records as separate attribute
if ptr_records:
result.add_attribute(
target_node=ip,
name='ptr_records', # Specific name for PTR records
value=ptr_records,
attr_type='dns_record',
provider=self.name,
confidence=0.8,
metadata={'ttl': response.ttl, 'ip_version': ip_version}
)
except resolver.NXDOMAIN:
self.failed_requests += 1
self.logger.logger.debug(f"Reverse DNS lookup failed for {ip}: NXDOMAIN")
except Exception as e:
self.failed_requests += 1
self.logger.logger.debug(f"Reverse DNS lookup failed for {ip}: {e}")
# Re-raise the exception so the scanner can handle the failure
raise e
return result
def _query_record(self, domain: str, record_type: str, result: ProviderResult) -> None:
"""
FIXED: Query DNS records with unique attribute names for each record type.
Enhanced to better handle IPv6 AAAA records.
"""
try:
self.total_requests += 1
response = self.resolver.resolve(domain, record_type)
self.successful_requests += 1
dns_records = []
for record in response:
target = ""
if record_type in ['A', 'AAAA']:
target = str(record)
# Validate that the IP address is properly formed
if not _is_valid_ip(target):
self.logger.logger.debug(f"Invalid IP address in {record_type} record: {target}")
continue
elif record_type in ['CNAME', 'NS', 'PTR']:
target = str(record.target).rstrip('.')
elif record_type == 'MX':
target = str(record.exchange).rstrip('.')
elif record_type == 'SOA':
target = str(record.mname).rstrip('.')
elif record_type in ['TXT']:
# Keep raw TXT record value
txt_value = str(record).strip('"')
dns_records.append(txt_value)  # Just the value for TXT
continue
elif record_type == 'SRV':
target = str(record.target).rstrip('.')
elif record_type == 'CAA':
# Keep raw CAA record format
caa_value = f"{record.flags} {record.tag.decode('utf-8')} \"{record.value.decode('utf-8')}\""
dns_records.append(caa_value)  # Just the value for CAA
continue
else:
target = str(record)
if target:
# Determine IP version for metadata if this is an IP record
ip_version = None
if record_type in ['A', 'AAAA'] and _is_valid_ip(target):
ip_version = get_ip_version(target)
raw_data = {
'query_type': record_type,
'domain': domain,
'value': target,
'ttl': response.ttl
}
if ip_version:
raw_data['ip_version'] = ip_version
relationship_type = f"dns_{record_type.lower()}_record"
confidence = 0.8
# Add relationship
result.add_relationship(
source_node=domain,
target_node=target,
relationship_type=relationship_type,
provider=self.name,
confidence=confidence,
raw_data=raw_data
)
# Add target to records list
dns_records.append(target)
# Log relationship discovery with IP version info
discovery_method = f"dns_{record_type.lower()}_record"
if ip_version:
discovery_method += f"_ipv{ip_version}"
self.log_relationship_discovery(
source_node=domain,
target_node=target,
relationship_type=relationship_type,
confidence_score=confidence,
raw_data=raw_data,
discovery_method=discovery_method
)
# FIXED: Create attribute with specific name for each record type
if dns_records:
# Use record type specific attribute name (e.g., 'a_records', 'mx_records', etc.)
attribute_name = f"{record_type.lower()}_records"
metadata = {'record_type': record_type, 'ttl': response.ttl}
# Add IP version info for A/AAAA records
if record_type in ['A', 'AAAA'] and dns_records:
first_ip_version = get_ip_version(dns_records[0])
if first_ip_version:
metadata['ip_version'] = first_ip_version
result.add_attribute(
target_node=domain,
name=attribute_name, # UNIQUE name for each record type!
value=dns_records,
attr_type='dns_record_list',
provider=self.name,
confidence=0.8,
metadata=metadata
)
except Exception as e:
self.failed_requests += 1
self.logger.logger.debug(f"{record_type} record query failed for {domain}: {e}")
raise e
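Because every record type ends up in its own attribute (a_records, mx_records, and so on), downstream code can address record sets by name. A small standalone sketch of that naming convention using dnspython directly; the domain is illustrative and resolver behaviour depends on the local network:

# Minimal sketch of the per-record-type attribute naming used above.
from dns import resolver

def collect_records(domain: str) -> dict:
    records = {}
    for record_type in ['A', 'AAAA', 'MX', 'NS', 'TXT']:
        try:
            answer = resolver.resolve(domain, record_type)
            # e.g. 'a_records', 'mx_records' - one attribute name per record type
            records[f"{record_type.lower()}_records"] = [str(r) for r in answer]
        except Exception:
            continue
    return records

print(collect_records("example.com"))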

View File

@@ -1,22 +1,23 @@
""" # dnsrecon/providers/shodan_provider.py
Shodan provider for DNSRecon.
Discovers IP relationships and infrastructure context through Shodan API.
"""
import json import json
from typing import List, Dict, Any, Tuple from pathlib import Path
from typing import Dict, Any
from datetime import datetime, timezone
import requests
from .base_provider import BaseProvider from .base_provider import BaseProvider
from utils.helpers import _is_valid_ip, _is_valid_domain from core.provider_result import ProviderResult
from core.graph_manager import RelationshipType from utils.helpers import _is_valid_ip, _is_valid_domain, get_ip_version, normalize_ip
class ShodanProvider(BaseProvider): class ShodanProvider(BaseProvider):
""" """
Provider for querying Shodan API for IP address and hostname information. Provider for querying Shodan API for IP address information.
Now uses session-specific API keys. Now returns standardized ProviderResult objects with caching support for IPv4 and IPv6.
""" """
def __init__(self, session_config=None): def __init__(self, name=None, session_config=None):
"""Initialize Shodan provider with session-specific configuration.""" """Initialize Shodan provider with session-specific configuration."""
super().__init__( super().__init__(
name="shodan", name="shodan",
@@ -26,273 +27,442 @@ class ShodanProvider(BaseProvider):
) )
self.base_url = "https://api.shodan.io" self.base_url = "https://api.shodan.io"
self.api_key = self.config.get_api_key('shodan') self.api_key = self.config.get_api_key('shodan')
# FIXED: Don't fail initialization on connection issues - defer to actual usage
self._connection_tested = False
self._connection_works = False
# Initialize cache directory
self.cache_dir = Path('cache') / 'shodan'
self.cache_dir.mkdir(parents=True, exist_ok=True)
def __getstate__(self):
"""Prepare the object for pickling."""
state = super().__getstate__()
return state
def __setstate__(self, state):
"""Restore the object after unpickling."""
super().__setstate__(state)
def _check_api_connection(self) -> bool:
"""
FIXED: Lazy connection checking - only test when actually needed.
Don't block provider initialization on network issues.
"""
if self._connection_tested:
return self._connection_works
if not self.api_key:
self._connection_tested = True
self._connection_works = False
return False
try:
print(f"Testing Shodan API connection with key: {self.api_key[:8]}...")
response = self.session.get(f"{self.base_url}/api-info?key={self.api_key}", timeout=5)
self._connection_works = response.status_code == 200
print(f"Shodan API test result: {response.status_code} - {'Success' if self._connection_works else 'Failed'}")
except requests.exceptions.RequestException as e:
print(f"Shodan API connection test failed: {e}")
self._connection_works = False
finally:
self._connection_tested = True
return self._connection_works
def is_available(self) -> bool:
"""
FIXED: Check if Shodan provider is available based on API key presence.
Don't require successful connection test during initialization.
"""
has_api_key = self.api_key is not None and len(self.api_key.strip()) > 0
if not has_api_key:
return False
# FIXED: Only test connection on first actual usage, not during initialization
return True
def get_name(self) -> str:
"""Return the provider name."""
return "shodan"
def get_display_name(self) -> str:
"""Return the provider display name for the UI."""
return "Shodan"
def requires_api_key(self) -> bool:
"""Return True if the provider requires an API key."""
return True
def get_eligibility(self) -> Dict[str, bool]:
"""Return a dictionary indicating if the provider can query domains and/or IPs."""
return {'domains': False, 'ips': True}
def _get_cache_file_path(self, ip: str) -> Path:
"""
Generate cache file path for an IP address (IPv4 or IPv6).
IPv6 addresses contain colons which are replaced with underscores for filesystem safety.
"""
# Normalize the IP address first to ensure consistent caching
normalized_ip = normalize_ip(ip)
if not normalized_ip:
# Fallback for invalid IPs
safe_ip = ip.replace('.', '_').replace(':', '_')
else:
# Replace problematic characters for both IPv4 and IPv6
safe_ip = normalized_ip.replace('.', '_').replace(':', '_')
return self.cache_dir / f"{safe_ip}.json"
def _get_cache_status(self, cache_file_path: Path) -> str:
"""
Check cache status for an IP.
Returns: 'not_found', 'fresh', or 'stale'
"""
if not cache_file_path.exists():
return "not_found"
try:
with open(cache_file_path, 'r') as f:
cache_data = json.load(f)
last_query_str = cache_data.get("last_upstream_query")
if not last_query_str:
return "stale"
last_query = datetime.fromisoformat(last_query_str.replace('Z', '+00:00'))
hours_since_query = (datetime.now(timezone.utc) - last_query).total_seconds() / 3600
cache_timeout = self.config.cache_timeout_hours
if hours_since_query < cache_timeout:
return "fresh"
else:
return "stale"
except (json.JSONDecodeError, ValueError, KeyError):
return "stale"
def query_domain(self, domain: str) -> ProviderResult:
"""
Shodan does not support domain queries. This method returns an empty result.
"""
return ProviderResult()
def query_ip(self, ip: str) -> ProviderResult:
"""
Query Shodan for information about an IP address (IPv4 or IPv6), with caching of processed data.
FIXED: Proper 404 handling to prevent unnecessary retries.
Args:
ip: IP address to investigate (IPv4 or IPv6)
Returns:
ProviderResult containing discovered relationships and attributes
Raises:
Exception: For temporary failures that should be retried (timeouts, 502/503 errors, connection issues)
"""
if not _is_valid_ip(ip):
return ProviderResult()
# Test connection only when actually making requests
if not self._check_api_connection():
print(f"Shodan API not available for {ip} - API key: {'present' if self.api_key else 'missing'}")
return ProviderResult()
# Normalize IP address for consistent processing
normalized_ip = normalize_ip(ip)
if not normalized_ip:
return ProviderResult()
cache_file = self._get_cache_file_path(normalized_ip)
cache_status = self._get_cache_status(cache_file)
if cache_status == "fresh":
self.logger.logger.debug(f"Using fresh cache for Shodan query: {normalized_ip}")
return self._load_from_cache(cache_file)
# Need to query API
self.logger.logger.debug(f"Querying Shodan API for: {normalized_ip}")
url = f"{self.base_url}/shodan/host/{normalized_ip}"
params = {'key': self.api_key}
try:
response = self.make_request(url, method="GET", params=params, target_indicator=normalized_ip)
if not response:
self.logger.logger.warning(f"Shodan API unreachable for {normalized_ip} - network failure")
if cache_status == "stale":
self.logger.logger.info(f"Using stale cache for {normalized_ip} due to network failure")
return self._load_from_cache(cache_file)
else:
# FIXED: Treat network failures as "no information" rather than retryable errors
self.logger.logger.info(f"No Shodan data available for {normalized_ip} due to network failure")
result = ProviderResult()  # Empty result
network_failure_data = {'shodan_status': 'network_unreachable', 'error': 'API unreachable'}
self._save_to_cache(cache_file, result, network_failure_data)
return result
# FIXED: Handle different status codes more precisely
if response.status_code == 200:
self.logger.logger.debug(f"Shodan returned data for {normalized_ip}")
try:
data = response.json()
result = self._process_shodan_data(normalized_ip, data)
self._save_to_cache(cache_file, result, data)
return result
except json.JSONDecodeError as e:
self.logger.logger.error(f"Invalid JSON response from Shodan for {normalized_ip}: {e}")
if cache_status == "stale":
return self._load_from_cache(cache_file)
else:
raise requests.exceptions.RequestException("Invalid JSON response from Shodan - should retry")
elif response.status_code == 404:
# FIXED: 404 = "no information available" - successful but empty result, don't retry
self.logger.logger.debug(f"Shodan has no information for {normalized_ip} (404)")
result = ProviderResult() # Empty but successful result
# Cache the empty result to avoid repeated queries
empty_data = {'shodan_status': 'no_information', 'status_code': 404}
self._save_to_cache(cache_file, result, empty_data)
return result
elif response.status_code in [401, 403]:
# Authentication/authorization errors - permanent failures, don't retry
self.logger.logger.error(f"Shodan API authentication failed for {normalized_ip} (HTTP {response.status_code})")
return ProviderResult() # Empty result, don't retry
elif response.status_code == 429:
# Rate limiting - should be handled by rate limiter, but if we get here, retry
self.logger.logger.warning(f"Shodan API rate limited for {normalized_ip} (HTTP {response.status_code})")
if cache_status == "stale":
self.logger.logger.info(f"Using stale cache for {normalized_ip} due to rate limiting")
return self._load_from_cache(cache_file)
else:
raise requests.exceptions.RequestException(f"Shodan API rate limited (HTTP {response.status_code}) - should retry")
elif response.status_code in [500, 502, 503, 504]:
# Server errors - temporary failures that should be retried
self.logger.logger.warning(f"Shodan API server error for {normalized_ip} (HTTP {response.status_code})")
if cache_status == "stale":
self.logger.logger.info(f"Using stale cache for {normalized_ip} due to server error")
return self._load_from_cache(cache_file)
else:
raise requests.exceptions.RequestException(f"Shodan API server error (HTTP {response.status_code}) - should retry")
else:
# FIXED: Other HTTP status codes - treat as no information available, don't retry
self.logger.logger.info(f"Shodan returned status {response.status_code} for {normalized_ip} - treating as no information")
result = ProviderResult() # Empty result
no_info_data = {'shodan_status': 'no_information', 'status_code': response.status_code}
self._save_to_cache(cache_file, result, no_info_data)
return result
except requests.exceptions.Timeout:
# Timeout errors - should be retried
self.logger.logger.warning(f"Shodan API timeout for {normalized_ip}")
if cache_status == "stale":
self.logger.logger.info(f"Using stale cache for {normalized_ip} due to timeout")
return self._load_from_cache(cache_file)
else:
raise # Re-raise timeout for retry
except requests.exceptions.ConnectionError:
# Connection errors - should be retried
self.logger.logger.warning(f"Shodan API connection error for {normalized_ip}")
if cache_status == "stale":
self.logger.logger.info(f"Using stale cache for {normalized_ip} due to connection error")
return self._load_from_cache(cache_file)
else:
raise # Re-raise connection error for retry
except json.JSONDecodeError:
# JSON parsing error - treat as temporary failure
self.logger.logger.error(f"Invalid JSON response from Shodan for {normalized_ip}")
if cache_status == "stale":
self.logger.logger.info(f"Using stale cache for {normalized_ip} due to JSON parsing error")
return self._load_from_cache(cache_file)
else:
raise requests.exceptions.RequestException("Invalid JSON response from Shodan - should retry")
# FIXED: Remove the generic RequestException handler that was causing 404s to retry
# Now only specific exceptions that should be retried are re-raised
except Exception as e:
# FIXED: Unexpected exceptions - log but treat as no information available, don't retry
self.logger.logger.warning(f"Unexpected exception in Shodan query for {normalized_ip}: {e}")
result = ProviderResult()  # Empty result
error_data = {'shodan_status': 'error', 'error': str(e)}
self._save_to_cache(cache_file, result, error_data)
return result
def _load_from_cache(self, cache_file_path: Path) -> ProviderResult:
"""Load processed Shodan data from a cache file."""
try:
with open(cache_file_path, 'r') as f:
cache_content = json.load(f)
result = ProviderResult()
# Reconstruct relationships
for rel_data in cache_content.get("relationships", []):
result.add_relationship(
source_node=rel_data["source_node"],
target_node=rel_data["target_node"],
relationship_type=rel_data["relationship_type"],
provider=rel_data["provider"],
confidence=rel_data["confidence"],
raw_data=rel_data.get("raw_data", {})
)
# Reconstruct attributes
for attr_data in cache_content.get("attributes", []):
result.add_attribute(
target_node=attr_data["target_node"],
name=attr_data["name"],
value=attr_data["value"],
attr_type=attr_data["type"],
provider=attr_data["provider"],
confidence=attr_data["confidence"],
metadata=attr_data.get("metadata", {})
)
return result
except (json.JSONDecodeError, FileNotFoundError, KeyError):
return ProviderResult()
def _save_to_cache(self, cache_file_path: Path, result: ProviderResult, raw_data: Dict[str, Any]) -> None:
"""Save processed Shodan data to a cache file."""
try:
cache_data = {
"last_upstream_query": datetime.now(timezone.utc).isoformat(),
"raw_data": raw_data,  # Preserve original for forensic purposes
"relationships": [
{
"source_node": rel.source_node,
"target_node": rel.target_node,
"relationship_type": rel.relationship_type,
"confidence": rel.confidence,
"provider": rel.provider,
"raw_data": rel.raw_data
} for rel in result.relationships
],
"attributes": [
{
"target_node": attr.target_node,
"name": attr.name,
"value": attr.value,
"type": attr.type,
"provider": attr.provider,
"confidence": attr.confidence,
"metadata": attr.metadata
} for attr in result.attributes
]
}
with open(cache_file_path, 'w') as f:
json.dump(cache_data, f, separators=(',', ':'), default=str)
except Exception as e:
self.logger.logger.warning(f"Failed to save Shodan cache for {cache_file_path.name}: {e}")
def _process_shodan_data(self, ip: str, data: Dict[str, Any]) -> ProviderResult:
"""
VERIFIED: Process Shodan data creating ISP nodes with ASN attributes and proper relationships.
Enhanced to include IP version information for IPv6 addresses.
"""
result = ProviderResult()
# Determine IP version for metadata
ip_version = get_ip_version(ip)
# VERIFIED: Extract ISP information and create proper ISP node with ASN
isp_name = data.get('org')
asn_value = data.get('asn')
if isp_name and asn_value:
# Create relationship from IP to ISP
result.add_relationship(
source_node=ip,
target_node=isp_name,
relationship_type='shodan_isp',
provider=self.name,
confidence=0.9,
raw_data={'asn': asn_value, 'shodan_org': isp_name, 'ip_version': ip_version}
)
# Add ASN as attribute to the ISP node
result.add_attribute(
target_node=isp_name,
name='asn',
value=asn_value,
attr_type='isp_info',
provider=self.name,
confidence=0.9,
metadata={'description': 'Autonomous System Number from Shodan', 'ip_version': ip_version}
)
# Also add organization name as attribute to ISP node for completeness
result.add_attribute(
target_node=isp_name,
name='organization_name',
value=isp_name,
attr_type='isp_info',
provider=self.name,
confidence=0.9,
metadata={'description': 'Organization name from Shodan', 'ip_version': ip_version}
)
# Process hostnames (reverse DNS)
for key, value in data.items():
if key == 'hostnames':
for hostname in value:
if _is_valid_domain(hostname):
# Use appropriate relationship type based on IP version
if ip_version == 6:
relationship_type = 'shodan_aaaa_record'
else:
relationship_type = 'shodan_a_record'
result.add_relationship(
source_node=ip,
target_node=hostname,
relationship_type=relationship_type,
provider=self.name,
confidence=0.8,
raw_data={**data, 'ip_version': ip_version}
)
self.log_relationship_discovery(
source_node=ip,
target_node=hostname,
relationship_type=relationship_type,
confidence_score=0.8,
raw_data={**data, 'ip_version': ip_version},
discovery_method=f"shodan_host_lookup_ipv{ip_version}"
)
elif key == 'ports':
# Add open ports as attributes to the IP
for port in value:
result.add_attribute(
target_node=ip,
name='shodan_open_port',
value=port,
attr_type='shodan_network_info',
provider=self.name,
confidence=0.9,
metadata={'ip_version': ip_version}
)
elif isinstance(value, (str, int, float, bool)) and value is not None:
# Add other Shodan fields as IP attributes (keep raw field names)
result.add_attribute(
target_node=ip,
name=key, # Raw field name from Shodan API
value=value,
attr_type='shodan_info',
provider=self.name,
confidence=0.9,
metadata={'ip_version': ip_version}
)
return result
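The cache layer stores already-processed relationships and attributes next to the raw response, keyed by a filesystem-safe form of the IP, and treats entries younger than the configured timeout as fresh. A standalone sketch of the filename normalisation and freshness check; the 12-hour timeout and cache directory are assumed example values, not the project's configured defaults:

# Standalone sketch of the cache-freshness idea used by the Shodan provider.
import json
from datetime import datetime, timezone
from pathlib import Path

CACHE_DIR = Path('cache') / 'shodan'
CACHE_TIMEOUT_HOURS = 12  # assumed example value

def cache_path(ip: str) -> Path:
    # '192.0.2.1' -> '192_0_2_1.json', '2001:db8::1' -> '2001_db8__1.json'
    return CACHE_DIR / (ip.replace('.', '_').replace(':', '_') + '.json')

def cache_status(path: Path) -> str:
    if not path.exists():
        return 'not_found'
    try:
        last = json.loads(path.read_text()).get('last_upstream_query')
        age_h = (datetime.now(timezone.utc) - datetime.fromisoformat(last)).total_seconds() / 3600
        return 'fresh' if age_h < CACHE_TIMEOUT_HOURS else 'stale'
    except (json.JSONDecodeError, TypeError, ValueError):
        return 'stale'

print(cache_status(cache_path('2001:db8::1')))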

View File

@@ -1,333 +0,0 @@
"""
VirusTotal provider for DNSRecon.
Discovers domain relationships through passive DNS and URL analysis.
"""
import json
from typing import List, Dict, Any, Tuple
from .base_provider import BaseProvider
from utils.helpers import _is_valid_ip, _is_valid_domain
from core.graph_manager import RelationshipType
class VirusTotalProvider(BaseProvider):
"""
Provider for querying VirusTotal API for passive DNS and domain reputation data.
Now uses session-specific API keys and rate limits.
"""
def __init__(self, session_config=None):
"""Initialize VirusTotal provider with session-specific configuration."""
super().__init__(
name="virustotal",
rate_limit=4, # Free tier: 4 requests per minute
timeout=30,
session_config=session_config
)
self.base_url = "https://www.virustotal.com/vtapi/v2"
self.api_key = self.config.get_api_key('virustotal')
def is_available(self) -> bool:
"""Check if VirusTotal provider is available (has valid API key in this session)."""
return self.api_key is not None and len(self.api_key.strip()) > 0
def get_name(self) -> str:
"""Return the provider name."""
return "virustotal"
def query_domain(self, domain: str) -> List[Tuple[str, str, RelationshipType, float, Dict[str, Any]]]:
"""
Query VirusTotal for domain information including passive DNS.
Args:
domain: Domain to investigate
Returns:
List of relationships discovered from VirusTotal data
"""
if not _is_valid_domain(domain) or not self.is_available():
return []
relationships = []
# Query domain report
domain_relationships = self._query_domain_report(domain)
relationships.extend(domain_relationships)
# Query passive DNS for the domain
passive_dns_relationships = self._query_passive_dns_domain(domain)
relationships.extend(passive_dns_relationships)
return relationships
def query_ip(self, ip: str) -> List[Tuple[str, str, RelationshipType, float, Dict[str, Any]]]:
"""
Query VirusTotal for IP address information including passive DNS.
Args:
ip: IP address to investigate
Returns:
List of relationships discovered from VirusTotal IP data
"""
if not _is_valid_ip(ip) or not self.is_available():
return []
relationships = []
# Query IP report
ip_relationships = self._query_ip_report(ip)
relationships.extend(ip_relationships)
# Query passive DNS for the IP
passive_dns_relationships = self._query_passive_dns_ip(ip)
relationships.extend(passive_dns_relationships)
return relationships
def _query_domain_report(self, domain: str) -> List[Tuple[str, str, RelationshipType, float, Dict[str, Any]]]:
"""Query VirusTotal domain report."""
relationships = []
try:
url = f"{self.base_url}/domain/report"
params = {
'apikey': self.api_key,
'domain': domain,
'allinfo': 1 # Get comprehensive information
}
response = self.make_request(url, method="GET", params=params, target_indicator=domain)
if not response or response.status_code != 200:
return []
data = response.json()
if data.get('response_code') != 1:
return []
# Extract resolved IPs
resolutions = data.get('resolutions', [])
for resolution in resolutions:
ip_address = resolution.get('ip_address')
last_resolved = resolution.get('last_resolved')
if ip_address and _is_valid_ip(ip_address):
raw_data = {
'domain': domain,
'ip_address': ip_address,
'last_resolved': last_resolved,
'source': 'virustotal_domain_report'
}
relationships.append((
domain,
ip_address,
RelationshipType.PASSIVE_DNS,
RelationshipType.PASSIVE_DNS.default_confidence,
raw_data
))
self.log_relationship_discovery(
source_node=domain,
target_node=ip_address,
relationship_type=RelationshipType.PASSIVE_DNS,
confidence_score=RelationshipType.PASSIVE_DNS.default_confidence,
raw_data=raw_data,
discovery_method="virustotal_domain_resolution"
)
# Extract subdomains
subdomains = data.get('subdomains', [])
for subdomain in subdomains:
if subdomain != domain and _is_valid_domain(subdomain):
raw_data = {
'parent_domain': domain,
'subdomain': subdomain,
'source': 'virustotal_subdomain_discovery'
}
relationships.append((
domain,
subdomain,
RelationshipType.PASSIVE_DNS,
0.7, # Medium-high confidence for subdomains
raw_data
))
self.log_relationship_discovery(
source_node=domain,
target_node=subdomain,
relationship_type=RelationshipType.PASSIVE_DNS,
confidence_score=0.7,
raw_data=raw_data,
discovery_method="virustotal_subdomain_discovery"
)
except json.JSONDecodeError as e:
self.logger.logger.error(f"Failed to parse JSON response from VirusTotal: {e}")
except Exception as e:
self.logger.logger.error(f"Error querying VirusTotal domain report for {domain}: {e}")
return relationships
def _query_ip_report(self, ip: str) -> List[Tuple[str, str, RelationshipType, float, Dict[str, Any]]]:
"""Query VirusTotal IP report."""
relationships = []
try:
url = f"{self.base_url}/ip-address/report"
params = {
'apikey': self.api_key,
'ip': ip
}
response = self.make_request(url, method="GET", params=params, target_indicator=ip)
if not response or response.status_code != 200:
return []
data = response.json()
if data.get('response_code') != 1:
return []
# Extract resolved domains
resolutions = data.get('resolutions', [])
for resolution in resolutions:
hostname = resolution.get('hostname')
last_resolved = resolution.get('last_resolved')
if hostname and _is_valid_domain(hostname):
raw_data = {
'ip_address': ip,
'hostname': hostname,
'last_resolved': last_resolved,
'source': 'virustotal_ip_report'
}
relationships.append((
ip,
hostname,
RelationshipType.PASSIVE_DNS,
RelationshipType.PASSIVE_DNS.default_confidence,
raw_data
))
self.log_relationship_discovery(
source_node=ip,
target_node=hostname,
relationship_type=RelationshipType.PASSIVE_DNS,
confidence_score=RelationshipType.PASSIVE_DNS.default_confidence,
raw_data=raw_data,
discovery_method="virustotal_ip_resolution"
)
except json.JSONDecodeError as e:
self.logger.logger.error(f"Failed to parse JSON response from VirusTotal: {e}")
except Exception as e:
self.logger.logger.error(f"Error querying VirusTotal IP report for {ip}: {e}")
return relationships
def _query_passive_dns_domain(self, domain: str) -> List[Tuple[str, str, RelationshipType, float, Dict[str, Any]]]:
"""Query VirusTotal passive DNS for domain."""
# Note: VirusTotal's passive DNS API might require a premium subscription
# This is a placeholder for the endpoint structure
return []
def _query_passive_dns_ip(self, ip: str) -> List[Tuple[str, str, RelationshipType, float, Dict[str, Any]]]:
"""Query VirusTotal passive DNS for IP."""
# Note: VirusTotal's passive DNS API might require a premium subscription
# This is a placeholder for the endpoint structure
return []
def get_domain_reputation(self, domain: str) -> Dict[str, Any]:
"""
Get domain reputation information from VirusTotal.
Args:
domain: Domain to check reputation for
Returns:
Dictionary containing reputation data
"""
if not _is_valid_domain(domain) or not self.is_available():
return {}
try:
url = f"{self.base_url}/domain/report"
params = {
'apikey': self.api_key,
'domain': domain
}
response = self.make_request(url, method="GET", params=params, target_indicator=domain)
if response and response.status_code == 200:
data = response.json()
if data.get('response_code') == 1:
return {
'positives': data.get('positives', 0),
'total': data.get('total', 0),
'scan_date': data.get('scan_date', ''),
'permalink': data.get('permalink', ''),
'reputation_score': self._calculate_reputation_score(data)
}
except Exception as e:
self.logger.logger.error(f"Error getting VirusTotal reputation for domain {domain}: {e}")
return {}
def get_ip_reputation(self, ip: str) -> Dict[str, Any]:
"""
Get IP reputation information from VirusTotal.
Args:
ip: IP address to check reputation for
Returns:
Dictionary containing reputation data
"""
if not _is_valid_ip(ip) or not self.is_available():
return {}
try:
url = f"{self.base_url}/ip-address/report"
params = {
'apikey': self.api_key,
'ip': ip
}
response = self.make_request(url, method="GET", params=params, target_indicator=ip)
if response and response.status_code == 200:
data = response.json()
if data.get('response_code') == 1:
return {
'positives': data.get('positives', 0),
'total': data.get('total', 0),
'scan_date': data.get('scan_date', ''),
'permalink': data.get('permalink', ''),
'reputation_score': self._calculate_reputation_score(data)
}
except Exception as e:
self.logger.logger.error(f"Error getting VirusTotal reputation for IP {ip}: {e}")
return {}
def _calculate_reputation_score(self, data: Dict[str, Any]) -> float:
"""Calculate a normalized reputation score (0.0 to 1.0)."""
positives = data.get('positives', 0)
total = data.get('total', 1) # Avoid division by zero
if total == 0:
return 1.0 # No data means neutral
# Score is inverse of detection ratio (lower detection = higher reputation)
return max(0.0, 1.0 - (positives / total))
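The score is simply the inverse of the detection ratio, clamped to [0, 1]. A short worked example of the same formula:

# Worked example of the reputation formula above: score = 1 - positives/total.
def reputation(positives: int, total: int) -> float:
    if total == 0:
        return 1.0  # no data is treated as neutral
    return max(0.0, 1.0 - (positives / total))

print(reputation(0, 70))   # 1.0  -> no engine flagged it
print(reputation(5, 70))   # ~0.93
print(reputation(35, 70))  # 0.5  -> half of the engines flagged it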

View File

@@ -1,7 +1,13 @@
Flask
networkx
requests
python-dateutil
Werkzeug
urllib3
dnspython
gunicorn
redis
python-dotenv
psycopg2-binary
Flask-SocketIO
eventlet
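The added dependencies point at a Socket.IO layer running on eventlet, with Redis available as a message queue. A minimal, hypothetical wiring sketch; the module layout and the scan_update event name are assumptions for illustration, not taken from this repository:

# Hypothetical minimal wiring for Flask-SocketIO with the eventlet worker model.
from flask import Flask
from flask_socketio import SocketIO

app = Flask(__name__)
socketio = SocketIO(app, async_mode='eventlet', message_queue='redis://localhost:6379/0')

@socketio.on('connect')
def handle_connect():
    # Push an initial status frame to the newly connected client.
    socketio.emit('scan_update', {'status': 'idle', 'relationships': 0})

if __name__ == '__main__':
    socketio.run(app, host='0.0.0.0', port=5000)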

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -1,14 +1,19 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>DNSRecon - Infrastructure Reconnaissance</title>
<link rel="stylesheet" href="{{ url_for('static', filename='css/main.css') }}">
<script src="https://cdnjs.cloudflare.com/ajax/libs/vis/4.21.0/vis.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/socket.io/4.7.2/socket.io.js"></script>
<link href="https://cdnjs.cloudflare.com/ajax/libs/vis/4.21.0/vis.min.css" rel="stylesheet" type="text/css">
<link
href="https://fonts.googleapis.com/css2?family=Roboto+Mono:wght@300;400;500;700&family=Special+Elite&display=swap"
rel="stylesheet">
</head>
<body>
<div class="container">
<header class="header">
@@ -29,40 +34,33 @@
<div class="panel-header"> <div class="panel-header">
<h2>Target Configuration</h2> <h2>Target Configuration</h2>
</div> </div>
<div class="form-container"> <div class="form-container">
<div class="input-group"> <div class="input-group">
<label for="target-domain">Target Domain</label> <label for="target-input">Target Domain or IP</label>
<input type="text" id="target-domain" placeholder="example.com" autocomplete="off"> <input type="text" id="target-input" placeholder="example.com or 8.8.8.8" autocomplete="off">
</div> </div>
<div class="input-group">
<label for="max-depth">Recursion Depth</label>
<select id="max-depth">
<option value="1">Depth 1 - Direct relationships</option>
<option value="2" selected>Depth 2 - Recommended</option>
<option value="3">Depth 3 - Extended analysis</option>
<option value="4">Depth 4 - Deep reconnaissance</option>
<option value="5">Depth 5 - Maximum depth</option>
</select>
</div>
<div class="button-group"> <div class="button-group">
<button id="start-scan" class="btn btn-primary"> <button id="start-scan" class="btn btn-primary">
<span class="btn-icon">[RUN]</span> <span class="btn-icon">[RUN]</span>
<span>Start Reconnaissance</span> <span>Start Reconnaissance</span>
</button> </button>
<button id="add-to-graph" class="btn btn-primary">
<span class="btn-icon">[ADD]</span>
<span>Add to Graph</span>
</button>
<button id="stop-scan" class="btn btn-secondary" disabled> <button id="stop-scan" class="btn btn-secondary" disabled>
<span class="btn-icon">[STOP]</span> <span class="btn-icon">[STOP]</span>
<span>Terminate Scan</span> <span>Terminate Scan</span>
</button> </button>
<button id="export-results" class="btn btn-secondary"> <button id="export-options" class="btn btn-secondary">
<span class="btn-icon">[EXPORT]</span> <span class="btn-icon">[EXPORT]</span>
<span>Download Results</span> <span>Export Options</span>
</button> </button>
<button id="configure-api-keys" class="btn btn-secondary"> <button id="configure-settings" class="btn btn-secondary">
<span class="btn-icon">[API]</span> <span class="btn-icon">[API]</span>
<span>Configure API Keys</span> <span>Settings</span>
</button> </button>
</div> </div>
</div> </div>
@@ -72,7 +70,7 @@
<div class="panel-header"> <div class="panel-header">
<h2>Reconnaissance Status</h2> <h2>Reconnaissance Status</h2>
</div> </div>
<div class="status-content"> <div class="status-content">
<div class="status-row"> <div class="status-row">
<span class="status-label">Current Status:</span> <span class="status-label">Current Status:</span>
@@ -86,72 +84,80 @@
<span class="status-label">Depth:</span> <span class="status-label">Depth:</span>
<span id="depth-display" class="status-value">0/0</span> <span id="depth-display" class="status-value">0/0</span>
</div> </div>
<div class="status-row">
<span class="status-label">Progress:</span>
<span id="progress-display" class="status-value">0%</span>
</div>
<div class="status-row">
<span class="status-label">Indicators:</span>
<span id="indicators-display" class="status-value">0</span>
</div>
<div class="status-row"> <div class="status-row">
<span class="status-label">Relationships:</span> <span class="status-label">Relationships:</span>
<span id="relationships-display" class="status-value">0</span> <span id="relationships-display" class="status-value">0</span>
</div> </div>
</div> </div>
<div class="progress-bar"> <div class="progress-container">
<div id="progress-fill" class="progress-fill"></div> <div class="progress-info">
<span id="progress-label">Progress:</span>
<span id="progress-compact">0/0</span>
</div>
<div class="progress-bar">
<div id="progress-fill" class="progress-fill"></div>
</div>
<div class="progress-placeholder">
<span class="status-label">
⚠️ <strong>Important:</strong> Scanning large public services (e.g., Google, Cloudflare,
AWS) is
<strong>discouraged</strong> due to rate limits (e.g., crt.sh).
<br><br>
Our task scheduler operates on a <strong>priority-based queue</strong>:
Short, targeted tasks like DNS are processed first, while resource-intensive requests (e.g.,
crt.sh)
are <strong>automatically deprioritized</strong> and may be processed later.
</span>
</div>
</div> </div>
</section> </section>
<section class="visualization-panel"> <section class="visualization-panel">
<div class="panel-header"> <div class="panel-header">
<h2>Infrastructure Map</h2> <h2>Infrastructure Map</h2>
<div class="view-controls">
<button id="reset-view" class="btn-icon-small" title="Reset View">[↻]</button>
<button id="fit-view" class="btn-icon-small" title="Fit to Screen">[□]</button>
</div>
</div> </div>
<div id="network-graph" class="graph-container"> <div id="network-graph" class="graph-container">
<div class="graph-placeholder"> <div class="graph-placeholder">
<div class="placeholder-content"> <div class="placeholder-content">
<div class="placeholder-icon">[]</div> <div class="placeholder-icon">[]</div>
<div class="placeholder-text">Infrastructure map will appear here</div> <div class="placeholder-text">Infrastructure map will appear here</div>
<div class="placeholder-subtext">Start a reconnaissance scan to visualize relationships</div> <div class="placeholder-subtext">Start a reconnaissance scan to visualize relationships
</div>
</div> </div>
</div> </div>
</div> </div>
<div class="legend"> <div class="legend">
<div class="legend-item"> <div class="legend-item">
<div class="legend-color" style="background-color: #00ff41;"></div> <div class="legend-color" style="background-color: #00ff41;"></div>
<span>Domains</span> <span>Domains</span>
</div> </div>
<div class="legend-item">
<div class="legend-color" style="background-color: #c92f2f;"></div>
<span>Domain (no valid cert)</span>
</div>
<div class="legend-item">
<div class="legend-color" style="background-color: #c7c7c7;"></div>
<span>Domain (never had cert)</span>
</div>
<div class="legend-item"> <div class="legend-item">
<div class="legend-color" style="background-color: #ff9900;"></div> <div class="legend-color" style="background-color: #ff9900;"></div>
<span>IP Addresses</span> <span>IP Addresses</span>
</div> </div>
<div class="legend-item"> <div class="legend-item">
<div class="legend-color" style="background-color: #c7c7c7;"></div> <div class="legend-color" style="background-color: #00aaff;"></div>
<span>Certificates</span> <span>ISPs</span>
</div>
<div class="legend-item">
<div class="legend-color" style="background-color: #9d4edd;"></div>
<span>DNS Records</span>
</div>
<div class="legend-item">
<div class="legend-edge high-confidence"></div>
<span>High Confidence</span>
</div>
<div class="legend-item">
<div class="legend-edge medium-confidence"></div>
<span>Medium Confidence</span>
</div> </div>
<div class="legend-item"> <div class="legend-item">
<div class="legend-color" style="background-color: #ff6b6b;"></div> <div class="legend-color" style="background-color: #ff6b6b;"></div>
<span>Large Entity</span> <span>Certificate Authorities</span>
</div>
<div class="legend-item">
<div class="legend-color" style="background-color: #9d4edd;"></div>
<span>Correlation Objects</span>
</div> </div>
</div> </div>
</section> </section>
@@ -160,15 +166,15 @@
<div class="panel-header"> <div class="panel-header">
<h2>Data Providers</h2> <h2>Data Providers</h2>
</div> </div>
<div id="provider-list" class="provider-list"> <div id="provider-list" class="provider-list">
</div> </div>
</section> </section>
</main> </main>
<footer class="footer"> <footer class="footer">
<div class="footer-content"> <div class="footer-content">
<span>DNSRecon v1.0 - Phase 1 Implementation</span> <span>v0.0.0rc</span>
<span class="footer-separator">|</span> <span class="footer-separator">|</span>
<span>Passive Infrastructure Reconnaissance</span> <span>Passive Infrastructure Reconnaissance</span>
<span class="footer-separator">|</span> <span class="footer-separator">|</span>
@@ -184,57 +190,133 @@
</div> </div>
<div class="modal-body"> <div class="modal-body">
<div id="modal-details"> <div id="modal-details">
</div> </div>
</div> </div>
</div> </div>
</div> </div>
<div id="api-key-modal" class="modal"> <div id="settings-modal" class="modal">
<div class="modal-content"> <div class="modal-content">
<div class="modal-header"> <div class="modal-header">
<h3>Configure API Keys</h3> <h3>Scanner Configuration</h3>
<button id="api-key-modal-close" class="modal-close">[×]</button> <button id="settings-modal-close" class="modal-close">[×]</button>
</div> </div>
<div class="modal-body"> <div class="modal-body">
<p class="modal-description"> <div class="modal-details">
Enter your API keys for enhanced data providers. Keys are stored in memory for the current session only and are never saved to disk. <section class="modal-section">
</p> <details open>
<div class="apikey-section"> <summary>
<label for="virustotal-api-key">VirusTotal API Key</label> <span>⚙️ Scan Settings</span>
<input type="password" id="virustotal-api-key" placeholder="Enter VirusTotal API Key"> </summary>
<p class="apikey-help">Enables passive DNS and domain reputation lookups.</p> <div class="modal-section-content">
<div class="input-group">
<label for="max-depth">Recursion Depth</label>
<select id="max-depth">
<option value="1">Depth 1 - Direct relationships</option>
<option value="2" selected>Depth 2 - Recommended</option>
<option value="3">Depth 3 - Extended analysis</option>
<option value="4">Depth 4 - Deep reconnaissance</option>
<option value="5">Depth 5 - Maximum depth</option>
</select>
</div>
</div>
</details>
</section>
<section class="modal-section">
<details open>
<summary>
<span>🔧 Provider Configuration</span>
<span class="merge-badge" id="provider-count">0</span>
</summary>
<div class="modal-section-content">
<div id="provider-config-list">
</div>
</div>
</details>
</section>
<section class="modal-section">
<details>
<summary>
<span>🔑 API Keys</span>
<span class="merge-badge" id="api-key-count">0</span>
</summary>
<div class="modal-section-content">
<p class="placeholder-subtext" style="margin-bottom: 1rem;">
⚠️ API keys are stored in memory for the current session only.
Only provide API keys you don't use for anything else.
</p>
<div id="api-key-inputs">
</div>
</div>
</details>
</section>
<div class="button-group" style="margin-top: 1.5rem;">
<button id="save-settings" class="btn btn-primary">
<span class="btn-icon">[SAVE]</span>
<span>Save Configuration</span>
</button>
<button id="reset-settings" class="btn btn-secondary">
<span class="btn-icon">[RESET]</span>
<span>Reset to Defaults</span>
</button>
</div>
</div> </div>
<div class="apikey-section"> </div>
<label for="shodan-api-key">Shodan API Key</label> </div>
<input type="password" id="shodan-api-key" placeholder="Enter Shodan API Key"> </div>
<p class="apikey-help">Provides infrastructure context and service information.</p>
</div> <div id="export-modal" class="modal">
<div class="button-group" style="flex-direction: row; justify-content: flex-end;"> <div class="modal-content">
<button id="reset-api-keys" class="btn btn-secondary"> <div class="modal-header">
<span>Reset</span> <h3>Export Options</h3>
</button> <button id="export-modal-close" class="modal-close">[×]</button>
<button id="save-api-keys" class="btn btn-primary"> </div>
<span>Save Keys</span> <div class="modal-body">
</button> <div class="modal-details">
<section class="modal-section">
<details open>
<summary>
<span>📊 Available Exports</span>
</summary>
<div class="modal-section-content">
<div class="button-group" style="margin-top: 1rem;">
<button id="export-graph-json" class="btn btn-primary">
<span class="btn-icon">[JSON]</span>
<span>Export Graph Data</span>
</button>
<div class="status-row" style="margin-top: 0.5rem;">
<span class="status-label">Complete graph data with forensic audit trail,
provider statistics, and scan metadata in JSON format for analysis and
archival.</span>
</div>
<button id="export-targets-txt" class="btn btn-primary" style="margin-top: 1rem;">
<span class="btn-icon">[TXT]</span>
<span>Export Targets</span>
</button>
<div class="status-row" style="margin-top: 0.5rem;">
<span class="status-label">A simple text file containing all discovered domains and IP addresses.</span>
</div>
<button id="export-executive-summary" class="btn btn-primary" style="margin-top: 1rem;">
<span class="btn-icon">[TXT]</span>
<span>Export Executive Summary</span>
</button>
<div class="status-row" style="margin-top: 0.5rem;">
<span class="status-label">A natural-language summary of the scan findings.</span>
</div>
</div>
</div>
</details>
</section>
</div> </div>
</div> </div>
</div> </div>
</div> </div>
</div> </div>
<script>
function copyToClipboard(elementId) {
const element = document.getElementById(elementId);
const textToCopy = element.innerText;
navigator.clipboard.writeText(textToCopy).then(() => {
// Optional: Show a success message
console.log('Copied to clipboard');
}).catch(err => {
console.error('Failed to copy: ', err);
});
}
</script>
<script src="{{ url_for('static', filename='js/graph.js') }}"></script> <script src="{{ url_for('static', filename='js/graph.js') }}"></script>
<script src="{{ url_for('static', filename='js/main.js') }}"></script> <script src="{{ url_for('static', filename='js/main.js') }}"></script>
</body> </body>
</html> </html>
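The progress placeholder above describes a priority-based task queue in which short DNS lookups run before heavier crt.sh requests. The scheduler itself is not included in this diff, so the following is only a minimal sketch of that idea, with invented priority values:

import heapq
import itertools

# Hypothetical priorities: lower value = dequeued earlier.
PROVIDER_PRIORITY = {'dns': 0, 'shodan': 5, 'crtsh': 10}

class PriorityTaskQueue:
    def __init__(self):
        self._heap = []
        self._counter = itertools.count()  # tie-breaker keeps FIFO order within a priority

    def enqueue(self, provider: str, target: str) -> None:
        priority = PROVIDER_PRIORITY.get(provider, 5)
        heapq.heappush(self._heap, (priority, next(self._counter), provider, target))

    def dequeue(self):
        if not self._heap:
            return None
        _, _, provider, target = heapq.heappop(self._heap)
        return provider, target

q = PriorityTaskQueue()
q.enqueue('crtsh', 'example.com')
q.enqueue('dns', 'example.com')
print(q.dequeue())  # ('dns', 'example.com') -- the cheap DNS task jumps ahead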


@@ -0,0 +1,22 @@
# dnsrecon-reduced/utils/__init__.py
"""
Utility modules for DNSRecon.
Contains helper functions, export management, and supporting utilities.
"""
from .helpers import is_valid_target, _is_valid_domain, _is_valid_ip, get_ip_version, normalize_ip
from .export_manager import export_manager, ExportManager, CustomJSONEncoder
__all__ = [
'is_valid_target',
'_is_valid_domain',
'_is_valid_ip',
'get_ip_version',
'normalize_ip',
'export_manager',
'ExportManager',
'CustomJSONEncoder'
]
__version__ = "1.0.0"

utils/export_manager.py

@@ -0,0 +1,849 @@
# dnsrecon-reduced/utils/export_manager.py
"""
Centralized export functionality for DNSRecon.
Handles all data export operations with forensic integrity and proper formatting.
ENHANCED: Professional forensic executive summary generation for court-ready documentation.
"""
import json
from datetime import datetime, timezone
from typing import Dict, Any, List, Optional, Set, Tuple
from decimal import Decimal
from collections import defaultdict, Counter
import networkx as nx
from utils.helpers import _is_valid_domain, _is_valid_ip
class ExportManager:
"""
Centralized manager for all DNSRecon export operations.
Maintains forensic integrity and provides consistent export formats.
ENHANCED: Advanced forensic analysis and professional reporting capabilities.
"""
def __init__(self):
"""Initialize export manager."""
pass
def export_scan_results(self, scanner) -> Dict[str, Any]:
"""
Export complete scan results with forensic metadata.
Args:
scanner: Scanner instance with completed scan data
Returns:
Complete scan results dictionary
"""
graph_data = self.export_graph_json(scanner.graph)
audit_trail = scanner.logger.export_audit_trail()
provider_stats = {}
for provider in scanner.providers:
provider_stats[provider.get_name()] = provider.get_statistics()
results = {
'scan_metadata': {
'target_domain': scanner.current_target,
'max_depth': scanner.max_depth,
'final_status': scanner.status,
'total_indicators_processed': scanner.indicators_processed,
'enabled_providers': list(provider_stats.keys()),
'session_id': scanner.session_id
},
'graph_data': graph_data,
'forensic_audit': audit_trail,
'provider_statistics': provider_stats,
'scan_summary': scanner.logger.get_forensic_summary()
}
# Add export metadata
results['export_metadata'] = {
'export_timestamp': datetime.now(timezone.utc).isoformat(),
'export_version': '1.0.0',
'forensic_integrity': 'maintained'
}
return results
def export_targets_list(self, scanner) -> str:
"""
Export all discovered domains and IPs as a text file.
Args:
scanner: Scanner instance with graph data
Returns:
Newline-separated list of targets
"""
nodes = scanner.graph.get_graph_data().get('nodes', [])
targets = {
node['id'] for node in nodes
if _is_valid_domain(node['id']) or _is_valid_ip(node['id'])
}
return "\n".join(sorted(list(targets)))
def generate_executive_summary(self, scanner) -> str:
"""
ENHANCED: Generate a comprehensive, court-ready forensic executive summary.
Args:
scanner: Scanner instance with completed scan data
Returns:
Professional forensic summary formatted for investigative use
"""
report = []
now = datetime.now(timezone.utc).strftime('%Y-%m-%d %H:%M:%S UTC')
# Get comprehensive data for analysis
graph_data = scanner.graph.get_graph_data()
nodes = graph_data.get('nodes', [])
edges = graph_data.get('edges', [])
audit_trail = scanner.logger.export_audit_trail()
# Perform advanced analysis
infrastructure_analysis = self._analyze_infrastructure_patterns(nodes, edges)
# === HEADER AND METADATA ===
report.extend([
"=" * 80,
"DIGITAL INFRASTRUCTURE RECONNAISSANCE REPORT",
"=" * 80,
"",
f"Report Generated: {now}",
f"Investigation Target: {scanner.current_target}",
f"Analysis Session: {scanner.session_id}",
f"Scan Depth: {scanner.max_depth} levels",
f"Final Status: {scanner.status.upper()}",
""
])
# === EXECUTIVE SUMMARY ===
report.extend([
"EXECUTIVE SUMMARY",
"-" * 40,
"",
f"This report presents the findings of a comprehensive passive reconnaissance analysis "
f"conducted against the target '{scanner.current_target}'. The investigation employed "
f"multiple intelligence sources and discovered {len(nodes)} distinct digital entities "
f"connected through {len(edges)} verified relationships.",
"",
f"The analysis reveals a digital infrastructure comprising {infrastructure_analysis['domains']} "
f"domain names, {infrastructure_analysis['ips']} IP addresses, and {infrastructure_analysis['isps']} "
f"infrastructure service providers. Certificate transparency analysis identified "
f"{infrastructure_analysis['cas']} certificate authorities managing the cryptographic "
f"infrastructure for the investigated entities.",
"",
])
# === METHODOLOGY ===
report.extend([
"INVESTIGATIVE METHODOLOGY",
"-" * 40,
"",
"This analysis employed passive reconnaissance techniques using the following verified data sources:",
""
])
provider_info = {
'dns': 'Standard DNS resolution and reverse DNS lookups',
'crtsh': 'Certificate Transparency database analysis via crt.sh',
'shodan': 'Internet-connected device intelligence via Shodan API'
}
for provider in scanner.providers:
provider_name = provider.get_name()
stats = provider.get_statistics()
description = provider_info.get(provider_name, f'{provider_name} data provider')
report.extend([
f"{provider.get_display_name()}: {description}",
f" - Total Requests: {stats['total_requests']}",
f" - Success Rate: {stats['success_rate']:.1f}%",
f" - Relationships Discovered: {stats['relationships_found']}",
""
])
# === INFRASTRUCTURE ANALYSIS ===
report.extend([
"INFRASTRUCTURE ANALYSIS",
"-" * 40,
""
])
# Domain Analysis
if infrastructure_analysis['domains'] > 0:
report.extend([
f"Domain Name Infrastructure ({infrastructure_analysis['domains']} entities):",
""
])
domain_details = self._get_detailed_domain_analysis(nodes, edges)
for domain_info in domain_details[:10]: # Top 10 domains
report.extend([
f"{domain_info['domain']}",
f" - Type: {domain_info['classification']}",
f" - Connected IPs: {len(domain_info['ips'])}",
f" - Certificate Status: {domain_info['cert_status']}",
f" - Relationship Confidence: {domain_info['avg_confidence']:.2f}",
])
if domain_info['security_notes']:
report.extend([
f" - Security Notes: {', '.join(domain_info['security_notes'])}",
])
report.append("")
# IP Address Analysis
if infrastructure_analysis['ips'] > 0:
report.extend([
f"IP Address Infrastructure ({infrastructure_analysis['ips']} entities):",
""
])
ip_details = self._get_detailed_ip_analysis(nodes, edges)
for ip_info in ip_details[:8]: # Top 8 IPs
report.extend([
f"{ip_info['ip']} ({ip_info['version']})",
f" - Associated Domains: {len(ip_info['domains'])}",
f" - ISP: {ip_info['isp'] or 'Unknown'}",
f" - Geographic Location: {ip_info['location'] or 'Not determined'}",
])
if ip_info['open_ports']:
report.extend([
f" - Exposed Services: {', '.join(map(str, ip_info['open_ports'][:5]))}"
+ (f" (and {len(ip_info['open_ports']) - 5} more)" if len(ip_info['open_ports']) > 5 else ""),
])
report.append("")
# === RELATIONSHIP ANALYSIS ===
report.extend([
"ENTITY RELATIONSHIP ANALYSIS",
"-" * 40,
""
])
# Network topology insights
topology = self._analyze_network_topology(nodes, edges)
report.extend([
f"Network Topology Assessment:",
f"• Central Hubs: {len(topology['hubs'])} entities serve as primary connection points",
f"• Isolated Clusters: {len(topology['clusters'])} distinct groupings identified",
f"• Relationship Density: {topology['density']:.3f} (0=sparse, 1=fully connected)",
f"• Average Path Length: {topology['avg_path_length']:.2f} degrees of separation",
""
])
# Key relationships
key_relationships = self._identify_key_relationships(edges)
if key_relationships:
report.extend([
"Critical Infrastructure Relationships:",
""
])
for rel in key_relationships[:8]: # Top 8 relationships
confidence_desc = self._describe_confidence(rel['confidence'])
report.extend([
f"{rel['source']} → {rel['target']}",
f" - Relationship: {self._humanize_relationship_type(rel['type'])}",
f" - Evidence Strength: {confidence_desc} ({rel['confidence']:.2f})",
f" - Discovery Method: {rel['provider']}",
""
])
# === CERTIFICATE ANALYSIS ===
cert_analysis = self._analyze_certificate_infrastructure(nodes)
if cert_analysis['total_certs'] > 0:
report.extend([
"CERTIFICATE INFRASTRUCTURE ANALYSIS",
"-" * 40,
"",
f"Certificate Status Overview:",
f"• Total Certificates Analyzed: {cert_analysis['total_certs']}",
f"• Valid Certificates: {cert_analysis['valid']}",
f"• Expired/Invalid: {cert_analysis['expired']}",
f"• Certificate Authorities: {len(cert_analysis['cas'])}",
""
])
if cert_analysis['cas']:
report.extend([
"Certificate Authority Distribution:",
""
])
for ca, count in cert_analysis['cas'].most_common(5):
report.extend([
f"{ca}: {count} certificate(s)",
])
report.append("")
# === TECHNICAL APPENDIX ===
report.extend([
"TECHNICAL APPENDIX",
"-" * 40,
"",
"Data Quality Assessment:",
f"• Total API Requests: {audit_trail.get('session_metadata', {}).get('total_requests', 0)}",
f"• Data Providers Used: {len(audit_trail.get('session_metadata', {}).get('providers_used', []))}",
f"• Relationship Confidence Distribution:",
])
# Confidence distribution
confidence_dist = self._calculate_confidence_distribution(edges)
for level, count in confidence_dist.items():
percentage = (count / len(edges) * 100) if edges else 0
report.extend([
f" - {level.title()} Confidence (≥{self._get_confidence_threshold(level)}): {count} ({percentage:.1f}%)",
])
report.extend([
"",
"Correlation Analysis:",
f"• Entity Correlations Identified: {len(scanner.graph.correlation_index)}",
f"• Cross-Reference Validation: {self._count_cross_validated_relationships(edges)} relationships verified by multiple sources",
""
])
# === CONCLUSION ===
report.extend([
"CONCLUSION",
"-" * 40,
"",
self._generate_conclusion(scanner.current_target, infrastructure_analysis,
len(edges)),
"",
"This analysis was conducted using passive reconnaissance techniques and represents "
"the digital infrastructure observable through public data sources at the time of investigation. "
"All findings are supported by verifiable technical evidence and documented through "
"a complete audit trail maintained for forensic integrity.",
"",
f"Investigation completed: {now}",
f"Report authenticated by: DNSRecon v{self._get_version()}",
"",
"=" * 80,
"END OF REPORT",
"=" * 80
])
return "\n".join(report)
def _analyze_infrastructure_patterns(self, nodes: List[Dict], edges: List[Dict]) -> Dict[str, Any]:
"""Analyze infrastructure patterns and classify entities."""
analysis = {
'domains': len([n for n in nodes if n['type'] == 'domain']),
'ips': len([n for n in nodes if n['type'] == 'ip']),
'isps': len([n for n in nodes if n['type'] == 'isp']),
'cas': len([n for n in nodes if n['type'] == 'ca']),
'correlations': len([n for n in nodes if n['type'] == 'correlation_object'])
}
return analysis
def _get_detailed_domain_analysis(self, nodes: List[Dict], edges: List[Dict]) -> List[Dict[str, Any]]:
"""Generate detailed analysis for each domain."""
domain_nodes = [n for n in nodes if n['type'] == 'domain']
domain_analysis = []
for domain in domain_nodes:
# Find connected IPs
connected_ips = [e['to'] for e in edges
if e['from'] == domain['id'] and _is_valid_ip(e['to'])]
# Determine classification
classification = "Primary Domain"
if domain['id'].startswith('www.'):
classification = "Web Interface"
elif any(subdomain in domain['id'] for subdomain in ['api.', 'mail.', 'smtp.']):
classification = "Service Endpoint"
elif domain['id'].count('.') > 1:
classification = "Subdomain"
# Certificate status
cert_status = self._determine_certificate_status(domain)
# Security notes
security_notes = []
if cert_status == "Expired/Invalid":
security_notes.append("Certificate validation issues")
if len(connected_ips) == 0:
security_notes.append("No IP resolution found")
if len(connected_ips) > 5:
security_notes.append("Multiple IP endpoints")
# Average confidence
domain_edges = [e for e in edges if e['from'] == domain['id']]
avg_confidence = sum(e['confidence_score'] for e in domain_edges) / len(domain_edges) if domain_edges else 0
domain_analysis.append({
'domain': domain['id'],
'classification': classification,
'ips': connected_ips,
'cert_status': cert_status,
'security_notes': security_notes,
'avg_confidence': avg_confidence
})
# Sort by number of connections (most connected first)
return sorted(domain_analysis, key=lambda x: len(x['ips']), reverse=True)
def _get_detailed_ip_analysis(self, nodes: List[Dict], edges: List[Dict]) -> List[Dict[str, Any]]:
"""Generate detailed analysis for each IP address."""
ip_nodes = [n for n in nodes if n['type'] == 'ip']
ip_analysis = []
for ip in ip_nodes:
# Find connected domains
connected_domains = [e['from'] for e in edges
if e['to'] == ip['id'] and _is_valid_domain(e['from'])]
# Extract metadata from attributes
ip_version = "IPv4"
location = None
isp = None
open_ports = []
for attr in ip.get('attributes', []):
if attr.get('name') == 'country':
location = attr.get('value')
elif attr.get('name') == 'org':
isp = attr.get('value')
elif attr.get('name') == 'shodan_open_port':
open_ports.append(attr.get('value'))
elif 'ipv6' in str(attr.get('metadata', {})).lower():
ip_version = "IPv6"
# Find ISP from relationships
if not isp:
isp_edges = [e for e in edges if e['from'] == ip['id'] and e['label'].endswith('_isp')]
isp = isp_edges[0]['to'] if isp_edges else None
ip_analysis.append({
'ip': ip['id'],
'version': ip_version,
'domains': connected_domains,
'isp': isp,
'location': location,
'open_ports': open_ports
})
# Sort by number of connected domains
return sorted(ip_analysis, key=lambda x: len(x['domains']), reverse=True)
def _analyze_network_topology(self, nodes: List[Dict], edges: List[Dict]) -> Dict[str, Any]:
"""Analyze network topology and identify key structural patterns."""
if not nodes or not edges:
return {'hubs': [], 'clusters': [], 'density': 0, 'avg_path_length': 0}
# Create NetworkX graph
G = nx.DiGraph()
for node in nodes:
G.add_node(node['id'])
for edge in edges:
G.add_edge(edge['from'], edge['to'])
# Convert to undirected for certain analyses
G_undirected = G.to_undirected()
# Identify hubs (nodes with high degree centrality)
centrality = nx.degree_centrality(G_undirected)
hub_threshold = max(centrality.values()) * 0.7 if centrality else 0
hubs = [node for node, cent in centrality.items() if cent >= hub_threshold]
# Find connected components (clusters)
clusters = list(nx.connected_components(G_undirected))
# Calculate density
density = nx.density(G_undirected)
# Calculate average path length (for largest component)
if G_undirected.number_of_nodes() > 1:
largest_cc = max(nx.connected_components(G_undirected), key=len)
subgraph = G_undirected.subgraph(largest_cc)
try:
avg_path_length = nx.average_shortest_path_length(subgraph)
except Exception:
avg_path_length = 0
else:
avg_path_length = 0
return {
'hubs': hubs,
'clusters': clusters,
'density': density,
'avg_path_length': avg_path_length
}
def _identify_key_relationships(self, edges: List[Dict]) -> List[Dict[str, Any]]:
"""Identify the most significant relationships in the infrastructure."""
# Score relationships by confidence and type importance
relationship_importance = {
'dns_a_record': 0.9,
'dns_aaaa_record': 0.9,
'crtsh_cert_issuer': 0.8,
'shodan_isp': 0.8,
'crtsh_san_certificate': 0.7,
'dns_mx_record': 0.7,
'dns_ns_record': 0.7
}
scored_edges = []
for edge in edges:
base_confidence = edge.get('confidence_score', 0)
type_weight = relationship_importance.get(edge.get('label', ''), 0.5)
combined_score = (base_confidence * 0.7) + (type_weight * 0.3)
scored_edges.append({
'source': edge['from'],
'target': edge['to'],
'type': edge.get('label', ''),
'confidence': base_confidence,
'provider': edge.get('source_provider', ''),
'score': combined_score
})
# Return top relationships by score
return sorted(scored_edges, key=lambda x: x['score'], reverse=True)
def _analyze_certificate_infrastructure(self, nodes: List[Dict]) -> Dict[str, Any]:
"""Analyze certificate infrastructure across all domains."""
domain_nodes = [n for n in nodes if n['type'] == 'domain']
ca_nodes = [n for n in nodes if n['type'] == 'ca']
valid_certs = 0
expired_certs = 0
total_certs = 0
cas = Counter()
for domain in domain_nodes:
for attr in domain.get('attributes', []):
if attr.get('name') == 'cert_is_currently_valid':
total_certs += 1
if attr.get('value') is True:
valid_certs += 1
else:
expired_certs += 1
elif attr.get('name') == 'cert_issuer_name':
issuer = attr.get('value')
if issuer:
cas[issuer] += 1
return {
'total_certs': total_certs,
'valid': valid_certs,
'expired': expired_certs,
'cas': cas
}
def _has_expired_certificates(self, domain_node: Dict) -> bool:
"""Check if domain has expired certificates."""
for attr in domain_node.get('attributes', []):
if (attr.get('name') == 'cert_is_currently_valid' and
attr.get('value') is False):
return True
return False
def _determine_certificate_status(self, domain_node: Dict) -> str:
"""Determine the certificate status for a domain."""
has_valid = False
has_expired = False
has_any = False
for attr in domain_node.get('attributes', []):
if attr.get('name') == 'cert_is_currently_valid':
has_any = True
if attr.get('value') is True:
has_valid = True
else:
has_expired = True
if not has_any:
return "No Certificate Data"
elif has_valid and not has_expired:
return "Valid"
elif has_expired and not has_valid:
return "Expired/Invalid"
else:
return "Mixed Status"
def _describe_confidence(self, confidence: float) -> str:
"""Convert confidence score to descriptive text."""
if confidence >= 0.9:
return "Very High"
elif confidence >= 0.8:
return "High"
elif confidence >= 0.6:
return "Medium"
elif confidence >= 0.4:
return "Low"
else:
return "Very Low"
def _humanize_relationship_type(self, rel_type: str) -> str:
"""Convert technical relationship types to human-readable descriptions."""
type_map = {
'dns_a_record': 'DNS A Record Resolution',
'dns_aaaa_record': 'DNS AAAA Record (IPv6) Resolution',
'dns_mx_record': 'Email Server (MX) Configuration',
'dns_ns_record': 'Name Server Delegation',
'dns_cname_record': 'DNS Alias (CNAME) Resolution',
'crtsh_cert_issuer': 'SSL Certificate Issuer Relationship',
'crtsh_san_certificate': 'Shared SSL Certificate',
'shodan_isp': 'Internet Service Provider Assignment',
'shodan_a_record': 'IP-to-Domain Resolution (Shodan)',
'dns_ptr_record': 'Reverse DNS Resolution'
}
return type_map.get(rel_type, rel_type.replace('_', ' ').title())
def _calculate_confidence_distribution(self, edges: List[Dict]) -> Dict[str, int]:
"""Calculate confidence score distribution."""
distribution = {'high': 0, 'medium': 0, 'low': 0}
for edge in edges:
confidence = edge.get('confidence_score', 0)
if confidence >= 0.8:
distribution['high'] += 1
elif confidence >= 0.6:
distribution['medium'] += 1
else:
distribution['low'] += 1
return distribution
def _get_confidence_threshold(self, level: str) -> str:
"""Get confidence threshold for a level."""
thresholds = {'high': '0.80', 'medium': '0.60', 'low': '0.00'}
return thresholds.get(level, '0.00')
def _count_cross_validated_relationships(self, edges: List[Dict]) -> int:
"""Count relationships verified by multiple providers."""
# Group edges by source-target pair
edge_pairs = defaultdict(list)
for edge in edges:
pair_key = f"{edge['from']}->{edge['to']}"
edge_pairs[pair_key].append(edge.get('source_provider', ''))
# Count pairs with multiple providers
cross_validated = 0
for pair, providers in edge_pairs.items():
if len(set(providers)) > 1: # Multiple unique providers
cross_validated += 1
return cross_validated
def _generate_security_recommendations(self, infrastructure_analysis: Dict) -> List[str]:
"""Generate actionable security recommendations."""
recommendations = []
# Check for complex infrastructure
if infrastructure_analysis['ips'] > 10:
recommendations.append(
"Document and validate the necessity of extensive IP address infrastructure"
)
if infrastructure_analysis['correlations'] > 5:
recommendations.append(
"Investigate shared infrastructure components for operational security implications"
)
if not recommendations:
recommendations.append(
"Continue monitoring for changes in the identified digital infrastructure"
)
return recommendations
def _generate_conclusion(self, target: str, infrastructure_analysis: Dict, total_relationships: int) -> str:
"""Generate a professional conclusion for the report."""
conclusion_parts = [
f"The passive reconnaissance analysis of '{target}' has successfully mapped "
f"a digital infrastructure ecosystem consisting of {infrastructure_analysis['domains']} "
f"domain names, {infrastructure_analysis['ips']} IP addresses, and "
f"{total_relationships} verified inter-entity relationships."
]
conclusion_parts.append(
"All findings in this report are based on publicly available information and "
"passive reconnaissance techniques. The analysis maintains full forensic integrity "
"with complete audit trails for all data collection activities."
)
return " ".join(conclusion_parts)
def _count_bidirectional_relationships(self, graph) -> int:
"""Count bidirectional relationships in the graph."""
count = 0
for u, v in graph.edges():
if graph.has_edge(v, u):
count += 1
return count // 2 # Each pair counted twice
def _identify_hub_nodes(self, graph, nodes: List[Dict]) -> List[str]:
"""Identify nodes that serve as major hubs in the network."""
if not graph.nodes():
return []
degree_centrality = nx.degree_centrality(graph.to_undirected())
threshold = max(degree_centrality.values()) * 0.8 if degree_centrality else 0
return [node for node, centrality in degree_centrality.items()
if centrality >= threshold]
def _get_version(self) -> str:
"""Get DNSRecon version for report authentication."""
return "1.0.0-forensic"
def export_graph_json(self, graph_manager) -> Dict[str, Any]:
"""
Export complete graph data as a JSON-serializable dictionary.
Moved from GraphManager to centralize export functionality.
Args:
graph_manager: GraphManager instance with graph data
Returns:
Complete graph data with export metadata
"""
graph_data = nx.node_link_data(graph_manager.graph, edges="edges")
return {
'export_metadata': {
'export_timestamp': datetime.now(timezone.utc).isoformat(),
'graph_creation_time': graph_manager.creation_time,
'last_modified': graph_manager.last_modified,
'total_nodes': graph_manager.get_node_count(),
'total_edges': graph_manager.get_edge_count(),
'graph_format': 'dnsrecon_v1_unified_model'
},
'graph': graph_data,
'statistics': graph_manager.get_statistics()
}
def serialize_to_json(self, data: Dict[str, Any], indent: int = 2) -> str:
"""
Serialize data to JSON with custom handling for non-serializable objects.
Args:
data: Data to serialize
indent: JSON indentation level
Returns:
JSON string representation
"""
try:
return json.dumps(data, indent=indent, cls=CustomJSONEncoder, ensure_ascii=False)
except Exception:
# Fallback to aggressive cleaning
cleaned_data = self._clean_for_json(data)
return json.dumps(cleaned_data, indent=indent, ensure_ascii=False)
def _clean_for_json(self, obj, max_depth: int = 10, current_depth: int = 0) -> Any:
"""
Recursively clean an object to make it JSON serializable.
Handles circular references and problematic object types.
Args:
obj: Object to clean
max_depth: Maximum recursion depth
current_depth: Current recursion depth
Returns:
JSON-serializable object
"""
if current_depth > max_depth:
return f"<max_depth_exceeded_{type(obj).__name__}>"
if obj is None or isinstance(obj, (bool, int, float, str)):
return obj
elif isinstance(obj, datetime):
return obj.isoformat()
elif isinstance(obj, (set, frozenset)):
return list(obj)
elif isinstance(obj, dict):
cleaned = {}
for key, value in obj.items():
try:
# Ensure key is string
clean_key = str(key) if not isinstance(key, str) else key
cleaned[clean_key] = self._clean_for_json(value, max_depth, current_depth + 1)
except Exception:
cleaned[str(key)] = f"<serialization_error_{type(value).__name__}>"
return cleaned
elif isinstance(obj, (list, tuple)):
cleaned = []
for item in obj:
try:
cleaned.append(self._clean_for_json(item, max_depth, current_depth + 1))
except Exception:
cleaned.append(f"<serialization_error_{type(item).__name__}>")
return cleaned
elif hasattr(obj, '__dict__'):
try:
return self._clean_for_json(obj.__dict__, max_depth, current_depth + 1)
except Exception:
return str(obj)
elif hasattr(obj, 'value'):
# For enum-like objects
return obj.value
else:
return str(obj)
def generate_filename(self, target: str, export_type: str, timestamp: Optional[datetime] = None) -> str:
"""
Generate standardized filename for exports.
Args:
target: Target domain/IP being scanned
export_type: Type of export (json, txt, summary)
timestamp: Optional timestamp (defaults to now)
Returns:
Formatted filename with forensic naming convention
"""
if timestamp is None:
timestamp = datetime.now(timezone.utc)
timestamp_str = timestamp.strftime('%Y%m%d_%H%M%S')
safe_target = "".join(c for c in target if c.isalnum() or c in ('-', '_', '.')).rstrip()
extension_map = {
'json': 'json',
'txt': 'txt',
'summary': 'txt',
'targets': 'txt'
}
extension = extension_map.get(export_type, 'txt')
return f"dnsrecon_{export_type}_{safe_target}_{timestamp_str}.{extension}"
class CustomJSONEncoder(json.JSONEncoder):
"""Custom JSON encoder to handle non-serializable objects."""
def default(self, obj):
if isinstance(obj, datetime):
return obj.isoformat()
elif isinstance(obj, set):
return list(obj)
elif isinstance(obj, Decimal):
return float(obj)
elif hasattr(obj, '__dict__'):
# For custom objects, try to serialize their dict representation
try:
return obj.__dict__
except Exception:
return str(obj)
elif hasattr(obj, 'value') and hasattr(obj, 'name'):
# For enum objects
return obj.value
else:
# For any other non-serializable object, convert to string
return str(obj)
# Global export manager instance
export_manager = ExportManager()
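Since the module ends by exposing a global export_manager instance, a typical call sequence against a finished scan could look like the sketch below (the scanner object is assumed to come from the rest of the application and is not constructed here):

# Sketch only: `scanner` is assumed to be a completed Scanner instance.
from utils.export_manager import export_manager

def write_exports(scanner, target: str) -> None:
    results = export_manager.export_scan_results(scanner)
    with open(export_manager.generate_filename(target, 'json'), 'w', encoding='utf-8') as fh:
        fh.write(export_manager.serialize_to_json(results))
    with open(export_manager.generate_filename(target, 'targets'), 'w', encoding='utf-8') as fh:
        fh.write(export_manager.export_targets_list(scanner))
    with open(export_manager.generate_filename(target, 'summary'), 'w', encoding='utf-8') as fh:
        fh.write(export_manager.generate_executive_summary(scanner))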


@@ -1,3 +1,8 @@
+# dnsrecon-reduced/utils/helpers.py
+import ipaddress
+from typing import Union
+
+
 def _is_valid_domain(domain: str) -> bool:
     """
     Basic domain validation.
@@ -26,25 +31,64 @@ def _is_valid_domain(domain: str) -> bool:
 def _is_valid_ip(ip: str) -> bool:
     """
-    Basic IP address validation.
+    IP address validation supporting both IPv4 and IPv6.
     Args:
         ip: IP address string to validate
     Returns:
-        True if IP appears valid
+        True if IP appears valid (IPv4 or IPv6)
     """
+    if not ip:
+        return False
     try:
-        parts = ip.split('.')
-        if len(parts) != 4:
-            return False
-        for part in parts:
-            num = int(part)
-            if not 0 <= num <= 255:
-                return False
+        # This handles both IPv4 and IPv6 validation
+        ipaddress.ip_address(ip.strip())
         return True
     except (ValueError, AttributeError):
         return False
+
+
+def is_valid_target(target: str) -> bool:
+    """
+    Checks if the target is a valid domain or IP address (IPv4/IPv6).
+
+    Args:
+        target: The target string to validate.
+
+    Returns:
+        True if the target is a valid domain or IP, False otherwise.
+    """
+    return _is_valid_domain(target) or _is_valid_ip(target)
+
+
+def get_ip_version(ip: str) -> Union[int, None]:
+    """
+    Get the IP version (4 or 6) of a valid IP address.
+
+    Args:
+        ip: IP address string
+
+    Returns:
+        4 for IPv4, 6 for IPv6, None if invalid
+    """
+    try:
+        addr = ipaddress.ip_address(ip.strip())
+        return addr.version
+    except (ValueError, AttributeError):
+        return None
+
+
+def normalize_ip(ip: str) -> Union[str, None]:
+    """
+    Normalize an IP address to its canonical form.
+
+    Args:
+        ip: IP address string
+
+    Returns:
+        Normalized IP address string, None if invalid
+    """
+    try:
+        addr = ipaddress.ip_address(ip.strip())
+        return str(addr)
+    except (ValueError, AttributeError):
+        return None
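A quick illustration of how the reworked helpers behave (example values only):

from utils.helpers import is_valid_target, get_ip_version, normalize_ip

print(is_valid_target('example.com'))   # True  (valid domain)
print(is_valid_target('2001:db8::1'))   # True  (IPv6 is now accepted)
print(get_ip_version('8.8.8.8'))        # 4
print(get_ip_version('not-an-ip'))      # None
print(normalize_ip('2001:0DB8:0000:0000:0000:0000:0000:0001'))  # 2001:db8::1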