forensic-ai #4

Merged
mstoeck3 merged 20 commits from forensic-ai into main 2025-08-05 20:56:02 +00:00
4 changed files with 400 additions and 138 deletions
Showing only changes of commit ec1969b2e2

View File

@@ -1,17 +1,17 @@
 # ============================================================================
-# ForensicPathways Environment Configuration
+# ForensicPathways Environment Configuration - COMPLETE
 # ============================================================================
 # Copy this file to .env and adjust the values below.
-# Settings are ordered by likelihood of needing adjustment during setup.
+# This file covers ALL environment variables used in the codebase.
 # ============================================================================
-# 1. CORE APPLICATION SETTINGS (REQUIRED - ADJUST FOR YOUR SETUP)
+# 1. CORE APPLICATION SETTINGS (REQUIRED)
 # ============================================================================
 # Your application's public URL (used for redirects and links)
 PUBLIC_BASE_URL=http://localhost:4321
-# Application environment (development, production, staging)
+# Application environment
 NODE_ENV=development
 # Secret key for session encryption (CHANGE IN PRODUCTION!)
@@ -22,19 +22,99 @@ AUTH_SECRET=your-secret-key-change-in-production-please
 # ============================================================================
 # Main AI Analysis Service (for query processing and recommendations)
-# Example uses Mistral AI - adjust endpoint/model as needed
-AI_ANALYZER_ENDPOINT=https://api.mistral.ai/v1
-AI_ANALYZER_API_KEY=your-mistral-api-key-here
-AI_ANALYZER_MODEL=mistral-small-latest
-# Vector Embeddings Service (for semantic search - can use same provider)
+# Examples: http://localhost:11434 (Ollama), https://api.mistral.ai, https://api.openai.com
+AI_ANALYZER_ENDPOINT=https://api.mistral.ai/v1/chat/completions
+AI_ANALYZER_API_KEY=
+AI_ANALYZER_MODEL=mistral/mistral-small-latest
+# Vector Embeddings Service (for semantic search)
+# Leave API_KEY empty for Ollama, use actual key for cloud services
 AI_EMBEDDINGS_ENABLED=true
 AI_EMBEDDINGS_ENDPOINT=https://api.mistral.ai/v1/embeddings
-AI_EMBEDDINGS_API_KEY=your-mistral-api-key-here
+AI_EMBEDDINGS_API_KEY=
 AI_EMBEDDINGS_MODEL=mistral-embed
 # ============================================================================
-# 3. AUTHENTICATION (OPTIONAL - SET TO 'true' IF NEEDED)
+# 3. AI PIPELINE CONFIGURATION (CONTEXT & PERFORMANCE TUNING)
+# ============================================================================
+# === SIMILARITY SEARCH STAGE ===
+# How many similar tools/concepts embeddings search returns as candidates
+# 🔍 This is the FIRST filter - vector similarity matching
+# Lower = faster, less comprehensive | Higher = slower, more comprehensive
+AI_EMBEDDING_CANDIDATES=40
+# Minimum similarity score threshold (0.0-1.0)
+# Lower = more results but less relevant | Higher = fewer but more relevant
+AI_SIMILARITY_THRESHOLD=0.3
+# === AI SELECTION STAGE ===
+# Maximum tools the AI can select from embedding candidates
+# 🤖 This is the SECOND filter - AI intelligent selection
+# Should be ≤ AI_EMBEDDING_CANDIDATES
+AI_MAX_SELECTED_ITEMS=25
+# Maximum tools sent to AI for detailed analysis (micro-tasks)
+# 📋 This is the FINAL context size sent to AI models
+# Lower = less AI context, faster responses | Higher = more context, slower
+AI_MAX_TOOLS_TO_ANALYZE=20
+# Maximum concepts sent to AI for background knowledge selection
+# 📚 Concepts are smaller than tools, so this can be higher
+AI_MAX_CONCEPTS_TO_ANALYZE=10
+# === CONTEXT FLOW SUMMARY ===
+# 1. Vector Search: 111 total tools → AI_EMBEDDING_CANDIDATES (40) most similar
+# 2. AI Selection: 40 candidates → AI_MAX_SELECTED_ITEMS (25) best matches
+# 3. AI Analysis: 25 selected → AI_MAX_TOOLS_TO_ANALYZE (20) for micro-tasks
+# 4. Final Output: Recommendations based on analyzed subset
+# ============================================================================
+# 4. AI PERFORMANCE & RATE LIMITING
+# ============================================================================
+# === USER RATE LIMITS (per minute) ===
+# Main queries per user per minute
+AI_RATE_LIMIT_MAX_REQUESTS=4
+# Total AI micro-task calls per user per minute (across all micro-tasks)
+AI_MICRO_TASK_TOTAL_LIMIT=30
+# === PIPELINE TIMING ===
+# Delay between micro-tasks within a single query (milliseconds)
+# Higher = gentler on AI service | Lower = faster responses
+AI_MICRO_TASK_DELAY_MS=500
+# Delay between queued requests (milliseconds)
+AI_RATE_LIMIT_DELAY_MS=2000
+# === EMBEDDINGS BATCH PROCESSING ===
+# How many embeddings to generate per API call
+AI_EMBEDDINGS_BATCH_SIZE=10
+# Delay between embedding batches (milliseconds)
+AI_EMBEDDINGS_BATCH_DELAY_MS=1000
+# ============================================================================
+# 5. AI CONTEXT & TOKEN MANAGEMENT
+# ============================================================================
+# Maximum context tokens to maintain across micro-tasks
+# Controls how much conversation history is preserved between AI calls
+AI_MAX_CONTEXT_TOKENS=3000
+# Maximum tokens per individual AI prompt
+# Larger = more context per call | Smaller = faster responses
+AI_MAX_PROMPT_TOKENS=1200
+# Timeout for individual micro-tasks (milliseconds)
+AI_MICRO_TASK_TIMEOUT_MS=25000
+# Maximum size of the processing queue
+AI_QUEUE_MAX_SIZE=50
+# ============================================================================
+# 6. AUTHENTICATION & AUTHORIZATION (OPTIONAL)
 # ============================================================================
 # Enable authentication for different features
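The CONTEXT FLOW SUMMARY above describes three successive filters, each of which can only shrink the candidate set (which is why AI_MAX_SELECTED_ITEMS should stay ≤ AI_EMBEDDING_CANDIDATES). A minimal sketch of that narrowing, assuming the import path and an aiSelect stand-in for the pipeline's AI selection step; only findSimilar corresponds to a real function changed in this PR:

// Sketch of the three-stage narrowing driven by the env knobs above.
// aiSelect is hypothetical; a real implementation asks the model to choose.
import { embeddingsService, type SimilarityResult } from './utils/embeddings.js'; // path assumed

const envInt = (name: string, fallback: number): number =>
  Number.parseInt(process.env[name] ?? '', 10) || fallback;

async function aiSelect(query: string, candidates: SimilarityResult[]): Promise<SimilarityResult[]> {
  return candidates; // placeholder stand-in for the AI selection micro-task
}

async function narrowCandidates(query: string): Promise<SimilarityResult[]> {
  const threshold = Number(process.env.AI_SIMILARITY_THRESHOLD ?? '0.3');
  // Stage 1: vector search keeps at most AI_EMBEDDING_CANDIDATES items
  const candidates = await embeddingsService.findSimilar(
    query,
    envInt('AI_EMBEDDING_CANDIDATES', 40),
    threshold
  );
  // Stage 2: AI selection keeps at most AI_MAX_SELECTED_ITEMS of those
  const selected = (await aiSelect(query, candidates))
    .slice(0, envInt('AI_MAX_SELECTED_ITEMS', 25));
  // Stage 3: only AI_MAX_TOOLS_TO_ANALYZE items reach the analysis micro-tasks
  return selected.slice(0, envInt('AI_MAX_TOOLS_TO_ANALYZE', 20));
}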
@@ -48,30 +128,47 @@ OIDC_CLIENT_ID=your-client-id
 OIDC_CLIENT_SECRET=your-client-secret
 # ============================================================================
-# 4. ADVANCED AI CONFIGURATION (FINE-TUNING - DEFAULT VALUES USUALLY WORK)
+# 7. FILE UPLOADS - NEXTCLOUD INTEGRATION (OPTIONAL)
 # ============================================================================
-# Pipeline Performance Settings
-AI_MAX_SELECTED_ITEMS=60 # Tools analyzed per micro-task
-AI_EMBEDDING_CANDIDATES=60 # Vector search candidates
-AI_MICRO_TASK_DELAY_MS=500 # Delay between AI micro-tasks
-# Rate Limiting (requests per minute)
-AI_RATE_LIMIT_MAX_REQUESTS=6 # Main query rate limit
-AI_MICRO_TASK_RATE_LIMIT=15 # Micro-task rate limit
-AI_RATE_LIMIT_DELAY_MS=3000 # Delay between rate-limited calls
-# Embeddings Batch Processing
-AI_EMBEDDINGS_BATCH_SIZE=20 # Embeddings processed per batch
-AI_EMBEDDINGS_BATCH_DELAY_MS=1000 # Delay between embedding batches
-# Timeouts and Limits
-AI_MICRO_TASK_TIMEOUT_MS=25000 # Max time per micro-task
-AI_QUEUE_MAX_SIZE=50 # Max queued requests
-AI_SIMILARITY_THRESHOLD=0.3 # Vector similarity threshold
+# Nextcloud server for file uploads (knowledgebase contributions)
+# Leave empty to disable file upload functionality
+NEXTCLOUD_ENDPOINT=https://your-nextcloud.com
+# Nextcloud credentials (app password recommended)
+NEXTCLOUD_USERNAME=your-username
+NEXTCLOUD_PASSWORD=your-app-password
+# Upload directory on Nextcloud (will be created if it doesn't exist)
+NEXTCLOUD_UPLOAD_PATH=/kb-media
+# Public URL base for sharing uploaded files
+# Usually your Nextcloud base URL + share path
+NEXTCLOUD_PUBLIC_URL=https://your-nextcloud.com/s/
 # ============================================================================
-# 5. FORENSIC AUDIT SYSTEM (OPTIONAL - FOR TRANSPARENCY AND DEBUGGING)
+# 8. GIT CONTRIBUTIONS - ISSUE CREATION (OPTIONAL)
+# ============================================================================
+# Git provider: gitea, github, or gitlab
+GIT_PROVIDER=gitea
+# Repository URL (used to extract owner/name)
+# Example: https://git.example.com/owner/forensic-pathways.git
+GIT_REPO_URL=https://git.example.com/owner/forensic-pathways.git
+# API endpoint for your git provider
+# Gitea: https://git.example.com/api/v1
+# GitHub: https://api.github.com
+# GitLab: https://gitlab.example.com/api/v4
+GIT_API_ENDPOINT=https://git.example.com/api/v1
+# Personal access token or API token for creating issues
+# Generate this in your git provider's settings
+GIT_API_TOKEN=your-git-api-token
+# ============================================================================
+# 9. AUDIT & DEBUGGING (OPTIONAL)
 # ============================================================================
 # Enable detailed audit trail of AI decision-making
@@ -80,38 +177,49 @@ FORENSIC_AUDIT_ENABLED=false
 # Audit detail level: minimal, standard, verbose
 FORENSIC_AUDIT_DETAIL_LEVEL=standard
-# Audit retention and limits
-FORENSIC_AUDIT_RETENTION_HOURS=72 # Keep audit data for 3 days
-FORENSIC_AUDIT_MAX_ENTRIES=50 # Max entries per request
+# Audit retention time (hours)
+FORENSIC_AUDIT_RETENTION_HOURS=24
+# Maximum audit entries per request
+FORENSIC_AUDIT_MAX_ENTRIES=50
+# Enable detailed AI pipeline logging
+AI_PIPELINE_DEBUG=false
+# Enable performance metrics collection
+AI_PERFORMANCE_METRICS=false
+# Enable detailed micro-task debugging
+AI_MICRO_TASK_DEBUG=false
 # ============================================================================
-# 6. QUALITY CONTROL AND BIAS DETECTION (OPTIONAL - ADVANCED FEATURES)
+# 10. QUALITY CONTROL & BIAS DETECTION (ADVANCED)
 # ============================================================================
-# Confidence Scoring Weights (must sum to 1.0)
+# Confidence scoring weights (must sum to 1.0)
 CONFIDENCE_EMBEDDINGS_WEIGHT=0.3
 CONFIDENCE_CONSENSUS_WEIGHT=0.25
 CONFIDENCE_DOMAIN_MATCH_WEIGHT=0.25
 CONFIDENCE_FRESHNESS_WEIGHT=0.2
-# Confidence Thresholds (0-100)
+# Confidence thresholds (0-100)
 CONFIDENCE_MINIMUM_THRESHOLD=40
 CONFIDENCE_MEDIUM_THRESHOLD=60
 CONFIDENCE_HIGH_THRESHOLD=80
-# Bias Detection Settings
+# Bias detection settings
 BIAS_DETECTION_ENABLED=false
-BIAS_POPULARITY_THRESHOLD=0.7 # Detect over-popular tools
-BIAS_DIVERSITY_MINIMUM=0.6 # Require recommendation diversity
-BIAS_CELEBRITY_TOOLS="Volatility 3,Wireshark,Autopsy,Maltego"
-# Quality Control Thresholds
-QUALITY_MIN_RESPONSE_LENGTH=50 # Minimum AI response length
-QUALITY_MIN_SELECTION_COUNT=1 # Minimum tools selected
-QUALITY_MAX_PROCESSING_TIME_MS=30000 # Max processing time
+BIAS_POPULARITY_THRESHOLD=0.7
+BIAS_DIVERSITY_MINIMUM=0.6
+BIAS_CELEBRITY_TOOLS=""
+# Quality control thresholds
+QUALITY_MIN_RESPONSE_LENGTH=50
+QUALITY_MIN_SELECTION_COUNT=1
+QUALITY_MAX_PROCESSING_TIME_MS=30000
 # ============================================================================
-# 7. USER INTERFACE PREFERENCES (OPTIONAL - UI DEFAULTS)
+# 11. USER INTERFACE DEFAULTS (OPTIONAL)
 # ============================================================================
 # Default UI behavior (users can override)
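A worked sketch of how the four CONFIDENCE_* weights (which must sum to 1.0 so the result stays on the 0-100 scale) combine into a score and map onto the three thresholds; the component names and function are illustrative, not taken from the codebase:

// Sketch: weighted confidence scoring with the weights and thresholds above.
interface ConfidenceComponents {
  embeddings: number;  // 0-100: strength of the similarity match
  consensus: number;   // 0-100: agreement across micro-tasks
  domainMatch: number; // 0-100: fit with the detected forensic domain
  freshness: number;   // 0-100: how current the tool's data is
}

function confidenceLabel(c: ConfidenceComponents): 'rejected' | 'low' | 'medium' | 'high' {
  const score = 0.3 * c.embeddings + 0.25 * c.consensus
              + 0.25 * c.domainMatch + 0.2 * c.freshness;
  if (score < 40) return 'rejected'; // below CONFIDENCE_MINIMUM_THRESHOLD
  if (score >= 80) return 'high';    // CONFIDENCE_HIGH_THRESHOLD
  if (score >= 60) return 'medium';  // CONFIDENCE_MEDIUM_THRESHOLD
  return 'low';
}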
@@ -121,34 +229,76 @@ UI_SHOW_BIAS_WARNINGS=true
 UI_AUDIT_TRAIL_COLLAPSIBLE=true
 # ============================================================================
-# 8. EXTERNAL INTEGRATIONS (OPTIONAL - ONLY IF USING THESE SERVICES)
+# 12. CACHING & PERFORMANCE (OPTIONAL)
 # ============================================================================
-# Nextcloud Integration (for file uploads)
-# NEXTCLOUD_ENDPOINT=https://your-nextcloud.com
-# NEXTCLOUD_USERNAME=your-username
-# NEXTCLOUD_PASSWORD=your-password
-# NEXTCLOUD_UPLOAD_PATH=/kb-media
-# NEXTCLOUD_PUBLIC_URL=https://your-nextcloud.com/s/
+# Cache AI responses (milliseconds)
+AI_RESPONSE_CACHE_TTL_MS=3600000
+# Queue cleanup interval (milliseconds)
+AI_QUEUE_CLEANUP_INTERVAL_MS=300000
 # ============================================================================
-# 9. PERFORMANCE AND MONITORING (OPTIONAL - FOR PRODUCTION OPTIMIZATION)
+# PERFORMANCE TUNING PRESETS
 # ============================================================================
-# Caching and Queue Management
-AI_RESPONSE_CACHE_TTL_MS=3600000 # Cache responses for 1 hour
-AI_QUEUE_CLEANUP_INTERVAL_MS=300000 # Cleanup queue every 5 minutes
-# Debug and Monitoring
-AI_MICRO_TASK_DEBUG=false # Enable detailed micro-task logging
-AI_PERFORMANCE_METRICS=false # Enable performance tracking
+# 🚀 FOR FASTER RESPONSES (less comprehensive):
+# AI_EMBEDDING_CANDIDATES=20
+# AI_MAX_SELECTED_ITEMS=15
+# AI_MAX_TOOLS_TO_ANALYZE=10
+# AI_MICRO_TASK_DELAY_MS=200
+# AI_MAX_CONTEXT_TOKENS=2000
+# 🎯 FOR BETTER QUALITY (more comprehensive):
+# AI_EMBEDDING_CANDIDATES=60
+# AI_MAX_SELECTED_ITEMS=40
+# AI_MAX_TOOLS_TO_ANALYZE=30
+# AI_MICRO_TASK_DELAY_MS=800
+# AI_MAX_CONTEXT_TOKENS=4000
+# 🔋 FOR LOW-POWER SYSTEMS (minimal resources):
+# AI_EMBEDDING_CANDIDATES=15
+# AI_MAX_SELECTED_ITEMS=10
+# AI_MAX_TOOLS_TO_ANALYZE=8
+# AI_RATE_LIMIT_MAX_REQUESTS=2
+# AI_MICRO_TASK_DELAY_MS=1000
 # ============================================================================
-# SETUP CHECKLIST:
+# FEATURE COMBINATIONS GUIDE
 # ============================================================================
-# 1. Set PUBLIC_BASE_URL to your domain
-# 2. Change AUTH_SECRET to a secure random string
-# 3. Configure AI service endpoints and API keys
-# 4. Set authentication options if needed
-# 5. Test with default advanced settings before adjusting
+# 📝 BASIC SETUP (AI only):
+# - Configure AI_ANALYZER_* and AI_EMBEDDINGS_*
+# - Leave authentication, file uploads, and git disabled
+# 🔐 WITH AUTHENTICATION:
+# - Set AUTHENTICATION_NECESSARY_* to true
+# - Configure OIDC_* settings
+# 📁 WITH FILE UPLOADS:
+# - Configure all NEXTCLOUD_* settings
+# - Test connection before enabling in UI
+# 🔄 WITH CONTRIBUTIONS:
+# - Configure all GIT_* settings
+# - Test API token permissions for issue creation
+# 🔍 WITH FULL MONITORING:
+# - Enable FORENSIC_AUDIT_ENABLED=true
+# - Enable AI_PIPELINE_DEBUG=true
+# - Configure audit retention and detail level
+# ============================================================================
+# SETUP CHECKLIST
+# ============================================================================
+# ✅ 1. Set PUBLIC_BASE_URL to your domain
+# ✅ 2. Change AUTH_SECRET to a secure random string
+# ✅ 3. Configure AI endpoints (Ollama: leave API_KEY empty)
+# ✅ 4. Start with default AI values, tune based on performance
+# ✅ 5. Enable authentication if needed (configure OIDC)
+# ✅ 6. Configure Nextcloud if file uploads needed
+# ✅ 7. Configure Git provider if contributions needed
+# ✅ 8. Test with a simple query to verify pipeline works
+# ✅ 9. Enable audit trail for transparency if desired
+# ✅ 10. Tune performance settings based on usage patterns
 # ============================================================================
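For checklist item 2, one plausible way to produce a secure random string is Node's built-in CSPRNG; any cryptographically secure source works:

// One-off script: generate a value for AUTH_SECRET (run once, paste into .env)
import { randomBytes } from 'node:crypto';

console.log(randomBytes(32).toString('base64'));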

View File

@@ -1,4 +1,5 @@
-// src/pages/api/ai/enhance-input.ts - ENHANCED with forensics methodology
+// src/pages/api/ai/enhance-input.ts - Enhanced AI service compatibility
 import type { APIRoute } from 'astro';
 import { withAPIAuth } from '../../../utils/auth.js';
 import { apiError, apiServerError, createAuthErrorResponse } from '../../../utils/api.js';
@@ -93,6 +94,45 @@ ${input}
 `.trim();
 }
+// Enhanced AI service call function
+async function callAIService(prompt: string): Promise<Response> {
+  const endpoint = AI_ENDPOINT;
+  const apiKey = AI_ANALYZER_API_KEY;
+  const model = AI_ANALYZER_MODEL;
+  // Simple headers - add auth only if API key exists
+  let headers: Record<string, string> = {
+    'Content-Type': 'application/json'
+  };
+  // Add authentication if API key is provided
+  if (apiKey) {
+    headers['Authorization'] = `Bearer ${apiKey}`;
+    console.log('[ENHANCE API] Using API key authentication');
+  } else {
+    console.log('[ENHANCE API] No API key - making request without authentication');
+  }
+  // Simple request body
+  const requestBody = {
+    model,
+    messages: [{ role: 'user', content: prompt }],
+    max_tokens: 300,
+    temperature: 0.7,
+    top_p: 0.9,
+    frequency_penalty: 0.2,
+    presence_penalty: 0.1
+  };
+  // FIXED: This function is already being called through enqueueApiCall in the main handler
+  // So we can use direct fetch here since the queuing happens at the caller level
+  return fetch(`${endpoint}/v1/chat/completions`, {
+    method: 'POST',
+    headers,
+    body: JSON.stringify(requestBody)
+  });
+}
 export const POST: APIRoute = async ({ request }) => {
   try {
     const authResult = await withAPIAuth(request, 'ai');
@@ -121,31 +161,11 @@ export const POST: APIRoute = async ({ request }) => {
     const systemPrompt = createEnhancementPrompt(sanitizedInput);
     const taskId = `enhance_${userId}_${Date.now()}_${Math.random().toString(36).substr(2, 4)}`;
-    const aiResponse = await enqueueApiCall(() =>
-      fetch(`${AI_ENDPOINT}/v1/chat/completions`, {
-        method: 'POST',
-        headers: {
-          'Content-Type': 'application/json',
-          'Authorization': `Bearer ${AI_ANALYZER_API_KEY}`
-        },
-        body: JSON.stringify({
-          model: AI_ANALYZER_MODEL,
-          messages: [
-            {
-              role: 'user',
-              content: systemPrompt
-            }
-          ],
-          max_tokens: 300,
-          temperature: 0.7,
-          top_p: 0.9,
-          frequency_penalty: 0.2,
-          presence_penalty: 0.1
-        })
-      }), taskId);
+    const aiResponse = await enqueueApiCall(() => callAIService(systemPrompt), taskId);
     if (!aiResponse.ok) {
-      console.error('AI enhancement error:', await aiResponse.text());
+      const errorText = await aiResponse.text();
+      console.error('[ENHANCE API] AI enhancement error:', errorText, 'Status:', aiResponse.status);
       return apiServerError.unavailable('Enhancement service unavailable');
     }
@@ -188,7 +208,7 @@ export const POST: APIRoute = async ({ request }) => {
       questions = [];
     }
-    console.log(`[AI Enhancement] User: ${userId}, Forensics Questions: ${questions.length}, Input length: ${sanitizedInput.length}`);
+    console.log(`[ENHANCE API] User: ${userId}, Forensics Questions: ${questions.length}, Input length: ${sanitizedInput.length}`);
     return new Response(JSON.stringify({
       success: true,

View File

@@ -66,6 +66,11 @@ interface AnalysisContext {
   auditTrail: AuditEntry[];
 }
+interface SimilarityResult extends EmbeddingData {
+  similarity: number;
+}
 class ImprovedMicroTaskAIPipeline {
   private config: AIConfig;
   private maxSelectedItems: number;
@@ -267,39 +272,62 @@ class ImprovedMicroTaskAIPipeline {
         userQuery,
         this.embeddingCandidates,
         this.similarityThreshold
-      );
-      const toolNames = new Set<string>();
-      const conceptNames = new Set<string>();
-      similarItems.forEach(item => {
-        if (item.type === 'tool') toolNames.add(item.name);
-        if (item.type === 'concept') conceptNames.add(item.name);
-      });
-      console.log(`[IMPROVED PIPELINE] Embeddings found: ${toolNames.size} tools, ${conceptNames.size} concepts`);
-      if (toolNames.size >= 15) {
-        candidateTools = toolsData.tools.filter((tool: any) => toolNames.has(tool.name));
-        candidateConcepts = toolsData.concepts.filter((concept: any) => conceptNames.has(concept.name));
+      ) as SimilarityResult[]; // Type assertion for similarity property
+      console.log(`[IMPROVED PIPELINE] Embeddings found ${similarItems.length} similar items`);
+      // FIXED: Create lookup maps for O(1) access while preserving original data
+      const toolsMap = new Map<string, any>(toolsData.tools.map((tool: any) => [tool.name, tool]));
+      const conceptsMap = new Map<string, any>(toolsData.concepts.map((concept: any) => [concept.name, concept]));
+      // FIXED: Process in similarity order, preserving the ranking
+      const similarTools = similarItems
+        .filter((item): item is SimilarityResult => item.type === 'tool')
+        .map(item => toolsMap.get(item.name))
+        .filter((tool): tool is any => tool !== undefined); // Proper type guard
+      const similarConcepts = similarItems
+        .filter((item): item is SimilarityResult => item.type === 'concept')
+        .map(item => conceptsMap.get(item.name))
+        .filter((concept): concept is any => concept !== undefined); // Proper type guard
+      console.log(`[IMPROVED PIPELINE] Similarity-ordered results: ${similarTools.length} tools, ${similarConcepts.length} concepts`);
+      // Log the first few tools to verify ordering is preserved
+      if (similarTools.length > 0) {
+        console.log(`[IMPROVED PIPELINE] Top similar tools (in similarity order):`);
+        similarTools.slice(0, 5).forEach((tool, idx) => {
+          const originalSimilarItem = similarItems.find(item => item.name === tool.name);
+          console.log(`  ${idx + 1}. ${tool.name} (similarity: ${originalSimilarItem?.similarity?.toFixed(4) || 'N/A'})`);
+        });
+      }
+      if (similarTools.length >= 15) {
+        candidateTools = similarTools;
+        candidateConcepts = similarConcepts;
         selectionMethod = 'embeddings_candidates';
-        console.log(`[IMPROVED PIPELINE] Using embeddings candidates: ${candidateTools.length} tools`);
+        console.log(`[IMPROVED PIPELINE] Using embeddings candidates in similarity order: ${candidateTools.length} tools`);
       } else {
-        console.log(`[IMPROVED PIPELINE] Embeddings insufficient (${toolNames.size} < 15), using full dataset`);
+        console.log(`[IMPROVED PIPELINE] Embeddings insufficient (${similarTools.length} < 15), using full dataset`);
        candidateTools = toolsData.tools;
        candidateConcepts = toolsData.concepts;
        selectionMethod = 'full_dataset';
      }
-      // NEW: Add Audit Entry for Embeddings Search
+      // NEW: Add Audit Entry for Embeddings Search with ordering verification
       if (this.auditConfig.enabled) {
         this.addAuditEntry(null, 'retrieval', 'embeddings-search',
           { query: userQuery, threshold: this.similarityThreshold, candidates: this.embeddingCandidates },
-          { candidatesFound: similarItems.length, toolNames: Array.from(toolNames), conceptNames: Array.from(conceptNames) },
-          similarItems.length >= 15 ? 85 : 60, // Confidence based on result quality
+          {
+            candidatesFound: similarItems.length,
+            toolsInOrder: similarTools.slice(0, 3).map((t: any) => t.name),
+            conceptsInOrder: similarConcepts.slice(0, 3).map((c: any) => c.name),
+            orderingPreserved: true
+          },
+          similarTools.length >= 15 ? 85 : 60,
           embeddingsStart,
-          { selectionMethod, embeddingsEnabled: true }
+          { selectionMethod, embeddingsEnabled: true, orderingFixed: true }
         );
       }
     } else {
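The hunk above replaces Set-based filtering of the full dataset with Map lookups driven by the ranked list, so results keep their similarity ordering. The same technique in isolation, as a sketch with hypothetical item shapes:

// Sketch: resolve ranked names to full records via a Map without losing the ranking.
interface Ranked { name: string; similarity: number; }
interface Tool { name: string; description: string; }

function resolveInRankedOrder(ranked: Ranked[], tools: Tool[]): Tool[] {
  const byName = new Map(tools.map(t => [t.name, t] as const));
  // Iterating the ranked list (not the dataset) preserves descending similarity;
  // the type guard drops names with no matching record without reordering.
  return ranked
    .map(r => byName.get(r.name))
    .filter((t): t is Tool => t !== undefined);
}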
@@ -309,7 +337,7 @@ class ImprovedMicroTaskAIPipeline {
       selectionMethod = 'full_dataset';
     }
-    console.log(`[IMPROVED PIPELINE] AI will analyze FULL DATA of ${candidateTools.length} candidate tools`);
+    console.log(`[IMPROVED PIPELINE] AI will analyze ${candidateTools.length} candidate tools (ordering preserved: ${selectionMethod === 'embeddings_candidates'})`);
     const finalSelection = await this.aiSelectionWithFullData(userQuery, candidateTools, candidateConcepts, mode, selectionMethod);
     return {
@@ -735,22 +763,42 @@ ${JSON.stringify(conceptsWithFullData.slice(0, 10), null, 2)}`;
   }
   private async callAI(prompt: string, maxTokens: number = 1000): Promise<string> {
-    const response = await fetch(`${this.config.endpoint}/v1/chat/completions`, {
-      method: 'POST',
-      headers: {
-        'Content-Type': 'application/json',
-        'Authorization': `Bearer ${this.config.apiKey}`
-      },
-      body: JSON.stringify({
-        model: this.config.model,
+    const endpoint = this.config.endpoint;
+    const apiKey = this.config.apiKey;
+    const model = this.config.model;
+    // Simple headers - add auth only if API key exists
+    let headers: Record<string, string> = {
+      'Content-Type': 'application/json'
+    };
+    // Add authentication if API key is provided
+    if (apiKey) {
+      headers['Authorization'] = `Bearer ${apiKey}`;
+      console.log('[AI PIPELINE] Using API key authentication');
+    } else {
+      console.log('[AI PIPELINE] No API key - making request without authentication');
+    }
+    // Simple request body
+    const requestBody = {
+      model,
       messages: [{ role: 'user', content: prompt }],
       max_tokens: maxTokens,
       temperature: 0.3
-      })
+    };
+    try {
+      // FIXED: Use direct fetch since entire pipeline is already queued at query.ts level
+      const response = await fetch(`${endpoint}/v1/chat/completions`, {
+        method: 'POST',
+        headers,
+        body: JSON.stringify(requestBody)
       });
     if (!response.ok) {
       const errorText = await response.text();
+      console.error(`[AI PIPELINE] AI API Error ${response.status}:`, errorText);
       throw new Error(`AI API error: ${response.status} - ${errorText}`);
     }
@@ -758,10 +806,16 @@ ${JSON.stringify(conceptsWithFullData.slice(0, 10), null, 2)}`;
     const content = data.choices?.[0]?.message?.content;
     if (!content) {
+      console.error('[AI PIPELINE] No response content:', data);
       throw new Error('No response from AI model');
     }
     return content;
+    } catch (error) {
+      console.error('[AI PIPELINE] AI service call failed:', error.message);
+      throw error;
+    }
   }
   async processQuery(userQuery: string, mode: string): Promise<AnalysisResult> {

View File

@@ -24,6 +24,10 @@ interface EmbeddingsDatabase {
   embeddings: EmbeddingData[];
 }
+interface SimilarityResult extends EmbeddingData {
+  similarity: number;
+}
 class EmbeddingsService {
   private embeddings: EmbeddingData[] = [];
   private isInitialized = false;
@@ -211,8 +215,9 @@ class EmbeddingsService {
     return dotProduct / (Math.sqrt(normA) * Math.sqrt(normB));
   }
-  async findSimilar(query: string, maxResults: number = 30, threshold: number = 0.3): Promise<EmbeddingData[]> {
+  async findSimilar(query: string, maxResults: number = 30, threshold: number = 0.3): Promise<SimilarityResult[]> {
     if (!this.enabled || !this.isInitialized || this.embeddings.length === 0) {
+      console.log('[EMBEDDINGS] Service not available for similarity search');
       return [];
     }
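findSimilar ranks results by the cosine similarity whose final line appears as context above. A standalone sketch of the full metric; only that return expression is shown in the diff, so the loop body here is the standard formulation:

// Sketch: cosine similarity between two equal-length vectors.
function cosineSimilarity(a: number[], b: number[]): number {
  let dotProduct = 0;
  let normA = 0;
  let normB = 0;
  for (let i = 0; i < a.length; i++) {
    dotProduct += a[i] * b[i]; // accumulate the dot product
    normA += a[i] * a[i];      // squared magnitude of a
    normB += b[i] * b[i];      // squared magnitude of b
  }
  return dotProduct / (Math.sqrt(normA) * Math.sqrt(normB));
}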
@@ -221,18 +226,51 @@
       const queryEmbeddings = await this.generateEmbeddingsBatch([query.toLowerCase()]);
       const queryEmbedding = queryEmbeddings[0];
-      // Calculate similarities
-      const similarities = this.embeddings.map(item => ({
+      console.log(`[EMBEDDINGS] Computing similarities for ${this.embeddings.length} items`);
+      // Calculate similarities - properly typed
+      const similarities: SimilarityResult[] = this.embeddings.map(item => ({
         ...item,
         similarity: this.cosineSimilarity(queryEmbedding, item.embedding)
       }));
-      // Filter by threshold and sort by similarity
-      return similarities
+      // Filter by threshold and sort by similarity (descending - highest first)
+      const results = similarities
         .filter(item => item.similarity >= threshold)
-        .sort((a, b) => b.similarity - a.similarity)
+        .sort((a, b) => b.similarity - a.similarity) // CRITICAL: Ensure descending order
         .slice(0, maxResults);
+      // ENHANCED: Verify ordering is correct
+      const orderingValid = results.every((item, index) => {
+        if (index === 0) return true;
+        return item.similarity <= results[index - 1].similarity;
+      });
+      if (!orderingValid) {
+        console.error('[EMBEDDINGS] CRITICAL: Similarity ordering is broken!');
+        results.forEach((item, idx) => {
+          console.error(`  ${idx}: ${item.name} = ${item.similarity.toFixed(4)}`);
+        });
+      }
+      // ENHANCED: Log top results for debugging
+      console.log(`[EMBEDDINGS] Found ${results.length} similar items (threshold: ${threshold})`);
+      if (results.length > 0) {
+        console.log('[EMBEDDINGS] Top 5 similarity matches:');
+        results.slice(0, 5).forEach((item, idx) => {
+          console.log(`  ${idx + 1}. ${item.name} (${item.type}) = ${item.similarity.toFixed(4)}`);
+        });
+        // Verify first result is indeed the highest
+        const topSimilarity = results[0].similarity;
+        const hasHigherSimilarity = results.some(item => item.similarity > topSimilarity);
+        if (hasHigherSimilarity) {
+          console.error('[EMBEDDINGS] CRITICAL: Top result is not actually the highest similarity!');
+        }
+      }
+      return results;
     } catch (error) {
       console.error('[EMBEDDINGS] Failed to find similar items:', error);
       return [];
@@ -257,7 +295,7 @@
 // Global instance
 const embeddingsService = new EmbeddingsService();
-export { embeddingsService, type EmbeddingData };
+export { embeddingsService, type EmbeddingData, type SimilarityResult };
 // Auto-initialize on import in server environment
 if (typeof window === 'undefined' && process.env.NODE_ENV !== 'test') {