Audit trail phase 1 and 2

This commit is contained in:
overcuriousity
2025-08-02 12:57:38 +02:00
parent 57c507915f
commit fd05f8f291
7 changed files with 1367 additions and 449 deletions

View File

@@ -13,16 +13,23 @@ OIDC_ENDPOINT=https://your-oidc-provider.com
OIDC_CLIENT_ID=your-client-id
OIDC_CLIENT_SECRET=your-client-secret
# ===================================================================
# AI CONFIGURATION - Complete Reference for Improved Pipeline
# ===================================================================
# === STRATEGIC AI MODEL (Large context, analytical reasoning, precise output) ===
AI_STRATEGIC_ENDPOINT=https://llm.mikoshi.de
AI_STRATEGIC_API_KEY=REDACTED
AI_STRATEGIC_MODEL='mistral/mistral-large-latest'
AI_STRATEGIC_MAX_CONTEXT_TOKENS=32000
AI_STRATEGIC_MAX_OUTPUT_TOKENS=1000
AI_STRATEGIC_TEMPERATURE=0.2
# === CORE AI ENDPOINTS & MODELS ===
AI_API_ENDPOINT=https://llm.mikoshi.de
AI_API_KEY=REDACTED
AI_MODEL='mistral/mistral-small-latest'
# === TACTICAL AI MODEL (Text generation, descriptions, cost-optimized) ===
AI_TACTICAL_ENDPOINT=https://llm.mikoshi.de
AI_TACTICAL_API_KEY=REDACTED
AI_TACTICAL_MODEL='mistral/mistral-small-latest'
AI_TACTICAL_MAX_CONTEXT_TOKENS=8000
AI_TACTICAL_MAX_OUTPUT_TOKENS=500
AI_TACTICAL_TEMPERATURE=0.3
# === IMPROVED PIPELINE: Use separate analyzer model (mistral-small is fine) ===
# === LEGACY COMPATIBILITY (DEPRECATED - will be removed in next version) ===
AI_ANALYZER_ENDPOINT=https://llm.mikoshi.de
AI_ANALYZER_API_KEY=REDACTED
AI_ANALYZER_MODEL='mistral/mistral-small-latest'
@@ -35,19 +42,31 @@ AI_EMBEDDINGS_MODEL=mistral-embed
AI_EMBEDDINGS_BATCH_SIZE=20
AI_EMBEDDINGS_BATCH_DELAY_MS=1000
# === PIPELINE: VectorIndex (HNSW) Configuration ===
AI_MAX_SELECTED_ITEMS=60 # Tools visible to each micro-task
AI_EMBEDDING_CANDIDATES=60 # VectorIndex candidates (HNSW is more efficient)
AI_SIMILARITY_THRESHOLD=0.3 # Not used by VectorIndex (uses cosine distance internally)
# === FORENSIC ENHANCEMENT CONFIGURATION ===
FORENSIC_AUDIT_ENABLED=true
FORENSIC_CONFIDENCE_SCORING_ENABLED=true
FORENSIC_BIAS_DETECTION_ENABLED=true
FORENSIC_AUDIT_RETENTION_DAYS=90
FORENSIC_AUDIT_DETAIL_LEVEL=detailed
# === CONFIGURABLE THRESHOLDS (NO MORE HARD-CODED VALUES) ===
AI_MAX_SELECTED_ITEMS=60
AI_EMBEDDING_CANDIDATES=60
AI_SIMILARITY_THRESHOLD=0.3
AI_CONFIDENCE_THRESHOLD=0.7
AI_BIAS_ALERT_THRESHOLD=0.8
TOOL_POPULARITY_BIAS_THRESHOLD=0.75
EMBEDDINGS_CONFIDENCE_THRESHOLD=0.6
SELECTION_CONFIDENCE_MINIMUM=0.5
# === MICRO-TASK CONFIGURATION ===
AI_MICRO_TASK_DELAY_MS=500 # Delay between micro-tasks
AI_MICRO_TASK_TIMEOUT_MS=25000 # Timeout per micro-task (increased for full context)
AI_MICRO_TASK_DELAY_MS=500
AI_MICRO_TASK_TIMEOUT_MS=25000
# === RATE LIMITING ===
AI_RATE_LIMIT_DELAY_MS=3000 # Main rate limit delay
AI_RATE_LIMIT_MAX_REQUESTS=6 # Main requests per minute (reduced - fewer but richer calls)
AI_MICRO_TASK_RATE_LIMIT=15 # Micro-task requests per minute (was 30)
AI_RATE_LIMIT_DELAY_MS=3000
AI_RATE_LIMIT_MAX_REQUESTS=6
AI_MICRO_TASK_RATE_LIMIT=15
# === QUEUE MANAGEMENT ===
AI_QUEUE_MAX_SIZE=50
@@ -58,15 +77,6 @@ AI_MICRO_TASK_DEBUG=true
AI_PERFORMANCE_METRICS=true
AI_RESPONSE_CACHE_TTL_MS=3600000
# ===================================================================
# LEGACY VARIABLES (still used but less important)
# ===================================================================
# These are still used by other parts of the system:
AI_RESPONSE_CACHE_TTL_MS=3600000 # For caching responses
AI_QUEUE_MAX_SIZE=50 # Queue management
AI_QUEUE_CLEANUP_INTERVAL_MS=300000 # Queue cleanup
# === Application Configuration ===
PUBLIC_BASE_URL=http://localhost:4321
NODE_ENV=development