# ===========================================
# ForensicPathways Environment Configuration
# ===========================================
# === Authentication Configuration ===
AUTHENTICATION_NECESSARY=false
AUTHENTICATION_NECESSARY_CONTRIBUTIONS=false
AUTHENTICATION_NECESSARY_AI=false
AUTH_SECRET=your-secret-key-change-in-production
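# Use a long random value in production, e.g. generated with: openssl rand -base64 32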
# OIDC Configuration (if authentication enabled)
OIDC_ENDPOINT=https://your-oidc-provider.com
OIDC_CLIENT_ID=your-client-id
OIDC_CLIENT_SECRET=your-client-secret
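# These values come from the client you register with your OIDC provider; the redirect
# URI registered there should point back at PUBLIC_BASE_URL (the exact callback path
# depends on this app's auth routes).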
# ===================================================================
# AI CONFIGURATION - Complete Reference for Improved Pipeline
# ===================================================================
# === CORE AI ENDPOINTS & MODELS ===
AI_API_ENDPOINT=https://llm.mikoshi.de
AI_API_KEY=your-api-key
AI_MODEL=mistral/mistral-small-latest
# === IMPROVED PIPELINE: Use separate analyzer model (mistral-small is fine) ===
AI_ANALYZER_ENDPOINT=https://llm.mikoshi.de
AI_ANALYZER_API_KEY=your-analyzer-api-key
AI_ANALYZER_MODEL=mistral/mistral-small-latest
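# The provider/model naming above suggests a LiteLLM-style proxy. Assuming it exposes the
# usual OpenAI-compatible route (an assumption, not verified here), a quick connectivity
# check looks like:
#   curl -s "$AI_API_ENDPOINT/v1/chat/completions" \
#     -H "Authorization: Bearer $AI_API_KEY" \
#     -H "Content-Type: application/json" \
#     -d '{"model":"mistral/mistral-small-latest","messages":[{"role":"user","content":"ping"}]}'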
# === EMBEDDINGS CONFIGURATION ===
AI_EMBEDDINGS_ENABLED=true
AI_EMBEDDINGS_ENDPOINT=https://api.mistral.ai/v1/embeddings
AI_EMBEDDINGS_API_KEY=your-embeddings-api-key
AI_EMBEDDINGS_MODEL=mistral-embed
AI_EMBEDDINGS_BATCH_SIZE=20
AI_EMBEDDINGS_BATCH_DELAY_MS=1000
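# The endpoint above is Mistral's hosted embeddings API; a single batch request is a POST
# with the model name and a list of input strings (sketch, using the variables above):
#   curl -s "$AI_EMBEDDINGS_ENDPOINT" \
#     -H "Authorization: Bearer $AI_EMBEDDINGS_API_KEY" \
#     -H "Content-Type: application/json" \
#     -d '{"model":"mistral-embed","input":["first text","second text"]}'
# With BATCH_SIZE=20 and BATCH_DELAY_MS=1000, embedding N items takes roughly ceil(N/20)
# requests spaced about one second apart.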
# === PIPELINE: VectorIndex (HNSW) Configuration ===
AI_MAX_SELECTED_ITEMS=60 # Tools visible to each micro-task
AI_EMBEDDING_CANDIDATES=60 # VectorIndex candidates (HNSW is more efficient)
AI_SIMILARITY_THRESHOLD=0.3 # Not used by VectorIndex (uses cosine distance internally)
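# For reference: cosine distance = 1 - cosine similarity, so a similarity threshold of 0.3
# would correspond to a distance cutoff of 0.7 if the index were filtered by distance.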
# === MICRO-TASK CONFIGURATION ===
AI_MICRO_TASK_DELAY_MS=500 # Delay between micro-tasks
AI_MICRO_TASK_TIMEOUT_MS=25000 # Timeout per micro-task (increased for full context)
# === RATE LIMITING ===
AI_RATE_LIMIT_DELAY_MS=3000 # Main rate limit delay
AI_RATE_LIMIT_MAX_REQUESTS=6 # Main requests per minute (reduced: fewer but richer calls)
AI_MICRO_TASK_RATE_LIMIT=15 # Micro-task requests per minute (was 30)
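# Rough pacing implied by these limits: 6 main requests/minute averages one call every 10 s
# and 15 micro-task requests/minute one every 4 s; the *_DELAY_MS settings (3000 ms here,
# 500 ms between micro-tasks) presumably enforce a minimum spacing on top of that.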
# === QUEUE MANAGEMENT ===
AI_QUEUE_MAX_SIZE=50
AI_QUEUE_CLEANUP_INTERVAL_MS=300000
# === PERFORMANCE & MONITORING ===
AI_MICRO_TASK_DEBUG=true
AI_PERFORMANCE_METRICS=true
AI_RESPONSE_CACHE_TTL_MS=3600000 # Response cache TTL (3,600,000 ms = 1 hour)
# Note: the cache TTL and queue settings above are legacy variables that are still read
# by other parts of the system, so leave them set even when tuning the pipeline values.
# === Application Configuration ===
PUBLIC_BASE_URL=http://localhost:4321
NODE_ENV=development
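# In production, set PUBLIC_BASE_URL to the publicly reachable URL of the deployment and
# NODE_ENV=production (4321 is Astro's default dev-server port).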
# Nextcloud Integration (Optional)
NEXTCLOUD_ENDPOINT=https://your-nextcloud.com
NEXTCLOUD_USERNAME=your-username
NEXTCLOUD_PASSWORD=your-password
NEXTCLOUD_UPLOAD_PATH=/kb-media
NEXTCLOUD_PUBLIC_URL=https://your-nextcloud.com/s/
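# Assuming standard Nextcloud WebDAV (the actual integration may use a client library
# instead), an upload to the configured path would look roughly like:
#   curl -u "$NEXTCLOUD_USERNAME:$NEXTCLOUD_PASSWORD" -T image.png \
#     "$NEXTCLOUD_ENDPOINT/remote.php/dav/files/$NEXTCLOUD_USERNAME$NEXTCLOUD_UPLOAD_PATH/image.png"
# NEXTCLOUD_PUBLIC_URL is the base for public share links (https://your-nextcloud.com/s/<token>).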