fix AI auth, improve deploy script visuals

overcuriousity 2025-08-07 15:41:42 +02:00
parent 8516a39fcb
commit b28f9b9213
3 changed files with 1016 additions and 390 deletions


@@ -1,256 +1,184 @@
# ============================================================================
# ForensicPathways Environment Configuration - COMPLETE
# ForensicPathways Environment Configuration
# ============================================================================
# Copy this file to .env and adjust the values below.
# This file covers ALL environment variables used in the codebase.
# Copy this file to .env and configure the REQUIRED values below.
# Optional features can be enabled by uncommenting and configuring them.
# ============================================================================
# 1. CORE APPLICATION SETTINGS (REQUIRED)
# 🔥 CRITICAL - REQUIRED FOR BASIC OPERATION
# ============================================================================
# Your application's public URL (used for redirects and links)
PUBLIC_BASE_URL=http://localhost:4321
# Secret key for session encryption (GENERATE A SECURE RANDOM STRING!)
AUTH_SECRET=your-secret-key-change-in-production-please
# Primary AI service for query processing (REQUIRED for core functionality)
AI_ANALYZER_ENDPOINT=https://api.mistral.ai/v1/chat/completions
AI_ANALYZER_API_KEY=your-ai-api-key-here
AI_ANALYZER_MODEL=mistral/mistral-small-latest
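# Optional sanity check before first run (assumption: the endpoint speaks the
# OpenAI-compatible chat-completions format and curl is available; adjust the
# model name to match AI_ANALYZER_MODEL above):
#   curl -s "$AI_ANALYZER_ENDPOINT" \
#     -H "Authorization: Bearer $AI_ANALYZER_API_KEY" \
#     -H "Content-Type: application/json" \
#     -d '{"model": "mistral-small-latest", "messages": [{"role": "user", "content": "ping"}]}'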
# ============================================================================
# ⚙️ IMPORTANT - CORE FEATURES CONFIGURATION
# ============================================================================
# Application environment
NODE_ENV=development
# Secret key for session encryption (CHANGE IN PRODUCTION!)
AUTH_SECRET=your-secret-key-change-in-production-please
# ============================================================================
# 2. AI SERVICES CONFIGURATION (REQUIRED FOR AI FEATURES)
# ============================================================================
# Main AI Analysis Service (for query processing and recommendations)
# Examples: http://localhost:11434 (Ollama), https://api.mistral.ai, https://api.openai.com
AI_ANALYZER_ENDPOINT=https://api.mistral.ai/v1/chat/completions
AI_ANALYZER_API_KEY=
AI_ANALYZER_MODEL=mistral/mistral-small-latest
# Vector Embeddings Service (for semantic search)
# Leave API_KEY empty for Ollama, use actual key for cloud services
AI_EMBEDDINGS_ENABLED=true
AI_EMBEDDINGS_ENDPOINT=https://api.mistral.ai/v1/embeddings
AI_EMBEDDINGS_API_KEY=
AI_EMBEDDINGS_MODEL=mistral-embed
# ============================================================================
# 3. AI PIPELINE CONFIGURATION (CONTEXT & PERFORMANCE TUNING)
# ============================================================================
# === SIMILARITY SEARCH STAGE ===
# How many similar tools/concepts embeddings search returns as candidates
# 🔍 This is the FIRST filter - vector similarity matching
# Lower = faster, less comprehensive | Higher = slower, more comprehensive
AI_EMBEDDING_CANDIDATES=50
# Minimum similarity score threshold (0.0-1.0)
# Lower = more results but less relevant | Higher = fewer but more relevant
AI_SIMILARITY_THRESHOLD=0.3
# === AI SELECTION FROM EMBEDDINGS ===
# When embeddings are enabled, how many top tools to send with full context
# 🎯 This is the SECOND filter - take best N from embeddings results
AI_EMBEDDING_SELECTION_LIMIT=30
AI_EMBEDDING_CONCEPTS_LIMIT=15
# Maximum tools/concepts sent to AI when embeddings are DISABLED
# Set to 0 for no limit (WARNING: may cause token overflow with large datasets)
AI_NO_EMBEDDINGS_TOOL_LIMIT=0
AI_NO_EMBEDDINGS_CONCEPT_LIMIT=0
# === AI SELECTION STAGE ===
# Maximum tools the AI can select from embedding candidates
# 🤖 This is the final filter - AI intelligent selection
# Should be ≤ AI_EMBEDDING_CANDIDATES
AI_MAX_SELECTED_ITEMS=25
# === EMBEDDINGS EFFICIENCY THRESHOLDS ===
# Minimum tools required for embeddings to be considered useful
AI_EMBEDDINGS_MIN_TOOLS=8
# Maximum fraction (0.0-1.0) of total tools that embeddings may return and still count as effective filtering
AI_EMBEDDINGS_MAX_REDUCTION_RATIO=0.75
# === CONTEXT FLOW SUMMARY ===
# 1. Vector Search: 111 total tools → AI_EMBEDDING_CANDIDATES (50) most similar
# 2. AI Selection: 50 candidates → AI_MAX_SELECTED_ITEMS (25) best matches
# 3. Final Output: Recommendations based on analyzed subset
# ============================================================================
# 4. AI PERFORMANCE & RATE LIMITING
# ============================================================================
# === USER RATE LIMITS (per minute) ===
# Main queries per user per minute
AI_RATE_LIMIT_MAX_REQUESTS=4
# Total AI micro-task calls per user per minute (across all micro-tasks)
AI_MICRO_TASK_TOTAL_LIMIT=30
# === PIPELINE TIMING ===
# Delay between micro-tasks within a single query (milliseconds)
# Higher = gentler on AI service | Lower = faster responses
AI_MICRO_TASK_DELAY_MS=500
# Delay between queued requests (milliseconds)
AI_RATE_LIMIT_DELAY_MS=2000
# === EMBEDDINGS BATCH PROCESSING ===
# How many embeddings to generate per API call
AI_EMBEDDINGS_BATCH_SIZE=10
# Delay between embedding batches (milliseconds)
AI_EMBEDDINGS_BATCH_DELAY_MS=1000
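# Rough cost example: with the ~111 tools referenced in the context flow summary
# above, a batch size of 10 and a 1000 ms pause between batches means about
# ceil(111 / 10) = 12 embedding API calls and ~11 seconds of enforced delay per
# full re-index, plus the provider's own per-call latency.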
# Maximum tools sent to AI for detailed analysis (micro-tasks)
AI_MAX_TOOLS_TO_ANALYZE=20
AI_MAX_CONCEPTS_TO_ANALYZE=10
# ============================================================================
# 5. AI CONTEXT & TOKEN MANAGEMENT
# ============================================================================
# Maximum context tokens to maintain across micro-tasks
# Controls how much conversation history is preserved between AI calls
AI_MAX_CONTEXT_TOKENS=4000
# Maximum tokens per individual AI prompt
# Larger = more context per call | Smaller = faster responses
AI_MAX_PROMPT_TOKENS=2500
# ============================================================================
# 6. AUTHENTICATION & AUTHORIZATION (OPTIONAL)
# ============================================================================
# Enable authentication for different features
# === AUTHENTICATION & SECURITY ===
# Set to true to require authentication (RECOMMENDED for production)
AUTHENTICATION_NECESSARY_CONTRIBUTIONS=false
AUTHENTICATION_NECESSARY_AI=false
# OIDC Provider Settings (only needed if authentication enabled)
OIDC_ENDPOINT=https://your-oidc-provider.com
# OIDC Provider Configuration
OIDC_ENDPOINT=https://your-nextcloud.com/index.php/apps/oidc
OIDC_CLIENT_ID=your-client-id
OIDC_CLIENT_SECRET=your-client-secret
# ============================================================================
# 7. FILE UPLOADS - NEXTCLOUD INTEGRATION (OPTIONAL)
# ============================================================================
# === FILE HANDLING ===
# Nextcloud server for file uploads (knowledgebase contributions)
# Leave empty to disable file upload functionality
NEXTCLOUD_ENDPOINT=https://your-nextcloud.com
# Nextcloud credentials (app password recommended)
NEXTCLOUD_USERNAME=your-username
NEXTCLOUD_PASSWORD=your-app-password
# Upload directory on Nextcloud (will be created if it doesn't exist)
NEXTCLOUD_UPLOAD_PATH=/kb-media
# Public URL base for sharing uploaded files
# Usually your Nextcloud base URL + share path
NEXTCLOUD_PUBLIC_URL=https://your-nextcloud.com/s/
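# Optional connectivity check (assumption: the standard Nextcloud WebDAV path;
# an app password is used exactly like a normal password here):
#   curl -u "$NEXTCLOUD_USERNAME:$NEXTCLOUD_PASSWORD" -X PROPFIND -H "Depth: 0" \
#     "$NEXTCLOUD_ENDPOINT/remote.php/dav/files/$NEXTCLOUD_USERNAME/"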
# ============================================================================
# 8. GIT CONTRIBUTIONS - ISSUE CREATION (OPTIONAL)
# ============================================================================
# === COLLABORATION & CONTRIBUTIONS ===
# Git provider: gitea, github, or gitlab
GIT_PROVIDER=gitea
# Repository URL (used to extract owner/name)
# Example: https://git.example.com/owner/forensic-pathways.git
GIT_REPO_URL=https://git.example.com/owner/forensic-pathways.git
# API endpoint for your git provider
# Gitea: https://git.example.com/api/v1
# GitHub: https://api.github.com
# GitLab: https://gitlab.example.com/api/v4
GIT_API_ENDPOINT=https://git.example.com/api/v1
# Personal access token or API token for creating issues
# Generate this in your git provider's settings
GIT_API_TOKEN=your-git-api-token
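# Optional token check for Gitea (assumes curl is available; GitHub and GitLab
# use different endpoints and auth headers, so adapt accordingly):
#   curl -H "Authorization: token $GIT_API_TOKEN" "$GIT_API_ENDPOINT/user"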
# ============================================================================
# 9. AUDIT & DEBUGGING (OPTIONAL)
# ============================================================================
# Enable detailed audit trail of AI decision-making
# === AUDIT TRAIL (Important for forensic work) ===
FORENSIC_AUDIT_ENABLED=true
# Audit detail level: minimal, standard, verbose
FORENSIC_AUDIT_DETAIL_LEVEL=standard
# Audit retention time (hours)
FORENSIC_AUDIT_RETENTION_HOURS=24
# Maximum audit entries per request
FORENSIC_AUDIT_MAX_ENTRIES=50
# ============================================================================
# 10. SIMPLIFIED CONFIDENCE SCORING SYSTEM
# ============================================================================
# === AI SEMANTIC SEARCH ===
# Enable semantic search (highly recommended for better results)
AI_EMBEDDINGS_ENABLED=true
AI_EMBEDDINGS_ENDPOINT=https://api.mistral.ai/v1/embeddings
AI_EMBEDDINGS_API_KEY=your-embeddings-api-key-here
AI_EMBEDDINGS_MODEL=mistral-embed
# Confidence component weights (must sum to 1.0)
CONFIDENCE_SEMANTIC_WEIGHT=0.5 # Weight for vector similarity quality
CONFIDENCE_SUITABILITY_WEIGHT=0.5 # Weight for AI-determined task fitness
# Confidence thresholds (0-100)
CONFIDENCE_MINIMUM_THRESHOLD=50 # Below this = weak recommendation
CONFIDENCE_MEDIUM_THRESHOLD=70 # 50-69 = weak, 70-79 = moderate
CONFIDENCE_HIGH_THRESHOLD=80 # 80+ = strong recommendation
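# Worked example (assuming the final score is the weighted sum of the two 0-100
# components): semantic similarity 82 and AI suitability 68 give
# 0.5 * 82 + 0.5 * 68 = 75, which clears the minimum (50) and medium (70)
# thresholds but stays below the 80 required for a strong recommendation.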
# User rate limiting (queries per minute)
AI_RATE_LIMIT_MAX_REQUESTS=4
# ============================================================================
# PERFORMANCE TUNING PRESETS
# 🎛️ PERFORMANCE TUNING - SENSIBLE DEFAULTS PROVIDED
# ============================================================================
# 🚀 FOR FASTER RESPONSES (prevent token overflow):
# AI_NO_EMBEDDINGS_TOOL_LIMIT=25
# AI_NO_EMBEDDINGS_CONCEPT_LIMIT=10
# === AI Pipeline Configuration ===
# These values are pre-tuned for optimal performance - adjust only if needed
# Vector similarity search settings
AI_EMBEDDING_CANDIDATES=50
AI_SIMILARITY_THRESHOLD=0.3
AI_EMBEDDING_SELECTION_LIMIT=30
AI_EMBEDDING_CONCEPTS_LIMIT=15
# AI selection limits
AI_MAX_SELECTED_ITEMS=25
AI_MAX_TOOLS_TO_ANALYZE=20
AI_MAX_CONCEPTS_TO_ANALYZE=10
# Efficiency thresholds
AI_EMBEDDINGS_MIN_TOOLS=8
AI_EMBEDDINGS_MAX_REDUCTION_RATIO=0.75
# Fallback limits when embeddings are disabled
AI_NO_EMBEDDINGS_TOOL_LIMIT=25
AI_NO_EMBEDDINGS_CONCEPT_LIMIT=10
# === Rate Limiting & Timing ===
AI_MICRO_TASK_TOTAL_LIMIT=30
AI_MICRO_TASK_DELAY_MS=500
AI_RATE_LIMIT_DELAY_MS=2000
# === Embeddings Batch Processing ===
AI_EMBEDDINGS_BATCH_SIZE=10
AI_EMBEDDINGS_BATCH_DELAY_MS=1000
# === Context Management ===
AI_MAX_CONTEXT_TOKENS=4000
AI_MAX_PROMPT_TOKENS=2500
# === Confidence Scoring ===
CONFIDENCE_SEMANTIC_WEIGHT=0.5
CONFIDENCE_SUITABILITY_WEIGHT=0.5
CONFIDENCE_MINIMUM_THRESHOLD=50
CONFIDENCE_MEDIUM_THRESHOLD=70
CONFIDENCE_HIGH_THRESHOLD=80
# 🎯 FOR FULL DATABASE ACCESS (risk of truncation):
# AI_NO_EMBEDDINGS_TOOL_LIMIT=0
# AI_NO_EMBEDDINGS_CONCEPT_LIMIT=0
# 🔋 FOR LOW-POWER SYSTEMS:
# AI_NO_EMBEDDINGS_TOOL_LIMIT=15
# ============================================================================
# FEATURE COMBINATIONS GUIDE
# 📋 QUICK SETUP CHECKLIST
# ============================================================================
# 📝 BASIC SETUP (AI only):
# - Configure AI_ANALYZER_* and AI_EMBEDDINGS_*
# - Leave authentication, file uploads, and git disabled
# 🔐 WITH AUTHENTICATION:
# - Set AUTHENTICATION_NECESSARY_* to true
# - Configure OIDC_* settings
# 📁 WITH FILE UPLOADS:
# - Configure all NEXTCLOUD_* settings
# - Test connection before enabling in UI
# 🔄 WITH CONTRIBUTIONS:
# - Configure all GIT_* settings
# - Test API token permissions for issue creation
# 🔍 WITH FULL MONITORING:
# - Enable FORENSIC_AUDIT_ENABLED=true
# - Configure audit retention and detail level
#
# MINIMUM FOR DEVELOPMENT/TESTING:
# 1. ✅ Set PUBLIC_BASE_URL to your domain/localhost
# 2. ✅ Generate secure AUTH_SECRET (use: openssl rand -base64 32)
# 3. ✅ Configure AI_ANALYZER_ENDPOINT and API_KEY for your AI service
# 4. ✅ Test basic functionality
#
# PRODUCTION-READY DEPLOYMENT:
# 5. ✅ Enable authentication (configure AUTHENTICATION_* and OIDC_*)
# 6. ✅ Configure file handling (set NEXTCLOUD_* for uploads)
# 7. ✅ Enable collaboration (set GIT_* for contributions)
# 8. ✅ Enable audit trail (verify FORENSIC_AUDIT_ENABLED=true)
# 9. ✅ Configure embeddings for better search (AI_EMBEDDINGS_*)
# 10. ✅ Adjust rate limits based on expected usage
# ============================================================================
# SETUP CHECKLIST
# 🏃‍♂️ PERFORMANCE PRESETS - UNCOMMENT ONE IF NEEDED
# ============================================================================
# ✅ 1. Set PUBLIC_BASE_URL to your domain
# ✅ 2. Change AUTH_SECRET to a secure random string
# ✅ 3. Configure AI endpoints (Ollama: leave API_KEY empty)
# ✅ 4. Start with default AI values, tune based on performance
# ✅ 5. Enable authentication if needed (configure OIDC)
# ✅ 6. Configure Nextcloud if file uploads needed
# ✅ 7. Configure Git provider if contributions needed
# ✅ 8. Test with a simple query to verify pipeline works
# ✅ 9. Enable audit trail for transparency if desired
# ✅ 10. Tune performance settings based on usage patterns
# ============================================================================
# 🚀 SPEED OPTIMIZED (faster responses, less comprehensive):
# AI_EMBEDDING_CANDIDATES=25
# AI_MAX_SELECTED_ITEMS=15
# AI_MAX_TOOLS_TO_ANALYZE=10
# AI_MICRO_TASK_DELAY_MS=250
# 🎯 ACCURACY OPTIMIZED (slower responses, more comprehensive):
# AI_EMBEDDING_CANDIDATES=100
# AI_MAX_SELECTED_ITEMS=50
# AI_MAX_TOOLS_TO_ANALYZE=40
# AI_MICRO_TASK_DELAY_MS=1000
# 🔋 RESOURCE CONSTRAINED (for limited AI quotas):
# AI_RATE_LIMIT_MAX_REQUESTS=2
# AI_MICRO_TASK_TOTAL_LIMIT=15
# AI_MAX_TOOLS_TO_ANALYZE=10
# AI_EMBEDDINGS_ENABLED=false
# ============================================================================
# 🌐 AI SERVICE EXAMPLES
# ============================================================================
# === OLLAMA (Local) ===
# AI_ANALYZER_ENDPOINT=http://localhost:11434/v1/chat/completions
# AI_ANALYZER_API_KEY=
# AI_ANALYZER_MODEL=llama3.1:8b
# AI_EMBEDDINGS_ENDPOINT=http://localhost:11434/v1/embeddings
# AI_EMBEDDINGS_API_KEY=
# AI_EMBEDDINGS_MODEL=nomic-embed-text
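# Note for Ollama: the models must be pulled locally before first use
# (assumes a default install listening on port 11434):
#   ollama pull llama3.1:8b
#   ollama pull nomic-embed-text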
# === OPENAI ===
# AI_ANALYZER_ENDPOINT=https://api.openai.com/v1/chat/completions
# AI_ANALYZER_API_KEY=sk-your-openai-key
# AI_ANALYZER_MODEL=gpt-4o-mini
# AI_EMBEDDINGS_ENDPOINT=https://api.openai.com/v1/embeddings
# AI_EMBEDDINGS_API_KEY=sk-your-openai-key
# AI_EMBEDDINGS_MODEL=text-embedding-3-small
# === MISTRAL (Default) ===
# AI_ANALYZER_ENDPOINT=https://api.mistral.ai/v1/chat/completions
# AI_ANALYZER_API_KEY=your-mistral-key
# AI_ANALYZER_MODEL=mistral-small-latest
# AI_EMBEDDINGS_ENDPOINT=https://api.mistral.ai/v1/embeddings
# AI_EMBEDDINGS_API_KEY=your-mistral-key
# AI_EMBEDDINGS_MODEL=mistral-embed

deploy.sh: 963 changes (file diff suppressed because it is too large)


@@ -6,10 +6,19 @@ import ToolMatrix from '../components/ToolMatrix.astro';
import AIQueryInterface from '../components/AIQueryInterface.astro';
import TargetedScenarios from '../components/TargetedScenarios.astro';
import { getToolsData } from '../utils/dataService.js';
import { withAPIAuth, getAuthRequirementForContext } from '../utils/auth.js';
const data = await getToolsData();
const tools = data.tools;
const phases = data.phases;
// Check AI authentication requirements
const aiAuthRequired = getAuthRequirementForContext('ai');
let aiAuthContext: { authenticated: boolean; userId: string; session?: any; authRequired: boolean; } | null = null;
if (aiAuthRequired) {
aiAuthContext = await withAPIAuth(Astro.request, 'ai');
}
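// A minimal sketch (assumption, not the actual implementation in utils/auth.js) of how
// getAuthRequirementForContext could map a context onto the AUTHENTICATION_NECESSARY_*
// flags from .env, shown only to clarify what the check above gates on:
//
//   export function getAuthRequirementForContext(context: 'ai' | 'contributions'): boolean {
//     return context === 'ai'
//       ? import.meta.env.AUTHENTICATION_NECESSARY_AI === 'true'
//       : import.meta.env.AUTHENTICATION_NECESSARY_CONTRIBUTIONS === 'true';
//   }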
---
<BaseLayout title="~/">
@@ -36,17 +45,33 @@ const phases = data.phases;
</div>
</div>
<button id="ai-query-btn" class="btn btn-accent btn-lg ai-primary-btn">
<svg width="16" height="16" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2">
<path d="M9 11H5a2 2 0 0 0-2 2v7a2 2 0 0 0 2 2h14a2 2 0 0 0 2-2v-7a2 2 0 0 0-2-2h-4"/>
<path d="M9 11V7a3 3 0 0 1 6 0v4"/>
</svg>
KI-Beratung starten
<svg width="14" height="14" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" class="ml-2">
<line x1="7" y1="17" x2="17" y2="7"/>
<polyline points="7,7 17,7 17,17"/>
</svg>
</button>
{aiAuthRequired && !aiAuthContext?.authenticated ? (
<div class="ai-auth-required">
<button id="ai-login-btn" class="btn btn-accent btn-lg">
<svg width="16" height="16" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" style="margin-right: 0.5rem;">
<path d="M15 3h4a2 2 0 0 1 2 2v14a2 2 0 0 1-2 2h-4"/>
<polyline points="10 17 15 12 10 7"/>
<line x1="15" y1="12" x2="3" y2="12"/>
</svg>
Anmelden für KI-Beratung
</button>
<p style="margin-top: 0.75rem; font-size: 0.875rem; color: var(--color-text-secondary); text-align: center;">
Authentifizierung erforderlich für KI-Features
</p>
</div>
) : (
<button id="ai-query-btn" class="btn btn-accent btn-lg ai-primary-btn">
<svg width="16" height="16" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2">
<path d="M9 11H5a2 2 0 0 0-2 2v7a2 2 0 0 0 2 2h14a2 2 0 0 0 2-2v-7a2 2 0 0 0-2-2h-4"/>
<path d="M9 11V7a3 3 0 0 1 6 0v4"/>
</svg>
KI-Beratung starten
<svg width="14" height="14" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" class="ml-2">
<line x1="7" y1="17" x2="17" y2="7"/>
<polyline points="7,7 17,7 17,17"/>
</svg>
</button>
)}
<div class="ai-features-mini">
<span class="badge badge-secondary">Workflow-Empfehlungen</span>
@@ -178,7 +203,39 @@ const phases = data.phases;
<ToolFilters data={data} />
</section>
<AIQueryInterface />
{aiAuthRequired && !aiAuthContext?.authenticated ? (
<section id="ai-interface" class="ai-interface hidden">
<div class="ai-query-section">
<div class="content-center-lg">
<div class="card" style="text-align: center; padding: 3rem; border-left: 4px solid var(--color-accent);">
<div style="margin-bottom: 2rem;">
<svg width="64" height="64" viewBox="0 0 24 24" fill="none" stroke="var(--color-accent)" stroke-width="1.5" style="margin: 0 auto;">
<path d="M9 11H5a2 2 0 0 0-2 2v7a2 2 0 0 0 2 2h14a2 2 0 0 0 2-2v-7a2 2 0 0 0-2-2h-4"/>
<path d="M9 11V7a3 3 0 0 1 6 0v4"/>
<circle cx="12" cy="12" r="2"/>
</svg>
</div>
<h2 style="margin-bottom: 1rem; color: var(--color-primary);">Anmeldung erforderlich</h2>
<p style="margin-bottom: 2rem; color: var(--color-text-secondary); line-height: 1.6;">
Für die Nutzung der KI-Beratung ist eine Authentifizierung erforderlich.
Melden Sie sich an, um personalisierte Workflow-Empfehlungen und Tool-Analysen zu erhalten.
</p>
<a href={`/api/auth/login?returnTo=${encodeURIComponent(Astro.url.toString())}`}
class="btn btn-accent btn-lg">
<svg width="16" height="16" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" style="margin-right: 0.5rem;">
<path d="M15 3h4a2 2 0 0 1 2 2v14a2 2 0 0 1-2 2h-4"/>
<polyline points="10 17 15 12 10 7"/>
<line x1="15" y1="12" x2="3" y2="12"/>
</svg>
Anmelden
</a>
</div>
</div>
</div>
</section>
) : (
<AIQueryInterface />
)}
<section id="tools-grid" style="padding-bottom: 2rem;">
<div class="grid-auto-fit" id="tools-container">
@@ -195,7 +252,7 @@ const phases = data.phases;
<ToolMatrix data={data} />
</BaseLayout>
<script define:vars={{ toolsData: data.tools, phases: data.phases }}>
<script define:vars={{ toolsData: data.tools, phases: data.phases, aiAuthRequired: aiAuthRequired, aiAuthenticated: aiAuthContext?.authenticated }}>
window.toolsData = toolsData;
window.selectApproach = function(approach) {
@@ -268,12 +325,21 @@ const phases = data.phases;
const filtersSection = document.getElementById('filters-section');
const noResults = document.getElementById('no-results');
const aiQueryBtn = document.getElementById('ai-query-btn');
const aiLoginBtn = document.getElementById('ai-login-btn');
if (!toolsContainer || !toolsGrid || !matrixContainer || !noResults || !aiInterface || !filtersSection) {
console.error('Required DOM elements not found');
return;
}
// Handle AI authentication button click
if (aiLoginBtn) {
aiLoginBtn.addEventListener('click', () => {
const currentUrl = encodeURIComponent(window.location.href);
window.location.href = `/api/auth/login?returnTo=${currentUrl}`;
});
}
if (aiQueryBtn) {
aiQueryBtn.addEventListener('click', () => {
aiQueryBtn.classList.add('activated');
@@ -319,6 +385,14 @@ const phases = data.phases;
if (filtersSection) filtersSection.style.display = 'block';
break;
case 'ai':
// Only show AI interface if authentication allows it
if (aiAuthRequired && !aiAuthenticated) {
console.log('[AUTH] AI access denied, redirecting to login');
const currentUrl = encodeURIComponent(window.location.href);
window.location.href = `/api/auth/login?returnTo=${currentUrl}`;
return;
}
if (aiInterface) aiInterface.style.display = 'block';
if (filtersSection) {
@@ -344,6 +418,7 @@ const phases = data.phases;
}
}
// Rest of the existing code remains the same...
window.navigateToGrid = function(toolName) {
console.log('Navigating to grid for tool:', toolName);