airefactor #19

Merged
mstoeck3 merged 25 commits from airefactor into main 2025-08-17 22:59:31 +00:00
7 changed files with 132 additions and 250 deletions
Showing only changes of commit 70fb012d63 - Show all commits

View File

@ -1594,6 +1594,7 @@ class AIQueryInterface {
const exportData = {
metadata: {
timestamp: new Date().toISOString(),
version: '2.0',
toolsDataHash: toolsDataHash,
aiModel: aiModel,
aiParameters: aiParameters,
@ -1607,8 +1608,7 @@ class AIQueryInterface {
auditTrail: undefined
},
auditTrail: this.currentRecommendation.auditTrail || [],
rawContext: rawContext,
checksum: this.calculateDataChecksum(this.currentRecommendation)
rawContext: rawContext
};
const blob = new Blob([JSON.stringify(exportData, null, 2)], {
@ -1622,42 +1622,16 @@ class AIQueryInterface {
a.click();
URL.revokeObjectURL(url);
console.log('[AI Interface] Analysis downloaded with enhanced structure:', {
version: '1.1',
console.log('[AI Interface] Analysis downloaded with verified hash structure:', {
version: '2.0',
aiModel,
toolsDataHash: toolsDataHash.slice(0, 8) + '...',
toolsDataHash: toolsDataHash.slice(0, 12) + '...',
tokensUsed: aiParameters.totalTokensUsed,
auditEntries: exportData.auditTrail.length,
checksum: exportData.checksum.slice(0, 8) + '...'
hashVerifiable: toolsDataHash !== 'unknown'
});
}
calculateDataChecksum(data) {
if (!data) return 'empty';
try {
const keyData = {
recommendedToolsCount: data.recommended_tools?.length || 0,
backgroundKnowledgeCount: data.background_knowledge?.length || 0,
hasScenarioAnalysis: !!(data.scenario_analysis || data.problem_analysis),
hasApproach: !!data.investigation_approach,
processingTimeMs: data.processingStats?.processingTimeMs || 0
};
const dataString = JSON.stringify(keyData);
let hash = 0;
for (let i = 0; i < dataString.length; i++) {
const char = dataString.charCodeAt(i);
hash = ((hash << 5) - hash) + char;
hash = hash & hash;
}
return Math.abs(hash).toString(36);
} catch (error) {
console.error('[AI Interface] Checksum calculation failed:', error);
return 'error';
}
}
validateUploadStructure(data) {
try {
const isValid = !!(

View File

@ -4,15 +4,12 @@ import { aiService } from './aiService.js';
import { toolSelector, type SelectionContext } from './toolSelector.js';
import { confidenceScoring, type AnalysisContext } from './confidenceScoring.js';
import { embeddingsService } from './embeddings.js';
import { auditService, type AuditEntry } from './auditService.js';
import { auditService } from './auditService.js';
import { JSONParser } from './jsonUtils.js';
import { getPrompt } from '../config/prompts.js';
import 'dotenv/config';
interface PipelineConfig {
microTaskDelay: number;
//maxContextTokens: number;
maxPromptTokens: number;
taskRelevanceModeration: {
maxInitialScore: number;
maxWithPhaseBonus: number;
@ -36,7 +33,6 @@ interface MicroTaskResult {
interface AnalysisResult {
recommendation: any;
processingStats: {
//embeddingsUsed: boolean;
candidatesFromEmbeddings: number;
finalSelectedItems: number;
processingTimeMs: number;
@ -57,7 +53,6 @@ interface PipelineContext {
mode: string;
filteredData: any;
contextHistory: string[];
//maxContextLength: number;
currentContextLength: number;
scenarioAnalysis?: string;
problemAnalysis?: string;
@ -91,16 +86,12 @@ class AIPipeline {
constructor() {
this.config = {
microTaskDelay: parseInt(process.env.AI_MICRO_TASK_DELAY_MS || '500', 10),
//maxContextTokens: parseInt(process.env.AI_MAX_CONTEXT_TOKENS || '4000', 10),
maxPromptTokens: parseInt(process.env.AI_MAX_PROMPT_TOKENS || '1500', 10),
taskRelevanceModeration: {
maxInitialScore: 85,
maxWithPhaseBonus: 95,
moderationThreshold: 80
}
};
console.log('[AI-PIPELINE] Initialized with dynamic phase handling');
}
async processQuery(userQuery: string, mode: string): Promise<AnalysisResult> {
@ -108,8 +99,6 @@ class AIPipeline {
let completedTasks = 0;
let failedTasks = 0;
this.totalTokensUsed = 0;
console.log('[AI-PIPELINE] Starting', mode, 'analysis pipeline');
auditService.clearAuditTrail();
@ -118,90 +107,102 @@ class AIPipeline {
const aiConfig = aiService.getConfig();
const toolsDataHash = getDataVersion?.() || 'unknown';
// Record the tools.yaml version being used
auditService.addEntry(
'initialization',
'tools-data-loaded',
{
toolsFile: 'tools.yaml',
hashAlgorithm: 'SHA256'
},
{
toolsDataHash: toolsDataHash,
toolsCount: toolsData.tools.length,
conceptsCount: toolsData.concepts.length,
domainsCount: toolsData.domains.length,
phasesCount: toolsData.phases.length
},
100,
Date.now(),
{
toolsDataHash: toolsDataHash,
verification: `Users can verify with: sha256sum src/data/tools.yaml`,
dataVersion: toolsDataHash.slice(0, 12),
reasoning: `Geladen: ${toolsData.tools.length} Tools, ${toolsData.concepts.length} Konzepte aus tools.yaml (Hash: ${toolsDataHash.slice(0, 12)}...)`
}
);
const context: PipelineContext = {
userQuery,
mode,
filteredData: {},
contextHistory: [],
//maxContextLength: this.config.maxContextTokens,
currentContextLength: 0,
seenToolNames: new Set<string>(),
embeddingsSimilarities: new Map<string, number>(),
phaseMetadata: this.initializePhaseMetadata(toolsData.phases)
};
console.log('[AI-PIPELINE] Phase 1: Tool candidate selection');
const candidateSelectionStart = Date.now();
const candidateData = await toolSelector.getIntelligentCandidates(userQuery, toolsData, mode, context);
const selectionConfidence = this.calculateToolSelectionConfidence(
candidateData.tools.length,
toolsData.tools.length,
//candidateData.selectionMethod,
candidateData.concepts.length
);
auditService.addToolSelection(
candidateData.tools.map(t => t.name),
toolsData.tools.map(t => t.name),
//candidateData.selectionMethod,
selectionConfidence,
candidateSelectionStart,
{
//embeddingsUsed: embeddingsService.isEnabled(),
toolsDataHash: toolsDataHash,
totalCandidatesFound: candidateData.tools.length + candidateData.concepts.length,
//selectionMethod: candidateData.selectionMethod,
reductionRatio: candidateData.tools.length / toolsData.tools.length
}
);
context.filteredData = candidateData;
console.log('[AI-PIPELINE] Phase 2: Contextual analysis');
const analysisResult = await this.analyzeScenario(context, startTime);
const analysisResult = await this.analyzeScenario(context, startTime, toolsDataHash);
if (analysisResult.success) completedTasks++; else failedTasks++;
this.trackTokenUsage(analysisResult.aiUsage);
await this.delay(this.config.microTaskDelay);
const approachResult = await this.generateApproach(context, startTime);
const approachResult = await this.generateApproach(context, startTime, toolsDataHash);
if (approachResult.success) completedTasks++; else failedTasks++;
this.trackTokenUsage(approachResult.aiUsage);
await this.delay(this.config.microTaskDelay);
const considerationsResult = await this.generateCriticalConsiderations(context, startTime);
const considerationsResult = await this.generateCriticalConsiderations(context, startTime, toolsDataHash);
if (considerationsResult.success) completedTasks++; else failedTasks++;
this.trackTokenUsage(considerationsResult.aiUsage);
await this.delay(this.config.microTaskDelay);
console.log('[AI-PIPELINE] Phase 3: Tool-specific analysis');
if (mode === 'workflow') {
const workflowResults = await this.processWorkflowMode(context, toolsData, completedTasks, failedTasks, startTime);
const workflowResults = await this.processWorkflowMode(context, toolsData, completedTasks, failedTasks, startTime, toolsDataHash);
completedTasks = workflowResults.completed;
failedTasks = workflowResults.failed;
} else {
const toolResults = await this.processToolMode(context, completedTasks, failedTasks, startTime);
const toolResults = await this.processToolMode(context, completedTasks, failedTasks, startTime, toolsDataHash);
completedTasks = toolResults.completed;
failedTasks = toolResults.failed;
}
console.log('[AI-PIPELINE] Phase 4: Knowledge synthesis');
const knowledgeResult = await this.selectBackgroundKnowledge(context, startTime);
const knowledgeResult = await this.selectBackgroundKnowledge(context, startTime, toolsDataHash);
if (knowledgeResult.success) completedTasks++; else failedTasks++;
this.trackTokenUsage(knowledgeResult.aiUsage);
await this.delay(this.config.microTaskDelay);
const finalResult = await this.generateFinalRecommendations(context, startTime);
const finalResult = await this.generateFinalRecommendations(context, startTime, toolsDataHash);
if (finalResult.success) completedTasks++; else failedTasks++;
this.trackTokenUsage(finalResult.aiUsage);
const recommendation = this.buildRecommendation(context, mode, finalResult.content);
const recommendation = this.buildRecommendation(context, mode, finalResult.content, toolsDataHash);
const processingStats = {
//embeddingsUsed: embeddingsService.isEnabled(),
candidatesFromEmbeddings: candidateData.tools.length,
finalSelectedItems: (context.selectedTools?.length || 0) + (context.backgroundKnowledge?.length || 0),
processingTimeMs: Date.now() - startTime,
@ -216,16 +217,6 @@ class AIPipeline {
maxTokensUsed: 32768
};
console.log('[AI-PIPELINE] Pipeline completed successfully:', {
mode,
processingTimeMs: processingStats.processingTimeMs,
completedTasks,
failedTasks,
finalItems: processingStats.finalSelectedItems,
totalTokensUsed: this.totalTokensUsed,
auditEntries: processingStats.auditEntriesGenerated
});
const finalAuditTrail = auditService.finalizeAuditTrail();
return {
@ -249,7 +240,6 @@ class AIPipeline {
phaseComplexity: Map<string, number>;
} {
if (!phases || !Array.isArray(phases)) {
console.warn('[AI-PIPELINE] No phases data available, using fallback');
return {
phases: [],
criticalPhaseIds: [],
@ -268,20 +258,12 @@ class AIPipeline {
const phaseComplexity = new Map<string, number>();
phases.forEach(phase => {
let complexity = 1;
if (phase.typical_tools?.length > 5) complexity += 1;
if (phase.key_activities?.length > 3) complexity += 1;
if (phase.description?.length > 100) complexity += 1;
phaseComplexity.set(phase.id, complexity);
});
console.log('[AI-PIPELINE] Initialized phase metadata:', {
totalPhases: phases.length,
criticalPhases: criticalPhaseIds.length,
avgComplexity: Array.from(phaseComplexity.values()).reduce((sum, c) => sum + c, 0) / phases.length
});
return {
phases,
criticalPhaseIds,
@ -292,11 +274,9 @@ class AIPipeline {
private calculateToolSelectionConfidence(
selectedCount: number,
totalCount: number,
//method: string,
conceptsCount: number
): number {
let confidence = 50;
const selectionRatio = selectedCount / totalCount;
if (selectionRatio >= 0.05 && selectionRatio <= 0.20) {
@ -307,17 +287,8 @@ class AIPipeline {
confidence -= 15;
}
//if (method.includes('embeddings')) {
//confidence += 15;
//}
if (conceptsCount > 0) {
confidence += 10;
}
if (selectedCount >= 8 && selectedCount <= 25) {
confidence += 10;
}
if (conceptsCount > 0) confidence += 10;
if (selectedCount >= 8 && selectedCount <= 25) confidence += 10;
return Math.min(95, Math.max(40, confidence));
}
@ -327,7 +298,8 @@ class AIPipeline {
toolsData: any,
completedTasks: number,
failedTasks: number,
pipelineStart: number
pipelineStart: number,
toolsDataHash: string
): Promise<{ completed: number; failed: number }> {
const phases = toolsData.phases || [];
@ -337,10 +309,7 @@ class AIPipeline {
tool && tool.phases && Array.isArray(tool.phases) && tool.phases.includes(phase.id)
);
if (phaseTools.length === 0) {
console.log(`[AI-PIPELINE] No tools available for phase: ${phase.id}`);
continue;
}
if (phaseTools.length === 0) continue;
const selections = await toolSelector.selectToolsForPhase(context.userQuery, phase, phaseTools, context);
@ -370,6 +339,7 @@ class AIPipeline {
phaseConfidence,
phaseStart,
{
toolsDataHash: toolsDataHash,
phaseId: phase.id,
availableToolsCount: phaseTools.length,
selectedToolsCount: selections.length,
@ -383,7 +353,6 @@ class AIPipeline {
if (tool) {
const moderatedTaskRelevance = this.moderateTaskRelevance(sel.taskRelevance);
const priority = this.derivePriorityFromScore(moderatedTaskRelevance);
const dynamicLimitations = this.generateDynamicLimitations(tool, phase, sel);
this.addToolToSelection(context, tool, phase.id, priority, sel.justification, moderatedTaskRelevance, dynamicLimitations);
@ -405,6 +374,7 @@ class AIPipeline {
moderatedTaskRelevance || 70,
phaseStart,
{
toolsDataHash: toolsDataHash,
toolType: tool.type,
priority,
moderationApplied: sel.taskRelevance !== moderatedTaskRelevance,
@ -418,7 +388,7 @@ class AIPipeline {
await this.delay(this.config.microTaskDelay);
}
const completionResult = await this.completeUnderrepresentedPhases(context, toolsData, pipelineStart);
const completionResult = await this.completeUnderrepresentedPhases(context, toolsData, pipelineStart, toolsDataHash);
completedTasks += completionResult.completed;
failedTasks += completionResult.failed;
@ -437,7 +407,6 @@ class AIPipeline {
}
): number {
let confidence = 60;
const isCritical = phaseMetadata?.criticalPhaseIds.includes(phaseId) || false;
const phaseComplexity = phaseMetadata?.phaseComplexity.get(phaseId) || 1;
@ -454,13 +423,8 @@ class AIPipeline {
confidence += 10;
}
if (isCritical && selectedCount >= 2) {
confidence += 10;
}
if (phaseComplexity > 2 && selectedCount >= phaseComplexity) {
confidence += 5;
}
if (isCritical && selectedCount >= 2) confidence += 10;
if (phaseComplexity > 2 && selectedCount >= phaseComplexity) confidence += 5;
const avgRelevance = selections.length > 0 ?
selections.reduce((sum, s) => sum + (s.taskRelevance || 70), 0) / selections.length : 0;
@ -504,12 +468,13 @@ class AIPipeline {
context: PipelineContext,
completedTasks: number,
failedTasks: number,
pipelineStart: number
pipelineStart: number,
toolsDataHash: string
): Promise<{ completed: number; failed: number }> {
const topTools = context.filteredData.tools.slice(0, 3);
for (let i = 0; i < topTools.length; i++) {
const evaluationResult = await this.evaluateSpecificTool(context, topTools[i], i + 1, pipelineStart);
const evaluationResult = await this.evaluateSpecificTool(context, topTools[i], i + 1, pipelineStart, toolsDataHash);
if (evaluationResult.success) completedTasks++; else failedTasks++;
this.trackTokenUsage(evaluationResult.aiUsage);
await this.delay(this.config.microTaskDelay);
@ -521,7 +486,8 @@ class AIPipeline {
private async completeUnderrepresentedPhases(
context: PipelineContext,
toolsData: any,
pipelineStart: number
pipelineStart: number,
toolsDataHash: string
): Promise<{ completed: number; failed: number }> {
const phases = toolsData.phases || [];
const selectedPhases = new Map<string, number>();
@ -538,15 +504,10 @@ class AIPipeline {
return count <= 1;
});
if (underrepresentedPhases.length === 0) {
console.log('[AI-PIPELINE] All phases adequately represented');
return { completed: 0, failed: 0 };
}
console.log('[AI-PIPELINE] Completing underrepresented phases:', underrepresentedPhases.map((p: any) => p.id).join(', '));
if (underrepresentedPhases.length === 0) return { completed: 0, failed: 0 };
for (const phase of underrepresentedPhases) {
const result = await this.completePhaseWithSemanticSearchAndAI(context, phase, toolsData, pipelineStart);
const result = await this.completePhaseWithSemanticSearchAndAI(context, phase, toolsData, pipelineStart, toolsDataHash);
if (result.success) completedTasks++; else failedTasks++;
await this.delay(this.config.microTaskDelay);
}
@ -558,13 +519,12 @@ class AIPipeline {
context: PipelineContext,
phase: any,
toolsData: any,
pipelineStart: number
pipelineStart: number,
toolsDataHash: string
): Promise<MicroTaskResult> {
const phaseStart = Date.now();
const phaseQuery = `forensic ${phase.name.toLowerCase()} tools methods`;
console.log('[AI-PIPELINE] Phase completion for:', phase.id);
try {
const phaseResults = await embeddingsService.findSimilar(phaseQuery, 20, 0.2);
@ -574,6 +534,7 @@ class AIPipeline {
0.2,
phaseStart,
{
toolsDataHash: toolsDataHash,
phaseId: phase.id,
phaseName: phase.name,
completionPurpose: 'underrepresented-phase-enhancement'
@ -581,7 +542,6 @@ class AIPipeline {
);
if (phaseResults.length === 0) {
console.log('[AI-PIPELINE] No semantic results for phase:', phase.id);
return {
taskType: 'phase-completion',
content: '',
@ -613,7 +573,6 @@ class AIPipeline {
.slice(0, 2);
if (phaseTools.length === 0) {
console.log('[AI-PIPELINE] No suitable tools for phase completion:', phase.id);
return {
taskType: 'phase-completion',
content: '',
@ -623,10 +582,9 @@ class AIPipeline {
}
const selectionPrompt = getPrompt('generatePhaseCompletionPrompt', context.userQuery, phase, phaseTools, phaseConcepts);
const selectionResult = await this.callMicroTaskAI(selectionPrompt, context, 800, 'phase-completion-selection');
const selectionResult = await this.callMicroTaskAI(selectionPrompt, context, 'phase-completion-selection');
if (!selectionResult.success) {
console.error('[AI-PIPELINE] Phase completion selection failed for:', phase.id);
return {
taskType: 'phase-completion',
content: '',
@ -648,7 +606,6 @@ class AIPipeline {
.slice(0, 2);
if (validTools.length === 0) {
console.log('[AI-PIPELINE] No valid tools selected for phase completion:', phase.id);
return {
taskType: 'phase-completion',
content: selection.completionReasoning || '',
@ -660,8 +617,6 @@ class AIPipeline {
const actualToolsAdded = validTools.map(tool => tool.name);
for (const tool of validTools) {
console.log('[AI-PIPELINE] Generating AI reasoning for phase completion tool:', tool.name);
const reasoningPrompt = getPrompt(
'phaseCompletionReasoning',
context.userQuery,
@ -671,7 +626,7 @@ class AIPipeline {
selection.completionReasoning || 'Nachergänzung zur Vervollständigung der Phasenabdeckung'
);
const reasoningResult = await this.callMicroTaskAI(reasoningPrompt, context, 400, 'phase-completion-reasoning');
const reasoningResult = await this.callMicroTaskAI(reasoningPrompt, context, 'phase-completion-reasoning');
let detailedJustification: string;
let moderatedTaskRelevance = 75;
@ -695,8 +650,6 @@ class AIPipeline {
moderatedTaskRelevance,
dynamicLimitations
);
console.log('[AI-PIPELINE] Added phase completion tool with AI reasoning:', tool.name);
}
auditService.addPhaseCompletion(
@ -705,6 +658,7 @@ class AIPipeline {
selection.completionReasoning || `${actualToolsAdded.length} Tools für ${phase.name} hinzugefügt`,
phaseStart,
{
toolsDataHash: toolsDataHash,
toolsAdded: actualToolsAdded,
toolType: validTools[0]?.type,
semanticSimilarity: phaseResults.find(r => r.name === validTools[0]?.name)?.similarity,
@ -723,8 +677,6 @@ class AIPipeline {
};
} catch (error) {
console.error('[AI-PIPELINE] Phase completion failed for:', phase.id, error);
return {
taskType: 'phase-completion',
content: '',
@ -756,9 +708,7 @@ class AIPipeline {
}
private moderateTaskRelevance(taskRelevance: number): number {
if (typeof taskRelevance !== 'number') {
return 70;
}
if (typeof taskRelevance !== 'number') return 70;
let moderated = Math.min(taskRelevance, this.config.taskRelevanceModeration.maxInitialScore);
@ -770,13 +720,12 @@ class AIPipeline {
return Math.round(Math.min(moderated, this.config.taskRelevanceModeration.maxInitialScore));
}
private async analyzeScenario(context: PipelineContext, pipelineStart: number): Promise<MicroTaskResult> {
console.log('[AI-PIPELINE] Micro-task: Scenario analysis');
private async analyzeScenario(context: PipelineContext, pipelineStart: number, toolsDataHash: string): Promise<MicroTaskResult> {
const taskStart = Date.now();
const isWorkflow = context.mode === 'workflow';
const prompt = getPrompt('scenarioAnalysis', isWorkflow, context.userQuery);
const result = await this.callMicroTaskAI(prompt, context, 400, 'scenario-analysis');
const result = await this.callMicroTaskAI(prompt, context, 'scenario-analysis');
if (result.success) {
if (isWorkflow) {
@ -801,6 +750,7 @@ class AIPipeline {
`Analysierte ${isWorkflow ? 'Szenario' : 'Problem'} basierend auf Nutzereingabe: "${context.userQuery.slice(0, 100)}..." - Identifizierte Kernaspekte und Herausforderungen für forensische Untersuchung`,
taskStart,
{
toolsDataHash: toolsDataHash,
microTaskType: 'scenario-analysis',
analysisType: isWorkflow ? 'scenario' : 'problem',
contentLength: result.content.length,
@ -814,13 +764,12 @@ class AIPipeline {
return result;
}
private async generateApproach(context: PipelineContext, pipelineStart: number): Promise<MicroTaskResult> {
console.log('[AI-PIPELINE] Micro-task: Investigation approach');
private async generateApproach(context: PipelineContext, pipelineStart: number, toolsDataHash: string): Promise<MicroTaskResult> {
const taskStart = Date.now();
const isWorkflow = context.mode === 'workflow';
const prompt = getPrompt('investigationApproach', isWorkflow, context.userQuery);
const result = await this.callMicroTaskAI(prompt, context, 400, 'investigation-approach');
const result = await this.callMicroTaskAI(prompt, context, 'investigation-approach');
if (result.success) {
context.investigationApproach = result.content;
@ -840,6 +789,7 @@ class AIPipeline {
`Entwickelte ${isWorkflow ? 'Untersuchungs' : 'Lösungs'}ansatz unter Berücksichtigung der Szenario-Analyse - Strukturierte Herangehensweise für forensische Methodik`,
taskStart,
{
toolsDataHash: toolsDataHash,
microTaskType: 'investigation-approach',
approachType: isWorkflow ? 'investigation' : 'solution',
contentLength: result.content.length,
@ -854,13 +804,12 @@ class AIPipeline {
return result;
}
private async generateCriticalConsiderations(context: PipelineContext, pipelineStart: number): Promise<MicroTaskResult> {
console.log('[AI-PIPELINE] Micro-task: Critical considerations');
private async generateCriticalConsiderations(context: PipelineContext, pipelineStart: number, toolsDataHash: string): Promise<MicroTaskResult> {
const taskStart = Date.now();
const isWorkflow = context.mode === 'workflow';
const prompt = getPrompt('criticalConsiderations', isWorkflow, context.userQuery);
const result = await this.callMicroTaskAI(prompt, context, 350, 'critical-considerations');
const result = await this.callMicroTaskAI(prompt, context, 'critical-considerations');
if (result.success) {
context.criticalConsiderations = result.content;
@ -880,6 +829,7 @@ class AIPipeline {
'Identifizierte kritische Überlegungen für forensische Untersuchung - Berücksichtigung von Beweissicherung, Chain of Custody und methodischen Herausforderungen',
taskStart,
{
toolsDataHash: toolsDataHash,
microTaskType: 'critical-considerations',
contentLength: result.content.length,
decisionBasis: 'ai-analysis',
@ -896,9 +846,9 @@ class AIPipeline {
context: PipelineContext,
tool: any,
rank: number,
pipelineStart: number
pipelineStart: number,
toolsDataHash: string
): Promise<MicroTaskResult> {
console.log('[AI-PIPELINE] Micro-task: Tool evaluation for:', tool.name);
const taskStart = Date.now();
const existingSelection = context.selectedTools?.find((st: any) => st.tool && st.tool.name === tool.name);
const originalTaskRelevance = existingSelection?.taskRelevance || 70;
@ -906,7 +856,7 @@ class AIPipeline {
const priority = this.derivePriorityFromScore(moderatedTaskRelevance);
const prompt = getPrompt('toolEvaluation', context.userQuery, tool, rank, moderatedTaskRelevance);
const result = await this.callMicroTaskAI(prompt, context, 1000, 'tool-evaluation');
const result = await this.callMicroTaskAI(prompt, context, 'tool-evaluation');
if (result.success) {
const evaluation = JSONParser.safeParseJSON(result.content, {
@ -942,6 +892,7 @@ class AIPipeline {
`Bewertete Tool "${tool.name}" (Rang ${rank}) - Analysierte Eignung für spezifische Aufgabenstellung mit Fokus auf praktische Anwendbarkeit und methodische Integration`,
taskStart,
{
toolsDataHash: toolsDataHash,
microTaskType: 'tool-evaluation',
toolName: tool.name,
toolType: tool.type,
@ -964,8 +915,7 @@ class AIPipeline {
return result;
}
private async selectBackgroundKnowledge(context: PipelineContext, pipelineStart: number): Promise<MicroTaskResult> {
console.log('[AI-PIPELINE] Micro-task: Background knowledge selection');
private async selectBackgroundKnowledge(context: PipelineContext, pipelineStart: number, toolsDataHash: string): Promise<MicroTaskResult> {
const taskStart = Date.now();
const availableConcepts = context.filteredData.concepts;
@ -980,7 +930,7 @@ class AIPipeline {
const selectedToolNames = context.selectedTools?.map((st: any) => st.tool && st.tool.name).filter(Boolean) || [];
const prompt = getPrompt('backgroundKnowledgeSelection', context.userQuery, context.mode, selectedToolNames, availableConcepts);
const result = await this.callMicroTaskAI(prompt, context, 700, 'background-knowledge');
const result = await this.callMicroTaskAI(prompt, context, 'background-knowledge');
if (result.success) {
const selections = JSONParser.safeParseJSON(result.content, []);
@ -1017,6 +967,7 @@ class AIPipeline {
finalConfidence,
taskStart,
{
toolsDataHash: toolsDataHash,
microTaskType: 'background-knowledge',
availableConceptsCount: availableConcepts.length,
selectedConceptsCount: context.backgroundKnowledge.length,
@ -1041,32 +992,25 @@ class AIPipeline {
): number {
let bonus = 0;
if (selectedKnowledge.length > 0) {
bonus += 10;
}
if (selectedKnowledge.length > 0) bonus += 10;
const ratio = selectedKnowledge.length / availableConcepts.length;
if (ratio >= 0.1 && ratio <= 0.3) {
bonus += 15;
}
if (ratio >= 0.1 && ratio <= 0.3) bonus += 15;
const hasGoodReasonings = selectedKnowledge.some(bk =>
bk.relevance && bk.relevance.length > 30
);
if (hasGoodReasonings) {
bonus += 10;
}
if (hasGoodReasonings) bonus += 10;
return bonus;
}
private async generateFinalRecommendations(context: PipelineContext, pipelineStart: number): Promise<MicroTaskResult> {
console.log('[AI-PIPELINE] Micro-task: Final recommendations');
private async generateFinalRecommendations(context: PipelineContext, pipelineStart: number, toolsDataHash: string): Promise<MicroTaskResult> {
const taskStart = Date.now();
const selectedToolNames = context.selectedTools?.map((st: any) => st.tool && st.tool.name).filter(Boolean) || [];
const prompt = getPrompt('finalRecommendations', context.mode === 'workflow', context.userQuery, selectedToolNames);
const result = await this.callMicroTaskAI(prompt, context, 350, 'final-recommendations');
const result = await this.callMicroTaskAI(prompt, context, 'final-recommendations');
if (result.success) {
const confidence = auditService.calculateAIResponseConfidence(
@ -1086,6 +1030,7 @@ class AIPipeline {
`Generierte abschließende ${context.mode}-Empfehlungen basierend auf ausgewählten ${selectedToolNames.length} Tools - Synthese aller Analyseschritte zu kohärenter Handlungsempfehlung`,
taskStart,
{
toolsDataHash: toolsDataHash,
microTaskType: 'final-recommendations',
mode: context.mode,
selectedToolsCount: selectedToolNames.length,
@ -1105,30 +1050,17 @@ class AIPipeline {
private calculateSynthesisBonus(selectedToolNames: string[], context: PipelineContext): number {
let bonus = 0;
if (selectedToolNames.length >= 3) {
bonus += 10;
}
if (context.backgroundKnowledge && context.backgroundKnowledge.length > 0) {
bonus += 10;
}
if (context.scenarioAnalysis || context.problemAnalysis) {
bonus += 5;
}
if (context.investigationApproach) {
bonus += 5;
}
if (selectedToolNames.length >= 3) bonus += 10;
if (context.backgroundKnowledge && context.backgroundKnowledge.length > 0) bonus += 10;
if (context.scenarioAnalysis || context.problemAnalysis) bonus += 5;
if (context.investigationApproach) bonus += 5;
return bonus;
}
private buildRecommendation(context: PipelineContext, mode: string, finalContent: string): any {
private buildRecommendation(context: PipelineContext, mode: string, finalContent: string, toolsDataHash: string): any {
const isWorkflow = mode === 'workflow';
console.log('[AI-PIPELINE] Building recommendation for', mode, 'mode with', context.selectedTools?.length || 0, 'tools');
const base = {
[isWorkflow ? 'scenario_analysis' : 'problem_analysis']:
isWorkflow ? context.scenarioAnalysis : context.problemAnalysis,
@ -1161,6 +1093,7 @@ class AIPipeline {
confidence,
Date.now(),
{
toolsDataHash: toolsDataHash,
phase: st.phase,
priority: st.priority,
toolType: st.tool.type,
@ -1205,6 +1138,7 @@ class AIPipeline {
confidence,
Date.now(),
{
toolsDataHash: toolsDataHash,
rank: st.tool.evaluation?.rank || 1,
toolType: st.tool.type,
moderatedTaskRelevance: st.taskRelevance
@ -1237,7 +1171,6 @@ class AIPipeline {
private async callMicroTaskAI(
prompt: string,
context: PipelineContext,
maxTokens: number = 500,
taskType: string = 'micro-task'
): Promise<MicroTaskResult> {
const startTime = Date.now();
@ -1245,15 +1178,11 @@ class AIPipeline {
let contextPrompt = prompt;
if (context.contextHistory.length > 0) {
const contextSection = `BISHERIGE ANALYSE:\n${context.contextHistory.join('\n\n')}\n\nAKTUELLE AUFGABE:\n`;
const combinedPrompt = contextSection + prompt;
if (aiService.estimateTokens(combinedPrompt) <= this.config.maxPromptTokens) {
contextPrompt = combinedPrompt;
}
contextPrompt = contextSection + prompt;
}
try {
const response = await aiService.callMicroTaskAI(contextPrompt, maxTokens);
const response = await aiService.callMicroTaskAI(contextPrompt);
return {
taskType,
@ -1280,12 +1209,10 @@ class AIPipeline {
context.contextHistory.push(newEntry);
context.currentContextLength += entryTokens;
/*while (context.currentContextLength > this.config.maxContextTokens && context.contextHistory.length > 1) {
if (context.contextHistory.length > 1) {
const removed = context.contextHistory.shift()!;
context.currentContextLength -= aiService.estimateTokens(removed);
}*/
const removed = context.contextHistory.shift()!;
context.currentContextLength -= aiService.estimateTokens(removed);
}
}
private addToolToSelection(

View File

@ -8,7 +8,6 @@ export interface AIServiceConfig {
}
export interface AICallOptions {
maxTokens?: number;
temperature?: number;
timeout?: number;
}
@ -34,9 +33,8 @@ class AIService {
};
this.defaultOptions = {
maxTokens: 32768,
temperature: 0.3,
timeout: 30000
timeout: 60000
};
console.log('[AI-SERVICE] Initialized with model:', this.config.model);
@ -55,7 +53,6 @@ class AIService {
console.log('[AI-SERVICE] Making API call:', {
promptLength: prompt.length,
maxTokens: mergedOptions.maxTokens,
temperature: mergedOptions.temperature
});
@ -70,7 +67,6 @@ class AIService {
const requestBody = {
model: this.config.model,
messages: [{ role: 'user', content: prompt }],
max_tokens: mergedOptions.maxTokens,
temperature: mergedOptions.temperature
};
@ -122,11 +118,10 @@ class AIService {
}
}
async callMicroTaskAI(prompt: string, maxTokens: number = 500): Promise<AIResponse> {
async callMicroTaskAI(prompt: string): Promise<AIResponse> {
return this.callAI(prompt, {
maxTokens,
temperature: 0.3,
timeout: 15000
timeout: 30000
});
}
@ -134,14 +129,6 @@ class AIService {
return Math.ceil(text.length / 4);
}
validatePromptLength(prompt: string, maxTokens: number = 35000): void {
const estimatedTokens = this.estimateTokens(prompt);
if (estimatedTokens > maxTokens) {
console.warn('[AI-SERVICE] WARNING: Prompt may exceed model limits:', estimatedTokens);
throw new Error(`Prompt too long: ${estimatedTokens} tokens (max: ${maxTokens})`);
}
}
getConfig(): AIServiceConfig {
return { ...this.config };
}

View File

@ -85,7 +85,7 @@ let cachedData: ToolsData | null = null;
let cachedRandomizedData: ToolsData | null = null;
let cachedCompressedData: EnhancedCompressedToolsData | null = null;
let lastRandomizationDate: string | null = null;
let dataVersion: string | null = null;
let cachedToolsHash: string | null = null;
function seededRandom(seed: number): () => number {
let x = Math.sin(seed) * 10000;
@ -110,17 +110,6 @@ function shuffleArray<T>(array: T[], randomFn: () => number): T[] {
return shuffled;
}
function generateDataVersion(data: any): string {
const str = JSON.stringify(data, Object.keys(data).sort());
let hash = 0;
for (let i = 0; i < str.length; i++) {
const char = str.charCodeAt(i);
hash = ((hash << 5) - hash) + char;
hash = hash & hash;
}
return Math.abs(hash).toString(36);
}
async function loadRawData(): Promise<ToolsData> {
if (!cachedData) {
const yamlPath = path.join(process.cwd(), 'src/data/tools.yaml');
@ -142,8 +131,9 @@ async function loadRawData(): Promise<ToolsData> {
};
}
dataVersion = generateDataVersion(cachedData);
console.log(`[DATA SERVICE] Loaded enhanced data version: ${dataVersion}`);
const { getToolsFileHash } = await import('./hashUtils.js');
cachedToolsHash = await getToolsFileHash();
console.log(`[DATA SERVICE] Loaded data with hash: ${cachedToolsHash.slice(0, 12)}...`);
} catch (error) {
if (error instanceof z.ZodError) {
@ -234,7 +224,7 @@ export async function getCompressedToolsDataForAI(): Promise<EnhancedCompressedT
}
export function getDataVersion(): string | null {
return dataVersion;
return cachedToolsHash;
}
export function clearCache(): void {
@ -242,7 +232,7 @@ export function clearCache(): void {
cachedRandomizedData = null;
cachedCompressedData = null;
lastRandomizationDate = null;
dataVersion = null;
cachedToolsHash = null;
console.log('[DATA SERVICE] Enhanced cache cleared');
}

View File

@ -100,7 +100,8 @@ class EmbeddingsService {
await fs.mkdir(path.dirname(this.embeddingsPath), { recursive: true });
const toolsData = await getCompressedToolsDataForAI();
const currentDataHash = await this.hashToolsFile();
const { getToolsFileHash } = await import('./hashUtils.js');
const currentDataHash = await getToolsFileHash();
const existing = await this.loadEmbeddings();
@ -129,12 +130,6 @@ class EmbeddingsService {
}
}
private async hashToolsFile(): Promise<string> {
  // Fingerprint the raw tools.yaml bytes so stale embeddings can be detected
  // when the underlying data file changes.
  const yamlFile = path.join(process.cwd(), 'src', 'data', 'tools.yaml');
  const hasher = crypto.createHash('sha256');
  hasher.update(await fs.readFile(yamlFile, 'utf8'));
  return hasher.digest('hex');
}
private async loadEmbeddings(): Promise<EmbeddingsDatabase | null> {
try {
const data = await fs.readFile(this.embeddingsPath, 'utf8');

20
src/utils/hashUtils.ts Normal file
View File

@ -0,0 +1,20 @@
// src/utils/hashUtils.ts
import crypto from 'crypto';
import { promises as fs } from 'fs';
import { readFileSync } from 'fs';
import path from 'path';
export async function getToolsFileHash(): Promise<string> {
  // SHA-256 of the raw tools.yaml text — the canonical data-version
  // fingerprint shared by the data service and embeddings service.
  const toolsYaml = path.join(process.cwd(), 'src', 'data', 'tools.yaml');
  const hasher = crypto.createHash('sha256');
  hasher.update(await fs.readFile(toolsYaml, 'utf8'));
  return hasher.digest('hex');
}
export function getToolsFileHashSync(): string | null {
  // Synchronous variant of getToolsFileHash for call sites that cannot await.
  // Returns the SHA-256 hex digest of tools.yaml, or null if the file cannot
  // be read.
  //
  // Fix: the original called require('fs'), but this file is an ES module
  // (it is loaded via dynamic import('./hashUtils.js')), where require is
  // undefined — the resulting ReferenceError was swallowed by the catch and
  // the function unconditionally returned null. Use the statically imported
  // readFileSync instead.
  try {
    const file = path.join(process.cwd(), 'src', 'data', 'tools.yaml');
    const raw = readFileSync(file, 'utf8');
    return crypto.createHash('sha256').update(raw).digest('hex');
  } catch {
    return null;
  }
}

View File

@ -38,7 +38,6 @@ export interface SelectionContext {
export interface ToolSelectionResult {
selectedTools: any[];
selectedConcepts: any[];
//selectionMethod: string;
confidence: number;
}
@ -84,13 +83,11 @@ class ToolSelector {
domains: any[];
phases: any[];
'domain-agnostic-software': any[];
//selectionMethod: string;
}> {
console.log('[TOOL-SELECTOR] Getting intelligent candidates for query');
let candidateTools: any[] = [];
let candidateConcepts: any[] = [];
//let selectionMethod = 'unknown';
context.embeddingsSimilarities.clear();
@ -133,23 +130,19 @@ class ToolSelector {
if (similarTools.length >= this.config.embeddingsMinTools && reductionRatio <= this.config.embeddingsMaxReductionRatio) {
candidateTools = similarTools;
candidateConcepts = similarConcepts;
//selectionMethod = 'embeddings_candidates';
console.log('[TOOL-SELECTOR] Using embeddings filtering:', totalAvailableTools, '→', similarTools.length, 'tools');
} else {
console.log('[TOOL-SELECTOR] Embeddings filtering insufficient, using full dataset');
candidateTools = toolsData.tools;
candidateConcepts = toolsData.concepts;
//selectionMethod = 'full_dataset';
}
const selection = await this.performAISelection(
userQuery,
candidateTools,
candidateConcepts,
mode,
//selectionMethod,
context
);
@ -158,8 +151,7 @@ class ToolSelector {
concepts: selection.selectedConcepts,
domains: toolsData.domains,
phases: toolsData.phases,
'domain-agnostic-software': toolsData['domain-agnostic-software'],
//selectionMethod
'domain-agnostic-software': toolsData['domain-agnostic-software']
};
}
@ -206,8 +198,6 @@ class ToolSelector {
const basePrompt = getPrompt('toolSelection', mode, userQuery, this.config.maxSelectedItems);
const prompt = getPrompt('toolSelectionWithData', basePrompt, toolsToSend, conceptsToSend);
aiService.validatePromptLength(prompt);
console.log('[TOOL-SELECTOR] Sending to AI:',
toolsToSend.filter((t: any) => t.type === 'method').length, 'methods,',
toolsToSend.filter((t: any) => t.type === 'software').length, 'software,',
@ -215,7 +205,7 @@ class ToolSelector {
);
try {
const response = await aiService.callAI(prompt, { maxTokens: 32768 });
const response = await aiService.callAI(prompt);
const result = JSONParser.safeParseJSON(response.content, null);
if (!result || !Array.isArray(result.selectedTools) || !Array.isArray(result.selectedConcepts)) {
@ -256,7 +246,6 @@ class ToolSelector {
}
}
async selectToolsForPhase(
userQuery: string,
phase: any,
@ -278,7 +267,7 @@ class ToolSelector {
const prompt = getPrompt('phaseToolSelection', userQuery, phase, availableTools);
try {
const response = await aiService.callMicroTaskAI(prompt, 1000);
const response = await aiService.callMicroTaskAI(prompt);
const selections = JSONParser.safeParseJSON(response.content, []);
if (Array.isArray(selections)) {