cleanup, prompt centralization
@@ -470,13 +470,11 @@ class AIPipeline {
    pipelineStart: number,
    toolsDataHash: string
  ): Promise<{ completed: number; failed: number }> {
    // Evaluate ALL candidates handed over by the embeddings pre-filter.
    const candidates = context.filteredData.tools || [];
    if (!Array.isArray(candidates) || candidates.length === 0) {
      return { completed: completedTasks, failed: failedTasks };
    }

    // Evaluate every candidate (no slicing here)
    for (let i = 0; i < candidates.length; i++) {
      const evaluationResult = await this.evaluateSpecificTool(context, candidates[i], i + 1, pipelineStart, toolsDataHash);
      if (evaluationResult.success) completedTasks++; else failedTasks++;
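The loop above consumes a MicroTaskResult from evaluateSpecificTool (shown further down). Only the success and content fields are visible in this diff, so the sketch below is limited to those two; any further field would be an assumption.

    // Shape implied by usage in this diff; only the two visible fields are sketched.
    interface MicroTaskResult {
      success: boolean;  // drives the completed/failed counters in the loop above
      content: string;   // raw model output, later parsed with JSONParser.safeParseJSON
    }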
@@ -484,15 +482,12 @@ class AIPipeline {
      await this.delay(this.config.microTaskDelay);
    }

    // At this point, context.selectedTools may contain 0..N evaluated items (added by evaluateSpecificTool).
    // Now we sort them by AI-derived taskRelevance (after moderation) and keep ONLY the top 3 for the UI.
    if (Array.isArray(context.selectedTools) && context.selectedTools.length > 0) {
      context.selectedTools.sort((a: any, b: any) => {
        const ar = typeof a.taskRelevance === 'number' ? a.taskRelevance : -1;
        const br = typeof b.taskRelevance === 'number' ? b.taskRelevance : -1;
        if (br !== ar) return br - ar;

        // tie-breakers without domain heuristics:
        const aLen = (a.justification || '').length;
        const bLen = (b.justification || '').length;
        if (bLen !== aLen) return bLen - aLen;
@@ -502,7 +497,6 @@ class AIPipeline {
        return aRank - bRank;
      });

      // Keep top 3 only
      context.selectedTools = context.selectedTools.slice(0, 3);
    }

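For reference, the comparator in the sort above reads, end to end, roughly as below; the aRank/bRank definitions are cut by the hunk boundary, so this sketch assumes the original evaluation order is carried on each item as a numeric rank field, which the diff does not show being assigned.

    // Standalone sketch of the full three-stage comparator.
    type EvaluatedTool = { taskRelevance?: number; justification?: string; rank?: number };

    function compareEvaluatedTools(a: EvaluatedTool, b: EvaluatedTool): number {
      // 1. Higher AI-derived relevance first; items without a score sort last.
      const ar = typeof a.taskRelevance === 'number' ? a.taskRelevance : -1;
      const br = typeof b.taskRelevance === 'number' ? b.taskRelevance : -1;
      if (br !== ar) return br - ar;

      // 2. Longer justification first (the domain-neutral tie-breaker from the diff).
      const aLen = (a.justification || '').length;
      const bLen = (b.justification || '').length;
      if (bLen !== aLen) return bLen - aLen;

      // 3. Assumed fallback: preserve the original evaluation order (lower rank first).
      const aRank = typeof a.rank === 'number' ? a.rank : Number.MAX_SAFE_INTEGER;
      const bRank = typeof b.rank === 'number' ? b.rank : Number.MAX_SAFE_INTEGER;
      return aRank - bRank;
    }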
@@ -877,7 +871,6 @@ class AIPipeline {
  ): Promise<MicroTaskResult> {
    const taskStart = Date.now();

    // Build prompt WITHOUT any baseline score
    const prompt = getPrompt('toolEvaluation', context.userQuery, tool, rank);
    const result = await this.callMicroTaskAI(prompt, context, 'tool-evaluation');
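The commit title mentions prompt centralization, and the call above goes through getPrompt('toolEvaluation', ...) rather than an inline template, which suggests a keyed registry of template functions. A minimal sketch of such a module; the template wording and the JSON contract shown are illustrative assumptions, not the repository's actual prompt:

    // Hypothetical centralized prompt registry (names and wording assumed).
    const PROMPTS = {
      toolEvaluation: (userQuery: string, tool: { name: string; description?: string }, rank: number) =>
        `Evaluate candidate tool #${rank} for the user's task.\n` +
        `Task: ${userQuery}\n` +
        `Tool: ${tool.name}: ${tool.description ?? ''}\n` +
        `Reply with JSON only: {"taskRelevance": <number>, "justification": "..."}`,
    };

    export function getPrompt(key: keyof typeof PROMPTS, ...args: any[]): string {
      // Each entry is a pure template function, so prompts stay testable in one place.
      return (PROMPTS[key] as (...a: any[]) => string)(...args);
    }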
@@ -885,16 +878,13 @@ class AIPipeline {
      return result;
    }

    // Parse strictly; do NOT provide a default with a score.
    const evaluation = JSONParser.safeParseJSON(result.content, null);

    // Require a numeric score produced by the model; otherwise, don't add this tool.
    const aiProvided = evaluation && typeof evaluation.taskRelevance === 'number' && Number.isFinite(evaluation.taskRelevance)
      ? Math.round(evaluation.taskRelevance)
      : null;

    if (aiProvided === null) {
      // Log the malformed output but avoid injecting a synthetic score.
      auditService.addAIDecision(
        'tool-evaluation',
        prompt,
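JSONParser.safeParseJSON itself is outside this diff; the call above only relies on it returning the caller's fallback (null here) instead of throwing or inventing defaults. A minimal sketch under that assumption; the fence-stripping step is likewise an assumption about how the parser tolerates markdown-wrapped model output:

    // Hypothetical strict parser: any failure yields the fallback, never a
    // fabricated object (and therefore never a synthetic taskRelevance score).
    function safeParseJSON<T>(content: string, fallback: T): T {
      try {
        // Models often wrap JSON in markdown fences; strip them if present.
        const cleaned = content.trim().replace(/^```(?:json)?\s*/i, '').replace(/\s*```$/, '');
        return JSON.parse(cleaned) as T;
      } catch {
        return fallback;
      }
    }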
@@ -920,7 +910,6 @@ class AIPipeline {
    const moderatedTaskRelevance = this.moderateTaskRelevance(aiProvided);
    const priority = this.derivePriorityFromScore(moderatedTaskRelevance);

    // Keep original fields if present; coerce to strings/arrays safely.
    const detailed_explanation = String(evaluation?.detailed_explanation || '').trim();
    const implementation_approach = String(evaluation?.implementation_approach || '').trim();
    const pros = Array.isArray(evaluation?.pros) ? evaluation.pros : [];
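moderateTaskRelevance and derivePriorityFromScore are also outside the diff; the surrounding code only implies that the former tempers the raw model score and the latter maps the moderated score onto a priority label. One plausible sketch; the clamping bounds and thresholds below are invented for illustration:

    // Illustrative helpers; bounds and priority bands are assumptions.
    function moderateTaskRelevance(raw: number): number {
      // Clamp to 0..100 and round so out-of-range model output cannot dominate the sort.
      return Math.round(Math.min(100, Math.max(0, raw)));
    }

    function derivePriorityFromScore(score: number): 'high' | 'medium' | 'low' {
      if (score >= 80) return 'high';
      if (score >= 50) return 'medium';
      return 'low';
    }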