Add detailed logging to AI filtering for debugging

Added console logging throughout the AI screening process to help
diagnose issues when all projects get flagged. Logs model being used,
batch processing, token usage, and actual error messages.

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
Matt 2026-02-03 10:36:51 +01:00
parent c063f5bba3
commit d45eccea47
1 changed file with 16 additions and 3 deletions

View File

@ -274,6 +274,7 @@ export async function executeAIScreening(
const openai = await getOpenAI()
if (!openai) {
// No OpenAI configured — flag all for manual review
console.warn('[AI Filtering] OpenAI client not available - API key may not be configured')
for (const p of projects) {
results.set(p.id, {
meetsCriteria: false,
@ -287,6 +288,7 @@ export async function executeAIScreening(
}
const model = await getConfiguredModel()
console.log(`[AI Filtering] Using model: ${model} for ${projects.length} projects`)
// Anonymize project data — use numeric IDs
const anonymizedProjects = projects.map((p, i) => ({
@ -319,6 +321,8 @@ ${JSON.stringify(
Return your evaluation as JSON.`
console.log(`[AI Filtering] Processing batch ${Math.floor(i / batchSize) + 1}, ${batch.length} projects`)
const response = await openai.chat.completions.create({
model,
messages: [
@ -330,6 +334,8 @@ Return your evaluation as JSON.`
max_tokens: 4000,
})
console.log(`[AI Filtering] Batch completed, usage: ${response.usage?.total_tokens} tokens`)
const content = response.choices[0]?.message?.content
if (content) {
try {
@ -344,6 +350,8 @@ Return your evaluation as JSON.`
}>
}
console.log(`[AI Filtering] Parsed ${parsed.projects?.length || 0} results from response`)
for (const result of parsed.projects) {
const anon = batch.find((b) => b.project_id === result.project_id)
if (anon) {
@ -356,8 +364,10 @@ Return your evaluation as JSON.`
})
}
}
} catch {
} catch (parseError) {
// Parse error — flag batch for manual review
console.error('[AI Filtering] JSON parse error:', parseError)
console.error('[AI Filtering] Raw response content:', content.slice(0, 500))
for (const item of batch) {
results.set(item.real_id, {
meetsCriteria: false,
@ -368,15 +378,18 @@ Return your evaluation as JSON.`
})
}
}
} else {
console.error('[AI Filtering] Empty response content from API')
}
}
} catch {
} catch (error) {
// OpenAI error — flag all for manual review
console.error('[AI Filtering] OpenAI API error:', error)
for (const p of projects) {
results.set(p.id, {
meetsCriteria: false,
confidence: 0,
reasoning: 'AI screening error — flagged for manual review',
reasoning: `AI screening error — flagged for manual review`,
qualityScore: 5,
spamRisk: false,
})