From d068d9b6f68255dfc8f8cbad165fc0a0b6f8d64b Mon Sep 17 00:00:00 2001
From: Matt
Date: Tue, 3 Feb 2026 10:46:38 +0100
Subject: [PATCH] Improve AI filtering error handling and visibility

- Add listAvailableModels() and validateModel() to openai.ts
- Improve testOpenAIConnection() to test configured model
- Add checkAIStatus endpoint to filtering router
- Add pre-execution AI config check in executeRules
- Improve error messages in AI filtering service (rate limit, quota, etc.)
- Add AI status warning banner on round detail page for filtering rounds

Now admins get clear errors when AI is misconfigured, instead of projects
being silently flagged for manual review.

Co-Authored-By: Claude Opus 4.5
---
 src/app/(admin)/admin/rounds/[id]/page.tsx |  20 ++++
 src/lib/openai.ts                          | 107 +++++++++++++++++++--
 src/server/routers/filtering.ts            |  64 ++++++++++++
 src/server/services/ai-filtering.ts        |  31 +++++-
 4 files changed, 214 insertions(+), 8 deletions(-)

diff --git a/src/app/(admin)/admin/rounds/[id]/page.tsx b/src/app/(admin)/admin/rounds/[id]/page.tsx
index 3ff016b..4881c60 100644
--- a/src/app/(admin)/admin/rounds/[id]/page.tsx
+++ b/src/app/(admin)/admin/rounds/[id]/page.tsx
@@ -84,6 +84,10 @@ function RoundDetailContent({ roundId }: { roundId: string }) {
     { roundId },
     { enabled: isFilteringRound }
   )
+  const { data: aiStatus } = trpc.filtering.checkAIStatus.useQuery(
+    { roundId },
+    { enabled: isFilteringRound }
+  )
 
   const utils = trpc.useUtils()
   const updateStatus = trpc.round.updateStatus.useMutation({
@@ -485,6 +489,22 @@ function RoundDetailContent({ roundId }: { roundId: string }) {
 
+      {/* AI Status Warning */}
+      {aiStatus?.hasAIRules && !aiStatus?.configured && (
+        <div>
+          <div>
+            <h3>AI Configuration Required</h3>
+            <p>
+              {aiStatus.error || 'AI screening rules require OpenAI to be configured.'}
+            </p>
+          </div>
+        </div>
+      )}
+
       {/* Stats */}
       {filteringStats && filteringStats.total > 0 ? (
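Note: the warning banner above reads the result of the checkAIStatus query
added in src/server/routers/filtering.ts below. No explicit result type is
declared in this patch; a sketch of the shape, with field names taken from
the router code:

    // Sketch only — not declared anywhere in the patch.
    type AIStatusResult = {
      hasAIRules: boolean   // round has active AI_SCREENING rules
      configured: boolean   // API key present and the test request succeeded
      error: string | null  // reason shown in the banner when not configured
      model?: string        // modelTested reported by testOpenAIConnection()
    }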
diff --git a/src/lib/openai.ts b/src/lib/openai.ts
index 8682a50..81c8b0c 100644
--- a/src/lib/openai.ts
+++ b/src/lib/openai.ts
@@ -66,12 +66,12 @@ export async function isOpenAIConfigured(): Promise<boolean> {
 }
 
 /**
- * Test OpenAI connection
+ * List available models from OpenAI API
  */
-export async function testOpenAIConnection(): Promise<{
+export async function listAvailableModels(): Promise<{
   success: boolean
+  models?: string[]
   error?: string
-  model?: string
 }> {
   try {
     const client = await getOpenAI()
@@ -83,9 +83,90 @@ export async function testOpenAIConnection(): Promise<{
       }
     }
 
-    // Simple test request
+    const response = await client.models.list()
+    const chatModels = response.data
+      .filter((m) => m.id.includes('gpt') || m.id.includes('o1') || m.id.includes('o3') || m.id.includes('o4'))
+      .map((m) => m.id)
+      .sort()
+
+    return {
+      success: true,
+      models: chatModels,
+    }
+  } catch (error) {
+    return {
+      success: false,
+      error: error instanceof Error ? error.message : 'Unknown error',
+    }
+  }
+}
+
+/**
+ * Validate that a specific model is available
+ */
+export async function validateModel(modelId: string): Promise<{
+  valid: boolean
+  error?: string
+}> {
+  try {
+    const client = await getOpenAI()
+
+    if (!client) {
+      return {
+        valid: false,
+        error: 'OpenAI API key not configured',
+      }
+    }
+
+    // Try a minimal completion with the model
+    await client.chat.completions.create({
+      model: modelId,
+      messages: [{ role: 'user', content: 'test' }],
+      max_tokens: 1,
+    })
+
+    return { valid: true }
+  } catch (error) {
+    const message = error instanceof Error ? error.message : 'Unknown error'
+    // Check for specific model errors
+    if (message.includes('does not exist') || message.includes('model_not_found')) {
+      return {
+        valid: false,
+        error: `Model "${modelId}" is not available with your API key`,
+      }
+    }
+    return {
+      valid: false,
+      error: message,
+    }
+  }
+}
+
+/**
+ * Test OpenAI connection with the configured model
+ */
+export async function testOpenAIConnection(): Promise<{
+  success: boolean
+  error?: string
+  model?: string
+  modelTested?: string
+}> {
+  try {
+    const client = await getOpenAI()
+
+    if (!client) {
+      return {
+        success: false,
+        error: 'OpenAI API key not configured',
+      }
+    }
+
+    // Get the configured model
+    const configuredModel = await getConfiguredModel()
+
+    // Test with the configured model
     const response = await client.chat.completions.create({
-      model: 'gpt-4o-mini',
+      model: configuredModel,
       messages: [{ role: 'user', content: 'Hello' }],
       max_tokens: 5,
     })
@@ -93,11 +174,25 @@ export async function testOpenAIConnection(): Promise<{
     return {
       success: true,
       model: response.model,
+      modelTested: configuredModel,
     }
   } catch (error) {
+    const message = error instanceof Error ? error.message : 'Unknown error'
+    const configuredModel = await getConfiguredModel()
+
+    // Check for model-specific errors
+    if (message.includes('does not exist') || message.includes('model_not_found')) {
+      return {
+        success: false,
+        error: `Model "${configuredModel}" is not available. Check Settings → AI to select a valid model.`,
+        modelTested: configuredModel,
+      }
+    }
+
     return {
       success: false,
-      error: error instanceof Error ? error.message : 'Unknown error',
+      error: message,
+      modelTested: configuredModel,
     }
   }
 }
diff --git a/src/server/routers/filtering.ts b/src/server/routers/filtering.ts
index 086a36f..e2c9888 100644
--- a/src/server/routers/filtering.ts
+++ b/src/server/routers/filtering.ts
@@ -4,8 +4,48 @@ import { Prisma } from '@prisma/client'
 import { router, adminProcedure, protectedProcedure } from '../trpc'
 import { executeFilteringRules } from '../services/ai-filtering'
 import { logAudit } from '../utils/audit'
+import { isOpenAIConfigured, testOpenAIConnection } from '@/lib/openai'
 
 export const filteringRouter = router({
+  /**
+   * Check if AI is configured and ready for filtering
+   */
+  checkAIStatus: protectedProcedure
+    .input(z.object({ roundId: z.string() }))
+    .query(async ({ ctx, input }) => {
+      // Check if round has AI rules
+      const aiRules = await ctx.prisma.filteringRule.count({
+        where: {
+          roundId: input.roundId,
+          ruleType: 'AI_SCREENING',
+          isActive: true,
+        },
+      })
+
+      if (aiRules === 0) {
+        return { hasAIRules: false, configured: true, error: null }
+      }
+
+      // Check if OpenAI is configured
+      const configured = await isOpenAIConfigured()
+      if (!configured) {
+        return {
+          hasAIRules: true,
+          configured: false,
+          error: 'OpenAI API key not configured',
+        }
+      }
+
+      // Test the connection
+      const testResult = await testOpenAIConnection()
+      return {
+        hasAIRules: true,
+        configured: testResult.success,
+        error: testResult.error || null,
+        model: testResult.modelTested,
+      }
+    }),
+
   /**
    * Get filtering rules for a round
    */
@@ -146,6 +186,30 @@ export const filteringRouter = router({
         })
       }
 
+      // Check if any AI_SCREENING rules exist
+      const hasAIRules = rules.some((r) => r.ruleType === 'AI_SCREENING' && r.isActive)
+
+      if (hasAIRules) {
+        // Verify OpenAI is configured before proceeding
+        const aiConfigured = await isOpenAIConfigured()
+        if (!aiConfigured) {
+          throw new TRPCError({
+            code: 'PRECONDITION_FAILED',
+            message:
+              'AI screening rules require OpenAI to be configured. Go to Settings → AI to configure your API key.',
+          })
+        }
+
+        // Also verify the model works
+        const testResult = await testOpenAIConnection()
+        if (!testResult.success) {
+          throw new TRPCError({
+            code: 'PRECONDITION_FAILED',
+            message: `AI configuration error: ${testResult.error}. Go to Settings → AI to fix.`,
+          })
+        }
+      }
+
       // Get projects in this round
       const roundProjectEntries = await ctx.prisma.roundProject.findMany({
         where: { roundId: input.roundId },
diff --git a/src/server/services/ai-filtering.ts b/src/server/services/ai-filtering.ts
index 88c903f..55ed79c 100644
--- a/src/server/services/ai-filtering.ts
+++ b/src/server/services/ai-filtering.ts
@@ -383,13 +383,40 @@ Return your evaluation as JSON.`
       }
     }
   } catch (error) {
-    // OpenAI error — flag all for manual review
+    // OpenAI error — flag all for manual review with specific error info
     console.error('[AI Filtering] OpenAI API error:', error)
+
+    // Extract meaningful error message
+    let errorType = 'unknown_error'
+    let errorDetail = 'Unknown error occurred'
+
+    if (error instanceof Error) {
+      const message = error.message.toLowerCase()
+      if (message.includes('rate_limit') || message.includes('rate limit')) {
+        errorType = 'rate_limit'
+        errorDetail = 'OpenAI rate limit exceeded. Try again in a few minutes.'
+      } else if (message.includes('model') && (message.includes('not found') || message.includes('does not exist'))) {
+        errorType = 'model_not_found'
+        errorDetail = 'The configured AI model is not available. Check Settings → AI.'
+      } else if (message.includes('insufficient_quota') || message.includes('quota')) {
+        errorType = 'quota_exceeded'
+        errorDetail = 'OpenAI API quota exceeded. Check your billing settings.'
+      } else if (message.includes('invalid_api_key') || message.includes('unauthorized')) {
+        errorType = 'invalid_api_key'
+        errorDetail = 'Invalid OpenAI API key. Check Settings → AI.'
+      } else if (message.includes('context_length') || message.includes('token')) {
+        errorType = 'context_length'
+        errorDetail = 'Request too large. Try with fewer projects or shorter descriptions.'
+      } else {
+        errorDetail = error.message
+      }
+    }
+
     for (const p of projects) {
       results.set(p.id, {
         meetsCriteria: false,
         confidence: 0,
-        reasoning: `AI screening error — flagged for manual review`,
+        reasoning: `AI screening error (${errorType}): ${errorDetail}`,
         qualityScore: 5,
         spamRisk: false,
       })
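Note: listAvailableModels() and validateModel() are exported from
src/lib/openai.ts but have no call site in this patch. A minimal sketch of
an intended caller (saveModelSetting is hypothetical; the two imports are
the real exports added above):

    import { listAvailableModels, validateModel } from '@/lib/openai'

    // Hypothetical settings handler: reject an invalid model before saving it.
    export async function saveModelSetting(modelId: string): Promise<void> {
      const check = await validateModel(modelId)
      if (!check.valid) {
        // Make the failure actionable by listing what the key can access.
        const listed = await listAvailableModels()
        const hint =
          listed.success && listed.models && listed.models.length > 0
            ? ` Available: ${listed.models.join(', ')}`
            : ''
        throw new Error(`${check.error ?? 'Model validation failed.'}${hint}`)
      }
      // Persist modelId to the settings store here (not shown).
    }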