Improve AI filtering error handling and visibility

- Add listAvailableModels() and validateModel() to openai.ts
- Improve testOpenAIConnection() to test configured model
- Add checkAIStatus endpoint to filtering router
- Add pre-execution AI config check in executeRules
- Improve error messages in AI filtering service (rate limit, quota, etc.)
- Add AI status warning banner on round detail page for filtering rounds

Now admins get clear errors when AI is misconfigured, instead of projects being silently flagged for manual review.

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
Matt · 2026-02-03 10:46:38 +01:00
commit d068d9b6f6 · parent d45eccea47
4 changed files with 214 additions and 8 deletions

File 1/4: round detail page

@@ -84,6 +84,10 @@ function RoundDetailContent({ roundId }: { roundId: string }) {
{ roundId },
{ enabled: isFilteringRound }
)
const { data: aiStatus } = trpc.filtering.checkAIStatus.useQuery(
{ roundId },
{ enabled: isFilteringRound }
)
const utils = trpc.useUtils()
const updateStatus = trpc.round.updateStatus.useMutation({
@@ -485,6 +489,22 @@ function RoundDetailContent({ roundId }: { roundId: string }) {
</div>
</CardHeader>
<CardContent className="space-y-4">
{/* AI Status Warning */}
{aiStatus?.hasAIRules && !aiStatus?.configured && (
<div className="flex items-center gap-3 p-4 rounded-lg bg-amber-500/10 border border-amber-500/20">
<AlertTriangle className="h-5 w-5 text-amber-600 flex-shrink-0" />
<div className="flex-1">
<p className="font-medium text-amber-700">AI Configuration Required</p>
<p className="text-sm text-amber-600">
{aiStatus.error || 'AI screening rules require OpenAI to be configured.'}
</p>
</div>
<Button variant="outline" size="sm" asChild>
<Link href="/admin/settings">Configure AI</Link>
</Button>
</div>
)}
{/* Stats */}
{filteringStats && filteringStats.total > 0 ? (
<div className="grid gap-4 sm:grid-cols-4">

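The status query above triggers a live OpenAI check on the server (see the router further down), so the result is worth caching on the client. A minimal sketch, assuming tRPC passes React Query options through; the five-minute staleTime is an assumption, not part of this commit:

```ts
// Hypothetical tweak, not in the commit: cache the status so revisits
// don't re-run the live OpenAI test. staleTime is a React Query option.
const { data: aiStatus } = trpc.filtering.checkAIStatus.useQuery(
  { roundId },
  { enabled: isFilteringRound, staleTime: 5 * 60 * 1000 }
)
```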
File 2/4: openai.ts

@@ -66,12 +66,12 @@ export async function isOpenAIConfigured(): Promise<boolean> {
}
/**
- * Test OpenAI connection
+ * List available models from OpenAI API
*/
- export async function testOpenAIConnection(): Promise<{
+ export async function listAvailableModels(): Promise<{
success: boolean
+ models?: string[]
error?: string
- model?: string
}> {
try {
const client = await getOpenAI()
@@ -83,9 +83,90 @@ export async function testOpenAIConnection(): Promise<{
}
}
- // Simple test request
+ const response = await client.models.list()
+ const chatModels = response.data
+   .filter((m) => m.id.includes('gpt') || m.id.includes('o1') || m.id.includes('o3') || m.id.includes('o4'))
+   .map((m) => m.id)
+   .sort()
+ return {
+   success: true,
+   models: chatModels,
+ }
} catch (error) {
return {
success: false,
error: error instanceof Error ? error.message : 'Unknown error',
}
}
}
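A minimal usage sketch for the new helper, assuming it is exported from '@/lib/openai' (the filtering router below imports from that path); the wrapper function is illustrative:

```ts
import { listAvailableModels } from '@/lib/openai'

async function printChatModels(): Promise<void> {
  const result = await listAvailableModels()
  if (!result.success) {
    console.error('Could not list models:', result.error)
    return
  }
  // models comes back pre-filtered to gpt/o1/o3/o4 families and sorted
  for (const id of result.models ?? []) console.log(id)
}

void printChatModels()
```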
/**
* Validate that a specific model is available
*/
export async function validateModel(modelId: string): Promise<{
valid: boolean
error?: string
}> {
try {
const client = await getOpenAI()
if (!client) {
return {
valid: false,
error: 'OpenAI API key not configured',
}
}
// Try a minimal completion with the model
await client.chat.completions.create({
model: modelId,
messages: [{ role: 'user', content: 'test' }],
max_tokens: 1,
})
return { valid: true }
} catch (error) {
const message = error instanceof Error ? error.message : 'Unknown error'
// Check for specific model errors
if (message.includes('does not exist') || message.includes('model_not_found')) {
return {
valid: false,
error: `Model "${modelId}" is not available with your API key`,
}
}
return {
valid: false,
error: message,
}
}
}
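Note that validateModel spends a paid one-token completion per call. A cheaper probe, sketched here under the assumption of the openai v4 SDK, is models.retrieve(), which 404s on unknown IDs; it does not verify chat-completion access, so it is a weaker guarantee than the probe above:

```ts
import OpenAI from 'openai'

// Hypothetical token-free existence check (assumes openai v4 SDK).
export async function modelExists(client: OpenAI, modelId: string): Promise<boolean> {
  try {
    await client.models.retrieve(modelId)
    return true
  } catch (error) {
    if (error instanceof OpenAI.NotFoundError) return false
    throw error // auth or network failures are not "model missing"
  }
}
```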
/**
* Test OpenAI connection with the configured model
*/
export async function testOpenAIConnection(): Promise<{
success: boolean
error?: string
model?: string
modelTested?: string
}> {
try {
const client = await getOpenAI()
if (!client) {
return {
success: false,
error: 'OpenAI API key not configured',
}
}
// Get the configured model
const configuredModel = await getConfiguredModel()
// Test with the configured model
const response = await client.chat.completions.create({
- model: 'gpt-4o-mini',
+ model: configuredModel,
messages: [{ role: 'user', content: 'Hello' }],
max_tokens: 5,
})
@@ -93,11 +174,25 @@ export async function testOpenAIConnection(): Promise<{
return {
success: true,
model: response.model,
modelTested: configuredModel,
}
} catch (error) {
const message = error instanceof Error ? error.message : 'Unknown error'
const configuredModel = await getConfiguredModel()
// Check for model-specific errors
if (message.includes('does not exist') || message.includes('model_not_found')) {
return {
success: false,
- error: error instanceof Error ? error.message : 'Unknown error',
+ error: `Model "${configuredModel}" is not available. Check Settings → AI to select a valid model.`,
modelTested: configuredModel,
}
}
return {
success: false,
error: message,
modelTested: configuredModel,
}
}
}
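The practical change: the old test always probed the hardcoded gpt-4o-mini, so it could pass while the configured model was broken; the rewrite fails with a model-specific message. A usage sketch, where the wiring is illustrative:

```ts
import { testOpenAIConnection } from '@/lib/openai'

const check = await testOpenAIConnection()
if (!check.success) {
  // e.g. 'Model "gpt-nonexistent" is not available. Check Settings → AI ...'
  console.warn(`OpenAI check failed (tested ${check.modelTested}): ${check.error}`)
}
```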

File 3/4: filtering router

@@ -4,8 +4,48 @@ import { Prisma } from '@prisma/client'
import { router, adminProcedure, protectedProcedure } from '../trpc'
import { executeFilteringRules } from '../services/ai-filtering'
import { logAudit } from '../utils/audit'
import { isOpenAIConfigured, testOpenAIConnection } from '@/lib/openai'
export const filteringRouter = router({
/**
* Check if AI is configured and ready for filtering
*/
checkAIStatus: protectedProcedure
.input(z.object({ roundId: z.string() }))
.query(async ({ ctx, input }) => {
// Check if round has AI rules
const aiRules = await ctx.prisma.filteringRule.count({
where: {
roundId: input.roundId,
ruleType: 'AI_SCREENING',
isActive: true,
},
})
if (aiRules === 0) {
return { hasAIRules: false, configured: true, error: null }
}
// Check if OpenAI is configured
const configured = await isOpenAIConfigured()
if (!configured) {
return {
hasAIRules: true,
configured: false,
error: 'OpenAI API key not configured',
}
}
// Test the connection
const testResult = await testOpenAIConnection()
return {
hasAIRules: true,
configured: testResult.success,
error: testResult.error || null,
model: testResult.modelTested,
}
}),
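Since checkAIStatus runs a live completion via testOpenAIConnection, every status query costs an API round trip. A sketch of memoizing the test server-side, complementing the client-side staleTime idea earlier; the TTL and cache shape are assumptions:

```ts
// Hypothetical memo so repeated status checks reuse one live test result.
let cached: { result: Awaited<ReturnType<typeof testOpenAIConnection>>; at: number } | null = null

export async function testOpenAIConnectionCached(ttlMs = 5 * 60 * 1000) {
  if (cached && Date.now() - cached.at < ttlMs) return cached.result
  const result = await testOpenAIConnection()
  cached = { result, at: Date.now() }
  return result
}
```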
/**
* Get filtering rules for a round
*/
@@ -146,6 +186,30 @@ export const filteringRouter = router({
})
}
// Check if any AI_SCREENING rules exist
const hasAIRules = rules.some((r) => r.ruleType === 'AI_SCREENING' && r.isActive)
if (hasAIRules) {
// Verify OpenAI is configured before proceeding
const aiConfigured = await isOpenAIConfigured()
if (!aiConfigured) {
throw new TRPCError({
code: 'PRECONDITION_FAILED',
message:
'AI screening rules require OpenAI to be configured. Go to Settings → AI to configure your API key.',
})
}
// Also verify the model works
const testResult = await testOpenAIConnection()
if (!testResult.success) {
throw new TRPCError({
code: 'PRECONDITION_FAILED',
message: `AI configuration error: ${testResult.error}. Go to Settings → AI to fix.`,
})
}
}
// Get projects in this round
const roundProjectEntries = await ctx.prisma.roundProject.findMany({
where: { roundId: input.roundId },

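On the client, the PRECONDITION_FAILED error surfaces as a TRPCClientError with data.code set. A sketch of handling it, where the toast helper is an assumption:

```ts
// Hypothetical client wiring; `toast` stands in for any notifier.
const executeRules = trpc.filtering.executeRules.useMutation({
  onError: (err) => {
    if (err.data?.code === 'PRECONDITION_FAILED') {
      toast.error(err.message) // e.g. "AI screening rules require OpenAI to be configured..."
    }
  },
})
```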
File 4/4: AI filtering service

@@ -383,13 +383,40 @@ Return your evaluation as JSON.`
}
}
} catch (error) {
- // OpenAI error — flag all for manual review
+ // OpenAI error — flag all for manual review with specific error info
console.error('[AI Filtering] OpenAI API error:', error)
// Extract meaningful error message
let errorType = 'unknown_error'
let errorDetail = 'Unknown error occurred'
if (error instanceof Error) {
const message = error.message.toLowerCase()
if (message.includes('rate_limit') || message.includes('rate limit')) {
errorType = 'rate_limit'
errorDetail = 'OpenAI rate limit exceeded. Try again in a few minutes.'
} else if (message.includes('model') && (message.includes('not found') || message.includes('does not exist'))) {
errorType = 'model_not_found'
errorDetail = 'The configured AI model is not available. Check Settings → AI.'
} else if (message.includes('insufficient_quota') || message.includes('quota')) {
errorType = 'quota_exceeded'
errorDetail = 'OpenAI API quota exceeded. Check your billing settings.'
} else if (message.includes('invalid_api_key') || message.includes('unauthorized')) {
errorType = 'invalid_api_key'
errorDetail = 'Invalid OpenAI API key. Check Settings → AI.'
} else if (message.includes('context_length') || message.includes('token')) {
errorType = 'context_length'
errorDetail = 'Request too large. Try with fewer projects or shorter descriptions.'
} else {
errorDetail = error.message
}
}
for (const p of projects) {
results.set(p.id, {
meetsCriteria: false,
confidence: 0,
- reasoning: `AI screening error — flagged for manual review`,
+ reasoning: `AI screening error (${errorType}): ${errorDetail}`,
qualityScore: 5,
spamRisk: false,
})
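An alternative to substring matching is the openai v4 SDK's typed error classes; a sketch under that assumption, with the mapping mirroring the strings above:

```ts
import OpenAI from 'openai'

// Hedged alternative to message sniffing (assumes openai v4 SDK exports).
function classifyOpenAIError(error: unknown): { errorType: string; errorDetail: string } {
  if (error instanceof OpenAI.RateLimitError)
    return { errorType: 'rate_limit', errorDetail: 'OpenAI rate limit exceeded. Try again in a few minutes.' }
  if (error instanceof OpenAI.NotFoundError)
    return { errorType: 'model_not_found', errorDetail: 'The configured AI model is not available. Check Settings → AI.' }
  if (error instanceof OpenAI.AuthenticationError)
    return { errorType: 'invalid_api_key', errorDetail: 'Invalid OpenAI API key. Check Settings → AI.' }
  if (error instanceof OpenAI.APIError && error.code === 'insufficient_quota')
    return { errorType: 'quota_exceeded', errorDetail: 'OpenAI API quota exceeded. Check your billing settings.' }
  return { errorType: 'unknown_error', errorDetail: error instanceof Error ? error.message : 'Unknown error occurred' }
}
```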