diff --git a/src/server/services/ai-assignment.ts b/src/server/services/ai-assignment.ts
index be1ec9d..fa8253a 100644
--- a/src/server/services/ai-assignment.ts
+++ b/src/server/services/ai-assignment.ts
@@ -136,7 +136,18 @@ async function processAssignmentBatch(
     maxTokens: 4000,
   })
 
-  const response = await openai.chat.completions.create(params)
+  let response
+  try {
+    response = await openai.chat.completions.create(params)
+  } catch (apiError) {
+    // Provide clearer error for model-related issues
+    const errorMsg = apiError instanceof Error ? apiError.message : String(apiError)
+    if (errorMsg.includes('model') || errorMsg.includes('does not exist')) {
+      throw new Error(`Invalid AI model "${model}". Please check the model name in Settings > AI Configuration.`)
+    }
+    throw apiError
+  }
+
   const usage = extractTokenUsage(response)
   tokensUsed = usage.totalTokens
 
@@ -157,7 +168,15 @@
   const content = response.choices[0]?.message?.content
 
   if (!content) {
-    throw new Error('No response from AI')
+    // Check if response indicates an issue
+    const finishReason = response.choices[0]?.finish_reason
+    if (finishReason === 'content_filter') {
+      throw new Error('AI response was filtered. Try a different model or simplify the project descriptions.')
+    }
+    if (!response.choices || response.choices.length === 0) {
+      throw new Error(`No response from model "${model}". This model may not exist or may not be available. Please verify the model name.`)
+    }
+    throw new Error(`Empty response from AI model "${model}". The model may not support this type of request.`)
   }
 
   const parsed = JSON.parse(content) as {