diff --git a/src/lib/openai.ts b/src/lib/openai.ts
index 2114c95..bbddb95 100644
--- a/src/lib/openai.ts
+++ b/src/lib/openai.ts
@@ -70,6 +70,30 @@ export function supportsTemperature(model: string): boolean {
   )
 }
 
+/**
+ * Check if a model requires higher token limits due to reasoning overhead.
+ * Matches any model id containing 'nano' or 'gpt-5'; reasoning consumes output budget.
+ */
+export function needsHigherTokenLimit(model: string): boolean {
+  const modelLower = model.toLowerCase()
+  return modelLower.includes('nano') || modelLower.includes('gpt-5')
+}
+
+/**
+ * Get minimum recommended max_tokens for a model.
+ * Reasoning models need higher limits because internal reasoning consumes tokens.
+ */
+export function getMinTokenLimit(model: string, requestedLimit?: number): number | undefined {
+  // For GPT-5 nano, reasoning uses significant token budget.
+  // A request below the 16000-token floor is bumped up; no request means no cap.
+  if (needsHigherTokenLimit(model)) {
+    const minLimit = 16000 // Ensure enough headroom for reasoning
+    if (!requestedLimit) return undefined // Absent (or zero) limit = model default
+    return Math.max(requestedLimit, minLimit)
+  }
+  return requestedLimit
+}
+
 // ─── Chat Completion Parameter Builder ───────────────────────────────────────
 
 type MessageRole = 'system' | 'user' | 'assistant' | 'developer'
@@ -124,11 +148,13 @@ export function buildCompletionParams(
 
   // Token limit parameter differs between model types
   // Newer models (GPT-5+, o-series) use max_completion_tokens
-  if (options.maxTokens) {
+  // Also ensure sufficient tokens for models with reasoning overhead (GPT-5 nano)
+  const effectiveMaxTokens = getMinTokenLimit(model, options.maxTokens)
+  if (effectiveMaxTokens) {
     if (usesNewTokenParam(model)) {
-      params.max_completion_tokens = options.maxTokens
+      params.max_completion_tokens = effectiveMaxTokens
     } else {
-      params.max_tokens = options.maxTokens
+      params.max_tokens = effectiveMaxTokens
     }
   }
 