Fix GPT-5 max_completion_tokens parameter detection
Build and Push Docker Image / build (push) Successful in 8m35s
Details
Build and Push Docker Image / build (push) Successful in 8m35s
Details
GPT-5 and newer models require max_completion_tokens instead of max_tokens. Added usesNewTokenParam() to detect GPT-5+ models separately from reasoning-model restrictions (temperature, json_object, system messages).

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
parent
fd82a9b981
commit
c0ce6f9f1f
|
|
@ -19,8 +19,15 @@ const globalForOpenAI = globalThis as unknown as {
|
|||
*/
|
||||
const REASONING_MODEL_PREFIXES = ['o1', 'o3', 'o4'] // o-series reasoning models: extra restrictions (no temperature, no json_object, etc.)
|
||||
|
||||
/**
|
||||
* Models that use max_completion_tokens instead of max_tokens.
|
||||
* This includes reasoning models AND newer GPT models (GPT-5+).
|
||||
*/
|
||||
const NEW_TOKEN_PARAM_PREFIXES = ['o1', 'o3', 'o4', 'gpt-5', 'gpt-6', 'gpt-7']
|
||||
|
||||
/**
|
||||
* Check if a model is a reasoning model (o1, o3, o4 series)
|
||||
* These models have additional restrictions (no temperature, no json_object, etc.)
|
||||
*/
|
||||
export function isReasoningModel(model: string): boolean {
|
||||
const modelLower = model.toLowerCase()
|
||||
|
|
@ -31,6 +38,19 @@ export function isReasoningModel(model: string): boolean {
|
|||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a model requires max_completion_tokens instead of max_tokens.
|
||||
* This includes reasoning models AND newer GPT models (GPT-5+).
|
||||
*/
|
||||
export function usesNewTokenParam(model: string): boolean {
|
||||
const modelLower = model.toLowerCase()
|
||||
return NEW_TOKEN_PARAM_PREFIXES.some(prefix =>
|
||||
modelLower.startsWith(prefix) ||
|
||||
modelLower.includes(`/${prefix}`) ||
|
||||
modelLower.includes(`-${prefix}`)
|
||||
)
|
||||
}
|
||||
|
||||
// ─── Chat Completion Parameter Builder ───────────────────────────────────────
|
||||
|
||||
type MessageRole = 'system' | 'user' | 'assistant' | 'developer'
|
||||
|
|
@ -84,8 +104,9 @@ export function buildCompletionParams(
|
|||
}
|
||||
|
||||
// Token limit parameter differs between model types
|
||||
// Newer models (GPT-5+, o-series) use max_completion_tokens
|
||||
if (options.maxTokens) {
|
||||
if (isReasoning) {
|
||||
if (usesNewTokenParam(model)) {
|
||||
params.max_completion_tokens = options.maxTokens
|
||||
} else {
|
||||
params.max_tokens = options.maxTokens
|
||||
|
|
|
|||
Loading…
Reference in New Issue