Add LiteLLM proxy support for ChatGPT subscription AI access

- Add ai_provider setting: 'openai' (API key) or 'litellm' (ChatGPT subscription proxy)
- Auto-strip max_tokens/max_completion_tokens for chatgpt/ prefix models
  (ChatGPT subscription backend rejects token limit fields)
- LiteLLM mode: dummy API key when none configured, base URL required
- isOpenAIConfigured() checks base URL instead of API key for LiteLLM
- listAvailableModels() returns manualEntry flag for LiteLLM (no models.list)
- Settings UI: conditional fields, info banner, manual model input with
  chatgpt/ prefix examples when LiteLLM selected
- All 7 AI services work transparently via buildCompletionParams()

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
Matt 2026-02-16 15:48:34 +01:00
parent 014bb15890
commit b2279067e2
3 changed files with 129 additions and 19 deletions

View File

@ -4,7 +4,7 @@ import { useForm } from 'react-hook-form'
import { zodResolver } from '@hookform/resolvers/zod' import { zodResolver } from '@hookform/resolvers/zod'
import { z } from 'zod' import { z } from 'zod'
import { toast } from 'sonner' import { toast } from 'sonner'
import { Cog, Loader2, Zap, AlertCircle, RefreshCw, SlidersHorizontal } from 'lucide-react' import { Cog, Loader2, Zap, AlertCircle, RefreshCw, SlidersHorizontal, Info } from 'lucide-react'
import { trpc } from '@/lib/trpc/client' import { trpc } from '@/lib/trpc/client'
import { Button } from '@/components/ui/button' import { Button } from '@/components/ui/button'
import { Input } from '@/components/ui/input' import { Input } from '@/components/ui/input'
@ -67,7 +67,10 @@ export function AISettingsForm({ settings }: AISettingsFormProps) {
}, },
}) })
// Fetch available models from OpenAI API const watchProvider = form.watch('ai_provider')
const isLiteLLM = watchProvider === 'litellm'
// Fetch available models from OpenAI API (skip for LiteLLM — no models.list support)
const { const {
data: modelsData, data: modelsData,
isLoading: modelsLoading, isLoading: modelsLoading,
@ -76,6 +79,7 @@ export function AISettingsForm({ settings }: AISettingsFormProps) {
} = trpc.settings.listAIModels.useQuery(undefined, { } = trpc.settings.listAIModels.useQuery(undefined, {
staleTime: 5 * 60 * 1000, // Cache for 5 minutes staleTime: 5 * 60 * 1000, // Cache for 5 minutes
retry: false, retry: false,
enabled: !isLiteLLM,
}) })
const updateSettings = trpc.settings.updateMultiple.useMutation({ const updateSettings = trpc.settings.updateMultiple.useMutation({
@ -182,32 +186,50 @@ export function AISettingsForm({ settings }: AISettingsFormProps) {
</SelectTrigger> </SelectTrigger>
</FormControl> </FormControl>
<SelectContent> <SelectContent>
<SelectItem value="openai">OpenAI</SelectItem> <SelectItem value="openai">OpenAI (API Key)</SelectItem>
<SelectItem value="litellm">LiteLLM Proxy (ChatGPT Subscription)</SelectItem>
</SelectContent> </SelectContent>
</Select> </Select>
<FormDescription> <FormDescription>
AI provider for smart assignment suggestions {field.value === 'litellm'
? 'Route AI calls through a LiteLLM proxy connected to your ChatGPT Plus/Pro subscription'
: 'Direct OpenAI API access using your API key'}
</FormDescription> </FormDescription>
<FormMessage /> <FormMessage />
</FormItem> </FormItem>
)} )}
/> />
{isLiteLLM && (
<Alert>
<Info className="h-4 w-4" />
<AlertDescription>
<strong>LiteLLM Proxy Mode</strong> AI calls will be routed through your LiteLLM proxy
using your ChatGPT subscription. Token limits are automatically stripped (not supported by ChatGPT backend).
Make sure your LiteLLM proxy is running and accessible.
</AlertDescription>
</Alert>
)}
<FormField <FormField
control={form.control} control={form.control}
name="openai_api_key" name="openai_api_key"
render={({ field }) => ( render={({ field }) => (
<FormItem> <FormItem>
<FormLabel>API Key</FormLabel> <FormLabel>{isLiteLLM ? 'API Key (Optional)' : 'API Key'}</FormLabel>
<FormControl> <FormControl>
<Input <Input
type="password" type="password"
placeholder={settings.openai_api_key ? '••••••••' : 'Enter API key'} placeholder={isLiteLLM
? 'Optional — leave blank for default'
: (settings.openai_api_key ? '••••••••' : 'Enter API key')}
{...field} {...field}
/> />
</FormControl> </FormControl>
<FormDescription> <FormDescription>
Your OpenAI API key. Leave blank to keep the existing key. {isLiteLLM
? 'LiteLLM proxy usually does not require an API key. Leave blank to use default.'
: 'Your OpenAI API key. Leave blank to keep the existing key.'}
</FormDescription> </FormDescription>
<FormMessage /> <FormMessage />
</FormItem> </FormItem>
@ -219,16 +241,26 @@ export function AISettingsForm({ settings }: AISettingsFormProps) {
name="openai_base_url" name="openai_base_url"
render={({ field }) => ( render={({ field }) => (
<FormItem> <FormItem>
<FormLabel>API Base URL (Optional)</FormLabel> <FormLabel>{isLiteLLM ? 'LiteLLM Proxy URL' : 'API Base URL (Optional)'}</FormLabel>
<FormControl> <FormControl>
<Input <Input
placeholder="https://api.openai.com/v1" placeholder={isLiteLLM ? 'http://localhost:4000' : 'https://api.openai.com/v1'}
{...field} {...field}
/> />
</FormControl> </FormControl>
<FormDescription> <FormDescription>
{isLiteLLM ? (
<>
URL of your LiteLLM proxy. Typically{' '}
<code className="text-xs bg-muted px-1 rounded">http://localhost:4000</code>{' '}
or your server address.
</>
) : (
<>
Custom base URL for OpenAI-compatible providers. Leave blank for OpenAI. Custom base URL for OpenAI-compatible providers. Leave blank for OpenAI.
Use <code className="text-xs bg-muted px-1 rounded">https://openrouter.ai/api/v1</code> for OpenRouter (access Claude, Gemini, Llama, etc.) Use <code className="text-xs bg-muted px-1 rounded">https://openrouter.ai/api/v1</code> for OpenRouter.
</>
)}
</FormDescription> </FormDescription>
<FormMessage /> <FormMessage />
</FormItem> </FormItem>
@ -242,7 +274,7 @@ export function AISettingsForm({ settings }: AISettingsFormProps) {
<FormItem> <FormItem>
<div className="flex items-center justify-between"> <div className="flex items-center justify-between">
<FormLabel>Model</FormLabel> <FormLabel>Model</FormLabel>
{modelsData?.success && ( {!isLiteLLM && modelsData?.success && !modelsData?.manualEntry && (
<Button <Button
type="button" type="button"
variant="ghost" variant="ghost"
@ -256,7 +288,13 @@ export function AISettingsForm({ settings }: AISettingsFormProps) {
)} )}
</div> </div>
{modelsLoading ? ( {isLiteLLM || modelsData?.manualEntry ? (
<Input
value={field.value}
onChange={(e) => field.onChange(e.target.value)}
placeholder="chatgpt/gpt-5.2"
/>
) : modelsLoading ? (
<Skeleton className="h-10 w-full" /> <Skeleton className="h-10 w-full" />
) : modelsError || !modelsData?.success ? ( ) : modelsError || !modelsData?.success ? (
<div className="space-y-2"> <div className="space-y-2">
@ -303,7 +341,15 @@ export function AISettingsForm({ settings }: AISettingsFormProps) {
</Select> </Select>
)} )}
<FormDescription> <FormDescription>
{form.watch('ai_model')?.startsWith('o') ? ( {isLiteLLM ? (
<>
Enter the model ID with the{' '}
<code className="text-xs bg-muted px-1 rounded">chatgpt/</code> prefix.
Examples:{' '}
<code className="text-xs bg-muted px-1 rounded">chatgpt/gpt-5.2</code>,{' '}
<code className="text-xs bg-muted px-1 rounded">chatgpt/gpt-5.2-codex</code>
</>
) : form.watch('ai_model')?.startsWith('o') ? (
<span className="flex items-center gap-1 text-purple-600"> <span className="flex items-center gap-1 text-purple-600">
<SlidersHorizontal className="h-3 w-3" /> <SlidersHorizontal className="h-3 w-3" />
Reasoning model - optimized for complex analysis tasks Reasoning model - optimized for complex analysis tasks

View File

@ -8,6 +8,33 @@ const globalForOpenAI = globalThis as unknown as {
openaiInitialized: boolean openaiInitialized: boolean
} }
// ─── Provider Detection ─────────────────────────────────────────────────────
/**
 * Read the active AI provider from the SystemSettings table.
 * Resolves to 'litellm' (ChatGPT subscription proxy) only when the
 * 'ai_provider' setting is exactly that value; anything else — missing
 * row, empty value, unknown value, or a failed lookup — falls back to
 * the default 'openai'.
 */
export async function getConfiguredProvider(): Promise<'openai' | 'litellm'> {
  try {
    const row = await prisma.systemSettings.findUnique({
      where: { key: 'ai_provider' },
    })
    // Empty/undefined values fall through to the default, same as unknown ones.
    return row?.value === 'litellm' ? 'litellm' : 'openai'
  } catch {
    // Best-effort: a database error must not break AI features, so default.
    return 'openai'
  }
}
/**
 * Detect whether a model ID routes through the LiteLLM ChatGPT
 * subscription backend (IDs prefixed 'chatgpt/', e.g. 'chatgpt/gpt-5.2').
 * Matching is case-insensitive. Kept synchronous so that
 * buildCompletionParams can use it to strip the token-limit fields the
 * ChatGPT backend rejects.
 */
export function isLiteLLMChatGPTModel(model: string): boolean {
  return /^chatgpt\//i.test(model)
}
// ─── Model Type Detection ──────────────────────────────────────────────────── // ─── Model Type Detection ────────────────────────────────────────────────────
/** /**
@ -168,6 +195,12 @@ export function buildCompletionParams(
params.response_format = { type: 'json_object' } params.response_format = { type: 'json_object' }
} }
// LiteLLM ChatGPT subscription models reject token limit fields
if (isLiteLLMChatGPTModel(model)) {
delete params.max_tokens
delete params.max_completion_tokens
}
return params return params
} }
@ -209,8 +242,12 @@ async function getBaseURL(): Promise<string | undefined> {
*/ */
async function createOpenAIClient(): Promise<OpenAI | null> { async function createOpenAIClient(): Promise<OpenAI | null> {
const apiKey = await getOpenAIApiKey() const apiKey = await getOpenAIApiKey()
const provider = await getConfiguredProvider()
if (!apiKey) { // LiteLLM proxy may not require a real API key
const effectiveApiKey = apiKey || (provider === 'litellm' ? 'sk-litellm' : null)
if (!effectiveApiKey) {
console.warn('OpenAI API key not configured') console.warn('OpenAI API key not configured')
return null return null
} }
@ -218,11 +255,11 @@ async function createOpenAIClient(): Promise<OpenAI | null> {
const baseURL = await getBaseURL() const baseURL = await getBaseURL()
if (baseURL) { if (baseURL) {
console.log(`[OpenAI] Using custom base URL: ${baseURL}`) console.log(`[OpenAI] Using custom base URL: ${baseURL} (provider: ${provider})`)
} }
return new OpenAI({ return new OpenAI({
apiKey, apiKey: effectiveApiKey,
...(baseURL ? { baseURL } : {}), ...(baseURL ? { baseURL } : {}),
}) })
} }
@ -259,6 +296,12 @@ export function resetOpenAIClient(): void {
* Check if OpenAI is configured and available * Check if OpenAI is configured and available
*/ */
export async function isOpenAIConfigured(): Promise<boolean> { export async function isOpenAIConfigured(): Promise<boolean> {
const provider = await getConfiguredProvider()
if (provider === 'litellm') {
// LiteLLM just needs a base URL configured
const baseURL = await getBaseURL()
return !!baseURL
}
const apiKey = await getOpenAIApiKey() const apiKey = await getOpenAIApiKey()
return !!apiKey return !!apiKey
} }
@ -270,8 +313,20 @@ export async function listAvailableModels(): Promise<{
success: boolean success: boolean
models?: string[] models?: string[]
error?: string error?: string
manualEntry?: boolean
}> { }> {
try { try {
const provider = await getConfiguredProvider()
// LiteLLM proxy for ChatGPT subscription doesn't support models.list()
if (provider === 'litellm') {
return {
success: true,
models: [],
manualEntry: true,
}
}
const client = await getOpenAI() const client = await getOpenAI()
if (!client) { if (!client) {

View File

@ -201,8 +201,8 @@ export const settingsRouter = router({
clearStorageProviderCache() clearStorageProviderCache()
} }
// Reset OpenAI client if API key or base URL changed // Reset OpenAI client if API key, base URL, model, or provider changed
if (input.settings.some((s) => s.key === 'openai_api_key' || s.key === 'openai_base_url' || s.key === 'ai_model')) { if (input.settings.some((s) => s.key === 'openai_api_key' || s.key === 'openai_base_url' || s.key === 'ai_model' || s.key === 'ai_provider')) {
const { resetOpenAIClient } = await import('@/lib/openai') const { resetOpenAIClient } = await import('@/lib/openai')
resetOpenAIClient() resetOpenAIClient()
} }
@ -247,6 +247,15 @@ export const settingsRouter = router({
listAIModels: superAdminProcedure.query(async () => { listAIModels: superAdminProcedure.query(async () => {
const result = await listAvailableModels() const result = await listAvailableModels()
// LiteLLM mode: manual model entry, no listing available
if (result.manualEntry) {
return {
success: true,
models: [],
manualEntry: true,
}
}
if (!result.success || !result.models) { if (!result.success || !result.models) {
return { return {
success: false, success: false,