Add LiteLLM proxy support for ChatGPT subscription AI access
- Add ai_provider setting: 'openai' (API key) or 'litellm' (ChatGPT subscription proxy)
- Auto-strip max_tokens/max_completion_tokens for chatgpt/ prefix models (the ChatGPT subscription backend rejects token-limit fields)
- LiteLLM mode: dummy API key when none is configured; base URL is required
- isOpenAIConfigured() checks the base URL instead of the API key for LiteLLM
- listAvailableModels() returns a manualEntry flag for LiteLLM (no models.list support)
- Settings UI: conditional fields, info banner, and a manual model input with chatgpt/ prefix examples when LiteLLM is selected
- All 7 AI services work transparently via buildCompletionParams()

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
parent
014bb15890
commit
b2279067e2
|
|
@ -4,7 +4,7 @@ import { useForm } from 'react-hook-form'
|
|||
import { zodResolver } from '@hookform/resolvers/zod'
|
||||
import { z } from 'zod'
|
||||
import { toast } from 'sonner'
|
||||
import { Cog, Loader2, Zap, AlertCircle, RefreshCw, SlidersHorizontal } from 'lucide-react'
|
||||
import { Cog, Loader2, Zap, AlertCircle, RefreshCw, SlidersHorizontal, Info } from 'lucide-react'
|
||||
import { trpc } from '@/lib/trpc/client'
|
||||
import { Button } from '@/components/ui/button'
|
||||
import { Input } from '@/components/ui/input'
|
||||
|
|
@ -67,7 +67,10 @@ export function AISettingsForm({ settings }: AISettingsFormProps) {
|
|||
},
|
||||
})
|
||||
|
||||
// Fetch available models from OpenAI API
|
||||
const watchProvider = form.watch('ai_provider')
|
||||
const isLiteLLM = watchProvider === 'litellm'
|
||||
|
||||
// Fetch available models from OpenAI API (skip for LiteLLM — no models.list support)
|
||||
const {
|
||||
data: modelsData,
|
||||
isLoading: modelsLoading,
|
||||
|
|
@ -76,6 +79,7 @@ export function AISettingsForm({ settings }: AISettingsFormProps) {
|
|||
} = trpc.settings.listAIModels.useQuery(undefined, {
|
||||
staleTime: 5 * 60 * 1000, // Cache for 5 minutes
|
||||
retry: false,
|
||||
enabled: !isLiteLLM,
|
||||
})
|
||||
|
||||
const updateSettings = trpc.settings.updateMultiple.useMutation({
|
||||
|
|
@ -182,32 +186,50 @@ export function AISettingsForm({ settings }: AISettingsFormProps) {
|
|||
</SelectTrigger>
|
||||
</FormControl>
|
||||
<SelectContent>
|
||||
<SelectItem value="openai">OpenAI</SelectItem>
|
||||
<SelectItem value="openai">OpenAI (API Key)</SelectItem>
|
||||
<SelectItem value="litellm">LiteLLM Proxy (ChatGPT Subscription)</SelectItem>
|
||||
</SelectContent>
|
||||
</Select>
|
||||
<FormDescription>
|
||||
AI provider for smart assignment suggestions
|
||||
{field.value === 'litellm'
|
||||
? 'Route AI calls through a LiteLLM proxy connected to your ChatGPT Plus/Pro subscription'
|
||||
: 'Direct OpenAI API access using your API key'}
|
||||
</FormDescription>
|
||||
<FormMessage />
|
||||
</FormItem>
|
||||
)}
|
||||
/>
|
||||
|
||||
{isLiteLLM && (
|
||||
<Alert>
|
||||
<Info className="h-4 w-4" />
|
||||
<AlertDescription>
|
||||
<strong>LiteLLM Proxy Mode</strong> — AI calls will be routed through your LiteLLM proxy
|
||||
using your ChatGPT subscription. Token limits are automatically stripped (not supported by ChatGPT backend).
|
||||
Make sure your LiteLLM proxy is running and accessible.
|
||||
</AlertDescription>
|
||||
</Alert>
|
||||
)}
|
||||
|
||||
<FormField
|
||||
control={form.control}
|
||||
name="openai_api_key"
|
||||
render={({ field }) => (
|
||||
<FormItem>
|
||||
<FormLabel>API Key</FormLabel>
|
||||
<FormLabel>{isLiteLLM ? 'API Key (Optional)' : 'API Key'}</FormLabel>
|
||||
<FormControl>
|
||||
<Input
|
||||
type="password"
|
||||
placeholder={settings.openai_api_key ? '••••••••' : 'Enter API key'}
|
||||
placeholder={isLiteLLM
|
||||
? 'Optional — leave blank for default'
|
||||
: (settings.openai_api_key ? '••••••••' : 'Enter API key')}
|
||||
{...field}
|
||||
/>
|
||||
</FormControl>
|
||||
<FormDescription>
|
||||
Your OpenAI API key. Leave blank to keep the existing key.
|
||||
{isLiteLLM
|
||||
? 'LiteLLM proxy usually does not require an API key. Leave blank to use default.'
|
||||
: 'Your OpenAI API key. Leave blank to keep the existing key.'}
|
||||
</FormDescription>
|
||||
<FormMessage />
|
||||
</FormItem>
|
||||
|
|
@ -219,16 +241,26 @@ export function AISettingsForm({ settings }: AISettingsFormProps) {
|
|||
name="openai_base_url"
|
||||
render={({ field }) => (
|
||||
<FormItem>
|
||||
<FormLabel>API Base URL (Optional)</FormLabel>
|
||||
<FormLabel>{isLiteLLM ? 'LiteLLM Proxy URL' : 'API Base URL (Optional)'}</FormLabel>
|
||||
<FormControl>
|
||||
<Input
|
||||
placeholder="https://api.openai.com/v1"
|
||||
placeholder={isLiteLLM ? 'http://localhost:4000' : 'https://api.openai.com/v1'}
|
||||
{...field}
|
||||
/>
|
||||
</FormControl>
|
||||
<FormDescription>
|
||||
Custom base URL for OpenAI-compatible providers. Leave blank for OpenAI.
|
||||
Use <code className="text-xs bg-muted px-1 rounded">https://openrouter.ai/api/v1</code> for OpenRouter (access Claude, Gemini, Llama, etc.)
|
||||
{isLiteLLM ? (
|
||||
<>
|
||||
URL of your LiteLLM proxy. Typically{' '}
|
||||
<code className="text-xs bg-muted px-1 rounded">http://localhost:4000</code>{' '}
|
||||
or your server address.
|
||||
</>
|
||||
) : (
|
||||
<>
|
||||
Custom base URL for OpenAI-compatible providers. Leave blank for OpenAI.
|
||||
Use <code className="text-xs bg-muted px-1 rounded">https://openrouter.ai/api/v1</code> for OpenRouter.
|
||||
</>
|
||||
)}
|
||||
</FormDescription>
|
||||
<FormMessage />
|
||||
</FormItem>
|
||||
|
|
@ -242,7 +274,7 @@ export function AISettingsForm({ settings }: AISettingsFormProps) {
|
|||
<FormItem>
|
||||
<div className="flex items-center justify-between">
|
||||
<FormLabel>Model</FormLabel>
|
||||
{modelsData?.success && (
|
||||
{!isLiteLLM && modelsData?.success && !modelsData?.manualEntry && (
|
||||
<Button
|
||||
type="button"
|
||||
variant="ghost"
|
||||
|
|
@ -256,7 +288,13 @@ export function AISettingsForm({ settings }: AISettingsFormProps) {
|
|||
)}
|
||||
</div>
|
||||
|
||||
{modelsLoading ? (
|
||||
{isLiteLLM || modelsData?.manualEntry ? (
|
||||
<Input
|
||||
value={field.value}
|
||||
onChange={(e) => field.onChange(e.target.value)}
|
||||
placeholder="chatgpt/gpt-5.2"
|
||||
/>
|
||||
) : modelsLoading ? (
|
||||
<Skeleton className="h-10 w-full" />
|
||||
) : modelsError || !modelsData?.success ? (
|
||||
<div className="space-y-2">
|
||||
|
|
@ -303,7 +341,15 @@ export function AISettingsForm({ settings }: AISettingsFormProps) {
|
|||
</Select>
|
||||
)}
|
||||
<FormDescription>
|
||||
{form.watch('ai_model')?.startsWith('o') ? (
|
||||
{isLiteLLM ? (
|
||||
<>
|
||||
Enter the model ID with the{' '}
|
||||
<code className="text-xs bg-muted px-1 rounded">chatgpt/</code> prefix.
|
||||
Examples:{' '}
|
||||
<code className="text-xs bg-muted px-1 rounded">chatgpt/gpt-5.2</code>,{' '}
|
||||
<code className="text-xs bg-muted px-1 rounded">chatgpt/gpt-5.2-codex</code>
|
||||
</>
|
||||
) : form.watch('ai_model')?.startsWith('o') ? (
|
||||
<span className="flex items-center gap-1 text-purple-600">
|
||||
<SlidersHorizontal className="h-3 w-3" />
|
||||
Reasoning model - optimized for complex analysis tasks
|
||||
|
|
|
|||
|
|
@ -8,6 +8,33 @@ const globalForOpenAI = globalThis as unknown as {
|
|||
openaiInitialized: boolean
|
||||
}
|
||||
|
||||
// ─── Provider Detection ─────────────────────────────────────────────────────
|
||||
|
||||
/**
|
||||
* Get the configured AI provider from SystemSettings.
|
||||
* Returns 'openai' (default) or 'litellm' (ChatGPT subscription proxy).
|
||||
*/
|
||||
export async function getConfiguredProvider(): Promise<'openai' | 'litellm'> {
|
||||
try {
|
||||
const setting = await prisma.systemSettings.findUnique({
|
||||
where: { key: 'ai_provider' },
|
||||
})
|
||||
const value = setting?.value || 'openai'
|
||||
return value === 'litellm' ? 'litellm' : 'openai'
|
||||
} catch {
|
||||
return 'openai'
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a model ID indicates LiteLLM ChatGPT subscription routing.
|
||||
* Models like 'chatgpt/gpt-5.2' use the chatgpt/ prefix.
|
||||
* Used by buildCompletionParams (sync) to strip unsupported token limit fields.
|
||||
*/
|
||||
export function isLiteLLMChatGPTModel(model: string): boolean {
|
||||
return model.toLowerCase().startsWith('chatgpt/')
|
||||
}
|
||||
|
||||
// ─── Model Type Detection ────────────────────────────────────────────────────
|
||||
|
||||
/**
|
||||
|
|
@ -168,6 +195,12 @@ export function buildCompletionParams(
|
|||
params.response_format = { type: 'json_object' }
|
||||
}
|
||||
|
||||
// LiteLLM ChatGPT subscription models reject token limit fields
|
||||
if (isLiteLLMChatGPTModel(model)) {
|
||||
delete params.max_tokens
|
||||
delete params.max_completion_tokens
|
||||
}
|
||||
|
||||
return params
|
||||
}
|
||||
|
||||
|
|
@ -209,8 +242,12 @@ async function getBaseURL(): Promise<string | undefined> {
|
|||
*/
|
||||
async function createOpenAIClient(): Promise<OpenAI | null> {
|
||||
const apiKey = await getOpenAIApiKey()
|
||||
const provider = await getConfiguredProvider()
|
||||
|
||||
if (!apiKey) {
|
||||
// LiteLLM proxy may not require a real API key
|
||||
const effectiveApiKey = apiKey || (provider === 'litellm' ? 'sk-litellm' : null)
|
||||
|
||||
if (!effectiveApiKey) {
|
||||
console.warn('OpenAI API key not configured')
|
||||
return null
|
||||
}
|
||||
|
|
@ -218,11 +255,11 @@ async function createOpenAIClient(): Promise<OpenAI | null> {
|
|||
const baseURL = await getBaseURL()
|
||||
|
||||
if (baseURL) {
|
||||
console.log(`[OpenAI] Using custom base URL: ${baseURL}`)
|
||||
console.log(`[OpenAI] Using custom base URL: ${baseURL} (provider: ${provider})`)
|
||||
}
|
||||
|
||||
return new OpenAI({
|
||||
apiKey,
|
||||
apiKey: effectiveApiKey,
|
||||
...(baseURL ? { baseURL } : {}),
|
||||
})
|
||||
}
|
||||
|
|
@ -259,6 +296,12 @@ export function resetOpenAIClient(): void {
|
|||
* Check if OpenAI is configured and available
|
||||
*/
|
||||
export async function isOpenAIConfigured(): Promise<boolean> {
|
||||
const provider = await getConfiguredProvider()
|
||||
if (provider === 'litellm') {
|
||||
// LiteLLM just needs a base URL configured
|
||||
const baseURL = await getBaseURL()
|
||||
return !!baseURL
|
||||
}
|
||||
const apiKey = await getOpenAIApiKey()
|
||||
return !!apiKey
|
||||
}
|
||||
|
|
@ -270,8 +313,20 @@ export async function listAvailableModels(): Promise<{
|
|||
success: boolean
|
||||
models?: string[]
|
||||
error?: string
|
||||
manualEntry?: boolean
|
||||
}> {
|
||||
try {
|
||||
const provider = await getConfiguredProvider()
|
||||
|
||||
// LiteLLM proxy for ChatGPT subscription doesn't support models.list()
|
||||
if (provider === 'litellm') {
|
||||
return {
|
||||
success: true,
|
||||
models: [],
|
||||
manualEntry: true,
|
||||
}
|
||||
}
|
||||
|
||||
const client = await getOpenAI()
|
||||
|
||||
if (!client) {
|
||||
|
|
|
|||
|
|
@ -201,8 +201,8 @@ export const settingsRouter = router({
|
|||
clearStorageProviderCache()
|
||||
}
|
||||
|
||||
// Reset OpenAI client if API key or base URL changed
|
||||
if (input.settings.some((s) => s.key === 'openai_api_key' || s.key === 'openai_base_url' || s.key === 'ai_model')) {
|
||||
// Reset OpenAI client if API key, base URL, model, or provider changed
|
||||
if (input.settings.some((s) => s.key === 'openai_api_key' || s.key === 'openai_base_url' || s.key === 'ai_model' || s.key === 'ai_provider')) {
|
||||
const { resetOpenAIClient } = await import('@/lib/openai')
|
||||
resetOpenAIClient()
|
||||
}
|
||||
|
|
@ -247,6 +247,15 @@ export const settingsRouter = router({
|
|||
listAIModels: superAdminProcedure.query(async () => {
|
||||
const result = await listAvailableModels()
|
||||
|
||||
// LiteLLM mode: manual model entry, no listing available
|
||||
if (result.manualEntry) {
|
||||
return {
|
||||
success: true,
|
||||
models: [],
|
||||
manualEntry: true,
|
||||
}
|
||||
}
|
||||
|
||||
if (!result.success || !result.models) {
|
||||
return {
|
||||
success: false,
|
||||
|
|
|
|||
Loading…
Reference in New Issue