diff --git a/apps/client/src/services/llm_chat.ts b/apps/client/src/services/llm_chat.ts
index 13f282fe02..e4263aa896 100644
--- a/apps/client/src/services/llm_chat.ts
+++ b/apps/client/src/services/llm_chat.ts
@@ -3,10 +3,10 @@ import type { LlmChatConfig, LlmCitation, LlmMessage, LlmModelInfo,LlmUsage } fr
 import server from "./server.js";
 
 /**
- * Fetch available models for a provider.
+ * Fetch available models from all configured providers.
  */
-export async function getAvailableModels(provider: string = "anthropic"): Promise<LlmModelInfo[]> {
-    const response = await server.get<{ models?: LlmModelInfo[] }>(`llm-chat/models?provider=${encodeURIComponent(provider)}`);
+export async function getAvailableModels(): Promise<LlmModelInfo[]> {
+    const response = await server.get<{ models?: LlmModelInfo[] }>("llm-chat/models");
     return response.models ?? [];
 }
 
diff --git a/apps/client/src/widgets/type_widgets/llm_chat/useLlmChat.ts b/apps/client/src/widgets/type_widgets/llm_chat/useLlmChat.ts
index eb5033cc73..ddca7deb98 100644
--- a/apps/client/src/widgets/type_widgets/llm_chat/useLlmChat.ts
+++ b/apps/client/src/widgets/type_widgets/llm_chat/useLlmChat.ts
@@ -239,8 +239,10 @@ export function useLlmChat(
                 .join("")
         }));
 
+        const selectedModelProvider = availableModels.find(m => m.id === selectedModel)?.provider;
         const streamOptions: Parameters[1] = {
             model: selectedModel || undefined,
+            provider: selectedModelProvider,
             enableWebSearch,
             enableNoteTools,
             contextNoteId,
diff --git a/apps/server/src/routes/api/llm_chat.ts b/apps/server/src/routes/api/llm_chat.ts
index 8302687e13..dd5bf149c8 100644
--- a/apps/server/src/routes/api/llm_chat.ts
+++ b/apps/server/src/routes/api/llm_chat.ts
@@ -2,7 +2,7 @@ import type { LlmMessage } from "@triliumnext/commons";
 import type { Request, Response } from "express";
 
 import { generateChatTitle } from "../../services/llm/chat_title.js";
-import { getProviderByType, hasConfiguredProviders, type LlmProviderConfig } from "../../services/llm/index.js";
+import { getAllModels, getProviderByType, hasConfiguredProviders, type LlmProviderConfig } from "../../services/llm/index.js";
 import { streamToChunks } from "../../services/llm/stream.js";
 import log from "../../services/log.js";
 import { safeExtractMessageAndStackFromError } from "../../services/utils.js";
@@ -88,19 +88,14 @@ async function streamChat(req: Request, res: Response) {
 }
 
 /**
- * Get available models for a provider.
+ * Get available models from all configured providers.
  */
-function getModels(req: Request, _res: Response) {
-    const providerType = req.query.provider as string || "anthropic";
-
-    // Return empty array when no providers configured - client handles this gracefully
+function getModels(_req: Request, _res: Response) {
     if (!hasConfiguredProviders()) {
         return { models: [] };
     }
 
-    const llmProvider = getProviderByType(providerType);
-    const models = llmProvider.getAvailableModels();
-    return { models };
+    return { models: getAllModels() };
 }
 
 export default {
diff --git a/apps/server/src/services/llm/index.ts b/apps/server/src/services/llm/index.ts
index 2727dc428b..4a29c37a6e 100644
--- a/apps/server/src/services/llm/index.ts
+++ b/apps/server/src/services/llm/index.ts
@@ -1,4 +1,4 @@
-import type { LlmProvider } from "./types.js";
+import type { LlmProvider, ModelInfo } from "./types.js";
 import { AnthropicProvider } from "./providers/anthropic.js";
 import { OpenAiProvider } from "./providers/openai.js";
 import optionService from "../options.js";
@@ -97,6 +97,35 @@ export function hasConfiguredProviders(): boolean {
     return getConfiguredProviders().length > 0;
 }
 
+/**
+ * Get all models from all configured providers, tagged with their provider type.
+ */
+export function getAllModels(): ModelInfo[] {
+    const configs = getConfiguredProviders();
+    const seenProviderTypes = new Set<string>();
+    const allModels: ModelInfo[] = [];
+
+    for (const config of configs) {
+        // Only include models once per provider type (not per config instance)
+        if (seenProviderTypes.has(config.provider)) {
+            continue;
+        }
+        seenProviderTypes.add(config.provider);
+
+        try {
+            const provider = getProvider(config.id);
+            const models = provider.getAvailableModels();
+            for (const model of models) {
+                allModels.push({ ...model, provider: config.provider });
+            }
+        } catch (e) {
+            log.error(`Failed to get models from provider ${config.provider}: ${e}`);
+        }
+    }
+
+    return allModels;
+}
+
 /**
  * Clear the provider cache. Call this when provider configurations change.
  */
diff --git a/apps/server/src/services/llm/types.ts b/apps/server/src/services/llm/types.ts
index 0e4ae71b32..3924d8601c 100644
--- a/apps/server/src/services/llm/types.ts
+++ b/apps/server/src/services/llm/types.ts
@@ -38,6 +38,8 @@ export interface ModelInfo {
     id: string;
     /** Human-readable name (e.g., "Claude Sonnet 4") */
     name: string;
+    /** Provider type that owns this model (e.g., "anthropic", "openai") */
+    provider?: string;
     /** Pricing per million tokens */
     pricing: ModelPricing;
     /** Whether this is the default model */
diff --git a/packages/commons/src/lib/llm_api.ts b/packages/commons/src/lib/llm_api.ts
index 5f6525bcc6..7554d9d40c 100644
--- a/packages/commons/src/lib/llm_api.ts
+++ b/packages/commons/src/lib/llm_api.ts
@@ -63,6 +63,8 @@ export interface LlmModelInfo {
     id: string;
     /** Human-readable name (e.g., "Claude Sonnet 4") */
     name: string;
+    /** Provider type that owns this model (e.g., "anthropic", "openai") */
+    provider?: string;
     /** Pricing per million tokens */
     pricing: LlmModelPricing;
     /** Whether this is the default model */
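
Note on consuming the new response shape (not part of this diff): since `llm-chat/models` now returns one list covering every configured provider, a client that previously fetched per provider can instead group on the new optional `provider` tag. A minimal sketch, assuming only the `LlmModelInfo` shape shown above; the helper name and relative import path are illustrative, not code from this change:

```ts
import type { LlmModelInfo } from "@triliumnext/commons";

import { getAvailableModels } from "../services/llm_chat.js";

// Hypothetical helper (not in this diff): bucket the provider-tagged
// models so the UI can render one dropdown group per provider type.
export async function groupModelsByProvider(): Promise<Map<string, LlmModelInfo[]>> {
    const models = await getAvailableModels();
    const groups = new Map<string, LlmModelInfo[]>();
    for (const model of models) {
        // `provider` is optional, so untagged models land in an "unknown" bucket.
        const key = model.provider ?? "unknown";
        const group = groups.get(key) ?? [];
        group.push(model);
        groups.set(key, group);
    }
    return groups;
}
```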
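The `useLlmChat.ts` change forwards the selected model's provider tag with each stream request, and the server already exposes `getProviderByType`. How `streamChat` consumes that tag is not shown in this diff; one plausible resolution step, sketched under the assumption that the request body carries the same `provider` field and that the old `"anthropic"` default remains the fallback when the tag is absent:

```ts
import { getProviderByType, hasConfiguredProviders } from "../../services/llm/index.js";

// Hypothetical resolution step (not in this diff): map the provider tag
// sent by the client back to a configured provider instance.
function resolveRequestedProvider(requestedProvider?: string) {
    // Error handling here is an assumption; the route may respond differently.
    if (!hasConfiguredProviders()) {
        throw new Error("No LLM providers are configured");
    }
    // Fall back to the previous hard-coded default when no tag was sent.
    return getProviderByType(requestedProvider ?? "anthropic");
}
```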