fix(llm): not returning full list of models

This commit is contained in:
Elian Doran
2026-03-31 18:59:02 +03:00
parent cabce14a49
commit f04f47d17a
6 changed files with 43 additions and 13 deletions

View File

@@ -3,10 +3,10 @@ import type { LlmChatConfig, LlmCitation, LlmMessage, LlmModelInfo,LlmUsage } fr
import server from "./server.js";
/**
* Fetch available models for a provider.
* Fetch available models from all configured providers.
*/
export async function getAvailableModels(provider: string = "anthropic"): Promise<LlmModelInfo[]> {
const response = await server.get<{ models?: LlmModelInfo[] }>(`llm-chat/models?provider=${encodeURIComponent(provider)}`);
export async function getAvailableModels(): Promise<LlmModelInfo[]> {
const response = await server.get<{ models?: LlmModelInfo[] }>("llm-chat/models");
return response.models ?? [];
}

View File

@@ -239,8 +239,10 @@ export function useLlmChat(
.join("")
}));
const selectedModelProvider = availableModels.find(m => m.id === selectedModel)?.provider;
const streamOptions: Parameters<typeof streamChatCompletion>[1] = {
model: selectedModel || undefined,
provider: selectedModelProvider,
enableWebSearch,
enableNoteTools,
contextNoteId,

View File

@@ -2,7 +2,7 @@ import type { LlmMessage } from "@triliumnext/commons";
import type { Request, Response } from "express";
import { generateChatTitle } from "../../services/llm/chat_title.js";
import { getProviderByType, hasConfiguredProviders, type LlmProviderConfig } from "../../services/llm/index.js";
import { getAllModels, getProviderByType, hasConfiguredProviders, type LlmProviderConfig } from "../../services/llm/index.js";
import { streamToChunks } from "../../services/llm/stream.js";
import log from "../../services/log.js";
import { safeExtractMessageAndStackFromError } from "../../services/utils.js";
@@ -88,19 +88,14 @@ async function streamChat(req: Request, res: Response) {
}
/**
* Get available models for a provider.
* Get available models from all configured providers.
*/
function getModels(req: Request, _res: Response) {
const providerType = req.query.provider as string || "anthropic";
// Return empty array when no providers configured - client handles this gracefully
function getModels(_req: Request, _res: Response) {
if (!hasConfiguredProviders()) {
return { models: [] };
}
const llmProvider = getProviderByType(providerType);
const models = llmProvider.getAvailableModels();
return { models };
return { models: getAllModels() };
}
export default {

View File

@@ -1,4 +1,4 @@
import type { LlmProvider } from "./types.js";
import type { LlmProvider, ModelInfo } from "./types.js";
import { AnthropicProvider } from "./providers/anthropic.js";
import { OpenAiProvider } from "./providers/openai.js";
import optionService from "../options.js";
@@ -97,6 +97,35 @@ export function hasConfiguredProviders(): boolean {
return getConfiguredProviders().length > 0;
}
/**
* Get all models from all configured providers, tagged with their provider type.
*/
export function getAllModels(): ModelInfo[] {
    // All provider configs the user has set up; may contain several
    // configs of the same provider type (e.g. two OpenAI keys).
    const configs = getConfiguredProviders();
    const seenProviderTypes = new Set<string>();
    const allModels: ModelInfo[] = [];
    for (const config of configs) {
        // Only include models once per provider type (not per config instance)
        if (seenProviderTypes.has(config.provider)) {
            continue;
        }
        seenProviderTypes.add(config.provider);
        try {
            const provider = getProvider(config.id);
            const models = provider.getAvailableModels();
            for (const model of models) {
                // Tag each model with its provider type so the client can
                // route chat requests back to the right provider.
                allModels.push({ ...model, provider: config.provider });
            }
        } catch (e) {
            // Best-effort: a single failing provider must not hide the
            // models of the remaining configured providers.
            log.error(`Failed to get models from provider ${config.provider}: ${e}`);
        }
    }
    return allModels;
}
/**
* Clear the provider cache. Call this when provider configurations change.
*/

View File

@@ -38,6 +38,8 @@ export interface ModelInfo {
id: string;
/** Human-readable name (e.g., "Claude Sonnet 4") */
name: string;
/** Provider type that owns this model (e.g., "anthropic", "openai") */
provider?: string;
/** Pricing per million tokens */
pricing: ModelPricing;
/** Whether this is the default model */

View File

@@ -63,6 +63,8 @@ export interface LlmModelInfo {
id: string;
/** Human-readable name (e.g., "Claude Sonnet 4") */
name: string;
/** Provider type that owns this model (e.g., "anthropic", "openai") */
provider?: string;
/** Pricing per million tokens */
pricing: LlmModelPricing;
/** Whether this is the default model */