From 261e5b59e06e83527d208a6c0b58300c7575b889 Mon Sep 17 00:00:00 2001
From: Elian Doran
Date: Sun, 29 Mar 2026 12:44:53 +0300
Subject: [PATCH] refactor(llm): use shared types in commons

---
 CLAUDE.md                                          |  5 ++
 apps/client/src/services/llm_chat.ts               | 30 ++---------
 .../type_widgets/llm_chat/ChatMessage.tsx          |  4 +-
 .../widgets/type_widgets/llm_chat/LlmChat.tsx      | 11 ++--
 apps/server/src/routes/api/llm_chat.ts             |  3 +-
 apps/server/src/services/llm/index.ts              |  2 +-
 .../src/services/llm/providers/anthropic.ts        |  3 +-
 apps/server/src/services/llm/types.ts              | 45 +++-------
 packages/commons/src/index.ts                      |  1 +
 packages/commons/src/lib/llm_api.ts                | 53 +++++++++++++++++++
 10 files changed, 83 insertions(+), 74 deletions(-)
 create mode 100644 packages/commons/src/lib/llm_api.ts

diff --git a/CLAUDE.md b/CLAUDE.md
index 1cc5e198d5..be265e5bd0 100644
--- a/CLAUDE.md
+++ b/CLAUDE.md
@@ -129,6 +129,11 @@ Trilium provides powerful user scripting capabilities:
 - **Do not use `crypto.randomUUID()`** or other Web Crypto APIs that require secure contexts - Trilium can run over HTTP, not just HTTPS
 - Use `randomString()` from `apps/client/src/services/utils.ts` for generating IDs instead
 
+### Shared Types Policy
+- Types shared between client and server belong in `@triliumnext/commons` (`packages/commons/src/lib/`)
+- Import shared types directly from `@triliumnext/commons` - do not re-export them from app-specific modules
+- Keep app-specific types (e.g., `LlmProvider` for server, `StreamCallbacks` for client) in their respective apps
+
 ## Common Development Tasks
 
 ### Adding New Note Types
diff --git a/apps/client/src/services/llm_chat.ts b/apps/client/src/services/llm_chat.ts
index 20f0ea1283..a985683f7c 100644
--- a/apps/client/src/services/llm_chat.ts
+++ b/apps/client/src/services/llm_chat.ts
@@ -1,32 +1,12 @@
+import type { LlmMessage, LlmCitation, LlmChatConfig } from "@triliumnext/commons";
 import server from "./server.js";
 
-export interface ChatMessage {
-    role: "user" | "assistant" | "system";
-    content: string;
-}
-
-export interface ChatConfig {
-    provider?: string;
-    model?: string;
-    systemPrompt?: string;
-    enableWebSearch?: boolean;
-    enableExtendedThinking?: boolean;
-    /** Token budget for extended thinking (default: 10000) */
-    thinkingBudget?: number;
-}
-
-export interface Citation {
-    url?: string;
-    title?: string;
-    citedText?: string;
-}
-
 export interface StreamCallbacks {
     onChunk: (text: string) => void;
     onThinking?: (text: string) => void;
     onToolUse?: (toolName: string, input: Record<string, unknown>) => void;
     onToolResult?: (toolName: string, result: string) => void;
-    onCitation?: (citation: Citation) => void;
+    onCitation?: (citation: LlmCitation) => void;
     onError: (error: string) => void;
     onDone: () => void;
 }
 
@@ -35,8 +15,8 @@ /**
  * Stream a chat completion from the LLM API using Server-Sent Events.
  */
 export async function streamChatCompletion(
-    messages: ChatMessage[],
-    config: ChatConfig,
+    messages: LlmMessage[],
+    config: LlmChatConfig,
     callbacks: StreamCallbacks
 ): Promise<void> {
     const headers = await server.getHeaders();
@@ -103,7 +83,7 @@
                     callbacks.onDone();
                     break;
                 }
-            } catch (e) {
+            } catch {
                 // Ignore JSON parse errors for partial data
             }
         }
diff --git a/apps/client/src/widgets/type_widgets/llm_chat/ChatMessage.tsx b/apps/client/src/widgets/type_widgets/llm_chat/ChatMessage.tsx
index e5e33757a0..113a35c422 100644
--- a/apps/client/src/widgets/type_widgets/llm_chat/ChatMessage.tsx
+++ b/apps/client/src/widgets/type_widgets/llm_chat/ChatMessage.tsx
@@ -1,7 +1,7 @@
+import type { LlmCitation } from "@triliumnext/commons";
 import { useMemo } from "preact/hooks";
 import { marked } from "marked";
 import { t } from "../../../services/i18n.js";
-import type { Citation } from "../../../services/llm_chat.js";
 import "./LlmChat.css";
 
 // Configure marked for safe rendering
@@ -17,7 +17,7 @@ interface StoredMessage {
     role: "user" | "assistant" | "system";
     content: string;
     createdAt: string;
-    citations?: Citation[];
+    citations?: LlmCitation[];
     /** Message type for special rendering. Defaults to "message" if omitted. */
     type?: MessageType;
 }
diff --git a/apps/client/src/widgets/type_widgets/llm_chat/LlmChat.tsx b/apps/client/src/widgets/type_widgets/llm_chat/LlmChat.tsx
index 37d1a08b75..bab59fef71 100644
--- a/apps/client/src/widgets/type_widgets/llm_chat/LlmChat.tsx
+++ b/apps/client/src/widgets/type_widgets/llm_chat/LlmChat.tsx
@@ -1,6 +1,7 @@
+import type { LlmCitation, LlmMessage } from "@triliumnext/commons";
 import { useCallback, useEffect, useRef, useState } from "preact/hooks";
 import { t } from "../../../services/i18n.js";
-import { streamChatCompletion, type ChatMessage as ChatMessageData, type Citation } from "../../../services/llm_chat.js";
+import { streamChatCompletion } from "../../../services/llm_chat.js";
 import { randomString } from "../../../services/utils.js";
 import { useEditorSpacedUpdate } from "../../react/hooks.js";
 import { TypeWidgetProps } from "../type_widget.js";
@@ -14,7 +15,7 @@ interface StoredMessage {
     role: "user" | "assistant" | "system";
     content: string;
     createdAt: string;
-    citations?: Citation[];
+    citations?: LlmCitation[];
     /** Message type for special rendering. Defaults to "message" if omitted. */
     type?: MessageType;
 }
@@ -33,7 +34,7 @@ export default function LlmChat({ note, ntxId, noteContext }: TypeWidgetProps) {
     const [streamingContent, setStreamingContent] = useState("");
     const [streamingThinking, setStreamingThinking] = useState("");
     const [toolActivity, setToolActivity] = useState(null);
-    const [pendingCitations, setPendingCitations] = useState<Citation[]>([]);
+    const [pendingCitations, setPendingCitations] = useState<LlmCitation[]>([]);
     const [enableWebSearch, setEnableWebSearch] = useState(true);
     const [enableExtendedThinking, setEnableExtendedThinking] = useState(false);
     const [error, setError] = useState(null);
@@ -126,9 +127,9 @@
 
     let assistantContent = "";
     let thinkingContent = "";
-    const citations: Citation[] = [];
+    const citations: LlmCitation[] = [];
 
-    const apiMessages: ChatMessageData[] = newMessages.map(m => ({
+    const apiMessages: LlmMessage[] = newMessages.map(m => ({
         role: m.role,
         content: m.content
     }));
diff --git a/apps/server/src/routes/api/llm_chat.ts b/apps/server/src/routes/api/llm_chat.ts
index fd97d19924..357717e4cc 100644
--- a/apps/server/src/routes/api/llm_chat.ts
+++ b/apps/server/src/routes/api/llm_chat.ts
@@ -1,6 +1,7 @@
 import type { Request, Response } from "express";
+import type { LlmMessage } from "@triliumnext/commons";
 
-import { getProvider, type LlmMessage, type LlmProviderConfig } from "../../services/llm/index.js";
+import { getProvider, type LlmProviderConfig } from "../../services/llm/index.js";
 
 interface ChatRequest {
     messages: LlmMessage[];
diff --git a/apps/server/src/services/llm/index.ts b/apps/server/src/services/llm/index.ts
index 3b45f8c031..82a88ee1ce 100644
--- a/apps/server/src/services/llm/index.ts
+++ b/apps/server/src/services/llm/index.ts
@@ -23,4 +23,4 @@ export function clearProviderCache(): void {
     cachedProviders = {};
 }
 
-export * from "./types.js";
+export type { LlmProvider, LlmProviderConfig } from "./types.js";
diff --git a/apps/server/src/services/llm/providers/anthropic.ts b/apps/server/src/services/llm/providers/anthropic.ts
index d76f5edd6f..5f217a15ec 100644
--- a/apps/server/src/services/llm/providers/anthropic.ts
+++ b/apps/server/src/services/llm/providers/anthropic.ts
@@ -1,6 +1,7 @@
 import Anthropic from "@anthropic-ai/sdk";
+import type { LlmMessage, LlmStreamChunk } from "@triliumnext/commons";
 
-import type { LlmMessage, LlmProvider, LlmProviderConfig,LlmStreamChunk } from "../types.js";
+import type { LlmProvider, LlmProviderConfig } from "../types.js";
 
 const DEFAULT_MODEL = "claude-sonnet-4-20250514";
 const DEFAULT_MAX_TOKENS = 8096;
diff --git a/apps/server/src/services/llm/types.ts b/apps/server/src/services/llm/types.ts
index f6bef71b77..38f64ea61e 100644
--- a/apps/server/src/services/llm/types.ts
+++ b/apps/server/src/services/llm/types.ts
@@ -1,50 +1,17 @@
 /**
- * LLM Provider types for chat integration.
- * Provider-agnostic interfaces to support multiple LLM backends.
+ * Server-specific LLM Provider types.
+ * Shared types (LlmMessage, LlmCitation, LlmStreamChunk, LlmChatConfig)
+ * should be imported from @triliumnext/commons.
  */
 
-export interface LlmMessage {
-    role: "user" | "assistant" | "system";
-    content: string;
-}
+import type { LlmChatConfig, LlmMessage, LlmStreamChunk } from "@triliumnext/commons";
 
 /**
- * Citation information extracted from LLM responses.
- * May include URL (for web search) or document metadata (for document citations).
+ * Extended provider config with server-specific options.
  */
-export interface LlmCitation {
-    /** Source URL (typically from web search) */
-    url?: string;
-    /** Document or page title */
-    title?: string;
-    /** The text that was cited */
-    citedText?: string;
-}
-
-/**
- * Stream chunk types for real-time updates.
- */
-export type LlmStreamChunk =
-    | { type: "text"; content: string }
-    | { type: "thinking"; content: string }
-    | { type: "tool_use"; toolName: string; toolInput: Record<string, unknown> }
-    | { type: "tool_result"; toolName: string; result: string }
-    | { type: "citation"; citation: LlmCitation }
-    | { type: "error"; error: string }
-    | { type: "done" };
-
-export interface LlmProviderConfig {
-    provider?: string;
-    model?: string;
+export interface LlmProviderConfig extends LlmChatConfig {
     maxTokens?: number;
     temperature?: number;
-    systemPrompt?: string;
-    /** Enable web search tool */
-    enableWebSearch?: boolean;
-    /** Enable extended thinking for deeper reasoning */
-    enableExtendedThinking?: boolean;
-    /** Token budget for extended thinking (default: 10000) */
-    thinkingBudget?: number;
 }
 
 export interface LlmProvider {
diff --git a/packages/commons/src/index.ts b/packages/commons/src/index.ts
index b208bfd3b6..faae4922eb 100644
--- a/packages/commons/src/index.ts
+++ b/packages/commons/src/index.ts
@@ -16,3 +16,4 @@ export * from "./lib/notes.js";
 export * from "./lib/week_utils.js";
 export { default as BUILTIN_ATTRIBUTES } from "./lib/builtin_attributes.js";
 export * from "./lib/spreadsheet/render_to_html.js";
+export * from "./lib/llm_api.js";
diff --git a/packages/commons/src/lib/llm_api.ts b/packages/commons/src/lib/llm_api.ts
new file mode 100644
index 0000000000..6197f184ac
--- /dev/null
+++ b/packages/commons/src/lib/llm_api.ts
@@ -0,0 +1,53 @@
+/**
+ * Shared LLM types for chat integration.
+ * Used by both client and server for API communication.
+ */
+
+/**
+ * A chat message in the conversation.
+ */
+export interface LlmMessage {
+    role: "user" | "assistant" | "system";
+    content: string;
+}
+
+/**
+ * Citation information extracted from LLM responses.
+ * May include URL (for web search) or document metadata (for document citations).
+ */
+export interface LlmCitation {
+    /** Source URL (typically from web search) */
+    url?: string;
+    /** Document or page title */
+    title?: string;
+    /** The text that was cited */
+    citedText?: string;
+}
+
+/**
+ * Configuration for LLM chat requests.
+ */
+export interface LlmChatConfig {
+    provider?: string;
+    model?: string;
+    systemPrompt?: string;
+    /** Enable web search tool */
+    enableWebSearch?: boolean;
+    /** Enable extended thinking for deeper reasoning */
+    enableExtendedThinking?: boolean;
+    /** Token budget for extended thinking (default: 10000) */
+    thinkingBudget?: number;
+}
+
+/**
+ * Stream chunk types for real-time SSE updates.
+ * Defines the protocol between server and client.
+ */
+export type LlmStreamChunk =
+    | { type: "text"; content: string }
+    | { type: "thinking"; content: string }
+    | { type: "tool_use"; toolName: string; toolInput: Record<string, unknown> }
+    | { type: "tool_result"; toolName: string; result: string }
+    | { type: "citation"; citation: LlmCitation }
+    | { type: "error"; error: string }
+    | { type: "done" };
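
For reviewers: a minimal sketch of how a client consumer might map the shared
LlmStreamChunk union from @triliumnext/commons onto the client's StreamCallbacks.
The dispatchChunk helper is hypothetical and is not part of this patch; the
StreamCallbacks shape is copied from apps/client/src/services/llm_chat.ts above.

import type { LlmCitation, LlmStreamChunk } from "@triliumnext/commons";

// Mirrors the client-side StreamCallbacks interface from llm_chat.ts.
interface StreamCallbacks {
    onChunk: (text: string) => void;
    onThinking?: (text: string) => void;
    onToolUse?: (toolName: string, input: Record<string, unknown>) => void;
    onToolResult?: (toolName: string, result: string) => void;
    onCitation?: (citation: LlmCitation) => void;
    onError: (error: string) => void;
    onDone: () => void;
}

// Exhaustive switch over the discriminated union: if a new chunk type is
// later added to LlmStreamChunk in commons, the `never` assignment in the
// default branch stops compiling until the client handles it.
function dispatchChunk(chunk: LlmStreamChunk, callbacks: StreamCallbacks): void {
    switch (chunk.type) {
        case "text":
            callbacks.onChunk(chunk.content);
            break;
        case "thinking":
            callbacks.onThinking?.(chunk.content);
            break;
        case "tool_use":
            callbacks.onToolUse?.(chunk.toolName, chunk.toolInput);
            break;
        case "tool_result":
            callbacks.onToolResult?.(chunk.toolName, chunk.result);
            break;
        case "citation":
            callbacks.onCitation?.(chunk.citation);
            break;
        case "error":
            callbacks.onError(chunk.error);
            break;
        case "done":
            callbacks.onDone();
            break;
        default: {
            const unhandled: never = chunk;
            void unhandled;
        }
    }
}

Keeping the union in one place is what makes this compile-time exhaustiveness
check possible: both sides of the SSE protocol now fail to build, rather than
silently drift, when the chunk vocabulary changes.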