refactor(llm): use shared types in commons

Elian Doran
2026-03-29 12:44:53 +03:00
parent fa7ec01329
commit 261e5b59e0
10 changed files with 83 additions and 74 deletions

View File

@@ -129,6 +129,11 @@ Trilium provides powerful user scripting capabilities:
 - **Do not use `crypto.randomUUID()`** or other Web Crypto APIs that require secure contexts - Trilium can run over HTTP, not just HTTPS
 - Use `randomString()` from `apps/client/src/services/utils.ts` for generating IDs instead
+
+### Shared Types Policy
+- Types shared between client and server belong in `@triliumnext/commons` (`packages/commons/src/lib/`)
+- Import shared types directly from `@triliumnext/commons` - do not re-export them from app-specific modules
+- Keep app-specific types (e.g., `LlmProvider` for server, `StreamCallbacks` for client) in their respective apps
 
 ## Common Development Tasks
 ### Adding New Note Types

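The policy above, sketched as a hypothetical client module (the relative import path is illustrative; the type names are the ones introduced by this commit):

```ts
// Shared wire types: imported straight from the commons package.
import type { LlmMessage, LlmCitation, LlmChatConfig } from "@triliumnext/commons";

// App-specific types: imported from the app module that owns them,
// never re-exported through commons or another app.
import type { StreamCallbacks } from "./llm_chat.js";
```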
View File

@@ -1,32 +1,12 @@
+import type { LlmMessage, LlmCitation, LlmChatConfig } from "@triliumnext/commons";
 import server from "./server.js";
-export interface ChatMessage {
-    role: "user" | "assistant" | "system";
-    content: string;
-}
-export interface ChatConfig {
-    provider?: string;
-    model?: string;
-    systemPrompt?: string;
-    enableWebSearch?: boolean;
-    enableExtendedThinking?: boolean;
-    /** Token budget for extended thinking (default: 10000) */
-    thinkingBudget?: number;
-}
-export interface Citation {
-    url?: string;
-    title?: string;
-    citedText?: string;
-}
 export interface StreamCallbacks {
     onChunk: (text: string) => void;
     onThinking?: (text: string) => void;
     onToolUse?: (toolName: string, input: Record<string, unknown>) => void;
     onToolResult?: (toolName: string, result: string) => void;
-    onCitation?: (citation: Citation) => void;
+    onCitation?: (citation: LlmCitation) => void;
     onError: (error: string) => void;
     onDone: () => void;
 }
@@ -35,8 +15,8 @@ export interface StreamCallbacks {
  * Stream a chat completion from the LLM API using Server-Sent Events.
  */
 export async function streamChatCompletion(
-    messages: ChatMessage[],
-    config: ChatConfig,
+    messages: LlmMessage[],
+    config: LlmChatConfig,
     callbacks: StreamCallbacks
 ): Promise<void> {
     const headers = await server.getHeaders();
@@ -103,7 +83,7 @@ export async function streamChatCompletion(
                     callbacks.onDone();
                     break;
                 }
-            } catch (e) {
+            } catch {
                 // Ignore JSON parse errors for partial data
             }
         }

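For context, here is a minimal sketch of a caller of the updated `streamChatCompletion` signature. The import path, prompt strings, and config values are illustrative; the types and callback shape come from the diff above:

```ts
import type { LlmMessage, LlmChatConfig } from "@triliumnext/commons";
import { streamChatCompletion, type StreamCallbacks } from "./llm_chat.js";

const messages: LlmMessage[] = [
    { role: "system", content: "You are a helpful assistant." },
    { role: "user", content: "Summarize this note." }
];

const config: LlmChatConfig = { enableWebSearch: true };

const callbacks: StreamCallbacks = {
    onChunk: (text) => console.log(text),
    onCitation: (citation) => console.log("cited:", citation.url ?? citation.title),
    onError: (error) => console.error(error),
    onDone: () => console.log("[done]")
};

await streamChatCompletion(messages, config, callbacks);
```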
View File

@@ -1,7 +1,7 @@
+import type { LlmCitation } from "@triliumnext/commons";
 import { useMemo } from "preact/hooks";
 import { marked } from "marked";
 import { t } from "../../../services/i18n.js";
-import type { Citation } from "../../../services/llm_chat.js";
 import "./LlmChat.css";
 // Configure marked for safe rendering
@@ -17,7 +17,7 @@ interface StoredMessage {
     role: "user" | "assistant" | "system";
     content: string;
     createdAt: string;
-    citations?: Citation[];
+    citations?: LlmCitation[];
     /** Message type for special rendering. Defaults to "message" if omitted. */
     type?: MessageType;
 }

View File

@@ -1,6 +1,7 @@
+import type { LlmCitation, LlmMessage } from "@triliumnext/commons";
 import { useCallback, useEffect, useRef, useState } from "preact/hooks";
 import { t } from "../../../services/i18n.js";
-import { streamChatCompletion, type ChatMessage as ChatMessageData, type Citation } from "../../../services/llm_chat.js";
+import { streamChatCompletion } from "../../../services/llm_chat.js";
 import { randomString } from "../../../services/utils.js";
 import { useEditorSpacedUpdate } from "../../react/hooks.js";
 import { TypeWidgetProps } from "../type_widget.js";
@@ -14,7 +15,7 @@ interface StoredMessage {
     role: "user" | "assistant" | "system";
     content: string;
     createdAt: string;
-    citations?: Citation[];
+    citations?: LlmCitation[];
     /** Message type for special rendering. Defaults to "message" if omitted. */
     type?: MessageType;
 }
@@ -33,7 +34,7 @@ export default function LlmChat({ note, ntxId, noteContext }: TypeWidgetProps) {
     const [streamingContent, setStreamingContent] = useState("");
     const [streamingThinking, setStreamingThinking] = useState("");
     const [toolActivity, setToolActivity] = useState<string | null>(null);
-    const [pendingCitations, setPendingCitations] = useState<Citation[]>([]);
+    const [pendingCitations, setPendingCitations] = useState<LlmCitation[]>([]);
     const [enableWebSearch, setEnableWebSearch] = useState(true);
     const [enableExtendedThinking, setEnableExtendedThinking] = useState(false);
     const [error, setError] = useState<string | null>(null);
@@ -126,9 +127,9 @@ export default function LlmChat({ note, ntxId, noteContext }: TypeWidgetProps) {
         let assistantContent = "";
         let thinkingContent = "";
-        const citations: Citation[] = [];
+        const citations: LlmCitation[] = [];
 
-        const apiMessages: ChatMessageData[] = newMessages.map(m => ({
+        const apiMessages: LlmMessage[] = newMessages.map(m => ({
             role: m.role,
             content: m.content
         }));

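Note that the `map` above deliberately re-shapes the stored messages: `LlmMessage` carries only `role` and `content`, so the client-side extras on `StoredMessage` (`createdAt`, `citations`, `type`) are dropped before the request is sent.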
View File

@@ -1,6 +1,7 @@
 import type { Request, Response } from "express";
+import type { LlmMessage } from "@triliumnext/commons";
-import { getProvider, type LlmMessage, type LlmProviderConfig } from "../../services/llm/index.js";
+import { getProvider, type LlmProviderConfig } from "../../services/llm/index.js";
 
 interface ChatRequest {
     messages: LlmMessage[];

View File

@@ -23,4 +23,4 @@ export function clearProviderCache(): void {
     cachedProviders = {};
 }
 
-export * from "./types.js";
+export type { LlmProvider, LlmProviderConfig } from "./types.js";

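With the barrel narrowed to an explicit `export type`, consumers can still get the server-local types from the index, but the shared types must now come from commons. A hypothetical consumer (the import path is illustrative):

```ts
// Still available from the server barrel: the server-only types.
import { getProvider, type LlmProvider, type LlmProviderConfig } from "./services/llm/index.js";

// No longer re-exported by the barrel; import from commons instead.
import type { LlmMessage, LlmStreamChunk } from "@triliumnext/commons";
```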
View File

@@ -1,6 +1,7 @@
 import Anthropic from "@anthropic-ai/sdk";
+import type { LlmMessage, LlmStreamChunk } from "@triliumnext/commons";
-import type { LlmMessage, LlmProvider, LlmProviderConfig,LlmStreamChunk } from "../types.js";
+import type { LlmProvider, LlmProviderConfig } from "../types.js";
 
 const DEFAULT_MODEL = "claude-sonnet-4-20250514";
 const DEFAULT_MAX_TOKENS = 8096;

View File

@@ -1,50 +1,17 @@
 /**
- * LLM Provider types for chat integration.
- * Provider-agnostic interfaces to support multiple LLM backends.
+ * Server-specific LLM Provider types.
+ * Shared types (LlmMessage, LlmCitation, LlmStreamChunk, LlmChatConfig)
+ * should be imported from @triliumnext/commons.
  */
-export interface LlmMessage {
-    role: "user" | "assistant" | "system";
-    content: string;
-}
+import type { LlmChatConfig, LlmMessage, LlmStreamChunk } from "@triliumnext/commons";
 
 /**
- * Citation information extracted from LLM responses.
- * May include URL (for web search) or document metadata (for document citations).
+ * Extended provider config with server-specific options.
  */
-export interface LlmCitation {
-    /** Source URL (typically from web search) */
-    url?: string;
-    /** Document or page title */
-    title?: string;
-    /** The text that was cited */
-    citedText?: string;
-}
-/**
- * Stream chunk types for real-time updates.
- */
-export type LlmStreamChunk =
-    | { type: "text"; content: string }
-    | { type: "thinking"; content: string }
-    | { type: "tool_use"; toolName: string; toolInput: Record<string, unknown> }
-    | { type: "tool_result"; toolName: string; result: string }
-    | { type: "citation"; citation: LlmCitation }
-    | { type: "error"; error: string }
-    | { type: "done" };
-export interface LlmProviderConfig {
-    provider?: string;
-    model?: string;
+export interface LlmProviderConfig extends LlmChatConfig {
     maxTokens?: number;
     temperature?: number;
-    systemPrompt?: string;
-    /** Enable web search tool */
-    enableWebSearch?: boolean;
-    /** Enable extended thinking for deeper reasoning */
-    enableExtendedThinking?: boolean;
-    /** Token budget for extended thinking (default: 10000) */
-    thinkingBudget?: number;
 }
 
 export interface LlmProvider {

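The net effect of `extends LlmChatConfig`, as a sketch (the `temperature` value is illustrative; the model and token defaults appear in the provider diff above):

```ts
import type { LlmProviderConfig } from "./types.js"; // hypothetical import site

const config: LlmProviderConfig = {
    // Inherited from the shared LlmChatConfig:
    model: "claude-sonnet-4-20250514",
    enableExtendedThinking: true,
    thinkingBudget: 10000,
    // Server-specific fields that stay in types.ts:
    maxTokens: 8096,
    temperature: 0.7
};
```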
View File

@@ -16,3 +16,4 @@ export * from "./lib/notes.js";
 export * from "./lib/week_utils.js";
 export { default as BUILTIN_ATTRIBUTES } from "./lib/builtin_attributes.js";
 export * from "./lib/spreadsheet/render_to_html.js";
+export * from "./lib/llm_api.js";

View File

@@ -0,0 +1,53 @@
+/**
+ * Shared LLM types for chat integration.
+ * Used by both client and server for API communication.
+ */
+
+/**
+ * A chat message in the conversation.
+ */
+export interface LlmMessage {
+    role: "user" | "assistant" | "system";
+    content: string;
+}
+
+/**
+ * Citation information extracted from LLM responses.
+ * May include URL (for web search) or document metadata (for document citations).
+ */
+export interface LlmCitation {
+    /** Source URL (typically from web search) */
+    url?: string;
+    /** Document or page title */
+    title?: string;
+    /** The text that was cited */
+    citedText?: string;
+}
+
+/**
+ * Configuration for LLM chat requests.
+ */
+export interface LlmChatConfig {
+    provider?: string;
+    model?: string;
+    systemPrompt?: string;
+    /** Enable web search tool */
+    enableWebSearch?: boolean;
+    /** Enable extended thinking for deeper reasoning */
+    enableExtendedThinking?: boolean;
+    /** Token budget for extended thinking (default: 10000) */
+    thinkingBudget?: number;
+}
+
+/**
+ * Stream chunk types for real-time SSE updates.
+ * Defines the protocol between server and client.
+ */
+export type LlmStreamChunk =
+    | { type: "text"; content: string }
+    | { type: "thinking"; content: string }
+    | { type: "tool_use"; toolName: string; toolInput: Record<string, unknown> }
+    | { type: "tool_result"; toolName: string; result: string }
+    | { type: "citation"; citation: LlmCitation }
+    | { type: "error"; error: string }
+    | { type: "done" };
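Because `LlmStreamChunk` is a discriminated union on `type`, TypeScript narrows each case automatically, which is what makes the SSE protocol safe to consume on the client. A minimal sketch (the handler name is hypothetical):

```ts
import type { LlmStreamChunk } from "@triliumnext/commons";

function handleChunk(chunk: LlmStreamChunk): void {
    switch (chunk.type) {
        case "text":
            console.log(chunk.content); // narrowed to { type: "text"; content: string }
            break;
        case "thinking":
            console.debug("thinking:", chunk.content);
            break;
        case "tool_use":
            console.log(`tool ${chunk.toolName}`, chunk.toolInput);
            break;
        case "tool_result":
            console.log(`result from ${chunk.toolName}:`, chunk.result);
            break;
        case "citation":
            console.log("citation:", chunk.citation.url ?? chunk.citation.title);
            break;
        case "error":
            console.error(chunk.error);
            break;
        case "done":
            break;
    }
}
```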