chore(llm): set up for ChatGPT

This commit is contained in:
Elian Doran
2026-03-31 18:51:19 +03:00
parent 5f669684c4
commit cabce14a49
5 changed files with 233 additions and 10 deletions

View File

@@ -19,7 +19,8 @@ export interface ProviderType {
}
export const PROVIDER_TYPES: ProviderType[] = [
{ id: "anthropic", name: "Anthropic" }
{ id: "anthropic", name: "Anthropic" },
{ id: "openai", name: "OpenAI" }
];
interface AddProviderModalProps {

View File

@@ -31,6 +31,7 @@
},
"dependencies": {
"@ai-sdk/anthropic": "^2.0.0",
"@ai-sdk/openai": "2.0.101",
"ai": "^5.0.0",
"better-sqlite3": "12.8.0",
"html-to-text": "9.0.5",

View File

@@ -1,5 +1,6 @@
import type { LlmProvider } from "./types.js";
import { AnthropicProvider } from "./providers/anthropic.js";
import { OpenAiProvider } from "./providers/openai.js";
import optionService from "../options.js";
import log from "../log.js";
@@ -16,7 +17,8 @@ export interface LlmProviderSetup {
/** Factory functions for creating provider instances */
const providerFactories: Record<string, (apiKey: string) => LlmProvider> = {
anthropic: (apiKey) => new AnthropicProvider(apiKey)
anthropic: (apiKey) => new AnthropicProvider(apiKey),
openai: (apiKey) => new OpenAiProvider(apiKey)
};
/** Cache of instantiated providers by their config ID */

View File

@@ -0,0 +1,202 @@
import { createOpenAI, type OpenAIProvider as OpenAISDKProvider } from "@ai-sdk/openai";
import { generateText, streamText, stepCountIs, type CoreMessage, type ToolSet } from "ai";
import type { LlmMessage } from "@triliumnext/commons";
import becca from "../../../becca/becca.js";
import { getSkillsSummary } from "../skills/index.js";
import { noteTools, attributeTools, hierarchyTools, skillTools, currentNoteTools } from "../tools/index.js";
import type { LlmProvider, LlmProviderConfig, ModelInfo, ModelPricing, StreamResult } from "../types.js";
// Model used for regular chats when the config does not specify one.
const DEFAULT_MODEL = "gpt-4.1";
// Maximum output tokens per completion.
// NOTE(review): 8096 looks like a typo for 8192 (a power of two) — confirm the intended limit.
const DEFAULT_MAX_TOKENS = 8096;
// Cheap, fast model used only for generating short chat titles.
const TITLE_MODEL = "gpt-4.1-mini";
// Titles are at most a few words, so the output budget is tiny.
const TITLE_MAX_TOKENS = 30;
/**
 * Effective per-token cost used to compare models: a weighted average in
 * which output tokens count three times as heavily as input tokens.
 */
function effectiveCost(pricing: ModelPricing): number {
    const { input, output } = pricing;
    return (input + output * 3) / 4;
}
/**
 * OpenAI model catalogue with pricing in USD per million tokens.
 * Source: https://platform.openai.com/docs/pricing
 */
const BASE_MODELS: Omit<ModelInfo, "costMultiplier">[] = [
    // --- Current generation ---
    { id: "gpt-4.1", name: "GPT-4.1", pricing: { input: 2, output: 8 }, contextWindow: 1047576, isDefault: true },
    { id: "gpt-4.1-mini", name: "GPT-4.1 Mini", pricing: { input: 0.4, output: 1.6 }, contextWindow: 1047576 },
    { id: "gpt-4.1-nano", name: "GPT-4.1 Nano", pricing: { input: 0.1, output: 0.4 }, contextWindow: 1047576 },
    { id: "o3", name: "o3", pricing: { input: 2, output: 8 }, contextWindow: 200000 },
    { id: "o4-mini", name: "o4-mini", pricing: { input: 1.1, output: 4.4 }, contextWindow: 200000 },
    // --- Legacy models ---
    { id: "gpt-4o", name: "GPT-4o", pricing: { input: 2.5, output: 10 }, contextWindow: 128000, isLegacy: true },
    { id: "gpt-4o-mini", name: "GPT-4o Mini", pricing: { input: 0.15, output: 0.6 }, contextWindow: 128000, isLegacy: true }
];

// Cost multipliers are relative to the default model (first entry as a
// fallback), rounded to one decimal place.
const referenceModel = BASE_MODELS.find(m => m.isDefault) ?? BASE_MODELS[0];
const referenceCost = effectiveCost(referenceModel.pricing);

const AVAILABLE_MODELS: ModelInfo[] = BASE_MODELS.map(base => {
    const costMultiplier = Math.round((effectiveCost(base.pricing) / referenceCost) * 10) / 10;
    return { ...base, costMultiplier };
});

// Fast lookup table from model id to its pricing entry.
const MODEL_PRICING: Record<string, ModelPricing> = {};
for (const model of AVAILABLE_MODELS) {
    MODEL_PRICING[model.id] = model.pricing;
}
/**
 * Build a lightweight system-prompt hint describing the note the user is
 * currently viewing, or null when the note cannot be resolved.
 * NOTE(review): relies on becca's in-memory note cache being populated — confirm
 * providers are only constructed after startup.
 */
function buildNoteHint(noteId: string): string | null {
    const note = becca.getNote(noteId);
    return note
        ? `The user is currently viewing a ${note.type} note titled "${note.title}". Use the get_current_note tool to read its content if needed.`
        : null;
}
/**
 * LLM provider backed by the OpenAI API through the Vercel AI SDK.
 *
 * Supports streaming chat with optional note/attribute/hierarchy/skill tools,
 * per-model pricing lookups, and cheap one-shot chat-title generation.
 */
export class OpenAiProvider implements LlmProvider {
    name = "openai";

    private openai: OpenAISDKProvider;

    /**
     * @param apiKey OpenAI API key; must be non-empty.
     * @throws Error when no API key is supplied.
     */
    constructor(apiKey: string) {
        if (!apiKey) {
            throw new Error("API key is required for OpenAI provider");
        }
        this.openai = createOpenAI({ apiKey });
    }

    /**
     * Stream a chat completion. The system prompt is assembled from the
     * configured prompt (falling back to the first system message in the
     * history), plus optional hints about the currently viewed note and the
     * available skills.
     */
    chat(messages: LlmMessage[], config: LlmProviderConfig): StreamResult {
        const promptSections: string[] = [];

        const basePrompt = config.systemPrompt || messages.find(m => m.role === "system")?.content;
        if (basePrompt) {
            promptSections.push(basePrompt);
        }

        // Lightweight hint about the note the user is currently viewing.
        if (config.contextNoteId) {
            const noteHint = buildNoteHint(config.contextNoteId);
            if (noteHint) {
                promptSections.push(noteHint);
            }
        }

        // Advertise the skills system so the model loads a skill before
        // attempting complex operations.
        if (config.enableNoteTools) {
            promptSections.push(`You have access to skills that provide specialized instructions. Load a skill with the load_skill tool before performing complex operations.\n\nAvailable skills:\n${getSkillsSummary()}`);
        }

        const coreMessages: CoreMessage[] = [];
        const systemPrompt = promptSections.join("\n\n");
        if (systemPrompt) {
            coreMessages.push({ role: "system", content: systemPrompt });
        }
        for (const message of messages) {
            if (message.role === "system") {
                continue; // system content was folded into the prompt above
            }
            coreMessages.push({
                role: message.role as "user" | "assistant",
                content: message.content
            });
        }

        const streamOptions: Parameters<typeof streamText>[0] = {
            model: this.openai(config.model || DEFAULT_MODEL),
            messages: coreMessages,
            maxOutputTokens: config.maxTokens || DEFAULT_MAX_TOKENS
        };

        // Only attach tools (and the multi-step loop) when at least one tool
        // group is enabled for this request.
        const tools: ToolSet = {
            ...(config.contextNoteId ? currentNoteTools(config.contextNoteId) : {}),
            ...(config.enableNoteTools
                ? { ...noteTools, ...attributeTools, ...hierarchyTools, ...skillTools }
                : {})
        };
        if (Object.keys(tools).length > 0) {
            streamOptions.tools = tools;
            streamOptions.stopWhen = stepCountIs(5);
            streamOptions.toolChoice = "auto";
        }

        return streamText(streamOptions);
    }

    /** Pricing (USD per million tokens) for a model id, if known. */
    getModelPricing(model: string): ModelPricing | undefined {
        return MODEL_PRICING[model];
    }

    /** Static catalogue of the OpenAI models this provider exposes. */
    getAvailableModels(): ModelInfo[] {
        return AVAILABLE_MODELS;
    }

    /**
     * Generate a short chat title from the first user message, using a cheap
     * model with a tiny output budget.
     */
    async generateTitle(firstMessage: string): Promise<string> {
        const response = await generateText({
            model: this.openai(TITLE_MODEL),
            maxOutputTokens: TITLE_MAX_TOKENS,
            messages: [
                {
                    role: "user",
                    content: `Summarize the following message as a very short chat title (max 6 words). Reply with ONLY the title, no quotes or punctuation at the end.\n\nMessage: ${firstMessage}`
                }
            ]
        });
        return response.text.trim();
    }
}

33
pnpm-lock.yaml generated
View File

@@ -559,6 +559,9 @@ importers:
'@ai-sdk/anthropic':
specifier: ^2.0.0
version: 2.0.71(zod@4.3.6)
'@ai-sdk/openai':
specifier: 2.0.101
version: 2.0.101(zod@4.3.6)
ai:
specifier: ^5.0.0
version: 5.0.161(zod@4.3.6)
@@ -1548,6 +1551,12 @@ packages:
peerDependencies:
zod: ^3.25.76 || ^4.1.8
'@ai-sdk/openai@2.0.101':
resolution: {integrity: sha512-kQ52HLV45T3bQbRzWExXW6+pkg3Nvq4dUnZHUPJXWgkUUsAhZjxHrXqPOc/0yfn/4+Dn2uLmIgAkP9IfzMMcNg==}
engines: {node: '>=18'}
peerDependencies:
zod: ^3.25.76 || ^4.1.8
'@ai-sdk/provider-utils@3.0.22':
resolution: {integrity: sha512-fFT1KfUUKktfAFm5mClJhS1oux9tP2qgzmEZVl5UdwltQ1LO/s8hd7znVrgKzivwv1s1FIPza0s9OpJaNB/vHw==}
engines: {node: '>=18'}
@@ -16040,6 +16049,12 @@ snapshots:
'@vercel/oidc': 3.1.0
zod: 4.3.6
'@ai-sdk/openai@2.0.101(zod@4.3.6)':
dependencies:
'@ai-sdk/provider': 2.0.1
'@ai-sdk/provider-utils': 3.0.22(zod@4.3.6)
zod: 4.3.6
'@ai-sdk/provider-utils@3.0.22(zod@4.3.6)':
dependencies:
'@ai-sdk/provider': 2.0.1
@@ -17247,8 +17262,6 @@ snapshots:
'@ckeditor/ckeditor5-table': 47.6.1
'@ckeditor/ckeditor5-utils': 47.6.1
ckeditor5: 47.6.1
transitivePeerDependencies:
- supports-color
'@ckeditor/ckeditor5-emoji@47.6.1':
dependencies:
@@ -17384,6 +17397,8 @@ snapshots:
'@ckeditor/ckeditor5-ui': 47.6.1
'@ckeditor/ckeditor5-utils': 47.6.1
ckeditor5: 47.6.1
transitivePeerDependencies:
- supports-color
'@ckeditor/ckeditor5-highlight@47.6.1':
dependencies:
@@ -17393,6 +17408,8 @@ snapshots:
'@ckeditor/ckeditor5-ui': 47.6.1
'@ckeditor/ckeditor5-utils': 47.6.1
ckeditor5: 47.6.1
transitivePeerDependencies:
- supports-color
'@ckeditor/ckeditor5-horizontal-line@47.6.1':
dependencies:
@@ -17402,6 +17419,8 @@ snapshots:
'@ckeditor/ckeditor5-utils': 47.6.1
'@ckeditor/ckeditor5-widget': 47.6.1
ckeditor5: 47.6.1
transitivePeerDependencies:
- supports-color
'@ckeditor/ckeditor5-html-embed@47.6.1':
dependencies:
@@ -17411,6 +17430,8 @@ snapshots:
'@ckeditor/ckeditor5-utils': 47.6.1
'@ckeditor/ckeditor5-widget': 47.6.1
ckeditor5: 47.6.1
transitivePeerDependencies:
- supports-color
'@ckeditor/ckeditor5-html-support@47.6.1':
dependencies:
@@ -17468,6 +17489,8 @@ snapshots:
'@ckeditor/ckeditor5-ui': 47.6.1
'@ckeditor/ckeditor5-utils': 47.6.1
ckeditor5: 47.6.1
transitivePeerDependencies:
- supports-color
'@ckeditor/ckeditor5-inspector@5.0.0': {}
@@ -17592,8 +17615,6 @@ snapshots:
'@ckeditor/ckeditor5-ui': 47.6.1
'@ckeditor/ckeditor5-utils': 47.6.1
ckeditor5: 47.6.1
transitivePeerDependencies:
- supports-color
'@ckeditor/ckeditor5-operations-compressor@47.6.1':
dependencies:
@@ -17709,8 +17730,6 @@ snapshots:
'@ckeditor/ckeditor5-ui': 47.6.1
'@ckeditor/ckeditor5-utils': 47.6.1
ckeditor5: 47.6.1
transitivePeerDependencies:
- supports-color
'@ckeditor/ckeditor5-restricted-editing@47.6.1':
dependencies:
@@ -17899,8 +17918,6 @@ snapshots:
'@ckeditor/ckeditor5-icons': 47.6.1
'@ckeditor/ckeditor5-ui': 47.6.1
'@ckeditor/ckeditor5-utils': 47.6.1
transitivePeerDependencies:
- supports-color
'@ckeditor/ckeditor5-upload@47.6.1':
dependencies: