mirror of https://github.com/zadam/trilium.git
synced 2025-11-02 19:36:12 +01:00

chore(nx/server): move source code

apps/server/src/services/llm/providers/openai_service.ts (new file, 247 lines)

@@ -0,0 +1,247 @@
import options from '../../options.js';
import { BaseAIService } from '../base_ai_service.js';
import type { ChatCompletionOptions, ChatResponse, Message, StreamChunk } from '../ai_interface.js';
import { getOpenAIOptions } from './providers.js';
import OpenAI from 'openai';

export class OpenAIService extends BaseAIService {
    private openai: OpenAI | null = null;

    constructor() {
        super('OpenAI');
    }

    isAvailable(): boolean {
        return super.isAvailable() && !!options.getOption('openaiApiKey');
    }

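    // Note: the client is created once and cached; later calls reuse the
    // apiKey/baseUrl it was first constructed with, so changing those
    // options at runtime has no effect until the service is recreated.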
    private getClient(apiKey: string, baseUrl?: string): OpenAI {
        if (!this.openai) {
            this.openai = new OpenAI({
                apiKey,
                baseURL: baseUrl
            });
        }
        return this.openai;
    }

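    // Two modes: when streaming is requested, this returns a ChatResponse
    // whose `stream` handler consumes the SDK's async iterator and fills in
    // `text`/`tool_calls` as chunks arrive; otherwise it awaits the full
    // completion and returns it in one piece.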
    async generateChatCompletion(messages: Message[], opts: ChatCompletionOptions = {}): Promise<ChatResponse> {
        if (!this.isAvailable()) {
            throw new Error('OpenAI service is not available. Check API key and AI settings.');
        }

        // Get provider-specific options from the central provider manager
        const providerOptions = getOpenAIOptions(opts);

        // Initialize the OpenAI client
        const client = this.getClient(providerOptions.apiKey, providerOptions.baseUrl);

        const systemPrompt = this.getSystemPrompt(providerOptions.systemPrompt || options.getOption('aiSystemPrompt'));

        // Ensure we have a system message
        const systemMessageExists = messages.some(m => m.role === 'system');
        const messagesWithSystem = systemMessageExists
            ? messages
            : [{ role: 'system', content: systemPrompt }, ...messages];

        try {
            // Create params object for the OpenAI SDK
            const params: OpenAI.Chat.ChatCompletionCreateParams = {
                model: providerOptions.model,
                messages: messagesWithSystem as OpenAI.Chat.ChatCompletionMessageParam[],
                temperature: providerOptions.temperature,
                max_tokens: providerOptions.max_tokens,
                stream: providerOptions.stream,
                top_p: providerOptions.top_p,
                frequency_penalty: providerOptions.frequency_penalty,
                presence_penalty: providerOptions.presence_penalty
            };

            // Add tools if enabled
            if (providerOptions.enableTools && providerOptions.tools && providerOptions.tools.length > 0) {
                params.tools = providerOptions.tools as OpenAI.Chat.ChatCompletionTool[];
            }

            if (providerOptions.tool_choice) {
                params.tool_choice = providerOptions.tool_choice as OpenAI.Chat.ChatCompletionToolChoiceOption;
            }

            // Log the request parameters
            console.log('OpenAI API Request:', JSON.stringify({
                endpoint: 'chat.completions.create',
                model: params.model,
                messages: params.messages,
                temperature: params.temperature,
                max_tokens: params.max_tokens,
                stream: params.stream,
                tools: params.tools,
                tool_choice: params.tool_choice
            }, null, 2));

            // If streaming is requested
            if (providerOptions.stream) {
                params.stream = true;

                // Get stream from OpenAI SDK
                const stream = await client.chat.completions.create(params);
                console.log('OpenAI API Stream Started');

                // Create a closure to hold accumulated tool calls
                let accumulatedToolCalls: any[] = [];

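                // Note: `response` is handed back before any content arrives;
                // the handler below mutates response.text and response.tool_calls
                // as the caller consumes the stream.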
                // Return a response with the stream handler
                const response: ChatResponse = {
                    text: '', // Initial empty text, will be populated during streaming
                    model: params.model,
                    provider: this.getName(),
                    // Add tool_calls property that will be populated during streaming
                    tool_calls: [],
                    stream: async (callback) => {
                        let completeText = '';

                        try {
                            // Process the stream
                            if (Symbol.asyncIterator in stream) {
                                for await (const chunk of stream as AsyncIterable<OpenAI.Chat.ChatCompletionChunk>) {
                                    // Log each chunk received from OpenAI
                                    console.log('OpenAI API Stream Chunk:', JSON.stringify(chunk, null, 2));

                                    const content = chunk.choices[0]?.delta?.content || '';
                                    const isDone = !!chunk.choices[0]?.finish_reason;

                                    // Check for tool calls in the delta
                                    const deltaToolCalls = chunk.choices[0]?.delta?.tool_calls;

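                                    // OpenAI streams tool calls as fragments keyed by `index`:
                                    // the id and name typically arrive in the first fragment,
                                    // while the JSON `arguments` string is concatenated across
                                    // chunks and only parses once the stream finishes. Because
                                    // entries are stored by index, the array can be sparse until
                                    // compacted with filter(Boolean). Accumulated shape
                                    // (illustrative, hypothetical tool name):
                                    // { id: 'call_0', type: 'function',
                                    //   function: { name: 'search_notes', arguments: '{"query":"..."}' } }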
                                    if (deltaToolCalls) {
                                        // Process and accumulate tool calls from this chunk
                                        for (const deltaToolCall of deltaToolCalls) {
                                            const toolCallId = deltaToolCall.index;

                                            // Initialize or update the accumulated tool call
                                            if (!accumulatedToolCalls[toolCallId]) {
                                                accumulatedToolCalls[toolCallId] = {
                                                    id: deltaToolCall.id || `call_${toolCallId}`,
                                                    type: deltaToolCall.type || 'function',
                                                    function: {
                                                        name: '',
                                                        arguments: ''
                                                    }
                                                };
                                            }

                                            // Update function name if present
                                            if (deltaToolCall.function?.name) {
                                                accumulatedToolCalls[toolCallId].function.name = deltaToolCall.function.name;
                                            }

                                            // Append to function arguments if present
                                            if (deltaToolCall.function?.arguments) {
                                                accumulatedToolCalls[toolCallId].function.arguments += deltaToolCall.function.arguments;
                                            }
                                        }

                                        // Important: update the response's tool_calls with accumulated tool calls
                                        response.tool_calls = accumulatedToolCalls.filter(Boolean);
                                    }

                                    if (content) {
                                        completeText += content;
                                    }

                                    // Send the chunk to the caller with raw data and any accumulated tool calls
                                    const streamChunk: StreamChunk = {
                                        text: content,
                                        done: isDone,
                                        raw: chunk as unknown as Record<string, unknown>
                                    };

                                    // Add accumulated tool calls to raw data for compatibility with tool execution display
                                    if (accumulatedToolCalls.length > 0) {
                                        streamChunk.raw = {
                                            ...streamChunk.raw as object,
                                            tool_calls: accumulatedToolCalls.filter(Boolean)
                                        } as Record<string, unknown>;
                                    }

                                    await callback(streamChunk);

                                    if (isDone) {
                                        console.log('OpenAI API Stream Complete. Final text length:', completeText.length);
                                        if (accumulatedToolCalls.length > 0) {
                                            console.log('OpenAI API Tool Calls:', JSON.stringify(accumulatedToolCalls, null, 2));
                                        }
                                        break;
                                    }
                                }
                            } else {
                                // Fallback for non-iterable response
                                console.warn('Stream is not iterable, falling back to non-streaming response');
                                console.log('OpenAI API Non-iterable Stream Response:', JSON.stringify(stream, null, 2));

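                                // Defensive path (assumption: some SDK or proxy setups may
                                // return a plain completion even when stream was requested):
                                // deliver it as one final chunk so the callback contract holds.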
                                if ('choices' in stream) {
                                    const content = stream.choices[0]?.message?.content || '';
                                    completeText = content;

                                    // Check if there are tool calls in the non-stream response
                                    const toolCalls = stream.choices[0]?.message?.tool_calls;
                                    if (toolCalls) {
                                        response.tool_calls = toolCalls;
                                        console.log('OpenAI API Tool Calls in Non-iterable Response:', JSON.stringify(toolCalls, null, 2));
                                    }

                                    await callback({
                                        text: content,
                                        done: true,
                                        raw: stream as unknown as Record<string, unknown>,
                                        tool_calls: toolCalls
                                    });
                                }
                            }
                        } catch (error) {
                            console.error('Error processing stream:', error);
                            throw error;
                        }

                        // Update the response's text with the complete text
                        response.text = completeText;

                        // Return the complete text
                        return completeText;
                    }
                };

                return response;
            } else {
                // Non-streaming response
                params.stream = false;

                const completion = await client.chat.completions.create(params);

                // Log the full response from OpenAI
                console.log('OpenAI API Response:', JSON.stringify(completion, null, 2));

                if (!('choices' in completion)) {
                    throw new Error('Unexpected response format from OpenAI API');
                }

                return {
                    text: completion.choices[0].message.content || '',
                    model: completion.model,
                    provider: this.getName(),
                    usage: {
                        promptTokens: completion.usage?.prompt_tokens,
                        completionTokens: completion.usage?.completion_tokens,
                        totalTokens: completion.usage?.total_tokens
                    },
                    tool_calls: completion.choices[0].message.tool_calls
                };
            }
        } catch (error) {
            console.error('OpenAI service error:', error);
            throw error;
        }
    }
}
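A minimal sketch of how a caller might drive the streaming contract above. The import path and the demo wiring are hypothetical; `Message`, `generateChatCompletion`, and the `stream` handler come from the file in this commit.

import { OpenAIService } from './openai_service.js';
import type { Message } from '../ai_interface.js';

async function demo() {
    const service = new OpenAIService();

    const messages: Message[] = [
        { role: 'user', content: 'Summarize my last note.' }
    ];

    // With stream: true, generateChatCompletion resolves as soon as the
    // stream opens; text arrives through the callback passed to stream().
    const response = await service.generateChatCompletion(messages, { stream: true });

    if (response.stream) {
        // The handler resolves with the complete text once the SDK
        // iterator is exhausted; chunks arrive via the callback.
        const fullText = await response.stream(async (chunk) => {
            process.stdout.write(chunk.text);
            if (chunk.done && response.tool_calls?.length) {
                console.log('\ntool calls:', JSON.stringify(response.tool_calls));
            }
        });
        console.log('\nfinal text length:', fullText.length);
    }
}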