Merge branch 'develop' of https://github.com/TriliumNext/Notes into develop
@@ -408,7 +408,7 @@ async function reprocessAllNotes(req: Request, res: Response) {
    try {
        // Wrap the operation in cls.init to ensure proper context
        cls.init(async () => {
            await vectorStore.reprocessAllNotes();
            await indexService.reprocessAllNotes();
            log.info("Embedding reprocessing completed successfully");
        });
    } catch (error: any) {
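This hunk switches the reprocessing call from vectorStore to indexService; the cls.init wrapper is kept because the work runs asynchronously, outside the request that triggered it, and needs its own continuation-local context. A minimal sketch of that wrapping pattern, assuming only the cls.init call shape visible above (the helper and job names are illustrative):

// Minimal sketch of the wrapping pattern used above. `cls` and `log` stand in for
// Trilium's services/cls.js and services/log.js; only cls.init is assumed, as seen in the diff.
declare const cls: { init: (callback: () => Promise<void> | void) => void };
declare const log: { info: (msg: string) => void; error: (msg: string) => void };

function runInBackground(jobName: string, job: () => Promise<void>) {
    // cls.init gives the async callback its own continuation-local context, so code that
    // relies on that context (e.g. entity-change tracking) keeps working outside a request.
    cls.init(async () => {
        try {
            await job();
            log.info(`${jobName} completed successfully`);
        } catch (e: any) {
            log.error(`${jobName} failed: ${e}`);
        }
    });
}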
@@ -782,6 +782,49 @@ async function getIndexRebuildStatus(req: Request, res: Response) {
    };
}

/**
 * Start embedding generation when AI is enabled
 */
async function startEmbeddings(req: Request, res: Response) {
    try {
        log.info("Starting embedding generation system");

        // Initialize the index service if not already initialized
        await indexService.initialize();

        // Start automatic indexing
        await indexService.startEmbeddingGeneration();

        return {
            success: true,
            message: "Embedding generation started"
        };
    } catch (error: any) {
        log.error(`Error starting embeddings: ${error.message || 'Unknown error'}`);
        throw new Error(`Failed to start embeddings: ${error.message || 'Unknown error'}`);
    }
}

/**
 * Stop embedding generation when AI is disabled
 */
async function stopEmbeddings(req: Request, res: Response) {
    try {
        log.info("Stopping embedding generation system");

        // Stop automatic indexing
        await indexService.stopEmbeddingGeneration();

        return {
            success: true,
            message: "Embedding generation stopped"
        };
    } catch (error: any) {
        log.error(`Error stopping embeddings: ${error.message || 'Unknown error'}`);
        throw new Error(`Failed to stop embeddings: ${error.message || 'Unknown error'}`);
    }
}

export default {
    findSimilarNotes,
    searchByText,

@@ -794,5 +837,7 @@ export default {
    retryFailedNote,
    retryAllFailedNotes,
    rebuildIndex,
    getIndexRebuildStatus
    getIndexRebuildStatus,
    startEmbeddings,
    stopEmbeddings
};
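The new startEmbeddings and stopEmbeddings handlers follow the same pattern as the rest of this file: do the work through indexService, return a plain { success, message } object, and log-and-rethrow on failure so the API layer can turn it into an error response. A sketch of how a client (for example the AI options UI) might call the corresponding endpoints registered later in this commit; the helper below is illustrative, not code from the repository:

// Hypothetical client helper that toggles embedding generation via the new endpoints.
async function setEmbeddingGenerationEnabled(enabled: boolean): Promise<void> {
    const endpoint = enabled ? "/api/llm/embeddings/start" : "/api/llm/embeddings/stop";

    const response = await fetch(endpoint, { method: "POST" });
    if (!response.ok) {
        throw new Error(`Request to ${endpoint} failed with HTTP ${response.status}`);
    }

    // startEmbeddings/stopEmbeddings above return { success, message };
    // the API layer is assumed to serialize that object as JSON.
    const result = (await response.json()) as { success: boolean; message: string };
    console.log(result.message);
}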
@@ -825,7 +825,10 @@ async function streamMessage(req: Request, res: Response) {
        success: true,
        message: 'Streaming initiated successfully'
    });
    log.info(`Sent immediate success response for streaming setup`);

    // Mark response as handled to prevent apiResultHandler from processing it again
    (res as any).triliumResponseHandled = true;

    // Create a new response object for streaming through WebSocket only
    // We won't use HTTP streaming since we've already sent the HTTP response
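The triliumResponseHandled flag is a hand-shake with the generic API result handler: this route has already written its own HTTP response, so the wrapper must not serialize the handler's return value on top of it. A sketch of how such a guard can look on the wrapper side, assuming Express; this is illustrative, not the actual apiResultHandler:

// Hypothetical result wrapper showing how the flag can be honoured.
import type { Request, Response } from "express";

type ApiHandler = (req: Request, res: Response) => Promise<unknown>;

function apiResultHandler(handler: ApiHandler) {
    return async (req: Request, res: Response) => {
        const result = await handler(req, res);

        // If the handler already streamed or sent its own response, do nothing more.
        if ((res as any).triliumResponseHandled || res.headersSent) {
            return;
        }

        res.status(200).json(result);
    };
}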
@@ -889,78 +892,33 @@ async function streamMessage(req: Request, res: Response) {
            thinking: showThinking ? 'Initializing streaming LLM response...' : undefined
        });

        // Instead of trying to reimplement the streaming logic ourselves,
        // delegate to restChatService but set up the correct protocol:
        // 1. We've already sent a success response to the initial POST
        // 2. Now we'll have restChatService process the actual streaming through WebSocket
        // Process the LLM request using the existing service but with streaming setup
        // Since we've already sent the initial HTTP response, we'll use the WebSocket for streaming
        try {
            // Import the WebSocket service for sending messages
            const wsService = (await import('../../services/ws.js')).default;

            // Create a simple pass-through response object that won't write to the HTTP response
            // but will allow restChatService to send WebSocket messages
            const dummyResponse = {
                writableEnded: false,
                // Implement methods that would normally be used by restChatService
                write: (_chunk: string) => {
                    // Silent no-op - we're only using WebSocket
                    return true;
            // Call restChatService with streaming mode enabled
            // The important part is setting method to GET to indicate streaming mode
            await restChatService.handleSendMessage({
                ...req,
                method: 'GET', // Indicate streaming mode
                query: {
                    ...req.query,
                    stream: 'true' // Add the required stream parameter
                },
                end: (_chunk?: string) => {
                    // Log when streaming is complete via WebSocket
                    log.info(`[${chatNoteId}] Completed HTTP response handling during WebSocket streaming`);
                    return dummyResponse;
                body: {
                    content: enhancedContent,
                    useAdvancedContext: useAdvancedContext === true,
                    showThinking: showThinking === true
                },
                setHeader: (name: string, _value: string) => {
                    // Only log for content-type to reduce noise
                    if (name.toLowerCase() === 'content-type') {
                        log.info(`[${chatNoteId}] Setting up streaming for WebSocket only`);
                    }
                    return dummyResponse;
                }
            };
                params: { chatNoteId }
            } as unknown as Request, res);
        } catch (streamError) {
            log.error(`Error during WebSocket streaming: ${streamError}`);

            // Process the streaming now through WebSocket only
            try {
                log.info(`[${chatNoteId}] Processing LLM streaming through WebSocket after successful initiation at ${new Date().toISOString()}`);

                // Call restChatService with our enhanced request and dummy response
                // The important part is setting method to GET to indicate streaming mode
                await restChatService.handleSendMessage({
                    ...req,
                    method: 'GET', // Indicate streaming mode
                    query: {
                        ...req.query,
                        stream: 'true' // Add the required stream parameter
                    },
                    body: {
                        content: enhancedContent,
                        useAdvancedContext: useAdvancedContext === true,
                        showThinking: showThinking === true
                    },
                    params: { chatNoteId }
                } as unknown as Request, dummyResponse as unknown as Response);

                log.info(`[${chatNoteId}] WebSocket streaming completed at ${new Date().toISOString()}`);
            } catch (streamError) {
                log.error(`[${chatNoteId}] Error during WebSocket streaming: ${streamError}`);

                // Send error message through WebSocket
                wsService.sendMessageToAllClients({
                    type: 'llm-stream',
                    chatNoteId: chatNoteId,
                    error: `Error during streaming: ${streamError}`,
                    done: true
                });
            }
        } catch (error) {
            log.error(`Error during streaming: ${error}`);

            // Send error to client via WebSocket
            // Send error message through WebSocket
            wsService.sendMessageToAllClients({
                type: 'llm-stream',
                chatNoteId: chatNoteId,
                error: `Error processing message: ${error}`,
                error: `Error during streaming: ${streamError}`,
                done: true
            });
        }
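Because the HTTP response is finished before the model starts answering, the client has to pick up the actual output from the llm-stream WebSocket messages shown in this hunk. A minimal browser-side consumer sketch, assuming the fields visible here (type, chatNoteId, thinking, error, done) plus a hypothetical content field carrying the streamed text:

// Hypothetical client-side consumer for the 'llm-stream' messages sent via wsService.
interface LlmStreamMessage {
    type: 'llm-stream';
    chatNoteId: string;
    content?: string;   // assumed field for streamed text chunks
    thinking?: string;  // assumed field mirroring the showThinking option
    error?: string;
    done?: boolean;
}

function listenForLlmStream(socket: WebSocket, chatNoteId: string, onChunk: (text: string) => void) {
    socket.addEventListener('message', (event) => {
        const message = JSON.parse(event.data) as LlmStreamMessage;

        // Ignore unrelated message types and other chats.
        if (message.type !== 'llm-stream' || message.chatNoteId !== chatNoteId) {
            return;
        }

        if (message.error) {
            console.error(`LLM stream error: ${message.error}`);
            return;
        }
        if (message.content) {
            onChunk(message.content);
        }
        if (message.done) {
            console.log('LLM stream finished');
        }
    });
}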
@@ -66,12 +66,13 @@ async function listModels(req: Request, res: Response) {
        const apiKey = await options.getOption('openaiApiKey');

        if (!apiKey) {
            throw new Error('OpenAI API key is not configured');
            // Log warning but don't throw - some OpenAI-compatible endpoints don't require API keys
            log.info('OpenAI API key is not configured when listing models. This may cause issues with official OpenAI endpoints.');
        }

        // Initialize OpenAI client with the API key and base URL
        // Initialize OpenAI client with the API key (or empty string) and base URL
        const openai = new OpenAI({
            apiKey,
            apiKey: apiKey || '', // Default to empty string if no API key
            baseURL: openaiBaseUrl
        });
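The change above turns a missing OpenAI API key from a hard error into a logged warning, since self-hosted OpenAI-compatible endpoints often do not require a key. A standalone sketch of the same idea using the openai npm client; the option handling matches the diff, while the function name and example base URL are illustrative:

import OpenAI from 'openai';

// Hypothetical standalone version of the model listing with an optional API key.
async function listOpenAiModels(apiKey: string | undefined, baseURL: string) {
    if (!apiKey) {
        // Official api.openai.com will reject this, but many compatible endpoints accept it.
        console.warn('No OpenAI API key configured; relying on the endpoint not requiring one.');
    }

    const openai = new OpenAI({
        apiKey: apiKey || '',   // empty string rather than throwing
        baseURL                 // e.g. 'http://localhost:8080/v1' for a compatible server
    });

    const models = await openai.models.list();
    return models.data.map((model) => model.id);
}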
@@ -84,9 +85,9 @@ async function listModels(req: Request, res: Response) {
        // Include all models as chat models, without filtering by specific model names
        // This allows models from providers like OpenRouter to be displayed
        const chatModels = allModels
            .filter((model) =>
                // Exclude models that are explicitly for embeddings
                !model.id.includes('embedding') &&
                !model.id.includes('embed')
            )
            .map((model) => ({
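The filter is deliberately loose: any model whose id does not look like an embedding model is offered as a chat model, which lets OpenRouter-style ids pass through unchanged. A small sketch of the predicate on its own, with made-up ids to show what is kept and what is dropped:

// Hypothetical ids to show what the embedding filter keeps and drops.
const modelIds = [
    'gpt-4o',
    'anthropic/claude-3.5-sonnet',   // OpenRouter-style id, kept
    'text-embedding-3-small',        // dropped: contains 'embedding'
    'voyage-embed-2'                 // dropped: contains 'embed'
];

const chatModelIds = modelIds.filter((id) =>
    !id.includes('embedding') &&
    !id.includes('embed')
);

console.log(chatModelIds); // ['gpt-4o', 'anthropic/claude-3.5-sonnet']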
@@ -96,22 +96,26 @@ const ALLOWED_OPTIONS = new Set<OptionNames>([
    "aiEnabled",
    "aiTemperature",
    "aiSystemPrompt",
    "aiProviderPrecedence",
    "aiSelectedProvider",
    "openaiApiKey",
    "openaiBaseUrl",
    "openaiDefaultModel",
    "openaiEmbeddingModel",
    "openaiEmbeddingApiKey",
    "openaiEmbeddingBaseUrl",
    "anthropicApiKey",
    "anthropicBaseUrl",
    "anthropicDefaultModel",
    "voyageApiKey",
    "voyageEmbeddingModel",
    "voyageEmbeddingBaseUrl",
    "ollamaBaseUrl",
    "ollamaDefaultModel",
    "ollamaEmbeddingModel",
    "ollamaEmbeddingBaseUrl",
    "embeddingAutoUpdateEnabled",
    "embeddingDimensionStrategy",
    "embeddingProviderPrecedence",
    "embeddingSelectedProvider",
    "embeddingSimilarityThreshold",
    "embeddingBatchSize",
    "embeddingUpdateInterval",
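ALLOWED_OPTIONS is the allowlist of option names the options API will accept, and this hunk extends it to cover more of the embedding-related settings. A sketch of how a Set like this is typically consulted before persisting a value; the guard function is hypothetical, not the actual options route:

// Hypothetical guard built on an allowlist like the one above.
type OptionNames = string; // stand-in for Trilium's real OptionNames union type

const ALLOWED_OPTIONS = new Set<OptionNames>([
    "openaiEmbeddingApiKey",
    "openaiEmbeddingBaseUrl"
    // ...rest of the entries listed in the diff
]);

function assertOptionAllowed(name: string): void {
    if (!ALLOWED_OPTIONS.has(name)) {
        throw new Error(`Option '${name}' is not allowed to be changed through this API`);
    }
}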
@@ -400,6 +400,8 @@ function register(app: express.Application) {
    asyncApiRoute(PST, "/api/llm/embeddings/retry-all-failed", embeddingsRoute.retryAllFailedNotes);
    asyncApiRoute(PST, "/api/llm/embeddings/rebuild-index", embeddingsRoute.rebuildIndex);
    asyncApiRoute(GET, "/api/llm/embeddings/index-rebuild-status", embeddingsRoute.getIndexRebuildStatus);
    asyncApiRoute(PST, "/api/llm/embeddings/start", embeddingsRoute.startEmbeddings);
    asyncApiRoute(PST, "/api/llm/embeddings/stop", embeddingsRoute.stopEmbeddings);

    // LLM provider endpoints - moved under /api/llm/providers hierarchy
    asyncApiRoute(GET, "/api/llm/providers/ollama/models", ollamaRoute.listModels);
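The new start and stop endpoints are registered with the same asyncApiRoute helper as the other embedding routes, with PST and GET acting as the module's HTTP method constants. A rough sketch of what a wrapper in that style does, tying back to the triliumResponseHandled guard discussed earlier; the implementation below is illustrative, not the repository's routes.ts:

import express, { type NextFunction, type Request, type Response } from "express";

type RouteHandler = (req: Request, res: Response) => Promise<unknown>;

// Hypothetical equivalent of asyncApiRoute for POST routes (PST in the diff is assumed to
// abbreviate POST; GET works the same way). Not the actual routes.ts implementation.
function asyncPostRoute(app: express.Application, path: string, handler: RouteHandler) {
    app.post(path, async (req: Request, res: Response, next: NextFunction) => {
        try {
            const result = await handler(req, res);
            // Skip serialization if the handler already answered (see triliumResponseHandled above).
            if (!(res as any).triliumResponseHandled && !res.headersSent) {
                res.json(result);
            }
        } catch (err) {
            next(err);
        }
    });
}

// Usage mirroring the new registrations, with an illustrative embeddingsRoute object:
// asyncPostRoute(app, "/api/llm/embeddings/start", embeddingsRoute.startEmbeddings);
// asyncPostRoute(app, "/api/llm/embeddings/stop", embeddingsRoute.stopEmbeddings);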