// Trilium/src/services/llm/trilium_context_service.ts
import becca from "../../becca/becca.js";
import vectorStore from "./embeddings/index.js";
import providerManager from "./embeddings/providers.js";
import options from "../options.js";
import log from "../log.js";
import type { Message } from "./ai_interface.js";
import { cosineSimilarity } from "./embeddings/index.js";
import sanitizeHtml from "sanitize-html";
import aiServiceManager from "./ai_service_manager.js";
/**
* TriliumContextService provides intelligent context management for working with large knowledge bases
* through limited context window LLMs like Ollama.
*
* It creates a "meta-prompting" approach where the first LLM call is used
* to determine what information might be needed to answer the query,
* then only the relevant context is loaded, before making the final
* response.
*/
class TriliumContextService {
private initialized = false;
private initPromise: Promise<void> | null = null;
private provider: any = null;
// Cache for recently used context to avoid repeated embedding lookups
private recentQueriesCache = new Map<string, {
timestamp: number,
relevantNotes: any[]
}>();
// Configuration
private cacheExpiryMs = 5 * 60 * 1000; // 5 minutes
2025-03-17 21:02:05 +00:00
private metaPrompt = `You are an AI assistant that decides what information needs to be retrieved from a user's knowledge base called TriliumNext Notes to answer the user's question.
2025-03-10 03:34:48 +00:00
Given the user's question, generate 3-5 specific search queries that would help find relevant information.
Each query should be focused on a different aspect of the question.
Format your answer as a JSON array of strings, with each string being a search query.
Example: ["exact topic mentioned", "related concept 1", "related concept 2"]`;
constructor() {
this.setupCacheCleanup();
}
/**
 * Initialize the service by resolving an embedding provider.
 *
 * Resolution order: the user's configured provider, then ollama (for
 * self-hosted setups), then openai, then the local provider. Throws if no
 * provider can be resolved at all. Safe to call concurrently: an in-flight
 * initialization is shared rather than restarted.
 */
async initialize() {
    if (this.initialized) return;

    // Another caller is already initializing — share that promise.
    if (this.initPromise) return this.initPromise;

    const doInitialize = async (): Promise<void> => {
        try {
            // Get user's configured provider or fallback to ollama
            const providerId = await options.getOption('embeddingsDefaultProvider') || 'ollama';
            this.provider = providerManager.getEmbeddingProvider(providerId);

            // First fallback: ollama, for self-hosted usage.
            if (!this.provider && providerId !== 'ollama') {
                log.info(`Embedding provider ${providerId} not found, trying ollama as fallback`);
                this.provider = providerManager.getEmbeddingProvider('ollama');
            }

            // Second fallback: openai.
            if (!this.provider && providerId !== 'openai') {
                log.info(`Embedding provider ollama not found, trying openai as fallback`);
                this.provider = providerManager.getEmbeddingProvider('openai');
            }

            // Final fallback: the local provider, which should always exist.
            if (!this.provider) {
                log.info(`No embedding provider found, falling back to local provider`);
                this.provider = providerManager.getEmbeddingProvider('local');
            }

            if (!this.provider) {
                throw new Error(`No embedding provider available. Could not initialize context service.`);
            }

            this.initialized = true;
            log.info(`Trilium context service initialized with provider: ${this.provider.name}`);
        } catch (error: unknown) {
            const message = error instanceof Error ? error.message : String(error);
            log.error(`Failed to initialize Trilium context service: ${message}`);
            throw error;
        } finally {
            // Allow a retry on failure, and drop the shared handle on success.
            this.initPromise = null;
        }
    };

    this.initPromise = doInitialize();
    return this.initPromise;
}
/**
 * Start a periodic sweep that evicts query-cache entries older than
 * cacheExpiryMs. Runs once a minute for the lifetime of the service
 * (this class is exported as a singleton, so the timer is never cleared).
 */
private setupCacheCleanup() {
    const timer = setInterval(() => {
        const now = Date.now();
        for (const [key, data] of this.recentQueriesCache.entries()) {
            if (now - data.timestamp > this.cacheExpiryMs) {
                this.recentQueriesCache.delete(key);
            }
        }
    }, 60000); // Run cleanup every minute

    // Fix: don't let this housekeeping timer keep the Node.js process alive
    // on shutdown. Optional call guards non-Node typings/environments.
    timer.unref?.();
}
/**
 * Ask the LLM to expand the user's question into 3-5 focused search queries.
 *
 * The model is instructed (via metaPrompt) to answer with a JSON array of
 * strings, but real responses are messy, so parsing degrades gracefully:
 * strict JSON parse -> quoted-string extraction -> line-by-line cleanup ->
 * finally just [userQuestion]. The result is therefore never empty.
 *
 * @param userQuestion - The user's question
 * @param llmService - The LLM service to use for generating queries
 * @returns Array of search queries
 */
async generateSearchQueries(userQuestion: string, llmService: any): Promise<string[]> {
    try {
        const messages: Message[] = [
            { role: "system", content: this.metaPrompt },
            { role: "user", content: userQuestion }
        ];

        // Low temperature for predictable, parseable output.
        const options = {
            temperature: 0.3,
            maxTokens: 300
        };

        // Get the response from the LLM using the correct method name
        const response = await llmService.generateChatCompletion(messages, options);
        const responseText = response.text; // Extract the text from the response object

        try {
            // Remove code blocks, quotes, and clean up the response text
            let jsonStr = responseText
                .replace(/```(?:json)?|```/g, '') // Remove code block markers
                .replace(/[\u201C\u201D]/g, '"') // Replace smart quotes with straight quotes
                .trim();

            // Check if the text might contain a JSON array (has square brackets)
            if (jsonStr.includes('[') && jsonStr.includes(']')) {
                // Extract just the array part if there's explanatory text
                const arrayMatch = jsonStr.match(/\[[\s\S]*\]/);
                if (arrayMatch) {
                    jsonStr = arrayMatch[0];
                }

                // Try to parse the JSON
                try {
                    const queries = JSON.parse(jsonStr);
                    if (Array.isArray(queries) && queries.length > 0) {
                        // Coerce non-string entries and drop empties.
                        return queries.map(q => typeof q === 'string' ? q : String(q)).filter(Boolean);
                    }
                } catch (innerError) {
                    // If parsing fails, log it and continue to the fallback
                    log.info(`JSON parse error: ${innerError}. Will use fallback parsing for: ${jsonStr}`);
                }
            }

            // Fallback 1: Try to extract an array manually by splitting on commas between quotes
            if (jsonStr.includes('[') && jsonStr.includes(']')) {
                const arrayContent = jsonStr.substring(
                    jsonStr.indexOf('[') + 1,
                    jsonStr.lastIndexOf(']')
                );

                // Use regex to match quoted strings, handling escaped quotes
                const stringMatches = arrayContent.match(/"((?:\\.|[^"\\])*)"/g);
                if (stringMatches && stringMatches.length > 0) {
                    return stringMatches
                        .map((m: string) => m.substring(1, m.length - 1)) // Remove surrounding quotes
                        .filter((s: string) => s.length > 0);
                }
            }

            // Fallback 2: Extract queries line by line
            const lines = responseText.split('\n')
                .map((line: string) => line.trim())
                .filter((line: string) =>
                    line.length > 0 &&
                    !line.startsWith('```') &&
                    !line.match(/^\d+\.?\s*$/) && // Skip numbered list markers alone
                    !line.match(/^\[|\]$/) // Skip lines that are just brackets
                );

            if (lines.length > 0) {
                // Remove numbering, quotes and other list markers from each line
                return lines.map((line: string) => {
                    return line
                        .replace(/^\d+\.?\s*/, '') // Remove numbered list markers (1., 2., etc)
                        .replace(/^[-*•]\s*/, '') // Remove bullet list markers
                        .replace(/^["']|["']$/g, '') // Remove surrounding quotes
                        .trim();
                }).filter((s: string) => s.length > 0);
            }
        } catch (parseError) {
            log.error(`Error parsing search queries: ${parseError}`);
        }

        // If all else fails, just use the original question
        return [userQuestion];
    } catch (error: unknown) {
        const errorMessage = error instanceof Error ? error.message : String(error);
        log.error(`Error generating search queries: ${errorMessage}`);
        // Fallback to just using the original question
        return [userQuestion];
    }
}
/**
 * Find relevant notes using multiple search queries.
 *
 * Each query is embedded and searched independently; results are deduplicated
 * by noteId across queries, merged, sorted by similarity and truncated to
 * `limit`. Results are cached per (queries, contextNoteId, limit) tuple.
 *
 * @param queries - Array of search queries
 * @param contextNoteId - Optional note ID to restrict search to a branch
 * @param limit - Max notes to return
 * @returns Array of relevant notes (empty on error)
 */
async findRelevantNotesMultiQuery(
    queries: string[],
    contextNoteId: string | null = null,
    limit = 10
): Promise<any[]> {
    if (!this.initialized) {
        await this.initialize();
    }

    try {
        // Cache key combining all queries and search parameters
        const cacheKey = JSON.stringify({ queries, contextNoteId, limit });

        // Fix: only serve from cache while the entry is still fresh. The
        // periodic sweep runs once a minute, so without this check an entry
        // could be served up to a minute after it expired.
        const cached = this.recentQueriesCache.get(cacheKey);
        if (cached && Date.now() - cached.timestamp < this.cacheExpiryMs) {
            return cached.relevantNotes;
        }

        // All results across all queries, with their similarity scores
        const allResults: {
            noteId: string,
            title: string,
            content: string | null,
            similarity: number,
            branchId?: string
        }[] = [];

        // Note IDs already collected, to avoid duplicates across queries
        const seenNoteIds = new Set<string>();

        // Log the provider and model being used
        log.info(`Searching with embedding provider: ${this.provider.name}, model: ${this.provider.getConfig().model}`);

        // Process each query independently
        for (const query of queries) {
            // Embed the query text
            const queryEmbedding = await this.provider.generateEmbeddings(query);
            log.info(`Generated embedding for query: "${query}" (${queryEmbedding.length} dimensions)`);

            // Find notes similar to this query
            let results;
            if (contextNoteId) {
                // Restrict the search to the subtree of the context note
                results = await this.findNotesInBranch(
                    queryEmbedding,
                    contextNoteId,
                    Math.min(limit, 5) // Limit per query
                );
                log.info(`Found ${results.length} notes within branch context for query: "${query}"`);
            } else {
                // Search all notes
                results = await vectorStore.findSimilarNotes(
                    queryEmbedding,
                    this.provider.name,
                    this.provider.getConfig().model,
                    Math.min(limit, 5), // Limit per query
                    0.5 // Lower threshold to get more diverse results
                );
                log.info(`Found ${results.length} notes in vector store for query: "${query}"`);
            }

            // Merge results, skipping notes we've already collected
            for (const result of results) {
                if (!seenNoteIds.has(result.noteId)) {
                    seenNoteIds.add(result.noteId);

                    // Resolve against becca; skip notes that no longer exist
                    const note = becca.notes[result.noteId];
                    if (!note) continue;

                    allResults.push({
                        noteId: result.noteId,
                        title: note.title,
                        // Only text notes contribute textual content
                        content: note.type === 'text' ? note.getContent() as string : null,
                        similarity: result.similarity,
                        branchId: note.getBranches()[0]?.branchId
                    });
                }
            }
        }

        // Sort by similarity and take the top 'limit' results
        const sortedResults = allResults
            .sort((a, b) => b.similarity - a.similarity)
            .slice(0, limit);
        log.info(`Total unique relevant notes found across all queries: ${sortedResults.length}`);

        // Cache the merged results for subsequent identical requests
        this.recentQueriesCache.set(cacheKey, {
            timestamp: Date.now(),
            relevantNotes: sortedResults
        });

        return sortedResults;
    } catch (error: unknown) {
        const errorMessage = error instanceof Error ? error.message : String(error);
        log.error(`Error finding relevant notes: ${errorMessage}`);
        return [];
    }
}
/**
 * Find notes in a specific branch/context by comparing the query embedding
 * against stored embeddings of every note in the subtree.
 *
 * @param embedding - Query embedding
 * @param contextNoteId - Note ID whose subtree to search
 * @param limit - Max notes to return
 * @returns Matches with similarity > 0.5, best first (empty on error)
 */
private async findNotesInBranch(
    embedding: Float32Array,
    contextNoteId: string,
    limit = 5
): Promise<{noteId: string, similarity: number}[]> {
    try {
        // Get the subtree note IDs
        const subtreeNoteIds = await this.getSubtreeNoteIds(contextNoteId);
        if (subtreeNoteIds.length === 0) {
            return [];
        }

        // Fix: fetch stored embeddings for all subtree notes in parallel
        // instead of one awaited round-trip per note — the lookups are
        // independent, and Promise.all preserves input order.
        const noteEmbeddings = await Promise.all(subtreeNoteIds.map(noteId =>
            vectorStore.getEmbeddingForNote(
                noteId,
                this.provider.name,
                this.provider.getConfig().model
            )
        ));

        const similarities: {noteId: string, similarity: number}[] = [];
        subtreeNoteIds.forEach((noteId, i) => {
            const noteEmbedding = noteEmbeddings[i];
            // Notes without a stored embedding are skipped
            if (noteEmbedding) {
                const similarity = cosineSimilarity(embedding, noteEmbedding.embedding);
                if (similarity > 0.5) { // Apply similarity threshold
                    similarities.push({
                        noteId,
                        similarity
                    });
                }
            }
        });

        // Sort by similarity and return top results
        return similarities
            .sort((a, b) => b.similarity - a.similarity)
            .slice(0, limit);
    } catch (error: unknown) {
        const errorMessage = error instanceof Error ? error.message : String(error);
        log.error(`Error finding notes in branch: ${errorMessage}`);
        return [];
    }
}
/**
* Get all note IDs in a subtree (including the root note)
* @param rootNoteId - Root note ID
* @returns Array of note IDs
*/
private async getSubtreeNoteIds(rootNoteId: string): Promise<string[]> {
const note = becca.notes[rootNoteId];
if (!note) {
return [];
}
// Use becca to walk the note tree instead of direct SQL
const noteIds = new Set<string>([rootNoteId]);
// Helper function to collect all children
const collectChildNotes = (noteId: string) => {
// Use becca.getNote(noteId).getChildNotes() to get child notes
const parentNote = becca.notes[noteId];
if (!parentNote) return;
// Get all branches where this note is the parent
for (const branch of Object.values(becca.branches)) {
if (branch.parentNoteId === noteId && !branch.isDeleted) {
const childNoteId = branch.noteId;
if (!noteIds.has(childNoteId)) {
noteIds.add(childNoteId);
// Recursively collect children of this child
collectChildNotes(childNoteId);
}
}
}
};
// Start collecting from the root
collectChildNotes(rootNoteId);
return Array.from(noteIds);
}
/**
 * Build a context string from retrieved notes, sized and phrased for the
 * active embedding provider's model family.
 *
 * @param sources - Notes to include (noteId/title/content/similarity/...)
 * @param query - The user's original question, echoed into the header
 * @returns Context text to prepend to the LLM conversation; a generic
 *          assistant preamble when no sources are available
 */
async buildContextFromNotes(sources: any[], query: string): Promise<string> {
    if (!sources || sources.length === 0) {
        // Return a default context instead of empty string
        return "I am an AI assistant helping you with your Trilium notes. " +
            "I couldn't find any specific notes related to your query, but I'll try to assist you " +
            "with general knowledge about Trilium or other topics you're interested in.";
    }

    // Get provider name to adjust context for different models
    const providerId = this.provider?.name || 'default';

    // Import the constants dynamically to avoid circular dependencies
    // (this module <-> routes/api/llm.js)
    const { LLM_CONSTANTS } = await import('../../routes/api/llm.js');

    // Get appropriate context size and format based on provider
    const maxTotalLength =
        providerId === 'openai' ? LLM_CONSTANTS.CONTEXT_WINDOW.OPENAI :
        providerId === 'anthropic' ? LLM_CONSTANTS.CONTEXT_WINDOW.ANTHROPIC :
        providerId === 'ollama' ? LLM_CONSTANTS.CONTEXT_WINDOW.OLLAMA :
        LLM_CONSTANTS.CONTEXT_WINDOW.DEFAULT;

    // Use a format appropriate for the model family
    // Anthropic has a specific system message format that works better with certain structures
    const isAnthropicFormat = providerId === 'anthropic';

    // Start with different headers based on provider
    let context = isAnthropicFormat
        ? `I'm your AI assistant helping with your Trilium notes database. For your query: "${query}", I found these relevant notes:\n\n`
        : `I've found some relevant information in your notes that may help answer: "${query}"\n\n`;

    // Sort sources by similarity if available to prioritize most relevant
    // (copy before sorting so the caller's array isn't mutated)
    if (sources[0] && sources[0].similarity !== undefined) {
        sources = [...sources].sort((a, b) => (b.similarity || 0) - (a.similarity || 0));
    }

    // Track total context length to avoid oversized context
    let currentLength = context.length;
    // Per-note budget: global content cap, or the total window split evenly
    const maxNoteContentLength = Math.min(LLM_CONSTANTS.CONTENT.MAX_NOTE_CONTENT_LENGTH,
        Math.floor(maxTotalLength / Math.max(1, sources.length)));

    sources.forEach((source) => {
        // Skip once we've hit the total limit. NOTE: forEach `return` skips
        // this source only — a later, smaller section may still fit below.
        if (currentLength >= maxTotalLength) return;

        // Build source section with formatting appropriate for the provider
        let sourceSection = `### ${source.title}\n`;

        // Add relationship context if available
        if (source.parentTitle) {
            sourceSection += `Part of: ${source.parentTitle}\n`;
        }

        // Add attributes if available (for better context)
        if (source.noteId) {
            const note = becca.notes[source.noteId];
            if (note) {
                const labels = note.getLabels();
                if (labels.length > 0) {
                    sourceSection += `Labels: ${labels.map(l => `#${l.name}${l.value ? '=' + l.value : ''}`).join(' ')}\n`;
                }
            }
        }

        if (source.content) {
            // Clean up HTML content before adding it to the context
            let cleanContent = this.sanitizeNoteContent(source.content, source.type, source.mime);

            // Truncate content if it's too long
            if (cleanContent.length > maxNoteContentLength) {
                cleanContent = cleanContent.substring(0, maxNoteContentLength) + " [content truncated due to length]";
            }

            sourceSection += `${cleanContent}\n`;
        } else {
            sourceSection += "[This note doesn't contain textual content]\n";
        }

        sourceSection += "\n";

        // Check if adding this section would exceed total length limit
        if (currentLength + sourceSection.length <= maxTotalLength) {
            context += sourceSection;
            currentLength += sourceSection.length;
        }
    });

    // Add provider-specific instructions
    if (isAnthropicFormat) {
        context += "When you refer to any information from these notes, cite the note title explicitly (e.g., \"According to the note [Title]...\"). " +
            "If the provided notes don't answer the query fully, acknowledge that and then use your general knowledge to help.\n\n" +
            "Be concise but thorough in your responses.";
    } else {
        context += "When referring to information from these notes in your response, please cite them by their titles " +
            "(e.g., \"According to your note on [Title]...\") rather than using labels like \"Note 1\" or \"Note 2\".\n\n" +
            "If the information doesn't contain what you need, just say so and use your general knowledge instead.";
    }

    return context;
}
/**
 * Sanitize note content for use in context, removing HTML tags.
 *
 * @param content - Raw note content (possibly HTML)
 * @param type - Note type (e.g. 'text'), if known
 * @param mime - Note MIME type, if known
 * @returns Plain text with tags stripped, common entities decoded, and
 *          whitespace collapsed to single spaces
 */
private sanitizeNoteContent(content: string, type?: string, mime?: string): string {
    if (!content) return '';

    // If it's likely HTML content — declared as such, or containing common tags
    if (
        (type === 'text' && mime === 'text/html') ||
        content.includes('<div') ||
        content.includes('<p>') ||
        content.includes('<span')
    ) {
        // Use sanitizeHtml to remove all HTML tags
        content = sanitizeHtml(content, {
            allowedTags: [],
            allowedAttributes: {},
            textFilter: (text) => {
                // Replace multiple newlines with a single one
                return text.replace(/\n\s*\n/g, '\n\n');
            }
        });

        // Additional cleanup for remaining HTML entities.
        // Fix: decode &amp; LAST. The original decoded it before &quot; and
        // &#39;, so double-escaped text like "&amp;quot;" was wrongly decoded
        // twice (to `"` instead of the literal text `&quot;`).
        content = content
            .replace(/&nbsp;/g, ' ')
            .replace(/&lt;/g, '<')
            .replace(/&gt;/g, '>')
            .replace(/&quot;/g, '"')
            .replace(/&#39;/g, "'")
            .replace(/&amp;/g, '&');
    }

    // Normalize whitespace. NOTE: this also collapses the newlines preserved
    // by the textFilter above — kept as-is since downstream formatting
    // expects single-line content.
    content = content.replace(/\s+/g, ' ').trim();
    return content;
}
/**
 * Process a user query with the Trilium-specific approach:
 * 1. Generate search queries from the original question
 * 2. Find relevant notes using those queries
 * 3. Build a context string from the relevant notes
 *
 * Never rejects: every stage degrades to a sensible fallback.
 *
 * @param userQuestion - The user's original question
 * @param llmService - The LLM service to use
 * @param contextNoteId - Optional note ID to restrict search to
 * @returns Object with context, notes and queries
 */
async processQuery(userQuestion: string, llmService: any, contextNoteId: string | null = null) {
    if (!this.initialized) {
        try {
            await this.initialize();
        } catch (error) {
            log.error(`Failed to initialize TriliumContextService: ${error}`);
            // Initialization failed — degrade to a generic assistant context
            return {
                context: "I am an AI assistant helping you with your Trilium notes. " +
                    "I'll try to assist you with general knowledge about your query.",
                notes: [],
                queries: [userQuestion]
            };
        }
    }

    try {
        // Step 1: expand the question into search queries, falling back to
        // the question itself if the meta LLM call fails
        let searchQueries: string[];
        try {
            searchQueries = await this.generateSearchQueries(userQuestion, llmService);
        } catch (error) {
            log.error(`Error generating search queries, using fallback: ${error}`);
            searchQueries = [userQuestion];
        }
        log.info(`Generated search queries: ${JSON.stringify(searchQueries)}`);

        // Step 2: retrieve notes for those queries; a failure here just
        // leaves the list empty (8 notes since multiple queries feed in)
        let relevantNotes: any[] = [];
        try {
            relevantNotes = await this.findRelevantNotesMultiQuery(
                searchQueries,
                contextNoteId,
                8
            );
        } catch (error) {
            log.error(`Error finding relevant notes: ${error}`);
        }

        // Step 3: assemble the final context string from whatever was found
        const context = await this.buildContextFromNotes(relevantNotes, userQuestion);

        return {
            context,
            notes: relevantNotes,
            queries: searchQueries
        };
    } catch (error) {
        log.error(`Error in processQuery: ${error}`);
        // Catch-all fallback so callers always get a usable shape
        return {
            context: "I am an AI assistant helping you with your Trilium notes. " +
                "I encountered an error while processing your query, but I'll try to assist you anyway.",
            notes: [],
            queries: [userQuestion]
        };
    }
}
/**
 * Enhance LLM context with agent tools.
 *
 * This adds context from agent tools such as:
 * 1. Vector search results relevant to the query
 * 2. Note hierarchy information
 * 3. Query decomposition planning
 * 4. Contextual thinking visualization
 *
 * Each tool is best-effort: a failure in one section is logged and the
 * remaining sections are still produced.
 *
 * @param noteId The current note being viewed
 * @param query The user's query
 * @param showThinking Whether to include the agent's thinking process
 * @returns Enhanced context string ("" on total failure)
 */
async getAgentToolsContext(noteId: string, query: string, showThinking: boolean = false): Promise<string> {
    try {
        const agentTools = aiServiceManager.getAgentTools();
        let context = "";

        // 1. Get vector search results related to the query
        try {
            const vectorSearchTool = agentTools.getVectorSearchTool();
            const searchResults = await vectorSearchTool.searchNotes(query, {
                parentNoteId: noteId,
                maxResults: 5
            });
            if (searchResults.length > 0) {
                context += "## Related Information\n\n";
                for (const result of searchResults) {
                    context += `### ${result.title}\n`;
                    context += `${result.contentPreview}\n\n`;
                }
                context += "\n";
            }
        } catch (error: any) {
            log.error(`Error getting vector search context: ${error.message}`);
        }

        // 2. Get note structure context
        try {
            const navigatorTool = agentTools.getNoteNavigatorTool();
            const noteContext = navigatorTool.getNoteContextDescription(noteId);
            if (noteContext) {
                context += "## Current Note Context\n\n";
                context += noteContext + "\n\n";
            }
        } catch (error: any) {
            log.error(`Error getting note structure context: ${error.message}`);
        }

        // 3. Use query decomposition if it's a complex query
        try {
            const decompositionTool = agentTools.getQueryDecompositionTool();
            const complexity = decompositionTool.assessQueryComplexity(query);
            if (complexity > 5) { // Only for fairly complex queries
                const decomposed = decompositionTool.decomposeQuery(query);
                if (decomposed.subQueries.length > 1) {
                    context += "## Query Analysis\n\n";
                    context += `This is a complex query (complexity: ${complexity}/10). It can be broken down into:\n\n`;
                    for (const sq of decomposed.subQueries) {
                        context += `- ${sq.text}\n Reason: ${sq.reason}\n\n`;
                    }
                }
            }
        } catch (error: any) {
            log.error(`Error decomposing query: ${error.message}`);
        }

        // 4. Show thinking process if enabled
        if (showThinking) {
            try {
                const thinkingTool = agentTools.getContextualThinkingTool();
                const thinkingId = thinkingTool.startThinking(query);

                // Demonstration steps — in a real implementation, the LLM
                // would add these.
                thinkingTool.addThinkingStep(
                    "Analyzing the query to understand what information is needed",
                    "observation",
                    { confidence: 1.0 }
                );
                const parentId = thinkingTool.addThinkingStep(
                    "Looking for related notes in the knowledge base",
                    "hypothesis",
                    { confidence: 0.9 }
                );
                if (parentId) {
                    // Fix: the original passed `parentId` (a thinking-step id)
                    // as `parentNoteId`, and fetched the tool via
                    // aiServiceManager instead of agentTools as step 1 does.
                    // Search relative to the note actually being viewed.
                    const vectorSearchTool = agentTools.getVectorSearchTool();
                    const searchResults = await vectorSearchTool.searchNotes(query, {
                        parentNoteId: noteId,
                        maxResults: 5
                    });
                    if (searchResults.length > 0) {
                        context += "## Related Information\n\n";
                        for (const result of searchResults) {
                            context += `### ${result.title}\n`;
                            context += `${result.contentPreview}\n\n`;
                        }
                        context += "\n";
                    }
                }

                thinkingTool.addThinkingStep(
                    "The most relevant information appears to be in the current note and its semantic neighborhood",
                    "conclusion",
                    { confidence: 0.85 }
                );

                // Complete the thinking and add it to context
                thinkingTool.completeThinking(thinkingId);
                context += "## Thinking Process\n\n";
                context += thinkingTool.getThinkingSummary(thinkingId) + "\n\n";
            } catch (error: any) {
                log.error(`Error generating thinking process: ${error.message}`);
            }
        }

        return context;
    } catch (error: any) {
        log.error(`Error getting agent tools context: ${error.message}`);
        return "";
    }
}
}

// Exported as a shared singleton — the whole app uses one instance (and one
// query cache / cleanup timer).
export default new TriliumContextService();