chore(nx): move all monorepo-style in subfolder for processing

commit 62dbcc0a2e
parent 2e200eab39
Author: Elian Doran
Date: 2025-04-22 10:06:06 +03:00

1469 changed files with 16 additions and 16 deletions


@@ -1,495 +0,0 @@
/**
* Communication functions for LLM Chat
*/
import server from "../../services/server.js";
import type { SessionResponse } from "./types.js";
/**
* Create a new chat session
*/
export async function createChatSession(currentNoteId?: string): Promise<{chatNoteId: string | null, noteId: string | null}> {
try {
const resp = await server.post<SessionResponse>('llm/chat', {
title: 'Note Chat',
currentNoteId: currentNoteId // Pass the current note ID if available
});
if (resp && resp.id) {
// The backend might provide the noteId separately from the chatNoteId
// If noteId is provided, use it; otherwise, we'll need to query for it separately
return {
chatNoteId: resp.id,
noteId: resp.noteId || null
};
}
} catch (error) {
console.error('Failed to create chat session:', error);
}
return {
chatNoteId: null,
noteId: null
};
}
/**
* Check if a session exists
*/
export async function checkSessionExists(chatNoteId: string): Promise<boolean> {
try {
// Validate that we have a proper note ID format, not a session ID
// Note IDs in Trilium are typically longer or in a different format
if (chatNoteId && chatNoteId.length === 16 && /^[A-Za-z0-9]+$/.test(chatNoteId)) {
console.warn(`Invalid note ID format detected: ${chatNoteId} appears to be a legacy session ID`);
return false;
}
const sessionCheck = await server.getWithSilentNotFound<any>(`llm/chat/${chatNoteId}`);
return !!(sessionCheck && sessionCheck.id);
} catch (error: any) {
console.log(`Error checking chat note ${chatNoteId}:`, error);
return false;
}
}
/**
* Set up streaming response via WebSocket
*/
export async function setupStreamingResponse(
chatNoteId: string,
messageParams: any,
onContentUpdate: (content: string, isDone?: boolean) => void,
onThinkingUpdate: (thinking: string) => void,
onToolExecution: (toolData: any) => void,
onComplete: () => void,
onError: (error: Error) => void
): Promise<void> {
// Validate that we have a proper note ID format, not a session ID
if (chatNoteId && chatNoteId.length === 16 && /^[A-Za-z0-9]+$/.test(chatNoteId)) {
console.error(`Invalid note ID format: ${chatNoteId} appears to be a legacy session ID`);
onError(new Error("Invalid note ID format - using a legacy session ID"));
return;
}
return new Promise((resolve, reject) => {
let assistantResponse = '';
let postToolResponse = ''; // Separate accumulator for post-tool execution content
let receivedAnyContent = false;
let receivedPostToolContent = false; // Track if we've started receiving post-tool content
let timeoutId: number | null = null;
let initialTimeoutId: number | null = null;
let cleanupTimeoutId: number | null = null;
let receivedAnyMessage = false;
let toolsExecuted = false; // Flag to track if tools were executed in this session
let toolExecutionCompleted = false; // Flag to track if tool execution is completed
let eventListener: ((event: Event) => void) | null = null;
let lastMessageTimestamp = 0;
// Create a unique identifier for this response process
const responseId = `llm-stream-${Date.now()}-${Math.floor(Math.random() * 1000)}`;
console.log(`[${responseId}] Setting up WebSocket streaming for chat note ${chatNoteId}`);
// Send the initial request to initiate streaming
(async () => {
try {
const streamResponse = await server.post<any>(`llm/chat/${chatNoteId}/messages/stream`, {
content: messageParams.content,
useAdvancedContext: messageParams.useAdvancedContext,
showThinking: messageParams.showThinking,
options: {
temperature: 0.7,
maxTokens: 2000
}
});
if (!streamResponse || !streamResponse.success) {
console.error(`[${responseId}] Failed to initiate streaming`);
reject(new Error('Failed to initiate streaming'));
return;
}
console.log(`[${responseId}] Streaming initiated successfully`);
} catch (error) {
console.error(`[${responseId}] Error initiating streaming:`, error);
reject(error);
return;
}
})();
// Function to safely perform cleanup
const performCleanup = () => {
if (cleanupTimeoutId) {
window.clearTimeout(cleanupTimeoutId);
cleanupTimeoutId = null;
}
console.log(`[${responseId}] Performing final cleanup of event listener`);
cleanupEventListener(eventListener);
onComplete();
resolve();
};
// Function to schedule cleanup with ability to cancel
const scheduleCleanup = (delay: number) => {
// Clear any existing cleanup timeout
if (cleanupTimeoutId) {
window.clearTimeout(cleanupTimeoutId);
}
console.log(`[${responseId}] Scheduling listener cleanup in ${delay}ms`);
// Set new cleanup timeout
cleanupTimeoutId = window.setTimeout(() => {
// Only clean up if no messages received recently (in last 2 seconds)
const timeSinceLastMessage = Date.now() - lastMessageTimestamp;
if (timeSinceLastMessage > 2000) {
performCleanup();
} else {
console.log(`[${responseId}] Received message recently, delaying cleanup`);
// Reschedule cleanup
scheduleCleanup(2000);
}
}, delay);
};
// Create a message handler for CustomEvents
eventListener = (event: Event) => {
const customEvent = event as CustomEvent;
const message = customEvent.detail;
// Only process messages for our chat note
if (!message || message.chatNoteId !== chatNoteId) {
return;
}
// Update last message timestamp
lastMessageTimestamp = Date.now();
// Cancel any pending cleanup when we receive a new message
if (cleanupTimeoutId) {
console.log(`[${responseId}] Cancelling scheduled cleanup due to new message`);
window.clearTimeout(cleanupTimeoutId);
cleanupTimeoutId = null;
}
console.log(`[${responseId}] LLM Stream message received via CustomEvent: chatNoteId=${chatNoteId}, content=${!!message.content}, contentLength=${message.content?.length || 0}, thinking=${!!message.thinking}, toolExecution=${!!message.toolExecution}, done=${!!message.done}, type=${message.type || 'llm-stream'}`);
// Mark first message received
if (!receivedAnyMessage) {
receivedAnyMessage = true;
console.log(`[${responseId}] First message received for chat note ${chatNoteId}`);
// Clear the initial timeout since we've received a message
if (initialTimeoutId !== null) {
window.clearTimeout(initialTimeoutId);
initialTimeoutId = null;
}
}
// Handle specific message types
if (message.type === 'tool_execution_start') {
toolsExecuted = true; // Mark that tools were executed
onThinkingUpdate('Executing tools...');
// Also trigger tool execution UI with a specific format
onToolExecution({
action: 'start',
tool: 'tools',
result: 'Executing tools...'
});
return; // Skip accumulating content from this message
}
if (message.type === 'tool_result' && message.toolExecution) {
toolsExecuted = true; // Mark that tools were executed
console.log(`[${responseId}] Processing tool result: ${JSON.stringify(message.toolExecution)}`);
// If tool execution doesn't have an action, add 'result' as the default
if (!message.toolExecution.action) {
message.toolExecution.action = 'result';
}
// First send a 'start' action to ensure the container is created
onToolExecution({
action: 'start',
tool: 'tools',
result: 'Tool execution initialized'
});
// Then send the actual tool execution data
onToolExecution(message.toolExecution);
// Mark tool execution as completed if this is a result or error
if (message.toolExecution.action === 'result' || message.toolExecution.action === 'complete' || message.toolExecution.action === 'error') {
toolExecutionCompleted = true;
console.log(`[${responseId}] Tool execution completed`);
}
return; // Skip accumulating content from this message
}
if (message.type === 'tool_execution_error' && message.toolExecution) {
toolsExecuted = true; // Mark that tools were executed
toolExecutionCompleted = true; // Mark tool execution as completed
onToolExecution({
...message.toolExecution,
action: 'error',
error: message.toolExecution.error || 'Unknown error during tool execution'
});
return; // Skip accumulating content from this message
}
if (message.type === 'tool_completion_processing') {
toolsExecuted = true; // Mark that tools were executed
toolExecutionCompleted = true; // Tools are done, now processing the result
onThinkingUpdate('Generating response with tool results...');
// Also trigger tool execution UI with a specific format
onToolExecution({
action: 'generating',
tool: 'tools',
result: 'Generating response with tool results...'
});
return; // Skip accumulating content from this message
}
// Handle content updates
if (message.content) {
console.log(`[${responseId}] Received content chunk of length ${message.content.length}, preview: "${message.content.substring(0, 50)}${message.content.length > 50 ? '...' : ''}"`);
// If tools were executed and completed, and we're now getting new content,
// this is likely the final response after tool execution from Anthropic
if (toolsExecuted && toolExecutionCompleted && message.content) {
console.log(`[${responseId}] Post-tool execution content detected`);
// If this is the first post-tool chunk, indicate we're starting a new response
if (!receivedPostToolContent) {
receivedPostToolContent = true;
postToolResponse = ''; // Clear any previous post-tool response
console.log(`[${responseId}] First post-tool content chunk, starting fresh accumulation`);
}
// Accumulate post-tool execution content
postToolResponse += message.content;
console.log(`[${responseId}] Accumulated post-tool content, now ${postToolResponse.length} chars`);
// Update the UI with the accumulated post-tool content
// This replaces the pre-tool content with our accumulated post-tool content
onContentUpdate(postToolResponse, message.done || false);
} else {
// Standard content handling for non-tool cases or initial tool response
// Check if this is a duplicated message containing the same content we already have
if (message.done && assistantResponse.includes(message.content)) {
console.log(`[${responseId}] Ignoring duplicated content in done message`);
} else {
// Add to our accumulated response
assistantResponse += message.content;
}
// Update the UI immediately with each chunk
onContentUpdate(assistantResponse, message.done || false);
}
receivedAnyContent = true;
// Reset timeout since we got content
if (timeoutId !== null) {
window.clearTimeout(timeoutId);
}
// Set new timeout
timeoutId = window.setTimeout(() => {
console.warn(`[${responseId}] Stream timeout for chat note ${chatNoteId}`);
// Clean up
performCleanup();
reject(new Error('Stream timeout'));
}, 30000);
}
// Handle tool execution updates (legacy format and standard format with llm-stream type)
if (message.toolExecution) {
// Only process if we haven't already handled this message via specific message types
if (message.type === 'llm-stream' || !message.type) {
console.log(`[${responseId}] Received tool execution update: action=${message.toolExecution.action || 'unknown'}`);
toolsExecuted = true; // Mark that tools were executed
// Mark tool execution as completed if this is a result or error
if (message.toolExecution.action === 'result' ||
message.toolExecution.action === 'complete' ||
message.toolExecution.action === 'error') {
toolExecutionCompleted = true;
console.log(`[${responseId}] Tool execution completed via toolExecution message`);
}
onToolExecution(message.toolExecution);
}
}
// Handle tool calls from the raw data or direct in message (OpenAI format)
const toolCalls = message.tool_calls || (message.raw && message.raw.tool_calls);
if (toolCalls && Array.isArray(toolCalls)) {
console.log(`[${responseId}] Received tool calls: ${toolCalls.length} tools`);
toolsExecuted = true; // Mark that tools were executed
// First send a 'start' action to ensure the container is created
onToolExecution({
action: 'start',
tool: 'tools',
result: 'Tool execution initialized'
});
// Then process each tool call
for (const toolCall of toolCalls) {
let args = toolCall.function?.arguments || {};
// Try to parse arguments if they're a string
if (typeof args === 'string') {
try {
args = JSON.parse(args);
} catch (e) {
console.log(`[${responseId}] Could not parse tool arguments as JSON: ${e}`);
args = { raw: args };
}
}
onToolExecution({
action: 'executing',
tool: toolCall.function?.name || 'unknown',
toolCallId: toolCall.id,
args: args
});
}
}
// Handle thinking state updates
if (message.thinking) {
console.log(`[${responseId}] Received thinking update: ${message.thinking.substring(0, 50)}...`);
onThinkingUpdate(message.thinking);
}
// Handle completion
if (message.done) {
console.log(`[${responseId}] Stream completed for chat note ${chatNoteId}, has content: ${!!message.content}, content length: ${message.content?.length || 0}, current response: ${assistantResponse.length} chars`);
// Dump message content to console for debugging
if (message.content) {
console.log(`[${responseId}] CONTENT IN DONE MESSAGE (first 200 chars): "${message.content.substring(0, 200)}..."`);
// Check if the done message contains the exact same content as our accumulated response
// We normalize by trimming whitespace so leading/trailing spacing differences don't cause a mismatch
const normalizedMessage = message.content.trim();
const normalizedResponse = assistantResponse.trim();
if (normalizedMessage === normalizedResponse) {
console.log(`[${responseId}] Final message is identical to accumulated response, no need to update`);
}
// If the done message is longer but contains our accumulated response, use the done message
else if (normalizedMessage.includes(normalizedResponse) && normalizedMessage.length > normalizedResponse.length) {
console.log(`[${responseId}] Final message is more complete than accumulated response, using it`);
assistantResponse = message.content;
}
// If the done message contains content we haven't accumulated yet, use it instead
else if (!normalizedResponse.includes(normalizedMessage) && normalizedMessage.length > 0) {
console.log(`[${responseId}] Final message has unique content, using it`);
assistantResponse = message.content;
}
// Otherwise, we already have the content accumulated, so no need to update
else {
console.log(`[${responseId}] Already have this content accumulated, not updating`);
}
}
// Clear timeout if set
if (timeoutId !== null) {
window.clearTimeout(timeoutId);
timeoutId = null;
}
// Always mark as done when we receive the done flag
onContentUpdate(assistantResponse, true);
// Set a longer delay before cleanup to allow for post-tool execution messages
// Especially important for Anthropic which may send final message after tool execution
const cleanupDelay = toolsExecuted ? 15000 : 1000; // 15 seconds if tools were used, otherwise 1 second
console.log(`[${responseId}] Setting cleanup delay of ${cleanupDelay}ms since toolsExecuted=${toolsExecuted}`);
scheduleCleanup(cleanupDelay);
}
};
// Register event listener for the custom event
try {
window.addEventListener('llm-stream-message', eventListener);
console.log(`[${responseId}] Event listener added for llm-stream-message events`);
} catch (err) {
console.error(`[${responseId}] Error setting up event listener:`, err);
reject(err);
return;
}
// Set initial timeout for receiving any message
initialTimeoutId = window.setTimeout(() => {
console.warn(`[${responseId}] No messages received for initial period in chat note ${chatNoteId}`);
if (!receivedAnyMessage) {
console.error(`[${responseId}] WebSocket connection not established for chat note ${chatNoteId}`);
if (timeoutId !== null) {
window.clearTimeout(timeoutId);
}
// Clean up
cleanupEventListener(eventListener);
// Show error message to user
reject(new Error('WebSocket connection not established'));
}
}, 10000);
});
}
/**
* Clean up an event listener
*/
function cleanupEventListener(listener: ((event: Event) => void) | null): void {
if (listener) {
try {
window.removeEventListener('llm-stream-message', listener);
console.log(`Successfully removed event listener`);
} catch (err) {
console.error(`Error removing event listener:`, err);
}
}
}
/**
* Get a direct response from the server without streaming
*/
export async function getDirectResponse(chatNoteId: string, messageParams: any): Promise<any> {
try {
// Validate that we have a proper note ID format, not a session ID
if (chatNoteId && chatNoteId.length === 16 && /^[A-Za-z0-9]+$/.test(chatNoteId)) {
console.error(`Invalid note ID format: ${chatNoteId} appears to be a legacy session ID`);
throw new Error("Invalid note ID format - using a legacy session ID");
}
const postResponse = await server.post<any>(`llm/chat/${chatNoteId}/messages`, {
message: messageParams.content,
includeContext: messageParams.useAdvancedContext,
options: {
temperature: 0.7,
maxTokens: 2000
}
});
return postResponse;
} catch (error) {
console.error('Error getting direct response:', error);
throw error;
}
}
/**
* Get embedding statistics
*/
export async function getEmbeddingStats(): Promise<any> {
return server.get('llm/embeddings/stats');
}
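
A minimal usage sketch of the helpers above, assuming they are imported from this module ("./communication.js", as the validation module below does); the sendMessage wrapper, the callback bodies and the console logging are illustrative assumptions, not part of the original file:

import { createChatSession, setupStreamingResponse } from "./communication.js";

async function sendMessage(content: string, currentNoteId?: string): Promise<void> {
    // Create (or look up) the chat note first; both IDs may be null on failure.
    const { chatNoteId } = await createChatSession(currentNoteId);
    if (!chatNoteId) {
        throw new Error("Could not create a chat session");
    }
    const messageParams = { content, useAdvancedContext: true, showThinking: false };
    await setupStreamingResponse(
        chatNoteId,
        messageParams,
        (text, isDone) => console.log(isDone ? "final:" : "chunk:", text), // onContentUpdate
        (thinking) => console.log("thinking:", thinking),                  // onThinkingUpdate
        (toolData) => console.log("tool:", toolData),                      // onToolExecution
        () => console.log("stream finished"),                              // onComplete
        (err) => console.error("stream failed:", err)                      // onError
    );
}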


@@ -1,6 +0,0 @@
/**
* LLM Chat Panel Widget Module
*/
import LlmChatPanel from './llm_chat_panel.js';
export default LlmChatPanel;

File diff suppressed because it is too large.


@@ -1,59 +0,0 @@
/**
* Message processing functions for LLM Chat
*/
import type { ToolExecutionStep } from "./types.js";
/**
* Extract tool execution steps from the DOM that are within the chat flow
*/
export function extractInChatToolSteps(chatMessagesElement: HTMLElement): ToolExecutionStep[] {
const steps: ToolExecutionStep[] = [];
// Look for tool execution in the chat flow
const toolExecutionElement = chatMessagesElement.querySelector('.chat-tool-execution');
if (toolExecutionElement) {
// Find all tool step elements
const stepElements = toolExecutionElement.querySelectorAll('.tool-step');
stepElements.forEach(stepEl => {
const stepHtml = stepEl.innerHTML;
// Determine the step type based on icons or classes present
let type = 'info';
let name: string | undefined;
let content = '';
if (stepHtml.includes('bx-code-block')) {
type = 'executing';
content = 'Executing tools...';
} else if (stepHtml.includes('bx-terminal')) {
type = 'result';
// Extract the tool name from the step
const nameMatch = stepHtml.match(/<span[^>]*>Tool: ([^<]+)<\/span>/);
name = nameMatch ? nameMatch[1] : 'unknown';
// Extract the content from the div with class mt-1 ps-3
const contentEl = stepEl.querySelector('.mt-1.ps-3');
content = contentEl ? contentEl.innerHTML : '';
} else if (stepHtml.includes('bx-error-circle')) {
type = 'error';
const nameMatch = stepHtml.match(/<span[^>]*>Tool: ([^<]+)<\/span>/);
name = nameMatch ? nameMatch[1] : 'unknown';
const contentEl = stepEl.querySelector('.mt-1.ps-3.text-danger');
content = contentEl ? contentEl.innerHTML : '';
} else if (stepHtml.includes('bx-message-dots')) {
type = 'generating';
content = 'Generating response with tool results...';
} else if (stepHtml.includes('bx-loader-alt')) {
// Skip the initializing spinner
return;
}
steps.push({ type, name, content });
});
}
return steps;
}
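
A hypothetical round trip for the extractor above, using markup in the shape produced by renderToolStepsHtml further down in this commit; the container element and the tool name are invented for illustration:

const container = document.createElement("div");
container.innerHTML = `
    <div class="chat-tool-execution">
        <div class="tool-step">
            <i class="bx bx-terminal"></i>
            <span>Tool: search_notes</span>
            <div class="mt-1 ps-3">3 matching notes</div>
        </div>
    </div>`;
// With extractInChatToolSteps from above in scope, this logs:
// [{ type: "result", name: "search_notes", content: "3 matching notes" }]
console.log(extractInChatToolSteps(container));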


@@ -1,55 +0,0 @@
/**
* Types for LLM Chat Panel
*/
export interface ChatResponse {
id: string;
messages: Array<{role: string; content: string}>;
sources?: Array<{noteId: string; title: string}>;
}
export interface SessionResponse {
id: string;
title: string;
noteId?: string;
}
export interface ToolExecutionStep {
type: string;
name?: string;
content: string;
}
export interface MessageData {
role: string;
content: string;
timestamp?: Date;
}
export interface ChatData {
messages: MessageData[];
chatNoteId: string | null;
noteId?: string | null;
toolSteps: ToolExecutionStep[];
sources?: Array<{
noteId: string;
title: string;
similarity?: number;
content?: string;
}>;
metadata?: {
model?: string;
provider?: string;
temperature?: number;
maxTokens?: number;
lastUpdated?: string;
toolExecutions?: Array<{
id: string;
name: string;
arguments: any;
result: any;
error?: string;
timestamp: string;
}>;
};
}
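
A hypothetical value showing how the interfaces above compose; the IDs, model name and message text are invented placeholders:

import type { ChatData } from "./types.js";

const exampleChat: ChatData = {
    chatNoteId: "exampleChatNote1",   // placeholder ID
    noteId: null,
    messages: [
        { role: "user", content: "Summarise this note", timestamp: new Date() },
        { role: "assistant", content: "Here is a short summary..." }
    ],
    toolSteps: [
        { type: "result", name: "search_notes", content: "3 matching notes" }
    ],
    metadata: {
        provider: "openai",           // placeholder provider/model
        model: "gpt-4o",
        temperature: 0.7,
        lastUpdated: new Date().toISOString()
    }
};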


@@ -1,251 +0,0 @@
/**
* UI-related functions for LLM Chat
*/
import { t } from "../../services/i18n.js";
import type { ToolExecutionStep } from "./types.js";
import { formatMarkdown, applyHighlighting } from "./utils.js";
// Template for the chat widget
export const TPL = `
<div class="note-context-chat h-100 w-100 d-flex flex-column">
<!-- Move validation warning outside the card with better styling -->
<div class="provider-validation-warning alert alert-warning m-2 border-left border-warning" style="display: none; padding-left: 15px; border-left: 4px solid #ffc107; background-color: rgba(255, 248, 230, 0.9); font-size: 0.9rem; box-shadow: 0 2px 5px rgba(0,0,0,0.05);"></div>
<div class="note-context-chat-container flex-grow-1 overflow-auto p-3">
<div class="note-context-chat-messages"></div>
<div class="loading-indicator" style="display: none;">
<div class="spinner-border spinner-border-sm text-primary" role="status">
<span class="visually-hidden">Loading...</span>
</div>
<span class="ms-2">${t('ai_llm.agent.processing')}</span>
</div>
</div>
<div class="sources-container p-2 border-top" style="display: none;">
<h6 class="m-0 p-1 d-flex align-items-center">
<i class="bx bx-link-alt me-1"></i> ${t('ai_llm.sources')}
<span class="badge bg-primary rounded-pill ms-2 sources-count"></span>
</h6>
<div class="sources-list mt-2"></div>
</div>
<form class="note-context-chat-form d-flex flex-column border-top p-2">
<div class="d-flex chat-input-container mb-2">
<textarea
class="form-control note-context-chat-input"
placeholder="${t('ai_llm.enter_message')}"
rows="2"
></textarea>
<button type="submit" class="btn btn-primary note-context-chat-send-button ms-2 d-flex align-items-center justify-content-center">
<i class="bx bx-send"></i>
</button>
</div>
<div class="d-flex align-items-center context-option-container mt-1 justify-content-end">
<small class="text-muted me-auto fst-italic">Options:</small>
<div class="form-check form-switch me-3 small">
<input class="form-check-input use-advanced-context-checkbox" type="checkbox" id="useEnhancedContext" checked>
<label class="form-check-label small" for="useEnhancedContext" title="${t('ai.enhanced_context_description')}">
${t('ai_llm.use_enhanced_context')}
<i class="bx bx-info-circle small text-muted"></i>
</label>
</div>
<div class="form-check form-switch small">
<input class="form-check-input show-thinking-checkbox" type="checkbox" id="showThinking">
<label class="form-check-label small" for="showThinking" title="${t('ai.show_thinking_description')}">
${t('ai_llm.show_thinking')}
<i class="bx bx-info-circle small text-muted"></i>
</label>
</div>
</div>
</form>
</div>
`;
/**
* Add a message to the chat UI
*/
export function addMessageToChat(messagesContainer: HTMLElement, chatContainer: HTMLElement, role: 'user' | 'assistant', content: string) {
const messageElement = document.createElement('div');
messageElement.className = `chat-message ${role}-message mb-3 d-flex`;
const avatarElement = document.createElement('div');
avatarElement.className = 'message-avatar d-flex align-items-center justify-content-center me-2';
if (role === 'user') {
avatarElement.innerHTML = '<i class="bx bx-user"></i>';
avatarElement.classList.add('user-avatar');
} else {
avatarElement.innerHTML = '<i class="bx bx-bot"></i>';
avatarElement.classList.add('assistant-avatar');
}
const contentElement = document.createElement('div');
contentElement.className = 'message-content p-3 rounded flex-grow-1';
if (role === 'user') {
contentElement.classList.add('user-content', 'bg-light');
} else {
contentElement.classList.add('assistant-content');
}
// Format the content with markdown
contentElement.innerHTML = formatMarkdown(content);
messageElement.appendChild(avatarElement);
messageElement.appendChild(contentElement);
messagesContainer.appendChild(messageElement);
// Apply syntax highlighting to any code blocks in the message
applyHighlighting(contentElement);
// Scroll to bottom
chatContainer.scrollTop = chatContainer.scrollHeight;
}
/**
* Show sources in the UI
*/
export function showSources(
sourcesList: HTMLElement,
sourcesContainer: HTMLElement,
sourcesCount: HTMLElement,
sources: Array<{noteId: string, title: string}>,
onSourceClick: (noteId: string) => void
) {
sourcesList.innerHTML = '';
sourcesCount.textContent = sources.length.toString();
sources.forEach(source => {
const sourceElement = document.createElement('div');
sourceElement.className = 'source-item p-2 mb-1 border rounded d-flex align-items-center';
// Create the direct link to the note
sourceElement.innerHTML = `
<div class="d-flex align-items-center w-100">
<a href="#root/${source.noteId}"
data-note-id="${source.noteId}"
class="source-link text-truncate d-flex align-items-center"
title="Open note: ${source.title}">
<i class="bx bx-file-blank me-1"></i>
<span class="source-title">${source.title}</span>
</a>
</div>`;
// Add click handler
sourceElement.querySelector('.source-link')?.addEventListener('click', (e) => {
e.preventDefault();
e.stopPropagation();
onSourceClick(source.noteId);
return false;
});
sourcesList.appendChild(sourceElement);
});
sourcesContainer.style.display = 'block';
}
/**
* Hide sources in the UI
*/
export function hideSources(sourcesContainer: HTMLElement) {
sourcesContainer.style.display = 'none';
}
/**
* Show loading indicator
*/
export function showLoadingIndicator(loadingIndicator: HTMLElement) {
const logId = `ui-${Date.now()}`;
console.log(`[${logId}] Showing loading indicator`);
try {
loadingIndicator.style.display = 'flex';
        // Reading offsetHeight forces a reflow so the display change takes effect immediately
        void loadingIndicator.offsetHeight;
console.log(`[${logId}] Loading indicator initialized`);
} catch (err) {
console.error(`[${logId}] Error showing loading indicator:`, err);
}
}
/**
* Hide loading indicator
*/
export function hideLoadingIndicator(loadingIndicator: HTMLElement) {
const logId = `ui-${Date.now()}`;
console.log(`[${logId}] Hiding loading indicator`);
try {
loadingIndicator.style.display = 'none';
        // Reading offsetHeight forces a reflow so the display change takes effect immediately
        void loadingIndicator.offsetHeight;
console.log(`[${logId}] Loading indicator hidden`);
} catch (err) {
console.error(`[${logId}] Error hiding loading indicator:`, err);
}
}
/**
* Render tool steps as HTML for display in chat
*/
export function renderToolStepsHtml(steps: ToolExecutionStep[]): string {
if (!steps || steps.length === 0) return '';
let html = '';
steps.forEach(step => {
let icon, labelClass, content;
switch (step.type) {
case 'executing':
icon = 'bx-code-block text-primary';
labelClass = '';
content = `<div class="d-flex align-items-center">
<i class="bx ${icon} me-1"></i>
<span>${step.content}</span>
</div>`;
break;
case 'result':
icon = 'bx-terminal text-success';
labelClass = 'fw-bold';
content = `<div class="d-flex align-items-center">
<i class="bx ${icon} me-1"></i>
<span class="${labelClass}">Tool: ${step.name || 'unknown'}</span>
</div>
<div class="mt-1 ps-3">${step.content}</div>`;
break;
case 'error':
icon = 'bx-error-circle text-danger';
labelClass = 'fw-bold text-danger';
content = `<div class="d-flex align-items-center">
<i class="bx ${icon} me-1"></i>
<span class="${labelClass}">Tool: ${step.name || 'unknown'}</span>
</div>
<div class="mt-1 ps-3 text-danger">${step.content}</div>`;
break;
case 'generating':
icon = 'bx-message-dots text-info';
labelClass = '';
content = `<div class="d-flex align-items-center">
<i class="bx ${icon} me-1"></i>
<span>${step.content}</span>
</div>`;
break;
default:
icon = 'bx-info-circle text-muted';
labelClass = '';
content = `<div class="d-flex align-items-center">
<i class="bx ${icon} me-1"></i>
<span>${step.content}</span>
</div>`;
}
html += `<div class="tool-step my-1">${content}</div>`;
});
return html;
}
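
A hypothetical wiring of the UI helpers above; only the class names defined in TPL come from the original, everything else (the detached panel element, the message text) is illustrative:

const panel = document.createElement("div");
panel.innerHTML = TPL;

const chatContainer = panel.querySelector(".note-context-chat-container") as HTMLElement;
const messages = panel.querySelector(".note-context-chat-messages") as HTMLElement;
const loading = panel.querySelector(".loading-indicator") as HTMLElement;

addMessageToChat(messages, chatContainer, "user", "What does this note say about backups?");
showLoadingIndicator(loading);

// Later, once the assistant reply has arrived:
hideLoadingIndicator(loading);
addMessageToChat(messages, chatContainer, "assistant", "**Backups** run nightly...");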


@@ -1,93 +0,0 @@
/**
* Utility functions for LLM Chat
*/
import { marked } from "marked";
import { applySyntaxHighlight } from "../../services/syntax_highlight.js";
/**
* Format markdown content for display
*/
export function formatMarkdown(content: string): string {
if (!content) return '';
// First, extract HTML thinking visualization to protect it from replacements
const thinkingBlocks: string[] = [];
let processedContent = content.replace(/<div class=['"](thinking-process|reasoning-process)['"][\s\S]*?<\/div>/g, (match) => {
const placeholder = `__THINKING_BLOCK_${thinkingBlocks.length}__`;
thinkingBlocks.push(match);
return placeholder;
});
// Use marked library to parse the markdown
const markedContent = marked(processedContent, {
breaks: true, // Convert line breaks to <br>
gfm: true, // Enable GitHub Flavored Markdown
silent: true // Ignore errors
});
// Handle potential promise (though it shouldn't be with our options)
if (typeof markedContent === 'string') {
processedContent = markedContent;
} else {
console.warn('Marked returned a promise unexpectedly');
// Use the original content as fallback
processedContent = content;
}
// Restore thinking visualization blocks
thinkingBlocks.forEach((block, index) => {
processedContent = processedContent.replace(`__THINKING_BLOCK_${index}__`, block);
});
return processedContent;
}
/**
* Simple HTML escaping for safer content display
*/
export function escapeHtml(text: string): string {
if (typeof text !== 'string') {
text = String(text || '');
}
return text
.replace(/&/g, '&amp;')
.replace(/</g, '&lt;')
.replace(/>/g, '&gt;')
.replace(/"/g, '&quot;')
.replace(/'/g, '&#039;');
}
/**
* Apply syntax highlighting to content
*/
export function applyHighlighting(element: HTMLElement): void {
applySyntaxHighlight($(element));
}
/**
* Format tool arguments for display
*/
export function formatToolArgs(args: any): string {
if (!args || typeof args !== 'object') return '';
return Object.entries(args)
.map(([key, value]) => {
// Format the value based on its type
let displayValue;
if (typeof value === 'string') {
displayValue = value.length > 50 ? `"${value.substring(0, 47)}..."` : `"${value}"`;
} else if (value === null) {
displayValue = 'null';
} else if (Array.isArray(value)) {
displayValue = '[...]'; // Simplified array representation
} else if (typeof value === 'object') {
displayValue = '{...}'; // Simplified object representation
} else {
displayValue = String(value);
}
return `<span class="text-primary">${escapeHtml(key)}</span>: ${escapeHtml(displayValue)}`;
})
.join(', ');
}
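
A hypothetical call showing the escaping and truncation behaviour of formatToolArgs; the argument object is invented:

const argsHtml = formatToolArgs({
    query: "meeting notes <2024>",
    limit: 5,
    tags: ["work", "todo"],
    parent: null
});
// Roughly:
// <span class="text-primary">query</span>: &quot;meeting notes &lt;2024&gt;&quot;,
// <span class="text-primary">limit</span>: 5,
// <span class="text-primary">tags</span>: [...],
// <span class="text-primary">parent</span>: null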


@@ -1,104 +0,0 @@
/**
* Validation functions for LLM Chat
*/
import options from "../../services/options.js";
import { getEmbeddingStats } from "./communication.js";
/**
* Validate embedding providers configuration
*/
export async function validateEmbeddingProviders(validationWarning: HTMLElement): Promise<void> {
try {
// Check if AI is enabled
const aiEnabled = options.is('aiEnabled');
if (!aiEnabled) {
validationWarning.style.display = 'none';
return;
}
// Get provider precedence
const precedenceStr = options.get('aiProviderPrecedence') || 'openai,anthropic,ollama';
let precedenceList: string[] = [];
if (precedenceStr) {
if (precedenceStr.startsWith('[') && precedenceStr.endsWith(']')) {
precedenceList = JSON.parse(precedenceStr);
} else if (precedenceStr.includes(',')) {
precedenceList = precedenceStr.split(',').map(p => p.trim());
} else {
precedenceList = [precedenceStr];
}
}
// Get enabled providers - this is a simplification since we don't have direct DB access
// We'll determine enabled status based on the presence of keys or settings
const enabledProviders: string[] = [];
// OpenAI is enabled if API key is set
const openaiKey = options.get('openaiApiKey');
if (openaiKey) {
enabledProviders.push('openai');
}
// Anthropic is enabled if API key is set
const anthropicKey = options.get('anthropicApiKey');
if (anthropicKey) {
enabledProviders.push('anthropic');
}
// Ollama is enabled if base URL is set
const ollamaBaseUrl = options.get('ollamaBaseUrl');
if (ollamaBaseUrl) {
enabledProviders.push('ollama');
}
// Local is always available
enabledProviders.push('local');
// Perform validation checks
const allPrecedenceEnabled = precedenceList.every((p: string) => enabledProviders.includes(p));
// Get embedding queue status
const embeddingStats = await getEmbeddingStats() as {
success: boolean,
stats: {
totalNotesCount: number;
embeddedNotesCount: number;
queuedNotesCount: number;
failedNotesCount: number;
lastProcessedDate: string | null;
percentComplete: number;
}
};
const queuedNotes = embeddingStats?.stats?.queuedNotesCount || 0;
const hasEmbeddingsInQueue = queuedNotes > 0;
// Show warning if there are issues
if (!allPrecedenceEnabled || hasEmbeddingsInQueue) {
let message = '<i class="bx bx-error-circle me-2"></i><strong>AI Provider Configuration Issues</strong>';
message += '<ul class="mb-1 ps-4">';
if (!allPrecedenceEnabled) {
const disabledProviders = precedenceList.filter((p: string) => !enabledProviders.includes(p));
message += `<li>The following providers in your precedence list are not enabled: ${disabledProviders.join(', ')}.</li>`;
}
if (hasEmbeddingsInQueue) {
message += `<li>Currently processing embeddings for ${queuedNotes} notes. Some AI features may produce incomplete results until processing completes.</li>`;
}
message += '</ul>';
message += '<div class="mt-2"><a href="javascript:" class="settings-link btn btn-sm btn-outline-secondary"><i class="bx bx-cog me-1"></i>Open AI Settings</a></div>';
// Update HTML content
validationWarning.innerHTML = message;
validationWarning.style.display = 'block';
} else {
validationWarning.style.display = 'none';
}
} catch (error) {
console.error('Error validating embedding providers:', error);
validationWarning.style.display = 'none';
}
}
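
For reference, the precedence-string parsing near the top of validateEmbeddingProviders accepts three shapes for the aiProviderPrecedence option; the values below are invented examples, not defaults taken from the codebase:

const precedenceExamples: Record<string, string[]> = {
    '["openai","anthropic"]': ["openai", "anthropic"],  // JSON array form
    "openai, ollama": ["openai", "ollama"],             // comma-separated form
    "ollama": ["ollama"]                                 // single provider name
};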