feat(llm): remove everything to do with embeddings

perf3ct
2025-06-07 18:11:12 +00:00
parent 70cdc100d9
commit 44a45780b7
71 changed files with 543 additions and 10165 deletions

View File

@@ -23,4 +23,4 @@ export interface EntityChange {
instanceId?: string | null;
}
export type EntityType = "notes" | "branches" | "attributes" | "note_reordering" | "revisions" | "options" | "attachments" | "blobs" | "etapi_tokens" | "note_embeddings";
export type EntityType = "notes" | "branches" | "attributes" | "note_reordering" | "revisions" | "options" | "attachments" | "blobs" | "etapi_tokens";
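A minimal sketch (illustrative only, not part of this commit) of how the narrowed union pairs with an exhaustiveness guard: once "note_embeddings" is gone from EntityType, any dispatcher case still naming it fails to compile. The assertUnreachable helper is an assumption, not code from this repository.

type EntityType = "notes" | "branches" | "attributes" | "note_reordering" | "revisions" | "options" | "attachments" | "blobs" | "etapi_tokens";

// A `never` parameter makes this callable only where TypeScript has proven
// that every union member was handled by the preceding cases.
function assertUnreachable(value: never): never {
    throw new Error(`Unknown entityName '${value}'`);
}

function describeEntity(entity: EntityType): string {
    switch (entity) {
        case "notes": case "branches": case "attributes":
        case "note_reordering": case "revisions": case "options":
        case "attachments": case "blobs": case "etapi_tokens":
            return entity;
        default:
            // Reached only if EntityType grows without this switch keeping up;
            // a leftover "note_embeddings" case above would be a type error.
            return assertUnreachable(entity);
    }
}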

View File

@@ -35,7 +35,7 @@ async function processEntityChanges(entityChanges: EntityChange[]) {
loadResults.addOption(attributeEntity.name);
} else if (ec.entityName === "attachments") {
processAttachment(loadResults, ec);
} else if (ec.entityName === "blobs" || ec.entityName === "etapi_tokens" || ec.entityName === "note_embeddings") {
} else if (ec.entityName === "blobs" || ec.entityName === "etapi_tokens") {
// NOOP - these entities are handled at the backend level and don't require frontend processing
} else {
throw new Error(`Unknown entityName '${ec.entityName}'`);

View File

@@ -64,7 +64,6 @@ type EntityRowMappings = {
options: OptionRow;
revisions: RevisionRow;
note_reordering: NoteReorderingRow;
note_embeddings: NoteEmbeddingRow;
};
export type EntityRowNames = keyof EntityRowMappings;
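A small sketch (row types stubbed, so illustrative only) of why deleting the mapping entry is sufficient here: EntityRowNames is derived with keyof, so removing the note_embeddings key shrinks the name union automatically, with no second list to keep in sync.

type OptionRow = { name: string; value: string };
type RevisionRow = { revisionId: string };
type NoteReorderingRow = Record<string, number>;

type EntityRowMappings = {
    options: OptionRow;
    revisions: RevisionRow;
    note_reordering: NoteReorderingRow;
};

// Resolves to "options" | "revisions" | "note_reordering"; the removed
// "note_embeddings" member disappears without touching this line.
export type EntityRowNames = keyof EntityRowMappings;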

View File

@@ -1124,10 +1124,8 @@
"layout-horizontal-description": "launcher bar is underneath the tab bar, the tab bar is now full width."
},
"ai_llm": {
"embeddings_configuration": "Embeddings Configuration",
"not_started": "Not started",
"title": "AI & Embedding Settings",
"embedding_statistics": "Embedding Statistics",
"title": "AI Settings",
"processed_notes": "Processed Notes",
"total_notes": "Total Notes",
"progress": "Progress",
@@ -1135,7 +1133,6 @@
"failed_notes": "Failed Notes",
"last_processed": "Last Processed",
"refresh_stats": "Refresh Statistics",
"no_failed_embeddings": "No failed embeddings found.",
"enable_ai_features": "Enable AI/LLM features",
"enable_ai_description": "Enable AI features like note summarization, content generation, and other LLM capabilities",
"openai_tab": "OpenAI",
@@ -1160,20 +1157,16 @@
"anthropic_api_key_description": "Your Anthropic API key for accessing Claude models",
"default_model": "Default Model",
"openai_model_description": "Examples: gpt-4o, gpt-4-turbo, gpt-3.5-turbo",
"embedding_model": "Embedding Model",
"openai_embedding_model_description": "Model used for generating embeddings (text-embedding-3-small recommended)",
"base_url": "Base URL",
"openai_url_description": "Default: https://api.openai.com/v1",
"anthropic_settings": "Anthropic Settings",
"anthropic_url_description": "Base URL for the Anthropic API (default: https://api.anthropic.com)",
"anthropic_model_description": "Anthropic Claude models for chat completion",
"voyage_settings": "Voyage AI Settings",
"voyage_api_key_description": "Your Voyage AI API key for accessing embeddings services",
"ollama_settings": "Ollama Settings",
"ollama_url_description": "URL for the Ollama API (default: http://localhost:11434)",
"ollama_model_description": "Ollama model to use for chat completion",
"anthropic_configuration": "Anthropic Configuration",
"voyage_embedding_model_description": "Voyage AI embedding models for text embeddings (voyage-2 recommended)",
"voyage_configuration": "Voyage AI Configuration",
"voyage_url_description": "Default: https://api.voyageai.com/v1",
"ollama_configuration": "Ollama Configuration",
@@ -1181,28 +1174,10 @@
"enable_ollama_description": "Enable Ollama for local AI model usage",
"ollama_url": "Ollama URL",
"ollama_model": "Ollama Model",
"ollama_embedding_model": "Embedding Model",
"ollama_embedding_model_description": "Specialized model for generating embeddings (vector representations)",
"refresh_models": "Refresh Models",
"refreshing_models": "Refreshing...",
"embedding_configuration": "Embeddings Configuration",
"embedding_default_provider": "Default Provider",
"embedding_default_provider_description": "Select the default provider used for generating note embeddings",
"embedding_provider_precedence": "Embedding Provider Precedence",
"embedding_providers_order": "Embedding Provider Order",
"embedding_providers_order_description": "Set the order of embedding providers in comma-separated format (e.g., \"openai,voyage,ollama,local\")",
"enable_automatic_indexing": "Enable Automatic Indexing",
"enable_automatic_indexing_description": "Automatically generate embeddings for new and updated notes",
"embedding_auto_update_enabled": "Auto-update Embeddings",
"embedding_auto_update_enabled_description": "Automatically update embeddings when notes are modified",
"recreate_embeddings": "Recreate All Embeddings",
"recreate_embeddings_description": "Regenerate all note embeddings from scratch (may take a long time for large note collections)",
"recreate_embeddings_started": "Embeddings regeneration started. This may take a long time for large note collections.",
"recreate_embeddings_error": "Error starting embeddings regeneration. Check logs for details.",
"recreate_embeddings_confirm": "Are you sure you want to recreate all embeddings? This may take a long time for large note collections.",
"rebuild_index": "Rebuild Index",
"rebuild_index_description": "Rebuild the vector search index for better performance (much faster than recreating embeddings)",
"rebuild_index_started": "Embedding index rebuild started. This may take several minutes.",
"rebuild_index_error": "Error starting index rebuild. Check logs for details.",
"note_title": "Note Title",
"error": "Error",
@@ -1212,43 +1187,16 @@
"partial": "{{ percentage }}% completed",
"retry_queued": "Note queued for retry",
"retry_failed": "Failed to queue note for retry",
"embedding_provider_precedence_description": "Comma-separated list of providers in order of precedence for embeddings search (e.g., 'openai,ollama,anthropic')",
"embedding_dimension_strategy": "Embedding Dimension Strategy",
"embedding_dimension_auto": "Auto (Recommended)",
"embedding_dimension_fixed": "Fixed",
"embedding_similarity_threshold": "Similarity Threshold",
"embedding_similarity_threshold_description": "Minimum similarity score for notes to be included in search results (0-1)",
"max_notes_per_llm_query": "Max Notes Per Query",
"max_notes_per_llm_query_description": "Maximum number of similar notes to include in AI context",
"embedding_dimension_strategy_description": "Choose how embeddings are handled. 'Native' preserves maximum information by adapting smaller vectors to match larger ones (recommended). 'Regenerate' creates new embeddings with the target model for specific search needs.",
"drag_providers_to_reorder": "Drag providers up or down to set your preferred order for embedding searches",
"active_providers": "Active Providers",
"disabled_providers": "Disabled Providers",
"remove_provider": "Remove provider from search",
"restore_provider": "Restore provider to search",
"embedding_generation_location": "Generation Location",
"embedding_generation_location_description": "Select where embedding generation should happen",
"embedding_generation_location_client": "Client/Server",
"embedding_generation_location_sync_server": "Sync Server",
"enable_auto_update_embeddings": "Auto-update Embeddings",
"enable_auto_update_embeddings_description": "Automatically update embeddings when notes are modified",
"auto_update_embeddings": "Auto-update Embeddings",
"auto_update_embeddings_desc": "Automatically update embeddings when notes are modified",
"similarity_threshold": "Similarity Threshold",
"similarity_threshold_description": "Minimum similarity score (0-1) for notes to be included in context for LLM queries",
"embedding_batch_size": "Batch Size",
"embedding_batch_size_description": "Number of notes to process in a single batch (1-50)",
"embedding_update_interval": "Update Interval (ms)",
"embedding_update_interval_description": "Time between processing batches of embeddings (in milliseconds)",
"embedding_default_dimension": "Default Dimension",
"embedding_default_dimension_description": "Default embedding vector dimension when creating new embeddings",
"reprocess_all_embeddings": "Reprocess All Embeddings",
"reprocess_all_embeddings_description": "Queue all notes for embedding processing. This may take some time depending on your number of notes.",
"reprocessing_embeddings": "Reprocessing...",
"reprocess_started": "Embedding reprocessing started in the background",
"reprocess_error": "Error starting embedding reprocessing",
"reprocess_index": "Rebuild Search Index",
"reprocess_index_description": "Optimize the search index for better performance. This uses existing embeddings without regenerating them (much faster than reprocessing all embeddings).",
"reprocessing_index": "Rebuilding...",
"reprocess_index_started": "Search index optimization started in the background",
"reprocess_index_error": "Error rebuilding search index",
@@ -1261,7 +1209,6 @@
"incomplete": "Incomplete ({{percentage}}%)",
"complete": "Complete (100%)",
"refreshing": "Refreshing...",
"stats_error": "Error fetching embedding statistics",
"auto_refresh_notice": "Auto-refreshes every {{seconds}} seconds",
"note_queued_for_retry": "Note queued for retry",
"failed_to_retry_note": "Failed to retry note",
@@ -1269,7 +1216,6 @@
"failed_to_retry_all": "Failed to retry notes",
"ai_settings": "AI Settings",
"api_key_tooltip": "API key for accessing the service",
"confirm_delete_embeddings": "Are you sure you want to delete all AI embeddings? This will remove all semantic search capabilities until notes are reindexed, which can take a significant amount of time.",
"empty_key_warning": {
"anthropic": "Anthropic API key is empty. Please enter a valid API key.",
"openai": "OpenAI API key is empty. Please enter a valid API key.",
@@ -1302,7 +1248,6 @@
"note_chat": "Note Chat",
"notes_indexed": "{{ count }} note indexed",
"notes_indexed_plural": "{{ count }} notes indexed",
"reset_embeddings": "Reset Embeddings",
"sources": "Sources",
"start_indexing": "Start Indexing",
"use_advanced_context": "Use Advanced Context",
@@ -1315,24 +1260,10 @@
},
"create_new_ai_chat": "Create new AI Chat",
"configuration_warnings": "There are some issues with your AI configuration. Please check your settings.",
"embeddings_started": "Embedding generation started",
"embeddings_stopped": "Embedding generation stopped",
"embeddings_toggle_error": "Error toggling embeddings",
"local_embedding_description": "Uses local embedding models for offline text embedding generation",
"local_embedding_settings": "Local Embedding Settings",
"ollama_embedding_settings": "Ollama Embedding Settings",
"ollama_embedding_url_description": "URL for the Ollama API for embedding generation (default: http://localhost:11434)",
"openai_embedding_api_key_description": "Your OpenAI API key for embedding generation (can be different from chat API key)",
"openai_embedding_settings": "OpenAI Embedding Settings",
"openai_embedding_url_description": "Base URL for OpenAI embedding API (default: https://api.openai.com/v1)",
"selected_embedding_provider": "Selected Embedding Provider",
"selected_embedding_provider_description": "Choose the provider for generating note embeddings",
"selected_provider": "Selected Provider",
"selected_provider_description": "Choose the AI provider for chat and completion features",
"select_embedding_provider": "Select embedding provider...",
"select_model": "Select model...",
"select_provider": "Select provider...",
"voyage_embedding_url_description": "Base URL for the Voyage AI embedding API (default: https://api.voyageai.com/v1)"
"select_provider": "Select provider..."
},
"zoom_factor": {
"title": "Zoom Factor (desktop build only)",

View File

@@ -258,9 +258,3 @@ export async function getDirectResponse(noteId: string, messageParams: any): Pro
}
}
/**
* Get embedding statistics
*/
export async function getEmbeddingStats(): Promise<any> {
return server.get('llm/embeddings/stats');
}

View File

@@ -2,12 +2,11 @@
* Validation functions for LLM Chat
*/
import options from "../../services/options.js";
import { getEmbeddingStats } from "./communication.js";
/**
* Validate embedding providers configuration
* Validate providers configuration
*/
export async function validateEmbeddingProviders(validationWarning: HTMLElement): Promise<void> {
export async function validateProviders(validationWarning: HTMLElement): Promise<void> {
try {
// Check if AI is enabled
const aiEnabled = options.is('aiEnabled');
@@ -62,23 +61,8 @@ export async function validateEmbeddingProviders(validationWarning: HTMLElement)
// Add checks for other providers as needed
}
// Fetch embedding stats to check if there are any notes being processed
const embeddingStats = await getEmbeddingStats() as {
success: boolean,
stats: {
totalNotesCount: number;
embeddedNotesCount: number;
queuedNotesCount: number;
failedNotesCount: number;
lastProcessedDate: string | null;
percentComplete: number;
}
};
const queuedNotes = embeddingStats?.stats?.queuedNotesCount || 0;
const hasEmbeddingsInQueue = queuedNotes > 0;
// Show warning if there are configuration issues or embeddings in queue
if (configIssues.length > 0 || hasEmbeddingsInQueue) {
// Show warning if there are configuration issues
if (configIssues.length > 0) {
let message = '<i class="bx bx-error-circle me-2"></i><strong>AI Provider Configuration Issues</strong>';
message += '<ul class="mb-1 ps-4">';
@@ -87,11 +71,6 @@ export async function validateEmbeddingProviders(validationWarning: HTMLElement)
for (const issue of configIssues) {
message += `<li>${issue}</li>`;
}
// Show warning about embeddings queue if applicable
if (hasEmbeddingsInQueue) {
message += `<li>Currently processing embeddings for ${queuedNotes} notes. Some AI features may produce incomplete results until processing completes.</li>`;
}
message += '</ul>';
message += '<div class="mt-2"><a href="javascript:" class="settings-link btn btn-sm btn-outline-secondary"><i class="bx bx-cog me-1"></i>Open AI Settings</a></div>';
@@ -103,7 +82,7 @@ export async function validateEmbeddingProviders(validationWarning: HTMLElement)
validationWarning.style.display = 'none';
}
} catch (error) {
console.error('Error validating embedding providers:', error);
console.error('Error validating providers:', error);
validationWarning.style.display = 'none';
}
}
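Condensed, the control flow that survives this file reads roughly as below (a sketch, not the literal source; collectConfigIssues and buildWarningHtml are hypothetical stand-ins for the per-provider checks and HTML assembly shown in the hunks above).

export async function validateProviders(validationWarning: HTMLElement): Promise<void> {
    try {
        if (!options.is('aiEnabled')) {
            validationWarning.style.display = 'none';
            return;
        }
        // Per-provider API key / base URL checks (see hunks above).
        const configIssues: string[] = collectConfigIssues();
        if (configIssues.length > 0) {
            validationWarning.innerHTML = buildWarningHtml(configIssues);
            validationWarning.style.display = 'block';
        } else {
            validationWarning.style.display = 'none';
        }
    } catch (error) {
        console.error('Error validating providers:', error);
        validationWarning.style.display = 'none';
    }
}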

View File

@@ -4,16 +4,12 @@ import { t } from "../../../../services/i18n.js";
import type { OptionDefinitions, OptionMap } from "@triliumnext/commons";
import server from "../../../../services/server.js";
import toastService from "../../../../services/toast.js";
import type { EmbeddingStats, FailedEmbeddingNotes } from "./interfaces.js";
import { ProviderService } from "./providers.js";
export default class AiSettingsWidget extends OptionsWidget {
private ollamaModelsRefreshed = false;
private openaiModelsRefreshed = false;
private anthropicModelsRefreshed = false;
private statsRefreshInterval: NodeJS.Timeout | null = null;
private indexRebuildRefreshInterval: NodeJS.Timeout | null = null;
private readonly STATS_REFRESH_INTERVAL = 5000; // 5 seconds
private providerService: ProviderService | null = null;
doRender() {
@@ -23,9 +19,6 @@ export default class AiSettingsWidget extends OptionsWidget {
// Setup event handlers for options
this.setupEventHandlers();
this.refreshEmbeddingStats();
this.fetchFailedEmbeddingNotes();
return this.$widget;
}
@@ -57,26 +50,13 @@ export default class AiSettingsWidget extends OptionsWidget {
const isEnabled = value === 'true';
if (isEnabled) {
// Start embedding generation
await server.post('llm/embeddings/start');
toastService.showMessage(t("ai_llm.embeddings_started") || "Embedding generation started");
// Start polling for stats updates
this.refreshEmbeddingStats();
toastService.showMessage(t("ai_llm.ai_enabled") || "AI features enabled");
} else {
// Stop embedding generation
await server.post('llm/embeddings/stop');
toastService.showMessage(t("ai_llm.embeddings_stopped") || "Embedding generation stopped");
// Clear any active polling intervals
if (this.indexRebuildRefreshInterval) {
clearInterval(this.indexRebuildRefreshInterval);
this.indexRebuildRefreshInterval = null;
}
toastService.showMessage(t("ai_llm.ai_disabled") || "AI features disabled");
}
} catch (error) {
console.error('Error toggling embeddings:', error);
toastService.showError(t("ai_llm.embeddings_toggle_error") || "Error toggling embeddings");
console.error('Error toggling AI:', error);
toastService.showError(t("ai_llm.ai_toggle_error") || "Error toggling AI features");
}
}
@@ -102,7 +82,6 @@ export default class AiSettingsWidget extends OptionsWidget {
this.setupChangeHandler('.openai-api-key', 'openaiApiKey', true);
this.setupChangeHandler('.openai-base-url', 'openaiBaseUrl', true);
this.setupChangeHandler('.openai-default-model', 'openaiDefaultModel');
this.setupChangeHandler('.openai-embedding-model', 'openaiEmbeddingModel');
// Anthropic options
this.setupChangeHandler('.anthropic-api-key', 'anthropicApiKey', true);
@@ -111,18 +90,10 @@ export default class AiSettingsWidget extends OptionsWidget {
// Voyage options
this.setupChangeHandler('.voyage-api-key', 'voyageApiKey');
this.setupChangeHandler('.voyage-embedding-model', 'voyageEmbeddingModel');
this.setupChangeHandler('.voyage-embedding-base-url', 'voyageEmbeddingBaseUrl');
// Ollama options
this.setupChangeHandler('.ollama-base-url', 'ollamaBaseUrl');
this.setupChangeHandler('.ollama-default-model', 'ollamaDefaultModel');
this.setupChangeHandler('.ollama-embedding-model', 'ollamaEmbeddingModel');
this.setupChangeHandler('.ollama-embedding-base-url', 'ollamaEmbeddingBaseUrl');
// Embedding-specific provider options
this.setupChangeHandler('.openai-embedding-api-key', 'openaiEmbeddingApiKey', true);
this.setupChangeHandler('.openai-embedding-base-url', 'openaiEmbeddingBaseUrl', true);
const $refreshModels = this.$widget.find('.refresh-models');
$refreshModels.on('click', async () => {
@@ -162,15 +133,6 @@ export default class AiSettingsWidget extends OptionsWidget {
this.anthropicModelsRefreshed = await this.providerService?.refreshAnthropicModels(false, this.anthropicModelsRefreshed) || false;
});
// Embedding options event handlers
this.setupChangeHandler('.embedding-auto-update-enabled', 'embeddingAutoUpdateEnabled', false, true);
this.setupChangeHandler('.enable-automatic-indexing', 'enableAutomaticIndexing', false, true);
this.setupChangeHandler('.embedding-similarity-threshold', 'embeddingSimilarityThreshold');
this.setupChangeHandler('.max-notes-per-llm-query', 'maxNotesPerLlmQuery');
this.setupChangeHandler('.embedding-selected-provider', 'embeddingSelectedProvider', true);
this.setupChangeHandler('.embedding-dimension-strategy', 'embeddingDimensionStrategy');
this.setupChangeHandler('.embedding-batch-size', 'embeddingBatchSize');
this.setupChangeHandler('.embedding-update-interval', 'embeddingUpdateInterval');
// Add provider selection change handlers for dynamic settings visibility
this.$widget.find('.ai-selected-provider').on('change', async () => {
@@ -183,26 +145,13 @@ export default class AiSettingsWidget extends OptionsWidget {
}
});
this.$widget.find('.embedding-selected-provider').on('change', async () => {
const selectedProvider = this.$widget.find('.embedding-selected-provider').val() as string;
this.$widget.find('.embedding-provider-settings').hide();
if (selectedProvider) {
this.$widget.find(`.${selectedProvider}-embedding-provider-settings`).show();
// Automatically fetch embedding models for the newly selected provider
await this.fetchModelsForProvider(selectedProvider, 'embedding');
}
});
// Add base URL change handlers to trigger model fetching
this.$widget.find('.openai-base-url').on('change', async () => {
const selectedProvider = this.$widget.find('.ai-selected-provider').val() as string;
const selectedEmbeddingProvider = this.$widget.find('.embedding-selected-provider').val() as string;
if (selectedProvider === 'openai') {
await this.fetchModelsForProvider('openai', 'chat');
}
if (selectedEmbeddingProvider === 'openai') {
await this.fetchModelsForProvider('openai', 'embedding');
}
});
this.$widget.find('.anthropic-base-url').on('change', async () => {
@@ -214,25 +163,17 @@ export default class AiSettingsWidget extends OptionsWidget {
this.$widget.find('.ollama-base-url').on('change', async () => {
const selectedProvider = this.$widget.find('.ai-selected-provider').val() as string;
const selectedEmbeddingProvider = this.$widget.find('.embedding-selected-provider').val() as string;
if (selectedProvider === 'ollama') {
await this.fetchModelsForProvider('ollama', 'chat');
}
if (selectedEmbeddingProvider === 'ollama') {
await this.fetchModelsForProvider('ollama', 'embedding');
}
});
// Add API key change handlers to trigger model fetching
this.$widget.find('.openai-api-key').on('change', async () => {
const selectedProvider = this.$widget.find('.ai-selected-provider').val() as string;
const selectedEmbeddingProvider = this.$widget.find('.embedding-selected-provider').val() as string;
if (selectedProvider === 'openai') {
await this.fetchModelsForProvider('openai', 'chat');
}
if (selectedEmbeddingProvider === 'openai') {
await this.fetchModelsForProvider('openai', 'embedding');
}
});
this.$widget.find('.anthropic-api-key').on('change', async () => {
@@ -242,85 +183,6 @@ export default class AiSettingsWidget extends OptionsWidget {
}
});
this.$widget.find('.voyage-api-key').on('change', async () => {
const selectedEmbeddingProvider = this.$widget.find('.embedding-selected-provider').val() as string;
if (selectedEmbeddingProvider === 'voyage') {
// Voyage doesn't have dynamic model fetching yet, but we can add it here when implemented
console.log('Voyage API key changed - model fetching not yet implemented');
}
});
// Add embedding base URL change handlers to trigger model fetching
this.$widget.find('.openai-embedding-base-url').on('change', async () => {
const selectedEmbeddingProvider = this.$widget.find('.embedding-selected-provider').val() as string;
if (selectedEmbeddingProvider === 'openai') {
await this.fetchModelsForProvider('openai', 'embedding');
}
});
this.$widget.find('.voyage-embedding-base-url').on('change', async () => {
const selectedEmbeddingProvider = this.$widget.find('.embedding-selected-provider').val() as string;
if (selectedEmbeddingProvider === 'voyage') {
// Voyage doesn't have dynamic model fetching yet, but we can add it here when implemented
console.log('Voyage embedding base URL changed - model fetching not yet implemented');
}
});
this.$widget.find('.ollama-embedding-base-url').on('change', async () => {
const selectedEmbeddingProvider = this.$widget.find('.embedding-selected-provider').val() as string;
if (selectedEmbeddingProvider === 'ollama') {
await this.fetchModelsForProvider('ollama', 'embedding');
}
});
// Add embedding API key change handlers to trigger model fetching
this.$widget.find('.openai-embedding-api-key').on('change', async () => {
const selectedEmbeddingProvider = this.$widget.find('.embedding-selected-provider').val() as string;
if (selectedEmbeddingProvider === 'openai') {
await this.fetchModelsForProvider('openai', 'embedding');
}
});
// No sortable behavior needed anymore
// Embedding stats refresh button
const $refreshStats = this.$widget.find('.embedding-refresh-stats');
$refreshStats.on('click', async () => {
await this.refreshEmbeddingStats();
await this.fetchFailedEmbeddingNotes();
});
// Recreate embeddings button
const $recreateEmbeddings = this.$widget.find('.recreate-embeddings');
$recreateEmbeddings.on('click', async () => {
if (confirm(t("ai_llm.recreate_embeddings_confirm") || "Are you sure you want to recreate all embeddings? This may take a long time.")) {
try {
await server.post('llm/embeddings/reprocess');
toastService.showMessage(t("ai_llm.recreate_embeddings_started"));
// Start progress polling
this.pollIndexRebuildProgress();
} catch (e) {
console.error('Error starting embeddings regeneration:', e);
toastService.showError(t("ai_llm.recreate_embeddings_error"));
}
}
});
// Rebuild index button
const $rebuildIndex = this.$widget.find('.rebuild-embeddings-index');
$rebuildIndex.on('click', async () => {
try {
await server.post('llm/embeddings/rebuild-index');
toastService.showMessage(t("ai_llm.rebuild_index_started"));
// Start progress polling
this.pollIndexRebuildProgress();
} catch (e) {
console.error('Error starting index rebuild:', e);
toastService.showError(t("ai_llm.rebuild_index_error"));
}
});
}
/**
@@ -360,30 +222,9 @@ export default class AiSettingsWidget extends OptionsWidget {
}
}
// Similar checks for embeddings
const embeddingWarnings: string[] = [];
const embeddingsEnabled = this.$widget.find('.enable-automatic-indexing').prop('checked');
if (embeddingsEnabled) {
const selectedEmbeddingProvider = this.$widget.find('.embedding-selected-provider').val() as string;
if (selectedEmbeddingProvider === 'openai' && !this.$widget.find('.openai-api-key').val()) {
embeddingWarnings.push(t("ai_llm.empty_key_warning.openai"));
}
if (selectedEmbeddingProvider === 'voyage' && !this.$widget.find('.voyage-api-key').val()) {
embeddingWarnings.push(t("ai_llm.empty_key_warning.voyage"));
}
if (selectedEmbeddingProvider === 'ollama' && !this.$widget.find('.ollama-embedding-base-url').val()) {
embeddingWarnings.push(t("ai_llm.empty_key_warning.ollama"));
}
}
// Combine all warnings
const allWarnings = [
...providerWarnings,
...embeddingWarnings
...providerWarnings
];
// Show or hide warnings
@@ -396,168 +237,6 @@ export default class AiSettingsWidget extends OptionsWidget {
}
}
/**
* Poll for index rebuild progress
*/
pollIndexRebuildProgress() {
if (this.indexRebuildRefreshInterval) {
clearInterval(this.indexRebuildRefreshInterval);
}
// Set up polling interval for index rebuild progress
this.indexRebuildRefreshInterval = setInterval(async () => {
await this.refreshEmbeddingStats();
}, this.STATS_REFRESH_INTERVAL);
// Stop polling after 5 minutes to avoid indefinite polling
setTimeout(() => {
if (this.indexRebuildRefreshInterval) {
clearInterval(this.indexRebuildRefreshInterval);
this.indexRebuildRefreshInterval = null;
}
}, 5 * 60 * 1000);
}
/**
* Refresh embedding statistics
*/
async refreshEmbeddingStats() {
if (!this.$widget) return;
try {
const response = await server.get<EmbeddingStats>('llm/embeddings/stats');
if (response && response.success) {
const stats = response.stats;
// Update stats display
this.$widget.find('.embedding-processed-notes').text(stats.embeddedNotesCount);
this.$widget.find('.embedding-total-notes').text(stats.totalNotesCount);
this.$widget.find('.embedding-queued-notes').text(stats.queuedNotesCount);
this.$widget.find('.embedding-failed-notes').text(stats.failedNotesCount);
if (stats.lastProcessedDate) {
const date = new Date(stats.lastProcessedDate);
this.$widget.find('.embedding-last-processed').text(date.toLocaleString());
} else {
this.$widget.find('.embedding-last-processed').text('-');
}
// Update progress bar
const $progressBar = this.$widget.find('.embedding-progress');
const progressPercent = stats.percentComplete;
$progressBar.css('width', `${progressPercent}%`);
$progressBar.attr('aria-valuenow', progressPercent.toString());
$progressBar.text(`${progressPercent}%`);
// Update status text
let statusText;
if (stats.queuedNotesCount > 0) {
statusText = t("ai_llm.agent.processing", { percentage: progressPercent });
} else if (stats.embeddedNotesCount === 0) {
statusText = t("ai_llm.not_started");
} else if (stats.embeddedNotesCount === stats.totalNotesCount) {
statusText = t("ai_llm.complete");
// Clear polling interval if processing is complete
if (this.indexRebuildRefreshInterval) {
clearInterval(this.indexRebuildRefreshInterval);
this.indexRebuildRefreshInterval = null;
}
} else {
statusText = t("ai_llm.partial", { percentage: progressPercent });
}
this.$widget.find('.embedding-status-text').text(statusText);
}
} catch (e) {
console.error('Error fetching embedding stats:', e);
}
}
/**
* Fetch failed embedding notes
*/
async fetchFailedEmbeddingNotes() {
if (!this.$widget) return;
try {
const response = await server.get<FailedEmbeddingNotes>('llm/embeddings/failed');
if (response && response.success) {
const failedNotes = response.failedNotes || [];
const $failedNotesList = this.$widget.find('.embedding-failed-notes-list');
if (failedNotes.length === 0) {
$failedNotesList.html(`<div class="alert alert-info">${t("ai_llm.no_failed_embeddings")}</div>`);
return;
}
// Create a table with failed notes
let html = `
<table class="table table-sm table-striped">
<thead>
<tr>
<th>${t("ai_llm.note_title")}</th>
<th>${t("ai_llm.error")}</th>
<th>${t("ai_llm.last_attempt")}</th>
<th>${t("ai_llm.actions")}</th>
</tr>
</thead>
<tbody>
`;
for (const note of failedNotes) {
const date = new Date(note.lastAttempt);
const isPermanent = note.isPermanent;
const noteTitle = note.title || note.noteId;
html += `
<tr data-note-id="${note.noteId}">
<td><a href="#" class="open-note">${noteTitle}</a></td>
<td>${note.error}</td>
<td>${date.toLocaleString()}</td>
<td>
<button class="btn btn-sm btn-outline-secondary retry-embedding" ${isPermanent ? 'disabled' : ''}>
${t("ai_llm.retry")}
</button>
</td>
</tr>
`;
}
html += `
</tbody>
</table>
`;
$failedNotesList.html(html);
// Add event handlers for retry buttons
$failedNotesList.find('.retry-embedding').on('click', async function() {
const noteId = $(this).closest('tr').data('note-id');
try {
await server.post('llm/embeddings/retry', { noteId });
toastService.showMessage(t("ai_llm.retry_queued"));
// Remove this row or update status
$(this).closest('tr').remove();
} catch (e) {
console.error('Error retrying embedding:', e);
toastService.showError(t("ai_llm.retry_failed"));
}
});
// Add event handlers for open note links
$failedNotesList.find('.open-note').on('click', function(e) {
e.preventDefault();
const noteId = $(this).closest('tr').data('note-id');
window.open(`#${noteId}`, '_blank');
});
}
} catch (e) {
console.error('Error fetching failed embedding notes:', e);
}
}
/**
* Helper to get display name for providers
@@ -594,7 +273,7 @@ export default class AiSettingsWidget extends OptionsWidget {
/**
* Fetch models for a specific provider and model type
*/
async fetchModelsForProvider(provider: string, modelType: 'chat' | 'embedding') {
async fetchModelsForProvider(provider: string, modelType: 'chat') {
if (!this.providerService) return;
try {
@@ -629,12 +308,6 @@ export default class AiSettingsWidget extends OptionsWidget {
this.$widget.find(`.${selectedAiProvider}-provider-settings`).show();
}
// Update embedding provider settings visibility
const selectedEmbeddingProvider = this.$widget.find('.embedding-selected-provider').val() as string;
this.$widget.find('.embedding-provider-settings').hide();
if (selectedEmbeddingProvider) {
this.$widget.find(`.${selectedEmbeddingProvider}-embedding-provider-settings`).show();
}
}
/**
@@ -653,7 +326,6 @@ export default class AiSettingsWidget extends OptionsWidget {
this.$widget.find('.openai-api-key').val(options.openaiApiKey || '');
this.$widget.find('.openai-base-url').val(options.openaiBaseUrl || 'https://api.openai.com/v1');
this.setModelDropdownValue('.openai-default-model', options.openaiDefaultModel);
this.setModelDropdownValue('.openai-embedding-model', options.openaiEmbeddingModel);
// Anthropic Section
this.$widget.find('.anthropic-api-key').val(options.anthropicApiKey || '');
@@ -662,58 +334,26 @@ export default class AiSettingsWidget extends OptionsWidget {
// Voyage Section
this.$widget.find('.voyage-api-key').val(options.voyageApiKey || '');
this.$widget.find('.voyage-embedding-base-url').val(options.voyageEmbeddingBaseUrl || 'https://api.voyageai.com/v1');
this.setModelDropdownValue('.voyage-embedding-model', options.voyageEmbeddingModel);
// Ollama Section
this.$widget.find('.ollama-base-url').val(options.ollamaBaseUrl || 'http://localhost:11434');
this.$widget.find('.ollama-embedding-base-url').val(options.ollamaEmbeddingBaseUrl || 'http://localhost:11434');
this.setModelDropdownValue('.ollama-default-model', options.ollamaDefaultModel);
this.setModelDropdownValue('.ollama-embedding-model', options.ollamaEmbeddingModel);
// Embedding-specific provider options
this.$widget.find('.openai-embedding-api-key').val(options.openaiEmbeddingApiKey || '');
this.$widget.find('.openai-embedding-base-url').val(options.openaiEmbeddingBaseUrl || 'https://api.openai.com/v1');
// Embedding Options
this.$widget.find('.embedding-selected-provider').val(options.embeddingSelectedProvider || 'openai');
this.$widget.find('.embedding-auto-update-enabled').prop('checked', options.embeddingAutoUpdateEnabled !== 'false');
this.$widget.find('.enable-automatic-indexing').prop('checked', options.enableAutomaticIndexing !== 'false');
this.$widget.find('.embedding-similarity-threshold').val(options.embeddingSimilarityThreshold || '0.75');
this.$widget.find('.max-notes-per-llm-query').val(options.maxNotesPerLlmQuery || '3');
this.$widget.find('.embedding-dimension-strategy').val(options.embeddingDimensionStrategy || 'auto');
this.$widget.find('.embedding-batch-size').val(options.embeddingBatchSize || '10');
this.$widget.find('.embedding-update-interval').val(options.embeddingUpdateInterval || '5000');
// Show/hide provider settings based on selected providers
this.updateProviderSettingsVisibility();
// Automatically fetch models for currently selected providers
const selectedAiProvider = this.$widget.find('.ai-selected-provider').val() as string;
const selectedEmbeddingProvider = this.$widget.find('.embedding-selected-provider').val() as string;
if (selectedAiProvider) {
await this.fetchModelsForProvider(selectedAiProvider, 'chat');
}
if (selectedEmbeddingProvider) {
await this.fetchModelsForProvider(selectedEmbeddingProvider, 'embedding');
}
// Display validation warnings
this.displayValidationWarnings();
}
cleanup() {
// Clear intervals
if (this.statsRefreshInterval) {
clearInterval(this.statsRefreshInterval);
this.statsRefreshInterval = null;
}
if (this.indexRebuildRefreshInterval) {
clearInterval(this.indexRebuildRefreshInterval);
this.indexRebuildRefreshInterval = null;
}
// Cleanup method for widget
}
}

View File

@@ -11,34 +11,6 @@ export interface OllamaModelResponse {
}>;
}
// Interface for embedding statistics
export interface EmbeddingStats {
success: boolean;
stats: {
totalNotesCount: number;
embeddedNotesCount: number;
queuedNotesCount: number;
failedNotesCount: number;
lastProcessedDate: string | null;
percentComplete: number;
}
}
// Interface for failed embedding notes
export interface FailedEmbeddingNotes {
success: boolean;
failedNotes: Array<{
noteId: string;
title?: string;
operation: string;
attempts: number;
lastAttempt: string;
error: string;
failureType: string;
chunks: number;
isPermanent: boolean;
}>;
}
export interface OpenAIModelResponse {
success: boolean;
@@ -47,11 +19,6 @@ export interface OpenAIModelResponse {
name: string;
type: string;
}>;
embeddingModels: Array<{
id: string;
name: string;
type: string;
}>;
}
export interface AnthropicModelResponse {
@@ -61,9 +28,4 @@ export interface AnthropicModelResponse {
name: string;
type: string;
}>;
embeddingModels: Array<{
id: string;
name: string;
type: string;
}>;
}

View File

@@ -6,21 +6,7 @@ import type { OpenAIModelResponse, AnthropicModelResponse, OllamaModelResponse }
export class ProviderService {
constructor(private $widget: JQuery<HTMLElement>) {
// Initialize Voyage models (since they don't have a dynamic refresh yet)
this.initializeVoyageModels();
}
/**
* Initialize Voyage models with default values and ensure proper selection
*/
private initializeVoyageModels() {
setTimeout(() => {
const $voyageModelSelect = this.$widget.find('.voyage-embedding-model');
if ($voyageModelSelect.length > 0) {
const currentValue = $voyageModelSelect.val();
this.ensureSelectedValue($voyageModelSelect, currentValue, 'voyageEmbeddingModel');
}
}, 100); // Small delay to ensure the widget is fully initialized
// Embedding functionality removed
}
/**
@@ -95,29 +81,10 @@ export class ProviderService {
this.ensureSelectedValue($chatModelSelect, currentChatValue, 'openaiDefaultModel');
}
// Update the embedding models dropdown
if (response.embeddingModels?.length > 0) {
const $embedModelSelect = this.$widget.find('.openai-embedding-model');
const currentEmbedValue = $embedModelSelect.val();
// Clear existing options
$embedModelSelect.empty();
// Sort models by name
const sortedEmbedModels = [...response.embeddingModels].sort((a, b) => a.name.localeCompare(b.name));
// Add models to the dropdown
sortedEmbedModels.forEach(model => {
$embedModelSelect.append(`<option value="${model.id}">${model.name}</option>`);
});
// Try to restore the previously selected value
this.ensureSelectedValue($embedModelSelect, currentEmbedValue, 'openaiEmbeddingModel');
}
if (showLoading) {
// Show success message
const totalModels = (response.chatModels?.length || 0) + (response.embeddingModels?.length || 0);
const totalModels = (response.chatModels?.length || 0);
toastService.showMessage(`${totalModels} OpenAI models found.`);
}
@@ -187,14 +154,9 @@ export class ProviderService {
this.ensureSelectedValue($chatModelSelect, currentChatValue, 'anthropicDefaultModel');
}
// Handle embedding models if they exist
if (response.embeddingModels?.length > 0 && showLoading) {
toastService.showMessage(`Found ${response.embeddingModels.length} Anthropic embedding models.`);
}
if (showLoading) {
// Show success message
const totalModels = (response.chatModels?.length || 0) + (response.embeddingModels?.length || 0);
const totalModels = (response.chatModels?.length || 0);
toastService.showMessage(`${totalModels} Anthropic models found.`);
}
@@ -240,66 +202,13 @@ export class ProviderService {
}
try {
// Determine which URL to use based on the current context
// If we're in the embedding provider context, use the embedding base URL
// Otherwise, use the general base URL
const selectedAiProvider = this.$widget.find('.ai-selected-provider').val() as string;
const selectedEmbeddingProvider = this.$widget.find('.embedding-selected-provider').val() as string;
let ollamaBaseUrl: string;
// If embedding provider is Ollama and it's visible, use embedding URL
const $embeddingOllamaSettings = this.$widget.find('.ollama-embedding-provider-settings');
if (selectedEmbeddingProvider === 'ollama' && $embeddingOllamaSettings.is(':visible')) {
ollamaBaseUrl = this.$widget.find('.ollama-embedding-base-url').val() as string;
} else {
ollamaBaseUrl = this.$widget.find('.ollama-base-url').val() as string;
}
// Use the general Ollama base URL
const ollamaBaseUrl = this.$widget.find('.ollama-base-url').val() as string;
const response = await server.get<OllamaModelResponse>(`llm/providers/ollama/models?baseUrl=${encodeURIComponent(ollamaBaseUrl)}`);
if (response && response.success && response.models && response.models.length > 0) {
// Update both embedding model dropdowns
const $embedModelSelect = this.$widget.find('.ollama-embedding-model');
const $chatEmbedModelSelect = this.$widget.find('.ollama-chat-embedding-model');
const currentValue = $embedModelSelect.val();
const currentChatEmbedValue = $chatEmbedModelSelect.val();
// Prepare embedding models
const embeddingModels = response.models.filter(model =>
model.name.includes('embed') || model.name.includes('bert'));
const generalModels = response.models.filter(model =>
!model.name.includes('embed') && !model.name.includes('bert'));
// Update .ollama-embedding-model dropdown (embedding provider settings)
$embedModelSelect.empty();
embeddingModels.forEach(model => {
$embedModelSelect.append(`<option value="${model.name}">${model.name}</option>`);
});
if (embeddingModels.length > 0) {
$embedModelSelect.append(`<option disabled>─────────────</option>`);
}
generalModels.forEach(model => {
$embedModelSelect.append(`<option value="${model.name}">${model.name}</option>`);
});
this.ensureSelectedValue($embedModelSelect, currentValue, 'ollamaEmbeddingModel');
// Update .ollama-chat-embedding-model dropdown (general Ollama provider settings)
$chatEmbedModelSelect.empty();
embeddingModels.forEach(model => {
$chatEmbedModelSelect.append(`<option value="${model.name}">${model.name}</option>`);
});
if (embeddingModels.length > 0) {
$chatEmbedModelSelect.append(`<option disabled>─────────────</option>`);
}
generalModels.forEach(model => {
$chatEmbedModelSelect.append(`<option value="${model.name}">${model.name}</option>`);
});
this.ensureSelectedValue($chatEmbedModelSelect, currentChatEmbedValue, 'ollamaEmbeddingModel');
// Also update the LLM model dropdown
// Update the LLM model dropdown
const $modelSelect = this.$widget.find('.ollama-default-model');
const currentModelValue = $modelSelect.val();

View File

@@ -16,46 +16,7 @@ export const TPL = `
</div>
</div>
<div class="options-section">
<h4>${t("ai_llm.embedding_statistics")}</h4>
<div class="embedding-stats-container">
<div class="embedding-stats">
<div class="row">
<div class="col-md-6">
<div><strong>${t("ai_llm.processed_notes")}:</strong> <span class="embedding-processed-notes">-</span></div>
<div><strong>${t("ai_llm.total_notes")}:</strong> <span class="embedding-total-notes">-</span></div>
<div><strong>${t("ai_llm.progress")}:</strong> <span class="embedding-status-text">-</span></div>
</div>
<div class="col-md-6">
<div><strong>${t("ai_llm.queued_notes")}:</strong> <span class="embedding-queued-notes">-</span></div>
<div><strong>${t("ai_llm.failed_notes")}:</strong> <span class="embedding-failed-notes">-</span></div>
<div><strong>${t("ai_llm.last_processed")}:</strong> <span class="embedding-last-processed">-</span></div>
</div>
</div>
</div>
<div class="progress mt-1" style="height: 10px;">
<div class="progress-bar embedding-progress" role="progressbar" style="width: 0%;"
aria-valuenow="0" aria-valuemin="0" aria-valuemax="100">0%</div>
</div>
<div class="mt-2">
<button class="btn btn-sm btn-outline-secondary embedding-refresh-stats">
${t("ai_llm.refresh_stats")}
</button>
</div>
</div>
<hr/>
<!-- Failed embeddings section -->
<h5>${t("ai_llm.failed_notes")}</h5>
<div class="form-group mt-4">
<div class="embedding-failed-notes-container">
<div class="embedding-failed-notes-list">
<div class="alert alert-info">${t("ai_llm.no_failed_embeddings")}</div>
</div>
</div>
</div>
</div>
<!-- Embedding statistics section removed -->
<div class="ai-providers-section options-section">
<h4>${t("ai_llm.provider_configuration")}</h4>
@@ -171,188 +132,4 @@ export const TPL = `
</div>
</div>
<div class="options-section">
<h4>${t("ai_llm.embeddings_configuration")}</h4>
<div class="form-group">
<label class="embedding-provider-label">${t("ai_llm.selected_embedding_provider")}</label>
<select class="embedding-selected-provider form-control">
<option value="">${t("ai_llm.select_embedding_provider")}</option>
<option value="openai">OpenAI</option>
<option value="voyage">Voyage AI</option>
<option value="ollama">Ollama</option>
<option value="local">Local</option>
</select>
<div class="form-text">${t("ai_llm.selected_embedding_provider_description")}</div>
</div>
<!-- OpenAI Embedding Provider Settings -->
<div class="embedding-provider-settings openai-embedding-provider-settings" style="display: none;">
<div class="card mt-3">
<div class="card-header">
<h5>${t("ai_llm.openai_embedding_settings")}</h5>
</div>
<div class="card-body">
<div class="form-group">
<label>${t("ai_llm.api_key")}</label>
<input type="password" class="openai-embedding-api-key form-control" autocomplete="off" />
<div class="form-text">${t("ai_llm.openai_embedding_api_key_description")}</div>
</div>
<div class="form-group">
<label>${t("ai_llm.url")}</label>
<input type="text" class="openai-embedding-base-url form-control" />
<div class="form-text">${t("ai_llm.openai_embedding_url_description")}</div>
</div>
<div class="form-group">
<label>${t("ai_llm.embedding_model")}</label>
<select class="openai-embedding-model form-control">
<option value="">${t("ai_llm.select_model")}</option>
</select>
<div class="form-text">${t("ai_llm.openai_embedding_model_description")}</div>
</div>
</div>
</div>
</div>
<!-- Voyage Embedding Provider Settings -->
<div class="embedding-provider-settings voyage-embedding-provider-settings" style="display: none;">
<div class="card mt-3">
<div class="card-header">
<h5>${t("ai_llm.voyage_settings")}</h5>
</div>
<div class="card-body">
<div class="form-group">
<label>${t("ai_llm.api_key")}</label>
<input type="password" class="voyage-api-key form-control" autocomplete="off" />
<div class="form-text">${t("ai_llm.voyage_api_key_description")}</div>
</div>
<div class="form-group">
<label>${t("ai_llm.url")}</label>
<input type="text" class="voyage-embedding-base-url form-control" />
<div class="form-text">${t("ai_llm.voyage_embedding_url_description")}</div>
</div>
<div class="form-group">
<label>${t("ai_llm.embedding_model")}</label>
<select class="voyage-embedding-model form-control">
<option value="">${t("ai_llm.select_model")}</option>
</select>
<div class="form-text">${t("ai_llm.voyage_embedding_model_description")}</div>
</div>
</div>
</div>
</div>
<!-- Ollama Embedding Provider Settings -->
<div class="embedding-provider-settings ollama-embedding-provider-settings" style="display: none;">
<div class="card mt-3">
<div class="card-header">
<h5>${t("ai_llm.ollama_embedding_settings")}</h5>
</div>
<div class="card-body">
<div class="form-group">
<label>${t("ai_llm.url")}</label>
<input type="text" class="ollama-embedding-base-url form-control" />
<div class="form-text">${t("ai_llm.ollama_embedding_url_description")}</div>
</div>
<div class="form-group">
<label>${t("ai_llm.embedding_model")}</label>
<select class="ollama-embedding-model form-control">
<option value="">${t("ai_llm.select_model")}</option>
</select>
<div class="form-text">${t("ai_llm.ollama_embedding_model_description")}</div>
</div>
</div>
</div>
</div>
<!-- Local Embedding Provider Settings -->
<div class="embedding-provider-settings local-embedding-provider-settings" style="display: none;">
<div class="card mt-3">
<div class="card-header">
<h5>${t("ai_llm.local_embedding_settings")}</h5>
</div>
<div class="card-body">
<div class="form-text">${t("ai_llm.local_embedding_description")}</div>
</div>
</div>
</div>
<div class="form-group">
<label>${t("ai_llm.embedding_dimension_strategy")}</label>
<select class="embedding-dimension-strategy form-control">
<option value="auto">${t("ai_llm.embedding_dimension_auto")}</option>
<option value="fixed-768">${t("ai_llm.embedding_dimension_fixed")} (768)</option>
<option value="fixed-1024">${t("ai_llm.embedding_dimension_fixed")} (1024)</option>
<option value="fixed-1536">${t("ai_llm.embedding_dimension_fixed")} (1536)</option>
</select>
<div class="form-text">${t("ai_llm.embedding_dimension_strategy_description")}</div>
</div>
<div class="form-group">
<label>${t("ai_llm.embedding_similarity_threshold")}</label>
<input class="embedding-similarity-threshold form-control" type="number" min="0" max="1" step="0.01">
<div class="form-text">${t("ai_llm.embedding_similarity_threshold_description")}</div>
</div>
<div class="form-group">
<label>${t("ai_llm.embedding_batch_size")}</label>
<input class="embedding-batch-size form-control" type="number" min="1" max="100" step="1">
<div class="form-text">${t("ai_llm.embedding_batch_size_description")}</div>
</div>
<div class="form-group">
<label>${t("ai_llm.embedding_update_interval")}</label>
<input class="embedding-update-interval form-control" type="number" min="100" max="60000" step="100">
<div class="form-text">${t("ai_llm.embedding_update_interval_description")}</div>
</div>
<div class="form-group">
<label>${t("ai_llm.max_notes_per_llm_query")}</label>
<input class="max-notes-per-llm-query form-control" type="number" min="1" max="20" step="1">
<div class="form-text">${t("ai_llm.max_notes_per_llm_query_description")}</div>
</div>
<div class="form-group">
<label class="tn-checkbox">
<input class="enable-automatic-indexing form-check-input" type="checkbox">
${t("ai_llm.enable_automatic_indexing")}
</label>
<div class="form-text">${t("ai_llm.enable_automatic_indexing_description")}</div>
</div>
<div class="form-group mt-3">
<label class="tn-checkbox">
<input class="embedding-auto-update-enabled form-check-input" type="checkbox">
${t("ai_llm.embedding_auto_update_enabled")}
</label>
<div class="form-text">${t("ai_llm.embedding_auto_update_enabled_description")}</div>
</div>
<!-- Recreate embeddings button -->
<div class="form-group mt-3">
<button class="btn btn-outline-primary recreate-embeddings">
${t("ai_llm.recreate_embeddings")}
</button>
<div class="form-text">${t("ai_llm.recreate_embeddings_description")}</div>
</div>
<!-- Rebuild index button -->
<div class="form-group mt-3">
<button class="btn btn-outline-primary rebuild-embeddings-index">
${t("ai_llm.rebuild_index")}
</button>
<div class="form-text">${t("ai_llm.rebuild_index_description")}</div>
</div>
<!-- Note about embedding provider precedence -->
<div class="form-group mt-3">
<h5>${t("ai_llm.embedding_providers_order")}</h5>
<div class="form-text mt-2">${t("ai_llm.embedding_providers_order_description")}</div>
</div>
</div>`;
`;