feat(llm): change from using a precedence list to using a single specified provider for chat and/or embeddings

perf3ct
2025-06-04 20:13:13 +00:00
parent f9d8bf26c4
commit a20e36f4ee
15 changed files with 685 additions and 414 deletions

View File

@@ -95,7 +95,7 @@ const ALLOWED_OPTIONS = new Set<OptionNames>([
"aiEnabled",
"aiTemperature",
"aiSystemPrompt",
"aiProviderPrecedence",
"aiSelectedProvider",
"openaiApiKey",
"openaiBaseUrl",
"openaiDefaultModel",
@@ -110,7 +110,7 @@ const ALLOWED_OPTIONS = new Set<OptionNames>([
"ollamaEmbeddingModel",
"embeddingAutoUpdateEnabled",
"embeddingDimensionStrategy",
"embeddingProviderPrecedence",
"embeddingSelectedProvider",
"embeddingSimilarityThreshold",
"embeddingBatchSize",
"embeddingUpdateInterval",

View File

@@ -1,4 +1,5 @@
import options from '../options.js';
import eventService from '../events.js';
import type { AIService, ChatCompletionOptions, ChatResponse, Message } from './ai_interface.js';
import { AnthropicService } from './providers/anthropic_service.js';
import { ContextExtractor } from './context/index.js';
@@ -20,9 +21,8 @@ import type { NoteSearchResult } from './interfaces/context_interfaces.js';
// Import new configuration system
import {
getProviderPrecedence,
getPreferredProvider,
getEmbeddingProviderPrecedence,
getSelectedProvider,
getSelectedEmbeddingProvider,
parseModelIdentifier,
isAIEnabled,
getDefaultModelForProvider,
@@ -60,6 +60,9 @@ export class AIServiceManager implements IAIServiceManager {
this.initializeTools().catch(error => {
log.error(`Error initializing LLM tools during AIServiceManager construction: ${error.message || String(error)}`);
});
// Set up event listener for provider changes
this.setupProviderChangeListener();
}
/**
@@ -84,16 +87,21 @@ export class AIServiceManager implements IAIServiceManager {
}
/**
* Update the provider precedence order using the new configuration system
* Update the provider order using the new configuration system (single provider)
*/
async updateProviderOrderAsync(): Promise<void> {
try {
const providers = await getProviderPrecedence();
this.providerOrder = providers as ServiceProviders[];
const selectedProvider = await getSelectedProvider();
if (selectedProvider) {
this.providerOrder = [selectedProvider as ServiceProviders];
log.info(`Updated provider order: ${selectedProvider}`);
} else {
this.providerOrder = [];
log.info('No provider selected');
}
this.initialized = true;
log.info(`Updated provider order: ${providers.join(', ')}`);
} catch (error) {
log.error(`Failed to get provider precedence: ${error}`);
log.error(`Failed to get selected provider: ${error}`);
// Keep empty order, will be handled gracefully by other methods
this.providerOrder = [];
this.initialized = true;
@@ -521,13 +529,13 @@ export class AIServiceManager implements IAIServiceManager {
*/
async getPreferredProviderAsync(): Promise<string> {
try {
const preferredProvider = await getPreferredProvider();
if (preferredProvider === null) {
// No providers configured, fallback to first available
log.info('No providers configured in precedence, using first available provider');
const selectedProvider = await getSelectedProvider();
if (selectedProvider === null) {
// No provider selected, fallback to first available
log.info('No provider selected, using first available provider');
return this.providerOrder[0];
}
return preferredProvider;
return selectedProvider;
} catch (error) {
log.error(`Error getting preferred provider: ${error}`);
return this.providerOrder[0];
@@ -580,6 +588,7 @@ export class AIServiceManager implements IAIServiceManager {
};
}
/**
* Error handler that properly types the error object
*/
@@ -589,6 +598,75 @@ export class AIServiceManager implements IAIServiceManager {
}
return String(error);
}
/**
* Set up event listener for provider changes
*/
private setupProviderChangeListener(): void {
// List of AI-related options that should trigger service recreation
const aiRelatedOptions = [
'aiSelectedProvider',
'embeddingSelectedProvider',
'openaiApiKey',
'openaiBaseUrl',
'openaiDefaultModel',
'anthropicApiKey',
'anthropicBaseUrl',
'anthropicDefaultModel',
'ollamaBaseUrl',
'ollamaDefaultModel',
'voyageApiKey'
];
eventService.subscribe(['entityChanged'], ({ entityName, entity }) => {
if (entityName === 'options' && entity && aiRelatedOptions.includes(entity.name)) {
log.info(`AI-related option '${entity.name}' changed, recreating LLM services`);
this.recreateServices();
}
});
}
/**
* Recreate LLM services when provider settings change
*/
private async recreateServices(): Promise<void> {
try {
log.info('Recreating LLM services due to configuration change');
// Clear configuration cache first
clearConfigurationCache();
// Recreate all service instances to pick up new configuration
this.recreateServiceInstances();
// Update provider order with new configuration
await this.updateProviderOrderAsync();
log.info('LLM services recreated successfully');
} catch (error) {
log.error(`Error recreating LLM services: ${this.handleError(error)}`);
}
}
/**
* Recreate service instances to pick up new configuration
*/
private recreateServiceInstances(): void {
try {
log.info('Recreating service instances');
// Recreate service instances
this.services = {
openai: new OpenAIService(),
anthropic: new AnthropicService(),
ollama: new OllamaService()
};
log.info('Service instances recreated successfully');
} catch (error) {
log.error(`Error recreating service instances: ${this.handleError(error)}`);
}
}
}
// Don't create singleton immediately, use a lazy-loading pattern
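The recreation flow added above boils down to: subscribe once to option changes, and whenever an AI-related option changes, clear caches and rebuild the provider services. Below is a condensed, self-contained sketch of that pattern; the `eventService.subscribe` shape and `recreateServices` name mirror the diff, while the stand-in event bus and option set are assumptions for illustration only.

```typescript
// Minimal sketch of the option-change -> service-recreation pattern.
// Stand-in types and event bus, not the actual Trilium modules.

type OptionChange = { entityName: string; entity?: { name: string } };
type Subscriber = (change: OptionChange) => void;

// Stand-in for the project's eventService.
const listeners: Subscriber[] = [];
const eventService = {
    subscribe: (_events: string[], cb: Subscriber) => listeners.push(cb),
    emit: (change: OptionChange) => listeners.forEach(cb => cb(change)),
};

const AI_RELATED_OPTIONS = new Set(['aiSelectedProvider', 'embeddingSelectedProvider', 'openaiApiKey']);

class ServiceManagerSketch {
    constructor() {
        // Recreate services whenever an AI-related option changes.
        eventService.subscribe(['entityChanged'], ({ entityName, entity }) => {
            if (entityName === 'options' && entity && AI_RELATED_OPTIONS.has(entity.name)) {
                this.recreateServices();
            }
        });
    }

    private recreateServices(): void {
        // In the real manager this clears the configuration cache, rebuilds the
        // OpenAI/Anthropic/Ollama service instances and refreshes the provider order.
        console.log('recreating LLM services');
    }
}

// Usage: changing a watched option triggers recreation.
new ServiceManagerSketch();
eventService.emit({ entityName: 'options', entity: { name: 'aiSelectedProvider' } });
```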

View File

@@ -1,10 +1,9 @@
import configurationManager from './configuration_manager.js';
import optionService from '../../options.js';
import type {
ProviderType,
ModelIdentifier,
ModelConfig,
ProviderPrecedenceConfig,
EmbeddingProviderPrecedenceConfig
} from '../interfaces/configuration_interfaces.js';
/**
@@ -13,41 +12,19 @@ import type {
*/
/**
* Get the ordered list of AI providers
* Get the selected AI provider
*/
export async function getProviderPrecedence(): Promise<ProviderType[]> {
const config = await configurationManager.getProviderPrecedence();
return config.providers;
export async function getSelectedProvider(): Promise<ProviderType | null> {
const providerOption = optionService.getOption('aiSelectedProvider');
return providerOption as ProviderType || null;
}
/**
* Get the default/preferred AI provider
* Get the selected embedding provider
*/
export async function getPreferredProvider(): Promise<ProviderType | null> {
const config = await configurationManager.getProviderPrecedence();
if (config.providers.length === 0) {
return null; // No providers configured
}
return config.defaultProvider || config.providers[0];
}
/**
* Get the ordered list of embedding providers
*/
export async function getEmbeddingProviderPrecedence(): Promise<string[]> {
const config = await configurationManager.getEmbeddingProviderPrecedence();
return config.providers;
}
/**
* Get the default embedding provider
*/
export async function getPreferredEmbeddingProvider(): Promise<string | null> {
const config = await configurationManager.getEmbeddingProviderPrecedence();
if (config.providers.length === 0) {
return null; // No providers configured
}
return config.defaultProvider || config.providers[0];
export async function getSelectedEmbeddingProvider(): Promise<string | null> {
const providerOption = optionService.getOption('embeddingSelectedProvider');
return providerOption || null;
}
/**
@@ -107,22 +84,20 @@ export async function isProviderConfigured(provider: ProviderType): Promise<bool
}
/**
* Get the first available (configured) provider from the precedence list
* Get the currently selected provider if it's available and configured
*/
export async function getFirstAvailableProvider(): Promise<ProviderType | null> {
const providers = await getProviderPrecedence();
if (providers.length === 0) {
return null; // No providers configured
export async function getAvailableSelectedProvider(): Promise<ProviderType | null> {
const selectedProvider = await getSelectedProvider();
if (!selectedProvider) {
return null; // No provider selected
}
for (const provider of providers) {
if (await isProviderConfigured(provider)) {
return provider;
}
if (await isProviderConfigured(selectedProvider)) {
return selectedProvider;
}
return null; // No providers are properly configured
return null; // Selected provider is not properly configured
}
/**
@@ -163,17 +138,59 @@ export async function getValidModelConfig(provider: ProviderType): Promise<{ mod
}
/**
* Get the first valid model configuration from the provider precedence list
* Get the model configuration for the currently selected provider
*/
export async function getFirstValidModelConfig(): Promise<{ model: string; provider: ProviderType } | null> {
const providers = await getProviderPrecedence();
for (const provider of providers) {
const config = await getValidModelConfig(provider);
if (config) {
return config;
}
export async function getSelectedModelConfig(): Promise<{ model: string; provider: ProviderType } | null> {
const selectedProvider = await getSelectedProvider();
if (!selectedProvider) {
return null; // No provider selected
}
return null; // No valid model configuration found
return await getValidModelConfig(selectedProvider);
}
// Legacy support functions - these maintain backwards compatibility but now use single provider logic
/**
* @deprecated Use getSelectedProvider() instead
*/
export async function getProviderPrecedence(): Promise<ProviderType[]> {
const selected = await getSelectedProvider();
return selected ? [selected] : [];
}
/**
* @deprecated Use getSelectedProvider() instead
*/
export async function getPreferredProvider(): Promise<ProviderType | null> {
return await getSelectedProvider();
}
/**
* @deprecated Use getSelectedEmbeddingProvider() instead
*/
export async function getEmbeddingProviderPrecedence(): Promise<string[]> {
const selected = await getSelectedEmbeddingProvider();
return selected ? [selected] : [];
}
/**
* @deprecated Use getSelectedEmbeddingProvider() instead
*/
export async function getPreferredEmbeddingProvider(): Promise<string | null> {
return await getSelectedEmbeddingProvider();
}
/**
* @deprecated Use getAvailableSelectedProvider() instead
*/
export async function getFirstAvailableProvider(): Promise<ProviderType | null> {
return await getAvailableSelectedProvider();
}
/**
* @deprecated Use getSelectedModelConfig() instead
*/
export async function getFirstValidModelConfig(): Promise<{ model: string; provider: ProviderType } | null> {
return await getSelectedModelConfig();
}
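From a caller's perspective, the helpers now read a single option rather than parsing a list, and the deprecated precedence functions simply wrap that selection. A small sketch of that call-site behavior follows; the in-memory option store is a stand-in for `optionService`, while the helper names match the exports above.

```typescript
// Sketch of consuming the new single-provider helpers (stand-in option store).

type ProviderType = 'openai' | 'anthropic' | 'ollama';

// Stand-in for optionService: one selected provider instead of a precedence list.
const optionStore = new Map<string, string>([['aiSelectedProvider', 'anthropic']]);

async function getSelectedProvider(): Promise<ProviderType | null> {
    return (optionStore.get('aiSelectedProvider') as ProviderType) ?? null;
}

// The deprecated precedence helper now just wraps the single selection.
async function getProviderPrecedence(): Promise<ProviderType[]> {
    const selected = await getSelectedProvider();
    return selected ? [selected] : [];
}

async function main() {
    console.log(await getSelectedProvider());    // "anthropic"
    console.log(await getProviderPrecedence());  // ["anthropic"] -- at most one entry now
}

main();
```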

View File

@@ -50,8 +50,8 @@ export class ConfigurationManager {
try {
const config: AIConfig = {
enabled: await this.getAIEnabled(),
providerPrecedence: await this.getProviderPrecedence(),
embeddingProviderPrecedence: await this.getEmbeddingProviderPrecedence(),
selectedProvider: await this.getSelectedProvider(),
selectedEmbeddingProvider: await this.getSelectedEmbeddingProvider(),
defaultModels: await this.getDefaultModels(),
providerSettings: await this.getProviderSettings()
};
@@ -66,46 +66,28 @@ export class ConfigurationManager {
}
/**
* Parse provider precedence from string option
* Get the selected AI provider
*/
public async getProviderPrecedence(): Promise<ProviderPrecedenceConfig> {
public async getSelectedProvider(): Promise<ProviderType | null> {
try {
const precedenceOption = await options.getOption('aiProviderPrecedence');
const providers = this.parseProviderList(precedenceOption);
return {
providers: providers as ProviderType[],
defaultProvider: providers.length > 0 ? providers[0] as ProviderType : undefined
};
const selectedProvider = await options.getOption('aiSelectedProvider');
return selectedProvider as ProviderType || null;
} catch (error) {
log.error(`Error parsing provider precedence: ${error}`);
// Only return known providers if they exist, don't assume defaults
return {
providers: [],
defaultProvider: undefined
};
log.error(`Error getting selected provider: ${error}`);
return null;
}
}
/**
* Parse embedding provider precedence from string option
* Get the selected embedding provider
*/
public async getEmbeddingProviderPrecedence(): Promise<EmbeddingProviderPrecedenceConfig> {
public async getSelectedEmbeddingProvider(): Promise<EmbeddingProviderType | null> {
try {
const precedenceOption = await options.getOption('embeddingProviderPrecedence');
const providers = this.parseProviderList(precedenceOption);
return {
providers: providers as EmbeddingProviderType[],
defaultProvider: providers.length > 0 ? providers[0] as EmbeddingProviderType : undefined
};
const selectedProvider = await options.getOption('embeddingSelectedProvider');
return selectedProvider as EmbeddingProviderType || null;
} catch (error) {
log.error(`Error parsing embedding provider precedence: ${error}`);
// Don't assume defaults, return empty configuration
return {
providers: [],
defaultProvider: undefined
};
log.error(`Error getting selected embedding provider: ${error}`);
return null;
}
}
@@ -265,31 +247,29 @@ export class ConfigurationManager {
return result;
}
// Validate provider precedence
if (config.providerPrecedence.providers.length === 0) {
result.errors.push('No providers configured in precedence list');
// Validate selected provider
if (!config.selectedProvider) {
result.errors.push('No AI provider selected');
result.isValid = false;
}
} else {
// Validate selected provider settings
const providerConfig = config.providerSettings[config.selectedProvider];
// Validate provider settings
for (const provider of config.providerPrecedence.providers) {
const providerConfig = config.providerSettings[provider];
if (provider === 'openai') {
if (config.selectedProvider === 'openai') {
const openaiConfig = providerConfig as OpenAISettings | undefined;
if (!openaiConfig?.apiKey) {
result.warnings.push('OpenAI API key is not configured');
}
}
if (provider === 'anthropic') {
if (config.selectedProvider === 'anthropic') {
const anthropicConfig = providerConfig as AnthropicSettings | undefined;
if (!anthropicConfig?.apiKey) {
result.warnings.push('Anthropic API key is not configured');
}
}
if (provider === 'ollama') {
if (config.selectedProvider === 'ollama') {
const ollamaConfig = providerConfig as OllamaSettings | undefined;
if (!ollamaConfig?.baseUrl) {
result.warnings.push('Ollama base URL is not configured');
@@ -297,6 +277,11 @@ export class ConfigurationManager {
}
}
// Validate selected embedding provider
if (!config.selectedEmbeddingProvider) {
result.warnings.push('No embedding provider selected');
}
} catch (error) {
result.errors.push(`Configuration validation error: ${error}`);
result.isValid = false;
@@ -356,14 +341,8 @@ export class ConfigurationManager {
private getDefaultConfig(): AIConfig {
return {
enabled: false,
providerPrecedence: {
providers: [],
defaultProvider: undefined
},
embeddingProviderPrecedence: {
providers: [],
defaultProvider: undefined
},
selectedProvider: null,
selectedEmbeddingProvider: null,
defaultModels: {
openai: undefined,
anthropic: undefined,

View File

@@ -1,51 +1,32 @@
import options from '../../../options.js';
import log from '../../../log.js';
import { getEmbeddingProvider, getEnabledEmbeddingProviders } from '../../providers/providers.js';
import { getSelectedEmbeddingProvider } from '../../config/configuration_helpers.js';
/**
* Manages embedding providers for context services
*/
export class ProviderManager {
/**
* Get the preferred embedding provider based on user settings
* Tries to use the most appropriate provider in this order:
* 1. User's configured default provider
* 2. OpenAI if API key is set
* 3. Anthropic if API key is set
* 4. Ollama if configured
* 5. Any available provider
* 6. Local provider as fallback
* Get the selected embedding provider based on user settings
* Uses the single provider selection approach
*
* @returns The preferred embedding provider or null if none available
* @returns The selected embedding provider or null if none available
*/
async getPreferredEmbeddingProvider(): Promise<any> {
try {
// Try to get providers based on precedence list
const precedenceOption = await options.getOption('embeddingProviderPrecedence');
let precedenceList: string[] = [];
if (precedenceOption) {
if (precedenceOption.startsWith('[') && precedenceOption.endsWith(']')) {
precedenceList = JSON.parse(precedenceOption);
} else if (typeof precedenceOption === 'string') {
if (precedenceOption.includes(',')) {
precedenceList = precedenceOption.split(',').map(p => p.trim());
} else {
precedenceList = [precedenceOption];
}
}
}
// Try each provider in the precedence list
for (const providerId of precedenceList) {
const provider = await getEmbeddingProvider(providerId);
// Get the selected embedding provider
const selectedProvider = await getSelectedEmbeddingProvider();
if (selectedProvider) {
const provider = await getEmbeddingProvider(selectedProvider);
if (provider) {
log.info(`Using embedding provider from precedence list: ${providerId}`);
log.info(`Using selected embedding provider: ${selectedProvider}`);
return provider;
}
log.info(`Selected embedding provider ${selectedProvider} is not available`);
}
// If no provider from precedence list is available, try any enabled provider
// If no provider is selected or available, try any enabled provider
const providers = await getEnabledEmbeddingProviders();
if (providers.length > 0) {
log.info(`Using available embedding provider: ${providers[0].name}`);

View File

@@ -497,40 +497,24 @@ export class IndexService {
throw new Error("No embedding providers available");
}
// Get the embedding provider precedence
// Get the selected embedding provider
const options = (await import('../options.js')).default;
let preferredProviders: string[] = [];
const embeddingPrecedence = await options.getOption('embeddingProviderPrecedence');
const selectedEmbeddingProvider = await options.getOption('embeddingSelectedProvider');
let provider;
if (embeddingPrecedence) {
// Parse the precedence string
if (embeddingPrecedence.startsWith('[') && embeddingPrecedence.endsWith(']')) {
preferredProviders = JSON.parse(embeddingPrecedence);
} else if (typeof embeddingPrecedence === 'string') {
if (embeddingPrecedence.includes(',')) {
preferredProviders = embeddingPrecedence.split(',').map(p => p.trim());
} else {
preferredProviders = [embeddingPrecedence];
}
}
// Find first enabled provider by precedence order
for (const providerName of preferredProviders) {
const matchedProvider = providers.find(p => p.name === providerName);
if (matchedProvider) {
provider = matchedProvider;
break;
}
}
// If no match found, use first available
if (!provider && providers.length > 0) {
if (selectedEmbeddingProvider) {
// Try to use the selected provider
const enabledProviders = await providerManager.getEnabledEmbeddingProviders();
provider = enabledProviders.find(p => p.name === selectedEmbeddingProvider);
if (!provider) {
log.info(`Selected embedding provider ${selectedEmbeddingProvider} is not available, using first enabled provider`);
// Fall back to first enabled provider
provider = providers[0];
}
} else {
// Default to first available provider
// No provider selected, use first available provider
log.info('No embedding provider selected, using first available provider');
provider = providers[0];
}
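The selection logic above reduces to: prefer the selected embedding provider if it is enabled, otherwise fall back to the first enabled one. A standalone sketch of that decision, with stand-in provider shapes rather than the real embedding interfaces:

```typescript
// Prefer the selected provider when enabled; otherwise fall back to the first enabled one.

interface EmbeddingProviderLike { name: string; }

function pickEmbeddingProvider(
    enabled: EmbeddingProviderLike[],
    selectedName: string | null,
): EmbeddingProviderLike | undefined {
    if (selectedName) {
        const match = enabled.find(p => p.name === selectedName);
        if (match) return match;
        console.log(`Selected embedding provider ${selectedName} is not available, using first enabled provider`);
    }
    return enabled[0]; // may be undefined if nothing is enabled
}

// Example: selected "voyage" is not enabled, so "openai" wins.
console.log(pickEmbeddingProvider([{ name: 'openai' }, { name: 'ollama' }], 'voyage'));
```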

View File

@@ -46,8 +46,8 @@ export interface ModelCapabilities {
*/
export interface AIConfig {
enabled: boolean;
providerPrecedence: ProviderPrecedenceConfig;
embeddingProviderPrecedence: EmbeddingProviderPrecedenceConfig;
selectedProvider: ProviderType | null;
selectedEmbeddingProvider: EmbeddingProviderType | null;
defaultModels: Record<ProviderType, string | undefined>;
providerSettings: ProviderSettings;
}
@@ -87,7 +87,7 @@ export type ProviderType = 'openai' | 'anthropic' | 'ollama';
/**
* Valid embedding provider types
*/
export type EmbeddingProviderType = 'openai' | 'ollama' | 'local';
export type EmbeddingProviderType = 'openai' | 'voyage' | 'ollama' | 'local';
/**
* Model identifier with provider prefix (e.g., "openai:gpt-4" or "ollama:llama2")

View File

@@ -11,8 +11,7 @@ import type { ServiceProviders } from '../../interfaces/ai_service_interfaces.js
// Import new configuration system
import {
getProviderPrecedence,
getPreferredProvider,
getSelectedProvider,
parseModelIdentifier,
getDefaultModelForProvider,
createModelConfig
@@ -99,22 +98,30 @@ export class ModelSelectionStage extends BasePipelineStage<ModelSelectionInput,
}
}
// Get default provider and model using the new configuration system
// Get selected provider and model using the new configuration system
try {
// Use the new configuration helpers - no string parsing!
const preferredProvider = await getPreferredProvider();
const selectedProvider = await getSelectedProvider();
if (!preferredProvider) {
throw new Error('No AI providers are configured. Please check your AI settings.');
if (!selectedProvider) {
throw new Error('No AI provider is selected. Please select a provider in your AI settings.');
}
const modelName = await getDefaultModelForProvider(preferredProvider);
const modelName = await getDefaultModelForProvider(selectedProvider);
if (!modelName) {
throw new Error(`No default model configured for provider ${preferredProvider}. Please set a default model in your AI settings.`);
// Try to fetch and set a default model from the provider
const fetchedModel = await this.fetchAndSetDefaultModel(selectedProvider);
if (!fetchedModel) {
throw new Error(`No default model configured for provider ${selectedProvider}. Please set a default model in your AI settings.`);
}
// Use the fetched model
updatedOptions.model = fetchedModel;
} else {
updatedOptions.model = modelName;
}
log.info(`Selected provider: ${preferredProvider}, model: ${modelName}`);
log.info(`Selected provider: ${selectedProvider}, model: ${updatedOptions.model}`);
// Determine query complexity
let queryComplexity = 'low';
@@ -142,15 +149,14 @@ export class ModelSelectionStage extends BasePipelineStage<ModelSelectionInput,
queryComplexity = contentLength > SEARCH_CONSTANTS.CONTEXT.CONTENT_LENGTH.HIGH_THRESHOLD ? 'high' : 'medium';
}
// Set the model and add provider metadata
updatedOptions.model = modelName;
this.addProviderMetadata(updatedOptions, preferredProvider as ServiceProviders, modelName);
// Add provider metadata (model is already set above)
this.addProviderMetadata(updatedOptions, selectedProvider as ServiceProviders, updatedOptions.model);
log.info(`Selected model: ${modelName} from provider: ${preferredProvider} for query complexity: ${queryComplexity}`);
log.info(`Selected model: ${updatedOptions.model} from provider: ${selectedProvider} for query complexity: ${queryComplexity}`);
log.info(`[ModelSelectionStage] Final options: ${JSON.stringify({
model: updatedOptions.model,
stream: updatedOptions.stream,
provider: preferredProvider,
provider: selectedProvider,
enableTools: updatedOptions.enableTools
})}`);
@@ -210,38 +216,38 @@ export class ModelSelectionStage extends BasePipelineStage<ModelSelectionInput,
}
/**
* Determine model based on provider precedence using the new configuration system
* Determine model based on selected provider using the new configuration system
*/
private async determineDefaultModel(input: ModelSelectionInput): Promise<string> {
try {
// Use the new configuration system
const providers = await getProviderPrecedence();
// Use the new single provider configuration system
const selectedProvider = await getSelectedProvider();
// Use only providers that are available
const availableProviders = providers.filter(provider =>
aiServiceManager.isProviderAvailable(provider));
if (availableProviders.length === 0) {
throw new Error('No AI providers are available');
if (!selectedProvider) {
throw new Error('No AI provider is selected. Please select a provider in your AI settings.');
}
// Get the first available provider and its default model
const defaultProvider = availableProviders[0];
const defaultModel = await getDefaultModelForProvider(defaultProvider);
// Check if the provider is available
if (!aiServiceManager.isProviderAvailable(selectedProvider)) {
throw new Error(`Selected provider ${selectedProvider} is not available`);
}
// Get the default model for the selected provider
const defaultModel = await getDefaultModelForProvider(selectedProvider);
if (!defaultModel) {
throw new Error(`No default model configured for provider ${defaultProvider}. Please configure a default model in your AI settings.`);
throw new Error(`No default model configured for provider ${selectedProvider}. Please configure a default model in your AI settings.`);
}
// Set provider metadata
if (!input.options.providerMetadata) {
input.options.providerMetadata = {
provider: defaultProvider as 'openai' | 'anthropic' | 'ollama' | 'local',
provider: selectedProvider as 'openai' | 'anthropic' | 'ollama' | 'local',
modelId: defaultModel
};
}
log.info(`Selected default model ${defaultModel} from provider ${defaultProvider}`);
log.info(`Selected default model ${defaultModel} from provider ${selectedProvider}`);
return defaultModel;
} catch (error) {
log.error(`Error determining default model: ${error}`);
@@ -271,4 +277,126 @@ export class ModelSelectionStage extends BasePipelineStage<ModelSelectionInput,
return MODEL_CAPABILITIES['default'].contextWindowTokens;
}
}
/**
* Fetch available models from provider and set a default model
*/
private async fetchAndSetDefaultModel(provider: ProviderType): Promise<string | null> {
try {
log.info(`Fetching available models for provider ${provider}`);
// Import server-side options to update the default model
const optionService = (await import('../../../options.js')).default;
switch (provider) {
case 'openai':
const openaiModels = await this.fetchOpenAIModels();
if (openaiModels.length > 0) {
// Use the first available model without any preferences
const selectedModel = openaiModels[0];
await optionService.setOption('openaiDefaultModel', selectedModel);
log.info(`Set default OpenAI model to: ${selectedModel}`);
return selectedModel;
}
break;
case 'anthropic':
const anthropicModels = await this.fetchAnthropicModels();
if (anthropicModels.length > 0) {
// Use the first available model without any preferences
const selectedModel = anthropicModels[0];
await optionService.setOption('anthropicDefaultModel', selectedModel);
log.info(`Set default Anthropic model to: ${selectedModel}`);
return selectedModel;
}
break;
case 'ollama':
const ollamaModels = await this.fetchOllamaModels();
if (ollamaModels.length > 0) {
// Use the first available model without any preferences
const selectedModel = ollamaModels[0];
await optionService.setOption('ollamaDefaultModel', selectedModel);
log.info(`Set default Ollama model to: ${selectedModel}`);
return selectedModel;
}
break;
}
log.info(`No models available for provider ${provider}`);
return null;
} catch (error) {
log.error(`Error fetching models for provider ${provider}: ${error}`);
return null;
}
}
/**
* Fetch available OpenAI models
*/
private async fetchOpenAIModels(): Promise<string[]> {
try {
// Use the provider service to get available models
const aiServiceManager = (await import('../../ai_service_manager.js')).default;
const service = aiServiceManager.getInstance().getService('openai');
if (service && typeof (service as any).getAvailableModels === 'function') {
return await (service as any).getAvailableModels();
}
// No fallback - return empty array if models can't be fetched
log.info('OpenAI service does not support getAvailableModels method');
return [];
} catch (error) {
log.error(`Error fetching OpenAI models: ${error}`);
return [];
}
}
/**
* Fetch available Anthropic models
*/
private async fetchAnthropicModels(): Promise<string[]> {
try {
// Use the provider service to get available models
const aiServiceManager = (await import('../../ai_service_manager.js')).default;
const service = aiServiceManager.getInstance().getService('anthropic');
if (service && typeof (service as any).getAvailableModels === 'function') {
return await (service as any).getAvailableModels();
}
// No fallback - return empty array if models can't be fetched
log.info('Anthropic service does not support getAvailableModels method');
return [];
} catch (error) {
log.error(`Error fetching Anthropic models: ${error}`);
return [];
}
}
/**
* Fetch available Ollama models
*/
private async fetchOllamaModels(): Promise<string[]> {
try {
// Use the provider service to get available models
const aiServiceManager = (await import('../../ai_service_manager.js')).default;
const service = aiServiceManager.getInstance().getService('ollama');
if (service && typeof (service as any).getAvailableModels === 'function') {
return await (service as any).getAvailableModels();
}
// No fallback - return empty array if models can't be fetched
log.info('Ollama service does not support getAvailableModels method');
return [];
} catch (error) {
log.error(`Error fetching Ollama models: ${error}`);
return [];
}
}
}
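The `fetchAndSetDefaultModel` path relies on a duck-typed check: a service only contributes models if it actually exposes `getAvailableModels()`, and the stage takes the first returned model with no ranking. A minimal sketch of that check, assuming a stand-in service shape rather than the real provider classes:

```typescript
// Duck-typed model lookup: only services that expose getAvailableModels() contribute.

interface MaybeListsModels {
    getAvailableModels?: () => Promise<string[]>;
}

async function firstAvailableModel(service: MaybeListsModels | null): Promise<string | null> {
    if (service && typeof service.getAvailableModels === 'function') {
        const models = await service.getAvailableModels();
        // Take the first model as-is, without any preference ordering.
        return models[0] ?? null;
    }
    return null; // no fallback when the provider cannot list models
}

// Usage: a provider that can list models yields its first entry.
firstAvailableModel({ getAvailableModels: async () => ['model-a', 'model-b'] })
    .then(m => console.log(m)); // "model-a"
```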

View File

@@ -606,4 +606,12 @@ export class AnthropicService extends BaseAIService {
return convertedTools;
}
/**
* Clear cached Anthropic client to force recreation with new settings
*/
clearCache(): void {
this.client = null;
log.info('Anthropic client cache cleared');
}
}

View File

@@ -526,4 +526,13 @@ export class OllamaService extends BaseAIService {
log.info(`Added tool execution feedback: ${toolExecutionStatus.length} statuses`);
return updatedMessages;
}
/**
* Clear cached Ollama client to force recreation with new settings
*/
clearCache(): void {
// Ollama service doesn't maintain a persistent client like OpenAI/Anthropic
// but we can clear any future cached state here if needed
log.info('Ollama client cache cleared (no persistent client to clear)');
}
}

View File

@@ -257,4 +257,12 @@ export class OpenAIService extends BaseAIService {
throw error;
}
}
/**
* Clear cached OpenAI client to force recreation with new settings
*/
clearCache(): void {
this.openai = null;
log.info('OpenAI client cache cleared');
}
}
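The `clearCache()` methods exist because each provider service lazily builds its client from the settings current at first use; dropping the cached client forces the next call to pick up new keys or base URLs. A self-contained sketch of that lazy-client pattern, using a fake client and settings map rather than the real OpenAI SDK or option APIs:

```typescript
// Lazy-client pattern that clearCache() resets: create on first use, drop on settings change.

const settings = new Map<string, string>([['openaiApiKey', 'old-key']]);

function readSetting(name: string): string {
    return settings.get(name) ?? '';
}

class FakeClient {
    constructor(public apiKey: string) {}
}

class LazyClientService {
    private client: FakeClient | null = null;

    private getClient(): FakeClient {
        if (!this.client) {
            // Created with whatever the settings are *now*.
            this.client = new FakeClient(readSetting('openaiApiKey'));
        }
        return this.client;
    }

    /** Drop the cached client so the next call picks up new settings. */
    clearCache(): void {
        this.client = null;
    }

    describe(): string {
        return this.getClient().apiKey;
    }
}

const service = new LazyClientService();
console.log(service.describe()); // "old-key"
settings.set('openaiApiKey', 'new-key');
service.clearCache();
console.log(service.describe()); // "new-key"
```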

View File

@@ -195,26 +195,26 @@ const defaultOptions: DefaultOption[] = [
// AI Options
{ name: "aiEnabled", value: "false", isSynced: true },
{ name: "openaiApiKey", value: "", isSynced: false },
{ name: "openaiDefaultModel", value: "gpt-4o", isSynced: true },
{ name: "openaiEmbeddingModel", value: "text-embedding-3-small", isSynced: true },
{ name: "openaiDefaultModel", value: "", isSynced: true },
{ name: "openaiEmbeddingModel", value: "", isSynced: true },
{ name: "openaiBaseUrl", value: "https://api.openai.com/v1", isSynced: true },
{ name: "anthropicApiKey", value: "", isSynced: false },
{ name: "anthropicDefaultModel", value: "claude-3-opus-20240229", isSynced: true },
{ name: "voyageEmbeddingModel", value: "voyage-2", isSynced: true },
{ name: "anthropicDefaultModel", value: "", isSynced: true },
{ name: "voyageEmbeddingModel", value: "", isSynced: true },
{ name: "voyageApiKey", value: "", isSynced: false },
{ name: "anthropicBaseUrl", value: "https://api.anthropic.com/v1", isSynced: true },
{ name: "ollamaEnabled", value: "false", isSynced: true },
{ name: "ollamaDefaultModel", value: "llama3", isSynced: true },
{ name: "ollamaDefaultModel", value: "", isSynced: true },
{ name: "ollamaBaseUrl", value: "http://localhost:11434", isSynced: true },
{ name: "ollamaEmbeddingModel", value: "nomic-embed-text", isSynced: true },
{ name: "ollamaEmbeddingModel", value: "", isSynced: true },
{ name: "embeddingAutoUpdateEnabled", value: "true", isSynced: true },
// Adding missing AI options
{ name: "aiTemperature", value: "0.7", isSynced: true },
{ name: "aiSystemPrompt", value: "", isSynced: true },
{ name: "aiProviderPrecedence", value: "openai,anthropic,ollama", isSynced: true },
{ name: "aiSelectedProvider", value: "openai", isSynced: true },
{ name: "embeddingDimensionStrategy", value: "auto", isSynced: true },
{ name: "embeddingProviderPrecedence", value: "openai,voyage,ollama,local", isSynced: true },
{ name: "embeddingSelectedProvider", value: "openai", isSynced: true },
{ name: "embeddingSimilarityThreshold", value: "0.75", isSynced: true },
{ name: "enableAutomaticIndexing", value: "true", isSynced: true },
{ name: "maxNotesPerLlmQuery", value: "3", isSynced: true },