feat(llm): create unit tests for LLM services

This commit is contained in:
perf3ct
2025-06-07 21:03:54 +00:00
parent ebb1654d0e
commit 7f9ad04b57
13 changed files with 6114 additions and 0 deletions

View File

@@ -0,0 +1,512 @@
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import { AIServiceManager } from './ai_service_manager.js';
import options from '../options.js';
import eventService from '../events.js';
import { AnthropicService } from './providers/anthropic_service.js';
import { OpenAIService } from './providers/openai_service.js';
import { OllamaService } from './providers/ollama_service.js';
import * as configHelpers from './config/configuration_helpers.js';
import type { AIService, ChatCompletionOptions, Message } from './ai_interface.js';
// Mock dependencies
vi.mock('../options.js', () => ({
default: {
getOption: vi.fn(),
getOptionBool: vi.fn()
}
}));
vi.mock('../events.js', () => ({
default: {
subscribe: vi.fn()
}
}));
vi.mock('../log.js', () => ({
default: {
info: vi.fn(),
error: vi.fn(),
warn: vi.fn()
}
}));
vi.mock('./providers/anthropic_service.js', () => ({
AnthropicService: vi.fn().mockImplementation(() => ({
isAvailable: vi.fn().mockReturnValue(true),
generateChatCompletion: vi.fn()
}))
}));
vi.mock('./providers/openai_service.js', () => ({
OpenAIService: vi.fn().mockImplementation(() => ({
isAvailable: vi.fn().mockReturnValue(true),
generateChatCompletion: vi.fn()
}))
}));
vi.mock('./providers/ollama_service.js', () => ({
OllamaService: vi.fn().mockImplementation(() => ({
isAvailable: vi.fn().mockReturnValue(true),
generateChatCompletion: vi.fn()
}))
}));
vi.mock('./config/configuration_helpers.js', () => ({
getSelectedProvider: vi.fn(),
parseModelIdentifier: vi.fn(),
isAIEnabled: vi.fn(),
getDefaultModelForProvider: vi.fn(),
clearConfigurationCache: vi.fn(),
validateConfiguration: vi.fn()
}));
vi.mock('./context/index.js', () => ({
ContextExtractor: vi.fn().mockImplementation(() => ({}))
}));
vi.mock('./context_extractors/index.js', () => ({
default: {
getTools: vi.fn().mockReturnValue({
noteNavigator: {},
queryDecomposition: {},
contextualThinking: {}
}),
getAllTools: vi.fn().mockReturnValue([])
}
}));
vi.mock('./context/services/context_service.js', () => ({
default: {
findRelevantNotes: vi.fn().mockResolvedValue([])
}
}));
vi.mock('./tools/tool_initializer.js', () => ({
default: {
initializeTools: vi.fn().mockResolvedValue(undefined)
}
}));
describe('AIServiceManager', () => {
let manager: AIServiceManager;
beforeEach(() => {
vi.clearAllMocks();
manager = new AIServiceManager();
});
afterEach(() => {
vi.restoreAllMocks();
});
describe('constructor', () => {
it('should initialize tools and set up event listeners', () => {
expect(eventService.subscribe).toHaveBeenCalled();
});
});
describe('getSelectedProviderAsync', () => {
it('should return the selected provider', async () => {
vi.mocked(configHelpers.getSelectedProvider).mockResolvedValueOnce('openai');
const result = await manager.getSelectedProviderAsync();
expect(result).toBe('openai');
expect(configHelpers.getSelectedProvider).toHaveBeenCalled();
});
it('should return null if no provider is selected', async () => {
vi.mocked(configHelpers.getSelectedProvider).mockResolvedValueOnce(null);
const result = await manager.getSelectedProviderAsync();
expect(result).toBeNull();
});
it('should handle errors and return null', async () => {
vi.mocked(configHelpers.getSelectedProvider).mockRejectedValueOnce(new Error('Config error'));
const result = await manager.getSelectedProviderAsync();
expect(result).toBeNull();
});
});
describe('validateConfiguration', () => {
it('should return null for valid configuration', async () => {
vi.mocked(configHelpers.validateConfiguration).mockResolvedValueOnce({
isValid: true,
errors: [],
warnings: []
});
const result = await manager.validateConfiguration();
expect(result).toBeNull();
});
it('should return error message for invalid configuration', async () => {
vi.mocked(configHelpers.validateConfiguration).mockResolvedValueOnce({
isValid: false,
errors: ['Missing API key', 'Invalid model'],
warnings: []
});
const result = await manager.validateConfiguration();
expect(result).toContain('There are issues with your AI configuration');
expect(result).toContain('Missing API key');
expect(result).toContain('Invalid model');
});
it('should include warnings in valid configuration', async () => {
vi.mocked(configHelpers.validateConfiguration).mockResolvedValueOnce({
isValid: true,
errors: [],
warnings: ['Model not optimal']
});
const result = await manager.validateConfiguration();
expect(result).toBeNull();
});
});
describe('getOrCreateAnyService', () => {
it('should create and return the selected provider service', async () => {
vi.mocked(configHelpers.getSelectedProvider).mockResolvedValueOnce('openai');
vi.mocked(options.getOption).mockReturnValueOnce('test-api-key');
const mockService = {
isAvailable: vi.fn().mockReturnValue(true),
generateChatCompletion: vi.fn()
};
vi.mocked(OpenAIService).mockImplementationOnce(() => mockService as any);
const result = await manager.getOrCreateAnyService();
expect(result).toBe(mockService);
});
it('should throw error if no provider is selected', async () => {
vi.mocked(configHelpers.getSelectedProvider).mockResolvedValueOnce(null);
await expect(manager.getOrCreateAnyService()).rejects.toThrow(
'No AI provider is selected'
);
});
it('should throw error if selected provider is not available', async () => {
vi.mocked(configHelpers.getSelectedProvider).mockResolvedValueOnce('openai');
vi.mocked(options.getOption).mockReturnValueOnce(null); // No API key
await expect(manager.getOrCreateAnyService()).rejects.toThrow(
'Selected AI provider (openai) is not available'
);
});
});
describe('isAnyServiceAvailable', () => {
it('should return true if any provider is available', () => {
vi.mocked(options.getOption).mockReturnValueOnce('test-api-key');
const result = manager.isAnyServiceAvailable();
expect(result).toBe(true);
});
it('should return false if no providers are available', () => {
vi.mocked(options.getOption).mockReturnValue(null);
const result = manager.isAnyServiceAvailable();
expect(result).toBe(false);
});
});
describe('getAvailableProviders', () => {
it('should return list of available providers', () => {
vi.mocked(options.getOption)
.mockReturnValueOnce('openai-key')
.mockReturnValueOnce('anthropic-key')
.mockReturnValueOnce(null); // No Ollama URL
const result = manager.getAvailableProviders();
expect(result).toEqual(['openai', 'anthropic']);
});
it('should include already created services', () => {
const mockService = {
isAvailable: vi.fn().mockReturnValue(true)
};
(manager as any).services.openai = mockService;
const result = manager.getAvailableProviders();
expect(result).toContain('openai');
});
});
describe('generateChatCompletion', () => {
const messages: Message[] = [
{ role: 'user', content: 'Hello' }
];
it('should generate completion with selected provider', async () => {
vi.mocked(configHelpers.getSelectedProvider).mockResolvedValueOnce('openai');
vi.mocked(options.getOption).mockReturnValueOnce('test-api-key');
const mockResponse = { content: 'Hello response' };
const mockService = {
isAvailable: vi.fn().mockReturnValue(true),
generateChatCompletion: vi.fn().mockResolvedValueOnce(mockResponse)
};
vi.mocked(OpenAIService).mockImplementationOnce(() => mockService as any);
const result = await manager.generateChatCompletion(messages);
expect(result).toBe(mockResponse);
expect(mockService.generateChatCompletion).toHaveBeenCalledWith(messages, {});
});
it('should handle provider prefix in model', async () => {
vi.mocked(configHelpers.getSelectedProvider).mockResolvedValueOnce('openai');
vi.mocked(configHelpers.parseModelIdentifier).mockReturnValueOnce({
provider: 'openai',
modelId: 'gpt-4'
});
vi.mocked(options.getOption).mockReturnValueOnce('test-api-key');
const mockResponse = { content: 'Hello response' };
const mockService = {
isAvailable: vi.fn().mockReturnValue(true),
generateChatCompletion: vi.fn().mockResolvedValueOnce(mockResponse)
};
vi.mocked(OpenAIService).mockImplementationOnce(() => mockService as any);
const result = await manager.generateChatCompletion(messages, {
model: 'openai:gpt-4'
});
expect(result).toBe(mockResponse);
expect(mockService.generateChatCompletion).toHaveBeenCalledWith(
messages,
{ model: 'gpt-4' }
);
});
it('should throw error if no messages provided', async () => {
await expect(manager.generateChatCompletion([])).rejects.toThrow(
'No messages provided'
);
});
it('should throw error if no provider selected', async () => {
vi.mocked(configHelpers.getSelectedProvider).mockResolvedValueOnce(null);
await expect(manager.generateChatCompletion(messages)).rejects.toThrow(
'No AI provider is selected'
);
});
it('should throw error if model specifies different provider', async () => {
vi.mocked(configHelpers.getSelectedProvider).mockResolvedValueOnce('openai');
vi.mocked(configHelpers.parseModelIdentifier).mockReturnValueOnce({
provider: 'anthropic',
modelId: 'claude-3'
});
await expect(
manager.generateChatCompletion(messages, { model: 'anthropic:claude-3' })
).rejects.toThrow(
"Model specifies provider 'anthropic' but selected provider is 'openai'"
);
});
});
describe('getAIEnabledAsync', () => {
it('should return AI enabled status', async () => {
vi.mocked(configHelpers.isAIEnabled).mockResolvedValueOnce(true);
const result = await manager.getAIEnabledAsync();
expect(result).toBe(true);
expect(configHelpers.isAIEnabled).toHaveBeenCalled();
});
});
describe('getAIEnabled', () => {
it('should return AI enabled status synchronously', () => {
vi.mocked(options.getOptionBool).mockReturnValueOnce(true);
const result = manager.getAIEnabled();
expect(result).toBe(true);
expect(options.getOptionBool).toHaveBeenCalledWith('aiEnabled');
});
});
describe('initialize', () => {
it('should initialize if AI is enabled', async () => {
vi.mocked(configHelpers.isAIEnabled).mockResolvedValueOnce(true);
await manager.initialize();
expect(configHelpers.isAIEnabled).toHaveBeenCalled();
});
it('should not initialize if AI is disabled', async () => {
vi.mocked(configHelpers.isAIEnabled).mockResolvedValueOnce(false);
await manager.initialize();
expect(configHelpers.isAIEnabled).toHaveBeenCalled();
});
});
describe('getService', () => {
it('should return service for specified provider', async () => {
vi.mocked(options.getOption).mockReturnValueOnce('test-api-key');
const mockService = {
isAvailable: vi.fn().mockReturnValue(true),
generateChatCompletion: vi.fn()
};
vi.mocked(OpenAIService).mockImplementationOnce(() => mockService as any);
const result = await manager.getService('openai');
expect(result).toBe(mockService);
});
it('should return selected provider service if no provider specified', async () => {
vi.mocked(configHelpers.getSelectedProvider).mockResolvedValueOnce('anthropic');
vi.mocked(options.getOption).mockReturnValueOnce('test-api-key');
const mockService = {
isAvailable: vi.fn().mockReturnValue(true),
generateChatCompletion: vi.fn()
};
vi.mocked(AnthropicService).mockImplementationOnce(() => mockService as any);
const result = await manager.getService();
expect(result).toBe(mockService);
});
it('should throw error if specified provider not available', async () => {
vi.mocked(options.getOption).mockReturnValueOnce(null); // No API key
await expect(manager.getService('openai')).rejects.toThrow(
'Specified provider openai is not available'
);
});
});
describe('getSelectedProvider', () => {
it('should return selected provider synchronously', () => {
vi.mocked(options.getOption).mockReturnValueOnce('anthropic');
const result = manager.getSelectedProvider();
expect(result).toBe('anthropic');
});
it('should return default provider if none selected', () => {
vi.mocked(options.getOption).mockReturnValueOnce(null);
const result = manager.getSelectedProvider();
expect(result).toBe('openai');
});
});
describe('isProviderAvailable', () => {
it('should return true if provider service is available', () => {
const mockService = {
isAvailable: vi.fn().mockReturnValue(true)
};
(manager as any).services.openai = mockService;
const result = manager.isProviderAvailable('openai');
expect(result).toBe(true);
});
it('should return false if provider service not created', () => {
const result = manager.isProviderAvailable('openai');
expect(result).toBe(false);
});
});
describe('getProviderMetadata', () => {
it('should return metadata for existing provider', () => {
const mockService = {
isAvailable: vi.fn().mockReturnValue(true)
};
(manager as any).services.openai = mockService;
const result = manager.getProviderMetadata('openai');
expect(result).toEqual({
name: 'openai',
capabilities: {
chat: true,
streaming: true,
functionCalling: true
},
models: ['default'],
defaultModel: 'default'
});
});
it('should return null for non-existing provider', () => {
const result = manager.getProviderMetadata('openai');
expect(result).toBeNull();
});
});
describe('event handling', () => {
it('should recreate services on AI option changes', async () => {
const eventCallback = vi.mocked(eventService.subscribe).mock.calls[0][1];
await eventCallback({
entityName: 'options',
entity: { name: 'openaiApiKey', value: 'new-key' }
});
expect(configHelpers.clearConfigurationCache).toHaveBeenCalled();
});
it('should initialize on aiEnabled set to true', async () => {
const eventCallback = vi.mocked(eventService.subscribe).mock.calls[0][1];
vi.mocked(configHelpers.isAIEnabled).mockResolvedValueOnce(true);
await eventCallback({
entityName: 'options',
entity: { name: 'aiEnabled', value: 'true' }
});
expect(configHelpers.isAIEnabled).toHaveBeenCalled();
});
it('should clear providers on aiEnabled set to false', async () => {
const mockService = {
isAvailable: vi.fn().mockReturnValue(true)
};
(manager as any).services.openai = mockService;
const eventCallback = vi.mocked(eventService.subscribe).mock.calls[0][1];
await eventCallback({
entityName: 'options',
entity: { name: 'aiEnabled', value: 'false' }
});
expect((manager as any).services).toEqual({});
});
});
});

View File

@@ -0,0 +1,422 @@
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import type { Request, Response } from 'express';
import RestChatService from './rest_chat_service.js';
import type { Message } from '../ai_interface.js';
// Mock dependencies
vi.mock('../../log.js', () => ({
default: {
info: vi.fn(),
error: vi.fn(),
warn: vi.fn()
}
}));
vi.mock('../../options.js', () => ({
default: {
getOption: vi.fn(),
getOptionBool: vi.fn()
}
}));
vi.mock('../ai_service_manager.js', () => ({
default: {
getOrCreateAnyService: vi.fn(),
generateChatCompletion: vi.fn(),
isAnyServiceAvailable: vi.fn(),
getAIEnabled: vi.fn()
}
}));
vi.mock('../pipeline/chat_pipeline.js', () => ({
ChatPipeline: vi.fn().mockImplementation(() => ({
execute: vi.fn()
}))
}));
vi.mock('./handlers/tool_handler.js', () => ({
ToolHandler: vi.fn().mockImplementation(() => ({
handleToolCalls: vi.fn()
}))
}));
vi.mock('../chat_storage_service.js', () => ({
default: {
getChat: vi.fn(),
createChat: vi.fn(),
updateChat: vi.fn(),
deleteChat: vi.fn(),
getAllChats: vi.fn(),
recordSources: vi.fn()
}
}));
vi.mock('../config/configuration_helpers.js', () => ({
isAIEnabled: vi.fn(),
getSelectedModelConfig: vi.fn()
}));
describe('RestChatService', () => {
let restChatService: RestChatService;
let mockOptions: any;
let mockAiServiceManager: any;
let mockChatStorageService: any;
let mockReq: Partial<Request>;
let mockRes: Partial<Response>;
beforeEach(async () => {
vi.clearAllMocks();
// Get mocked modules
mockOptions = (await import('../../options.js')).default;
mockAiServiceManager = (await import('../ai_service_manager.js')).default;
mockChatStorageService = (await import('../chat_storage_service.js')).default;
restChatService = (await import('./rest_chat_service.js')).default;
// Setup mock request and response
mockReq = {
params: {},
body: {},
query: {},
method: 'POST'
};
mockRes = {
status: vi.fn().mockReturnThis(),
json: vi.fn().mockReturnThis(),
send: vi.fn().mockReturnThis()
};
});
afterEach(() => {
vi.restoreAllMocks();
});
describe('isDatabaseInitialized', () => {
it('should return true when database is initialized', () => {
mockOptions.getOption.mockReturnValueOnce('true');
const result = restChatService.isDatabaseInitialized();
expect(result).toBe(true);
expect(mockOptions.getOption).toHaveBeenCalledWith('initialized');
});
it('should return false when database is not initialized', () => {
mockOptions.getOption.mockImplementationOnce(() => {
throw new Error('Database not initialized');
});
const result = restChatService.isDatabaseInitialized();
expect(result).toBe(false);
});
});
describe('handleSendMessage', () => {
beforeEach(() => {
mockReq.params = { chatNoteId: 'chat-123' };
mockOptions.getOptionBool.mockReturnValue(true); // AI enabled
vi.spyOn(restChatService, 'isDatabaseInitialized').mockReturnValue(true);
mockAiServiceManager.getOrCreateAnyService.mockResolvedValue({});
});
it('should handle POST request with valid content', async () => {
mockReq.method = 'POST';
mockReq.body = {
content: 'Hello, how are you?',
useAdvancedContext: false,
showThinking: false
};
const existingChat = {
id: 'chat-123',
title: 'Test Chat',
messages: [{ role: 'user', content: 'Previous message' }],
noteId: 'chat-123',
createdAt: new Date(),
updatedAt: new Date(),
metadata: {}
};
mockChatStorageService.getChat.mockResolvedValueOnce(existingChat);
// Mock the rest of the implementation
const result = await restChatService.handleSendMessage(mockReq as Request, mockRes as Response);
expect(mockChatStorageService.getChat).toHaveBeenCalledWith('chat-123');
expect(mockAiServiceManager.getOrCreateAnyService).toHaveBeenCalled();
});
it('should create new chat if not found for POST request', async () => {
mockReq.method = 'POST';
mockReq.body = {
content: 'Hello, how are you?'
};
mockChatStorageService.getChat.mockResolvedValueOnce(null);
const newChat = {
id: 'new-chat-123',
title: 'New Chat',
messages: [],
noteId: 'new-chat-123',
createdAt: new Date(),
updatedAt: new Date(),
metadata: {}
};
mockChatStorageService.createChat.mockResolvedValueOnce(newChat);
await restChatService.handleSendMessage(mockReq as Request, mockRes as Response);
expect(mockChatStorageService.createChat).toHaveBeenCalledWith('New Chat');
});
it('should return error for GET request without stream parameter', async () => {
mockReq.method = 'GET';
mockReq.query = {}; // No stream parameter
const result = await restChatService.handleSendMessage(mockReq as Request, mockRes as Response);
expect(result).toEqual({
error: 'Error processing your request: Stream parameter must be set to true for GET/streaming requests'
});
});
it('should return error for POST request with empty content', async () => {
mockReq.method = 'POST';
mockReq.body = {
content: '' // Empty content
};
const result = await restChatService.handleSendMessage(mockReq as Request, mockRes as Response);
expect(result).toEqual({
error: 'Error processing your request: Content cannot be empty'
});
});
it('should return error when AI is disabled', async () => {
mockOptions.getOptionBool.mockReturnValue(false); // AI disabled
mockReq.method = 'POST';
mockReq.body = {
content: 'Hello'
};
const result = await restChatService.handleSendMessage(mockReq as Request, mockRes as Response);
expect(result).toEqual({
error: "AI features are disabled. Please enable them in the settings."
});
});
it('should return error when database is not initialized', async () => {
vi.spyOn(restChatService, 'isDatabaseInitialized').mockReturnValue(false);
mockReq.method = 'POST';
mockReq.body = {
content: 'Hello'
};
const result = await restChatService.handleSendMessage(mockReq as Request, mockRes as Response);
expect(result).toEqual({
error: 'Error processing your request: Database is not initialized'
});
});
it('should return error for GET request when chat not found', async () => {
mockReq.method = 'GET';
mockReq.query = { stream: 'true' };
mockReq.body = { content: 'Hello' };
mockChatStorageService.getChat.mockResolvedValueOnce(null);
const result = await restChatService.handleSendMessage(mockReq as Request, mockRes as Response);
expect(result).toEqual({
error: 'Error processing your request: Chat Note not found, cannot create session for streaming'
});
});
it('should handle GET request with stream parameter', async () => {
mockReq.method = 'GET';
mockReq.query = {
stream: 'true',
useAdvancedContext: 'true',
showThinking: 'false'
};
mockReq.body = {
content: 'Hello from stream'
};
const existingChat = {
id: 'chat-123',
title: 'Test Chat',
messages: [],
noteId: 'chat-123',
createdAt: new Date(),
updatedAt: new Date(),
metadata: {}
};
mockChatStorageService.getChat.mockResolvedValueOnce(existingChat);
await restChatService.handleSendMessage(mockReq as Request, mockRes as Response);
expect(mockChatStorageService.getChat).toHaveBeenCalledWith('chat-123');
});
it('should handle invalid content types', async () => {
mockReq.method = 'POST';
mockReq.body = {
content: null // Invalid content type
};
const result = await restChatService.handleSendMessage(mockReq as Request, mockRes as Response);
expect(result).toEqual({
error: 'Error processing your request: Content cannot be empty'
});
});
it('should handle whitespace-only content', async () => {
mockReq.method = 'POST';
mockReq.body = {
content: ' \n\t ' // Whitespace only
};
const result = await restChatService.handleSendMessage(mockReq as Request, mockRes as Response);
expect(result).toEqual({
error: 'Error processing your request: Content cannot be empty'
});
});
});
describe('error handling', () => {
beforeEach(() => {
mockReq.params = { chatNoteId: 'chat-123' };
mockReq.method = 'POST';
mockReq.body = { content: 'Hello' };
mockOptions.getOptionBool.mockReturnValue(true);
vi.spyOn(restChatService, 'isDatabaseInitialized').mockReturnValue(true);
});
it('should handle AI service manager errors', async () => {
mockAiServiceManager.getOrCreateAnyService.mockRejectedValueOnce(
new Error('No AI provider available')
);
const result = await restChatService.handleSendMessage(mockReq as Request, mockRes as Response);
expect(result).toEqual({
error: 'Error processing your request: No AI provider available'
});
});
it('should handle chat storage service errors', async () => {
mockAiServiceManager.getOrCreateAnyService.mockResolvedValueOnce({});
mockChatStorageService.getChat.mockRejectedValueOnce(
new Error('Database connection failed')
);
const result = await restChatService.handleSendMessage(mockReq as Request, mockRes as Response);
expect(result).toEqual({
error: 'Error processing your request: Database connection failed'
});
});
});
describe('parameter parsing', () => {
it('should parse useAdvancedContext from body for POST', async () => {
mockReq.method = 'POST';
mockReq.body = {
content: 'Hello',
useAdvancedContext: true,
showThinking: false
};
mockReq.params = { chatNoteId: 'chat-123' };
mockOptions.getOptionBool.mockReturnValue(true);
vi.spyOn(restChatService, 'isDatabaseInitialized').mockReturnValue(true);
mockAiServiceManager.getOrCreateAnyService.mockResolvedValue({});
mockChatStorageService.getChat.mockResolvedValue({
id: 'chat-123',
title: 'Test',
messages: [],
noteId: 'chat-123',
createdAt: new Date(),
updatedAt: new Date(),
metadata: {}
});
await restChatService.handleSendMessage(mockReq as Request, mockRes as Response);
// Verify that useAdvancedContext was parsed correctly
// This would be tested by checking if the right parameters were passed to the pipeline
expect(mockChatStorageService.getChat).toHaveBeenCalledWith('chat-123');
});
it('should parse parameters from query for GET', async () => {
mockReq.method = 'GET';
mockReq.query = {
stream: 'true',
useAdvancedContext: 'true',
showThinking: 'true'
};
mockReq.body = {
content: 'Hello from stream'
};
mockReq.params = { chatNoteId: 'chat-123' };
mockOptions.getOptionBool.mockReturnValue(true);
vi.spyOn(restChatService, 'isDatabaseInitialized').mockReturnValue(true);
mockAiServiceManager.getOrCreateAnyService.mockResolvedValue({});
mockChatStorageService.getChat.mockResolvedValue({
id: 'chat-123',
title: 'Test',
messages: [],
noteId: 'chat-123',
createdAt: new Date(),
updatedAt: new Date(),
metadata: {}
});
await restChatService.handleSendMessage(mockReq as Request, mockRes as Response);
expect(mockChatStorageService.getChat).toHaveBeenCalledWith('chat-123');
});
it('should handle mixed parameter sources for GET', async () => {
mockReq.method = 'GET';
mockReq.query = {
stream: 'true',
useAdvancedContext: 'false' // Query parameter
};
mockReq.body = {
content: 'Hello',
useAdvancedContext: true, // Body parameter should take precedence
showThinking: true
};
mockReq.params = { chatNoteId: 'chat-123' };
mockOptions.getOptionBool.mockReturnValue(true);
vi.spyOn(restChatService, 'isDatabaseInitialized').mockReturnValue(true);
mockAiServiceManager.getOrCreateAnyService.mockResolvedValue({});
mockChatStorageService.getChat.mockResolvedValue({
id: 'chat-123',
title: 'Test',
messages: [],
noteId: 'chat-123',
createdAt: new Date(),
updatedAt: new Date(),
metadata: {}
});
await restChatService.handleSendMessage(mockReq as Request, mockRes as Response);
expect(mockChatStorageService.getChat).toHaveBeenCalledWith('chat-123');
});
});
});

View File

@@ -0,0 +1,439 @@
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import { getFormatter, buildMessagesWithContext, buildContextFromNotes } from './message_formatter.js';
import type { Message } from '../../ai_interface.js';
// Mock the constants
vi.mock('../../constants/llm_prompt_constants.js', () => ({
CONTEXT_PROMPTS: {
CONTEXT_NOTES_WRAPPER: 'Here are some relevant notes:\n\n{noteContexts}\n\nNow please answer this query: {query}'
}
}));
describe('MessageFormatter', () => {
beforeEach(() => {
vi.clearAllMocks();
});
afterEach(() => {
vi.restoreAllMocks();
});
describe('getFormatter', () => {
it('should return a formatter for any provider', () => {
const formatter = getFormatter('openai');
expect(formatter).toBeDefined();
expect(typeof formatter.formatMessages).toBe('function');
});
it('should return the same interface for different providers', () => {
const openaiFormatter = getFormatter('openai');
const anthropicFormatter = getFormatter('anthropic');
const ollamaFormatter = getFormatter('ollama');
expect(openaiFormatter.formatMessages).toBeDefined();
expect(anthropicFormatter.formatMessages).toBeDefined();
expect(ollamaFormatter.formatMessages).toBeDefined();
});
});
describe('formatMessages', () => {
it('should format messages without system prompt or context', () => {
const formatter = getFormatter('openai');
const messages: Message[] = [
{ role: 'user', content: 'Hello' },
{ role: 'assistant', content: 'Hi there!' }
];
const result = formatter.formatMessages(messages);
expect(result).toEqual(messages);
});
it('should add system message with context', () => {
const formatter = getFormatter('openai');
const messages: Message[] = [
{ role: 'user', content: 'Hello' }
];
const context = 'This is important context';
const result = formatter.formatMessages(messages, undefined, context);
expect(result).toHaveLength(2);
expect(result[0]).toEqual({
role: 'system',
content: 'Use the following context to answer the query: This is important context'
});
expect(result[1]).toEqual(messages[0]);
});
it('should add system message with custom system prompt', () => {
const formatter = getFormatter('openai');
const messages: Message[] = [
{ role: 'user', content: 'Hello' }
];
const systemPrompt = 'You are a helpful assistant';
const result = formatter.formatMessages(messages, systemPrompt);
expect(result).toHaveLength(2);
expect(result[0]).toEqual({
role: 'system',
content: 'You are a helpful assistant'
});
expect(result[1]).toEqual(messages[0]);
});
it('should prefer system prompt over context when both are provided', () => {
const formatter = getFormatter('openai');
const messages: Message[] = [
{ role: 'user', content: 'Hello' }
];
const systemPrompt = 'You are a helpful assistant';
const context = 'This is context';
const result = formatter.formatMessages(messages, systemPrompt, context);
expect(result).toHaveLength(2);
expect(result[0]).toEqual({
role: 'system',
content: 'You are a helpful assistant'
});
});
it('should skip duplicate system messages', () => {
const formatter = getFormatter('openai');
const messages: Message[] = [
{ role: 'system', content: 'Original system message' },
{ role: 'user', content: 'Hello' }
];
const systemPrompt = 'New system prompt';
const result = formatter.formatMessages(messages, systemPrompt);
expect(result).toHaveLength(2);
expect(result[0]).toEqual({
role: 'system',
content: 'New system prompt'
});
expect(result[1]).toEqual(messages[1]);
});
it('should preserve existing system message when no new one is provided', () => {
const formatter = getFormatter('openai');
const messages: Message[] = [
{ role: 'system', content: 'Original system message' },
{ role: 'user', content: 'Hello' }
];
const result = formatter.formatMessages(messages);
expect(result).toEqual(messages);
});
it('should handle empty messages array', () => {
const formatter = getFormatter('openai');
const result = formatter.formatMessages([]);
expect(result).toEqual([]);
});
it('should handle messages with tool calls', () => {
const formatter = getFormatter('openai');
const messages: Message[] = [
{ role: 'user', content: 'Search for notes about AI' },
{
role: 'assistant',
content: 'I need to search for notes.',
tool_calls: [
{
id: 'call_123',
type: 'function',
function: {
name: 'searchNotes',
arguments: '{"query": "AI"}'
}
}
]
},
{
role: 'tool',
content: 'Found 3 notes about AI',
tool_call_id: 'call_123'
},
{ role: 'assistant', content: 'I found 3 notes about AI for you.' }
];
const result = formatter.formatMessages(messages);
expect(result).toEqual(messages);
expect(result[1].tool_calls).toBeDefined();
expect(result[2].tool_call_id).toBe('call_123');
});
});
describe('buildMessagesWithContext', () => {
it('should build messages with context using service class', async () => {
const messages: Message[] = [
{ role: 'user', content: 'Hello' }
];
const context = 'Important context';
const mockService = {
constructor: { name: 'OpenAIService' }
};
const result = await buildMessagesWithContext(messages, context, mockService);
expect(result).toHaveLength(2);
expect(result[0].role).toBe('system');
expect(result[0].content).toContain('Important context');
expect(result[1]).toEqual(messages[0]);
});
it('should handle string provider name', async () => {
const messages: Message[] = [
{ role: 'user', content: 'Hello' }
];
const context = 'Important context';
const result = await buildMessagesWithContext(messages, context, 'anthropic');
expect(result).toHaveLength(2);
expect(result[0].role).toBe('system');
expect(result[1]).toEqual(messages[0]);
});
it('should return empty array for empty messages', async () => {
const result = await buildMessagesWithContext([], 'context', 'openai');
expect(result).toEqual([]);
});
it('should return original messages when no context provided', async () => {
const messages: Message[] = [
{ role: 'user', content: 'Hello' }
];
const result = await buildMessagesWithContext(messages, '', 'openai');
expect(result).toEqual(messages);
});
it('should return original messages when context is whitespace', async () => {
const messages: Message[] = [
{ role: 'user', content: 'Hello' }
];
const result = await buildMessagesWithContext(messages, ' \n\t ', 'openai');
expect(result).toEqual(messages);
});
it('should handle service without constructor name', async () => {
const messages: Message[] = [
{ role: 'user', content: 'Hello' }
];
const context = 'Important context';
const mockService = {}; // No constructor property
const result = await buildMessagesWithContext(messages, context, mockService);
expect(result).toHaveLength(2);
expect(result[0].role).toBe('system');
});
it('should handle errors gracefully', async () => {
const messages: Message[] = [
{ role: 'user', content: 'Hello' }
];
const context = 'Important context';
const mockService = {
constructor: {
get name() {
throw new Error('Constructor error');
}
}
};
const consoleErrorSpy = vi.spyOn(console, 'error').mockImplementation(() => {});
const result = await buildMessagesWithContext(messages, context, mockService);
expect(result).toEqual(messages); // Should fallback to original messages
expect(consoleErrorSpy).toHaveBeenCalledWith(
expect.stringContaining('Error building messages with context')
);
consoleErrorSpy.mockRestore();
});
it('should extract provider name from various service class names', async () => {
const messages: Message[] = [
{ role: 'user', content: 'Hello' }
];
const context = 'test context';
const services = [
{ constructor: { name: 'OpenAIService' } },
{ constructor: { name: 'AnthropicService' } },
{ constructor: { name: 'OllamaService' } },
{ constructor: { name: 'CustomAIService' } }
];
for (const service of services) {
const result = await buildMessagesWithContext(messages, context, service);
expect(result).toHaveLength(2);
expect(result[0].role).toBe('system');
}
});
});
describe('buildContextFromNotes', () => {
it('should build context from notes with content', () => {
const sources = [
{
title: 'Note 1',
content: 'This is the content of note 1'
},
{
title: 'Note 2',
content: 'This is the content of note 2'
}
];
const query = 'What is the content?';
const result = buildContextFromNotes(sources, query);
expect(result).toContain('Here are some relevant notes:');
expect(result).toContain('### Note 1');
expect(result).toContain('This is the content of note 1');
expect(result).toContain('### Note 2');
expect(result).toContain('This is the content of note 2');
expect(result).toContain('What is the content?');
expect(result).toContain('<note>');
expect(result).toContain('</note>');
});
it('should filter out sources without content', () => {
const sources = [
{
title: 'Note 1',
content: 'This has content'
},
{
title: 'Note 2',
content: null // No content
},
{
title: 'Note 3',
content: 'This also has content'
}
];
const query = 'Test query';
const result = buildContextFromNotes(sources, query);
expect(result).toContain('Note 1');
expect(result).not.toContain('Note 2');
expect(result).toContain('Note 3');
});
it('should handle empty sources array', () => {
const result = buildContextFromNotes([], 'Test query');
expect(result).toBe('Test query');
});
it('should handle null/undefined sources', () => {
const result1 = buildContextFromNotes(null as any, 'Test query');
const result2 = buildContextFromNotes(undefined as any, 'Test query');
expect(result1).toBe('Test query');
expect(result2).toBe('Test query');
});
it('should handle empty query', () => {
const sources = [
{
title: 'Note 1',
content: 'Content 1'
}
];
const result = buildContextFromNotes(sources, '');
expect(result).toContain('### Note 1');
expect(result).toContain('Content 1');
});
it('should handle sources with empty content arrays', () => {
const sources = [
{
title: 'Note 1',
content: 'Has content'
},
{
title: 'Note 2',
content: '' // Empty string
}
];
const query = 'Test';
const result = buildContextFromNotes(sources, query);
expect(result).toContain('Note 1');
expect(result).toContain('Has content');
expect(result).not.toContain('Note 2');
});
it('should handle sources with undefined content', () => {
const sources = [
{
title: 'Note 1',
content: 'Has content'
},
{
title: 'Note 2'
// content is undefined
}
];
const query = 'Test';
const result = buildContextFromNotes(sources, query);
expect(result).toContain('Note 1');
expect(result).toContain('Has content');
expect(result).not.toContain('Note 2');
});
it('should wrap each note in proper tags', () => {
const sources = [
{
title: 'Test Note',
content: 'Test content'
}
];
const query = 'Query';
const result = buildContextFromNotes(sources, query);
expect(result).toMatch(/<note>\s*### Test Note\s*Test content\s*<\/note>/);
});
it('should handle special characters in titles and content', () => {
const sources = [
{
title: 'Note with "quotes" & symbols',
content: 'Content with <tags> and & symbols'
}
];
const query = 'Special characters test';
const result = buildContextFromNotes(sources, query);
expect(result).toContain('Note with "quotes" & symbols');
expect(result).toContain('Content with <tags> and & symbols');
});
});
});

View File

@@ -0,0 +1,861 @@
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import { ChatService } from './chat_service.js';
import type { Message, ChatCompletionOptions } from './ai_interface.js';
// Mock dependencies
vi.mock('./chat_storage_service.js', () => ({
default: {
createChat: vi.fn(),
getChat: vi.fn(),
updateChat: vi.fn(),
deleteChat: vi.fn(),
getAllChats: vi.fn(),
recordSources: vi.fn()
}
}));
vi.mock('../log.js', () => ({
default: {
info: vi.fn(),
error: vi.fn(),
warn: vi.fn()
}
}));
vi.mock('./constants/llm_prompt_constants.js', () => ({
CONTEXT_PROMPTS: {
NOTE_CONTEXT_PROMPT: 'Context: {context}',
SEMANTIC_NOTE_CONTEXT_PROMPT: 'Query: {query}\nContext: {context}'
},
ERROR_PROMPTS: {
USER_ERRORS: {
GENERAL_ERROR: 'Sorry, I encountered an error processing your request.',
CONTEXT_ERROR: 'Sorry, I encountered an error processing the context.'
}
}
}));
vi.mock('./pipeline/chat_pipeline.js', () => ({
ChatPipeline: vi.fn().mockImplementation((config) => ({
config,
execute: vi.fn(),
getMetrics: vi.fn(),
resetMetrics: vi.fn(),
stages: {
contextExtraction: {
execute: vi.fn()
},
semanticContextExtraction: {
execute: vi.fn()
}
}
}))
}));
vi.mock('./ai_service_manager.js', () => ({
default: {
getService: vi.fn()
}
}));
describe('ChatService', () => {
let chatService: ChatService;
let mockChatStorageService: any;
let mockAiServiceManager: any;
let mockChatPipeline: any;
let mockLog: any;
beforeEach(async () => {
vi.clearAllMocks();
// Get mocked modules
mockChatStorageService = (await import('./chat_storage_service.js')).default;
mockAiServiceManager = (await import('./ai_service_manager.js')).default;
mockLog = (await import('../log.js')).default;
// Setup pipeline mock
mockChatPipeline = {
execute: vi.fn(),
getMetrics: vi.fn(),
resetMetrics: vi.fn(),
stages: {
contextExtraction: {
execute: vi.fn()
},
semanticContextExtraction: {
execute: vi.fn()
}
}
};
// Create a new ChatService instance
chatService = new ChatService();
// Replace the internal pipelines with our mock
(chatService as any).pipelines.set('default', mockChatPipeline);
(chatService as any).pipelines.set('agent', mockChatPipeline);
(chatService as any).pipelines.set('performance', mockChatPipeline);
});
afterEach(() => {
vi.restoreAllMocks();
});
describe('constructor', () => {
it('should initialize with default pipelines', () => {
expect(chatService).toBeDefined();
// Verify pipelines are created by checking internal state
expect((chatService as any).pipelines).toBeDefined();
expect((chatService as any).sessionCache).toBeDefined();
});
});
describe('createSession', () => {
it('should create a new chat session with default title', async () => {
const mockChat = {
id: 'chat-123',
title: 'New Chat',
messages: [],
noteId: 'chat-123',
createdAt: new Date(),
updatedAt: new Date(),
metadata: {}
};
mockChatStorageService.createChat.mockResolvedValueOnce(mockChat);
const session = await chatService.createSession();
expect(session).toEqual({
id: 'chat-123',
title: 'New Chat',
messages: [],
isStreaming: false
});
expect(mockChatStorageService.createChat).toHaveBeenCalledWith('New Chat', []);
});
it('should create a new chat session with custom title and messages', async () => {
const initialMessages: Message[] = [
{ role: 'user', content: 'Hello' }
];
const mockChat = {
id: 'chat-456',
title: 'Custom Chat',
messages: initialMessages,
noteId: 'chat-456',
createdAt: new Date(),
updatedAt: new Date(),
metadata: {}
};
mockChatStorageService.createChat.mockResolvedValueOnce(mockChat);
const session = await chatService.createSession('Custom Chat', initialMessages);
expect(session).toEqual({
id: 'chat-456',
title: 'Custom Chat',
messages: initialMessages,
isStreaming: false
});
expect(mockChatStorageService.createChat).toHaveBeenCalledWith('Custom Chat', initialMessages);
});
});
describe('getOrCreateSession', () => {
it('should return cached session if available', async () => {
const mockChat = {
id: 'chat-123',
title: 'Test Chat',
messages: [{ role: 'user', content: 'Hello' }],
noteId: 'chat-123',
createdAt: new Date(),
updatedAt: new Date(),
metadata: {}
};
const cachedSession = {
id: 'chat-123',
title: 'Old Title',
messages: [],
isStreaming: false
};
// Pre-populate cache
(chatService as any).sessionCache.set('chat-123', cachedSession);
mockChatStorageService.getChat.mockResolvedValueOnce(mockChat);
const session = await chatService.getOrCreateSession('chat-123');
expect(session).toEqual({
id: 'chat-123',
title: 'Test Chat', // Should be updated from storage
messages: [{ role: 'user', content: 'Hello' }], // Should be updated from storage
isStreaming: false
});
expect(mockChatStorageService.getChat).toHaveBeenCalledWith('chat-123');
});
it('should load session from storage if not cached', async () => {
const mockChat = {
id: 'chat-123',
title: 'Test Chat',
messages: [{ role: 'user', content: 'Hello' }],
noteId: 'chat-123',
createdAt: new Date(),
updatedAt: new Date(),
metadata: {}
};
mockChatStorageService.getChat.mockResolvedValueOnce(mockChat);
const session = await chatService.getOrCreateSession('chat-123');
expect(session).toEqual({
id: 'chat-123',
title: 'Test Chat',
messages: [{ role: 'user', content: 'Hello' }],
isStreaming: false
});
expect(mockChatStorageService.getChat).toHaveBeenCalledWith('chat-123');
});
it('should create new session if not found', async () => {
mockChatStorageService.getChat.mockResolvedValueOnce(null);
const mockNewChat = {
id: 'chat-new',
title: 'New Chat',
messages: [],
noteId: 'chat-new',
createdAt: new Date(),
updatedAt: new Date(),
metadata: {}
};
mockChatStorageService.createChat.mockResolvedValueOnce(mockNewChat);
const session = await chatService.getOrCreateSession('nonexistent');
expect(session).toEqual({
id: 'chat-new',
title: 'New Chat',
messages: [],
isStreaming: false
});
expect(mockChatStorageService.getChat).toHaveBeenCalledWith('nonexistent');
expect(mockChatStorageService.createChat).toHaveBeenCalledWith('New Chat', []);
});
it('should create new session when no sessionId provided', async () => {
const mockNewChat = {
id: 'chat-new',
title: 'New Chat',
messages: [],
noteId: 'chat-new',
createdAt: new Date(),
updatedAt: new Date(),
metadata: {}
};
mockChatStorageService.createChat.mockResolvedValueOnce(mockNewChat);
const session = await chatService.getOrCreateSession();
expect(session).toEqual({
id: 'chat-new',
title: 'New Chat',
messages: [],
isStreaming: false
});
expect(mockChatStorageService.createChat).toHaveBeenCalledWith('New Chat', []);
});
});
describe('sendMessage', () => {
beforeEach(() => {
const mockSession = {
id: 'chat-123',
title: 'Test Chat',
messages: [],
isStreaming: false
};
const mockChat = {
id: 'chat-123',
title: 'Test Chat',
messages: [],
noteId: 'chat-123',
createdAt: new Date(),
updatedAt: new Date(),
metadata: {}
};
mockChatStorageService.getChat.mockResolvedValue(mockChat);
mockChatStorageService.updateChat.mockResolvedValue(mockChat);
mockChatPipeline.execute.mockResolvedValue({
text: 'Hello! How can I help you?',
model: 'gpt-3.5-turbo',
provider: 'OpenAI',
usage: { promptTokens: 10, completionTokens: 8, totalTokens: 18 }
});
});
it('should send message and get AI response', async () => {
const session = await chatService.sendMessage('chat-123', 'Hello');
expect(session.messages).toHaveLength(2);
expect(session.messages[0]).toEqual({
role: 'user',
content: 'Hello'
});
expect(session.messages[1]).toEqual({
role: 'assistant',
content: 'Hello! How can I help you?',
tool_calls: undefined
});
expect(mockChatStorageService.updateChat).toHaveBeenCalledTimes(2); // Once for user message, once for complete conversation
expect(mockChatPipeline.execute).toHaveBeenCalled();
});
it('should handle streaming callback', async () => {
const streamCallback = vi.fn();
await chatService.sendMessage('chat-123', 'Hello', {}, streamCallback);
expect(mockChatPipeline.execute).toHaveBeenCalledWith(
expect.objectContaining({
streamCallback
})
);
});
it('should update title for first message', async () => {
const mockChat = {
id: 'chat-123',
title: 'New Chat',
messages: [],
noteId: 'chat-123',
createdAt: new Date(),
updatedAt: new Date(),
metadata: {}
};
mockChatStorageService.getChat.mockResolvedValue(mockChat);
await chatService.sendMessage('chat-123', 'What is the weather like?');
// Should update title based on first message
expect(mockChatStorageService.updateChat).toHaveBeenLastCalledWith(
'chat-123',
expect.any(Array),
'What is the weather like?'
);
});
it('should handle errors gracefully', async () => {
mockChatPipeline.execute.mockRejectedValueOnce(new Error('AI service error'));
const session = await chatService.sendMessage('chat-123', 'Hello');
expect(session.messages).toHaveLength(2);
expect(session.messages[1]).toEqual({
role: 'assistant',
content: 'Sorry, I encountered an error processing your request.'
});
expect(session.isStreaming).toBe(false);
expect(mockChatStorageService.updateChat).toHaveBeenCalledWith(
'chat-123',
expect.arrayContaining([
expect.objectContaining({
role: 'assistant',
content: 'Sorry, I encountered an error processing your request.'
})
])
);
});
it('should handle tool calls in response', async () => {
const toolCalls = [{
id: 'call_123',
type: 'function' as const,
function: {
name: 'searchNotes',
arguments: '{"query": "test"}'
}
}];
mockChatPipeline.execute.mockResolvedValueOnce({
text: 'I need to search for notes.',
model: 'gpt-4',
provider: 'OpenAI',
tool_calls: toolCalls,
usage: { promptTokens: 10, completionTokens: 8, totalTokens: 18 }
});
const session = await chatService.sendMessage('chat-123', 'Search for notes about AI');
expect(session.messages[1]).toEqual({
role: 'assistant',
content: 'I need to search for notes.',
tool_calls: toolCalls
});
});
});
describe('sendContextAwareMessage', () => {
beforeEach(() => {
const mockSession = {
id: 'chat-123',
title: 'Test Chat',
messages: [],
isStreaming: false
};
const mockChat = {
id: 'chat-123',
title: 'Test Chat',
messages: [],
noteId: 'chat-123',
createdAt: new Date(),
updatedAt: new Date(),
metadata: {}
};
mockChatStorageService.getChat.mockResolvedValue(mockChat);
mockChatStorageService.updateChat.mockResolvedValue(mockChat);
mockChatPipeline.execute.mockResolvedValue({
text: 'Based on the context, here is my response.',
model: 'gpt-4',
provider: 'OpenAI',
usage: { promptTokens: 20, completionTokens: 15, totalTokens: 35 }
});
});
it('should send context-aware message with note ID', async () => {
const session = await chatService.sendContextAwareMessage(
'chat-123',
'What is this note about?',
'note-456'
);
expect(session.messages).toHaveLength(2);
expect(session.messages[0]).toEqual({
role: 'user',
content: 'What is this note about?'
});
expect(mockChatPipeline.execute).toHaveBeenCalledWith(
expect.objectContaining({
noteId: 'note-456',
query: 'What is this note about?',
showThinking: false
})
);
expect(mockChatStorageService.updateChat).toHaveBeenLastCalledWith(
'chat-123',
expect.any(Array),
undefined,
expect.objectContaining({
contextNoteId: 'note-456'
})
);
});
it('should use agent pipeline when showThinking is enabled', async () => {
await chatService.sendContextAwareMessage(
'chat-123',
'Analyze this note',
'note-456',
{ showThinking: true }
);
expect(mockChatPipeline.execute).toHaveBeenCalledWith(
expect.objectContaining({
showThinking: true
})
);
});
it('should handle errors in context-aware messages', async () => {
mockChatPipeline.execute.mockRejectedValueOnce(new Error('Context error'));
const session = await chatService.sendContextAwareMessage(
'chat-123',
'What is this note about?',
'note-456'
);
expect(session.messages[1]).toEqual({
role: 'assistant',
content: 'Sorry, I encountered an error processing the context.'
});
});
});
describe('addNoteContext', () => {
it('should add note context to session', async () => {
const mockSession = {
id: 'chat-123',
title: 'Test Chat',
messages: [
{ role: 'user', content: 'Tell me about AI features' }
],
isStreaming: false
};
const mockChat = {
id: 'chat-123',
title: 'Test Chat',
messages: mockSession.messages,
noteId: 'chat-123',
createdAt: new Date(),
updatedAt: new Date(),
metadata: {}
};
mockChatStorageService.getChat.mockResolvedValue(mockChat);
mockChatStorageService.updateChat.mockResolvedValue(mockChat);
// Mock the pipeline's context extraction stage
mockChatPipeline.stages.contextExtraction.execute.mockResolvedValue({
context: 'This note contains information about AI features...',
sources: [
{
noteId: 'note-456',
title: 'AI Features',
similarity: 0.95,
content: 'AI features content'
}
]
});
const session = await chatService.addNoteContext('chat-123', 'note-456');
expect(session.messages).toHaveLength(2);
expect(session.messages[1]).toEqual({
role: 'user',
content: 'Context: This note contains information about AI features...'
});
expect(mockChatStorageService.recordSources).toHaveBeenCalledWith(
'chat-123',
[expect.objectContaining({
noteId: 'note-456',
title: 'AI Features',
similarity: 0.95,
content: 'AI features content'
})]
);
});
});
describe('addSemanticNoteContext', () => {
it('should add semantic note context to session', async () => {
const mockSession = {
id: 'chat-123',
title: 'Test Chat',
messages: [],
isStreaming: false
};
const mockChat = {
id: 'chat-123',
title: 'Test Chat',
messages: [],
noteId: 'chat-123',
createdAt: new Date(),
updatedAt: new Date(),
metadata: {}
};
mockChatStorageService.getChat.mockResolvedValue(mockChat);
mockChatStorageService.updateChat.mockResolvedValue(mockChat);
mockChatPipeline.stages.semanticContextExtraction.execute.mockResolvedValue({
context: 'Semantic context about machine learning...',
sources: []
});
const session = await chatService.addSemanticNoteContext(
'chat-123',
'note-456',
'machine learning algorithms'
);
expect(session.messages).toHaveLength(1);
expect(session.messages[0]).toEqual({
role: 'user',
content: 'Query: machine learning algorithms\nContext: Semantic context about machine learning...'
});
expect(mockChatPipeline.stages.semanticContextExtraction.execute).toHaveBeenCalledWith({
noteId: 'note-456',
query: 'machine learning algorithms'
});
});
});
describe('getAllSessions', () => {
it('should return all chat sessions', async () => {
const mockChats = [
{
id: 'chat-1',
title: 'Chat 1',
messages: [{ role: 'user', content: 'Hello' }],
noteId: 'chat-1',
createdAt: new Date(),
updatedAt: new Date(),
metadata: {}
},
{
id: 'chat-2',
title: 'Chat 2',
messages: [{ role: 'user', content: 'Hi' }],
noteId: 'chat-2',
createdAt: new Date(),
updatedAt: new Date(),
metadata: {}
}
];
mockChatStorageService.getAllChats.mockResolvedValue(mockChats);
const sessions = await chatService.getAllSessions();
expect(sessions).toHaveLength(2);
expect(sessions[0]).toEqual({
id: 'chat-1',
title: 'Chat 1',
messages: [{ role: 'user', content: 'Hello' }],
isStreaming: false
});
expect(sessions[1]).toEqual({
id: 'chat-2',
title: 'Chat 2',
messages: [{ role: 'user', content: 'Hi' }],
isStreaming: false
});
});
it('should update cached sessions with latest data', async () => {
const mockChats = [
{
id: 'chat-1',
title: 'Updated Title',
messages: [{ role: 'user', content: 'Updated message' }],
noteId: 'chat-1',
createdAt: new Date(),
updatedAt: new Date(),
metadata: {}
}
];
// Pre-populate cache with old data
(chatService as any).sessionCache.set('chat-1', {
id: 'chat-1',
title: 'Old Title',
messages: [{ role: 'user', content: 'Old message' }],
isStreaming: true
});
mockChatStorageService.getAllChats.mockResolvedValue(mockChats);
const sessions = await chatService.getAllSessions();
expect(sessions[0]).toEqual({
id: 'chat-1',
title: 'Updated Title',
messages: [{ role: 'user', content: 'Updated message' }],
isStreaming: true // Should preserve streaming state
});
});
});
describe('deleteSession', () => {
it('should delete session from cache and storage', async () => {
// Pre-populate cache
(chatService as any).sessionCache.set('chat-123', {
id: 'chat-123',
title: 'Test Chat',
messages: [],
isStreaming: false
});
mockChatStorageService.deleteChat.mockResolvedValue(true);
const result = await chatService.deleteSession('chat-123');
expect(result).toBe(true);
expect((chatService as any).sessionCache.has('chat-123')).toBe(false);
expect(mockChatStorageService.deleteChat).toHaveBeenCalledWith('chat-123');
});
});
describe('generateChatCompletion', () => {
it('should use AI service directly for simple completion', async () => {
const messages: Message[] = [
{ role: 'user', content: 'Hello' }
];
const mockService = {
getName: () => 'OpenAI',
generateChatCompletion: vi.fn().mockResolvedValue({
text: 'Hello! How can I help?',
model: 'gpt-3.5-turbo',
provider: 'OpenAI'
})
};
mockAiServiceManager.getService.mockResolvedValue(mockService);
const result = await chatService.generateChatCompletion(messages);
expect(result).toEqual({
text: 'Hello! How can I help?',
model: 'gpt-3.5-turbo',
provider: 'OpenAI'
});
expect(mockService.generateChatCompletion).toHaveBeenCalledWith(messages, {});
});
it('should use pipeline for advanced context', async () => {
const messages: Message[] = [
{ role: 'user', content: 'Hello' }
];
const options = {
useAdvancedContext: true,
noteId: 'note-123'
};
// Mock AI service for this test
const mockService = {
getName: () => 'OpenAI',
generateChatCompletion: vi.fn()
};
mockAiServiceManager.getService.mockResolvedValue(mockService);
mockChatPipeline.execute.mockResolvedValue({
text: 'Response with context',
model: 'gpt-4',
provider: 'OpenAI',
tool_calls: []
});
const result = await chatService.generateChatCompletion(messages, options);
expect(result).toEqual({
text: 'Response with context',
model: 'gpt-4',
provider: 'OpenAI',
tool_calls: []
});
expect(mockChatPipeline.execute).toHaveBeenCalledWith({
messages,
options,
query: 'Hello',
noteId: 'note-123'
});
});
it('should throw error when no AI service available', async () => {
const messages: Message[] = [
{ role: 'user', content: 'Hello' }
];
mockAiServiceManager.getService.mockResolvedValue(null);
await expect(chatService.generateChatCompletion(messages)).rejects.toThrow(
'No AI service available'
);
});
});
describe('pipeline metrics', () => {
it('should get pipeline metrics', () => {
mockChatPipeline.getMetrics.mockReturnValue({ requestCount: 5 });
const metrics = chatService.getPipelineMetrics();
expect(metrics).toEqual({ requestCount: 5 });
expect(mockChatPipeline.getMetrics).toHaveBeenCalled();
});
it('should reset pipeline metrics', () => {
chatService.resetPipelineMetrics();
expect(mockChatPipeline.resetMetrics).toHaveBeenCalled();
});
it('should handle different pipeline types', () => {
mockChatPipeline.getMetrics.mockReturnValue({ requestCount: 3 });
const metrics = chatService.getPipelineMetrics('agent');
expect(metrics).toEqual({ requestCount: 3 });
});
});
describe('generateTitleFromMessages', () => {
it('should generate title from first user message', () => {
const messages: Message[] = [
{ role: 'user', content: 'What is machine learning?' },
{ role: 'assistant', content: 'Machine learning is...' }
];
// Access private method for testing
const generateTitle = (chatService as any).generateTitleFromMessages.bind(chatService);
const title = generateTitle(messages);
expect(title).toBe('What is machine learning?');
});
it('should truncate long titles', () => {
const messages: Message[] = [
{ role: 'user', content: 'This is a very long message that should be truncated because it exceeds the maximum length' },
{ role: 'assistant', content: 'Response' }
];
const generateTitle = (chatService as any).generateTitleFromMessages.bind(chatService);
const title = generateTitle(messages);
expect(title).toBe('This is a very long message...');
expect(title.length).toBe(30);
});
it('should return default title for empty or invalid messages', () => {
const generateTitle = (chatService as any).generateTitleFromMessages.bind(chatService);
expect(generateTitle([])).toBe('New Chat');
expect(generateTitle([{ role: 'assistant', content: 'Hello' }])).toBe('New Chat');
});
it('should use first line for multiline messages', () => {
const messages: Message[] = [
{ role: 'user', content: 'First line\nSecond line\nThird line' },
{ role: 'assistant', content: 'Response' }
];
const generateTitle = (chatService as any).generateTitleFromMessages.bind(chatService);
const title = generateTitle(messages);
expect(title).toBe('First line');
});
});
});

View File

@@ -0,0 +1,625 @@
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import { ChatStorageService } from './chat_storage_service.js';
import type { Message } from './ai_interface.js';
// Mock dependencies
vi.mock('../notes.js', () => ({
default: {
createNewNote: vi.fn()
}
}));
vi.mock('../sql.js', () => ({
default: {
getRow: vi.fn(),
getRows: vi.fn(),
execute: vi.fn()
}
}));
vi.mock('../attributes.js', () => ({
default: {
createLabel: vi.fn()
}
}));
vi.mock('../log.js', () => ({
default: {
error: vi.fn(),
info: vi.fn(),
warn: vi.fn()
}
}));
vi.mock('i18next', () => ({
t: vi.fn((key: string) => {
switch (key) {
case 'ai.chat.root_note_title':
return 'AI Chats';
case 'ai.chat.root_note_content':
return 'This note contains all AI chat conversations.';
case 'ai.chat.new_chat_title':
return 'New Chat';
default:
return key;
}
})
}));
describe('ChatStorageService', () => {
let chatStorageService: ChatStorageService;
let mockNotes: any;
let mockSql: any;
let mockAttributes: any;
beforeEach(async () => {
vi.clearAllMocks();
chatStorageService = new ChatStorageService();
// Get mocked modules
mockNotes = (await import('../notes.js')).default;
mockSql = (await import('../sql.js')).default;
mockAttributes = (await import('../attributes.js')).default;
});
afterEach(() => {
vi.restoreAllMocks();
});
describe('getOrCreateChatRoot', () => {
it('should return existing chat root if it exists', async () => {
mockSql.getRow.mockResolvedValueOnce({ noteId: 'existing-root-123' });
const rootId = await chatStorageService.getOrCreateChatRoot();
expect(rootId).toBe('existing-root-123');
expect(mockSql.getRow).toHaveBeenCalledWith(
'SELECT noteId FROM attributes WHERE name = ? AND value = ?',
['label', 'triliumChatRoot']
);
});
it('should create new chat root if it does not exist', async () => {
mockSql.getRow.mockResolvedValueOnce(null);
mockNotes.createNewNote.mockReturnValueOnce({
note: { noteId: 'new-root-123' }
});
const rootId = await chatStorageService.getOrCreateChatRoot();
expect(rootId).toBe('new-root-123');
expect(mockNotes.createNewNote).toHaveBeenCalledWith({
parentNoteId: 'root',
title: 'AI Chats',
type: 'text',
content: 'This note contains all AI chat conversations.'
});
expect(mockAttributes.createLabel).toHaveBeenCalledWith(
'new-root-123',
'triliumChatRoot',
''
);
});
});
describe('createChat', () => {
it('should create a new chat with default title', async () => {
const mockDate = new Date('2024-01-01T00:00:00Z');
vi.useFakeTimers();
vi.setSystemTime(mockDate);
mockSql.getRow.mockResolvedValueOnce({ noteId: 'root-123' });
mockNotes.createNewNote.mockReturnValueOnce({
note: { noteId: 'chat-123' }
});
const messages: Message[] = [
{ role: 'user', content: 'Hello' }
];
const result = await chatStorageService.createChat('Test Chat', messages);
expect(result).toEqual({
id: 'chat-123',
title: 'Test Chat',
messages,
noteId: 'chat-123',
createdAt: mockDate,
updatedAt: mockDate,
metadata: {}
});
expect(mockNotes.createNewNote).toHaveBeenCalledWith({
parentNoteId: 'root-123',
title: 'Test Chat',
type: 'code',
mime: 'application/json',
content: JSON.stringify({
messages,
metadata: {},
createdAt: mockDate,
updatedAt: mockDate
}, null, 2)
});
expect(mockAttributes.createLabel).toHaveBeenCalledWith(
'chat-123',
'triliumChat',
''
);
vi.useRealTimers();
});
it('should create chat with custom metadata', async () => {
mockSql.getRow.mockResolvedValueOnce({ noteId: 'root-123' });
mockNotes.createNewNote.mockReturnValueOnce({
note: { noteId: 'chat-123' }
});
const metadata = {
model: 'gpt-4',
provider: 'openai',
temperature: 0.7
};
const result = await chatStorageService.createChat('Test Chat', [], metadata);
expect(result.metadata).toEqual(metadata);
});
it('should generate default title if none provided', async () => {
mockSql.getRow.mockResolvedValueOnce({ noteId: 'root-123' });
mockNotes.createNewNote.mockReturnValueOnce({
note: { noteId: 'chat-123' }
});
const result = await chatStorageService.createChat('');
expect(result.title).toContain('New Chat');
expect(result.title).toMatch(/\d{1,2}\/\d{1,2}\/\d{4}/); // Date pattern
});
});
describe('getAllChats', () => {
it('should return all chats with parsed content', async () => {
const mockChats = [
{
noteId: 'chat-1',
title: 'Chat 1',
dateCreated: '2024-01-01T00:00:00Z',
dateModified: '2024-01-01T01:00:00Z',
content: JSON.stringify({
messages: [{ role: 'user', content: 'Hello' }],
metadata: { model: 'gpt-4' },
createdAt: '2024-01-01T00:00:00Z',
updatedAt: '2024-01-01T01:00:00Z'
})
},
{
noteId: 'chat-2',
title: 'Chat 2',
dateCreated: '2024-01-02T00:00:00Z',
dateModified: '2024-01-02T01:00:00Z',
content: JSON.stringify({
messages: [{ role: 'user', content: 'Hi' }],
metadata: { provider: 'anthropic' }
})
}
];
mockSql.getRows.mockResolvedValueOnce(mockChats);
const result = await chatStorageService.getAllChats();
expect(result).toHaveLength(2);
expect(result[0]).toEqual({
id: 'chat-1',
title: 'Chat 1',
messages: [{ role: 'user', content: 'Hello' }],
noteId: 'chat-1',
createdAt: new Date('2024-01-01T00:00:00Z'),
updatedAt: new Date('2024-01-01T01:00:00Z'),
metadata: { model: 'gpt-4' }
});
expect(mockSql.getRows).toHaveBeenCalledWith(
expect.stringContaining('SELECT notes.noteId, notes.title'),
['label', 'triliumChat']
);
});
it('should handle chats with invalid JSON content', async () => {
const mockChats = [
{
noteId: 'chat-1',
title: 'Chat 1',
dateCreated: '2024-01-01T00:00:00Z',
dateModified: '2024-01-01T01:00:00Z',
content: 'invalid json'
}
];
mockSql.getRows.mockResolvedValueOnce(mockChats);
const consoleErrorSpy = vi.spyOn(console, 'error').mockImplementation(() => {});
const result = await chatStorageService.getAllChats();
expect(result).toHaveLength(1);
expect(result[0]).toEqual({
id: 'chat-1',
title: 'Chat 1',
messages: [],
noteId: 'chat-1',
createdAt: new Date('2024-01-01T00:00:00Z'),
updatedAt: new Date('2024-01-01T01:00:00Z'),
metadata: {}
});
expect(consoleErrorSpy).toHaveBeenCalledWith('Failed to parse chat content:', expect.any(Error));
consoleErrorSpy.mockRestore();
});
});
describe('getChat', () => {
it('should return specific chat by ID', async () => {
const mockChat = {
noteId: 'chat-123',
title: 'Test Chat',
dateCreated: '2024-01-01T00:00:00Z',
dateModified: '2024-01-01T01:00:00Z',
content: JSON.stringify({
messages: [{ role: 'user', content: 'Hello' }],
metadata: { model: 'gpt-4' },
createdAt: '2024-01-01T00:00:00Z',
updatedAt: '2024-01-01T01:00:00Z'
})
};
mockSql.getRow.mockResolvedValueOnce(mockChat);
const result = await chatStorageService.getChat('chat-123');
expect(result).toEqual({
id: 'chat-123',
title: 'Test Chat',
messages: [{ role: 'user', content: 'Hello' }],
noteId: 'chat-123',
createdAt: new Date('2024-01-01T00:00:00Z'),
updatedAt: new Date('2024-01-01T01:00:00Z'),
metadata: { model: 'gpt-4' }
});
expect(mockSql.getRow).toHaveBeenCalledWith(
expect.stringContaining('SELECT notes.noteId, notes.title'),
['chat-123']
);
});
it('should return null if chat not found', async () => {
mockSql.getRow.mockResolvedValueOnce(null);
const result = await chatStorageService.getChat('nonexistent');
expect(result).toBeNull();
});
});
describe('updateChat', () => {
it('should update chat messages and metadata', async () => {
const existingChat = {
id: 'chat-123',
title: 'Test Chat',
messages: [{ role: 'user', content: 'Hello' }],
noteId: 'chat-123',
createdAt: new Date('2024-01-01T00:00:00Z'),
updatedAt: new Date('2024-01-01T01:00:00Z'),
metadata: { model: 'gpt-4' }
};
const newMessages: Message[] = [
{ role: 'user', content: 'Hello' },
{ role: 'assistant', content: 'Hi there!' }
];
const newMetadata = { provider: 'openai', temperature: 0.7 };
// Mock getChat to return existing chat
vi.spyOn(chatStorageService, 'getChat').mockResolvedValueOnce(existingChat);
const mockDate = new Date('2024-01-01T02:00:00Z');
vi.useFakeTimers();
vi.setSystemTime(mockDate);
const result = await chatStorageService.updateChat(
'chat-123',
newMessages,
'Updated Title',
newMetadata
);
expect(result).toEqual({
...existingChat,
title: 'Updated Title',
messages: newMessages,
updatedAt: mockDate,
metadata: { model: 'gpt-4', provider: 'openai', temperature: 0.7 }
});
expect(mockSql.execute).toHaveBeenCalledWith(
'UPDATE blobs SET content = ? WHERE blobId = (SELECT blobId FROM notes WHERE noteId = ?)',
[
JSON.stringify({
messages: newMessages,
metadata: { model: 'gpt-4', provider: 'openai', temperature: 0.7 },
createdAt: existingChat.createdAt,
updatedAt: mockDate
}, null, 2),
'chat-123'
]
);
expect(mockSql.execute).toHaveBeenCalledWith(
'UPDATE notes SET title = ? WHERE noteId = ?',
['Updated Title', 'chat-123']
);
vi.useRealTimers();
});
it('should return null if chat not found', async () => {
vi.spyOn(chatStorageService, 'getChat').mockResolvedValueOnce(null);
const result = await chatStorageService.updateChat(
'nonexistent',
[],
'Title'
);
expect(result).toBeNull();
});
});
describe('deleteChat', () => {
it('should mark chat as deleted', async () => {
const result = await chatStorageService.deleteChat('chat-123');
expect(result).toBe(true);
expect(mockSql.execute).toHaveBeenCalledWith(
'UPDATE notes SET isDeleted = 1 WHERE noteId = ?',
['chat-123']
);
});
it('should return false on SQL error', async () => {
mockSql.execute.mockRejectedValueOnce(new Error('SQL error'));
const consoleErrorSpy = vi.spyOn(console, 'error').mockImplementation(() => {});
const result = await chatStorageService.deleteChat('chat-123');
expect(result).toBe(false);
expect(consoleErrorSpy).toHaveBeenCalledWith('Failed to delete chat:', expect.any(Error));
consoleErrorSpy.mockRestore();
});
});
describe('recordToolExecution', () => {
it('should record tool execution in chat metadata', async () => {
const existingChat = {
id: 'chat-123',
title: 'Test Chat',
messages: [],
noteId: 'chat-123',
createdAt: new Date(),
updatedAt: new Date(),
metadata: {}
};
vi.spyOn(chatStorageService, 'getChat').mockResolvedValueOnce(existingChat);
vi.spyOn(chatStorageService, 'updateChat').mockResolvedValueOnce(existingChat);
const result = await chatStorageService.recordToolExecution(
'chat-123',
'searchNotes',
'tool-123',
{ query: 'test' },
'Found 3 notes'
);
expect(result).toBe(true);
expect(chatStorageService.updateChat).toHaveBeenCalledWith(
'chat-123',
[],
undefined,
expect.objectContaining({
toolExecutions: expect.arrayContaining([
expect.objectContaining({
id: 'tool-123',
name: 'searchNotes',
arguments: { query: 'test' },
result: 'Found 3 notes'
})
])
})
);
});
it('should return false if chat not found', async () => {
vi.spyOn(chatStorageService, 'getChat').mockResolvedValueOnce(null);
const result = await chatStorageService.recordToolExecution(
'nonexistent',
'searchNotes',
'tool-123',
{ query: 'test' },
'Result'
);
expect(result).toBe(false);
});
});
describe('recordSources', () => {
it('should record sources in chat metadata', async () => {
const existingChat = {
id: 'chat-123',
title: 'Test Chat',
messages: [],
noteId: 'chat-123',
createdAt: new Date(),
updatedAt: new Date(),
metadata: {}
};
const sources = [
{
noteId: 'note-1',
title: 'Source Note 1',
similarity: 0.95
},
{
noteId: 'note-2',
title: 'Source Note 2',
similarity: 0.87
}
];
vi.spyOn(chatStorageService, 'getChat').mockResolvedValueOnce(existingChat);
vi.spyOn(chatStorageService, 'updateChat').mockResolvedValueOnce(existingChat);
const result = await chatStorageService.recordSources('chat-123', sources);
expect(result).toBe(true);
expect(chatStorageService.updateChat).toHaveBeenCalledWith(
'chat-123',
[],
undefined,
expect.objectContaining({
sources
})
);
});
});
describe('extractToolExecutionsFromMessages', () => {
it('should extract tool executions from assistant messages with tool calls', async () => {
const messages: Message[] = [
{
role: 'assistant',
content: 'I need to search for notes.',
tool_calls: [
{
id: 'call_123',
type: 'function',
function: {
name: 'searchNotes',
arguments: '{"query": "test"}'
}
}
]
},
{
role: 'tool',
content: 'Found 2 notes',
tool_call_id: 'call_123'
},
{
role: 'assistant',
content: 'Based on the search results...'
}
];
// Access private method through any cast for testing
const extractToolExecutions = (chatStorageService as any).extractToolExecutionsFromMessages.bind(chatStorageService);
const toolExecutions = extractToolExecutions(messages, []);
expect(toolExecutions).toHaveLength(1);
expect(toolExecutions[0]).toEqual(
expect.objectContaining({
id: 'call_123',
name: 'searchNotes',
arguments: { query: 'test' },
result: 'Found 2 notes',
timestamp: expect.any(Date)
})
);
});
it('should handle error responses from tools', async () => {
const messages: Message[] = [
{
role: 'assistant',
content: 'I need to search for notes.',
tool_calls: [
{
id: 'call_123',
type: 'function',
function: {
name: 'searchNotes',
arguments: '{"query": "test"}'
}
}
]
},
{
role: 'tool',
content: 'Error: Search service unavailable',
tool_call_id: 'call_123'
}
];
const extractToolExecutions = (chatStorageService as any).extractToolExecutionsFromMessages.bind(chatStorageService);
const toolExecutions = extractToolExecutions(messages, []);
expect(toolExecutions).toHaveLength(1);
expect(toolExecutions[0]).toEqual(
expect.objectContaining({
id: 'call_123',
name: 'searchNotes',
error: 'Search service unavailable',
result: 'Error: Search service unavailable'
})
);
});
it('should not duplicate existing tool executions', async () => {
const existingToolExecutions = [
{
id: 'call_123',
name: 'searchNotes',
arguments: { query: 'existing' },
result: 'Previous result',
timestamp: new Date()
}
];
const messages: Message[] = [
{
role: 'assistant',
content: 'I need to search for notes.',
tool_calls: [
{
id: 'call_123', // Same ID as existing
type: 'function',
function: {
name: 'searchNotes',
arguments: '{"query": "test"}'
}
}
]
},
{
role: 'tool',
content: 'Found 2 notes',
tool_call_id: 'call_123'
}
];
const extractToolExecutions = (chatStorageService as any).extractToolExecutionsFromMessages.bind(chatStorageService);
const toolExecutions = extractToolExecutions(messages, existingToolExecutions);
expect(toolExecutions).toHaveLength(1);
expect(toolExecutions[0].arguments).toEqual({ query: 'existing' });
});
});
});

View File

@@ -0,0 +1,341 @@
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import * as configHelpers from './configuration_helpers.js';
import configurationManager from './configuration_manager.js';
import optionService from '../../options.js';
import type { ProviderType, ModelIdentifier, ModelConfig } from '../interfaces/configuration_interfaces.js';
// Mock dependencies
vi.mock('./configuration_manager.js', () => ({
default: {
parseModelIdentifier: vi.fn(),
createModelConfig: vi.fn(),
getAIConfig: vi.fn(),
validateConfiguration: vi.fn()
}
}));
vi.mock('../../options.js', () => ({
default: {
getOption: vi.fn()
}
}));
vi.mock('../../log.js', () => ({
default: {
info: vi.fn(),
error: vi.fn(),
warn: vi.fn()
}
}));
describe('configuration_helpers', () => {
beforeEach(() => {
vi.clearAllMocks();
});
afterEach(() => {
vi.restoreAllMocks();
});
describe('getSelectedProvider', () => {
it('should return the selected provider', async () => {
vi.mocked(optionService.getOption).mockReturnValueOnce('openai');
const result = await configHelpers.getSelectedProvider();
expect(result).toBe('openai');
expect(optionService.getOption).toHaveBeenCalledWith('aiSelectedProvider');
});
it('should return null if no provider is selected', async () => {
vi.mocked(optionService.getOption).mockReturnValueOnce(null);
const result = await configHelpers.getSelectedProvider();
expect(result).toBeNull();
});
it('should handle invalid provider and return null', async () => {
vi.mocked(optionService.getOption).mockReturnValueOnce('invalid-provider');
const result = await configHelpers.getSelectedProvider();
expect(result).toBe('invalid-provider' as ProviderType);
});
});
describe('parseModelIdentifier', () => {
it('should delegate to configuration manager', () => {
const mockIdentifier: ModelIdentifier = {
provider: 'openai',
modelId: 'gpt-4'
};
vi.mocked(configurationManager.parseModelIdentifier).mockReturnValueOnce(mockIdentifier);
const result = configHelpers.parseModelIdentifier('openai:gpt-4');
expect(result).toBe(mockIdentifier);
expect(configurationManager.parseModelIdentifier).toHaveBeenCalledWith('openai:gpt-4');
});
});
describe('createModelConfig', () => {
it('should delegate to configuration manager', () => {
const mockConfig: ModelConfig = {
provider: 'openai',
model: 'gpt-4',
temperature: 0.7,
maxTokens: 1000
};
vi.mocked(configurationManager.createModelConfig).mockReturnValueOnce(mockConfig);
const result = configHelpers.createModelConfig('gpt-4', 'openai');
expect(result).toBe(mockConfig);
expect(configurationManager.createModelConfig).toHaveBeenCalledWith('gpt-4', 'openai');
});
});
describe('getDefaultModelForProvider', () => {
it('should return default model for provider', async () => {
vi.mocked(configurationManager.getAIConfig).mockResolvedValueOnce({
enabled: true,
selectedProvider: 'openai',
defaultModels: {
openai: 'gpt-4',
anthropic: 'claude-3',
ollama: 'llama2'
},
providerSettings: {}
} as any);
const result = await configHelpers.getDefaultModelForProvider('openai');
expect(result).toBe('gpt-4');
});
it('should return undefined if no default model', async () => {
vi.mocked(configurationManager.getAIConfig).mockResolvedValueOnce({
enabled: true,
selectedProvider: 'openai',
defaultModels: {},
providerSettings: {}
} as any);
const result = await configHelpers.getDefaultModelForProvider('openai');
expect(result).toBeUndefined();
});
});
describe('getProviderSettings', () => {
it('should return provider settings', async () => {
const mockSettings = {
apiKey: 'test-key',
baseUrl: 'https://api.openai.com'
};
vi.mocked(configurationManager.getAIConfig).mockResolvedValueOnce({
enabled: true,
selectedProvider: 'openai',
defaultModels: {},
providerSettings: {
openai: mockSettings
}
} as any);
const result = await configHelpers.getProviderSettings('openai');
expect(result).toBe(mockSettings);
});
it('should return undefined if no settings', async () => {
vi.mocked(configurationManager.getAIConfig).mockResolvedValueOnce({
enabled: true,
selectedProvider: 'openai',
defaultModels: {},
providerSettings: {}
} as any);
const result = await configHelpers.getProviderSettings('openai');
expect(result).toBeUndefined();
});
});
describe('isAIEnabled', () => {
it('should return true if AI is enabled', async () => {
vi.mocked(configurationManager.getAIConfig).mockResolvedValueOnce({
enabled: true,
selectedProvider: 'openai',
defaultModels: {},
providerSettings: {}
} as any);
const result = await configHelpers.isAIEnabled();
expect(result).toBe(true);
});
it('should return false if AI is disabled', async () => {
vi.mocked(configurationManager.getAIConfig).mockResolvedValueOnce({
enabled: false,
selectedProvider: null,
defaultModels: {},
providerSettings: {}
} as any);
const result = await configHelpers.isAIEnabled();
expect(result).toBe(false);
});
});
describe('isProviderConfigured', () => {
it('should return true for configured OpenAI', async () => {
vi.mocked(configurationManager.getAIConfig).mockResolvedValueOnce({
enabled: true,
selectedProvider: 'openai',
defaultModels: {},
providerSettings: {
openai: {
apiKey: 'test-key'
}
}
} as any);
const result = await configHelpers.isProviderConfigured('openai');
expect(result).toBe(true);
});
it('should return false for unconfigured OpenAI', async () => {
vi.mocked(configurationManager.getAIConfig).mockResolvedValueOnce({
enabled: true,
selectedProvider: 'openai',
defaultModels: {},
providerSettings: {
openai: {}
}
} as any);
const result = await configHelpers.isProviderConfigured('openai');
expect(result).toBe(false);
});
it('should return true for configured Anthropic', async () => {
vi.mocked(configurationManager.getAIConfig).mockResolvedValueOnce({
enabled: true,
selectedProvider: 'anthropic',
defaultModels: {},
providerSettings: {
anthropic: {
apiKey: 'test-key'
}
}
} as any);
const result = await configHelpers.isProviderConfigured('anthropic');
expect(result).toBe(true);
});
it('should return true for configured Ollama', async () => {
vi.mocked(configurationManager.getAIConfig).mockResolvedValueOnce({
enabled: true,
selectedProvider: 'ollama',
defaultModels: {},
providerSettings: {
ollama: {
baseUrl: 'http://localhost:11434'
}
}
} as any);
const result = await configHelpers.isProviderConfigured('ollama');
expect(result).toBe(true);
});
it('should return false for unknown provider', async () => {
vi.mocked(configurationManager.getAIConfig).mockResolvedValueOnce({
enabled: true,
selectedProvider: null,
defaultModels: {},
providerSettings: {}
} as any);
const result = await configHelpers.isProviderConfigured('unknown' as ProviderType);
expect(result).toBe(false);
});
});
describe('getAvailableSelectedProvider', () => {
it('should return selected provider if configured', async () => {
vi.mocked(optionService.getOption).mockReturnValueOnce('openai');
vi.mocked(configurationManager.getAIConfig).mockResolvedValueOnce({
enabled: true,
selectedProvider: 'openai',
defaultModels: {},
providerSettings: {
openai: {
apiKey: 'test-key'
}
}
} as any);
const result = await configHelpers.getAvailableSelectedProvider();
expect(result).toBe('openai');
});
it('should return null if no provider selected', async () => {
vi.mocked(optionService.getOption).mockReturnValueOnce(null);
const result = await configHelpers.getAvailableSelectedProvider();
expect(result).toBeNull();
});
it('should return null if selected provider not configured', async () => {
vi.mocked(optionService.getOption).mockReturnValueOnce('openai');
vi.mocked(configurationManager.getAIConfig).mockResolvedValueOnce({
enabled: true,
selectedProvider: 'openai',
defaultModels: {},
providerSettings: {
openai: {} // No API key
}
} as any);
const result = await configHelpers.getAvailableSelectedProvider();
expect(result).toBeNull();
});
});
describe('validateConfiguration', () => {
it('should delegate to configuration manager', async () => {
const mockValidation = {
isValid: true,
errors: [],
warnings: []
};
vi.mocked(configurationManager.validateConfiguration).mockResolvedValueOnce(mockValidation);
const result = await configHelpers.validateConfiguration();
expect(result).toBe(mockValidation);
expect(configurationManager.validateConfiguration).toHaveBeenCalled();
});
});
describe('clearConfigurationCache', () => {
it('should delegate to configuration manager', () => {
configHelpers.clearConfigurationCache();
expect(configurationManager.clearConfigurationCache).toHaveBeenCalled();
});
});
});

View File

@@ -0,0 +1,433 @@
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import { ContextService } from './context_service.js';
import type { ContextOptions } from './context_service.js';
import type { NoteSearchResult } from '../../interfaces/context_interfaces.js';
import type { LLMServiceInterface } from '../../interfaces/agent_tool_interfaces.js';
// Mock dependencies
vi.mock('../../../log.js', () => ({
default: {
info: vi.fn(),
error: vi.fn(),
warn: vi.fn()
}
}));
vi.mock('../modules/cache_manager.js', () => ({
default: {
get: vi.fn(),
set: vi.fn(),
clear: vi.fn()
}
}));
vi.mock('./query_processor.js', () => ({
default: {
enhanceQuery: vi.fn().mockResolvedValue('enhanced query'),
decomposeQuery: vi.fn().mockResolvedValue({
subQueries: ['sub query 1', 'sub query 2'],
thinking: 'decomposition thinking'
})
}
}));
vi.mock('../modules/context_formatter.js', () => ({
default: {
formatNotes: vi.fn().mockReturnValue('formatted context'),
formatResponse: vi.fn().mockReturnValue('formatted response')
}
}));
vi.mock('../../ai_service_manager.js', () => ({
default: {
getContextExtractor: vi.fn().mockReturnValue({
findRelevantNotes: vi.fn().mockResolvedValue([])
})
}
}));
vi.mock('../index.js', () => ({
ContextExtractor: vi.fn().mockImplementation(() => ({
findRelevantNotes: vi.fn().mockResolvedValue([])
}))
}));
describe('ContextService', () => {
let service: ContextService;
let mockLLMService: LLMServiceInterface;
beforeEach(() => {
vi.clearAllMocks();
service = new ContextService();
mockLLMService = {
generateChatCompletion: vi.fn().mockResolvedValue({
content: 'Mock LLM response',
role: 'assistant'
}),
isAvailable: vi.fn().mockReturnValue(true)
};
});
afterEach(() => {
vi.restoreAllMocks();
});
describe('constructor', () => {
it('should initialize with default state', () => {
expect(service).toBeDefined();
expect((service as any).initialized).toBe(false);
expect((service as any).initPromise).toBeNull();
expect((service as any).contextExtractor).toBeDefined();
});
});
describe('initialize', () => {
it('should initialize successfully', async () => {
await service.initialize();
expect((service as any).initialized).toBe(true);
expect((service as any).initPromise).toBeNull();
});
it('should not initialize twice', async () => {
await service.initialize();
await service.initialize(); // Second call should be a no-op
expect((service as any).initialized).toBe(true);
});
it('should handle concurrent initialization calls', async () => {
const promises = [
service.initialize(),
service.initialize(),
service.initialize()
];
await Promise.all(promises);
expect((service as any).initialized).toBe(true);
});
it('should handle initialization errors', async () => {
// Mock an error in initialization
const originalContextExtractor = (service as any).contextExtractor;
(service as any).contextExtractor = null; // Force an error
await expect(service.initialize()).rejects.toThrow();
// Restore for cleanup
(service as any).contextExtractor = originalContextExtractor;
});
});
describe('processQuery', () => {
beforeEach(async () => {
await service.initialize();
});
const userQuestion = 'What are the main features of the application?';
it('should process query with default options', async () => {
const mockNotes: NoteSearchResult[] = [
{
noteId: 'note1',
title: 'Features Overview',
content: 'The app has many features...',
relevanceScore: 0.9,
searchType: 'content'
}
];
(service as any).contextExtractor.findRelevantNotes.mockResolvedValueOnce(mockNotes);
const result = await service.processQuery(userQuestion, mockLLMService);
expect(result).toEqual({
context: 'formatted context',
sources: mockNotes,
thinking: undefined,
decomposedQuery: undefined
});
});
it('should handle summarized content option', async () => {
const options: ContextOptions = {
summarizeContent: true,
maxResults: 5
};
const mockNotes: NoteSearchResult[] = [
{
noteId: 'note1',
title: 'Long Content',
content: 'This is a very long piece of content that should be summarized...',
relevanceScore: 0.8,
searchType: 'content'
}
];
(service as any).contextExtractor.findRelevantNotes.mockResolvedValueOnce(mockNotes);
const result = await service.processQuery(userQuestion, mockLLMService, options);
expect(result.sources).toEqual(mockNotes);
expect((service as any).contextExtractor.findRelevantNotes).toHaveBeenCalledWith(
userQuestion,
null,
expect.objectContaining({
maxResults: 5,
summarize: true,
llmService: mockLLMService
})
);
});
it('should handle query enhancement option', async () => {
const options: ContextOptions = {
useQueryEnhancement: true
};
const queryProcessor = (await import('./query_processor.js')).default;
await service.processQuery(userQuestion, mockLLMService, options);
expect(queryProcessor.enhanceQuery).toHaveBeenCalledWith(
userQuestion,
mockLLMService
);
});
it('should handle query decomposition option', async () => {
const options: ContextOptions = {
useQueryDecomposition: true,
showThinking: true
};
const queryProcessor = (await import('./query_processor.js')).default;
const result = await service.processQuery(userQuestion, mockLLMService, options);
expect(queryProcessor.decomposeQuery).toHaveBeenCalledWith(
userQuestion,
mockLLMService
);
expect(result.thinking).toBe('decomposition thinking');
expect(result.decomposedQuery).toEqual({
subQueries: ['sub query 1', 'sub query 2'],
thinking: 'decomposition thinking'
});
});
it('should respect context note ID', async () => {
const options: ContextOptions = {
contextNoteId: 'specific-note-123'
};
await service.processQuery(userQuestion, mockLLMService, options);
expect((service as any).contextExtractor.findRelevantNotes).toHaveBeenCalledWith(
userQuestion,
'specific-note-123',
expect.any(Object)
);
});
it('should handle empty search results', async () => {
(service as any).contextExtractor.findRelevantNotes.mockResolvedValueOnce([]);
const result = await service.processQuery(userQuestion, mockLLMService);
expect(result).toEqual({
context: 'formatted context',
sources: [],
thinking: undefined,
decomposedQuery: undefined
});
});
it('should handle errors in context extraction', async () => {
(service as any).contextExtractor.findRelevantNotes.mockRejectedValueOnce(
new Error('Context extraction failed')
);
await expect(
service.processQuery(userQuestion, mockLLMService)
).rejects.toThrow('Context extraction failed');
});
it('should handle errors in query enhancement', async () => {
const options: ContextOptions = {
useQueryEnhancement: true
};
const queryProcessor = (await import('./query_processor.js')).default;
queryProcessor.enhanceQuery.mockRejectedValueOnce(
new Error('Query enhancement failed')
);
await expect(
service.processQuery(userQuestion, mockLLMService, options)
).rejects.toThrow('Query enhancement failed');
});
it('should handle errors in query decomposition', async () => {
const options: ContextOptions = {
useQueryDecomposition: true
};
const queryProcessor = (await import('./query_processor.js')).default;
queryProcessor.decomposeQuery.mockRejectedValueOnce(
new Error('Query decomposition failed')
);
await expect(
service.processQuery(userQuestion, mockLLMService, options)
).rejects.toThrow('Query decomposition failed');
});
});
describe('findRelevantNotes', () => {
beforeEach(async () => {
await service.initialize();
});
it('should find relevant notes with default options', async () => {
const mockNotes: NoteSearchResult[] = [
{
noteId: 'note1',
title: 'Relevant Note',
content: 'This note is relevant to the query',
relevanceScore: 0.85,
searchType: 'content'
}
];
(service as any).contextExtractor.findRelevantNotes.mockResolvedValueOnce(mockNotes);
const result = await service.findRelevantNotes(
'test query',
'context-note-123',
{}
);
expect(result).toEqual(mockNotes);
expect((service as any).contextExtractor.findRelevantNotes).toHaveBeenCalledWith(
'test query',
'context-note-123',
{}
);
});
it('should pass through options to context extractor', async () => {
const options = {
maxResults: 15,
summarize: true,
llmService: mockLLMService
};
await service.findRelevantNotes('test query', null, options);
expect((service as any).contextExtractor.findRelevantNotes).toHaveBeenCalledWith(
'test query',
null,
options
);
});
it('should handle null context note ID', async () => {
await service.findRelevantNotes('test query', null, {});
expect((service as any).contextExtractor.findRelevantNotes).toHaveBeenCalledWith(
'test query',
null,
{}
);
});
});
describe('error handling', () => {
it('should handle service not initialized', async () => {
const uninitializedService = new ContextService();
// Don't initialize the service
await expect(
uninitializedService.processQuery('test', mockLLMService)
).rejects.toThrow();
});
it('should handle invalid LLM service', async () => {
await service.initialize();
const invalidLLMService = {
generateChatCompletion: vi.fn().mockRejectedValue(new Error('LLM error')),
isAvailable: vi.fn().mockReturnValue(false)
};
const options: ContextOptions = {
useQueryEnhancement: true
};
await expect(
service.processQuery('test', invalidLLMService, options)
).rejects.toThrow();
});
it('should handle context formatter errors', async () => {
await service.initialize();
const contextFormatter = (await import('../modules/context_formatter.js')).default;
contextFormatter.formatNotes.mockImplementationOnce(() => {
throw new Error('Formatting error');
});
await expect(
service.processQuery('test', mockLLMService)
).rejects.toThrow('Formatting error');
});
});
describe('performance', () => {
beforeEach(async () => {
await service.initialize();
});
it('should handle large result sets efficiently', async () => {
const largeResultSet: NoteSearchResult[] = Array.from({ length: 100 }, (_, i) => ({
noteId: `note${i}`,
title: `Note ${i}`,
content: `Content for note ${i}`,
relevanceScore: Math.random(),
searchType: 'content' as const
}));
(service as any).contextExtractor.findRelevantNotes.mockResolvedValueOnce(largeResultSet);
const startTime = Date.now();
const result = await service.processQuery('test query', mockLLMService, {
maxResults: 50
});
const endTime = Date.now();
expect(result.sources).toHaveLength(100); // Should return all found notes
expect(endTime - startTime).toBeLessThan(1000); // Should complete quickly
});
it('should handle concurrent queries', async () => {
const queries = [
'First query',
'Second query',
'Third query'
];
const promises = queries.map(query =>
service.processQuery(query, mockLLMService)
);
const results = await Promise.all(promises);
expect(results).toHaveLength(3);
expect((service as any).contextExtractor.findRelevantNotes).toHaveBeenCalledTimes(3);
});
});
});

View File

@@ -0,0 +1,312 @@
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import { ModelCapabilitiesService } from './model_capabilities_service.js';
import type { ModelCapabilities } from './interfaces/model_capabilities.js';
// Mock dependencies
vi.mock('../log.js', () => ({
default: {
info: vi.fn(),
error: vi.fn(),
warn: vi.fn()
}
}));
vi.mock('./interfaces/model_capabilities.js', () => ({
DEFAULT_MODEL_CAPABILITIES: {
contextLength: 4096,
supportedMessageTypes: ['text'],
supportsToolCalls: false,
supportsStreaming: true,
maxOutputTokens: 2048,
temperature: { min: 0, max: 2, default: 0.7 },
topP: { min: 0, max: 1, default: 0.9 }
}
}));
vi.mock('./constants/search_constants.js', () => ({
MODEL_CAPABILITIES: {
'gpt-4': {
contextLength: 8192,
supportsToolCalls: true,
maxOutputTokens: 4096
},
'gpt-3.5-turbo': {
contextLength: 4096,
supportsToolCalls: true,
maxOutputTokens: 2048
},
'claude-3-opus': {
contextLength: 200000,
supportsToolCalls: true,
maxOutputTokens: 4096
}
}
}));
vi.mock('./ai_service_manager.js', () => ({
default: {
getService: vi.fn()
}
}));
describe('ModelCapabilitiesService', () => {
let service: ModelCapabilitiesService;
let mockLog: any;
beforeEach(async () => {
vi.clearAllMocks();
service = new ModelCapabilitiesService();
// Get mocked log
mockLog = (await import('../log.js')).default;
});
afterEach(() => {
vi.restoreAllMocks();
service.clearCache();
});
describe('getChatModelCapabilities', () => {
it('should return cached capabilities if available', async () => {
const mockCapabilities: ModelCapabilities = {
contextLength: 8192,
supportedMessageTypes: ['text'],
supportsToolCalls: true,
supportsStreaming: true,
maxOutputTokens: 4096,
temperature: { min: 0, max: 2, default: 0.7 },
topP: { min: 0, max: 1, default: 0.9 }
};
// Pre-populate cache
(service as any).capabilitiesCache.set('chat:gpt-4', mockCapabilities);
const result = await service.getChatModelCapabilities('gpt-4');
expect(result).toEqual(mockCapabilities);
expect(mockLog.info).not.toHaveBeenCalled();
});
it('should fetch and cache capabilities for new model', async () => {
const result = await service.getChatModelCapabilities('gpt-4');
expect(result).toEqual({
contextLength: 8192,
supportedMessageTypes: ['text'],
supportsToolCalls: true,
supportsStreaming: true,
maxOutputTokens: 4096,
temperature: { min: 0, max: 2, default: 0.7 },
topP: { min: 0, max: 1, default: 0.9 }
});
expect(mockLog.info).toHaveBeenCalledWith('Using static capabilities for chat model: gpt-4');
// Verify it's cached
const cached = (service as any).capabilitiesCache.get('chat:gpt-4');
expect(cached).toEqual(result);
});
it('should handle case-insensitive model names', async () => {
const result = await service.getChatModelCapabilities('GPT-4');
expect(result.contextLength).toBe(8192);
expect(result.supportsToolCalls).toBe(true);
expect(mockLog.info).toHaveBeenCalledWith('Using static capabilities for chat model: GPT-4');
});
it('should return default capabilities for unknown models', async () => {
const result = await service.getChatModelCapabilities('unknown-model');
expect(result).toEqual({
contextLength: 4096,
supportedMessageTypes: ['text'],
supportsToolCalls: false,
supportsStreaming: true,
maxOutputTokens: 2048,
temperature: { min: 0, max: 2, default: 0.7 },
topP: { min: 0, max: 1, default: 0.9 }
});
expect(mockLog.info).toHaveBeenCalledWith('AI service doesn\'t support model capabilities - using defaults for model: unknown-model');
});
it('should merge static capabilities with defaults', async () => {
const result = await service.getChatModelCapabilities('gpt-3.5-turbo');
expect(result).toEqual({
contextLength: 4096,
supportedMessageTypes: ['text'],
supportsToolCalls: true,
supportsStreaming: true,
maxOutputTokens: 2048,
temperature: { min: 0, max: 2, default: 0.7 },
topP: { min: 0, max: 1, default: 0.9 }
});
});
});
describe('clearCache', () => {
it('should clear all cached capabilities', () => {
const mockCapabilities: ModelCapabilities = {
contextLength: 8192,
supportedMessageTypes: ['text'],
supportsToolCalls: true,
supportsStreaming: true,
maxOutputTokens: 4096,
temperature: { min: 0, max: 2, default: 0.7 },
topP: { min: 0, max: 1, default: 0.9 }
};
// Pre-populate cache
(service as any).capabilitiesCache.set('chat:model1', mockCapabilities);
(service as any).capabilitiesCache.set('chat:model2', mockCapabilities);
expect((service as any).capabilitiesCache.size).toBe(2);
service.clearCache();
expect((service as any).capabilitiesCache.size).toBe(0);
expect(mockLog.info).toHaveBeenCalledWith('Model capabilities cache cleared');
});
});
describe('getCachedCapabilities', () => {
it('should return all cached capabilities as a record', () => {
const mockCapabilities1: ModelCapabilities = {
contextLength: 8192,
supportedMessageTypes: ['text'],
supportsToolCalls: true,
supportsStreaming: true,
maxOutputTokens: 4096,
temperature: { min: 0, max: 2, default: 0.7 },
topP: { min: 0, max: 1, default: 0.9 }
};
const mockCapabilities2: ModelCapabilities = {
contextLength: 4096,
supportedMessageTypes: ['text'],
supportsToolCalls: false,
supportsStreaming: true,
maxOutputTokens: 2048,
temperature: { min: 0, max: 2, default: 0.7 },
topP: { min: 0, max: 1, default: 0.9 }
};
// Pre-populate cache
(service as any).capabilitiesCache.set('chat:model1', mockCapabilities1);
(service as any).capabilitiesCache.set('chat:model2', mockCapabilities2);
const result = service.getCachedCapabilities();
expect(result).toEqual({
'chat:model1': mockCapabilities1,
'chat:model2': mockCapabilities2
});
});
it('should return empty object when cache is empty', () => {
const result = service.getCachedCapabilities();
expect(result).toEqual({});
});
});
describe('fetchChatModelCapabilities', () => {
it('should return static capabilities when available', async () => {
// Access private method for testing
const fetchMethod = (service as any).fetchChatModelCapabilities.bind(service);
const result = await fetchMethod('claude-3-opus');
expect(result).toEqual({
contextLength: 200000,
supportedMessageTypes: ['text'],
supportsToolCalls: true,
supportsStreaming: true,
maxOutputTokens: 4096,
temperature: { min: 0, max: 2, default: 0.7 },
topP: { min: 0, max: 1, default: 0.9 }
});
expect(mockLog.info).toHaveBeenCalledWith('Using static capabilities for chat model: claude-3-opus');
});
it('should fallback to defaults when no static capabilities exist', async () => {
const fetchMethod = (service as any).fetchChatModelCapabilities.bind(service);
const result = await fetchMethod('unknown-model');
expect(result).toEqual({
contextLength: 4096,
supportedMessageTypes: ['text'],
supportsToolCalls: false,
supportsStreaming: true,
maxOutputTokens: 2048,
temperature: { min: 0, max: 2, default: 0.7 },
topP: { min: 0, max: 1, default: 0.9 }
});
expect(mockLog.info).toHaveBeenCalledWith('AI service doesn\'t support model capabilities - using defaults for model: unknown-model');
expect(mockLog.info).toHaveBeenCalledWith('Using default capabilities for chat model: unknown-model');
});
it('should handle errors and return defaults', async () => {
// Mock the MODEL_CAPABILITIES to throw an error
vi.doMock('./constants/search_constants.js', () => {
throw new Error('Failed to load constants');
});
const fetchMethod = (service as any).fetchChatModelCapabilities.bind(service);
const result = await fetchMethod('test-model');
expect(result).toEqual({
contextLength: 4096,
supportedMessageTypes: ['text'],
supportsToolCalls: false,
supportsStreaming: true,
maxOutputTokens: 2048,
temperature: { min: 0, max: 2, default: 0.7 },
topP: { min: 0, max: 1, default: 0.9 }
});
});
});
describe('caching behavior', () => {
it('should use cache for subsequent calls to same model', async () => {
const spy = vi.spyOn(service as any, 'fetchChatModelCapabilities');
// First call
await service.getChatModelCapabilities('gpt-4');
expect(spy).toHaveBeenCalledTimes(1);
// Second call should use cache
await service.getChatModelCapabilities('gpt-4');
expect(spy).toHaveBeenCalledTimes(1); // Still 1, not called again
spy.mockRestore();
});
it('should fetch separately for different models', async () => {
const spy = vi.spyOn(service as any, 'fetchChatModelCapabilities');
await service.getChatModelCapabilities('gpt-4');
await service.getChatModelCapabilities('gpt-3.5-turbo');
expect(spy).toHaveBeenCalledTimes(2);
expect(spy).toHaveBeenNthCalledWith(1, 'gpt-4');
expect(spy).toHaveBeenNthCalledWith(2, 'gpt-3.5-turbo');
spy.mockRestore();
});
it('should treat models with different cases as different entries', async () => {
await service.getChatModelCapabilities('gpt-4');
await service.getChatModelCapabilities('GPT-4');
const cached = service.getCachedCapabilities();
expect(Object.keys(cached)).toHaveLength(2);
expect(cached['chat:gpt-4']).toBeDefined();
expect(cached['chat:GPT-4']).toBeDefined();
});
});
});

View File

@@ -0,0 +1,419 @@
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import { ChatPipeline } from './chat_pipeline.js';
import type { ChatPipelineInput, ChatPipelineConfig } from './interfaces.js';
import type { Message, ChatResponse } from '../ai_interface.js';
// Mock all pipeline stages
vi.mock('./stages/context_extraction_stage.js', () => ({
ContextExtractionStage: vi.fn().mockImplementation(() => ({
execute: vi.fn().mockResolvedValue({})
}))
}));
vi.mock('./stages/semantic_context_extraction_stage.js', () => ({
SemanticContextExtractionStage: vi.fn().mockImplementation(() => ({
execute: vi.fn().mockResolvedValue({})
}))
}));
vi.mock('./stages/agent_tools_context_stage.js', () => ({
AgentToolsContextStage: vi.fn().mockImplementation(() => ({
execute: vi.fn().mockResolvedValue({})
}))
}));
vi.mock('./stages/message_preparation_stage.js', () => ({
MessagePreparationStage: vi.fn().mockImplementation(() => ({
execute: vi.fn().mockResolvedValue({
preparedMessages: [{ role: 'user', content: 'Hello' }]
})
}))
}));
vi.mock('./stages/model_selection_stage.js', () => ({
ModelSelectionStage: vi.fn().mockImplementation(() => ({
execute: vi.fn().mockResolvedValue({
selectedProvider: 'openai',
selectedModel: 'gpt-4'
})
}))
}));
vi.mock('./stages/llm_completion_stage.js', () => ({
LLMCompletionStage: vi.fn().mockImplementation(() => ({
execute: vi.fn().mockResolvedValue({
response: {
content: 'Hello! How can I help you?',
role: 'assistant',
finish_reason: 'stop'
}
})
}))
}));
vi.mock('./stages/response_processing_stage.js', () => ({
ResponseProcessingStage: vi.fn().mockImplementation(() => ({
execute: vi.fn().mockResolvedValue({
processedResponse: {
content: 'Hello! How can I help you?',
role: 'assistant',
finish_reason: 'stop'
}
})
}))
}));
vi.mock('./stages/tool_calling_stage.js', () => ({
ToolCallingStage: vi.fn().mockImplementation(() => ({
execute: vi.fn().mockResolvedValue({
toolCallRequired: false
})
}))
}));
vi.mock('../tools/tool_registry.js', () => ({
default: {
getTools: vi.fn().mockReturnValue([]),
executeTool: vi.fn()
}
}));
vi.mock('../tools/tool_initializer.js', () => ({
default: {
initializeTools: vi.fn().mockResolvedValue(undefined)
}
}));
vi.mock('../../log.js', () => ({
default: {
info: vi.fn(),
error: vi.fn(),
warn: vi.fn()
}
}));
describe('ChatPipeline', () => {
let pipeline: ChatPipeline;
beforeEach(() => {
vi.clearAllMocks();
pipeline = new ChatPipeline();
});
afterEach(() => {
vi.restoreAllMocks();
});
describe('constructor', () => {
it('should initialize with default configuration', () => {
expect(pipeline.config).toEqual({
enableStreaming: true,
enableMetrics: true,
maxToolCallIterations: 3
});
});
it('should accept custom configuration', () => {
const customConfig: Partial<ChatPipelineConfig> = {
enableStreaming: false,
maxToolCallIterations: 5
};
const customPipeline = new ChatPipeline(customConfig);
expect(customPipeline.config).toEqual({
enableStreaming: false,
enableMetrics: true,
maxToolCallIterations: 5
});
});
it('should initialize all pipeline stages', () => {
expect(pipeline.stages.contextExtraction).toBeDefined();
expect(pipeline.stages.semanticContextExtraction).toBeDefined();
expect(pipeline.stages.agentToolsContext).toBeDefined();
expect(pipeline.stages.messagePreparation).toBeDefined();
expect(pipeline.stages.modelSelection).toBeDefined();
expect(pipeline.stages.llmCompletion).toBeDefined();
expect(pipeline.stages.responseProcessing).toBeDefined();
expect(pipeline.stages.toolCalling).toBeDefined();
});
it('should initialize metrics', () => {
expect(pipeline.metrics).toEqual({
totalExecutions: 0,
averageExecutionTime: 0,
stageMetrics: {
contextExtraction: {
totalExecutions: 0,
averageExecutionTime: 0
},
semanticContextExtraction: {
totalExecutions: 0,
averageExecutionTime: 0
},
agentToolsContext: {
totalExecutions: 0,
averageExecutionTime: 0
},
messagePreparation: {
totalExecutions: 0,
averageExecutionTime: 0
},
modelSelection: {
totalExecutions: 0,
averageExecutionTime: 0
},
llmCompletion: {
totalExecutions: 0,
averageExecutionTime: 0
},
responseProcessing: {
totalExecutions: 0,
averageExecutionTime: 0
},
toolCalling: {
totalExecutions: 0,
averageExecutionTime: 0
}
}
});
});
});
describe('execute', () => {
const messages: Message[] = [
{ role: 'user', content: 'Hello' }
];
const input: ChatPipelineInput = {
messages,
noteId: 'note-123',
userId: 'user-456'
};
it('should execute all pipeline stages in order', async () => {
const result = await pipeline.execute(input);
expect(pipeline.stages.contextExtraction.execute).toHaveBeenCalled();
expect(pipeline.stages.semanticContextExtraction.execute).toHaveBeenCalled();
expect(pipeline.stages.agentToolsContext.execute).toHaveBeenCalled();
expect(pipeline.stages.messagePreparation.execute).toHaveBeenCalled();
expect(pipeline.stages.modelSelection.execute).toHaveBeenCalled();
expect(pipeline.stages.llmCompletion.execute).toHaveBeenCalled();
expect(pipeline.stages.responseProcessing.execute).toHaveBeenCalled();
expect(pipeline.stages.toolCalling.execute).toHaveBeenCalled();
expect(result).toEqual({
content: 'Hello! How can I help you?',
role: 'assistant',
finish_reason: 'stop'
});
});
it('should increment total executions metric', async () => {
const initialExecutions = pipeline.metrics.totalExecutions;
await pipeline.execute(input);
expect(pipeline.metrics.totalExecutions).toBe(initialExecutions + 1);
});
it('should handle streaming callback', async () => {
const streamCallback = vi.fn();
const inputWithStream = { ...input, streamCallback };
await pipeline.execute(inputWithStream);
expect(pipeline.stages.llmCompletion.execute).toHaveBeenCalledWith(
expect.objectContaining({
streamCallback: expect.any(Function)
})
);
});
it('should handle tool calling iterations', async () => {
// Mock tool calling stage to require a tool call
const mockToolCallingStage = pipeline.stages.toolCalling;
mockToolCallingStage.execute
.mockResolvedValueOnce({
toolCallRequired: true,
toolCallMessages: [{ role: 'assistant', content: 'Using tool...' }]
})
.mockResolvedValueOnce({
toolCallRequired: false
});
await pipeline.execute(input);
// Should call tool calling stage twice (initial + one iteration)
expect(mockToolCallingStage.execute).toHaveBeenCalledTimes(2);
});
it('should respect max tool call iterations', async () => {
// Set low max iterations
pipeline.config.maxToolCallIterations = 1;
// Mock tool calling stage to always require tool calls
const mockToolCallingStage = pipeline.stages.toolCalling;
mockToolCallingStage.execute.mockResolvedValue({
toolCallRequired: true,
toolCallMessages: [{ role: 'assistant', content: 'Using tool...' }]
});
await pipeline.execute(input);
// Should call tool calling stage max iterations + 1 (initial)
expect(mockToolCallingStage.execute).toHaveBeenCalledTimes(2);
});
it('should handle stage errors gracefully', async () => {
// Mock a stage to throw an error
pipeline.stages.contextExtraction.execute.mockRejectedValueOnce(
new Error('Context extraction failed')
);
await expect(pipeline.execute(input)).rejects.toThrow(
'Context extraction failed'
);
});
it('should pass context between stages', async () => {
const contextData = { relevantNotes: ['note1', 'note2'] };
pipeline.stages.contextExtraction.execute.mockResolvedValueOnce(contextData);
await pipeline.execute(input);
expect(pipeline.stages.semanticContextExtraction.execute).toHaveBeenCalledWith(
expect.objectContaining(contextData)
);
});
it('should handle empty messages', async () => {
const emptyInput: ChatPipelineInput = {
messages: [],
noteId: 'note-123',
userId: 'user-456'
};
const result = await pipeline.execute(emptyInput);
expect(result).toEqual({
content: 'Hello! How can I help you?',
role: 'assistant',
finish_reason: 'stop'
});
});
it('should calculate content length for model selection', async () => {
const longMessages: Message[] = [
{ role: 'user', content: 'This is a very long message that contains lots of text' },
{ role: 'assistant', content: 'This is another long response with detailed information' }
];
const longInput = { ...input, messages: longMessages };
await pipeline.execute(longInput);
expect(pipeline.stages.modelSelection.execute).toHaveBeenCalledWith(
expect.objectContaining({
contentLength: expect.any(Number)
})
);
});
it('should update average execution time', async () => {
// Execute pipeline multiple times
await pipeline.execute(input);
await pipeline.execute(input);
expect(pipeline.metrics.averageExecutionTime).toBeGreaterThan(0);
});
it('should disable streaming when config is false', async () => {
pipeline.config.enableStreaming = false;
const streamCallback = vi.fn();
const inputWithStream = { ...input, streamCallback };
await pipeline.execute(inputWithStream);
// Should not pass stream callback to LLM stage
expect(pipeline.stages.llmCompletion.execute).toHaveBeenCalledWith(
expect.not.objectContaining({
streamCallback: expect.any(Function)
})
);
});
it('should handle concurrent executions', async () => {
const promises = [
pipeline.execute(input),
pipeline.execute(input),
pipeline.execute(input)
];
const results = await Promise.all(promises);
expect(results).toHaveLength(3);
expect(pipeline.metrics.totalExecutions).toBe(3);
});
});
describe('metrics', () => {
it('should track stage execution times when metrics enabled', async () => {
pipeline.config.enableMetrics = true;
const input: ChatPipelineInput = {
messages: [{ role: 'user', content: 'Hello' }],
noteId: 'note-123',
userId: 'user-456'
};
await pipeline.execute(input);
// Check that metrics were updated
expect(pipeline.metrics.totalExecutions).toBe(1);
expect(pipeline.metrics.averageExecutionTime).toBeGreaterThan(0);
});
it('should skip metrics when disabled', async () => {
pipeline.config.enableMetrics = false;
const input: ChatPipelineInput = {
messages: [{ role: 'user', content: 'Hello' }],
noteId: 'note-123',
userId: 'user-456'
};
await pipeline.execute(input);
// Execution count should still be tracked
expect(pipeline.metrics.totalExecutions).toBe(1);
});
});
describe('error handling', () => {
it('should propagate errors from stages', async () => {
const error = new Error('Stage execution failed');
pipeline.stages.messagePreparation.execute.mockRejectedValueOnce(error);
const input: ChatPipelineInput = {
messages: [{ role: 'user', content: 'Hello' }],
noteId: 'note-123',
userId: 'user-456'
};
await expect(pipeline.execute(input)).rejects.toThrow('Stage execution failed');
});
it('should handle invalid input gracefully', async () => {
const invalidInput = {
messages: null,
noteId: 'note-123',
userId: 'user-456'
} as any;
await expect(pipeline.execute(invalidInput)).rejects.toThrow();
});
});
});

View File

@@ -0,0 +1,433 @@
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import { AnthropicService } from './anthropic_service.js';
import options from '../../options.js';
import * as providers from './providers.js';
import type { ChatCompletionOptions, Message } from '../ai_interface.js';
import Anthropic from '@anthropic-ai/sdk';
import { PROVIDER_CONSTANTS } from '../constants/provider_constants.js';
// Mock dependencies
vi.mock('../../options.js', () => ({
default: {
getOption: vi.fn(),
getOptionBool: vi.fn()
}
}));
vi.mock('../../log.js', () => ({
default: {
info: vi.fn(),
error: vi.fn(),
warn: vi.fn()
}
}));
vi.mock('./providers.js', () => ({
getAnthropicOptions: vi.fn()
}));
vi.mock('@anthropic-ai/sdk', () => {
const mockStream = {
[Symbol.asyncIterator]: async function* () {
yield {
type: 'content_block_delta',
delta: { text: 'Hello' }
};
yield {
type: 'content_block_delta',
delta: { text: ' world' }
};
yield {
type: 'message_delta',
delta: { stop_reason: 'end_turn' }
};
}
};
const mockAnthropic = vi.fn().mockImplementation(() => ({
messages: {
create: vi.fn().mockImplementation((params) => {
if (params.stream) {
return Promise.resolve(mockStream);
}
return Promise.resolve({
id: 'msg_123',
type: 'message',
role: 'assistant',
content: [{
type: 'text',
text: 'Hello! How can I help you today?'
}],
model: 'claude-3-opus-20240229',
stop_reason: 'end_turn',
stop_sequence: null,
usage: {
input_tokens: 10,
output_tokens: 25
}
});
})
}
}));
return { default: mockAnthropic };
});
describe('AnthropicService', () => {
let service: AnthropicService;
let mockAnthropicInstance: any;
beforeEach(() => {
vi.clearAllMocks();
service = new AnthropicService();
// Get the mocked Anthropic instance
const AnthropicMock = vi.mocked(Anthropic);
mockAnthropicInstance = new AnthropicMock({ apiKey: 'test' });
});
afterEach(() => {
vi.restoreAllMocks();
});
describe('constructor', () => {
it('should initialize with provider name', () => {
expect(service).toBeDefined();
expect((service as any).providerName).toBe('Anthropic');
});
});
describe('isAvailable', () => {
it('should return true when AI is enabled and API key exists', () => {
vi.mocked(options.getOptionBool).mockReturnValueOnce(true); // AI enabled
vi.mocked(options.getOption).mockReturnValueOnce('test-api-key'); // API key
const result = service.isAvailable();
expect(result).toBe(true);
});
it('should return false when AI is disabled', () => {
vi.mocked(options.getOptionBool).mockReturnValueOnce(false); // AI disabled
const result = service.isAvailable();
expect(result).toBe(false);
});
it('should return false when no API key', () => {
vi.mocked(options.getOptionBool).mockReturnValueOnce(true); // AI enabled
vi.mocked(options.getOption).mockReturnValueOnce(null); // No API key
const result = service.isAvailable();
expect(result).toBe(false);
});
});
describe('generateChatCompletion', () => {
const messages: Message[] = [
{ role: 'user', content: 'Hello' }
];
beforeEach(() => {
vi.mocked(options.getOptionBool).mockReturnValue(true); // AI enabled
vi.mocked(options.getOption)
.mockReturnValueOnce('test-api-key') // API key
.mockReturnValueOnce('You are a helpful assistant'); // System prompt
});
it('should generate non-streaming completion', async () => {
const mockOptions = {
apiKey: 'test-key',
baseUrl: 'https://api.anthropic.com',
model: 'claude-3-opus-20240229',
temperature: 0.7,
max_tokens: 1000,
stream: false
};
vi.mocked(providers.getAnthropicOptions).mockReturnValueOnce(mockOptions);
const result = await service.generateChatCompletion(messages);
expect(result).toEqual({
content: 'Hello! How can I help you today?',
role: 'assistant',
finish_reason: 'end_turn'
});
});
it('should format messages properly for Anthropic API', async () => {
const mockOptions = {
apiKey: 'test-key',
baseUrl: 'https://api.anthropic.com',
model: 'claude-3-opus-20240229',
stream: false
};
vi.mocked(providers.getAnthropicOptions).mockReturnValueOnce(mockOptions);
const createSpy = vi.spyOn(mockAnthropicInstance.messages, 'create');
await service.generateChatCompletion(messages);
const calledParams = createSpy.mock.calls[0][0];
expect(calledParams.messages).toEqual([
{ role: 'user', content: 'Hello' }
]);
expect(calledParams.system).toBe('You are a helpful assistant');
});
it('should handle streaming completion', async () => {
const mockOptions = {
apiKey: 'test-key',
baseUrl: 'https://api.anthropic.com',
model: 'claude-3-opus-20240229',
stream: true,
onChunk: vi.fn()
};
vi.mocked(providers.getAnthropicOptions).mockReturnValueOnce(mockOptions);
const result = await service.generateChatCompletion(messages);
// Wait for chunks to be processed
await new Promise(resolve => setTimeout(resolve, 100));
expect(mockOptions.onChunk).toHaveBeenCalledTimes(2);
expect(mockOptions.onChunk).toHaveBeenNthCalledWith(1, {
content: 'Hello',
role: 'assistant',
finish_reason: null
});
expect(mockOptions.onChunk).toHaveBeenNthCalledWith(2, {
content: ' world',
role: 'assistant',
finish_reason: null
});
expect(result).toEqual({
content: 'Hello world',
role: 'assistant',
finish_reason: 'end_turn'
});
});
it('should handle tool calls', async () => {
const mockTools = [{
name: 'test_tool',
description: 'Test tool',
input_schema: {
type: 'object',
properties: {}
}
}];
const mockOptions = {
apiKey: 'test-key',
baseUrl: 'https://api.anthropic.com',
model: 'claude-3-opus-20240229',
stream: false,
enableTools: true,
tools: mockTools,
tool_choice: { type: 'any' }
};
vi.mocked(providers.getAnthropicOptions).mockReturnValueOnce(mockOptions);
// Mock response with tool use
mockAnthropicInstance.messages.create.mockResolvedValueOnce({
id: 'msg_123',
type: 'message',
role: 'assistant',
content: [{
type: 'tool_use',
id: 'tool_123',
name: 'test_tool',
input: { key: 'value' }
}],
model: 'claude-3-opus-20240229',
stop_reason: 'tool_use',
stop_sequence: null,
usage: {
input_tokens: 10,
output_tokens: 25
}
});
const result = await service.generateChatCompletion(messages);
expect(result).toEqual({
content: '',
role: 'assistant',
tool_calls: [{
id: 'tool_123',
type: 'function',
function: {
name: 'test_tool',
arguments: '{"key":"value"}'
}
}],
finish_reason: 'tool_use'
});
});
it('should throw error if service not available', async () => {
vi.mocked(options.getOptionBool).mockReturnValueOnce(false); // AI disabled
await expect(service.generateChatCompletion(messages)).rejects.toThrow(
'Anthropic service is not available'
);
});
it('should handle API errors', async () => {
const mockOptions = {
apiKey: 'test-key',
baseUrl: 'https://api.anthropic.com',
model: 'claude-3-opus-20240229',
stream: false
};
vi.mocked(providers.getAnthropicOptions).mockReturnValueOnce(mockOptions);
// Mock API error
mockAnthropicInstance.messages.create.mockRejectedValueOnce(
new Error('API Error: Invalid API key')
);
await expect(service.generateChatCompletion(messages)).rejects.toThrow(
'Anthropic API error: API Error: Invalid API key'
);
});
it('should use custom API version and beta version', async () => {
const mockOptions = {
apiKey: 'test-key',
baseUrl: 'https://api.anthropic.com',
model: 'claude-3-opus-20240229',
apiVersion: '2024-01-01',
betaVersion: 'beta-feature-1',
stream: false
};
vi.mocked(providers.getAnthropicOptions).mockReturnValueOnce(mockOptions);
// Spy on Anthropic constructor
const AnthropicMock = vi.mocked(Anthropic);
AnthropicMock.mockClear();
// Create new service to trigger client creation
const newService = new AnthropicService();
await newService.generateChatCompletion(messages);
expect(AnthropicMock).toHaveBeenCalledWith({
apiKey: 'test-key',
baseURL: 'https://api.anthropic.com',
defaultHeaders: {
'anthropic-version': '2024-01-01',
'anthropic-beta': 'beta-feature-1'
}
});
});
it('should use default API version when not specified', async () => {
const mockOptions = {
apiKey: 'test-key',
baseUrl: 'https://api.anthropic.com',
model: 'claude-3-opus-20240229',
stream: false
};
vi.mocked(providers.getAnthropicOptions).mockReturnValueOnce(mockOptions);
// Spy on Anthropic constructor
const AnthropicMock = vi.mocked(Anthropic);
AnthropicMock.mockClear();
// Create new service to trigger client creation
const newService = new AnthropicService();
await newService.generateChatCompletion(messages);
expect(AnthropicMock).toHaveBeenCalledWith({
apiKey: 'test-key',
baseURL: 'https://api.anthropic.com',
defaultHeaders: {
'anthropic-version': PROVIDER_CONSTANTS.ANTHROPIC.API_VERSION,
'anthropic-beta': PROVIDER_CONSTANTS.ANTHROPIC.BETA_VERSION
}
});
});
it('should handle mixed content types in response', async () => {
const mockOptions = {
apiKey: 'test-key',
baseUrl: 'https://api.anthropic.com',
model: 'claude-3-opus-20240229',
stream: false
};
vi.mocked(providers.getAnthropicOptions).mockReturnValueOnce(mockOptions);
// Mock response with mixed content
mockAnthropicInstance.messages.create.mockResolvedValueOnce({
id: 'msg_123',
type: 'message',
role: 'assistant',
content: [
{ type: 'text', text: 'Here is the result: ' },
{ type: 'tool_use', id: 'tool_123', name: 'calculate', input: { x: 5, y: 3 } },
{ type: 'text', text: ' The calculation is complete.' }
],
model: 'claude-3-opus-20240229',
stop_reason: 'end_turn',
stop_sequence: null,
usage: {
input_tokens: 10,
output_tokens: 25
}
});
const result = await service.generateChatCompletion(messages);
expect(result.content).toBe('Here is the result: The calculation is complete.');
expect(result.tool_calls).toHaveLength(1);
expect(result.tool_calls![0].function.name).toBe('calculate');
});
it('should handle tool results in messages', async () => {
const messagesWithToolResult: Message[] = [
{ role: 'user', content: 'Calculate 5 + 3' },
{
role: 'assistant',
content: '',
tool_calls: [{
id: 'call_123',
type: 'function',
function: { name: 'calculate', arguments: '{"x": 5, "y": 3}' }
}]
},
{
role: 'tool',
content: '8',
tool_call_id: 'call_123'
}
];
const mockOptions = {
apiKey: 'test-key',
baseUrl: 'https://api.anthropic.com',
model: 'claude-3-opus-20240229',
stream: false
};
vi.mocked(providers.getAnthropicOptions).mockReturnValueOnce(mockOptions);
const createSpy = vi.spyOn(mockAnthropicInstance.messages, 'create');
await service.generateChatCompletion(messagesWithToolResult);
const formattedMessages = createSpy.mock.calls[0][0].messages;
expect(formattedMessages).toHaveLength(3);
expect(formattedMessages[2]).toEqual({
role: 'user',
content: [{
type: 'tool_result',
tool_use_id: 'call_123',
content: '8'
}]
});
});
});
});

View File

@@ -0,0 +1,467 @@
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import { OllamaService } from './ollama_service.js';
import options from '../../options.js';
import * as providers from './providers.js';
import type { ChatCompletionOptions, Message } from '../ai_interface.js';
import { Ollama } from 'ollama';
// Mock dependencies
vi.mock('../../options.js', () => ({
default: {
getOption: vi.fn(),
getOptionBool: vi.fn()
}
}));
vi.mock('../../log.js', () => ({
default: {
info: vi.fn(),
error: vi.fn(),
warn: vi.fn()
}
}));
vi.mock('./providers.js', () => ({
getOllamaOptions: vi.fn()
}));
vi.mock('../formatters/ollama_formatter.js', () => ({
OllamaMessageFormatter: vi.fn().mockImplementation(() => ({
formatMessages: vi.fn(),
formatResponse: vi.fn()
}))
}));
vi.mock('../tools/tool_registry.js', () => ({
default: {
getTools: vi.fn().mockReturnValue([]),
executeTool: vi.fn()
}
}));
vi.mock('./stream_handler.js', () => ({
StreamProcessor: vi.fn(),
createStreamHandler: vi.fn(),
performProviderHealthCheck: vi.fn(),
processProviderStream: vi.fn(),
extractStreamStats: vi.fn()
}));
vi.mock('ollama', () => {
const mockStream = {
[Symbol.asyncIterator]: async function* () {
yield {
message: {
role: 'assistant',
content: 'Hello'
},
done: false
};
yield {
message: {
role: 'assistant',
content: ' world'
},
done: true
};
}
};
const mockOllama = vi.fn().mockImplementation(() => ({
chat: vi.fn().mockImplementation((params) => {
if (params.stream) {
return Promise.resolve(mockStream);
}
return Promise.resolve({
message: {
role: 'assistant',
content: 'Hello! How can I help you today?'
},
created_at: '2024-01-01T00:00:00Z',
model: 'llama2',
done: true
});
}),
show: vi.fn().mockResolvedValue({
modelfile: 'FROM llama2',
parameters: {},
template: '',
details: {
format: 'gguf',
family: 'llama',
families: ['llama'],
parameter_size: '7B',
quantization_level: 'Q4_0'
}
}),
list: vi.fn().mockResolvedValue({
models: [
{
name: 'llama2:latest',
modified_at: '2024-01-01T00:00:00Z',
size: 3800000000
}
]
})
}));
return { Ollama: mockOllama };
});
// Mock global fetch
global.fetch = vi.fn().mockResolvedValue({
ok: true,
status: 200,
statusText: 'OK',
json: vi.fn().mockResolvedValue({})
});
describe('OllamaService', () => {
let service: OllamaService;
let mockOllamaInstance: any;
beforeEach(() => {
vi.clearAllMocks();
service = new OllamaService();
// Get the mocked Ollama instance
const OllamaMock = vi.mocked(Ollama);
mockOllamaInstance = new OllamaMock({ host: 'http://localhost:11434' });
});
afterEach(() => {
vi.restoreAllMocks();
});
describe('constructor', () => {
it('should initialize with provider name and formatter', () => {
expect(service).toBeDefined();
expect((service as any).providerName).toBe('Ollama');
expect((service as any).formatter).toBeDefined();
});
});
describe('isAvailable', () => {
it('should return true when AI is enabled and base URL exists', () => {
vi.mocked(options.getOptionBool).mockReturnValueOnce(true); // AI enabled
vi.mocked(options.getOption).mockReturnValueOnce('http://localhost:11434'); // Base URL
const result = service.isAvailable();
expect(result).toBe(true);
});
it('should return false when AI is disabled', () => {
vi.mocked(options.getOptionBool).mockReturnValueOnce(false); // AI disabled
const result = service.isAvailable();
expect(result).toBe(false);
});
it('should return false when no base URL', () => {
vi.mocked(options.getOptionBool).mockReturnValueOnce(true); // AI enabled
vi.mocked(options.getOption).mockReturnValueOnce(null); // No base URL
const result = service.isAvailable();
expect(result).toBe(false);
});
});
describe('generateChatCompletion', () => {
const messages: Message[] = [
{ role: 'user', content: 'Hello' }
];
beforeEach(() => {
vi.mocked(options.getOptionBool).mockReturnValue(true); // AI enabled
vi.mocked(options.getOption)
.mockReturnValueOnce('http://localhost:11434') // Base URL
.mockReturnValueOnce('You are a helpful assistant'); // System prompt
});
it('should generate non-streaming completion', async () => {
const mockOptions = {
baseUrl: 'http://localhost:11434',
model: 'llama2',
temperature: 0.7,
stream: false
};
vi.mocked(providers.getOllamaOptions).mockReturnValueOnce(mockOptions);
const result = await service.generateChatCompletion(messages);
expect(result).toEqual({
content: 'Hello! How can I help you today?',
role: 'assistant',
finish_reason: 'stop'
});
});
it('should handle streaming completion', async () => {
const mockOptions = {
baseUrl: 'http://localhost:11434',
model: 'llama2',
temperature: 0.7,
stream: true,
onChunk: vi.fn()
};
vi.mocked(providers.getOllamaOptions).mockReturnValueOnce(mockOptions);
const result = await service.generateChatCompletion(messages);
// Wait for chunks to be processed
await new Promise(resolve => setTimeout(resolve, 100));
expect(mockOptions.onChunk).toHaveBeenCalledTimes(2);
expect(result).toEqual({
content: 'Hello world',
role: 'assistant',
finish_reason: 'stop'
});
});
it('should handle tools when enabled', async () => {
const mockTools = [{
name: 'test_tool',
description: 'Test tool',
parameters: {
type: 'object',
properties: {},
required: []
}
}];
const mockOptions = {
baseUrl: 'http://localhost:11434',
model: 'llama2',
stream: false,
enableTools: true,
tools: mockTools
};
vi.mocked(providers.getOllamaOptions).mockReturnValueOnce(mockOptions);
const chatSpy = vi.spyOn(mockOllamaInstance, 'chat');
await service.generateChatCompletion(messages);
const calledParams = chatSpy.mock.calls[0][0];
expect(calledParams.tools).toEqual(mockTools);
});
it('should throw error if service not available', async () => {
vi.mocked(options.getOptionBool).mockReturnValueOnce(false); // AI disabled
await expect(service.generateChatCompletion(messages)).rejects.toThrow(
'Ollama service is not available'
);
});
it('should throw error if no base URL configured', async () => {
vi.mocked(options.getOption).mockReturnValueOnce(null); // No base URL
await expect(service.generateChatCompletion(messages)).rejects.toThrow(
'Ollama base URL is not configured'
);
});
it('should handle API errors', async () => {
const mockOptions = {
baseUrl: 'http://localhost:11434',
model: 'llama2',
stream: false
};
vi.mocked(providers.getOllamaOptions).mockReturnValueOnce(mockOptions);
// Mock API error
mockOllamaInstance.chat.mockRejectedValueOnce(
new Error('Connection refused')
);
await expect(service.generateChatCompletion(messages)).rejects.toThrow(
'Ollama API error: Connection refused'
);
});
it('should create client with custom fetch for debugging', () => {
const mockOptions = {
baseUrl: 'http://localhost:11434',
model: 'llama2',
stream: false
};
vi.mocked(providers.getOllamaOptions).mockReturnValueOnce(mockOptions);
// Spy on Ollama constructor
const OllamaMock = vi.mocked(Ollama);
OllamaMock.mockClear();
// Create new service to trigger client creation
const newService = new OllamaService();
newService.generateChatCompletion(messages);
expect(OllamaMock).toHaveBeenCalledWith({
host: 'http://localhost:11434',
fetch: expect.any(Function)
});
});
it('should handle tool execution feedback', async () => {
const mockOptions = {
baseUrl: 'http://localhost:11434',
model: 'llama2',
stream: false,
enableTools: true,
tools: [{ name: 'test_tool', description: 'Test', parameters: {} }]
};
vi.mocked(providers.getOllamaOptions).mockReturnValueOnce(mockOptions);
// Mock response with tool call
mockOllamaInstance.chat.mockResolvedValueOnce({
message: {
role: 'assistant',
content: '',
tool_calls: [{
id: 'call_123',
function: {
name: 'test_tool',
arguments: { key: 'value' }
}
}]
},
done: true
});
const result = await service.generateChatCompletion(messages);
expect(result.tool_calls).toEqual([{
id: 'call_123',
type: 'function',
function: {
name: 'test_tool',
arguments: '{"key":"value"}'
}
}]);
});
it('should handle mixed text and tool content', async () => {
const mockOptions = {
baseUrl: 'http://localhost:11434',
model: 'llama2',
stream: false
};
vi.mocked(providers.getOllamaOptions).mockReturnValueOnce(mockOptions);
// Mock response with both text and tool calls
mockOllamaInstance.chat.mockResolvedValueOnce({
message: {
role: 'assistant',
content: 'Let me help you with that.',
tool_calls: [{
id: 'call_123',
function: {
name: 'calculate',
arguments: { x: 5, y: 3 }
}
}]
},
done: true
});
const result = await service.generateChatCompletion(messages);
expect(result.content).toBe('Let me help you with that.');
expect(result.tool_calls).toHaveLength(1);
});
it('should format messages using the formatter', async () => {
const mockOptions = {
baseUrl: 'http://localhost:11434',
model: 'llama2',
stream: false
};
vi.mocked(providers.getOllamaOptions).mockReturnValueOnce(mockOptions);
const formattedMessages = [{ role: 'user', content: 'Hello' }];
(service as any).formatter.formatMessages.mockReturnValueOnce(formattedMessages);
const chatSpy = vi.spyOn(mockOllamaInstance, 'chat');
await service.generateChatCompletion(messages);
expect((service as any).formatter.formatMessages).toHaveBeenCalledWith(
messages,
'You are a helpful assistant'
);
expect(chatSpy).toHaveBeenCalledWith(
expect.objectContaining({
messages: formattedMessages
})
);
});
it('should handle network errors gracefully', async () => {
const mockOptions = {
baseUrl: 'http://localhost:11434',
model: 'llama2',
stream: false
};
vi.mocked(providers.getOllamaOptions).mockReturnValueOnce(mockOptions);
// Mock network error
global.fetch = vi.fn().mockRejectedValueOnce(
new Error('Network error')
);
mockOllamaInstance.chat.mockRejectedValueOnce(
new Error('fetch failed')
);
await expect(service.generateChatCompletion(messages)).rejects.toThrow(
'Ollama API error: fetch failed'
);
});
it('should validate model availability', async () => {
const mockOptions = {
baseUrl: 'http://localhost:11434',
model: 'nonexistent-model',
stream: false
};
vi.mocked(providers.getOllamaOptions).mockReturnValueOnce(mockOptions);
// Mock model not found error
mockOllamaInstance.chat.mockRejectedValueOnce(
new Error('model "nonexistent-model" not found')
);
await expect(service.generateChatCompletion(messages)).rejects.toThrow(
'Ollama API error: model "nonexistent-model" not found'
);
});
});
describe('client management', () => {
it('should reuse existing client', async () => {
vi.mocked(options.getOptionBool).mockReturnValue(true);
vi.mocked(options.getOption).mockReturnValue('http://localhost:11434');
const mockOptions = {
baseUrl: 'http://localhost:11434',
model: 'llama2',
stream: false
};
vi.mocked(providers.getOllamaOptions).mockReturnValue(mockOptions);
const OllamaMock = vi.mocked(Ollama);
OllamaMock.mockClear();
// Make two calls
await service.generateChatCompletion([{ role: 'user', content: 'Hello' }]);
await service.generateChatCompletion([{ role: 'user', content: 'Hi' }]);
// Should only create client once
expect(OllamaMock).toHaveBeenCalledTimes(1);
});
});
});

View File

@@ -0,0 +1,340 @@
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import { OpenAIService } from './openai_service.js';
import options from '../../options.js';
import * as providers from './providers.js';
import type { ChatCompletionOptions, Message } from '../ai_interface.js';
// Mock dependencies
vi.mock('../../options.js', () => ({
default: {
getOption: vi.fn(),
getOptionBool: vi.fn()
}
}));
vi.mock('../../log.js', () => ({
default: {
info: vi.fn(),
error: vi.fn(),
warn: vi.fn()
}
}));
vi.mock('./providers.js', () => ({
getOpenAIOptions: vi.fn()
}));
// Mock OpenAI completely
vi.mock('openai', () => {
return {
default: vi.fn()
};
});
describe('OpenAIService', () => {
let service: OpenAIService;
beforeEach(() => {
vi.clearAllMocks();
service = new OpenAIService();
});
afterEach(() => {
vi.restoreAllMocks();
});
describe('constructor', () => {
it('should initialize with provider name', () => {
expect(service).toBeDefined();
expect(service.getName()).toBe('OpenAI');
});
});
describe('isAvailable', () => {
it('should return true when base checks pass', () => {
vi.mocked(options.getOptionBool).mockReturnValueOnce(true); // AI enabled
const result = service.isAvailable();
expect(result).toBe(true);
});
it('should return false when AI is disabled', () => {
vi.mocked(options.getOptionBool).mockReturnValueOnce(false); // AI disabled
const result = service.isAvailable();
expect(result).toBe(false);
});
});
describe('generateChatCompletion', () => {
const messages: Message[] = [
{ role: 'user', content: 'Hello' }
];
beforeEach(() => {
vi.mocked(options.getOptionBool).mockReturnValue(true); // AI enabled
vi.mocked(options.getOption).mockReturnValue('You are a helpful assistant'); // System prompt
});
it('should generate non-streaming completion', async () => {
const mockOptions = {
apiKey: 'test-key',
model: 'gpt-3.5-turbo',
temperature: 0.7,
max_tokens: 1000,
stream: false,
enableTools: false
};
vi.mocked(providers.getOpenAIOptions).mockReturnValueOnce(mockOptions);
// Mock the getClient method to return our mock client
const mockCompletion = {
id: 'chatcmpl-123',
object: 'chat.completion',
created: 1677652288,
model: 'gpt-3.5-turbo',
choices: [{
index: 0,
message: {
role: 'assistant',
content: 'Hello! How can I help you today?'
},
finish_reason: 'stop'
}],
usage: {
prompt_tokens: 9,
completion_tokens: 12,
total_tokens: 21
}
};
const mockClient = {
chat: {
completions: {
create: vi.fn().mockResolvedValueOnce(mockCompletion)
}
}
};
vi.spyOn(service as any, 'getClient').mockReturnValue(mockClient);
const result = await service.generateChatCompletion(messages);
expect(result).toEqual({
text: 'Hello! How can I help you today?',
model: 'gpt-3.5-turbo',
provider: 'OpenAI',
usage: {
promptTokens: 9,
completionTokens: 12,
totalTokens: 21
},
tool_calls: undefined
});
});
it('should handle streaming completion', async () => {
const mockOptions = {
apiKey: 'test-key',
model: 'gpt-3.5-turbo',
stream: true
};
vi.mocked(providers.getOpenAIOptions).mockReturnValueOnce(mockOptions);
// Mock the streaming response
const mockStream = {
[Symbol.asyncIterator]: async function* () {
yield {
choices: [{
delta: { content: 'Hello' },
finish_reason: null
}]
};
yield {
choices: [{
delta: { content: ' world' },
finish_reason: 'stop'
}]
};
}
};
const mockClient = {
chat: {
completions: {
create: vi.fn().mockResolvedValueOnce(mockStream)
}
}
};
vi.spyOn(service as any, 'getClient').mockReturnValue(mockClient);
const result = await service.generateChatCompletion(messages);
expect(result).toHaveProperty('stream');
expect(result.text).toBe('');
expect(result.model).toBe('gpt-3.5-turbo');
expect(result.provider).toBe('OpenAI');
});
it('should throw error if service not available', async () => {
vi.mocked(options.getOptionBool).mockReturnValueOnce(false); // AI disabled
await expect(service.generateChatCompletion(messages)).rejects.toThrow(
'OpenAI service is not available'
);
});
it('should handle API errors', async () => {
const mockOptions = {
apiKey: 'test-key',
model: 'gpt-3.5-turbo',
stream: false
};
vi.mocked(providers.getOpenAIOptions).mockReturnValueOnce(mockOptions);
const mockClient = {
chat: {
completions: {
create: vi.fn().mockRejectedValueOnce(new Error('API Error: Invalid API key'))
}
}
};
vi.spyOn(service as any, 'getClient').mockReturnValue(mockClient);
await expect(service.generateChatCompletion(messages)).rejects.toThrow(
'API Error: Invalid API key'
);
});
it('should handle tools when enabled', async () => {
const mockTools = [{
type: 'function' as const,
function: {
name: 'test_tool',
description: 'Test tool',
parameters: {}
}
}];
const mockOptions = {
apiKey: 'test-key',
model: 'gpt-3.5-turbo',
stream: false,
enableTools: true,
tools: mockTools,
tool_choice: 'auto'
};
vi.mocked(providers.getOpenAIOptions).mockReturnValueOnce(mockOptions);
const mockCompletion = {
id: 'chatcmpl-123',
object: 'chat.completion',
created: 1677652288,
model: 'gpt-3.5-turbo',
choices: [{
index: 0,
message: {
role: 'assistant',
content: 'I need to use a tool.'
},
finish_reason: 'stop'
}],
usage: {
prompt_tokens: 9,
completion_tokens: 12,
total_tokens: 21
}
};
const mockClient = {
chat: {
completions: {
create: vi.fn().mockResolvedValueOnce(mockCompletion)
}
}
};
vi.spyOn(service as any, 'getClient').mockReturnValue(mockClient);
await service.generateChatCompletion(messages);
const createCall = mockClient.chat.completions.create.mock.calls[0][0];
expect(createCall.tools).toEqual(mockTools);
expect(createCall.tool_choice).toBe('auto');
});
it('should handle tool calls in response', async () => {
const mockOptions = {
apiKey: 'test-key',
model: 'gpt-3.5-turbo',
stream: false,
enableTools: true,
tools: [{ type: 'function' as const, function: { name: 'test', description: 'test' } }]
};
vi.mocked(providers.getOpenAIOptions).mockReturnValueOnce(mockOptions);
const mockCompletion = {
id: 'chatcmpl-123',
object: 'chat.completion',
created: 1677652288,
model: 'gpt-3.5-turbo',
choices: [{
index: 0,
message: {
role: 'assistant',
content: null,
tool_calls: [{
id: 'call_123',
type: 'function',
function: {
name: 'test',
arguments: '{"key": "value"}'
}
}]
},
finish_reason: 'tool_calls'
}],
usage: {
prompt_tokens: 9,
completion_tokens: 12,
total_tokens: 21
}
};
const mockClient = {
chat: {
completions: {
create: vi.fn().mockResolvedValueOnce(mockCompletion)
}
}
};
vi.spyOn(service as any, 'getClient').mockReturnValue(mockClient);
const result = await service.generateChatCompletion(messages);
expect(result).toEqual({
text: '',
model: 'gpt-3.5-turbo',
provider: 'OpenAI',
usage: {
promptTokens: 9,
completionTokens: 12,
totalTokens: 21
},
tool_calls: [{
id: 'call_123',
type: 'function',
function: {
name: 'test',
arguments: '{"key": "value"}'
}
}]
});
});
});
});

View File

@@ -0,0 +1,510 @@
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import { ToolRegistry } from './tool_registry.js';
import type { Tool, ToolHandler } from './tool_interfaces.js';
// Mock dependencies
vi.mock('../../log.js', () => ({
default: {
info: vi.fn(),
error: vi.fn(),
warn: vi.fn()
}
}));
describe('ToolRegistry', () => {
let registry: ToolRegistry;
beforeEach(() => {
// Reset singleton instance before each test
(ToolRegistry as any).instance = undefined;
registry = ToolRegistry.getInstance();
// Clear any existing tools
(registry as any).tools.clear();
(registry as any).initializationAttempted = false;
vi.clearAllMocks();
});
afterEach(() => {
vi.restoreAllMocks();
});
describe('singleton pattern', () => {
it('should return the same instance', () => {
const instance1 = ToolRegistry.getInstance();
const instance2 = ToolRegistry.getInstance();
expect(instance1).toBe(instance2);
});
it('should create instance only once', () => {
const instance1 = ToolRegistry.getInstance();
const instance2 = ToolRegistry.getInstance();
const instance3 = ToolRegistry.getInstance();
expect(instance1).toBe(instance2);
expect(instance2).toBe(instance3);
});
});
describe('tool validation', () => {
it('should validate a proper tool handler', () => {
const validHandler: ToolHandler = {
definition: {
type: 'function',
function: {
name: 'test_tool',
description: 'A test tool',
parameters: {
type: 'object',
properties: {},
required: []
}
}
},
execute: vi.fn().mockResolvedValue('result')
};
registry.registerTool(validHandler);
expect(registry.getTool('test_tool')).toBe(validHandler);
});
it('should reject null or undefined handler', () => {
registry.registerTool(null as any);
registry.registerTool(undefined as any);
expect(registry.getTools()).toHaveLength(0);
});
it('should reject handler without definition', () => {
const invalidHandler = {
execute: vi.fn()
} as any;
registry.registerTool(invalidHandler);
expect(registry.getTools()).toHaveLength(0);
});
it('should reject handler without function definition', () => {
const invalidHandler = {
definition: {
type: 'function'
},
execute: vi.fn()
} as any;
registry.registerTool(invalidHandler);
expect(registry.getTools()).toHaveLength(0);
});
it('should reject handler without function name', () => {
const invalidHandler = {
definition: {
type: 'function',
function: {
description: 'Missing name'
}
},
execute: vi.fn()
} as any;
registry.registerTool(invalidHandler);
expect(registry.getTools()).toHaveLength(0);
});
it('should reject handler without execute method', () => {
const invalidHandler = {
definition: {
type: 'function',
function: {
name: 'test_tool',
description: 'Test tool'
}
}
} as any;
registry.registerTool(invalidHandler);
expect(registry.getTools()).toHaveLength(0);
});
it('should reject handler with non-function execute', () => {
const invalidHandler = {
definition: {
type: 'function',
function: {
name: 'test_tool',
description: 'Test tool'
}
},
execute: 'not a function'
} as any;
registry.registerTool(invalidHandler);
expect(registry.getTools()).toHaveLength(0);
});
});
describe('tool registration', () => {
it('should register a valid tool', () => {
const handler: ToolHandler = {
definition: {
type: 'function',
function: {
name: 'calculator',
description: 'Performs calculations',
parameters: {
type: 'object',
properties: {
expression: { type: 'string' }
},
required: ['expression']
}
}
},
execute: vi.fn().mockResolvedValue('42')
};
registry.registerTool(handler);
expect(registry.getTool('calculator')).toBe(handler);
expect(registry.getTools()).toHaveLength(1);
});
it('should prevent duplicate tool registration', () => {
const handler1: ToolHandler = {
definition: {
type: 'function',
function: {
name: 'duplicate_tool',
description: 'First version'
}
},
execute: vi.fn()
};
const handler2: ToolHandler = {
definition: {
type: 'function',
function: {
name: 'duplicate_tool',
description: 'Second version'
}
},
execute: vi.fn()
};
registry.registerTool(handler1);
registry.registerTool(handler2);
// Should keep the first registration
expect(registry.getTool('duplicate_tool')).toBe(handler1);
expect(registry.getTools()).toHaveLength(1);
});
it('should handle registration errors gracefully', () => {
const handlerWithError = {
definition: {
type: 'function',
function: {
name: 'error_tool',
description: 'This will cause an error'
}
},
execute: null
} as any;
// Should not throw
expect(() => registry.registerTool(handlerWithError)).not.toThrow();
expect(registry.getTool('error_tool')).toBeUndefined();
});
});
describe('tool retrieval', () => {
beforeEach(() => {
const tools = [
{
name: 'tool1',
description: 'First tool'
},
{
name: 'tool2',
description: 'Second tool'
},
{
name: 'tool3',
description: 'Third tool'
}
];
tools.forEach(tool => {
const handler: ToolHandler = {
definition: {
type: 'function',
function: tool
},
execute: vi.fn()
};
registry.registerTool(handler);
});
});
it('should retrieve a tool by name', () => {
const tool = registry.getTool('tool1');
expect(tool).toBeDefined();
expect(tool?.definition.function.name).toBe('tool1');
});
it('should return undefined for non-existent tool', () => {
const tool = registry.getTool('non_existent');
expect(tool).toBeUndefined();
});
it('should get all registered tools', () => {
const tools = registry.getTools();
expect(tools).toHaveLength(3);
expect(tools.map(t => t.definition.function.name)).toEqual(['tool1', 'tool2', 'tool3']);
});
it('should get tool definitions', () => {
const definitions = registry.getToolDefinitions();
expect(definitions).toHaveLength(3);
expect(definitions[0]).toEqual({
type: 'function',
function: {
name: 'tool1',
description: 'First tool'
}
});
});
it('should check if tool exists', () => {
expect(registry.hasTool('tool1')).toBe(true);
expect(registry.hasTool('non_existent')).toBe(false);
});
});
describe('tool execution', () => {
let mockHandler: ToolHandler;
beforeEach(() => {
mockHandler = {
definition: {
type: 'function',
function: {
name: 'test_executor',
description: 'Test tool for execution',
parameters: {
type: 'object',
properties: {
input: { type: 'string' }
},
required: ['input']
}
}
},
execute: vi.fn().mockResolvedValue('execution result')
};
registry.registerTool(mockHandler);
});
it('should execute a tool with arguments', async () => {
const result = await registry.executeTool('test_executor', { input: 'test value' });
expect(result).toBe('execution result');
expect(mockHandler.execute).toHaveBeenCalledWith({ input: 'test value' });
});
it('should throw error for non-existent tool', async () => {
await expect(
registry.executeTool('non_existent', {})
).rejects.toThrow('Tool non_existent not found');
});
it('should handle tool execution errors', async () => {
mockHandler.execute = vi.fn().mockRejectedValue(new Error('Execution failed'));
await expect(
registry.executeTool('test_executor', { input: 'test' })
).rejects.toThrow('Execution failed');
});
it('should execute tool without arguments', async () => {
const simpleHandler: ToolHandler = {
definition: {
type: 'function',
function: {
name: 'simple_tool',
description: 'Simple tool'
}
},
execute: vi.fn().mockResolvedValue('simple result')
};
registry.registerTool(simpleHandler);
const result = await registry.executeTool('simple_tool');
expect(result).toBe('simple result');
expect(simpleHandler.execute).toHaveBeenCalledWith(undefined);
});
});
describe('tool unregistration', () => {
beforeEach(() => {
const handler: ToolHandler = {
definition: {
type: 'function',
function: {
name: 'removable_tool',
description: 'A tool that can be removed'
}
},
execute: vi.fn()
};
registry.registerTool(handler);
});
it('should unregister a tool', () => {
expect(registry.hasTool('removable_tool')).toBe(true);
registry.unregisterTool('removable_tool');
expect(registry.hasTool('removable_tool')).toBe(false);
});
it('should handle unregistering non-existent tool', () => {
// Should not throw
expect(() => registry.unregisterTool('non_existent')).not.toThrow();
});
});
describe('registry clearing', () => {
beforeEach(() => {
// Register multiple tools
for (let i = 1; i <= 3; i++) {
const handler: ToolHandler = {
definition: {
type: 'function',
function: {
name: `tool${i}`,
description: `Tool ${i}`
}
},
execute: vi.fn()
};
registry.registerTool(handler);
}
});
it('should clear all tools', () => {
expect(registry.getTools()).toHaveLength(3);
registry.clear();
expect(registry.getTools()).toHaveLength(0);
});
it('should reset initialization flag on clear', () => {
(registry as any).initializationAttempted = true;
registry.clear();
expect((registry as any).initializationAttempted).toBe(false);
});
});
describe('initialization handling', () => {
it('should attempt initialization when registry is empty', () => {
const emptyRegistry = ToolRegistry.getInstance();
(emptyRegistry as any).tools.clear();
(emptyRegistry as any).initializationAttempted = false;
// Try to get tools which should trigger initialization attempt
emptyRegistry.getTools();
expect((emptyRegistry as any).initializationAttempted).toBe(true);
});
it('should not attempt initialization twice', () => {
const spy = vi.spyOn(registry as any, 'tryInitializeTools');
registry.getTools(); // First call
registry.getTools(); // Second call
expect(spy).toHaveBeenCalledTimes(1);
});
it('should not attempt initialization if tools exist', () => {
const handler: ToolHandler = {
definition: {
type: 'function',
function: {
name: 'existing_tool',
description: 'Already exists'
}
},
execute: vi.fn()
};
registry.registerTool(handler);
const spy = vi.spyOn(registry as any, 'tryInitializeTools');
registry.getTools();
expect(spy).not.toHaveBeenCalled();
});
});
describe('error handling', () => {
it('should handle validation errors gracefully', () => {
const problematicHandler = {
definition: {
type: 'function',
function: {
name: 'problematic',
description: 'This will cause validation issues'
}
},
execute: () => { throw new Error('Validation error'); }
} as any;
// Should not throw during registration
expect(() => registry.registerTool(problematicHandler)).not.toThrow();
});
it('should handle tool execution that throws synchronously', async () => {
const throwingHandler: ToolHandler = {
definition: {
type: 'function',
function: {
name: 'throwing_tool',
description: 'Throws an error'
}
},
execute: vi.fn().mockImplementation(() => {
throw new Error('Synchronous error');
})
};
registry.registerTool(throwingHandler);
await expect(
registry.executeTool('throwing_tool', {})
).rejects.toThrow('Synchronous error');
});
});
});