Merge remote-tracking branch 'origin/main' into feature/rtl_ui

Elian Doran
2025-10-09 17:46:26 +03:00
61 changed files with 1662 additions and 670 deletions

View File

@@ -18,6 +18,134 @@
"copy-notes-to-clipboard": "نسخ الملاحظات المحددة الى الحافظة",
"paste-notes-from-clipboard": "لصق الملاحظا من الحافظة الى الملاحظة الحالية",
"cut-notes-to-clipboard": "قص الملاحظات المحددة الى الحافظة",
"select-all-notes-in-parent": "تحديد جميع الملاحظات من مستوى الملاحظة الحالي"
"select-all-notes-in-parent": "تحديد جميع الملاحظات من مستوى الملاحظة الحالي",
"back-in-note-history": "الانتقال الى الملاحظة السابقة في السجل",
"forward-in-note-history": "الانتقال الى الملاحظة التالية في السجل",
"scroll-to-active-note": "تمرير شجرة الملاحظات الى الملاحظة النشطة",
"search-in-subtree": "البحث عن الملاحظات في الشجرة الفرعية للملاحظة النشطة",
"expand-subtree": "توسيع الشجرة الفرعية للملاحظة الحالية",
"create-note-into-inbox": "انشاء ملاحظة في صندوق الوارد (اذا كان معرفا) او في ملاحظة اليوم",
"move-note-up-in-hierarchy": "نقل الملاحظة للاعلى في التسلسل الهرمي",
"move-note-down-in-hierarchy": "نقل الملاحظة للاسفل في التسلسل الهرمي",
"edit-note-title": "الانتقال من شجرة الملاحظات إلى تفاصيل الملاحظة وتحرير العنوان",
"edit-branch-prefix": "عرض مربع حوار \"تعديل بادئة الفرع\"",
"add-note-above-to-the-selection": "اضافة ملاحظة فوق الملاحظة المحددة",
"add-note-below-to-selection": "اضافة ملاحظة اسفل الملاحظة المحددة",
"duplicate-subtree": "استنساخ الشجرة الفرعية",
"tabs-and-windows": "التبويبات والنوافذ",
"open-new-tab": "فتح تبويب جديد",
"close-active-tab": "غلق التبويب النشط",
"reopen-last-tab": "اعادة فتح اخر تبويب مغلق",
"activate-next-tab": "تنشيط التبويب الموجود على اليمين",
"activate-previous-tab": "تنشيط التبويب الموجود على اليسار",
"open-new-window": "فتح نافذة جديدة فارغة",
"first-tab": "تنشيط التبويب الاول في القائمة",
"second-tab": "تنشيط التبويب الثاني في القائمة",
"third-tab": "تنشيط التبويب الثالث في الثائمة",
"fourth-tab": "تنشيط التبويب الرابع في القائمة",
"fifth-tab": "تنشيط التبويب الخامس في القائمة",
"sixth-tab": "تنشيط التبويب السادس في القائمة",
"seventh-tab": "تنشيط التبويب السابع في القائمة",
"eight-tab": "تنشيط التبويب الثامن في القائمة",
"ninth-tab": "تنشيط التبويب التاسع في القائمة",
"last-tab": "تنشيط التبويب الاخير في القائمة",
"other": "أخرى",
"dialogs": "مربعات الحوار"
},
"setup_sync-from-server": {
"note": "ملاحظة:",
"password": "كلمة السر",
"password-placeholder": "كلمة السر",
"back": "رجوع",
"server-host-placeholder": "https://<hostname>:<port>",
"proxy-server-placeholder": "https://<hostname>:<port>"
},
"weekdays": {
"monday": "الأثنين",
"tuesday": "الثلاثاء",
"wednesday": "الاربعاء",
"thursday": "الخميس",
"friday": "الجمعة",
"saturday": "السبت",
"sunday": "الأحد"
},
"months": {
"january": "يناير",
"february": "فبراير",
"march": "مارس",
"april": "ابريل",
"may": "مايو",
"june": "يونيو",
"july": "يوليو",
"august": "أغسطس",
"september": "سبتمبر",
"october": "أكتوبر",
"november": "نوفمبر",
"december": "ديسمبر"
},
"special_notes": {
"search_prefix": "بحث:"
},
"hidden-subtree": {
"calendar-title": "تقويم",
"bookmarks-title": "العلامات المرجعية",
"settings-title": "أعدادات",
"options-title": "خيارات",
"appearance-title": "المظهر",
"shortcuts-title": "أختصارات",
"images-title": "صور",
"password-title": "كلمة السر",
"backup-title": "نسخة أحتياطية",
"sync-title": "مزامنة",
"other": "أخرى",
"advanced-title": "متقدم",
"inbox-title": "صندوق الوارد",
"spacer-title": "فاصل",
"spellcheck-title": "التدقيق الاملائي",
"multi-factor-authentication-title": "المصادقة متعددة العوامل"
},
"tray": {
"bookmarks": "العلامات المرجعية"
},
"modals": {
"error_title": "خطأ"
},
"share_theme": {
"search_placeholder": "بحث...",
"subpages": "الصفحات الفرعية:",
"expand": "توسيع"
},
"hidden_subtree_templates": {
"description": "الوصف",
"calendar": "التقويم",
"table": "جدول",
"geolocation": "الموقع الجغرافي",
"board": "لوحة",
"status": "الحالة",
"board_status_done": "تمت"
},
"login": {
"title": "تسجيل الدخول",
"password": "كلمة السر",
"button": "تسجيل الدخول"
},
"set_password": {
"password": "كلمة السر"
},
"setup": {
"next": "التالي",
"title": "تثبيت"
},
"setup_sync-from-desktop": {
"step6-here": "هنا"
},
"setup_sync-in-progress": {
"outstanding-items-default": "غير متوفر"
},
"share_page": {
"parent": "الأصل:"
},
"notes": {
"duplicate-note-suffix": "(مكرر)"
}
}

View File

@@ -0,0 +1,8 @@
{
"keyboard_actions": {
"back-in-note-history": "Navigasi ke catatan sebelumnya di history",
"forward-in-note-history": "Navigasi ke catatan selanjutnya di history",
"open-jump-to-note-dialog": "Buka dialog \"Menuju ke catatan\"",
"open-command-palette": "Buka palet perintah"
}
}

View File

@@ -1,4 +1,5 @@
import { BNote } from "../../services/backend_script_entrypoint";
import cls from "../../services/cls";
import { buildNote } from "../../test/becca_easy_mocking";
import { processContent } from "./clipper";
@@ -6,7 +7,9 @@ let note!: BNote;
describe("processContent", () => {
beforeAll(() => {
note = buildNote({});
note = buildNote({
content: "Hi there"
});
note.saveAttachment = () => {};
vi.mock("../../services/image.js", () => ({
default: {
@@ -21,29 +24,29 @@ describe("processContent", () => {
});
it("processes basic note", () => {
const processed = processContent([], note, "<p>Hello world.</p>");
const processed = cls.init(() => processContent([], note, "<p>Hello world.</p>"));
expect(processed).toStrictEqual("<p>Hello world.</p>")
});
it("processes plain text", () => {
const processed = processContent([], note, "Hello world.");
const processed = cls.init(() => processContent([], note, "Hello world."));
expect(processed).toStrictEqual("<p>Hello world.</p>")
});
it("replaces images", () => {
const processed = processContent(
const processed = cls.init(() => processContent(
[{"imageId":"OKZxZA3MonZJkwFcEhId","src":"inline.png","dataUrl":"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAkAAAAQCAYAAADESFVDAAAAF0lEQVQoU2P8DwQMBADjqKLRIGAgKggAzHs/0SoYCGwAAAAASUVORK5CYII="}],
note, `<img src="OKZxZA3MonZJkwFcEhId">`
);
));
expect(processed).toStrictEqual(`<img src="api/attachments/foo/image/encodedTitle" >`);
});
it("skips over non-data images", () => {
for (const url of [ "foo", "" ]) {
const processed = processContent(
const processed = cls.init(() => processContent(
[{"imageId":"OKZxZA3MonZJkwFcEhId","src":"inline.png","dataUrl": url}],
note, `<img src="OKZxZA3MonZJkwFcEhId">`
);
));
expect(processed).toStrictEqual(`<img src="OKZxZA3MonZJkwFcEhId" >`);
}
});

View File

@@ -3,7 +3,7 @@ import { beforeAll, describe, expect, it, vi, beforeEach, afterEach } from "vite
import supertest from "supertest";
import config from "../../services/config.js";
import { refreshAuth } from "../../services/auth.js";
import type { WebSocket } from 'ws';
import { sleepFor } from "@triliumnext/commons";
// Mock the CSRF protection middleware to allow tests to pass
vi.mock("../csrf_protection.js", () => ({
@@ -72,7 +72,11 @@ vi.mock("../../services/options.js", () => ({
getOptionMap: vi.fn(() => new Map()),
createOption: vi.fn(),
getOption: vi.fn(() => '0'),
getOptionOrNull: vi.fn(() => null)
getOptionOrNull: vi.fn(() => null),
getOptionInt: vi.fn(name => {
if (name === "protectedSessionTimeout") return Number.MAX_SAFE_INTEGER;
return 0;
})
}
}));
@@ -499,6 +503,7 @@ describe("LLM API Tests", () => {
const ws = (await import("../../services/ws.js")).default;
// Verify thinking message was sent
await sleepFor(1_000);
expect(ws.sendMessageToAllClients).toHaveBeenCalledWith({
type: 'llm-stream',
chatNoteId: testChatId,

View File

@@ -12,7 +12,11 @@ import type { AIService, ChatCompletionOptions, Message } from './ai_interface.j
vi.mock('../options.js', () => ({
default: {
getOption: vi.fn(),
getOptionBool: vi.fn()
getOptionBool: vi.fn(),
getOptionInt: vi.fn(name => {
if (name === "protectedSessionTimeout") return Number.MAX_SAFE_INTEGER;
return 0;
})
}
}));
@@ -110,26 +114,26 @@ describe('AIServiceManager', () => {
describe('getSelectedProviderAsync', () => {
it('should return the selected provider', async () => {
vi.mocked(configHelpers.getSelectedProvider).mockResolvedValueOnce('openai');
const result = await manager.getSelectedProviderAsync();
expect(result).toBe('openai');
expect(configHelpers.getSelectedProvider).toHaveBeenCalled();
});
it('should return null if no provider is selected', async () => {
vi.mocked(configHelpers.getSelectedProvider).mockResolvedValueOnce(null);
const result = await manager.getSelectedProviderAsync();
expect(result).toBeNull();
});
it('should handle errors and return null', async () => {
vi.mocked(configHelpers.getSelectedProvider).mockRejectedValueOnce(new Error('Config error'));
const result = await manager.getSelectedProviderAsync();
expect(result).toBeNull();
});
});
@@ -141,9 +145,9 @@ describe('AIServiceManager', () => {
errors: [],
warnings: []
});
const result = await manager.validateConfiguration();
expect(result).toBeNull();
});
@@ -153,9 +157,9 @@ describe('AIServiceManager', () => {
errors: ['Missing API key', 'Invalid model'],
warnings: []
});
const result = await manager.validateConfiguration();
expect(result).toContain('There are issues with your AI configuration');
expect(result).toContain('Missing API key');
expect(result).toContain('Invalid model');
@@ -167,9 +171,9 @@ describe('AIServiceManager', () => {
errors: [],
warnings: ['Model not optimal']
});
const result = await manager.validateConfiguration();
expect(result).toBeNull();
});
});
@@ -178,21 +182,21 @@ describe('AIServiceManager', () => {
it('should create and return the selected provider service', async () => {
vi.mocked(configHelpers.getSelectedProvider).mockResolvedValueOnce('openai');
vi.mocked(options.getOption).mockReturnValueOnce('test-api-key');
const mockService = {
isAvailable: vi.fn().mockReturnValue(true),
generateChatCompletion: vi.fn()
};
vi.mocked(OpenAIService).mockImplementationOnce(() => mockService as any);
const result = await manager.getOrCreateAnyService();
expect(result).toBe(mockService);
});
it('should throw error if no provider is selected', async () => {
vi.mocked(configHelpers.getSelectedProvider).mockResolvedValueOnce(null);
await expect(manager.getOrCreateAnyService()).rejects.toThrow(
'No AI provider is selected'
);
@@ -201,7 +205,7 @@ describe('AIServiceManager', () => {
it('should throw error if selected provider is not available', async () => {
vi.mocked(configHelpers.getSelectedProvider).mockResolvedValueOnce('openai');
vi.mocked(options.getOption).mockReturnValueOnce(''); // No API key
await expect(manager.getOrCreateAnyService()).rejects.toThrow(
'Selected AI provider (openai) is not available'
);
@@ -211,17 +215,17 @@ describe('AIServiceManager', () => {
describe('isAnyServiceAvailable', () => {
it('should return true if any provider is available', () => {
vi.mocked(options.getOption).mockReturnValueOnce('test-api-key');
const result = manager.isAnyServiceAvailable();
expect(result).toBe(true);
});
it('should return false if no providers are available', () => {
vi.mocked(options.getOption).mockReturnValue('');
const result = manager.isAnyServiceAvailable();
expect(result).toBe(false);
});
});
@@ -232,18 +236,18 @@ describe('AIServiceManager', () => {
.mockReturnValueOnce('openai-key')
.mockReturnValueOnce('anthropic-key')
.mockReturnValueOnce(''); // No Ollama URL
const result = manager.getAvailableProviders();
expect(result).toEqual(['openai', 'anthropic']);
});
it('should include already created services', () => {
// Mock that OpenAI has API key configured
vi.mocked(options.getOption).mockReturnValueOnce('test-api-key');
const result = manager.getAvailableProviders();
expect(result).toContain('openai');
});
});
@@ -255,23 +259,23 @@ describe('AIServiceManager', () => {
it('should generate completion with selected provider', async () => {
vi.mocked(configHelpers.getSelectedProvider).mockResolvedValueOnce('openai');
// Mock the getAvailableProviders to include openai
vi.mocked(options.getOption)
.mockReturnValueOnce('test-api-key') // for availability check
.mockReturnValueOnce('') // for anthropic
.mockReturnValueOnce('') // for ollama
.mockReturnValueOnce('test-api-key'); // for service creation
const mockResponse = { content: 'Hello response' };
const mockService = {
isAvailable: vi.fn().mockReturnValue(true),
generateChatCompletion: vi.fn().mockResolvedValueOnce(mockResponse)
};
vi.mocked(OpenAIService).mockImplementationOnce(() => mockService as any);
const result = await manager.generateChatCompletion(messages);
expect(result).toBe(mockResponse);
expect(mockService.generateChatCompletion).toHaveBeenCalledWith(messages, {});
});
@@ -283,28 +287,28 @@ describe('AIServiceManager', () => {
modelId: 'gpt-4',
fullIdentifier: 'openai:gpt-4'
});
// Mock the getAvailableProviders to include openai
vi.mocked(options.getOption)
.mockReturnValueOnce('test-api-key') // for availability check
.mockReturnValueOnce('') // for anthropic
.mockReturnValueOnce('') // for ollama
.mockReturnValueOnce('test-api-key'); // for service creation
const mockResponse = { content: 'Hello response' };
const mockService = {
isAvailable: vi.fn().mockReturnValue(true),
generateChatCompletion: vi.fn().mockResolvedValueOnce(mockResponse)
};
vi.mocked(OpenAIService).mockImplementationOnce(() => mockService as any);
const result = await manager.generateChatCompletion(messages, {
model: 'openai:gpt-4'
const result = await manager.generateChatCompletion(messages, {
model: 'openai:gpt-4'
});
expect(result).toBe(mockResponse);
expect(mockService.generateChatCompletion).toHaveBeenCalledWith(
messages,
messages,
{ model: 'gpt-4' }
);
});
@@ -317,7 +321,7 @@ describe('AIServiceManager', () => {
it('should throw error if no provider selected', async () => {
vi.mocked(configHelpers.getSelectedProvider).mockResolvedValueOnce(null);
await expect(manager.generateChatCompletion(messages)).rejects.toThrow(
'No AI provider is selected'
);
@@ -330,13 +334,13 @@ describe('AIServiceManager', () => {
modelId: 'claude-3',
fullIdentifier: 'anthropic:claude-3'
});
// Mock that openai is available
vi.mocked(options.getOption)
.mockReturnValueOnce('test-api-key') // for availability check
.mockReturnValueOnce('') // for anthropic
.mockReturnValueOnce(''); // for ollama
await expect(
manager.generateChatCompletion(messages, { model: 'anthropic:claude-3' })
).rejects.toThrow(
@@ -348,9 +352,9 @@ describe('AIServiceManager', () => {
describe('getAIEnabledAsync', () => {
it('should return AI enabled status', async () => {
vi.mocked(configHelpers.isAIEnabled).mockResolvedValueOnce(true);
const result = await manager.getAIEnabledAsync();
expect(result).toBe(true);
expect(configHelpers.isAIEnabled).toHaveBeenCalled();
});
@@ -359,9 +363,9 @@ describe('AIServiceManager', () => {
describe('getAIEnabled', () => {
it('should return AI enabled status synchronously', () => {
vi.mocked(options.getOptionBool).mockReturnValueOnce(true);
const result = manager.getAIEnabled();
expect(result).toBe(true);
expect(options.getOptionBool).toHaveBeenCalledWith('aiEnabled');
});
@@ -370,17 +374,17 @@ describe('AIServiceManager', () => {
describe('initialize', () => {
it('should initialize if AI is enabled', async () => {
vi.mocked(configHelpers.isAIEnabled).mockResolvedValueOnce(true);
await manager.initialize();
expect(configHelpers.isAIEnabled).toHaveBeenCalled();
});
it('should not initialize if AI is disabled', async () => {
vi.mocked(configHelpers.isAIEnabled).mockResolvedValueOnce(false);
await manager.initialize();
expect(configHelpers.isAIEnabled).toHaveBeenCalled();
});
});
@@ -388,36 +392,36 @@ describe('AIServiceManager', () => {
describe('getService', () => {
it('should return service for specified provider', async () => {
vi.mocked(options.getOption).mockReturnValueOnce('test-api-key');
const mockService = {
isAvailable: vi.fn().mockReturnValue(true),
generateChatCompletion: vi.fn()
};
vi.mocked(OpenAIService).mockImplementationOnce(() => mockService as any);
const result = await manager.getService('openai');
expect(result).toBe(mockService);
});
it('should return selected provider service if no provider specified', async () => {
vi.mocked(configHelpers.getSelectedProvider).mockResolvedValueOnce('anthropic');
vi.mocked(options.getOption).mockReturnValueOnce('test-api-key');
const mockService = {
isAvailable: vi.fn().mockReturnValue(true),
generateChatCompletion: vi.fn()
};
vi.mocked(AnthropicService).mockImplementationOnce(() => mockService as any);
const result = await manager.getService();
expect(result).toBe(mockService);
});
it('should throw error if specified provider not available', async () => {
vi.mocked(options.getOption).mockReturnValueOnce(''); // No API key
await expect(manager.getService('openai')).rejects.toThrow(
'Specified provider openai is not available'
);
@@ -427,17 +431,17 @@ describe('AIServiceManager', () => {
describe('getSelectedProvider', () => {
it('should return selected provider synchronously', () => {
vi.mocked(options.getOption).mockReturnValueOnce('anthropic');
const result = manager.getSelectedProvider();
expect(result).toBe('anthropic');
});
it('should return default provider if none selected', () => {
vi.mocked(options.getOption).mockReturnValueOnce('');
const result = manager.getSelectedProvider();
expect(result).toBe('openai');
});
});
@@ -446,18 +450,18 @@ describe('AIServiceManager', () => {
it('should return true if provider service is available', () => {
// Mock that OpenAI has API key configured
vi.mocked(options.getOption).mockReturnValueOnce('test-api-key');
const result = manager.isProviderAvailable('openai');
expect(result).toBe(true);
});
it('should return false if provider service not created', () => {
// Mock that OpenAI has no API key configured
vi.mocked(options.getOption).mockReturnValueOnce('');
const result = manager.isProviderAvailable('openai');
expect(result).toBe(false);
});
});
@@ -467,13 +471,13 @@ describe('AIServiceManager', () => {
// Since getProviderMetadata only returns metadata for the current active provider,
// and we don't have a current provider set, it should return null
const result = manager.getProviderMetadata('openai');
expect(result).toBeNull();
});
it('should return null for non-existing provider', () => {
const result = manager.getProviderMetadata('openai');
expect(result).toBeNull();
});
});
@@ -485,4 +489,4 @@ describe('AIServiceManager', () => {
expect(manager).toBeDefined();
});
});
});
});

View File

@@ -15,7 +15,11 @@ vi.mock('../../log.js', () => ({
vi.mock('../../options.js', () => ({
default: {
getOption: vi.fn(),
getOptionBool: vi.fn()
getOptionBool: vi.fn(),
getOptionInt: vi.fn(name => {
if (name === "protectedSessionTimeout") return Number.MAX_SAFE_INTEGER;
return 0;
})
}
}));
@@ -66,14 +70,14 @@ describe('RestChatService', () => {
beforeEach(async () => {
vi.clearAllMocks();
// Get mocked modules
mockOptions = (await import('../../options.js')).default;
mockAiServiceManager = (await import('../ai_service_manager.js')).default;
mockChatStorageService = (await import('../chat_storage_service.js')).default;
restChatService = (await import('./rest_chat_service.js')).default;
// Setup mock request and response
mockReq = {
params: {},
@@ -81,7 +85,7 @@ describe('RestChatService', () => {
query: {},
method: 'POST'
};
mockRes = {
status: vi.fn().mockReturnThis(),
json: vi.fn().mockReturnThis(),
@@ -240,7 +244,7 @@ describe('RestChatService', () => {
it('should handle GET request with stream parameter', async () => {
mockReq.method = 'GET';
mockReq.query = {
mockReq.query = {
stream: 'true',
useAdvancedContext: 'true',
showThinking: 'false'
@@ -419,4 +423,4 @@ describe('RestChatService', () => {
expect(mockChatStorageService.getChat).toHaveBeenCalledWith('chat-123');
});
});
});
});

View File

@@ -18,7 +18,11 @@ vi.mock('./configuration_manager.js', () => ({
vi.mock('../../options.js', () => ({
default: {
getOption: vi.fn(),
getOptionBool: vi.fn()
getOptionBool: vi.fn(),
getOptionInt: vi.fn(name => {
if (name === "protectedSessionTimeout") return Number.MAX_SAFE_INTEGER;
return 0;
})
}
}));
@@ -42,26 +46,26 @@ describe('configuration_helpers', () => {
describe('getSelectedProvider', () => {
it('should return the selected provider', async () => {
vi.mocked(optionService.getOption).mockReturnValueOnce('openai');
const result = await configHelpers.getSelectedProvider();
expect(result).toBe('openai');
expect(optionService.getOption).toHaveBeenCalledWith('aiSelectedProvider');
});
it('should return null if no provider is selected', async () => {
vi.mocked(optionService.getOption).mockReturnValueOnce('');
const result = await configHelpers.getSelectedProvider();
expect(result).toBeNull();
});
it('should handle invalid provider and return null', async () => {
vi.mocked(optionService.getOption).mockReturnValueOnce('invalid-provider');
const result = await configHelpers.getSelectedProvider();
expect(result).toBe('invalid-provider' as ProviderType);
});
});
@@ -69,7 +73,7 @@ describe('configuration_helpers', () => {
describe('parseModelIdentifier', () => {
it('should parse model identifier directly', () => {
const result = configHelpers.parseModelIdentifier('openai:gpt-4');
expect(result).toStrictEqual({
provider: 'openai',
modelId: 'gpt-4',
@@ -79,7 +83,7 @@ describe('configuration_helpers', () => {
it('should handle model without provider', () => {
const result = configHelpers.parseModelIdentifier('gpt-4');
expect(result).toStrictEqual({
modelId: 'gpt-4',
fullIdentifier: 'gpt-4'
@@ -88,7 +92,7 @@ describe('configuration_helpers', () => {
it('should handle empty model string', () => {
const result = configHelpers.parseModelIdentifier('');
expect(result).toStrictEqual({
modelId: '',
fullIdentifier: ''
@@ -98,7 +102,7 @@ describe('configuration_helpers', () => {
// Tests for special characters in model names
it('should handle model names with periods', () => {
const result = configHelpers.parseModelIdentifier('gpt-4.1-turbo-preview');
expect(result).toStrictEqual({
modelId: 'gpt-4.1-turbo-preview',
fullIdentifier: 'gpt-4.1-turbo-preview'
@@ -107,7 +111,7 @@ describe('configuration_helpers', () => {
it('should handle model names with provider prefix and periods', () => {
const result = configHelpers.parseModelIdentifier('openai:gpt-4.1-turbo');
expect(result).toStrictEqual({
provider: 'openai',
modelId: 'gpt-4.1-turbo',
@@ -117,7 +121,7 @@ describe('configuration_helpers', () => {
it('should handle model names with multiple colons', () => {
const result = configHelpers.parseModelIdentifier('custom:model:v1.2:latest');
expect(result).toStrictEqual({
modelId: 'custom:model:v1.2:latest',
fullIdentifier: 'custom:model:v1.2:latest'
@@ -126,7 +130,7 @@ describe('configuration_helpers', () => {
it('should handle Ollama model names with colons', () => {
const result = configHelpers.parseModelIdentifier('ollama:llama3.1:70b-instruct-q4_K_M');
expect(result).toStrictEqual({
provider: 'ollama',
modelId: 'llama3.1:70b-instruct-q4_K_M',
@@ -136,7 +140,7 @@ describe('configuration_helpers', () => {
it('should handle model names with slashes', () => {
const result = configHelpers.parseModelIdentifier('library/mistral:7b-instruct');
expect(result).toStrictEqual({
modelId: 'library/mistral:7b-instruct',
fullIdentifier: 'library/mistral:7b-instruct'
@@ -146,7 +150,7 @@ describe('configuration_helpers', () => {
it('should handle complex model names with special characters', () => {
const complexName = 'org/model-v1.2.3:tag@version#variant';
const result = configHelpers.parseModelIdentifier(complexName);
expect(result).toStrictEqual({
modelId: complexName,
fullIdentifier: complexName
@@ -155,7 +159,7 @@ describe('configuration_helpers', () => {
it('should handle model names with @ symbols', () => {
const result = configHelpers.parseModelIdentifier('claude-3.5-sonnet@20241022');
expect(result).toStrictEqual({
modelId: 'claude-3.5-sonnet@20241022',
fullIdentifier: 'claude-3.5-sonnet@20241022'
@@ -165,7 +169,7 @@ describe('configuration_helpers', () => {
it('should not modify or encode special characters', () => {
const specialChars = 'model!@#$%^&*()_+-=[]{}|;:\'",.<>?/~`';
const result = configHelpers.parseModelIdentifier(specialChars);
expect(result).toStrictEqual({
modelId: specialChars,
fullIdentifier: specialChars
@@ -176,7 +180,7 @@ describe('configuration_helpers', () => {
describe('createModelConfig', () => {
it('should create model config directly', () => {
const result = configHelpers.createModelConfig('gpt-4', 'openai');
expect(result).toStrictEqual({
provider: 'openai',
modelId: 'gpt-4',
@@ -186,7 +190,7 @@ describe('configuration_helpers', () => {
it('should handle model with provider prefix', () => {
const result = configHelpers.createModelConfig('openai:gpt-4');
expect(result).toStrictEqual({
provider: 'openai',
modelId: 'gpt-4',
@@ -196,7 +200,7 @@ describe('configuration_helpers', () => {
it('should fallback to openai provider when none specified', () => {
const result = configHelpers.createModelConfig('gpt-4');
expect(result).toStrictEqual({
provider: 'openai',
modelId: 'gpt-4',
@@ -208,27 +212,27 @@ describe('configuration_helpers', () => {
describe('getDefaultModelForProvider', () => {
it('should return default model for provider', async () => {
vi.mocked(optionService.getOption).mockReturnValue('gpt-4');
const result = await configHelpers.getDefaultModelForProvider('openai');
expect(result).toBe('gpt-4');
expect(optionService.getOption).toHaveBeenCalledWith('openaiDefaultModel');
});
it('should return undefined if no default model', async () => {
vi.mocked(optionService.getOption).mockReturnValue('');
const result = await configHelpers.getDefaultModelForProvider('anthropic');
expect(result).toBeUndefined();
expect(optionService.getOption).toHaveBeenCalledWith('anthropicDefaultModel');
});
it('should handle ollama provider', async () => {
vi.mocked(optionService.getOption).mockReturnValue('llama2');
const result = await configHelpers.getDefaultModelForProvider('ollama');
expect(result).toBe('llama2');
expect(optionService.getOption).toHaveBeenCalledWith('ollamaDefaultModel');
});
@@ -237,27 +241,27 @@ describe('configuration_helpers', () => {
it('should handle OpenAI model names with periods', async () => {
const modelName = 'gpt-4.1-turbo-preview';
vi.mocked(optionService.getOption).mockReturnValue(modelName);
const result = await configHelpers.getDefaultModelForProvider('openai');
expect(result).toBe(modelName);
});
it('should handle Anthropic model names with periods and @ symbols', async () => {
const modelName = 'claude-3.5-sonnet@20241022';
vi.mocked(optionService.getOption).mockReturnValue(modelName);
const result = await configHelpers.getDefaultModelForProvider('anthropic');
expect(result).toBe(modelName);
});
it('should handle Ollama model names with colons and slashes', async () => {
const modelName = 'library/llama3.1:70b-instruct-q4_K_M';
vi.mocked(optionService.getOption).mockReturnValue(modelName);
const result = await configHelpers.getDefaultModelForProvider('ollama');
expect(result).toBe(modelName);
});
});
@@ -268,9 +272,9 @@ describe('configuration_helpers', () => {
.mockReturnValueOnce('test-key') // openaiApiKey
.mockReturnValueOnce('https://api.openai.com') // openaiBaseUrl
.mockReturnValueOnce('gpt-4'); // openaiDefaultModel
const result = await configHelpers.getProviderSettings('openai');
expect(result).toStrictEqual({
apiKey: 'test-key',
baseUrl: 'https://api.openai.com',
@@ -283,9 +287,9 @@ describe('configuration_helpers', () => {
.mockReturnValueOnce('anthropic-key') // anthropicApiKey
.mockReturnValueOnce('https://api.anthropic.com') // anthropicBaseUrl
.mockReturnValueOnce('claude-3'); // anthropicDefaultModel
const result = await configHelpers.getProviderSettings('anthropic');
expect(result).toStrictEqual({
apiKey: 'anthropic-key',
baseUrl: 'https://api.anthropic.com',
@@ -297,9 +301,9 @@ describe('configuration_helpers', () => {
vi.mocked(optionService.getOption)
.mockReturnValueOnce('http://localhost:11434') // ollamaBaseUrl
.mockReturnValueOnce('llama2'); // ollamaDefaultModel
const result = await configHelpers.getProviderSettings('ollama');
expect(result).toStrictEqual({
baseUrl: 'http://localhost:11434',
defaultModel: 'llama2'
@@ -308,7 +312,7 @@ describe('configuration_helpers', () => {
it('should return empty object for unknown provider', async () => {
const result = await configHelpers.getProviderSettings('unknown' as ProviderType);
expect(result).toStrictEqual({});
});
});
@@ -316,18 +320,18 @@ describe('configuration_helpers', () => {
describe('isAIEnabled', () => {
it('should return true if AI is enabled', async () => {
vi.mocked(optionService.getOptionBool).mockReturnValue(true);
const result = await configHelpers.isAIEnabled();
expect(result).toBe(true);
expect(optionService.getOptionBool).toHaveBeenCalledWith('aiEnabled');
});
it('should return false if AI is disabled', async () => {
vi.mocked(optionService.getOptionBool).mockReturnValue(false);
const result = await configHelpers.isAIEnabled();
expect(result).toBe(false);
expect(optionService.getOptionBool).toHaveBeenCalledWith('aiEnabled');
});
@@ -339,9 +343,9 @@ describe('configuration_helpers', () => {
.mockReturnValueOnce('test-key') // openaiApiKey
.mockReturnValueOnce('') // openaiBaseUrl
.mockReturnValueOnce(''); // openaiDefaultModel
const result = await configHelpers.isProviderConfigured('openai');
expect(result).toBe(true);
});
@@ -350,9 +354,9 @@ describe('configuration_helpers', () => {
.mockReturnValueOnce('') // openaiApiKey (empty)
.mockReturnValueOnce('') // openaiBaseUrl
.mockReturnValueOnce(''); // openaiDefaultModel
const result = await configHelpers.isProviderConfigured('openai');
expect(result).toBe(false);
});
@@ -361,9 +365,9 @@ describe('configuration_helpers', () => {
.mockReturnValueOnce('anthropic-key') // anthropicApiKey
.mockReturnValueOnce('') // anthropicBaseUrl
.mockReturnValueOnce(''); // anthropicDefaultModel
const result = await configHelpers.isProviderConfigured('anthropic');
expect(result).toBe(true);
});
@@ -371,15 +375,15 @@ describe('configuration_helpers', () => {
vi.mocked(optionService.getOption)
.mockReturnValueOnce('http://localhost:11434') // ollamaBaseUrl
.mockReturnValueOnce(''); // ollamaDefaultModel
const result = await configHelpers.isProviderConfigured('ollama');
expect(result).toBe(true);
});
it('should return false for unknown provider', async () => {
const result = await configHelpers.isProviderConfigured('unknown' as ProviderType);
expect(result).toBe(false);
});
});
@@ -391,17 +395,17 @@ describe('configuration_helpers', () => {
.mockReturnValueOnce('test-key') // openaiApiKey
.mockReturnValueOnce('') // openaiBaseUrl
.mockReturnValueOnce(''); // openaiDefaultModel
const result = await configHelpers.getAvailableSelectedProvider();
expect(result).toBe('openai');
});
it('should return null if no provider selected', async () => {
vi.mocked(optionService.getOption).mockReturnValueOnce('');
const result = await configHelpers.getAvailableSelectedProvider();
expect(result).toBeNull();
});
@@ -411,9 +415,9 @@ describe('configuration_helpers', () => {
.mockReturnValueOnce('') // openaiApiKey (empty)
.mockReturnValueOnce('') // openaiBaseUrl
.mockReturnValueOnce(''); // openaiDefaultModel
const result = await configHelpers.getAvailableSelectedProvider();
expect(result).toBeNull();
});
});
@@ -427,9 +431,9 @@ describe('configuration_helpers', () => {
.mockReturnValueOnce('test-key') // openaiApiKey
.mockReturnValueOnce('') // openaiBaseUrl
.mockReturnValueOnce('gpt-4'); // openaiDefaultModel
const result = await configHelpers.validateConfiguration();
expect(result).toStrictEqual({
isValid: true,
errors: [],
@@ -439,9 +443,9 @@ describe('configuration_helpers', () => {
it('should return warning when AI is disabled', async () => {
vi.mocked(optionService.getOptionBool).mockReturnValue(false);
const result = await configHelpers.validateConfiguration();
expect(result).toStrictEqual({
isValid: true,
errors: [],
@@ -452,9 +456,9 @@ describe('configuration_helpers', () => {
it('should return error when no provider selected', async () => {
vi.mocked(optionService.getOptionBool).mockReturnValue(true);
vi.mocked(optionService.getOption).mockReturnValue(''); // no aiSelectedProvider
const result = await configHelpers.validateConfiguration();
expect(result).toStrictEqual({
isValid: false,
errors: ['No AI provider selected'],
@@ -469,9 +473,9 @@ describe('configuration_helpers', () => {
.mockReturnValueOnce('') // openaiApiKey (empty)
.mockReturnValueOnce('') // openaiBaseUrl
.mockReturnValueOnce(''); // openaiDefaultModel
const result = await configHelpers.validateConfiguration();
expect(result).toStrictEqual({
isValid: true,
errors: [],
@@ -495,9 +499,9 @@ describe('configuration_helpers', () => {
.mockReturnValueOnce('test-key') // openaiApiKey
.mockReturnValueOnce('') // openaiBaseUrl
.mockReturnValueOnce(''); // openaiDefaultModel
const result = await configHelpers.getValidModelConfig('openai');
expect(result).toStrictEqual({
model: modelName,
provider: 'openai'
@@ -511,9 +515,9 @@ describe('configuration_helpers', () => {
.mockReturnValueOnce('anthropic-key') // anthropicApiKey
.mockReturnValueOnce('') // anthropicBaseUrl
.mockReturnValueOnce(''); // anthropicDefaultModel
const result = await configHelpers.getValidModelConfig('anthropic');
expect(result).toStrictEqual({
model: modelName,
provider: 'anthropic'
@@ -526,9 +530,9 @@ describe('configuration_helpers', () => {
.mockReturnValueOnce(modelName) // ollamaDefaultModel
.mockReturnValueOnce('http://localhost:11434') // ollamaBaseUrl
.mockReturnValueOnce(''); // ollamaDefaultModel
const result = await configHelpers.getValidModelConfig('ollama');
expect(result).toStrictEqual({
model: modelName,
provider: 'ollama'
@@ -545,9 +549,9 @@ describe('configuration_helpers', () => {
.mockReturnValueOnce('test-key') // openaiApiKey
.mockReturnValueOnce('') // openaiBaseUrl
.mockReturnValueOnce(''); // openaiDefaultModel
const result = await configHelpers.getSelectedModelConfig();
expect(result).toStrictEqual({
model: modelName,
provider: 'openai'
@@ -562,9 +566,9 @@ describe('configuration_helpers', () => {
.mockReturnValueOnce('test-key') // openaiApiKey
.mockReturnValueOnce('') // openaiBaseUrl
.mockReturnValueOnce(''); // openaiDefaultModel
const result = await configHelpers.getSelectedModelConfig();
expect(result).toStrictEqual({
model: modelName,
provider: 'openai'
@@ -578,9 +582,9 @@ describe('configuration_helpers', () => {
.mockReturnValueOnce(modelName) // ollamaDefaultModel
.mockReturnValueOnce('http://localhost:11434') // ollamaBaseUrl
.mockReturnValueOnce(''); // ollamaDefaultModel
const result = await configHelpers.getSelectedModelConfig();
expect(result).toStrictEqual({
model: modelName,
provider: 'ollama'
@@ -595,13 +599,13 @@ describe('configuration_helpers', () => {
.mockReturnValueOnce('test-key') // anthropicApiKey
.mockReturnValueOnce('') // anthropicBaseUrl
.mockReturnValueOnce(''); // anthropicDefaultModel
const result = await configHelpers.getSelectedModelConfig();
expect(result).toStrictEqual({
model: modelName,
provider: 'anthropic'
});
});
});
});
});

View File

@@ -10,7 +10,11 @@ import { PROVIDER_CONSTANTS } from '../constants/provider_constants.js';
vi.mock('../../options.js', () => ({
default: {
getOption: vi.fn(),
getOptionBool: vi.fn()
getOptionBool: vi.fn(),
getOptionInt: vi.fn(name => {
if (name === "protectedSessionTimeout") return Number.MAX_SAFE_INTEGER;
return 0;
})
}
}));
@@ -79,7 +83,7 @@ describe('AnthropicService', () => {
beforeEach(() => {
vi.clearAllMocks();
// Get the mocked Anthropic instance before creating the service
const AnthropicMock = vi.mocked(Anthropic);
mockAnthropicInstance = {
@@ -122,9 +126,9 @@ describe('AnthropicService', () => {
})
}
};
AnthropicMock.mockImplementation(() => mockAnthropicInstance);
service = new AnthropicService();
});
@@ -144,26 +148,26 @@ describe('AnthropicService', () => {
it('should return true when AI is enabled and API key exists', () => {
vi.mocked(options.getOptionBool).mockReturnValueOnce(true); // AI enabled
vi.mocked(options.getOption).mockReturnValueOnce('test-api-key'); // API key
const result = service.isAvailable();
expect(result).toBe(true);
});
it('should return false when AI is disabled', () => {
vi.mocked(options.getOptionBool).mockReturnValueOnce(false); // AI disabled
const result = service.isAvailable();
expect(result).toBe(false);
});
it('should return false when no API key', () => {
vi.mocked(options.getOptionBool).mockReturnValueOnce(true); // AI enabled
vi.mocked(options.getOption).mockReturnValueOnce(''); // No API key
const result = service.isAvailable();
expect(result).toBe(false);
});
});
@@ -190,9 +194,9 @@ describe('AnthropicService', () => {
stream: false
};
vi.mocked(providers.getAnthropicOptions).mockReturnValueOnce(mockOptions);
const result = await service.generateChatCompletion(messages);
expect(result).toEqual({
text: 'Hello! How can I help you today?',
provider: 'Anthropic',
@@ -214,11 +218,11 @@ describe('AnthropicService', () => {
stream: false
};
vi.mocked(providers.getAnthropicOptions).mockReturnValueOnce(mockOptions);
const createSpy = vi.spyOn(mockAnthropicInstance.messages, 'create');
await service.generateChatCompletion(messages);
const calledParams = createSpy.mock.calls[0][0] as any;
expect(calledParams.messages).toEqual([
{ role: 'user', content: 'Hello' }
@@ -235,12 +239,12 @@ describe('AnthropicService', () => {
onChunk: vi.fn()
};
vi.mocked(providers.getAnthropicOptions).mockReturnValueOnce(mockOptions);
const result = await service.generateChatCompletion(messages);
// Wait for chunks to be processed
await new Promise(resolve => setTimeout(resolve, 100));
// Check that the result exists (streaming logic is complex, so we just verify basic structure)
expect(result).toBeDefined();
expect(result).toHaveProperty('text');
@@ -256,7 +260,7 @@ describe('AnthropicService', () => {
properties: {}
}
}];
const mockOptions = {
apiKey: 'test-key',
baseUrl: 'https://api.anthropic.com',
@@ -267,7 +271,7 @@ describe('AnthropicService', () => {
tool_choice: { type: 'any' }
};
vi.mocked(providers.getAnthropicOptions).mockReturnValueOnce(mockOptions);
// Mock response with tool use
mockAnthropicInstance.messages.create.mockResolvedValueOnce({
id: 'msg_123',
@@ -287,9 +291,9 @@ describe('AnthropicService', () => {
output_tokens: 25
}
});
const result = await service.generateChatCompletion(messages);
expect(result).toEqual({
text: '',
provider: 'Anthropic',
@@ -312,7 +316,7 @@ describe('AnthropicService', () => {
it('should throw error if service not available', async () => {
vi.mocked(options.getOptionBool).mockReturnValueOnce(false); // AI disabled
await expect(service.generateChatCompletion(messages)).rejects.toThrow(
'Anthropic service is not available'
);
@@ -326,12 +330,12 @@ describe('AnthropicService', () => {
stream: false
};
vi.mocked(providers.getAnthropicOptions).mockReturnValueOnce(mockOptions);
// Mock API error
mockAnthropicInstance.messages.create.mockRejectedValueOnce(
new Error('API Error: Invalid API key')
);
await expect(service.generateChatCompletion(messages)).rejects.toThrow(
'API Error: Invalid API key'
);
@@ -347,15 +351,15 @@ describe('AnthropicService', () => {
stream: false
};
vi.mocked(providers.getAnthropicOptions).mockReturnValueOnce(mockOptions);
// Spy on Anthropic constructor
const AnthropicMock = vi.mocked(Anthropic);
AnthropicMock.mockClear();
// Create new service to trigger client creation
const newService = new AnthropicService();
await newService.generateChatCompletion(messages);
expect(AnthropicMock).toHaveBeenCalledWith({
apiKey: 'test-key',
baseURL: 'https://api.anthropic.com',
@@ -374,15 +378,15 @@ describe('AnthropicService', () => {
stream: false
};
vi.mocked(providers.getAnthropicOptions).mockReturnValueOnce(mockOptions);
// Spy on Anthropic constructor
const AnthropicMock = vi.mocked(Anthropic);
AnthropicMock.mockClear();
// Create new service to trigger client creation
const newService = new AnthropicService();
await newService.generateChatCompletion(messages);
expect(AnthropicMock).toHaveBeenCalledWith({
apiKey: 'test-key',
baseURL: 'https://api.anthropic.com',
@@ -401,7 +405,7 @@ describe('AnthropicService', () => {
stream: false
};
vi.mocked(providers.getAnthropicOptions).mockReturnValueOnce(mockOptions);
// Mock response with mixed content
mockAnthropicInstance.messages.create.mockResolvedValueOnce({
id: 'msg_123',
@@ -420,9 +424,9 @@ describe('AnthropicService', () => {
output_tokens: 25
}
});
const result = await service.generateChatCompletion(messages);
expect(result.text).toBe('Here is the result: The calculation is complete.');
expect(result.tool_calls).toHaveLength(1);
expect(result.tool_calls![0].function.name).toBe('calculate');
@@ -431,8 +435,8 @@ describe('AnthropicService', () => {
it('should handle tool results in messages', async () => {
const messagesWithToolResult: Message[] = [
{ role: 'user', content: 'Calculate 5 + 3' },
{
role: 'assistant',
{
role: 'assistant',
content: '',
tool_calls: [{
id: 'call_123',
@@ -440,13 +444,13 @@ describe('AnthropicService', () => {
function: { name: 'calculate', arguments: '{"x": 5, "y": 3}' }
}]
},
{
role: 'tool',
{
role: 'tool',
content: '8',
tool_call_id: 'call_123'
}
];
const mockOptions = {
apiKey: 'test-key',
baseUrl: 'https://api.anthropic.com',
@@ -454,11 +458,11 @@ describe('AnthropicService', () => {
stream: false
};
vi.mocked(providers.getAnthropicOptions).mockReturnValueOnce(mockOptions);
const createSpy = vi.spyOn(mockAnthropicInstance.messages, 'create');
await service.generateChatCompletion(messagesWithToolResult);
const formattedMessages = (createSpy.mock.calls[0][0] as any).messages;
expect(formattedMessages).toHaveLength(3);
expect(formattedMessages[2]).toEqual({
@@ -471,4 +475,4 @@ describe('AnthropicService', () => {
});
});
});
});
});

View File

@@ -10,7 +10,11 @@ import options from '../../options.js';
vi.mock('../../options.js', () => ({
default: {
getOption: vi.fn(),
getOptionBool: vi.fn()
getOptionBool: vi.fn(),
getOptionInt: vi.fn(name => {
if (name === "protectedSessionTimeout") return Number.MAX_SAFE_INTEGER;
return 0;
})
}
}));
@@ -82,7 +86,7 @@ describe('LLM Model Selection with Special Characters', () => {
// Spy on getOpenAIOptions to verify model name is passed correctly
const getOpenAIOptionsSpy = vi.spyOn(providers, 'getOpenAIOptions');
try {
await service.generateChatCompletion([{ role: 'user', content: 'test' }], opts);
} catch (error) {
@@ -108,7 +112,7 @@ describe('LLM Model Selection with Special Characters', () => {
};
const getOpenAIOptionsSpy = vi.spyOn(providers, 'getOpenAIOptions');
try {
await service.generateChatCompletion([{ role: 'user', content: 'test' }], opts);
} catch (error) {
@@ -127,7 +131,7 @@ describe('LLM Model Selection with Special Characters', () => {
};
const getOpenAIOptionsSpy = vi.spyOn(providers, 'getOpenAIOptions');
const openaiOptions = providers.getOpenAIOptions(opts);
expect(openaiOptions.model).toBe(modelName);
});
@@ -153,7 +157,7 @@ describe('LLM Model Selection with Special Characters', () => {
});
const service = new OpenAIService();
// Access the private openai client through the service
const client = (service as any).getClient('test-key');
const createSpy = vi.spyOn(client.chat.completions, 'create');
@@ -213,7 +217,7 @@ describe('LLM Model Selection with Special Characters', () => {
});
const service = new AnthropicService();
// Access the private anthropic client
const client = (service as any).getClient('test-key');
const createSpy = vi.spyOn(client.messages, 'create');
@@ -278,7 +282,7 @@ describe('LLM Model Selection with Special Characters', () => {
const ollamaOptions = await providers.getOllamaOptions(opts);
expect(ollamaOptions.model).toBe(modelName);
// Also test with model specified in options
const optsWithModel: ChatCompletionOptions = {
model: 'another/model:v2.0@beta',
@@ -370,7 +374,7 @@ describe('LLM Model Selection with Special Characters', () => {
describe('Integration with REST API', () => {
it('should pass model names correctly through REST chat service', async () => {
const modelName = 'gpt-4.1-turbo-preview@latest';
// Mock the configuration helpers
vi.doMock('../config/configuration_helpers.js', () => ({
getSelectedModelConfig: vi.fn().mockResolvedValue({
@@ -382,8 +386,8 @@ describe('LLM Model Selection with Special Characters', () => {
const { getSelectedModelConfig } = await import('../config/configuration_helpers.js');
const config = await getSelectedModelConfig();
expect(config?.model).toBe(modelName);
});
});
});
});

View File

@@ -9,7 +9,11 @@ import { Ollama } from 'ollama';
vi.mock('../../options.js', () => ({
default: {
getOption: vi.fn(),
getOptionBool: vi.fn()
getOptionBool: vi.fn(),
getOptionInt: vi.fn(name => {
if (name === "protectedSessionTimeout") return Number.MAX_SAFE_INTEGER;
return 0;
})
}
}));
@@ -134,7 +138,7 @@ describe('OllamaService', () => {
beforeEach(() => {
vi.clearAllMocks();
// Create the mock instance before creating the service
const OllamaMock = vi.mocked(Ollama);
mockOllamaInstance = {
@@ -191,11 +195,11 @@ describe('OllamaService', () => {
]
})
};
OllamaMock.mockImplementation(() => mockOllamaInstance);
service = new OllamaService();
// Replace the formatter with a mock after construction
(service as any).formatter = {
formatMessages: vi.fn().mockReturnValue([
@@ -231,26 +235,26 @@ describe('OllamaService', () => {
it('should return true when AI is enabled and base URL exists', () => {
vi.mocked(options.getOptionBool).mockReturnValueOnce(true); // AI enabled
vi.mocked(options.getOption).mockReturnValueOnce('http://localhost:11434'); // Base URL
const result = service.isAvailable();
expect(result).toBe(true);
});
it('should return false when AI is disabled', () => {
vi.mocked(options.getOptionBool).mockReturnValueOnce(false); // AI disabled
const result = service.isAvailable();
expect(result).toBe(false);
});
it('should return false when no base URL', () => {
vi.mocked(options.getOptionBool).mockReturnValueOnce(true); // AI enabled
vi.mocked(options.getOption).mockReturnValueOnce(''); // No base URL
const result = service.isAvailable();
expect(result).toBe(false);
});
});
@@ -275,9 +279,9 @@ describe('OllamaService', () => {
};
vi.mocked(providers.getOllamaOptions).mockResolvedValueOnce(mockOptions);
vi.mocked(options.getOption).mockReturnValue('http://localhost:11434');
const result = await service.generateChatCompletion(messages);
expect(result).toEqual({
text: 'Hello! How can I help you today?',
provider: 'ollama',
@@ -296,12 +300,12 @@ describe('OllamaService', () => {
};
vi.mocked(providers.getOllamaOptions).mockResolvedValueOnce(mockOptions);
vi.mocked(options.getOption).mockReturnValue('http://localhost:11434');
const result = await service.generateChatCompletion(messages);
// Wait for chunks to be processed
await new Promise(resolve => setTimeout(resolve, 100));
// For streaming, we expect a different response structure
expect(result).toBeDefined();
expect(result).toHaveProperty('text');
@@ -310,7 +314,7 @@ describe('OllamaService', () => {
it('should handle tools when enabled', async () => {
vi.mocked(options.getOption).mockReturnValue('http://localhost:11434');
const mockTools = [{
name: 'test_tool',
description: 'Test tool',
@@ -320,7 +324,7 @@ describe('OllamaService', () => {
required: []
}
}];
const mockOptions = {
baseUrl: 'http://localhost:11434',
model: 'llama2',
@@ -329,18 +333,18 @@ describe('OllamaService', () => {
tools: mockTools
};
vi.mocked(providers.getOllamaOptions).mockResolvedValueOnce(mockOptions);
const chatSpy = vi.spyOn(mockOllamaInstance, 'chat');
await service.generateChatCompletion(messages);
const calledParams = chatSpy.mock.calls[0][0] as any;
expect(calledParams.tools).toEqual(mockTools);
});
it('should throw error if service not available', async () => {
vi.mocked(options.getOptionBool).mockReturnValueOnce(false); // AI disabled
await expect(service.generateChatCompletion(messages)).rejects.toThrow(
'Ollama service is not available'
);
@@ -350,14 +354,14 @@ describe('OllamaService', () => {
vi.mocked(options.getOption)
.mockReturnValueOnce('') // Empty base URL for ollamaBaseUrl
.mockReturnValue(''); // Ensure all subsequent calls return empty
const mockOptions = {
baseUrl: '',
model: 'llama2',
stream: false
};
vi.mocked(providers.getOllamaOptions).mockResolvedValueOnce(mockOptions);
await expect(service.generateChatCompletion(messages)).rejects.toThrow(
'Ollama service is not available'
);
@@ -365,19 +369,19 @@ describe('OllamaService', () => {
it('should handle API errors', async () => {
vi.mocked(options.getOption).mockReturnValue('http://localhost:11434');
const mockOptions = {
baseUrl: 'http://localhost:11434',
model: 'llama2',
stream: false
};
vi.mocked(providers.getOllamaOptions).mockResolvedValueOnce(mockOptions);
// Mock API error
mockOllamaInstance.chat.mockRejectedValueOnce(
new Error('Connection refused')
);
await expect(service.generateChatCompletion(messages)).rejects.toThrow(
'Connection refused'
);
@@ -385,30 +389,30 @@ describe('OllamaService', () => {
it('should create client with custom fetch for debugging', async () => {
vi.mocked(options.getOption).mockReturnValue('http://localhost:11434');
const mockOptions = {
baseUrl: 'http://localhost:11434',
model: 'llama2',
stream: false
};
vi.mocked(providers.getOllamaOptions).mockResolvedValueOnce(mockOptions);
// Spy on Ollama constructor
const OllamaMock = vi.mocked(Ollama);
OllamaMock.mockClear();
// Create new service to trigger client creation
const newService = new OllamaService();
// Replace the formatter with a mock for the new service
(newService as any).formatter = {
formatMessages: vi.fn().mockReturnValue([
{ role: 'user', content: 'Hello' }
])
};
await newService.generateChatCompletion(messages);
expect(OllamaMock).toHaveBeenCalledWith({
host: 'http://localhost:11434',
fetch: expect.any(Function)
@@ -417,7 +421,7 @@ describe('OllamaService', () => {
it('should handle tool execution feedback', async () => {
vi.mocked(options.getOption).mockReturnValue('http://localhost:11434');
const mockOptions = {
baseUrl: 'http://localhost:11434',
model: 'llama2',
@@ -426,7 +430,7 @@ describe('OllamaService', () => {
tools: [{ name: 'test_tool', description: 'Test', parameters: {} }]
};
vi.mocked(providers.getOllamaOptions).mockResolvedValueOnce(mockOptions);
// Mock response with tool call (arguments should be a string for Ollama)
mockOllamaInstance.chat.mockResolvedValueOnce({
message: {
@@ -442,9 +446,9 @@ describe('OllamaService', () => {
},
done: true
});
const result = await service.generateChatCompletion(messages);
expect(result.tool_calls).toEqual([{
id: 'call_123',
type: 'function',
@@ -457,14 +461,14 @@ describe('OllamaService', () => {
it('should handle mixed text and tool content', async () => {
vi.mocked(options.getOption).mockReturnValue('http://localhost:11434');
const mockOptions = {
baseUrl: 'http://localhost:11434',
model: 'llama2',
stream: false
};
vi.mocked(providers.getOllamaOptions).mockResolvedValueOnce(mockOptions);
// Mock response with both text and tool calls
mockOllamaInstance.chat.mockResolvedValueOnce({
message: {
@@ -480,30 +484,30 @@ describe('OllamaService', () => {
},
done: true
});
const result = await service.generateChatCompletion(messages);
expect(result.text).toBe('Let me help you with that.');
expect(result.tool_calls).toHaveLength(1);
});
it('should format messages using the formatter', async () => {
vi.mocked(options.getOption).mockReturnValue('http://localhost:11434');
const mockOptions = {
baseUrl: 'http://localhost:11434',
model: 'llama2',
stream: false
};
vi.mocked(providers.getOllamaOptions).mockResolvedValueOnce(mockOptions);
const formattedMessages = [{ role: 'user', content: 'Hello' }];
(service as any).formatter.formatMessages.mockReturnValueOnce(formattedMessages);
const chatSpy = vi.spyOn(mockOllamaInstance, 'chat');
await service.generateChatCompletion(messages);
expect((service as any).formatter.formatMessages).toHaveBeenCalled();
expect(chatSpy).toHaveBeenCalledWith(
expect.objectContaining({
@@ -514,23 +518,23 @@ describe('OllamaService', () => {
it('should handle network errors gracefully', async () => {
vi.mocked(options.getOption).mockReturnValue('http://localhost:11434');
const mockOptions = {
baseUrl: 'http://localhost:11434',
model: 'llama2',
stream: false
};
vi.mocked(providers.getOllamaOptions).mockResolvedValueOnce(mockOptions);
// Mock network error
global.fetch = vi.fn().mockRejectedValueOnce(
new Error('Network error')
);
mockOllamaInstance.chat.mockRejectedValueOnce(
new Error('fetch failed')
);
await expect(service.generateChatCompletion(messages)).rejects.toThrow(
'fetch failed'
);
@@ -538,19 +542,19 @@ describe('OllamaService', () => {
it('should validate model availability', async () => {
vi.mocked(options.getOption).mockReturnValue('http://localhost:11434');
const mockOptions = {
baseUrl: 'http://localhost:11434',
model: 'nonexistent-model',
stream: false
};
vi.mocked(providers.getOllamaOptions).mockResolvedValueOnce(mockOptions);
// Mock model not found error
mockOllamaInstance.chat.mockRejectedValueOnce(
new Error('model "nonexistent-model" not found')
);
await expect(service.generateChatCompletion(messages)).rejects.toThrow(
'model "nonexistent-model" not found'
);
@@ -561,23 +565,23 @@ describe('OllamaService', () => {
it('should reuse existing client', async () => {
vi.mocked(options.getOptionBool).mockReturnValue(true);
vi.mocked(options.getOption).mockReturnValue('http://localhost:11434');
const mockOptions = {
baseUrl: 'http://localhost:11434',
model: 'llama2',
stream: false
};
vi.mocked(providers.getOllamaOptions).mockResolvedValue(mockOptions);
const OllamaMock = vi.mocked(Ollama);
OllamaMock.mockClear();
// Make two calls
await service.generateChatCompletion([{ role: 'user', content: 'Hello' }]);
await service.generateChatCompletion([{ role: 'user', content: 'Hi' }]);
// Should only create client once
expect(OllamaMock).toHaveBeenCalledTimes(1);
});
});
});
});

View File

@@ -2,13 +2,17 @@ import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import { OpenAIService } from './openai_service.js';
import options from '../../options.js';
import * as providers from './providers.js';
import type { ChatCompletionOptions, Message } from '../ai_interface.js';
import type { Message } from '../ai_interface.js';
// Mock dependencies
vi.mock('../../options.js', () => ({
default: {
getOption: vi.fn(),
getOptionBool: vi.fn()
getOptionBool: vi.fn(),
getOptionInt: vi.fn(name => {
if (name === "protectedSessionTimeout") return Number.MAX_SAFE_INTEGER;
return 0;
})
}
}));
@@ -53,17 +57,17 @@ describe('OpenAIService', () => {
describe('isAvailable', () => {
it('should return true when base checks pass', () => {
vi.mocked(options.getOptionBool).mockReturnValueOnce(true); // AI enabled
const result = service.isAvailable();
expect(result).toBe(true);
});
it('should return false when AI is disabled', () => {
vi.mocked(options.getOptionBool).mockReturnValueOnce(false); // AI disabled
const result = service.isAvailable();
expect(result).toBe(false);
});
});
@@ -89,7 +93,7 @@ describe('OpenAIService', () => {
enableTools: false
};
vi.mocked(providers.getOpenAIOptions).mockReturnValueOnce(mockOptions);
// Mock the getClient method to return our mock client
const mockCompletion = {
id: 'chatcmpl-123',
@@ -120,9 +124,9 @@ describe('OpenAIService', () => {
};
vi.spyOn(service as any, 'getClient').mockReturnValue(mockClient);
const result = await service.generateChatCompletion(messages);
expect(result).toEqual({
text: 'Hello! How can I help you today?',
model: 'gpt-3.5-turbo',
@@ -144,7 +148,7 @@ describe('OpenAIService', () => {
stream: true
};
vi.mocked(providers.getOpenAIOptions).mockReturnValueOnce(mockOptions);
// Mock the streaming response
const mockStream = {
[Symbol.asyncIterator]: async function* () {
@@ -162,7 +166,7 @@ describe('OpenAIService', () => {
};
}
};
const mockClient = {
chat: {
completions: {
@@ -172,9 +176,9 @@ describe('OpenAIService', () => {
};
vi.spyOn(service as any, 'getClient').mockReturnValue(mockClient);
const result = await service.generateChatCompletion(messages);
expect(result).toHaveProperty('stream');
expect(result.text).toBe('');
expect(result.model).toBe('gpt-3.5-turbo');
@@ -183,7 +187,7 @@ describe('OpenAIService', () => {
it('should throw error if service not available', async () => {
vi.mocked(options.getOptionBool).mockReturnValueOnce(false); // AI disabled
await expect(service.generateChatCompletion(messages)).rejects.toThrow(
'OpenAI service is not available'
);
@@ -197,7 +201,7 @@ describe('OpenAIService', () => {
stream: false
};
vi.mocked(providers.getOpenAIOptions).mockReturnValueOnce(mockOptions);
const mockClient = {
chat: {
completions: {
@@ -207,7 +211,7 @@ describe('OpenAIService', () => {
};
vi.spyOn(service as any, 'getClient').mockReturnValue(mockClient);
await expect(service.generateChatCompletion(messages)).rejects.toThrow(
'API Error: Invalid API key'
);
@@ -222,7 +226,7 @@ describe('OpenAIService', () => {
parameters: {}
}
}];
const mockOptions = {
apiKey: 'test-key',
baseUrl: 'https://api.openai.com/v1',
@@ -233,7 +237,7 @@ describe('OpenAIService', () => {
tool_choice: 'auto'
};
vi.mocked(providers.getOpenAIOptions).mockReturnValueOnce(mockOptions);
const mockCompletion = {
id: 'chatcmpl-123',
object: 'chat.completion',
@@ -263,9 +267,9 @@ describe('OpenAIService', () => {
};
vi.spyOn(service as any, 'getClient').mockReturnValue(mockClient);
await service.generateChatCompletion(messages);
const createCall = mockClient.chat.completions.create.mock.calls[0][0];
expect(createCall.tools).toEqual(mockTools);
expect(createCall.tool_choice).toBe('auto');
@@ -281,7 +285,7 @@ describe('OpenAIService', () => {
tools: [{ type: 'function' as const, function: { name: 'test', description: 'test' } }]
};
vi.mocked(providers.getOpenAIOptions).mockReturnValueOnce(mockOptions);
const mockCompletion = {
id: 'chatcmpl-123',
object: 'chat.completion',
@@ -319,9 +323,9 @@ describe('OpenAIService', () => {
};
vi.spyOn(service as any, 'getClient').mockReturnValue(mockClient);
const result = await service.generateChatCompletion(messages);
expect(result).toEqual({
text: '',
model: 'gpt-3.5-turbo',
@@ -342,4 +346,4 @@ describe('OpenAIService', () => {
});
});
});
});
});

View File

@@ -1,9 +1,6 @@
"use strict";
import log from "./log.js";
import dataEncryptionService from "./encryption/data_encryption.js";
import options from "./options.js";
import ws from "./ws.js";
let dataKey: Buffer | null = null;
@@ -15,11 +12,11 @@ function getDataKey() {
return dataKey;
}
function resetDataKey() {
export function resetDataKey() {
dataKey = null;
}
function isProtectedSessionAvailable() {
export function isProtectedSessionAvailable() {
return !!dataKey;
}
@@ -57,15 +54,8 @@ function touchProtectedSession() {
}
}
function checkProtectedSessionExpiration() {
const protectedSessionTimeout = options.getOptionInt("protectedSessionTimeout");
if (isProtectedSessionAvailable() && lastProtectedSessionOperationDate && Date.now() - lastProtectedSessionOperationDate > protectedSessionTimeout * 1000) {
resetDataKey();
log.info("Expiring protected session");
ws.reloadFrontend("leaving protected session");
}
export function getLastProtectedSessionOperationDate() {
return lastProtectedSessionOperationDate;
}
export default {
@@ -75,6 +65,5 @@ export default {
encrypt,
decrypt,
decryptString,
touchProtectedSession,
checkProtectedSessionExpiration
touchProtectedSession
};

View File

@@ -4,9 +4,11 @@ import sqlInit from "./sql_init.js";
import config from "./config.js";
import log from "./log.js";
import attributeService from "../services/attributes.js";
import protectedSessionService from "../services/protected_session.js";
import hiddenSubtreeService from "./hidden_subtree.js";
import type BNote from "../becca/entities/bnote.js";
import options from "./options.js";
import { getLastProtectedSessionOperationDate, isProtectedSessionAvailable, resetDataKey } from "./protected_session.js";
import ws from "./ws.js";
function getRunAtHours(note: BNote): number[] {
try {
@@ -64,5 +66,15 @@ sqlInit.dbReady.then(() => {
);
}
setInterval(() => protectedSessionService.checkProtectedSessionExpiration(), 30000);
setInterval(() => checkProtectedSessionExpiration(), 1);
});
function checkProtectedSessionExpiration() {
const protectedSessionTimeout = options.getOptionInt("protectedSessionTimeout");
const lastProtectedSessionOperationDate = getLastProtectedSessionOperationDate();
if (isProtectedSessionAvailable() && lastProtectedSessionOperationDate && Date.now() - lastProtectedSessionOperationDate > protectedSessionTimeout * 1000) {
resetDataKey();
log.info("Expiring protected session");
ws.reloadFrontend("leaving protected session");
}
}