diff --git a/src/__tests__/integration/group-chat-integration.test.ts b/src/__tests__/integration/group-chat-integration.test.ts index b3dbb043..23627449 100644 --- a/src/__tests__/integration/group-chat-integration.test.ts +++ b/src/__tests__/integration/group-chat-integration.test.ts @@ -26,7 +26,7 @@ import { describe, it, expect, beforeAll } from 'vitest'; import { spawn } from 'child_process'; import { promisify } from 'util'; import { exec } from 'child_process'; -import { getAgentCapabilities } from '../../main/agent-capabilities'; +import { getAgentCapabilities } from '../../main/agents'; const execAsync = promisify(exec); diff --git a/src/__tests__/integration/group-chat.integration.test.ts b/src/__tests__/integration/group-chat.integration.test.ts index cbd61b98..9062631a 100644 --- a/src/__tests__/integration/group-chat.integration.test.ts +++ b/src/__tests__/integration/group-chat.integration.test.ts @@ -36,7 +36,7 @@ import { } from '../../main/group-chat/group-chat-moderator'; import { addParticipant } from '../../main/group-chat/group-chat-agent'; import { routeUserMessage } from '../../main/group-chat/group-chat-router'; -import { AgentDetector } from '../../main/agent-detector'; +import { AgentDetector } from '../../main/agents'; import { selectTestAgents, waitForAgentResponse, diff --git a/src/__tests__/integration/provider-integration.test.ts b/src/__tests__/integration/provider-integration.test.ts index a7e463dd..263b0acc 100644 --- a/src/__tests__/integration/provider-integration.test.ts +++ b/src/__tests__/integration/provider-integration.test.ts @@ -29,7 +29,7 @@ import { exec } from 'child_process'; import * as fs from 'fs'; import * as path from 'path'; import * as os from 'os'; -import { getAgentCapabilities } from '../../main/agent-capabilities'; +import { getAgentCapabilities } from '../../main/agents'; import { buildSshCommand, buildRemoteCommand } from '../../main/utils/ssh-command-builder'; import type { SshRemoteConfig } from 
'../../shared/types'; diff --git a/src/__tests__/main/agent-capabilities.test.ts b/src/__tests__/main/agents/capabilities.test.ts similarity index 99% rename from src/__tests__/main/agent-capabilities.test.ts rename to src/__tests__/main/agents/capabilities.test.ts index 50ab3648..c199588a 100644 --- a/src/__tests__/main/agent-capabilities.test.ts +++ b/src/__tests__/main/agents/capabilities.test.ts @@ -5,7 +5,7 @@ import { AGENT_CAPABILITIES, getAgentCapabilities, hasCapability, -} from '../../main/agent-capabilities'; +} from '../../../main/agents'; describe('agent-capabilities', () => { describe('AgentCapabilities interface', () => { diff --git a/src/__tests__/main/agents/definitions.test.ts b/src/__tests__/main/agents/definitions.test.ts new file mode 100644 index 00000000..04f4e1a4 --- /dev/null +++ b/src/__tests__/main/agents/definitions.test.ts @@ -0,0 +1,253 @@ +/** + * Tests for agent-definitions.ts + * + * Tests the agent definition data structures and helper functions. + */ + +import { describe, it, expect } from 'vitest'; +import { + AGENT_DEFINITIONS, + getAgentDefinition, + getAgentIds, + getVisibleAgentDefinitions, + type AgentDefinition, + type AgentConfigOption, +} from '../../../main/agents'; + +describe('agent-definitions', () => { + describe('AGENT_DEFINITIONS', () => { + it('should contain all expected agents', () => { + const agentIds = AGENT_DEFINITIONS.map((def) => def.id); + + expect(agentIds).toContain('terminal'); + expect(agentIds).toContain('claude-code'); + expect(agentIds).toContain('codex'); + expect(agentIds).toContain('opencode'); + expect(agentIds).toContain('gemini-cli'); + expect(agentIds).toContain('qwen3-coder'); + expect(agentIds).toContain('aider'); + }); + + it('should have required properties on all definitions', () => { + for (const def of AGENT_DEFINITIONS) { + expect(def.id).toBeDefined(); + expect(def.name).toBeDefined(); + expect(def.binaryName).toBeDefined(); + expect(def.command).toBeDefined(); + 
expect(def.args).toBeDefined(); + expect(Array.isArray(def.args)).toBe(true); + } + }); + + it('should have terminal as a hidden agent', () => { + const terminal = AGENT_DEFINITIONS.find((def) => def.id === 'terminal'); + expect(terminal?.hidden).toBe(true); + }); + + it('should have claude-code with correct base args', () => { + const claudeCode = AGENT_DEFINITIONS.find((def) => def.id === 'claude-code'); + expect(claudeCode).toBeDefined(); + expect(claudeCode?.args).toContain('--print'); + expect(claudeCode?.args).toContain('--verbose'); + expect(claudeCode?.args).toContain('--output-format'); + expect(claudeCode?.args).toContain('stream-json'); + expect(claudeCode?.args).toContain('--dangerously-skip-permissions'); + }); + + it('should have codex with batch mode configuration', () => { + const codex = AGENT_DEFINITIONS.find((def) => def.id === 'codex'); + expect(codex).toBeDefined(); + expect(codex?.batchModePrefix).toEqual(['exec']); + expect(codex?.batchModeArgs).toContain('--dangerously-bypass-approvals-and-sandbox'); + expect(codex?.jsonOutputArgs).toEqual(['--json']); + }); + + it('should have opencode with batch mode configuration', () => { + const opencode = AGENT_DEFINITIONS.find((def) => def.id === 'opencode'); + expect(opencode).toBeDefined(); + expect(opencode?.batchModePrefix).toEqual(['run']); + expect(opencode?.jsonOutputArgs).toEqual(['--format', 'json']); + expect(opencode?.noPromptSeparator).toBe(true); + }); + + it('should have opencode with default env vars for YOLO mode', () => { + const opencode = AGENT_DEFINITIONS.find((def) => def.id === 'opencode'); + expect(opencode?.defaultEnvVars).toBeDefined(); + expect(opencode?.defaultEnvVars?.OPENCODE_CONFIG_CONTENT).toContain('external_directory'); + }); + }); + + describe('getAgentDefinition', () => { + it('should return definition for valid agent ID', () => { + const claudeCode = getAgentDefinition('claude-code'); + expect(claudeCode).toBeDefined(); + expect(claudeCode?.id).toBe('claude-code'); 
+ expect(claudeCode?.name).toBe('Claude Code'); + }); + + it('should return undefined for invalid agent ID', () => { + const invalid = getAgentDefinition('non-existent-agent'); + expect(invalid).toBeUndefined(); + }); + + it('should return definition for all known agents', () => { + const knownAgents = ['terminal', 'claude-code', 'codex', 'opencode', 'gemini-cli', 'aider']; + for (const agentId of knownAgents) { + const def = getAgentDefinition(agentId); + expect(def).toBeDefined(); + expect(def?.id).toBe(agentId); + } + }); + }); + + describe('getAgentIds', () => { + it('should return array of all agent IDs', () => { + const ids = getAgentIds(); + expect(Array.isArray(ids)).toBe(true); + expect(ids.length).toBeGreaterThan(0); + expect(ids).toContain('claude-code'); + expect(ids).toContain('terminal'); + }); + + it('should match AGENT_DEFINITIONS length', () => { + const ids = getAgentIds(); + expect(ids.length).toBe(AGENT_DEFINITIONS.length); + }); + }); + + describe('getVisibleAgentDefinitions', () => { + it('should not include hidden agents', () => { + const visible = getVisibleAgentDefinitions(); + const visibleIds = visible.map((def) => def.id); + + // Terminal should be hidden + expect(visibleIds).not.toContain('terminal'); + }); + + it('should include visible agents', () => { + const visible = getVisibleAgentDefinitions(); + const visibleIds = visible.map((def) => def.id); + + expect(visibleIds).toContain('claude-code'); + expect(visibleIds).toContain('codex'); + expect(visibleIds).toContain('opencode'); + }); + + it('should return fewer items than AGENT_DEFINITIONS', () => { + const visible = getVisibleAgentDefinitions(); + expect(visible.length).toBeLessThan(AGENT_DEFINITIONS.length); + }); + }); + + describe('Agent argument builders', () => { + it('should have resumeArgs function for claude-code', () => { + const claudeCode = getAgentDefinition('claude-code'); + expect(claudeCode?.resumeArgs).toBeDefined(); + expect(typeof 
claudeCode?.resumeArgs).toBe('function'); + + const args = claudeCode?.resumeArgs?.('test-session-123'); + expect(args).toEqual(['--resume', 'test-session-123']); + }); + + it('should have resumeArgs function for codex', () => { + const codex = getAgentDefinition('codex'); + expect(codex?.resumeArgs).toBeDefined(); + + const args = codex?.resumeArgs?.('thread-456'); + expect(args).toEqual(['resume', 'thread-456']); + }); + + it('should have resumeArgs function for opencode', () => { + const opencode = getAgentDefinition('opencode'); + expect(opencode?.resumeArgs).toBeDefined(); + + const args = opencode?.resumeArgs?.('session-789'); + expect(args).toEqual(['--session', 'session-789']); + }); + + it('should have modelArgs function for opencode', () => { + const opencode = getAgentDefinition('opencode'); + expect(opencode?.modelArgs).toBeDefined(); + + const args = opencode?.modelArgs?.('ollama/qwen3:8b'); + expect(args).toEqual(['--model', 'ollama/qwen3:8b']); + }); + + it('should have workingDirArgs function for codex', () => { + const codex = getAgentDefinition('codex'); + expect(codex?.workingDirArgs).toBeDefined(); + + const args = codex?.workingDirArgs?.('/path/to/project'); + expect(args).toEqual(['-C', '/path/to/project']); + }); + + it('should have imageArgs function for codex', () => { + const codex = getAgentDefinition('codex'); + expect(codex?.imageArgs).toBeDefined(); + + const args = codex?.imageArgs?.('/path/to/image.png'); + expect(args).toEqual(['-i', '/path/to/image.png']); + }); + + it('should have imageArgs function for opencode', () => { + const opencode = getAgentDefinition('opencode'); + expect(opencode?.imageArgs).toBeDefined(); + + const args = opencode?.imageArgs?.('/path/to/image.png'); + expect(args).toEqual(['-f', '/path/to/image.png']); + }); + }); + + describe('Agent config options', () => { + it('should have configOptions for codex', () => { + const codex = getAgentDefinition('codex'); + expect(codex?.configOptions).toBeDefined(); + 
expect(Array.isArray(codex?.configOptions)).toBe(true); + + const contextWindowOption = codex?.configOptions?.find((opt) => opt.key === 'contextWindow'); + expect(contextWindowOption).toBeDefined(); + expect(contextWindowOption?.type).toBe('number'); + expect(contextWindowOption?.default).toBe(400000); + }); + + it('should have configOptions for opencode', () => { + const opencode = getAgentDefinition('opencode'); + expect(opencode?.configOptions).toBeDefined(); + + const modelOption = opencode?.configOptions?.find((opt) => opt.key === 'model'); + expect(modelOption).toBeDefined(); + expect(modelOption?.type).toBe('text'); + expect(modelOption?.default).toBe(''); + + // Test argBuilder + expect(modelOption?.argBuilder).toBeDefined(); + expect(modelOption?.argBuilder?.('ollama/qwen3:8b')).toEqual(['--model', 'ollama/qwen3:8b']); + expect(modelOption?.argBuilder?.('')).toEqual([]); + expect(modelOption?.argBuilder?.(' ')).toEqual([]); + }); + }); + + describe('Type definitions', () => { + it('should export AgentDefinition type', () => { + const def: AgentDefinition = { + id: 'test', + name: 'Test Agent', + binaryName: 'test', + command: 'test', + args: [], + }; + expect(def.id).toBe('test'); + }); + + it('should export AgentConfigOption type', () => { + const option: AgentConfigOption = { + key: 'testKey', + type: 'text', + label: 'Test Label', + description: 'Test description', + default: 'default value', + }; + expect(option.key).toBe('testKey'); + }); + }); +}); diff --git a/src/__tests__/main/agent-detector.test.ts b/src/__tests__/main/agents/detector.test.ts similarity index 94% rename from src/__tests__/main/agent-detector.test.ts rename to src/__tests__/main/agents/detector.test.ts index 58f8cddf..2d8f5a3e 100644 --- a/src/__tests__/main/agent-detector.test.ts +++ b/src/__tests__/main/agents/detector.test.ts @@ -4,14 +4,14 @@ import { AgentConfig, AgentConfigOption, AgentCapabilities, -} from '../../main/agent-detector'; +} from '../../../main/agents'; // Mock 
dependencies -vi.mock('../../main/utils/execFile', () => ({ +vi.mock('../../../main/utils/execFile', () => ({ execFileNoThrow: vi.fn(), })); -vi.mock('../../main/utils/logger', () => ({ +vi.mock('../../../main/utils/logger', () => ({ logger: { info: vi.fn(), warn: vi.fn(), @@ -21,8 +21,8 @@ vi.mock('../../main/utils/logger', () => ({ })); // Get mocked modules -import { execFileNoThrow } from '../../main/utils/execFile'; -import { logger } from '../../main/utils/logger'; +import { execFileNoThrow } from '../../../main/utils/execFile'; +import { logger } from '../../../main/utils/logger'; import * as fs from 'fs'; import * as os from 'os'; import * as path from 'path'; @@ -501,7 +501,7 @@ describe('agent-detector', () => { expect(logger.warn).toHaveBeenCalledWith( expect.stringContaining('not executable'), - 'AgentDetector' + 'PathProber' ); } finally { Object.defineProperty(process, 'platform', { value: originalPlatform, configurable: true }); @@ -1230,6 +1230,81 @@ describe('agent-detector', () => { }); }); + describe('model cache TTL', () => { + it('should invalidate model cache after TTL expires', async () => { + vi.useFakeTimers(); + + // Setup: opencode is available + mockExecFileNoThrow.mockImplementation(async (cmd, args) => { + const binaryName = args[0]; + if (binaryName === 'opencode') { + return { stdout: '/usr/bin/opencode\n', stderr: '', exitCode: 0 }; + } + if (cmd === '/usr/bin/opencode' && args[0] === 'models') { + return { + stdout: 'initial-model\n', + stderr: '', + exitCode: 0, + }; + } + return { stdout: '', stderr: 'not found', exitCode: 1 }; + }); + + // Create detector with short TTL for testing (100ms) + const shortTtlDetector = new AgentDetector(100); + await shortTtlDetector.detectAgents(); + + // First call - should fetch + const models1 = await shortTtlDetector.discoverModels('opencode'); + expect(models1).toEqual(['initial-model']); + + // Clear mocks to track new calls + mockExecFileNoThrow.mockClear(); + + // Second call immediately - 
should use cache + const models2 = await shortTtlDetector.discoverModels('opencode'); + expect(models2).toEqual(['initial-model']); + expect(mockExecFileNoThrow).not.toHaveBeenCalledWith( + '/usr/bin/opencode', + ['models'], + undefined, + expect.any(Object) + ); + + // Advance time past TTL + vi.advanceTimersByTime(150); + + // Setup new response for after cache expires + mockExecFileNoThrow.mockImplementation(async (cmd, args) => { + if (cmd === '/usr/bin/opencode' && args[0] === 'models') { + return { + stdout: 'new-model-after-ttl\n', + stderr: '', + exitCode: 0, + }; + } + return { stdout: '', stderr: '', exitCode: 1 }; + }); + + // Third call after TTL - should re-fetch + const models3 = await shortTtlDetector.discoverModels('opencode'); + expect(models3).toEqual(['new-model-after-ttl']); + expect(mockExecFileNoThrow).toHaveBeenCalledWith( + '/usr/bin/opencode', + ['models'], + undefined, + expect.any(Object) + ); + + vi.useRealTimers(); + }); + + it('should accept custom cache TTL in constructor', () => { + const customTtlDetector = new AgentDetector(60000); // 1 minute + expect(customTtlDetector).toBeDefined(); + }); + }); + describe('clearModelCache', () => { beforeEach(async () => { mockExecFileNoThrow.mockImplementation(async (cmd, args) => { diff --git a/src/__tests__/main/agents/path-prober.test.ts b/src/__tests__/main/agents/path-prober.test.ts new file mode 100644 index 00000000..816cc7a6 --- /dev/null +++ b/src/__tests__/main/agents/path-prober.test.ts @@ -0,0 +1,452 @@ +/** + * Tests for path-prober.ts + * + * Tests the platform-specific binary detection logic. 
+ */ + +import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest'; +import * as fs from 'fs'; + +// Mock dependencies before importing the module +vi.mock('../../../main/utils/execFile', () => ({ + execFileNoThrow: vi.fn(), +})); + +vi.mock('../../../main/utils/logger', () => ({ + logger: { + info: vi.fn(), + warn: vi.fn(), + error: vi.fn(), + debug: vi.fn(), + }, +})); + +vi.mock('../../../shared/pathUtils', () => ({ + expandTilde: vi.fn((p: string) => p.replace(/^~/, '/Users/testuser')), + detectNodeVersionManagerBinPaths: vi.fn(() => []), +})); + +// Import after mocking +import { + getExpandedEnv, + checkCustomPath, + checkBinaryExists, + probeWindowsPaths, + probeUnixPaths, + type BinaryDetectionResult, +} from '../../../main/agents'; +import { execFileNoThrow } from '../../../main/utils/execFile'; +import { logger } from '../../../main/utils/logger'; + +describe('path-prober', () => { + beforeEach(() => { + vi.clearAllMocks(); + }); + + describe('getExpandedEnv', () => { + it('should return environment with PATH', () => { + const env = getExpandedEnv(); + expect(env.PATH).toBeDefined(); + expect(typeof env.PATH).toBe('string'); + }); + + it('should include common Unix paths on non-Windows', () => { + const originalPlatform = process.platform; + Object.defineProperty(process, 'platform', { value: 'darwin', configurable: true }); + + try { + const env = getExpandedEnv(); + expect(env.PATH).toContain('/opt/homebrew/bin'); + expect(env.PATH).toContain('/usr/local/bin'); + } finally { + Object.defineProperty(process, 'platform', { value: originalPlatform, configurable: true }); + } + }); + + it('should preserve existing PATH entries', () => { + const originalPath = process.env.PATH; + const testPath = '/test/custom/path'; + process.env.PATH = testPath; + + try { + const env = getExpandedEnv(); + expect(env.PATH).toContain(testPath); + } finally { + process.env.PATH = originalPath; + } + }); + }); + + describe('checkCustomPath', () => { + let 
statMock: ReturnType; + let accessMock: ReturnType; + + beforeEach(() => { + statMock = vi.spyOn(fs.promises, 'stat'); + accessMock = vi.spyOn(fs.promises, 'access'); + }); + + afterEach(() => { + statMock.mockRestore(); + accessMock.mockRestore(); + }); + + it('should return exists: true for valid executable path on Unix', async () => { + const originalPlatform = process.platform; + Object.defineProperty(process, 'platform', { value: 'darwin', configurable: true }); + + try { + statMock.mockResolvedValue({ isFile: () => true } as fs.Stats); + accessMock.mockResolvedValue(undefined); + + const result = await checkCustomPath('/usr/local/bin/claude'); + expect(result.exists).toBe(true); + expect(result.path).toBe('/usr/local/bin/claude'); + } finally { + Object.defineProperty(process, 'platform', { value: originalPlatform, configurable: true }); + } + }); + + it('should return exists: false for non-executable file on Unix', async () => { + const originalPlatform = process.platform; + Object.defineProperty(process, 'platform', { value: 'darwin', configurable: true }); + + try { + statMock.mockResolvedValue({ isFile: () => true } as fs.Stats); + accessMock.mockRejectedValue(new Error('EACCES')); + + const result = await checkCustomPath('/path/to/non-executable'); + expect(result.exists).toBe(false); + expect(logger.warn).toHaveBeenCalledWith( + expect.stringContaining('not executable'), + 'PathProber' + ); + } finally { + Object.defineProperty(process, 'platform', { value: originalPlatform, configurable: true }); + } + }); + + it('should return exists: false for non-existent path', async () => { + statMock.mockRejectedValue(new Error('ENOENT')); + + const result = await checkCustomPath('/non/existent/path'); + expect(result.exists).toBe(false); + }); + + it('should expand tilde in path', async () => { + const originalPlatform = process.platform; + Object.defineProperty(process, 'platform', { value: 'darwin', configurable: true }); + + try { + 
statMock.mockResolvedValue({ isFile: () => true } as fs.Stats); + accessMock.mockResolvedValue(undefined); + + const result = await checkCustomPath('~/.local/bin/claude'); + expect(result.exists).toBe(true); + expect(result.path).toBe('/Users/testuser/.local/bin/claude'); + } finally { + Object.defineProperty(process, 'platform', { value: originalPlatform, configurable: true }); + } + }); + + it('should try .exe extension on Windows', async () => { + const originalPlatform = process.platform; + Object.defineProperty(process, 'platform', { value: 'win32', configurable: true }); + + try { + // First call (exact path) returns false, second call (.exe) returns true + statMock + .mockRejectedValueOnce(new Error('ENOENT')) + .mockResolvedValueOnce({ isFile: () => true } as fs.Stats); + + const result = await checkCustomPath('C:\\custom\\claude'); + expect(result.exists).toBe(true); + expect(result.path).toBe('C:\\custom\\claude.exe'); + } finally { + Object.defineProperty(process, 'platform', { value: originalPlatform, configurable: true }); + } + }); + + it('should try .cmd extension on Windows if .exe not found', async () => { + const originalPlatform = process.platform; + Object.defineProperty(process, 'platform', { value: 'win32', configurable: true }); + + try { + // First call (exact), second (.exe) return false, third (.cmd) returns true + statMock + .mockRejectedValueOnce(new Error('ENOENT')) + .mockRejectedValueOnce(new Error('ENOENT')) + .mockResolvedValueOnce({ isFile: () => true } as fs.Stats); + + const result = await checkCustomPath('C:\\custom\\claude'); + expect(result.exists).toBe(true); + expect(result.path).toBe('C:\\custom\\claude.cmd'); + } finally { + Object.defineProperty(process, 'platform', { value: originalPlatform, configurable: true }); + } + }); + + it('should skip executable check on Windows', async () => { + const originalPlatform = process.platform; + Object.defineProperty(process, 'platform', { value: 'win32', configurable: true }); + + 
try { + statMock.mockResolvedValue({ isFile: () => true } as fs.Stats); + // Don't mock access - it shouldn't be called for X_OK on Windows + + const result = await checkCustomPath('C:\\custom\\claude.exe'); + expect(result.exists).toBe(true); + // access should not be called with X_OK on Windows + } finally { + Object.defineProperty(process, 'platform', { value: originalPlatform, configurable: true }); + } + }); + }); + + describe('probeWindowsPaths', () => { + let accessMock: ReturnType; + + beforeEach(() => { + accessMock = vi.spyOn(fs.promises, 'access'); + }); + + afterEach(() => { + accessMock.mockRestore(); + }); + + it('should return null for unknown binary', async () => { + accessMock.mockRejectedValue(new Error('ENOENT')); + + const result = await probeWindowsPaths('unknown-binary'); + expect(result).toBeNull(); + }); + + it('should probe known paths for claude binary', async () => { + // All paths fail - binary not found + accessMock.mockRejectedValue(new Error('ENOENT')); + + const result = await probeWindowsPaths('claude'); + // Should return null since all probes fail + expect(result).toBeNull(); + // Should have tried multiple paths + expect(accessMock).toHaveBeenCalled(); + }); + }); + + describe('probeUnixPaths', () => { + let accessMock: ReturnType; + + beforeEach(() => { + accessMock = vi.spyOn(fs.promises, 'access'); + }); + + afterEach(() => { + accessMock.mockRestore(); + }); + + it('should return null for unknown binary', async () => { + accessMock.mockRejectedValue(new Error('ENOENT')); + + const result = await probeUnixPaths('unknown-binary'); + expect(result).toBeNull(); + }); + + it('should probe known paths for claude binary', async () => { + // All paths fail - binary not found + accessMock.mockRejectedValue(new Error('ENOENT')); + + const result = await probeUnixPaths('claude'); + // Should return null since all probes fail + expect(result).toBeNull(); + // Should have tried multiple paths + expect(accessMock).toHaveBeenCalled(); + }); 
+ + it('should check both existence and executability', async () => { + const originalPlatform = process.platform; + Object.defineProperty(process, 'platform', { value: 'darwin', configurable: true }); + + try { + accessMock.mockRejectedValue(new Error('ENOENT')); + + const result = await probeUnixPaths('claude'); + expect(result).toBeNull(); + + // Verify access was called with F_OK | X_OK + expect(accessMock).toHaveBeenCalled(); + } finally { + Object.defineProperty(process, 'platform', { value: originalPlatform, configurable: true }); + } + }); + }); + + describe('checkBinaryExists', () => { + let accessMock: ReturnType; + const execMock = vi.mocked(execFileNoThrow); + + beforeEach(() => { + accessMock = vi.spyOn(fs.promises, 'access'); + }); + + afterEach(() => { + accessMock.mockRestore(); + }); + + it('should try direct probe first on Unix', async () => { + const originalPlatform = process.platform; + Object.defineProperty(process, 'platform', { value: 'darwin', configurable: true }); + + try { + // Direct probe finds the binary (first path in the list exists) + accessMock.mockResolvedValueOnce(undefined); + + const result = await checkBinaryExists('claude'); + expect(result.exists).toBe(true); + expect(result.path).toContain('claude'); + // which should not be called if direct probe succeeds + } finally { + Object.defineProperty(process, 'platform', { value: originalPlatform, configurable: true }); + } + }); + + it('should fall back to which on Unix if probe fails', async () => { + const originalPlatform = process.platform; + Object.defineProperty(process, 'platform', { value: 'darwin', configurable: true }); + + try { + // Direct probe fails + accessMock.mockRejectedValue(new Error('ENOENT')); + + // which succeeds + execMock.mockResolvedValue({ + exitCode: 0, + stdout: '/usr/local/bin/test-binary\n', + stderr: '', + }); + + const result = await checkBinaryExists('test-binary'); + expect(result.exists).toBe(true); + 
expect(result.path).toBe('/usr/local/bin/test-binary'); + expect(execMock).toHaveBeenCalledWith( + 'which', + ['test-binary'], + undefined, + expect.any(Object) + ); + } finally { + Object.defineProperty(process, 'platform', { value: originalPlatform, configurable: true }); + } + }); + + it('should use where on Windows', async () => { + const originalPlatform = process.platform; + Object.defineProperty(process, 'platform', { value: 'win32', configurable: true }); + + try { + // Direct probe fails + accessMock.mockRejectedValue(new Error('ENOENT')); + + // where succeeds + execMock.mockResolvedValue({ + exitCode: 0, + stdout: 'C:\\Users\\Test\\AppData\\Roaming\\npm\\test.cmd\r\n', + stderr: '', + }); + + const result = await checkBinaryExists('test'); + expect(result.exists).toBe(true); + expect(execMock).toHaveBeenCalledWith('where', ['test'], undefined, expect.any(Object)); + } finally { + Object.defineProperty(process, 'platform', { value: originalPlatform, configurable: true }); + } + }); + + it('should return exists: false if binary not found', async () => { + const originalPlatform = process.platform; + Object.defineProperty(process, 'platform', { value: 'darwin', configurable: true }); + + try { + // Direct probe fails + accessMock.mockRejectedValue(new Error('ENOENT')); + + // which fails + execMock.mockResolvedValue({ + exitCode: 1, + stdout: '', + stderr: 'not found', + }); + + const result = await checkBinaryExists('non-existent'); + expect(result.exists).toBe(false); + expect(result.path).toBeUndefined(); + } finally { + Object.defineProperty(process, 'platform', { value: originalPlatform, configurable: true }); + } + }); + + it('should prefer .exe over .cmd on Windows', async () => { + const originalPlatform = process.platform; + Object.defineProperty(process, 'platform', { value: 'win32', configurable: true }); + + try { + // Direct probe fails + accessMock.mockRejectedValue(new Error('ENOENT')); + + // where returns both .exe and .cmd + 
execMock.mockResolvedValue({ + exitCode: 0, + stdout: 'C:\\path\\to\\binary.cmd\r\nC:\\path\\to\\binary.exe\r\n', + stderr: '', + }); + + const result = await checkBinaryExists('binary'); + expect(result.exists).toBe(true); + expect(result.path).toBe('C:\\path\\to\\binary.exe'); + } finally { + Object.defineProperty(process, 'platform', { value: originalPlatform, configurable: true }); + } + }); + + it('should handle Windows CRLF line endings', async () => { + const originalPlatform = process.platform; + Object.defineProperty(process, 'platform', { value: 'win32', configurable: true }); + + try { + accessMock.mockRejectedValue(new Error('ENOENT')); + + execMock.mockResolvedValue({ + exitCode: 0, + stdout: 'C:\\path\\to\\binary.exe\r\n', + stderr: '', + }); + + const result = await checkBinaryExists('binary'); + expect(result.exists).toBe(true); + expect(result.path).toBe('C:\\path\\to\\binary.exe'); + // Path should not contain \r + expect(result.path).not.toContain('\r'); + } finally { + Object.defineProperty(process, 'platform', { value: originalPlatform, configurable: true }); + } + }); + }); + + describe('BinaryDetectionResult type', () => { + it('should allow exists: true with path', () => { + const result: BinaryDetectionResult = { + exists: true, + path: '/usr/local/bin/claude', + }; + expect(result.exists).toBe(true); + expect(result.path).toBeDefined(); + }); + + it('should allow exists: false without path', () => { + const result: BinaryDetectionResult = { + exists: false, + }; + expect(result.exists).toBe(false); + expect(result.path).toBeUndefined(); + }); + }); +}); diff --git a/src/__tests__/main/agent-session-storage.test.ts b/src/__tests__/main/agents/session-storage.test.ts similarity index 97% rename from src/__tests__/main/agent-session-storage.test.ts rename to src/__tests__/main/agents/session-storage.test.ts index 9587f0a9..94f79dde 100644 --- a/src/__tests__/main/agent-session-storage.test.ts +++ 
b/src/__tests__/main/agents/session-storage.test.ts @@ -1,4 +1,6 @@ import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest'; +import type Store from 'electron-store'; +import type { ClaudeSessionOriginsData } from '../../../main/storage/claude-session-storage'; import { AgentSessionStorage, AgentSessionInfo, @@ -11,8 +13,8 @@ import { hasSessionStorage, getAllSessionStorages, clearStorageRegistry, -} from '../../main/agent-session-storage'; -import type { ToolType } from '../../shared/types'; +} from '../../../main/agents'; +import type { ToolType } from '../../../shared/types'; // Mock storage implementation for testing class MockSessionStorage implements AgentSessionStorage { @@ -198,12 +200,12 @@ describe('ClaudeSessionStorage', () => { // For now, we test that the class can be imported it('should be importable', async () => { // Dynamic import to test module loading - const { ClaudeSessionStorage } = await import('../../main/storage/claude-session-storage'); + const { ClaudeSessionStorage } = await import('../../../main/storage/claude-session-storage'); expect(ClaudeSessionStorage).toBeDefined(); }); it('should have claude-code as agentId', async () => { - const { ClaudeSessionStorage } = await import('../../main/storage/claude-session-storage'); + const { ClaudeSessionStorage } = await import('../../../main/storage/claude-session-storage'); // Create instance without store (it will create its own) // Note: In a real test, we'd mock electron-store @@ -214,18 +216,21 @@ describe('ClaudeSessionStorage', () => { describe('OpenCodeSessionStorage', () => { it('should be importable', async () => { - const { OpenCodeSessionStorage } = await import('../../main/storage/opencode-session-storage'); + const { OpenCodeSessionStorage } = + await import('../../../main/storage/opencode-session-storage'); expect(OpenCodeSessionStorage).toBeDefined(); }); it('should have opencode as agentId', async () => { - const { OpenCodeSessionStorage } = await 
import('../../main/storage/opencode-session-storage'); + const { OpenCodeSessionStorage } = + await import('../../../main/storage/opencode-session-storage'); const storage = new OpenCodeSessionStorage(); expect(storage.agentId).toBe('opencode'); }); it('should return empty results for non-existent projects', async () => { - const { OpenCodeSessionStorage } = await import('../../main/storage/opencode-session-storage'); + const { OpenCodeSessionStorage } = + await import('../../../main/storage/opencode-session-storage'); const storage = new OpenCodeSessionStorage(); // Non-existent project should return empty results @@ -245,7 +250,8 @@ describe('OpenCodeSessionStorage', () => { }); it('should return message directory path for getSessionPath', async () => { - const { OpenCodeSessionStorage } = await import('../../main/storage/opencode-session-storage'); + const { OpenCodeSessionStorage } = + await import('../../../main/storage/opencode-session-storage'); const storage = new OpenCodeSessionStorage(); // getSessionPath returns the message directory for the session @@ -257,7 +263,8 @@ describe('OpenCodeSessionStorage', () => { }); it('should fail gracefully when deleting from non-existent session', async () => { - const { OpenCodeSessionStorage } = await import('../../main/storage/opencode-session-storage'); + const { OpenCodeSessionStorage } = + await import('../../../main/storage/opencode-session-storage'); const storage = new OpenCodeSessionStorage(); const deleteResult = await storage.deleteMessagePair( @@ -272,18 +279,18 @@ describe('OpenCodeSessionStorage', () => { describe('CodexSessionStorage', () => { it('should be importable', async () => { - const { CodexSessionStorage } = await import('../../main/storage/codex-session-storage'); + const { CodexSessionStorage } = await import('../../../main/storage/codex-session-storage'); expect(CodexSessionStorage).toBeDefined(); }); it('should have codex as agentId', async () => { - const { CodexSessionStorage } = await 
import('../../main/storage/codex-session-storage'); + const { CodexSessionStorage } = await import('../../../main/storage/codex-session-storage'); const storage = new CodexSessionStorage(); expect(storage.agentId).toBe('codex'); }); it('should return empty results for non-existent sessions directory', async () => { - const { CodexSessionStorage } = await import('../../main/storage/codex-session-storage'); + const { CodexSessionStorage } = await import('../../../main/storage/codex-session-storage'); const storage = new CodexSessionStorage(); // Non-existent project should return empty results (since ~/.codex/sessions/ likely doesn't exist in test) @@ -306,7 +313,7 @@ describe('CodexSessionStorage', () => { }); it('should return null for getSessionPath (async operation required)', async () => { - const { CodexSessionStorage } = await import('../../main/storage/codex-session-storage'); + const { CodexSessionStorage } = await import('../../../main/storage/codex-session-storage'); const storage = new CodexSessionStorage(); // getSessionPath is synchronous and always returns null for Codex @@ -316,7 +323,7 @@ describe('CodexSessionStorage', () => { }); it('should fail gracefully when deleting from non-existent session', async () => { - const { CodexSessionStorage } = await import('../../main/storage/codex-session-storage'); + const { CodexSessionStorage } = await import('../../../main/storage/codex-session-storage'); const storage = new CodexSessionStorage(); const deleteResult = await storage.deleteMessagePair( @@ -329,7 +336,7 @@ describe('CodexSessionStorage', () => { }); it('should handle empty search query', async () => { - const { CodexSessionStorage } = await import('../../main/storage/codex-session-storage'); + const { CodexSessionStorage } = await import('../../../main/storage/codex-session-storage'); const storage = new CodexSessionStorage(); const search = await storage.searchSessions('/test/project', '', 'all'); @@ -342,12 +349,12 @@ 
describe('CodexSessionStorage', () => { describe('Storage Module Initialization', () => { it('should export initializeSessionStorages function', async () => { - const { initializeSessionStorages } = await import('../../main/storage/index'); + const { initializeSessionStorages } = await import('../../../main/storage/index'); expect(typeof initializeSessionStorages).toBe('function'); }); it('should export CodexSessionStorage', async () => { - const { CodexSessionStorage } = await import('../../main/storage/index'); + const { CodexSessionStorage } = await import('../../../main/storage/index'); expect(CodexSessionStorage).toBeDefined(); }); @@ -355,7 +362,7 @@ describe('Storage Module Initialization', () => { // This tests that ClaudeSessionStorage can receive an external store // This prevents the dual-store bug where IPC handlers and storage class // use different electron-store instances - const { ClaudeSessionStorage } = await import('../../main/storage/claude-session-storage'); + const { ClaudeSessionStorage } = await import('../../../main/storage/claude-session-storage'); // Create a mock store const mockStore = { @@ -366,14 +373,14 @@ describe('Storage Module Initialization', () => { // Should be able to create with external store (no throw) const storage = new ClaudeSessionStorage( - mockStore as unknown as import('electron-store').default + mockStore as unknown as Store ); expect(storage.agentId).toBe('claude-code'); }); it('should export InitializeSessionStoragesOptions interface', async () => { // This tests that the options interface is exported for type-safe initialization - const storageModule = await import('../../main/storage/index'); + const storageModule = await import('../../../main/storage/index'); // The function should accept options object expect(typeof storageModule.initializeSessionStorages).toBe('function'); // Function should accept undefined options (backward compatible) @@ -383,9 +390,8 @@ describe('Storage Module Initialization', () => { 
it('should accept claudeSessionOriginsStore in options', async () => { // This tests the fix for the dual-store bug // When a shared store is passed, it should be used instead of creating a new one - const { initializeSessionStorages } = await import('../../main/storage/index'); - const { getSessionStorage, clearStorageRegistry } = - await import('../../main/agent-session-storage'); + const { initializeSessionStorages } = await import('../../../main/storage/index'); + const { getSessionStorage, clearStorageRegistry } = await import('../../../main/agents'); // Clear registry first clearStorageRegistry(); @@ -402,7 +408,7 @@ describe('Storage Module Initialization', () => { // Initialize with the shared store // This mimics what main/index.ts does initializeSessionStorages({ - claudeSessionOriginsStore: mockStore as unknown as import('electron-store').default, + claudeSessionOriginsStore: mockStore as unknown as Store, }); // Verify ClaudeSessionStorage was registered diff --git a/src/main/debug-package/__tests__/packager.test.ts b/src/__tests__/main/debug-package/packager.test.ts similarity index 99% rename from src/main/debug-package/__tests__/packager.test.ts rename to src/__tests__/main/debug-package/packager.test.ts index 5f6d9b16..ced70dfa 100644 --- a/src/main/debug-package/__tests__/packager.test.ts +++ b/src/__tests__/main/debug-package/packager.test.ts @@ -13,7 +13,7 @@ import { describe, it, expect, beforeEach, afterEach } from 'vitest'; import * as path from 'path'; -import { createZipPackage, PackageContents } from '../packager'; +import { createZipPackage, PackageContents } from '../../../main/debug-package/packager'; import AdmZip from 'adm-zip'; // Use the native node:fs module to avoid any vitest mocks diff --git a/src/main/debug-package/__tests__/sanitization.test.ts b/src/__tests__/main/debug-package/sanitization.test.ts similarity index 81% rename from src/main/debug-package/__tests__/sanitization.test.ts rename to 
src/__tests__/main/debug-package/sanitization.test.ts index 5e8993ff..66dc38bd 100644 --- a/src/main/debug-package/__tests__/sanitization.test.ts +++ b/src/__tests__/main/debug-package/sanitization.test.ts @@ -51,7 +51,7 @@ describe('Debug Package Sanitization', () => { describe('sanitizePath', () => { describe('home directory replacement', () => { it('should replace home directory with ~', async () => { - const { sanitizePath } = await import('../collectors/settings'); + const { sanitizePath } = await import('../../../main/debug-package/collectors/settings'); const homeDir = os.homedir(); const testPath = `${homeDir}/Projects/MyApp`; @@ -62,7 +62,7 @@ describe('Debug Package Sanitization', () => { }); it('should replace home directory at any position in path', async () => { - const { sanitizePath } = await import('../collectors/settings'); + const { sanitizePath } = await import('../../../main/debug-package/collectors/settings'); const homeDir = os.homedir(); const testPath = `${homeDir}/deeply/nested/folder/file.txt`; @@ -72,7 +72,7 @@ describe('Debug Package Sanitization', () => { }); it('should handle home directory with trailing slash', async () => { - const { sanitizePath } = await import('../collectors/settings'); + const { sanitizePath } = await import('../../../main/debug-package/collectors/settings'); const homeDir = os.homedir(); const testPath = `${homeDir}/`; @@ -82,7 +82,7 @@ describe('Debug Package Sanitization', () => { }); it('should handle path that is exactly the home directory', async () => { - const { sanitizePath } = await import('../collectors/settings'); + const { sanitizePath } = await import('../../../main/debug-package/collectors/settings'); const homeDir = os.homedir(); const result = sanitizePath(homeDir); @@ -91,7 +91,7 @@ describe('Debug Package Sanitization', () => { }); it('should not modify paths that do not contain home directory', async () => { - const { sanitizePath } = await import('../collectors/settings'); + const { 
sanitizePath } = await import('../../../main/debug-package/collectors/settings'); const testPath = '/usr/local/bin/app'; const result = sanitizePath(testPath); @@ -100,7 +100,7 @@ describe('Debug Package Sanitization', () => { }); it('should handle empty string', async () => { - const { sanitizePath } = await import('../collectors/settings'); + const { sanitizePath } = await import('../../../main/debug-package/collectors/settings'); const result = sanitizePath(''); @@ -110,7 +110,7 @@ describe('Debug Package Sanitization', () => { describe('Windows path handling', () => { it('should normalize backslashes to forward slashes', async () => { - const { sanitizePath } = await import('../collectors/settings'); + const { sanitizePath } = await import('../../../main/debug-package/collectors/settings'); const testPath = 'C:\\Users\\testuser\\Documents\\Project'; const result = sanitizePath(testPath); @@ -120,7 +120,8 @@ describe('Debug Package Sanitization', () => { }); it('should handle Windows-style home directory', async () => { - const { sanitizePath: _sanitizePath } = await import('../collectors/settings'); + const { sanitizePath: _sanitizePath } = + await import('../../../main/debug-package/collectors/settings'); // Mock homedir to return Windows-style path const originalHomedir = os.homedir(); @@ -128,7 +129,8 @@ describe('Debug Package Sanitization', () => { // Re-import to get fresh module with mocked homedir vi.resetModules(); - const { sanitizePath: freshSanitizePath } = await import('../collectors/settings'); + const { sanitizePath: freshSanitizePath } = + await import('../../../main/debug-package/collectors/settings'); const testPath = 'C:\\Users\\testuser\\Documents\\Project'; const result = freshSanitizePath(testPath); @@ -139,7 +141,7 @@ describe('Debug Package Sanitization', () => { }); it('should handle mixed slash styles', async () => { - const { sanitizePath } = await import('../collectors/settings'); + const { sanitizePath } = await 
import('../../../main/debug-package/collectors/settings'); const testPath = '/path/to\\mixed\\slashes/file.txt'; const result = sanitizePath(testPath); @@ -152,7 +154,7 @@ describe('Debug Package Sanitization', () => { describe('edge cases and type handling', () => { it('should return null when given null', async () => { - const { sanitizePath } = await import('../collectors/settings'); + const { sanitizePath } = await import('../../../main/debug-package/collectors/settings'); // @ts-expect-error - Testing runtime behavior with wrong type const result = sanitizePath(null); @@ -161,7 +163,7 @@ describe('Debug Package Sanitization', () => { }); it('should return undefined when given undefined', async () => { - const { sanitizePath } = await import('../collectors/settings'); + const { sanitizePath } = await import('../../../main/debug-package/collectors/settings'); // @ts-expect-error - Testing runtime behavior with wrong type const result = sanitizePath(undefined); @@ -170,7 +172,7 @@ describe('Debug Package Sanitization', () => { }); it('should return numbers unchanged', async () => { - const { sanitizePath } = await import('../collectors/settings'); + const { sanitizePath } = await import('../../../main/debug-package/collectors/settings'); // @ts-expect-error - Testing runtime behavior with wrong type const result = sanitizePath(12345); @@ -179,7 +181,7 @@ describe('Debug Package Sanitization', () => { }); it('should return objects unchanged', async () => { - const { sanitizePath } = await import('../collectors/settings'); + const { sanitizePath } = await import('../../../main/debug-package/collectors/settings'); const obj = { path: '/some/path' }; // @ts-expect-error - Testing runtime behavior with wrong type @@ -189,7 +191,7 @@ describe('Debug Package Sanitization', () => { }); it('should handle paths with spaces', async () => { - const { sanitizePath } = await import('../collectors/settings'); + const { sanitizePath } = await 
import('../../../main/debug-package/collectors/settings'); const homeDir = os.homedir(); const testPath = `${homeDir}/My Documents/Project Files/app.tsx`; @@ -199,7 +201,7 @@ describe('Debug Package Sanitization', () => { }); it('should handle paths with special characters', async () => { - const { sanitizePath } = await import('../collectors/settings'); + const { sanitizePath } = await import('../../../main/debug-package/collectors/settings'); const homeDir = os.homedir(); const testPath = `${homeDir}/Projects/@company/app-v2.0#beta`; @@ -209,7 +211,7 @@ describe('Debug Package Sanitization', () => { }); it('should handle very long paths', async () => { - const { sanitizePath } = await import('../collectors/settings'); + const { sanitizePath } = await import('../../../main/debug-package/collectors/settings'); const homeDir = os.homedir(); const longPath = `${homeDir}/` + 'a/'.repeat(100) + 'file.txt'; @@ -228,7 +230,7 @@ describe('Debug Package Sanitization', () => { describe('API key redaction', () => { describe('sensitive key detection', () => { it('should redact apiKey', async () => { - const { collectSettings } = await import('../collectors/settings'); + const { collectSettings } = await import('../../../main/debug-package/collectors/settings'); const mockStore = { get: vi.fn(), set: vi.fn(), @@ -242,7 +244,7 @@ describe('Debug Package Sanitization', () => { }); it('should redact api_key (snake_case)', async () => { - const { collectSettings } = await import('../collectors/settings'); + const { collectSettings } = await import('../../../main/debug-package/collectors/settings'); const mockStore = { get: vi.fn(), set: vi.fn(), @@ -255,7 +257,7 @@ describe('Debug Package Sanitization', () => { }); it('should redact authToken', async () => { - const { collectSettings } = await import('../collectors/settings'); + const { collectSettings } = await import('../../../main/debug-package/collectors/settings'); const mockStore = { get: vi.fn(), set: vi.fn(), @@ -268,7 
+270,7 @@ describe('Debug Package Sanitization', () => { }); it('should redact auth_token (snake_case)', async () => { - const { collectSettings } = await import('../collectors/settings'); + const { collectSettings } = await import('../../../main/debug-package/collectors/settings'); const mockStore = { get: vi.fn(), set: vi.fn(), @@ -281,7 +283,7 @@ describe('Debug Package Sanitization', () => { }); it('should redact clientToken', async () => { - const { collectSettings } = await import('../collectors/settings'); + const { collectSettings } = await import('../../../main/debug-package/collectors/settings'); const mockStore = { get: vi.fn(), set: vi.fn(), @@ -294,7 +296,7 @@ describe('Debug Package Sanitization', () => { }); it('should redact client_token (snake_case)', async () => { - const { collectSettings } = await import('../collectors/settings'); + const { collectSettings } = await import('../../../main/debug-package/collectors/settings'); const mockStore = { get: vi.fn(), set: vi.fn(), @@ -307,7 +309,7 @@ describe('Debug Package Sanitization', () => { }); it('should redact password', async () => { - const { collectSettings } = await import('../collectors/settings'); + const { collectSettings } = await import('../../../main/debug-package/collectors/settings'); const mockStore = { get: vi.fn(), set: vi.fn(), @@ -320,7 +322,7 @@ describe('Debug Package Sanitization', () => { }); it('should redact secret', async () => { - const { collectSettings } = await import('../collectors/settings'); + const { collectSettings } = await import('../../../main/debug-package/collectors/settings'); const mockStore = { get: vi.fn(), set: vi.fn(), @@ -333,7 +335,7 @@ describe('Debug Package Sanitization', () => { }); it('should redact credential', async () => { - const { collectSettings } = await import('../collectors/settings'); + const { collectSettings } = await import('../../../main/debug-package/collectors/settings'); const mockStore = { get: vi.fn(), set: vi.fn(), @@ -346,7 
+348,7 @@ describe('Debug Package Sanitization', () => { }); it('should redact accessToken', async () => { - const { collectSettings } = await import('../collectors/settings'); + const { collectSettings } = await import('../../../main/debug-package/collectors/settings'); const mockStore = { get: vi.fn(), set: vi.fn(), @@ -359,7 +361,7 @@ describe('Debug Package Sanitization', () => { }); it('should redact access_token (snake_case)', async () => { - const { collectSettings } = await import('../collectors/settings'); + const { collectSettings } = await import('../../../main/debug-package/collectors/settings'); const mockStore = { get: vi.fn(), set: vi.fn(), @@ -372,7 +374,7 @@ describe('Debug Package Sanitization', () => { }); it('should redact refreshToken', async () => { - const { collectSettings } = await import('../collectors/settings'); + const { collectSettings } = await import('../../../main/debug-package/collectors/settings'); const mockStore = { get: vi.fn(), set: vi.fn(), @@ -385,7 +387,7 @@ describe('Debug Package Sanitization', () => { }); it('should redact refresh_token (snake_case)', async () => { - const { collectSettings } = await import('../collectors/settings'); + const { collectSettings } = await import('../../../main/debug-package/collectors/settings'); const mockStore = { get: vi.fn(), set: vi.fn(), @@ -398,7 +400,7 @@ describe('Debug Package Sanitization', () => { }); it('should redact privateKey', async () => { - const { collectSettings } = await import('../collectors/settings'); + const { collectSettings } = await import('../../../main/debug-package/collectors/settings'); const mockStore = { get: vi.fn(), set: vi.fn(), @@ -411,7 +413,7 @@ describe('Debug Package Sanitization', () => { }); it('should redact private_key (snake_case)', async () => { - const { collectSettings } = await import('../collectors/settings'); + const { collectSettings } = await import('../../../main/debug-package/collectors/settings'); const mockStore = { get: vi.fn(), 
set: vi.fn(), @@ -426,7 +428,7 @@ describe('Debug Package Sanitization', () => { describe('case insensitivity', () => { it('should redact APIKEY (uppercase)', async () => { - const { collectSettings } = await import('../collectors/settings'); + const { collectSettings } = await import('../../../main/debug-package/collectors/settings'); const mockStore = { get: vi.fn(), set: vi.fn(), @@ -439,7 +441,7 @@ describe('Debug Package Sanitization', () => { }); it('should redact ApiKey (mixed case)', async () => { - const { collectSettings } = await import('../collectors/settings'); + const { collectSettings } = await import('../../../main/debug-package/collectors/settings'); const mockStore = { get: vi.fn(), set: vi.fn(), @@ -452,7 +454,7 @@ describe('Debug Package Sanitization', () => { }); it('should redact API_KEY (uppercase snake_case)', async () => { - const { collectSettings } = await import('../collectors/settings'); + const { collectSettings } = await import('../../../main/debug-package/collectors/settings'); const mockStore = { get: vi.fn(), set: vi.fn(), @@ -465,7 +467,7 @@ describe('Debug Package Sanitization', () => { }); it('should redact PASSWORD (uppercase)', async () => { - const { collectSettings } = await import('../collectors/settings'); + const { collectSettings } = await import('../../../main/debug-package/collectors/settings'); const mockStore = { get: vi.fn(), set: vi.fn(), @@ -478,7 +480,7 @@ describe('Debug Package Sanitization', () => { }); it('should redact Secret (capitalized)', async () => { - const { collectSettings } = await import('../collectors/settings'); + const { collectSettings } = await import('../../../main/debug-package/collectors/settings'); const mockStore = { get: vi.fn(), set: vi.fn(), @@ -493,7 +495,7 @@ describe('Debug Package Sanitization', () => { describe('key name patterns containing sensitive words', () => { it('should redact myApiKeyValue (key within name)', async () => { - const { collectSettings } = await 
import('../collectors/settings'); + const { collectSettings } = await import('../../../main/debug-package/collectors/settings'); const mockStore = { get: vi.fn(), set: vi.fn(), @@ -506,7 +508,7 @@ describe('Debug Package Sanitization', () => { }); it('should redact userPassword (password in name)', async () => { - const { collectSettings } = await import('../collectors/settings'); + const { collectSettings } = await import('../../../main/debug-package/collectors/settings'); const mockStore = { get: vi.fn(), set: vi.fn(), @@ -519,7 +521,7 @@ describe('Debug Package Sanitization', () => { }); it('should redact adminSecret (secret in name)', async () => { - const { collectSettings } = await import('../collectors/settings'); + const { collectSettings } = await import('../../../main/debug-package/collectors/settings'); const mockStore = { get: vi.fn(), set: vi.fn(), @@ -532,7 +534,7 @@ describe('Debug Package Sanitization', () => { }); it('should redact bearerAccessToken (accesstoken in name)', async () => { - const { collectSettings } = await import('../collectors/settings'); + const { collectSettings } = await import('../../../main/debug-package/collectors/settings'); const mockStore = { get: vi.fn(), set: vi.fn(), @@ -545,7 +547,7 @@ describe('Debug Package Sanitization', () => { }); it('should redact dbCredential (credential in name)', async () => { - const { collectSettings } = await import('../collectors/settings'); + const { collectSettings } = await import('../../../main/debug-package/collectors/settings'); const mockStore = { get: vi.fn(), set: vi.fn(), @@ -560,7 +562,7 @@ describe('Debug Package Sanitization', () => { describe('nested object handling', () => { it('should redact sensitive keys in nested objects', async () => { - const { collectSettings } = await import('../collectors/settings'); + const { collectSettings } = await import('../../../main/debug-package/collectors/settings'); const mockStore = { get: vi.fn(), set: vi.fn(), @@ -577,7 +579,7 @@ 
describe('Debug Package Sanitization', () => { }); it('should redact deeply nested sensitive keys', async () => { - const { collectSettings } = await import('../collectors/settings'); + const { collectSettings } = await import('../../../main/debug-package/collectors/settings'); const mockStore = { get: vi.fn(), set: vi.fn(), @@ -602,7 +604,7 @@ describe('Debug Package Sanitization', () => { }); it('should track sanitized fields with full path', async () => { - const { collectSettings } = await import('../collectors/settings'); + const { collectSettings } = await import('../../../main/debug-package/collectors/settings'); const mockStore = { get: vi.fn(), set: vi.fn(), @@ -621,7 +623,7 @@ describe('Debug Package Sanitization', () => { }); it('should redact multiple sensitive keys at different levels', async () => { - const { collectSettings } = await import('../collectors/settings'); + const { collectSettings } = await import('../../../main/debug-package/collectors/settings'); const mockStore = { get: vi.fn(), set: vi.fn(), @@ -646,7 +648,7 @@ describe('Debug Package Sanitization', () => { describe('array handling', () => { it('should process arrays containing objects with sensitive keys', async () => { - const { collectSettings } = await import('../collectors/settings'); + const { collectSettings } = await import('../../../main/debug-package/collectors/settings'); const mockStore = { get: vi.fn(), set: vi.fn(), @@ -667,7 +669,7 @@ describe('Debug Package Sanitization', () => { }); it('should handle empty arrays', async () => { - const { collectSettings } = await import('../collectors/settings'); + const { collectSettings } = await import('../../../main/debug-package/collectors/settings'); const mockStore = { get: vi.fn(), set: vi.fn(), @@ -680,7 +682,7 @@ describe('Debug Package Sanitization', () => { }); it('should handle arrays of primitives', async () => { - const { collectSettings } = await import('../collectors/settings'); + const { collectSettings } = await 
import('../../../main/debug-package/collectors/settings'); const mockStore = { get: vi.fn(), set: vi.fn(), @@ -695,7 +697,7 @@ describe('Debug Package Sanitization', () => { describe('preservation of non-sensitive data', () => { it('should preserve boolean values', async () => { - const { collectSettings } = await import('../collectors/settings'); + const { collectSettings } = await import('../../../main/debug-package/collectors/settings'); const mockStore = { get: vi.fn(), set: vi.fn(), @@ -709,7 +711,7 @@ describe('Debug Package Sanitization', () => { }); it('should preserve number values', async () => { - const { collectSettings } = await import('../collectors/settings'); + const { collectSettings } = await import('../../../main/debug-package/collectors/settings'); const mockStore = { get: vi.fn(), set: vi.fn(), @@ -724,7 +726,7 @@ describe('Debug Package Sanitization', () => { }); it('should preserve string values without sensitive keywords', async () => { - const { collectSettings } = await import('../collectors/settings'); + const { collectSettings } = await import('../../../main/debug-package/collectors/settings'); const mockStore = { get: vi.fn(), set: vi.fn(), @@ -739,7 +741,7 @@ describe('Debug Package Sanitization', () => { }); it('should preserve null values', async () => { - const { collectSettings } = await import('../collectors/settings'); + const { collectSettings } = await import('../../../main/debug-package/collectors/settings'); const mockStore = { get: vi.fn(), set: vi.fn(), @@ -760,7 +762,7 @@ describe('Debug Package Sanitization', () => { describe('environment variable filtering', () => { describe('custom env vars masking', () => { it('should not expose custom env var values in agents collector', async () => { - const { collectAgents } = await import('../collectors/agents'); + const { collectAgents } = await import('../../../main/debug-package/collectors/agents'); const mockAgentDetector = { detectAgents: vi.fn().mockResolvedValue([ @@ -786,7 
+788,7 @@ describe('Debug Package Sanitization', () => { }); it('should indicate env vars are set without showing values', async () => { - const { collectAgents } = await import('../collectors/agents'); + const { collectAgents } = await import('../../../main/debug-package/collectors/agents'); const mockAgentDetector = { detectAgents: vi.fn().mockResolvedValue([ @@ -812,7 +814,7 @@ describe('Debug Package Sanitization', () => { describe('custom args masking', () => { it('should not expose custom args values containing secrets', async () => { - const { collectAgents } = await import('../collectors/agents'); + const { collectAgents } = await import('../../../main/debug-package/collectors/agents'); const mockAgentDetector = { detectAgents: vi.fn().mockResolvedValue([ @@ -836,7 +838,7 @@ describe('Debug Package Sanitization', () => { describe('path-based environment variables', () => { it('should sanitize custom path settings', async () => { - const { collectSettings } = await import('../collectors/settings'); + const { collectSettings } = await import('../../../main/debug-package/collectors/settings'); const homeDir = os.homedir(); const mockStore = { @@ -855,7 +857,7 @@ describe('Debug Package Sanitization', () => { }); it('should sanitize folderPath settings', async () => { - const { collectSettings } = await import('../collectors/settings'); + const { collectSettings } = await import('../../../main/debug-package/collectors/settings'); const homeDir = os.homedir(); const mockStore = { @@ -879,7 +881,7 @@ describe('Debug Package Sanitization', () => { describe('comprehensive sanitization', () => { it('should sanitize complex settings object with mixed sensitive data', async () => { - const { collectSettings } = await import('../collectors/settings'); + const { collectSettings } = await import('../../../main/debug-package/collectors/settings'); const homeDir = os.homedir(); const mockStore = { @@ -931,7 +933,7 @@ describe('Debug Package Sanitization', () => { }); 
it('should track all sanitized fields', async () => { - const { collectSettings } = await import('../collectors/settings'); + const { collectSettings } = await import('../../../main/debug-package/collectors/settings'); const homeDir = os.homedir(); const mockStore = { @@ -952,7 +954,7 @@ describe('Debug Package Sanitization', () => { }); it('should produce output that contains no home directory paths for recognized path keys', async () => { - const { collectSettings } = await import('../collectors/settings'); + const { collectSettings } = await import('../../../main/debug-package/collectors/settings'); const homeDir = os.homedir(); const mockStore = { @@ -980,7 +982,7 @@ describe('Debug Package Sanitization', () => { }); it('should not sanitize paths in array values (by design)', async () => { - const { collectSettings } = await import('../collectors/settings'); + const { collectSettings } = await import('../../../main/debug-package/collectors/settings'); const homeDir = os.homedir(); // Note: Arrays of string paths are NOT sanitized by design @@ -1002,7 +1004,7 @@ describe('Debug Package Sanitization', () => { }); it('should produce output that contains no API keys or secrets', async () => { - const { collectSettings } = await import('../collectors/settings'); + const { collectSettings } = await import('../../../main/debug-package/collectors/settings'); const secrets = [ 'sk-1234567890abcdef', diff --git a/src/__tests__/main/group-chat/group-chat-router.test.ts b/src/__tests__/main/group-chat/group-chat-router.test.ts index aadad061..3c7d3b1b 100644 --- a/src/__tests__/main/group-chat/group-chat-router.test.ts +++ b/src/__tests__/main/group-chat/group-chat-router.test.ts @@ -63,7 +63,7 @@ import { GroupChatParticipant, } from '../../../main/group-chat/group-chat-storage'; import { readLog } from '../../../main/group-chat/group-chat-log'; -import { AgentDetector } from '../../../main/agent-detector'; +import { AgentDetector } from '../../../main/agents'; 
describe('group-chat-router', () => { let mockProcessManager: IProcessManager; diff --git a/src/__tests__/main/ipc/handlers/agentSessions.test.ts b/src/__tests__/main/ipc/handlers/agentSessions.test.ts index 84b84b22..baccd997 100644 --- a/src/__tests__/main/ipc/handlers/agentSessions.test.ts +++ b/src/__tests__/main/ipc/handlers/agentSessions.test.ts @@ -8,7 +8,7 @@ import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest'; import { ipcMain } from 'electron'; import { registerAgentSessionsHandlers } from '../../../../main/ipc/handlers/agentSessions'; -import * as agentSessionStorage from '../../../../main/agent-session-storage'; +import * as agentSessionStorage from '../../../../main/agents'; // Mock electron's ipcMain vi.mock('electron', () => ({ @@ -18,8 +18,8 @@ vi.mock('electron', () => ({ }, })); -// Mock the agent-session-storage module -vi.mock('../../../../main/agent-session-storage', () => ({ +// Mock the agents module (session storage exports) +vi.mock('../../../../main/agents', () => ({ getSessionStorage: vi.fn(), hasSessionStorage: vi.fn(), getAllSessionStorages: vi.fn(), diff --git a/src/__tests__/main/ipc/handlers/agents.test.ts b/src/__tests__/main/ipc/handlers/agents.test.ts index 5671d5fb..a58e4367 100644 --- a/src/__tests__/main/ipc/handlers/agents.test.ts +++ b/src/__tests__/main/ipc/handlers/agents.test.ts @@ -10,7 +10,7 @@ import { registerAgentsHandlers, AgentsHandlerDependencies, } from '../../../../main/ipc/handlers/agents'; -import * as agentCapabilities from '../../../../main/agent-capabilities'; +import * as agentCapabilities from '../../../../main/agents'; // Mock electron's ipcMain vi.mock('electron', () => ({ @@ -20,8 +20,8 @@ vi.mock('electron', () => ({ }, })); -// Mock agent-capabilities module -vi.mock('../../../../main/agent-capabilities', () => ({ +// Mock agents module (capabilities exports) +vi.mock('../../../../main/agents', () => ({ getAgentCapabilities: vi.fn(), DEFAULT_CAPABILITIES: { supportsResume: false, 
diff --git a/src/__tests__/main/ipc/handlers/debug.test.ts b/src/__tests__/main/ipc/handlers/debug.test.ts index a882eb59..fa1d1a74 100644 --- a/src/__tests__/main/ipc/handlers/debug.test.ts +++ b/src/__tests__/main/ipc/handlers/debug.test.ts @@ -13,7 +13,7 @@ import { DebugHandlerDependencies, } from '../../../../main/ipc/handlers/debug'; import * as debugPackage from '../../../../main/debug-package'; -import { AgentDetector } from '../../../../main/agent-detector'; +import { AgentDetector } from '../../../../main/agents'; import { ProcessManager } from '../../../../main/process-manager'; import { WebServer } from '../../../../main/web-server'; diff --git a/src/__tests__/main/ipc/handlers/stats.test.ts b/src/__tests__/main/ipc/handlers/stats.test.ts index e6cd2e7a..04b33c2c 100644 --- a/src/__tests__/main/ipc/handlers/stats.test.ts +++ b/src/__tests__/main/ipc/handlers/stats.test.ts @@ -8,8 +8,8 @@ import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest'; import { ipcMain, BrowserWindow } from 'electron'; import { registerStatsHandlers } from '../../../../main/ipc/handlers/stats'; -import * as statsDbModule from '../../../../main/stats-db'; -import type { StatsDB } from '../../../../main/stats-db'; +import * as statsDbModule from '../../../../main/stats'; +import type { StatsDB } from '../../../../main/stats'; // Mock electron's ipcMain and BrowserWindow vi.mock('electron', () => ({ @@ -21,7 +21,7 @@ vi.mock('electron', () => ({ })); // Mock the stats-db module -vi.mock('../../../../main/stats-db', () => ({ +vi.mock('../../../../main/stats', () => ({ getStatsDB: vi.fn(), getInitializationResult: vi.fn(), clearInitializationResult: vi.fn(), diff --git a/src/__tests__/main/ipc/handlers/tabNaming.test.ts b/src/__tests__/main/ipc/handlers/tabNaming.test.ts index e6d9ca3f..f5970b2b 100644 --- a/src/__tests__/main/ipc/handlers/tabNaming.test.ts +++ b/src/__tests__/main/ipc/handlers/tabNaming.test.ts @@ -9,7 +9,7 @@ import { describe, it, expect, 
beforeEach, vi, Mock } from 'vitest'; import { ipcMain } from 'electron'; import { registerTabNamingHandlers } from '../../../../main/ipc/handlers/tabNaming'; import type { ProcessManager } from '../../../../main/process-manager'; -import type { AgentDetector, AgentConfig } from '../../../../main/agent-detector'; +import type { AgentDetector, AgentConfig } from '../../../../main/agents'; // Mock the logger vi.mock('../../../../main/utils/logger', () => ({ diff --git a/src/__tests__/main/parsers/usage-aggregator.test.ts b/src/__tests__/main/parsers/usage-aggregator.test.ts index 7adfc71c..f3fde93a 100644 --- a/src/__tests__/main/parsers/usage-aggregator.test.ts +++ b/src/__tests__/main/parsers/usage-aggregator.test.ts @@ -2,6 +2,7 @@ * Tests for usage aggregator utilities */ +import { describe, expect, it } from 'vitest'; import { aggregateModelUsage, estimateContextUsage, @@ -96,15 +97,32 @@ describe('estimateContextUsage', () => { expect(result).toBe(10); }); - it('should cap at 100%', () => { + it('should correctly calculate for Claude with all token types', () => { + // Simulates a real Claude response: input + cacheRead + cacheCreation = total const stats = createStats({ - inputTokens: 150000, - outputTokens: 100000, + inputTokens: 2, + cacheReadInputTokens: 33541, + cacheCreationInputTokens: 11657, + outputTokens: 12, contextWindow: 200000, }); const result = estimateContextUsage(stats, 'claude-code'); - // Output tokens excluded; 150k / 200k = 75% - expect(result).toBe(75); + // (2 + 33541 + 11657) / 200000 = 45200 / 200000 = 22.6% -> 23% + expect(result).toBe(23); + }); + + it('should return null when tokens exceed context window (accumulated values)', () => { + // When Claude Code does complex multi-tool turns, token values accumulate + // across internal API calls and can exceed the context window + const stats = createStats({ + inputTokens: 21627, + cacheReadInputTokens: 1079415, + cacheCreationInputTokens: 39734, + contextWindow: 200000, + }); + const 
result = estimateContextUsage(stats, 'claude-code'); + // Total = 1,140,776 > 200,000 -> null (accumulated, skip update) + expect(result).toBeNull(); }); }); @@ -112,6 +130,7 @@ describe('estimateContextUsage', () => { it('should use claude-code default context window (200k)', () => { const stats = createStats({ contextWindow: 0 }); const result = estimateContextUsage(stats, 'claude-code'); + // 10000 + 0 + 0 = 10000 / 200000 = 5% expect(result).toBe(5); }); @@ -149,6 +168,18 @@ describe('estimateContextUsage', () => { const result = estimateContextUsage(stats, 'claude-code'); expect(result).toBe(0); }); + + it('should return null when accumulated tokens exceed default window', () => { + const stats = createStats({ + inputTokens: 50000, + cacheReadInputTokens: 500000, + cacheCreationInputTokens: 10000, + contextWindow: 0, + }); + const result = estimateContextUsage(stats, 'claude-code'); + // 560000 > 200000 default -> null + expect(result).toBeNull(); + }); }); }); @@ -166,47 +197,62 @@ describe('calculateContextTokens', () => { ...overrides, }); - it('should include input, cacheRead, and cacheCreation tokens for Claude agents', () => { + it('should include input + cacheRead + cacheCreation for Claude agents', () => { const stats = createStats(); const result = calculateContextTokens(stats, 'claude-code'); - // 10000 + 1000 + 2000 = 13000 (all context tokens, excludes output) + // 10000 + 2000 + 1000 = 13000 (all input token types, excludes output) expect(result).toBe(13000); }); - it('should include output tokens in addition to all context tokens for Codex agents', () => { + it('should include input + cacheCreation + output for Codex agents', () => { const stats = createStats(); const result = calculateContextTokens(stats, 'codex'); - // 10000 + 5000 + 1000 + 2000 = 18000 (includes output and all cache tokens) - expect(result).toBe(18000); + // 10000 + 1000 + 5000 = 16000 (combined input+output window) + expect(result).toBe(16000); }); it('should default to 
Claude behavior when agent is undefined', () => { const stats = createStats(); const result = calculateContextTokens(stats); - // 10000 + 1000 + 2000 = 13000 (includes cacheRead, defaults to Claude behavior) + // 10000 + 2000 + 1000 = 13000 (Claude default: all input token types) expect(result).toBe(13000); }); - it('should include cacheReadInputTokens in context calculation', () => { - // Cached tokens still occupy context window space - they're just cheaper to process. - // This matches ClawdBot's working implementation: input + cacheRead + cacheWrite + it('should calculate correctly for typical first Claude turn', () => { + // Real-world scenario: first message with system prompt cache + const stats = createStats({ + inputTokens: 2, + cacheReadInputTokens: 33541, + cacheCreationInputTokens: 11657, + outputTokens: 12, + }); + const result = calculateContextTokens(stats, 'claude-code'); + // 2 + 33541 + 11657 = 45200 (total context for the API call) + expect(result).toBe(45200); + }); + + it('should handle accumulated values from multi-tool turns', () => { + // When values are accumulated across internal API calls, + // the total can exceed the context window. calculateContextTokens + // returns the raw total; callers must check against contextWindow. 
const stats = createStats({ inputTokens: 5000, cacheCreationInputTokens: 1000, - cacheReadInputTokens: 100000, // Represents actual cached context size + cacheReadInputTokens: 500000, // Accumulated from many internal calls }); const result = calculateContextTokens(stats, 'claude-code'); - // 5000 + 1000 + 100000 = 106000 (all context tokens) - expect(result).toBe(106000); + // 5000 + 500000 + 1000 = 506000 (raw total, may exceed window) + expect(result).toBe(506000); }); }); describe('DEFAULT_CONTEXT_WINDOWS', () => { it('should have context windows defined for all known agent types', () => { expect(DEFAULT_CONTEXT_WINDOWS['claude-code']).toBe(200000); + expect(DEFAULT_CONTEXT_WINDOWS['claude']).toBe(200000); expect(DEFAULT_CONTEXT_WINDOWS['codex']).toBe(200000); expect(DEFAULT_CONTEXT_WINDOWS['opencode']).toBe(128000); - expect(DEFAULT_CONTEXT_WINDOWS['factory-droid']).toBe(200000); + expect(DEFAULT_CONTEXT_WINDOWS['aider']).toBe(128000); expect(DEFAULT_CONTEXT_WINDOWS['terminal']).toBe(0); }); }); diff --git a/src/__tests__/main/process-listeners/data-listener.test.ts b/src/__tests__/main/process-listeners/data-listener.test.ts new file mode 100644 index 00000000..71e0f861 --- /dev/null +++ b/src/__tests__/main/process-listeners/data-listener.test.ts @@ -0,0 +1,324 @@ +/** + * Tests for data listener. + * Handles process output data including group chat buffering and web broadcasting. 
+ */ + +import { describe, it, expect, vi, beforeEach } from 'vitest'; +import { setupDataListener } from '../../../main/process-listeners/data-listener'; +import type { ProcessManager } from '../../../main/process-manager'; +import type { SafeSendFn } from '../../../main/utils/safe-send'; +import type { ProcessListenerDependencies } from '../../../main/process-listeners/types'; + +describe('Data Listener', () => { + let mockProcessManager: ProcessManager; + let mockSafeSend: SafeSendFn; + let mockGetWebServer: ProcessListenerDependencies['getWebServer']; + let mockWebServer: { broadcastToSessionClients: ReturnType }; + let mockOutputBuffer: ProcessListenerDependencies['outputBuffer']; + let mockOutputParser: ProcessListenerDependencies['outputParser']; + let mockDebugLog: ProcessListenerDependencies['debugLog']; + let mockPatterns: ProcessListenerDependencies['patterns']; + let eventHandlers: Map void>; + + beforeEach(() => { + vi.clearAllMocks(); + eventHandlers = new Map(); + + mockSafeSend = vi.fn(); + mockWebServer = { + broadcastToSessionClients: vi.fn(), + }; + mockGetWebServer = vi.fn().mockReturnValue(mockWebServer); + mockOutputBuffer = { + appendToGroupChatBuffer: vi.fn().mockReturnValue(100), + getGroupChatBufferedOutput: vi.fn().mockReturnValue('test output'), + clearGroupChatBuffer: vi.fn(), + }; + mockOutputParser = { + extractTextFromStreamJson: vi.fn().mockReturnValue('parsed response'), + parseParticipantSessionId: vi.fn().mockReturnValue(null), + }; + mockDebugLog = vi.fn(); + mockPatterns = { + REGEX_MODERATOR_SESSION: /^group-chat-(.+)-moderator-/, + REGEX_MODERATOR_SESSION_TIMESTAMP: /^group-chat-(.+)-moderator-\d+$/, + REGEX_AI_SUFFIX: /-ai-[^-]+$/, + REGEX_AI_TAB_ID: /-ai-([^-]+)$/, + REGEX_BATCH_SESSION: /-batch-\d+$/, + REGEX_SYNOPSIS_SESSION: /-synopsis-\d+$/, + }; + + mockProcessManager = { + on: vi.fn((event: string, handler: (...args: unknown[]) => void) => { + eventHandlers.set(event, handler); + }), + } as unknown as ProcessManager; 
+ }); + + const setupListener = () => { + setupDataListener(mockProcessManager, { + safeSend: mockSafeSend, + getWebServer: mockGetWebServer, + outputBuffer: mockOutputBuffer, + outputParser: mockOutputParser, + debugLog: mockDebugLog, + patterns: mockPatterns, + }); + }; + + describe('Event Registration', () => { + it('should register the data event listener', () => { + setupListener(); + expect(mockProcessManager.on).toHaveBeenCalledWith('data', expect.any(Function)); + }); + }); + + describe('Regular Process Data', () => { + it('should forward data to renderer for non-group-chat sessions', () => { + setupListener(); + const handler = eventHandlers.get('data'); + + handler?.('regular-session-123', 'test output'); + + expect(mockSafeSend).toHaveBeenCalledWith( + 'process:data', + 'regular-session-123', + 'test output' + ); + }); + + it('should broadcast to web clients for AI sessions', () => { + setupListener(); + const handler = eventHandlers.get('data'); + + handler?.('session-123-ai-tab1', 'test output'); + + expect(mockWebServer.broadcastToSessionClients).toHaveBeenCalledWith( + 'session-123', + expect.objectContaining({ + type: 'session_output', + sessionId: 'session-123', + tabId: 'tab1', + data: 'test output', + source: 'ai', + }) + ); + }); + + it('should extract base session ID correctly', () => { + setupListener(); + const handler = eventHandlers.get('data'); + + handler?.('my-session-ai-mytab', 'test output'); + + expect(mockWebServer.broadcastToSessionClients).toHaveBeenCalledWith( + 'my-session', + expect.objectContaining({ + sessionId: 'my-session', + tabId: 'mytab', + }) + ); + }); + }); + + describe('Moderator Output Buffering', () => { + it('should buffer moderator output instead of forwarding', () => { + setupListener(); + const handler = eventHandlers.get('data'); + const sessionId = 'group-chat-test-chat-123-moderator-abc123'; + + handler?.(sessionId, 'moderator output'); + + 
expect(mockOutputBuffer.appendToGroupChatBuffer).toHaveBeenCalledWith( + sessionId, + 'moderator output' + ); + expect(mockSafeSend).not.toHaveBeenCalled(); + }); + + it('should extract group chat ID from moderator session', () => { + setupListener(); + const handler = eventHandlers.get('data'); + const sessionId = 'group-chat-my-chat-id-moderator-12345'; + + handler?.(sessionId, 'test'); + + expect(mockDebugLog).toHaveBeenCalledWith( + 'GroupChat:Debug', + expect.stringContaining('my-chat-id') + ); + }); + + it('should warn when buffer size exceeds limit', () => { + mockOutputBuffer.appendToGroupChatBuffer = vi.fn().mockReturnValue(15 * 1024 * 1024); // 15MB + setupListener(); + const handler = eventHandlers.get('data'); + const sessionId = 'group-chat-test-chat-123-moderator-abc123'; + + handler?.(sessionId, 'large output'); + + expect(mockDebugLog).toHaveBeenCalledWith( + 'GroupChat:Debug', + expect.stringContaining('WARNING: Buffer size') + ); + }); + }); + + describe('Participant Output Buffering', () => { + beforeEach(() => { + mockOutputParser.parseParticipantSessionId = vi.fn().mockReturnValue({ + groupChatId: 'test-chat-123', + participantName: 'TestAgent', + }); + }); + + it('should buffer participant output instead of forwarding', () => { + setupListener(); + const handler = eventHandlers.get('data'); + const sessionId = 'group-chat-test-chat-123-participant-TestAgent-abc123'; + + handler?.(sessionId, 'participant output'); + + expect(mockOutputBuffer.appendToGroupChatBuffer).toHaveBeenCalledWith( + sessionId, + 'participant output' + ); + expect(mockSafeSend).not.toHaveBeenCalled(); + }); + + it('should warn when participant buffer size exceeds limit', () => { + mockOutputBuffer.appendToGroupChatBuffer = vi.fn().mockReturnValue(15 * 1024 * 1024); // 15MB + setupListener(); + const handler = eventHandlers.get('data'); + const sessionId = 'group-chat-test-chat-123-participant-TestAgent-abc123'; + + handler?.(sessionId, 'large output'); + + 
expect(mockDebugLog).toHaveBeenCalledWith( + 'GroupChat:Debug', + expect.stringContaining('WARNING: Buffer size') + ); + }); + }); + + describe('Web Broadcast Filtering', () => { + it('should skip PTY terminal output', () => { + setupListener(); + const handler = eventHandlers.get('data'); + + handler?.('session-123-terminal', 'terminal output'); + + expect(mockWebServer.broadcastToSessionClients).not.toHaveBeenCalled(); + // But should still forward to renderer + expect(mockSafeSend).toHaveBeenCalledWith( + 'process:data', + 'session-123-terminal', + 'terminal output' + ); + }); + + it('should skip batch session output using regex pattern', () => { + setupListener(); + const handler = eventHandlers.get('data'); + + handler?.('session-123-batch-1234567890', 'batch output'); + + expect(mockWebServer.broadcastToSessionClients).not.toHaveBeenCalled(); + }); + + it('should skip synopsis session output using regex pattern', () => { + setupListener(); + const handler = eventHandlers.get('data'); + + handler?.('session-123-synopsis-1234567890', 'synopsis output'); + + expect(mockWebServer.broadcastToSessionClients).not.toHaveBeenCalled(); + }); + + it('should NOT skip sessions with "batch" in UUID (false positive prevention)', () => { + setupListener(); + const handler = eventHandlers.get('data'); + + // Session ID with "batch" in the UUID but not matching the pattern -batch-{digits} + handler?.('session-batch-uuid-ai-tab1', 'output'); + + // Should broadcast because it doesn't match the -batch-\d+$ pattern + expect(mockWebServer.broadcastToSessionClients).toHaveBeenCalled(); + }); + + it('should broadcast when no web server is available', () => { + mockGetWebServer = vi.fn().mockReturnValue(null); + setupListener(); + const handler = eventHandlers.get('data'); + + handler?.('session-123-ai-tab1', 'test output'); + + // Should still forward to renderer + expect(mockSafeSend).toHaveBeenCalledWith( + 'process:data', + 'session-123-ai-tab1', + 'test output' + ); + // But not 
broadcast (no web server) + expect(mockWebServer.broadcastToSessionClients).not.toHaveBeenCalled(); + }); + }); + + describe('Message ID Generation', () => { + it('should generate unique message IDs for broadcasts', () => { + setupListener(); + const handler = eventHandlers.get('data'); + + handler?.('session-123-ai-tab1', 'output 1'); + handler?.('session-123-ai-tab1', 'output 2'); + + const calls = mockWebServer.broadcastToSessionClients.mock.calls; + const msgId1 = calls[0][1].msgId; + const msgId2 = calls[1][1].msgId; + + expect(msgId1).toBeDefined(); + expect(msgId2).toBeDefined(); + expect(msgId1).not.toBe(msgId2); + }); + + it('should include timestamp in message ID', () => { + const beforeTime = Date.now(); + setupListener(); + const handler = eventHandlers.get('data'); + + handler?.('session-123-ai-tab1', 'test output'); + + const msgId = mockWebServer.broadcastToSessionClients.mock.calls[0][1].msgId; + const timestamp = parseInt(msgId.split('-')[0], 10); + + expect(timestamp).toBeGreaterThanOrEqual(beforeTime); + expect(timestamp).toBeLessThanOrEqual(Date.now()); + }); + }); + + describe('Source Detection', () => { + it('should identify AI source from session ID', () => { + setupListener(); + const handler = eventHandlers.get('data'); + + handler?.('session-123-ai-tab1', 'ai output'); + + expect(mockWebServer.broadcastToSessionClients).toHaveBeenCalledWith( + expect.anything(), + expect.objectContaining({ source: 'ai' }) + ); + }); + + it('should identify terminal source for non-AI sessions', () => { + setupListener(); + const handler = eventHandlers.get('data'); + + handler?.('session-123', 'terminal output'); + + expect(mockWebServer.broadcastToSessionClients).toHaveBeenCalledWith( + expect.anything(), + expect.objectContaining({ source: 'terminal' }) + ); + }); + }); +}); diff --git a/src/__tests__/main/process-listeners/error-listener.test.ts b/src/__tests__/main/process-listeners/error-listener.test.ts new file mode 100644 index 00000000..80d89512 
--- /dev/null +++ b/src/__tests__/main/process-listeners/error-listener.test.ts @@ -0,0 +1,118 @@ +/** + * Tests for error listener. + * Handles agent errors (auth expired, token exhaustion, rate limits, etc.). + */ + +import { describe, it, expect, vi, beforeEach } from 'vitest'; +import { setupErrorListener } from '../../../main/process-listeners/error-listener'; +import type { ProcessManager } from '../../../main/process-manager'; +import type { SafeSendFn } from '../../../main/utils/safe-send'; +import type { AgentError } from '../../../shared/types'; +import type { ProcessListenerDependencies } from '../../../main/process-listeners/types'; + +describe('Error Listener', () => { + let mockProcessManager: ProcessManager; + let mockSafeSend: SafeSendFn; + let mockLogger: ProcessListenerDependencies['logger']; + let eventHandlers: Map void>; + + beforeEach(() => { + vi.clearAllMocks(); + eventHandlers = new Map(); + + mockSafeSend = vi.fn(); + mockLogger = { + info: vi.fn(), + error: vi.fn(), + warn: vi.fn(), + debug: vi.fn(), + }; + + mockProcessManager = { + on: vi.fn((event: string, handler: (...args: unknown[]) => void) => { + eventHandlers.set(event, handler); + }), + } as unknown as ProcessManager; + }); + + it('should register the agent-error event listener', () => { + setupErrorListener(mockProcessManager, { safeSend: mockSafeSend, logger: mockLogger }); + + expect(mockProcessManager.on).toHaveBeenCalledWith('agent-error', expect.any(Function)); + }); + + it('should log agent error and forward to renderer', () => { + setupErrorListener(mockProcessManager, { safeSend: mockSafeSend, logger: mockLogger }); + + const handler = eventHandlers.get('agent-error'); + const testSessionId = 'test-session-123'; + const testAgentError: AgentError = { + type: 'auth_expired', + agentId: 'claude-code', + message: 'Authentication token has expired', + recoverable: true, + timestamp: Date.now(), + }; + + handler?.(testSessionId, testAgentError); + + 
expect(mockLogger.info).toHaveBeenCalledWith( + 'Agent error detected: auth_expired', + 'AgentError', + expect.objectContaining({ + sessionId: testSessionId, + agentId: 'claude-code', + errorType: 'auth_expired', + message: 'Authentication token has expired', + recoverable: true, + }) + ); + + expect(mockSafeSend).toHaveBeenCalledWith('agent:error', testSessionId, testAgentError); + }); + + it('should handle token exhaustion errors', () => { + setupErrorListener(mockProcessManager, { safeSend: mockSafeSend, logger: mockLogger }); + + const handler = eventHandlers.get('agent-error'); + const testSessionId = 'session-456'; + const testAgentError: AgentError = { + type: 'token_exhaustion', + agentId: 'codex', + message: 'Token limit exceeded', + recoverable: false, + timestamp: Date.now(), + }; + + handler?.(testSessionId, testAgentError); + + expect(mockSafeSend).toHaveBeenCalledWith('agent:error', testSessionId, testAgentError); + }); + + it('should handle rate limit errors', () => { + setupErrorListener(mockProcessManager, { safeSend: mockSafeSend, logger: mockLogger }); + + const handler = eventHandlers.get('agent-error'); + const testSessionId = 'session-789'; + const testAgentError: AgentError = { + type: 'rate_limited', + agentId: 'opencode', + message: 'Rate limit exceeded, retry after 60 seconds', + recoverable: true, + timestamp: Date.now(), + }; + + handler?.(testSessionId, testAgentError); + + expect(mockLogger.info).toHaveBeenCalledWith( + 'Agent error detected: rate_limited', + 'AgentError', + expect.objectContaining({ + sessionId: testSessionId, + errorType: 'rate_limited', + }) + ); + + expect(mockSafeSend).toHaveBeenCalledWith('agent:error', testSessionId, testAgentError); + }); +}); diff --git a/src/__tests__/main/process-listeners/exit-listener.test.ts b/src/__tests__/main/process-listeners/exit-listener.test.ts new file mode 100644 index 00000000..1f96f193 --- /dev/null +++ b/src/__tests__/main/process-listeners/exit-listener.test.ts @@ -0,0 +1,420 
@@ +/** + * Tests for exit listener. + * Handles process exit events including group chat moderator/participant exits. + */ + +import { describe, it, expect, vi, beforeEach } from 'vitest'; +import { setupExitListener } from '../../../main/process-listeners/exit-listener'; +import type { ProcessManager } from '../../../main/process-manager'; +import type { ProcessListenerDependencies } from '../../../main/process-listeners/types'; + +describe('Exit Listener', () => { + let mockProcessManager: ProcessManager; + let mockDeps: Parameters[1]; + let eventHandlers: Map void>; + + // Create a minimal mock group chat + const createMockGroupChat = () => ({ + id: 'test-chat-123', + name: 'Test Chat', + moderatorAgentId: 'claude-code', + moderatorSessionId: 'group-chat-test-chat-123-moderator', + participants: [ + { + name: 'TestAgent', + agentId: 'claude-code', + sessionId: 'group-chat-test-chat-123-participant-TestAgent-abc123', + addedAt: Date.now(), + }, + ], + createdAt: Date.now(), + updatedAt: Date.now(), + logPath: '/tmp/test-chat.log', + imagesDir: '/tmp/test-chat-images', + }); + + beforeEach(() => { + vi.clearAllMocks(); + eventHandlers = new Map(); + + mockProcessManager = { + on: vi.fn((event: string, handler: (...args: unknown[]) => void) => { + eventHandlers.set(event, handler); + }), + } as unknown as ProcessManager; + + mockDeps = { + safeSend: vi.fn(), + powerManager: { + addBlockReason: vi.fn(), + removeBlockReason: vi.fn(), + }, + groupChatEmitters: { + emitStateChange: vi.fn(), + emitParticipantState: vi.fn(), + emitParticipantsChanged: vi.fn(), + emitModeratorUsage: vi.fn(), + }, + groupChatRouter: { + routeModeratorResponse: vi.fn().mockResolvedValue(undefined), + routeAgentResponse: vi.fn().mockResolvedValue(undefined), + markParticipantResponded: vi.fn().mockResolvedValue(undefined), + spawnModeratorSynthesis: vi.fn().mockResolvedValue(undefined), + getGroupChatReadOnlyState: vi.fn().mockReturnValue(false), + respawnParticipantWithRecovery: 
vi.fn().mockResolvedValue(undefined), + }, + groupChatStorage: { + loadGroupChat: vi.fn().mockResolvedValue(createMockGroupChat()), + updateGroupChat: vi.fn().mockResolvedValue(createMockGroupChat()), + updateParticipant: vi.fn().mockResolvedValue(createMockGroupChat()), + }, + sessionRecovery: { + needsSessionRecovery: vi.fn().mockReturnValue(false), + initiateSessionRecovery: vi.fn().mockResolvedValue(true), + }, + outputBuffer: { + appendToGroupChatBuffer: vi.fn().mockReturnValue(100), + getGroupChatBufferedOutput: vi.fn().mockReturnValue('{"type":"text","text":"test output"}'), + clearGroupChatBuffer: vi.fn(), + }, + outputParser: { + extractTextFromStreamJson: vi.fn().mockReturnValue('parsed response'), + parseParticipantSessionId: vi.fn().mockReturnValue(null), + }, + getProcessManager: () => mockProcessManager, + getAgentDetector: () => + ({ + detectAgents: vi.fn(), + }) as unknown as ReturnType, + getWebServer: () => null, + logger: { + info: vi.fn(), + error: vi.fn(), + warn: vi.fn(), + debug: vi.fn(), + }, + debugLog: vi.fn(), + patterns: { + REGEX_MODERATOR_SESSION: /^group-chat-(.+)-moderator-/, + REGEX_MODERATOR_SESSION_TIMESTAMP: /^group-chat-(.+)-moderator-\d+$/, + REGEX_AI_SUFFIX: /-ai-[^-]+$/, + REGEX_AI_TAB_ID: /-ai-([^-]+)$/, + REGEX_BATCH_SESSION: /-batch-\d+$/, + REGEX_SYNOPSIS_SESSION: /-synopsis-\d+$/, + }, + }; + }); + + const setupListener = () => { + setupExitListener(mockProcessManager, mockDeps); + }; + + describe('Event Registration', () => { + it('should register the exit event listener', () => { + setupListener(); + expect(mockProcessManager.on).toHaveBeenCalledWith('exit', expect.any(Function)); + }); + }); + + describe('Regular Process Exit', () => { + it('should forward exit event to renderer for non-group-chat sessions', () => { + setupListener(); + const handler = eventHandlers.get('exit'); + + handler?.('regular-session-123', 0); + + expect(mockDeps.safeSend).toHaveBeenCalledWith('process:exit', 'regular-session-123', 0); + }); 
+ + it('should remove power block for non-group-chat sessions', () => { + setupListener(); + const handler = eventHandlers.get('exit'); + + handler?.('regular-session-123', 0); + + expect(mockDeps.powerManager.removeBlockReason).toHaveBeenCalledWith( + 'session:regular-session-123' + ); + }); + }); + + describe('Participant Exit', () => { + beforeEach(() => { + mockDeps.outputParser.parseParticipantSessionId = vi.fn().mockReturnValue({ + groupChatId: 'test-chat-123', + participantName: 'TestAgent', + }); + }); + + it('should parse and route participant response on exit', async () => { + setupListener(); + const handler = eventHandlers.get('exit'); + const sessionId = 'group-chat-test-chat-123-participant-TestAgent-abc123'; + + handler?.(sessionId, 0); + + await vi.waitFor(() => { + expect(mockDeps.groupChatRouter.routeAgentResponse).toHaveBeenCalledWith( + 'test-chat-123', + 'TestAgent', + 'parsed response', + expect.anything() + ); + }); + }); + + it('should mark participant as responded after successful routing', async () => { + setupListener(); + const handler = eventHandlers.get('exit'); + const sessionId = 'group-chat-test-chat-123-participant-TestAgent-abc123'; + + handler?.(sessionId, 0); + + await vi.waitFor(() => { + expect(mockDeps.groupChatRouter.markParticipantResponded).toHaveBeenCalledWith( + 'test-chat-123', + 'TestAgent' + ); + }); + }); + + it('should clear output buffer after processing', async () => { + setupListener(); + const handler = eventHandlers.get('exit'); + const sessionId = 'group-chat-test-chat-123-participant-TestAgent-abc123'; + + handler?.(sessionId, 0); + + await vi.waitFor(() => { + expect(mockDeps.outputBuffer.clearGroupChatBuffer).toHaveBeenCalledWith(sessionId); + }); + }); + + it('should not route when buffered output is empty', async () => { + mockDeps.outputBuffer.getGroupChatBufferedOutput = vi.fn().mockReturnValue(''); + setupListener(); + const handler = eventHandlers.get('exit'); + const sessionId = 
'group-chat-test-chat-123-participant-TestAgent-abc123'; + + handler?.(sessionId, 0); + + // Give async operations time to complete + await new Promise((resolve) => setTimeout(resolve, 50)); + + expect(mockDeps.groupChatRouter.routeAgentResponse).not.toHaveBeenCalled(); + }); + + it('should not route when parsed text is empty', async () => { + mockDeps.outputParser.extractTextFromStreamJson = vi.fn().mockReturnValue(' '); + setupListener(); + const handler = eventHandlers.get('exit'); + const sessionId = 'group-chat-test-chat-123-participant-TestAgent-abc123'; + + handler?.(sessionId, 0); + + // Give async operations time to complete + await new Promise((resolve) => setTimeout(resolve, 50)); + + expect(mockDeps.groupChatRouter.routeAgentResponse).not.toHaveBeenCalled(); + }); + }); + + describe('Session Recovery', () => { + beforeEach(() => { + mockDeps.outputParser.parseParticipantSessionId = vi.fn().mockReturnValue({ + groupChatId: 'test-chat-123', + participantName: 'TestAgent', + }); + mockDeps.sessionRecovery.needsSessionRecovery = vi.fn().mockReturnValue(true); + }); + + it('should initiate session recovery when needed', async () => { + setupListener(); + const handler = eventHandlers.get('exit'); + const sessionId = 'group-chat-test-chat-123-participant-TestAgent-abc123'; + + handler?.(sessionId, 0); + + await vi.waitFor(() => { + expect(mockDeps.sessionRecovery.initiateSessionRecovery).toHaveBeenCalledWith( + 'test-chat-123', + 'TestAgent' + ); + }); + }); + + it('should respawn participant after recovery initiation', async () => { + setupListener(); + const handler = eventHandlers.get('exit'); + const sessionId = 'group-chat-test-chat-123-participant-TestAgent-abc123'; + + handler?.(sessionId, 0); + + await vi.waitFor(() => { + expect(mockDeps.groupChatRouter.respawnParticipantWithRecovery).toHaveBeenCalledWith( + 'test-chat-123', + 'TestAgent', + expect.anything(), + expect.anything() + ); + }); + }); + + it('should clear buffer before initiating 
recovery', async () => { + setupListener(); + const handler = eventHandlers.get('exit'); + const sessionId = 'group-chat-test-chat-123-participant-TestAgent-abc123'; + + handler?.(sessionId, 0); + + await vi.waitFor(() => { + expect(mockDeps.outputBuffer.clearGroupChatBuffer).toHaveBeenCalledWith(sessionId); + }); + }); + + it('should not mark participant as responded when recovery succeeds', async () => { + setupListener(); + const handler = eventHandlers.get('exit'); + const sessionId = 'group-chat-test-chat-123-participant-TestAgent-abc123'; + + handler?.(sessionId, 0); + + // Wait for async operations + await new Promise((resolve) => setTimeout(resolve, 50)); + + // When recovery succeeds, markParticipantResponded should NOT be called + // because the recovery spawn will handle that + expect(mockDeps.groupChatRouter.markParticipantResponded).not.toHaveBeenCalled(); + }); + + it('should mark participant as responded when recovery fails', async () => { + mockDeps.groupChatRouter.respawnParticipantWithRecovery = vi + .fn() + .mockRejectedValue(new Error('Recovery failed')); + setupListener(); + const handler = eventHandlers.get('exit'); + const sessionId = 'group-chat-test-chat-123-participant-TestAgent-abc123'; + + handler?.(sessionId, 0); + + await vi.waitFor(() => { + expect(mockDeps.groupChatRouter.markParticipantResponded).toHaveBeenCalledWith( + 'test-chat-123', + 'TestAgent' + ); + }); + }); + }); + + describe('Moderator Exit', () => { + it('should route moderator response on exit', async () => { + setupListener(); + const handler = eventHandlers.get('exit'); + const sessionId = 'group-chat-test-chat-123-moderator-1234567890'; + + handler?.(sessionId, 0); + + await vi.waitFor(() => { + expect(mockDeps.groupChatRouter.routeModeratorResponse).toHaveBeenCalledWith( + 'test-chat-123', + 'parsed response', + expect.anything(), + expect.anything(), + false + ); + }); + }); + + it('should clear moderator buffer after processing', async () => { + setupListener(); + 
const handler = eventHandlers.get('exit'); + const sessionId = 'group-chat-test-chat-123-moderator-1234567890'; + + handler?.(sessionId, 0); + + await vi.waitFor(() => { + expect(mockDeps.outputBuffer.clearGroupChatBuffer).toHaveBeenCalledWith(sessionId); + }); + }); + + it('should handle synthesis sessions correctly', async () => { + setupListener(); + const handler = eventHandlers.get('exit'); + const sessionId = 'group-chat-test-chat-123-moderator-synthesis-1234567890'; + + handler?.(sessionId, 0); + + await vi.waitFor(() => { + expect(mockDeps.groupChatRouter.routeModeratorResponse).toHaveBeenCalled(); + }); + }); + }); + + describe('Error Handling', () => { + beforeEach(() => { + mockDeps.outputParser.parseParticipantSessionId = vi.fn().mockReturnValue({ + groupChatId: 'test-chat-123', + participantName: 'TestAgent', + }); + }); + + it('should log error when routing fails', async () => { + mockDeps.groupChatRouter.routeAgentResponse = vi + .fn() + .mockRejectedValue(new Error('Route failed')); + setupListener(); + const handler = eventHandlers.get('exit'); + const sessionId = 'group-chat-test-chat-123-participant-TestAgent-abc123'; + + handler?.(sessionId, 0); + + await vi.waitFor(() => { + expect(mockDeps.logger.error).toHaveBeenCalled(); + }); + }); + + it('should attempt fallback parsing when primary parsing fails', async () => { + // First call throws, second call (fallback) succeeds + mockDeps.outputParser.extractTextFromStreamJson = vi + .fn() + .mockImplementationOnce(() => { + throw new Error('Parse error'); + }) + .mockReturnValueOnce('fallback parsed response'); + + setupListener(); + const handler = eventHandlers.get('exit'); + const sessionId = 'group-chat-test-chat-123-participant-TestAgent-abc123'; + + handler?.(sessionId, 0); + + await vi.waitFor(() => { + // Should have been called twice: once with agentType, once without (fallback) + expect(mockDeps.outputParser.extractTextFromStreamJson).toHaveBeenCalledTimes(2); + }); + }); + + it('should 
still mark participant as responded after routing error', async () => { + mockDeps.groupChatRouter.routeAgentResponse = vi + .fn() + .mockRejectedValue(new Error('Route failed')); + mockDeps.outputParser.extractTextFromStreamJson = vi + .fn() + .mockReturnValueOnce('parsed response') + .mockReturnValueOnce('fallback response'); + + setupListener(); + const handler = eventHandlers.get('exit'); + const sessionId = 'group-chat-test-chat-123-participant-TestAgent-abc123'; + + handler?.(sessionId, 0); + + await vi.waitFor(() => { + expect(mockDeps.groupChatRouter.markParticipantResponded).toHaveBeenCalledWith( + 'test-chat-123', + 'TestAgent' + ); + }); + }); + }); +}); diff --git a/src/__tests__/main/process-listeners/forwarding-listeners.test.ts b/src/__tests__/main/process-listeners/forwarding-listeners.test.ts new file mode 100644 index 00000000..2975dac1 --- /dev/null +++ b/src/__tests__/main/process-listeners/forwarding-listeners.test.ts @@ -0,0 +1,106 @@ +/** + * Tests for forwarding listeners. + * These listeners simply forward process events to the renderer via IPC. 
+ */ + +import { describe, it, expect, vi, beforeEach } from 'vitest'; +import { setupForwardingListeners } from '../../../main/process-listeners/forwarding-listeners'; +import type { ProcessManager } from '../../../main/process-manager'; +import type { SafeSendFn } from '../../../main/utils/safe-send'; + +describe('Forwarding Listeners', () => { + let mockProcessManager: ProcessManager; + let mockSafeSend: SafeSendFn; + let eventHandlers: Map void>; + + beforeEach(() => { + vi.clearAllMocks(); + eventHandlers = new Map(); + + mockSafeSend = vi.fn(); + + mockProcessManager = { + on: vi.fn((event: string, handler: (...args: unknown[]) => void) => { + eventHandlers.set(event, handler); + }), + } as unknown as ProcessManager; + }); + + it('should register all forwarding event listeners', () => { + setupForwardingListeners(mockProcessManager, { safeSend: mockSafeSend }); + + expect(mockProcessManager.on).toHaveBeenCalledWith('slash-commands', expect.any(Function)); + expect(mockProcessManager.on).toHaveBeenCalledWith('thinking-chunk', expect.any(Function)); + expect(mockProcessManager.on).toHaveBeenCalledWith('tool-execution', expect.any(Function)); + expect(mockProcessManager.on).toHaveBeenCalledWith('stderr', expect.any(Function)); + expect(mockProcessManager.on).toHaveBeenCalledWith('command-exit', expect.any(Function)); + }); + + it('should forward slash-commands events to renderer', () => { + setupForwardingListeners(mockProcessManager, { safeSend: mockSafeSend }); + + const handler = eventHandlers.get('slash-commands'); + const testSessionId = 'test-session-123'; + const testCommands = ['/help', '/clear']; + + handler?.(testSessionId, testCommands); + + expect(mockSafeSend).toHaveBeenCalledWith( + 'process:slash-commands', + testSessionId, + testCommands + ); + }); + + it('should forward thinking-chunk events to renderer', () => { + setupForwardingListeners(mockProcessManager, { safeSend: mockSafeSend }); + + const handler = eventHandlers.get('thinking-chunk'); + 
const testSessionId = 'test-session-123'; + const testChunk = { content: 'thinking...' }; + + handler?.(testSessionId, testChunk); + + expect(mockSafeSend).toHaveBeenCalledWith('process:thinking-chunk', testSessionId, testChunk); + }); + + it('should forward tool-execution events to renderer', () => { + setupForwardingListeners(mockProcessManager, { safeSend: mockSafeSend }); + + const handler = eventHandlers.get('tool-execution'); + const testSessionId = 'test-session-123'; + const testToolExecution = { tool: 'read_file', status: 'completed' }; + + handler?.(testSessionId, testToolExecution); + + expect(mockSafeSend).toHaveBeenCalledWith( + 'process:tool-execution', + testSessionId, + testToolExecution + ); + }); + + it('should forward stderr events to renderer', () => { + setupForwardingListeners(mockProcessManager, { safeSend: mockSafeSend }); + + const handler = eventHandlers.get('stderr'); + const testSessionId = 'test-session-123'; + const testStderr = 'Error: something went wrong'; + + handler?.(testSessionId, testStderr); + + expect(mockSafeSend).toHaveBeenCalledWith('process:stderr', testSessionId, testStderr); + }); + + it('should forward command-exit events to renderer', () => { + setupForwardingListeners(mockProcessManager, { safeSend: mockSafeSend }); + + const handler = eventHandlers.get('command-exit'); + const testSessionId = 'test-session-123'; + const testExitCode = 0; + + handler?.(testSessionId, testExitCode); + + expect(mockSafeSend).toHaveBeenCalledWith('process:command-exit', testSessionId, testExitCode); + }); +}); diff --git a/src/__tests__/main/process-listeners/session-id-listener.test.ts b/src/__tests__/main/process-listeners/session-id-listener.test.ts new file mode 100644 index 00000000..c166ac9d --- /dev/null +++ b/src/__tests__/main/process-listeners/session-id-listener.test.ts @@ -0,0 +1,402 @@ +/** + * Tests for session ID listener. + * Handles agent session ID storage for conversation resume. 
+ */ + +import { describe, it, expect, vi, beforeEach } from 'vitest'; +import { setupSessionIdListener } from '../../../main/process-listeners/session-id-listener'; +import type { ProcessManager } from '../../../main/process-manager'; + +describe('Session ID Listener', () => { + let mockProcessManager: ProcessManager; + let mockDeps: Parameters[1]; + let eventHandlers: Map void>; + + // Create a minimal mock group chat + const createMockGroupChat = () => ({ + id: 'test-chat-123', + name: 'Test Chat', + moderatorAgentId: 'claude-code', + moderatorSessionId: 'group-chat-test-chat-123-moderator', + participants: [ + { + name: 'TestAgent', + agentId: 'claude-code', + sessionId: 'group-chat-test-chat-123-participant-TestAgent-abc123', + addedAt: Date.now(), + }, + ], + createdAt: Date.now(), + updatedAt: Date.now(), + logPath: '/tmp/test-chat.log', + imagesDir: '/tmp/test-chat-images', + }); + + beforeEach(() => { + vi.clearAllMocks(); + eventHandlers = new Map(); + + mockProcessManager = { + on: vi.fn((event: string, handler: (...args: unknown[]) => void) => { + eventHandlers.set(event, handler); + }), + } as unknown as ProcessManager; + + mockDeps = { + safeSend: vi.fn(), + logger: { + info: vi.fn(), + error: vi.fn(), + warn: vi.fn(), + debug: vi.fn(), + }, + groupChatEmitters: { + emitParticipantsChanged: vi.fn(), + emitModeratorSessionIdChanged: vi.fn(), + }, + groupChatStorage: { + loadGroupChat: vi.fn().mockResolvedValue(createMockGroupChat()), + updateGroupChat: vi.fn().mockResolvedValue(createMockGroupChat()), + updateParticipant: vi.fn().mockResolvedValue(createMockGroupChat()), + }, + outputParser: { + extractTextFromStreamJson: vi.fn().mockReturnValue('parsed response'), + parseParticipantSessionId: vi.fn().mockReturnValue(null), + }, + patterns: { + REGEX_MODERATOR_SESSION: /^group-chat-(.+)-moderator-/, + REGEX_MODERATOR_SESSION_TIMESTAMP: /^group-chat-(.+)-moderator-\d+$/, + REGEX_AI_SUFFIX: /-ai-[^-]+$/, + REGEX_AI_TAB_ID: /-ai-([^-]+)$/, + 
REGEX_BATCH_SESSION: /-batch-\d+$/, + REGEX_SYNOPSIS_SESSION: /-synopsis-\d+$/, + }, + }; + }); + + const setupListener = () => { + setupSessionIdListener(mockProcessManager, mockDeps); + }; + + describe('Event Registration', () => { + it('should register the session-id event listener', () => { + setupListener(); + expect(mockProcessManager.on).toHaveBeenCalledWith('session-id', expect.any(Function)); + }); + }); + + describe('Regular Process Session ID', () => { + it('should forward session ID to renderer', () => { + setupListener(); + const handler = eventHandlers.get('session-id'); + + handler?.('regular-session-123', 'agent-session-abc'); + + expect(mockDeps.safeSend).toHaveBeenCalledWith( + 'process:session-id', + 'regular-session-123', + 'agent-session-abc' + ); + }); + }); + + describe('Participant Session ID Storage', () => { + beforeEach(() => { + mockDeps.outputParser.parseParticipantSessionId = vi.fn().mockReturnValue({ + groupChatId: 'test-chat-123', + participantName: 'TestAgent', + }); + }); + + it('should store agent session ID for participant', async () => { + setupListener(); + const handler = eventHandlers.get('session-id'); + + handler?.('group-chat-test-chat-123-participant-TestAgent-abc123', 'agent-session-xyz'); + + await vi.waitFor(() => { + expect(mockDeps.groupChatStorage.updateParticipant).toHaveBeenCalledWith( + 'test-chat-123', + 'TestAgent', + { agentSessionId: 'agent-session-xyz' } + ); + }); + }); + + it('should emit participants changed after storage', async () => { + setupListener(); + const handler = eventHandlers.get('session-id'); + + handler?.('group-chat-test-chat-123-participant-TestAgent-abc123', 'agent-session-xyz'); + + await vi.waitFor(() => { + expect(mockDeps.groupChatEmitters.emitParticipantsChanged).toHaveBeenCalledWith( + 'test-chat-123', + expect.any(Array) + ); + }); + }); + + it('should use updateParticipant return value instead of loading chat again (DB caching)', async () => { + setupListener(); + const handler = 
eventHandlers.get('session-id'); + + handler?.('group-chat-test-chat-123-participant-TestAgent-abc123', 'agent-session-xyz'); + + await vi.waitFor(() => { + expect(mockDeps.groupChatEmitters.emitParticipantsChanged).toHaveBeenCalled(); + }); + + // Verify we didn't make a redundant loadGroupChat call + // The code should use the return value from updateParticipant directly + expect(mockDeps.groupChatStorage.loadGroupChat).not.toHaveBeenCalled(); + }); + + it('should pass exact participants from updateParticipant return value', async () => { + const specificParticipants = [ + { name: 'Agent1', agentId: 'claude-code', sessionId: 'session-1', addedAt: 1000 }, + { name: 'Agent2', agentId: 'codex', sessionId: 'session-2', addedAt: 2000 }, + ]; + mockDeps.groupChatStorage.updateParticipant = vi.fn().mockResolvedValue({ + ...createMockGroupChat(), + participants: specificParticipants, + }); + setupListener(); + const handler = eventHandlers.get('session-id'); + + handler?.('group-chat-test-chat-123-participant-TestAgent-abc123', 'agent-session-xyz'); + + await vi.waitFor(() => { + expect(mockDeps.groupChatEmitters.emitParticipantsChanged).toHaveBeenCalledWith( + 'test-chat-123', + specificParticipants + ); + }); + }); + + it('should handle empty participants array from updateParticipant', async () => { + mockDeps.groupChatStorage.updateParticipant = vi.fn().mockResolvedValue({ + ...createMockGroupChat(), + participants: [], + }); + setupListener(); + const handler = eventHandlers.get('session-id'); + + handler?.('group-chat-test-chat-123-participant-TestAgent-abc123', 'agent-session-xyz'); + + await vi.waitFor(() => { + expect(mockDeps.groupChatEmitters.emitParticipantsChanged).toHaveBeenCalledWith( + 'test-chat-123', + [] + ); + }); + }); + + it('should handle undefined emitParticipantsChanged gracefully (optional chaining)', async () => { + mockDeps.groupChatEmitters.emitParticipantsChanged = undefined; + setupListener(); + const handler = 
eventHandlers.get('session-id'); + + // Should not throw + handler?.('group-chat-test-chat-123-participant-TestAgent-abc123', 'agent-session-xyz'); + + await vi.waitFor(() => { + expect(mockDeps.groupChatStorage.updateParticipant).toHaveBeenCalled(); + }); + // No error should be logged for the optional emitter + expect(mockDeps.logger.error).not.toHaveBeenCalled(); + }); + + it('should log error when storage fails', async () => { + mockDeps.groupChatStorage.updateParticipant = vi + .fn() + .mockRejectedValue(new Error('DB error')); + setupListener(); + const handler = eventHandlers.get('session-id'); + + handler?.('group-chat-test-chat-123-participant-TestAgent-abc123', 'agent-session-xyz'); + + await vi.waitFor(() => { + expect(mockDeps.logger.error).toHaveBeenCalledWith( + '[GroupChat] Failed to update participant agentSessionId', + 'ProcessListener', + expect.objectContaining({ + error: 'Error: DB error', + participant: 'TestAgent', + }) + ); + }); + }); + + it('should still forward to renderer after storage', () => { + setupListener(); + const handler = eventHandlers.get('session-id'); + + handler?.('group-chat-test-chat-123-participant-TestAgent-abc123', 'agent-session-xyz'); + + expect(mockDeps.safeSend).toHaveBeenCalledWith( + 'process:session-id', + 'group-chat-test-chat-123-participant-TestAgent-abc123', + 'agent-session-xyz' + ); + }); + }); + + describe('Moderator Session ID Storage', () => { + it('should store agent session ID for moderator', async () => { + setupListener(); + const handler = eventHandlers.get('session-id'); + + handler?.('group-chat-test-chat-123-moderator-1234567890', 'moderator-session-xyz'); + + await vi.waitFor(() => { + expect(mockDeps.groupChatStorage.updateGroupChat).toHaveBeenCalledWith('test-chat-123', { + moderatorAgentSessionId: 'moderator-session-xyz', + }); + }); + }); + + it('should emit moderator session ID changed after storage', async () => { + setupListener(); + const handler = eventHandlers.get('session-id'); + + 
handler?.('group-chat-test-chat-123-moderator-1234567890', 'moderator-session-xyz'); + + await vi.waitFor(() => { + expect(mockDeps.groupChatEmitters.emitModeratorSessionIdChanged).toHaveBeenCalledWith( + 'test-chat-123', + 'moderator-session-xyz' + ); + }); + }); + + it('should log error when moderator storage fails', async () => { + mockDeps.groupChatStorage.updateGroupChat = vi.fn().mockRejectedValue(new Error('DB error')); + setupListener(); + const handler = eventHandlers.get('session-id'); + + handler?.('group-chat-test-chat-123-moderator-1234567890', 'moderator-session-xyz'); + + await vi.waitFor(() => { + expect(mockDeps.logger.error).toHaveBeenCalledWith( + '[GroupChat] Failed to update moderator agent session ID', + 'ProcessListener', + expect.objectContaining({ + error: 'Error: DB error', + groupChatId: 'test-chat-123', + }) + ); + }); + }); + + it('should still forward to renderer for moderator sessions', () => { + setupListener(); + const handler = eventHandlers.get('session-id'); + + handler?.('group-chat-test-chat-123-moderator-1234567890', 'moderator-session-xyz'); + + expect(mockDeps.safeSend).toHaveBeenCalledWith( + 'process:session-id', + 'group-chat-test-chat-123-moderator-1234567890', + 'moderator-session-xyz' + ); + }); + + it('should NOT store for synthesis moderator sessions (different pattern)', () => { + setupListener(); + const handler = eventHandlers.get('session-id'); + + // Synthesis session ID doesn't match REGEX_MODERATOR_SESSION_TIMESTAMP + // because it has 'synthesis' in it: group-chat-xxx-moderator-synthesis-timestamp + handler?.('group-chat-test-chat-123-moderator-synthesis-1234567890', 'synthesis-session-xyz'); + + // Should NOT call updateGroupChat for synthesis sessions (doesn't match timestamp pattern) + expect(mockDeps.groupChatStorage.updateGroupChat).not.toHaveBeenCalled(); + }); + }); + + describe('Session ID Format Handling', () => { + it('should handle empty agent session ID', () => { + setupListener(); + const handler 
= eventHandlers.get('session-id'); + + handler?.('regular-session-123', ''); + + expect(mockDeps.safeSend).toHaveBeenCalledWith( + 'process:session-id', + 'regular-session-123', + '' + ); + }); + + it('should handle UUID format session IDs', () => { + setupListener(); + const handler = eventHandlers.get('session-id'); + + handler?.('regular-session-123', 'a1b2c3d4-e5f6-7890-abcd-ef1234567890'); + + expect(mockDeps.safeSend).toHaveBeenCalledWith( + 'process:session-id', + 'regular-session-123', + 'a1b2c3d4-e5f6-7890-abcd-ef1234567890' + ); + }); + + it('should handle long session IDs', () => { + setupListener(); + const handler = eventHandlers.get('session-id'); + const longSessionId = 'a'.repeat(500); + + handler?.('regular-session-123', longSessionId); + + expect(mockDeps.safeSend).toHaveBeenCalledWith( + 'process:session-id', + 'regular-session-123', + longSessionId + ); + }); + }); + + describe('Performance Optimization', () => { + it('should skip participant parsing for non-group-chat sessions (prefix check)', () => { + setupListener(); + const handler = eventHandlers.get('session-id'); + + // Regular session ID doesn't start with 'group-chat-' + handler?.('regular-session-123', 'agent-session-abc'); + + // parseParticipantSessionId should NOT be called for non-group-chat sessions + expect(mockDeps.outputParser.parseParticipantSessionId).not.toHaveBeenCalled(); + }); + + it('should only parse participant session ID for group-chat sessions', () => { + mockDeps.outputParser.parseParticipantSessionId = vi.fn().mockReturnValue(null); + setupListener(); + const handler = eventHandlers.get('session-id'); + + // Group chat session ID starts with 'group-chat-' + handler?.('group-chat-test-123-participant-Agent-abc', 'agent-session-xyz'); + + // parseParticipantSessionId SHOULD be called for group-chat sessions + expect(mockDeps.outputParser.parseParticipantSessionId).toHaveBeenCalledWith( + 'group-chat-test-123-participant-Agent-abc' + ); + }); + + it('should skip 
moderator regex for non-group-chat sessions', () => { + setupListener(); + const handler = eventHandlers.get('session-id'); + + // Process many non-group-chat sessions - should be fast since regex is skipped + for (let i = 0; i < 100; i++) { + handler?.(`regular-session-${i}`, `agent-session-${i}`); + } + + // Neither storage method should be called for regular sessions + expect(mockDeps.groupChatStorage.updateParticipant).not.toHaveBeenCalled(); + expect(mockDeps.groupChatStorage.updateGroupChat).not.toHaveBeenCalled(); + // But all should still forward to renderer + expect(mockDeps.safeSend).toHaveBeenCalledTimes(100); + }); + }); +}); diff --git a/src/__tests__/main/process-listeners/stats-listener.test.ts b/src/__tests__/main/process-listeners/stats-listener.test.ts new file mode 100644 index 00000000..fb4c67d2 --- /dev/null +++ b/src/__tests__/main/process-listeners/stats-listener.test.ts @@ -0,0 +1,239 @@ +/** + * Tests for stats listener. + * Handles query-complete events for usage statistics tracking. 
+ */ + +import { describe, it, expect, vi, beforeEach } from 'vitest'; +import { setupStatsListener } from '../../../main/process-listeners/stats-listener'; +import type { ProcessManager } from '../../../main/process-manager'; +import type { SafeSendFn } from '../../../main/utils/safe-send'; +import type { QueryCompleteData } from '../../../main/process-manager/types'; +import type { StatsDB } from '../../../main/stats'; +import type { ProcessListenerDependencies } from '../../../main/process-listeners/types'; + +describe('Stats Listener', () => { + let mockProcessManager: ProcessManager; + let mockSafeSend: SafeSendFn; + let mockStatsDB: StatsDB; + let mockLogger: ProcessListenerDependencies['logger']; + let eventHandlers: Map void>; + + beforeEach(() => { + vi.clearAllMocks(); + eventHandlers = new Map(); + + mockSafeSend = vi.fn(); + mockLogger = { + info: vi.fn(), + error: vi.fn(), + warn: vi.fn(), + debug: vi.fn(), + }; + + mockStatsDB = { + isReady: vi.fn(() => true), + insertQueryEvent: vi.fn(() => 'event-id-123'), + } as unknown as StatsDB; + + mockProcessManager = { + on: vi.fn((event: string, handler: (...args: unknown[]) => void) => { + eventHandlers.set(event, handler); + }), + } as unknown as ProcessManager; + }); + + it('should register the query-complete event listener', () => { + setupStatsListener(mockProcessManager, { + safeSend: mockSafeSend, + getStatsDB: () => mockStatsDB, + logger: mockLogger, + }); + + expect(mockProcessManager.on).toHaveBeenCalledWith('query-complete', expect.any(Function)); + }); + + it('should record query event to stats database when ready', async () => { + setupStatsListener(mockProcessManager, { + safeSend: mockSafeSend, + getStatsDB: () => mockStatsDB, + logger: mockLogger, + }); + + const handler = eventHandlers.get('query-complete'); + const testSessionId = 'test-session-123'; + const testQueryData: QueryCompleteData = { + sessionId: testSessionId, + agentType: 'claude-code', + source: 'user', + startTime: Date.now() 
- 5000, + duration: 5000, + projectPath: '/test/project', + tabId: 'tab-123', + }; + + handler?.(testSessionId, testQueryData); + + // Wait for async processing + await vi.waitFor(() => { + expect(mockStatsDB.isReady).toHaveBeenCalled(); + expect(mockStatsDB.insertQueryEvent).toHaveBeenCalledWith({ + sessionId: testQueryData.sessionId, + agentType: testQueryData.agentType, + source: testQueryData.source, + startTime: testQueryData.startTime, + duration: testQueryData.duration, + projectPath: testQueryData.projectPath, + tabId: testQueryData.tabId, + }); + expect(mockSafeSend).toHaveBeenCalledWith('stats:updated'); + }); + }); + + it('should not record event when stats database is not ready', () => { + vi.mocked(mockStatsDB.isReady).mockReturnValue(false); + + setupStatsListener(mockProcessManager, { + safeSend: mockSafeSend, + getStatsDB: () => mockStatsDB, + logger: mockLogger, + }); + + const handler = eventHandlers.get('query-complete'); + const testQueryData: QueryCompleteData = { + sessionId: 'session-456', + agentType: 'codex', + source: 'auto', + startTime: Date.now(), + duration: 1000, + projectPath: '/test/project', + tabId: 'tab-456', + }; + + handler?.('session-456', testQueryData); + + expect(mockStatsDB.isReady).toHaveBeenCalled(); + expect(mockStatsDB.insertQueryEvent).not.toHaveBeenCalled(); + expect(mockSafeSend).not.toHaveBeenCalled(); + }); + + it('should log error when recording fails after retries', async () => { + vi.mocked(mockStatsDB.insertQueryEvent).mockImplementation(() => { + throw new Error('Database error'); + }); + + setupStatsListener(mockProcessManager, { + safeSend: mockSafeSend, + getStatsDB: () => mockStatsDB, + logger: mockLogger, + }); + + const handler = eventHandlers.get('query-complete'); + const testQueryData: QueryCompleteData = { + sessionId: 'session-789', + agentType: 'opencode', + source: 'user', + startTime: Date.now(), + duration: 2000, + projectPath: '/test/project', + tabId: 'tab-789', + }; + + 
handler?.('session-789', testQueryData); + + // Wait for all retries to complete (100ms + 200ms + final attempt) + await vi.waitFor( + () => { + expect(mockLogger.error).toHaveBeenCalledWith( + expect.stringContaining('Failed to record query event after 3 attempts'), + '[Stats]', + expect.objectContaining({ + sessionId: 'session-789', + }) + ); + }, + { timeout: 1000 } + ); + // Should have tried 3 times + expect(mockStatsDB.insertQueryEvent).toHaveBeenCalledTimes(3); + // Should not have broadcasted update on failure + expect(mockSafeSend).not.toHaveBeenCalled(); + }); + + it('should log debug info when recording succeeds', async () => { + setupStatsListener(mockProcessManager, { + safeSend: mockSafeSend, + getStatsDB: () => mockStatsDB, + logger: mockLogger, + }); + + const handler = eventHandlers.get('query-complete'); + const testQueryData: QueryCompleteData = { + sessionId: 'session-abc', + agentType: 'claude-code', + source: 'user', + startTime: Date.now(), + duration: 3000, + projectPath: '/test/project', + tabId: 'tab-abc', + }; + + handler?.('session-abc', testQueryData); + + // Wait for async processing + await vi.waitFor(() => { + expect(mockLogger.debug).toHaveBeenCalledWith( + expect.stringContaining('Recorded query event'), + '[Stats]', + expect.objectContaining({ + sessionId: 'session-abc', + agentType: 'claude-code', + source: 'user', + duration: 3000, + }) + ); + }); + }); + + it('should retry on transient failure and succeed', async () => { + // First call fails, second succeeds + vi.mocked(mockStatsDB.insertQueryEvent) + .mockImplementationOnce(() => { + throw new Error('Transient error'); + }) + .mockImplementationOnce(() => 'event-id-456'); + + setupStatsListener(mockProcessManager, { + safeSend: mockSafeSend, + getStatsDB: () => mockStatsDB, + logger: mockLogger, + }); + + const handler = eventHandlers.get('query-complete'); + const testQueryData: QueryCompleteData = { + sessionId: 'session-retry', + agentType: 'claude-code', + source: 'user', 
+ startTime: Date.now(), + duration: 1000, + projectPath: '/test/project', + tabId: 'tab-retry', + }; + + handler?.('session-retry', testQueryData); + + // Wait for retry to complete + await vi.waitFor( + () => { + expect(mockStatsDB.insertQueryEvent).toHaveBeenCalledTimes(2); + expect(mockSafeSend).toHaveBeenCalledWith('stats:updated'); + }, + { timeout: 500 } + ); + // Should have logged warning for first failure + expect(mockLogger.warn).toHaveBeenCalledWith( + expect.stringContaining('Stats DB insert failed'), + '[Stats]', + expect.any(Object) + ); + }); +}); diff --git a/src/__tests__/main/process-listeners/usage-listener.test.ts b/src/__tests__/main/process-listeners/usage-listener.test.ts new file mode 100644 index 00000000..6d5385ce --- /dev/null +++ b/src/__tests__/main/process-listeners/usage-listener.test.ts @@ -0,0 +1,433 @@ +/** + * Tests for usage listener. + * Handles token/cost statistics from AI responses. + */ + +import { describe, it, expect, vi, beforeEach } from 'vitest'; +import { setupUsageListener } from '../../../main/process-listeners/usage-listener'; +import type { ProcessManager } from '../../../main/process-manager'; +import type { UsageStats } from '../../../main/process-listeners/types'; + +describe('Usage Listener', () => { + let mockProcessManager: ProcessManager; + let mockDeps: Parameters[1]; + let eventHandlers: Map void>; + + const createMockUsageStats = (overrides: Partial = {}): UsageStats => ({ + inputTokens: 1000, + outputTokens: 500, + cacheReadInputTokens: 200, + cacheCreationInputTokens: 100, + totalCostUsd: 0.05, + contextWindow: 100000, + ...overrides, + }); + + // Create a minimal mock group chat + const createMockGroupChat = () => ({ + id: 'test-chat-123', + name: 'Test Chat', + moderatorAgentId: 'claude-code', + moderatorSessionId: 'group-chat-test-chat-123-moderator', + participants: [ + { + name: 'TestAgent', + agentId: 'claude-code', + sessionId: 'group-chat-test-chat-123-participant-TestAgent-abc123', + addedAt: 
Date.now(), + }, + ], + createdAt: Date.now(), + updatedAt: Date.now(), + logPath: '/tmp/test-chat.log', + imagesDir: '/tmp/test-chat-images', + }); + + beforeEach(() => { + vi.clearAllMocks(); + eventHandlers = new Map(); + + mockProcessManager = { + on: vi.fn((event: string, handler: (...args: unknown[]) => void) => { + eventHandlers.set(event, handler); + }), + } as unknown as ProcessManager; + + mockDeps = { + safeSend: vi.fn(), + logger: { + info: vi.fn(), + error: vi.fn(), + warn: vi.fn(), + debug: vi.fn(), + }, + groupChatEmitters: { + emitParticipantsChanged: vi.fn(), + emitModeratorUsage: vi.fn(), + }, + groupChatStorage: { + loadGroupChat: vi.fn().mockResolvedValue(createMockGroupChat()), + updateGroupChat: vi.fn().mockResolvedValue(createMockGroupChat()), + updateParticipant: vi.fn().mockResolvedValue(createMockGroupChat()), + }, + outputParser: { + extractTextFromStreamJson: vi.fn().mockReturnValue('parsed response'), + parseParticipantSessionId: vi.fn().mockReturnValue(null), + }, + usageAggregator: { + calculateContextTokens: vi.fn().mockReturnValue(1800), + }, + patterns: { + REGEX_MODERATOR_SESSION: /^group-chat-(.+)-moderator-/, + REGEX_MODERATOR_SESSION_TIMESTAMP: /^group-chat-(.+)-moderator-\d+$/, + REGEX_AI_SUFFIX: /-ai-[^-]+$/, + REGEX_AI_TAB_ID: /-ai-([^-]+)$/, + REGEX_BATCH_SESSION: /-batch-\d+$/, + REGEX_SYNOPSIS_SESSION: /-synopsis-\d+$/, + }, + }; + }); + + const setupListener = () => { + setupUsageListener(mockProcessManager, mockDeps); + }; + + describe('Event Registration', () => { + it('should register the usage event listener', () => { + setupListener(); + expect(mockProcessManager.on).toHaveBeenCalledWith('usage', expect.any(Function)); + }); + }); + + describe('Regular Process Usage', () => { + it('should forward usage stats to renderer', () => { + setupListener(); + const handler = eventHandlers.get('usage'); + const usageStats = createMockUsageStats(); + + handler?.('regular-session-123', usageStats); + + 
expect(mockDeps.safeSend).toHaveBeenCalledWith( + 'process:usage', + 'regular-session-123', + usageStats + ); + }); + }); + + describe('Participant Usage', () => { + beforeEach(() => { + mockDeps.outputParser.parseParticipantSessionId = vi.fn().mockReturnValue({ + groupChatId: 'test-chat-123', + participantName: 'TestAgent', + }); + }); + + it('should update participant with usage stats', async () => { + setupListener(); + const handler = eventHandlers.get('usage'); + const usageStats = createMockUsageStats(); + + handler?.('group-chat-test-chat-123-participant-TestAgent-abc123', usageStats); + + await vi.waitFor(() => { + expect(mockDeps.groupChatStorage.updateParticipant).toHaveBeenCalledWith( + 'test-chat-123', + 'TestAgent', + expect.objectContaining({ + contextUsage: expect.any(Number), + tokenCount: 1800, + totalCost: 0.05, + }) + ); + }); + }); + + it('should calculate context usage percentage correctly', async () => { + mockDeps.usageAggregator.calculateContextTokens = vi.fn().mockReturnValue(50000); // 50% of 100000 + setupListener(); + const handler = eventHandlers.get('usage'); + const usageStats = createMockUsageStats({ contextWindow: 100000 }); + + handler?.('group-chat-test-chat-123-participant-TestAgent-abc123', usageStats); + + await vi.waitFor(() => { + expect(mockDeps.groupChatStorage.updateParticipant).toHaveBeenCalledWith( + 'test-chat-123', + 'TestAgent', + expect.objectContaining({ + contextUsage: 50, + }) + ); + }); + }); + + it('should handle zero context window gracefully (falls back to 200k default)', async () => { + setupListener(); + const handler = eventHandlers.get('usage'); + const usageStats = createMockUsageStats({ contextWindow: 0 }); + + handler?.('group-chat-test-chat-123-participant-TestAgent-abc123', usageStats); + + // With contextWindow 0, falls back to 200k default + // 1800 / 200000 = 0.9% -> rounds to 1% + await vi.waitFor(() => { + expect(mockDeps.groupChatStorage.updateParticipant).toHaveBeenCalledWith( + 
'test-chat-123', + 'TestAgent', + expect.objectContaining({ + contextUsage: 1, + }) + ); + }); + }); + + it('should emit participants changed after update', async () => { + setupListener(); + const handler = eventHandlers.get('usage'); + const usageStats = createMockUsageStats(); + + handler?.('group-chat-test-chat-123-participant-TestAgent-abc123', usageStats); + + await vi.waitFor(() => { + expect(mockDeps.groupChatEmitters.emitParticipantsChanged).toHaveBeenCalledWith( + 'test-chat-123', + expect.any(Array) + ); + }); + }); + + it('should use updateParticipant return value instead of loading chat again (DB caching)', async () => { + setupListener(); + const handler = eventHandlers.get('usage'); + const usageStats = createMockUsageStats(); + + handler?.('group-chat-test-chat-123-participant-TestAgent-abc123', usageStats); + + await vi.waitFor(() => { + expect(mockDeps.groupChatEmitters.emitParticipantsChanged).toHaveBeenCalled(); + }); + + // Verify we didn't make a redundant loadGroupChat call + // The code should use the return value from updateParticipant directly + expect(mockDeps.groupChatStorage.loadGroupChat).not.toHaveBeenCalled(); + }); + + it('should pass exact participants from updateParticipant return value', async () => { + const specificParticipants = [ + { name: 'Agent1', agentId: 'claude-code', sessionId: 'session-1', addedAt: 1000 }, + { name: 'Agent2', agentId: 'codex', sessionId: 'session-2', addedAt: 2000 }, + ]; + mockDeps.groupChatStorage.updateParticipant = vi.fn().mockResolvedValue({ + ...createMockGroupChat(), + participants: specificParticipants, + }); + setupListener(); + const handler = eventHandlers.get('usage'); + const usageStats = createMockUsageStats(); + + handler?.('group-chat-test-chat-123-participant-TestAgent-abc123', usageStats); + + await vi.waitFor(() => { + expect(mockDeps.groupChatEmitters.emitParticipantsChanged).toHaveBeenCalledWith( + 'test-chat-123', + specificParticipants + ); + }); + }); + + it('should handle empty 
participants array from updateParticipant', async () => { + mockDeps.groupChatStorage.updateParticipant = vi.fn().mockResolvedValue({ + ...createMockGroupChat(), + participants: [], + }); + setupListener(); + const handler = eventHandlers.get('usage'); + const usageStats = createMockUsageStats(); + + handler?.('group-chat-test-chat-123-participant-TestAgent-abc123', usageStats); + + await vi.waitFor(() => { + expect(mockDeps.groupChatEmitters.emitParticipantsChanged).toHaveBeenCalledWith( + 'test-chat-123', + [] + ); + }); + }); + + it('should handle undefined emitParticipantsChanged gracefully (optional chaining)', async () => { + mockDeps.groupChatEmitters.emitParticipantsChanged = undefined; + setupListener(); + const handler = eventHandlers.get('usage'); + const usageStats = createMockUsageStats(); + + // Should not throw + handler?.('group-chat-test-chat-123-participant-TestAgent-abc123', usageStats); + + await vi.waitFor(() => { + expect(mockDeps.groupChatStorage.updateParticipant).toHaveBeenCalled(); + }); + // No error should be logged for the optional emitter + expect(mockDeps.logger.error).not.toHaveBeenCalled(); + }); + + it('should log error when participant update fails', async () => { + mockDeps.groupChatStorage.updateParticipant = vi + .fn() + .mockRejectedValue(new Error('DB error')); + setupListener(); + const handler = eventHandlers.get('usage'); + const usageStats = createMockUsageStats(); + + handler?.('group-chat-test-chat-123-participant-TestAgent-abc123', usageStats); + + await vi.waitFor(() => { + expect(mockDeps.logger.error).toHaveBeenCalledWith( + '[GroupChat] Failed to update participant usage', + 'ProcessListener', + expect.objectContaining({ + error: 'Error: DB error', + participant: 'TestAgent', + }) + ); + }); + }); + + it('should still forward to renderer for participant usage', () => { + setupListener(); + const handler = eventHandlers.get('usage'); + const usageStats = createMockUsageStats(); + + 
handler?.('group-chat-test-chat-123-participant-TestAgent-abc123', usageStats); + + expect(mockDeps.safeSend).toHaveBeenCalledWith( + 'process:usage', + 'group-chat-test-chat-123-participant-TestAgent-abc123', + usageStats + ); + }); + }); + + describe('Moderator Usage', () => { + it('should emit moderator usage for moderator sessions', () => { + setupListener(); + const handler = eventHandlers.get('usage'); + const usageStats = createMockUsageStats(); + + handler?.('group-chat-test-chat-123-moderator-1234567890', usageStats); + + expect(mockDeps.groupChatEmitters.emitModeratorUsage).toHaveBeenCalledWith( + 'test-chat-123', + expect.objectContaining({ + contextUsage: expect.any(Number), + totalCost: 0.05, + tokenCount: 1800, + }) + ); + }); + + it('should calculate moderator context usage correctly', () => { + mockDeps.usageAggregator.calculateContextTokens = vi.fn().mockReturnValue(25000); // 25% of 100000 + setupListener(); + const handler = eventHandlers.get('usage'); + const usageStats = createMockUsageStats({ contextWindow: 100000 }); + + handler?.('group-chat-test-chat-123-moderator-1234567890', usageStats); + + expect(mockDeps.groupChatEmitters.emitModeratorUsage).toHaveBeenCalledWith( + 'test-chat-123', + expect.objectContaining({ + contextUsage: 25, + }) + ); + }); + + it('should still forward to renderer for moderator usage', () => { + setupListener(); + const handler = eventHandlers.get('usage'); + const usageStats = createMockUsageStats(); + + handler?.('group-chat-test-chat-123-moderator-1234567890', usageStats); + + expect(mockDeps.safeSend).toHaveBeenCalledWith( + 'process:usage', + 'group-chat-test-chat-123-moderator-1234567890', + usageStats + ); + }); + + it('should handle synthesis moderator sessions', () => { + setupListener(); + const handler = eventHandlers.get('usage'); + const usageStats = createMockUsageStats(); + + handler?.('group-chat-test-chat-123-moderator-synthesis-1234567890', usageStats); + + 
expect(mockDeps.groupChatEmitters.emitModeratorUsage).toHaveBeenCalledWith( + 'test-chat-123', + expect.any(Object) + ); + }); + }); + + describe('Usage with Reasoning Tokens', () => { + it('should handle usage stats with reasoning tokens', () => { + setupListener(); + const handler = eventHandlers.get('usage'); + const usageStats = createMockUsageStats({ reasoningTokens: 1000 }); + + handler?.('regular-session-123', usageStats); + + expect(mockDeps.safeSend).toHaveBeenCalledWith( + 'process:usage', + 'regular-session-123', + expect.objectContaining({ reasoningTokens: 1000 }) + ); + }); + }); + + describe('Performance Optimization', () => { + it('should skip participant parsing for non-group-chat sessions (prefix check)', () => { + setupListener(); + const handler = eventHandlers.get('usage'); + const usageStats = createMockUsageStats(); + + // Regular session ID doesn't start with 'group-chat-' + handler?.('regular-session-123', usageStats); + + // parseParticipantSessionId should NOT be called for non-group-chat sessions + expect(mockDeps.outputParser.parseParticipantSessionId).not.toHaveBeenCalled(); + }); + + it('should only parse participant session ID for group-chat sessions', () => { + mockDeps.outputParser.parseParticipantSessionId = vi.fn().mockReturnValue(null); + setupListener(); + const handler = eventHandlers.get('usage'); + const usageStats = createMockUsageStats(); + + // Group chat session ID starts with 'group-chat-' + handler?.('group-chat-test-123-participant-Agent-abc', usageStats); + + // parseParticipantSessionId SHOULD be called for group-chat sessions + expect(mockDeps.outputParser.parseParticipantSessionId).toHaveBeenCalledWith( + 'group-chat-test-123-participant-Agent-abc' + ); + }); + + it('should skip moderator regex for non-group-chat sessions', () => { + setupListener(); + const handler = eventHandlers.get('usage'); + const usageStats = createMockUsageStats(); + + // Process many non-group-chat sessions - should be fast since regex is 
skipped + for (let i = 0; i < 100; i++) { + handler?.(`regular-session-${i}`, usageStats); + } + + // Moderator usage should NOT be emitted for any regular sessions + expect(mockDeps.groupChatEmitters.emitModeratorUsage).not.toHaveBeenCalled(); + // But all should still forward to renderer + expect(mockDeps.safeSend).toHaveBeenCalledTimes(100); + }); + }); +}); diff --git a/src/__tests__/main/stats-db.test.ts b/src/__tests__/main/stats-db.test.ts deleted file mode 100644 index 9552e184..00000000 --- a/src/__tests__/main/stats-db.test.ts +++ /dev/null @@ -1,6459 +0,0 @@ -/** - * Tests for stats-db.ts - * - * Note: better-sqlite3 is a native module compiled for Electron's Node version. - * Direct testing with the native module in vitest is not possible without - * electron-rebuild for the vitest runtime. These tests use mocked database - * operations to verify the logic without requiring the actual native module. - * - * For full integration testing of the SQLite database, use the Electron test - * environment (e2e tests) where the native module is properly loaded. 
- */ - -import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest'; -import * as path from 'path'; -import * as os from 'os'; - -// Track Database constructor calls to verify file path -let lastDbPath: string | null = null; - -// Store mock references so they can be accessed in tests -const mockStatement = { - run: vi.fn(() => ({ changes: 1 })), - get: vi.fn(() => ({ count: 0, total_duration: 0 })), - all: vi.fn(() => []), -}; - -const mockDb = { - pragma: vi.fn((sql: string) => { - // Handle different pragma calls appropriately - if (sql === 'user_version') return [{ user_version: 0 }]; - if (sql === 'integrity_check') return [{ integrity_check: 'ok' }]; - if (sql.startsWith('journal_mode')) return undefined; - if (sql.startsWith('user_version =')) return undefined; - return [{ user_version: 0 }]; - }), - prepare: vi.fn(() => mockStatement), - close: vi.fn(), - // Transaction mock that immediately executes the function - transaction: vi.fn((fn: () => void) => { - return () => fn(); - }), -}; - -// Mock better-sqlite3 as a class -vi.mock('better-sqlite3', () => { - return { - default: class MockDatabase { - constructor(dbPath: string) { - lastDbPath = dbPath; - } - pragma = mockDb.pragma; - prepare = mockDb.prepare; - close = mockDb.close; - transaction = mockDb.transaction; - }, - }; -}); - -// Mock electron's app module with trackable userData path -const mockUserDataPath = path.join(os.tmpdir(), 'maestro-test-stats-db'); -vi.mock('electron', () => ({ - app: { - getPath: vi.fn((name: string) => { - if (name === 'userData') return mockUserDataPath; - return os.tmpdir(); - }), - }, -})); - -// Track fs calls -const mockFsExistsSync = vi.fn(() => true); -const mockFsMkdirSync = vi.fn(); -const mockFsCopyFileSync = vi.fn(); -const mockFsUnlinkSync = vi.fn(); -const mockFsRenameSync = vi.fn(); -const mockFsStatSync = vi.fn(() => ({ size: 1024 })); -const mockFsReadFileSync = vi.fn(() => '0'); // Default: old timestamp (triggers vacuum check) -const 
mockFsWriteFileSync = vi.fn(); - -// Mock fs -vi.mock('fs', () => ({ - existsSync: (...args: unknown[]) => mockFsExistsSync(...args), - mkdirSync: (...args: unknown[]) => mockFsMkdirSync(...args), - copyFileSync: (...args: unknown[]) => mockFsCopyFileSync(...args), - unlinkSync: (...args: unknown[]) => mockFsUnlinkSync(...args), - renameSync: (...args: unknown[]) => mockFsRenameSync(...args), - statSync: (...args: unknown[]) => mockFsStatSync(...args), - readFileSync: (...args: unknown[]) => mockFsReadFileSync(...args), - writeFileSync: (...args: unknown[]) => mockFsWriteFileSync(...args), -})); - -// Mock logger -vi.mock('../../main/utils/logger', () => ({ - logger: { - info: vi.fn(), - warn: vi.fn(), - error: vi.fn(), - debug: vi.fn(), - }, -})); - -// Import types only - we'll test the type definitions -import type { - QueryEvent, - AutoRunSession, - AutoRunTask, - SessionLifecycleEvent, - StatsTimeRange, - StatsFilters, - StatsAggregation, -} from '../../shared/stats-types'; - -describe('stats-types.ts', () => { - describe('QueryEvent interface', () => { - it('should define proper QueryEvent structure', () => { - const event: QueryEvent = { - id: 'test-id', - sessionId: 'session-1', - agentType: 'claude-code', - source: 'user', - startTime: Date.now(), - duration: 5000, - projectPath: '/test/project', - tabId: 'tab-1', - }; - - expect(event.id).toBe('test-id'); - expect(event.sessionId).toBe('session-1'); - expect(event.source).toBe('user'); - }); - - it('should allow optional fields to be undefined', () => { - const event: QueryEvent = { - id: 'test-id', - sessionId: 'session-1', - agentType: 'claude-code', - source: 'auto', - startTime: Date.now(), - duration: 3000, - }; - - expect(event.projectPath).toBeUndefined(); - expect(event.tabId).toBeUndefined(); - }); - }); - - describe('AutoRunSession interface', () => { - it('should define proper AutoRunSession structure', () => { - const session: AutoRunSession = { - id: 'auto-run-1', - sessionId: 'session-1', - 
agentType: 'claude-code', - documentPath: '/docs/task.md', - startTime: Date.now(), - duration: 60000, - tasksTotal: 5, - tasksCompleted: 3, - projectPath: '/test/project', - }; - - expect(session.id).toBe('auto-run-1'); - expect(session.tasksTotal).toBe(5); - expect(session.tasksCompleted).toBe(3); - }); - }); - - describe('AutoRunTask interface', () => { - it('should define proper AutoRunTask structure', () => { - const task: AutoRunTask = { - id: 'task-1', - autoRunSessionId: 'auto-run-1', - sessionId: 'session-1', - agentType: 'claude-code', - taskIndex: 0, - taskContent: 'First task content', - startTime: Date.now(), - duration: 10000, - success: true, - }; - - expect(task.id).toBe('task-1'); - expect(task.taskIndex).toBe(0); - expect(task.success).toBe(true); - }); - - it('should handle failed tasks', () => { - const task: AutoRunTask = { - id: 'task-2', - autoRunSessionId: 'auto-run-1', - sessionId: 'session-1', - agentType: 'claude-code', - taskIndex: 1, - startTime: Date.now(), - duration: 5000, - success: false, - }; - - expect(task.success).toBe(false); - expect(task.taskContent).toBeUndefined(); - }); - }); - - describe('SessionLifecycleEvent interface', () => { - it('should define proper SessionLifecycleEvent structure for created session', () => { - const event: SessionLifecycleEvent = { - id: 'lifecycle-1', - sessionId: 'session-1', - agentType: 'claude-code', - projectPath: '/test/project', - createdAt: Date.now(), - isRemote: false, - }; - - expect(event.id).toBe('lifecycle-1'); - expect(event.sessionId).toBe('session-1'); - expect(event.agentType).toBe('claude-code'); - expect(event.closedAt).toBeUndefined(); - expect(event.duration).toBeUndefined(); - }); - - it('should define proper SessionLifecycleEvent structure for closed session', () => { - // Use fixed timestamps to avoid race conditions from multiple Date.now() calls - const createdAt = 1700000000000; // Fixed timestamp - const closedAt = 1700003600000; // Exactly 1 hour later - const 
event: SessionLifecycleEvent = { - id: 'lifecycle-2', - sessionId: 'session-2', - agentType: 'claude-code', - projectPath: '/test/project', - createdAt, - closedAt, - duration: closedAt - createdAt, - isRemote: true, - }; - - expect(event.closedAt).toBe(closedAt); - expect(event.duration).toBe(3600000); - expect(event.isRemote).toBe(true); - }); - - it('should allow optional fields to be undefined', () => { - const event: SessionLifecycleEvent = { - id: 'lifecycle-3', - sessionId: 'session-3', - agentType: 'opencode', - createdAt: Date.now(), - }; - - expect(event.projectPath).toBeUndefined(); - expect(event.closedAt).toBeUndefined(); - expect(event.duration).toBeUndefined(); - expect(event.isRemote).toBeUndefined(); - }); - }); - - describe('StatsTimeRange type', () => { - it('should accept valid time ranges', () => { - const ranges: StatsTimeRange[] = ['day', 'week', 'month', 'year', 'all']; - - expect(ranges).toHaveLength(5); - expect(ranges).toContain('day'); - expect(ranges).toContain('all'); - }); - }); - - describe('StatsFilters interface', () => { - it('should allow partial filters', () => { - const filters1: StatsFilters = { agentType: 'claude-code' }; - const filters2: StatsFilters = { source: 'user' }; - const filters3: StatsFilters = { - agentType: 'opencode', - source: 'auto', - projectPath: '/test', - }; - - expect(filters1.agentType).toBe('claude-code'); - expect(filters2.source).toBe('user'); - expect(filters3.projectPath).toBe('/test'); - }); - }); - - describe('StatsAggregation interface', () => { - it('should define proper aggregation structure', () => { - const aggregation: StatsAggregation = { - totalQueries: 100, - totalDuration: 500000, - avgDuration: 5000, - byAgent: { - 'claude-code': { count: 70, duration: 350000 }, - opencode: { count: 30, duration: 150000 }, - }, - bySource: { user: 60, auto: 40 }, - byLocation: { local: 80, remote: 20 }, - byDay: [ - { date: '2024-01-01', count: 10, duration: 50000 }, - { date: '2024-01-02', count: 15, 
duration: 75000 }, - ], - byHour: [ - { hour: 9, count: 20, duration: 100000 }, - { hour: 10, count: 25, duration: 125000 }, - ], - // Session lifecycle fields - totalSessions: 15, - sessionsByAgent: { - 'claude-code': 10, - opencode: 5, - }, - sessionsByDay: [ - { date: '2024-01-01', count: 3 }, - { date: '2024-01-02', count: 5 }, - ], - avgSessionDuration: 1800000, - }; - - expect(aggregation.totalQueries).toBe(100); - expect(aggregation.byAgent['claude-code'].count).toBe(70); - expect(aggregation.bySource.user).toBe(60); - expect(aggregation.byDay).toHaveLength(2); - // Session lifecycle assertions - expect(aggregation.totalSessions).toBe(15); - expect(aggregation.sessionsByAgent['claude-code']).toBe(10); - expect(aggregation.sessionsByDay).toHaveLength(2); - expect(aggregation.avgSessionDuration).toBe(1800000); - }); - }); -}); - -describe('StatsDB class (mocked)', () => { - beforeEach(() => { - vi.clearAllMocks(); - lastDbPath = null; - mockDb.pragma.mockReturnValue([{ user_version: 0 }]); - mockDb.prepare.mockReturnValue(mockStatement); - mockStatement.run.mockReturnValue({ changes: 1 }); - mockStatement.get.mockReturnValue({ count: 0, total_duration: 0 }); - mockStatement.all.mockReturnValue([]); - mockFsExistsSync.mockReturnValue(true); - mockFsMkdirSync.mockClear(); - }); - - afterEach(() => { - vi.resetModules(); - }); - - describe('module exports', () => { - it('should export StatsDB class', async () => { - const { StatsDB } = await import('../../main/stats-db'); - expect(StatsDB).toBeDefined(); - expect(typeof StatsDB).toBe('function'); - }); - - it('should export singleton functions', async () => { - const { getStatsDB, initializeStatsDB, closeStatsDB } = await import('../../main/stats-db'); - expect(getStatsDB).toBeDefined(); - expect(initializeStatsDB).toBeDefined(); - expect(closeStatsDB).toBeDefined(); - }); - }); - - describe('StatsDB instantiation', () => { - it('should create instance without initialization', async () => { - const { StatsDB } = 
await import('../../main/stats-db'); - const db = new StatsDB(); - - expect(db).toBeDefined(); - expect(db.isReady()).toBe(false); - }); - - it('should return database path', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - - expect(db.getDbPath()).toContain('stats.db'); - }); - }); - - describe('initialization', () => { - it('should initialize database and set isReady to true', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - - db.initialize(); - - expect(db.isReady()).toBe(true); - }); - - it('should enable WAL mode', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - - db.initialize(); - - expect(mockDb.pragma).toHaveBeenCalledWith('journal_mode = WAL'); - }); - - it('should run v1 migration for fresh database', async () => { - mockDb.pragma.mockImplementation((sql: string) => { - if (sql === 'user_version') return [{ user_version: 0 }]; - return undefined; - }); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - // Should set user_version to 1 - expect(mockDb.pragma).toHaveBeenCalledWith('user_version = 1'); - }); - - it('should skip migration for already migrated database', async () => { - mockDb.pragma.mockImplementation((sql: string) => { - if (sql === 'user_version') return [{ user_version: 1 }]; - return undefined; - }); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - // Should NOT set user_version (no migration needed) - expect(mockDb.pragma).not.toHaveBeenCalledWith('user_version = 1'); - }); - - it('should create _migrations table on initialization', async () => { - mockDb.pragma.mockImplementation((sql: string) => { - if (sql === 'user_version') return [{ user_version: 0 }]; - return undefined; - }); - - const { StatsDB } = await import('../../main/stats-db'); - const db = 
new StatsDB(); - db.initialize(); - - // Should have prepared the CREATE TABLE IF NOT EXISTS _migrations statement - expect(mockDb.prepare).toHaveBeenCalledWith( - expect.stringContaining('CREATE TABLE IF NOT EXISTS _migrations') - ); - }); - - it('should record successful migration in _migrations table', async () => { - mockDb.pragma.mockImplementation((sql: string) => { - if (sql === 'user_version') return [{ user_version: 0 }]; - return undefined; - }); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - // Should have inserted a success record into _migrations - expect(mockDb.prepare).toHaveBeenCalledWith( - expect.stringContaining('INSERT OR REPLACE INTO _migrations') - ); - }); - - it('should use transaction for migration atomicity', async () => { - mockDb.pragma.mockImplementation((sql: string) => { - if (sql === 'user_version') return [{ user_version: 0 }]; - return undefined; - }); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - // Should have used transaction - expect(mockDb.transaction).toHaveBeenCalled(); - }); - }); - - describe('migration system API', () => { - beforeEach(() => { - vi.clearAllMocks(); - mockDb.pragma.mockImplementation((sql: string) => { - if (sql === 'user_version') return [{ user_version: 1 }]; - return undefined; - }); - mockDb.prepare.mockReturnValue(mockStatement); - mockStatement.run.mockReturnValue({ changes: 1 }); - mockStatement.get.mockReturnValue(null); - mockStatement.all.mockReturnValue([]); - mockFsExistsSync.mockReturnValue(true); - }); - - afterEach(() => { - vi.resetModules(); - }); - - it('should return current version via getCurrentVersion()', async () => { - mockDb.pragma.mockImplementation((sql: string) => { - if (sql === 'user_version') return [{ user_version: 1 }]; - return undefined; - }); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - 
db.initialize(); - - expect(db.getCurrentVersion()).toBe(1); - }); - - it('should return target version via getTargetVersion()', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - // Currently we have version 3 migration (v1: initial schema, v2: is_remote column, v3: session_lifecycle table) - expect(db.getTargetVersion()).toBe(3); - }); - - it('should return false from hasPendingMigrations() when up to date', async () => { - mockDb.pragma.mockImplementation((sql: string) => { - if (sql === 'user_version') return [{ user_version: 3 }]; - return undefined; - }); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - expect(db.hasPendingMigrations()).toBe(false); - }); - - it('should correctly identify pending migrations based on version difference', async () => { - // This test verifies the hasPendingMigrations() logic - // by checking current version < target version - - // Simulate a database that's already at version 3 (target version) - let currentVersion = 3; - mockDb.pragma.mockImplementation((sql: string) => { - if (sql === 'user_version') return [{ user_version: currentVersion }]; - // Handle version updates from migration - if (sql.startsWith('user_version = ')) { - currentVersion = parseInt(sql.replace('user_version = ', '')); - } - return undefined; - }); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - // At version 3, target is 3, so no pending migrations - expect(db.getCurrentVersion()).toBe(3); - expect(db.getTargetVersion()).toBe(3); - expect(db.hasPendingMigrations()).toBe(false); - }); - - it('should return empty array from getMigrationHistory() when no _migrations table', async () => { - mockStatement.get.mockReturnValue(null); // No table exists - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - 
const history = db.getMigrationHistory(); - expect(history).toEqual([]); - }); - - it('should return migration records from getMigrationHistory()', async () => { - const mockMigrationRows = [ - { - version: 1, - description: 'Initial schema', - applied_at: 1704067200000, - status: 'success' as const, - error_message: null, - }, - ]; - - mockStatement.get.mockReturnValue({ name: '_migrations' }); // Table exists - mockStatement.all.mockReturnValue(mockMigrationRows); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const history = db.getMigrationHistory(); - expect(history).toHaveLength(1); - expect(history[0]).toEqual({ - version: 1, - description: 'Initial schema', - appliedAt: 1704067200000, - status: 'success', - errorMessage: undefined, - }); - }); - - it('should include errorMessage in migration history for failed migrations', async () => { - const mockMigrationRows = [ - { - version: 2, - description: 'Add new column', - applied_at: 1704067200000, - status: 'failed' as const, - error_message: 'SQLITE_ERROR: duplicate column name', - }, - ]; - - mockStatement.get.mockReturnValue({ name: '_migrations' }); - mockStatement.all.mockReturnValue(mockMigrationRows); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const history = db.getMigrationHistory(); - expect(history[0].status).toBe('failed'); - expect(history[0].errorMessage).toBe('SQLITE_ERROR: duplicate column name'); - }); - }); - - describe('error handling', () => { - it('should throw when calling insertQueryEvent before initialization', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - - expect(() => - db.insertQueryEvent({ - sessionId: 'test', - agentType: 'claude-code', - source: 'user', - startTime: Date.now(), - duration: 1000, - }) - ).toThrow('Database not initialized'); - }); - - it('should throw when calling getQueryEvents 
before initialization', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - - expect(() => db.getQueryEvents('day')).toThrow('Database not initialized'); - }); - - it('should throw when calling getAggregatedStats before initialization', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - - expect(() => db.getAggregatedStats('week')).toThrow('Database not initialized'); - }); - }); - - describe('query events', () => { - it('should insert a query event and return an id', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const eventId = db.insertQueryEvent({ - sessionId: 'session-1', - agentType: 'claude-code', - source: 'user', - startTime: Date.now(), - duration: 5000, - projectPath: '/test/project', - tabId: 'tab-1', - }); - - expect(eventId).toBeDefined(); - expect(typeof eventId).toBe('string'); - expect(mockStatement.run).toHaveBeenCalled(); - }); - - it('should retrieve query events within time range', async () => { - mockStatement.all.mockReturnValue([ - { - id: 'event-1', - session_id: 'session-1', - agent_type: 'claude-code', - source: 'user', - start_time: Date.now(), - duration: 5000, - project_path: '/test', - tab_id: 'tab-1', - }, - ]); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const events = db.getQueryEvents('day'); - - expect(events).toHaveLength(1); - expect(events[0].sessionId).toBe('session-1'); - expect(events[0].agentType).toBe('claude-code'); - }); - }); - - describe('close', () => { - it('should close the database connection', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - db.close(); - - expect(mockDb.close).toHaveBeenCalled(); - expect(db.isReady()).toBe(false); - }); - }); -}); - -/** - * Database file creation verification 
tests - * - * These tests verify that the database file is created at the correct path - * in the user's application data directory on first launch. - */ -describe('Database file creation on first launch', () => { - beforeEach(() => { - vi.clearAllMocks(); - lastDbPath = null; - mockDb.pragma.mockReturnValue([{ user_version: 0 }]); - mockDb.prepare.mockReturnValue(mockStatement); - mockFsExistsSync.mockReturnValue(true); - mockFsMkdirSync.mockClear(); - }); - - afterEach(() => { - vi.resetModules(); - }); - - describe('database path computation', () => { - it('should compute database path using electron app.getPath("userData")', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - - const dbPath = db.getDbPath(); - - // Verify the path is in the userData directory - expect(dbPath).toContain(mockUserDataPath); - expect(dbPath).toContain('stats.db'); - }); - - it('should create database file at userData/stats.db path', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - // Verify better-sqlite3 was called with the correct path - expect(lastDbPath).toBe(path.join(mockUserDataPath, 'stats.db')); - }); - - it('should use platform-appropriate userData path', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - - // The path should be absolute and contain stats.db - const dbPath = db.getDbPath(); - expect(path.isAbsolute(dbPath)).toBe(true); - expect(path.basename(dbPath)).toBe('stats.db'); - }); - }); - - describe('directory creation', () => { - it('should create userData directory if it does not exist', async () => { - // Simulate directory not existing - mockFsExistsSync.mockReturnValue(false); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - // Verify mkdirSync was called with recursive option - 
expect(mockFsMkdirSync).toHaveBeenCalledWith(mockUserDataPath, { recursive: true }); - }); - - it('should not create directory if it already exists', async () => { - // Simulate directory already existing - mockFsExistsSync.mockReturnValue(true); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - // Verify mkdirSync was NOT called - expect(mockFsMkdirSync).not.toHaveBeenCalled(); - }); - }); - - describe('database initialization', () => { - it('should open database connection on initialize', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - - expect(db.isReady()).toBe(false); - db.initialize(); - expect(db.isReady()).toBe(true); - }); - - it('should only initialize once (idempotent)', async () => { - mockDb.pragma.mockClear(); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - - db.initialize(); - const firstCallCount = mockDb.pragma.mock.calls.length; - - db.initialize(); // Second call should be a no-op - const secondCallCount = mockDb.pragma.mock.calls.length; - - expect(secondCallCount).toBe(firstCallCount); - }); - - it('should create all three tables on fresh database', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - // Verify prepare was called with CREATE TABLE statements - const prepareCalls = mockDb.prepare.mock.calls.map((call) => call[0]); - - // Check for query_events table - expect( - prepareCalls.some((sql: string) => sql.includes('CREATE TABLE IF NOT EXISTS query_events')) - ).toBe(true); - - // Check for auto_run_sessions table - expect( - prepareCalls.some((sql: string) => - sql.includes('CREATE TABLE IF NOT EXISTS auto_run_sessions') - ) - ).toBe(true); - - // Check for auto_run_tasks table - expect( - prepareCalls.some((sql: string) => - sql.includes('CREATE TABLE IF NOT EXISTS auto_run_tasks') - ) - ).toBe(true); - }); 
- - it('should create all required indexes', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const prepareCalls = mockDb.prepare.mock.calls.map((call) => call[0]); - - // Verify all 7 indexes are created - const expectedIndexes = [ - 'idx_query_start_time', - 'idx_query_agent_type', - 'idx_query_source', - 'idx_query_session', - 'idx_auto_session_start', - 'idx_task_auto_session', - 'idx_task_start', - ]; - - for (const indexName of expectedIndexes) { - expect(prepareCalls.some((sql: string) => sql.includes(indexName))).toBe(true); - } - }); - - it('should return success result on normal initialization', async () => { - // Ensure fresh database (file doesn't exist) for this test - mockFsExistsSync.mockImplementation((p: string) => { - // Return false for the database file so it creates a new one - if (typeof p === 'string' && p.includes('stats.db')) return false; - return true; // Directory exists - }); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - - const result = db.initialize(); - - expect(result.success).toBe(true); - expect(result.wasReset).toBe(false); - expect(result.userMessage).toBeUndefined(); - expect(result.error).toBeUndefined(); - - // Restore default mock behavior - mockFsExistsSync.mockReturnValue(true); - }); - - it('should return success with wasReset=false on subsequent initializations', async () => { - // Ensure fresh database (file doesn't exist) for first init - mockFsExistsSync.mockImplementation((p: string) => { - if (typeof p === 'string' && p.includes('stats.db')) return false; - return true; - }); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - - db.initialize(); - const result = db.initialize(); // Second call - - expect(result.success).toBe(true); - expect(result.wasReset).toBe(false); - - // Restore default mock behavior - mockFsExistsSync.mockReturnValue(true); - }); - }); 
- - describe('singleton pattern', () => { - it('should return same instance from getStatsDB', async () => { - const { getStatsDB, closeStatsDB } = await import('../../main/stats-db'); - - const instance1 = getStatsDB(); - const instance2 = getStatsDB(); - - expect(instance1).toBe(instance2); - - // Cleanup - closeStatsDB(); - }); - - it('should initialize database via initializeStatsDB', async () => { - const { initializeStatsDB, getStatsDB, closeStatsDB } = await import('../../main/stats-db'); - - initializeStatsDB(); - const db = getStatsDB(); - - expect(db.isReady()).toBe(true); - - // Cleanup - closeStatsDB(); - }); - - it('should close database and reset singleton via closeStatsDB', async () => { - const { initializeStatsDB, getStatsDB, closeStatsDB } = await import('../../main/stats-db'); - - initializeStatsDB(); - const dbBefore = getStatsDB(); - expect(dbBefore.isReady()).toBe(true); - - closeStatsDB(); - - // After close, a new instance should be returned - const dbAfter = getStatsDB(); - expect(dbAfter).not.toBe(dbBefore); - expect(dbAfter.isReady()).toBe(false); - }); - }); -}); - -/** - * Auto Run session and task recording tests - */ -describe('Auto Run session and task recording', () => { - beforeEach(() => { - vi.clearAllMocks(); - lastDbPath = null; - mockDb.pragma.mockReturnValue([{ user_version: 0 }]); - mockDb.prepare.mockReturnValue(mockStatement); - mockStatement.run.mockReturnValue({ changes: 1 }); - mockFsExistsSync.mockReturnValue(true); - }); - - afterEach(() => { - vi.resetModules(); - }); - - describe('Auto Run sessions', () => { - it('should insert Auto Run session and return id', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const sessionId = db.insertAutoRunSession({ - sessionId: 'session-1', - agentType: 'claude-code', - documentPath: '/docs/TASK-1.md', - startTime: Date.now(), - duration: 0, - tasksTotal: 5, - tasksCompleted: 0, - projectPath: '/project', - 
}); - - expect(sessionId).toBeDefined(); - expect(typeof sessionId).toBe('string'); - expect(mockStatement.run).toHaveBeenCalled(); - }); - - it('should update Auto Run session on completion', async () => { - mockStatement.run.mockReturnValue({ changes: 1 }); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const updated = db.updateAutoRunSession('session-id', { - duration: 60000, - tasksCompleted: 5, - }); - - expect(updated).toBe(true); - expect(mockStatement.run).toHaveBeenCalled(); - }); - - it('should retrieve Auto Run sessions within time range', async () => { - mockStatement.all.mockReturnValue([ - { - id: 'auto-1', - session_id: 'session-1', - agent_type: 'claude-code', - document_path: '/docs/TASK-1.md', - start_time: Date.now(), - duration: 60000, - tasks_total: 5, - tasks_completed: 5, - project_path: '/project', - }, - ]); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const sessions = db.getAutoRunSessions('week'); - - expect(sessions).toHaveLength(1); - expect(sessions[0].sessionId).toBe('session-1'); - expect(sessions[0].tasksTotal).toBe(5); - }); - }); - - describe('Auto Run tasks', () => { - it('should insert Auto Run task with success=true', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const taskId = db.insertAutoRunTask({ - autoRunSessionId: 'auto-1', - sessionId: 'session-1', - agentType: 'claude-code', - taskIndex: 0, - taskContent: 'First task', - startTime: Date.now(), - duration: 10000, - success: true, - }); - - expect(taskId).toBeDefined(); - - // Verify success was converted to 1 for SQLite - const runCall = mockStatement.run.mock.calls[mockStatement.run.mock.calls.length - 1]; - expect(runCall[8]).toBe(1); // success parameter (last one) - }); - - it('should insert Auto Run task with success=false', async () => { - const { StatsDB } = 
await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - db.insertAutoRunTask({ - autoRunSessionId: 'auto-1', - sessionId: 'session-1', - agentType: 'claude-code', - taskIndex: 1, - taskContent: 'Failed task', - startTime: Date.now(), - duration: 5000, - success: false, - }); - - // Verify success was converted to 0 for SQLite - const runCall = mockStatement.run.mock.calls[mockStatement.run.mock.calls.length - 1]; - expect(runCall[8]).toBe(0); // success parameter (last one) - }); - - it('should retrieve tasks for Auto Run session ordered by task_index', async () => { - mockStatement.all.mockReturnValue([ - { - id: 'task-1', - auto_run_session_id: 'auto-1', - session_id: 'session-1', - agent_type: 'claude-code', - task_index: 0, - task_content: 'First task', - start_time: Date.now(), - duration: 10000, - success: 1, - }, - { - id: 'task-2', - auto_run_session_id: 'auto-1', - session_id: 'session-1', - agent_type: 'claude-code', - task_index: 1, - task_content: 'Second task', - start_time: Date.now(), - duration: 15000, - success: 1, - }, - ]); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const tasks = db.getAutoRunTasks('auto-1'); - - expect(tasks).toHaveLength(2); - expect(tasks[0].taskIndex).toBe(0); - expect(tasks[1].taskIndex).toBe(1); - expect(tasks[0].success).toBe(true); - }); - }); -}); - -/** - * Aggregation and filtering tests - */ -describe('Stats aggregation and filtering', () => { - beforeEach(() => { - vi.clearAllMocks(); - mockDb.pragma.mockReturnValue([{ user_version: 0 }]); - mockDb.prepare.mockReturnValue(mockStatement); - mockStatement.run.mockReturnValue({ changes: 1 }); - mockFsExistsSync.mockReturnValue(true); - }); - - afterEach(() => { - vi.resetModules(); - }); - - describe('time range filtering', () => { - it('should filter query events by day range', async () => { - mockStatement.all.mockReturnValue([]); - - const { StatsDB } = await 
import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - db.getQueryEvents('day'); - - // Verify the SQL includes time filter - const prepareCall = mockDb.prepare.mock.calls.find((call) => - (call[0] as string).includes('SELECT * FROM query_events') - ); - expect(prepareCall).toBeDefined(); - }); - - it('should filter with agentType filter', async () => { - mockStatement.all.mockReturnValue([]); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - db.getQueryEvents('week', { agentType: 'claude-code' }); - - // Verify the SQL includes agent_type filter - expect(mockStatement.all).toHaveBeenCalled(); - }); - - it('should filter with source filter', async () => { - mockStatement.all.mockReturnValue([]); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - db.getQueryEvents('month', { source: 'auto' }); - - // Verify the SQL includes source filter - expect(mockStatement.all).toHaveBeenCalled(); - }); - - it('should filter with projectPath filter', async () => { - mockStatement.all.mockReturnValue([]); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - db.getQueryEvents('year', { projectPath: '/test/project' }); - - // Verify the SQL includes project_path filter - expect(mockStatement.all).toHaveBeenCalled(); - }); - - it('should filter with sessionId filter', async () => { - mockStatement.all.mockReturnValue([]); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - db.getQueryEvents('all', { sessionId: 'session-123' }); - - // Verify the SQL includes session_id filter - expect(mockStatement.all).toHaveBeenCalled(); - }); - - it('should combine multiple filters', async () => { - mockStatement.all.mockReturnValue([]); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - 
db.initialize(); - - db.getQueryEvents('week', { - agentType: 'claude-code', - source: 'user', - projectPath: '/test', - sessionId: 'session-1', - }); - - // Verify all parameters were passed - expect(mockStatement.all).toHaveBeenCalled(); - }); - }); - - describe('aggregation queries', () => { - it('should compute aggregated stats correctly', async () => { - mockStatement.get.mockReturnValue({ count: 100, total_duration: 500000 }); - mockStatement.all.mockReturnValue([ - { agent_type: 'claude-code', count: 70, duration: 350000 }, - { agent_type: 'opencode', count: 30, duration: 150000 }, - ]); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const stats = db.getAggregatedStats('week'); - - expect(stats.totalQueries).toBe(100); - expect(stats.totalDuration).toBe(500000); - expect(stats.avgDuration).toBe(5000); - }); - - it('should handle empty results for aggregation', async () => { - mockStatement.get.mockReturnValue({ count: 0, total_duration: 0 }); - mockStatement.all.mockReturnValue([]); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const stats = db.getAggregatedStats('day'); - - expect(stats.totalQueries).toBe(0); - expect(stats.avgDuration).toBe(0); - expect(stats.byAgent).toEqual({}); - }); - }); - - describe('CSV export', () => { - it('should export query events to CSV format', async () => { - const now = Date.now(); - mockStatement.all.mockReturnValue([ - { - id: 'event-1', - session_id: 'session-1', - agent_type: 'claude-code', - source: 'user', - start_time: now, - duration: 5000, - project_path: '/test', - tab_id: 'tab-1', - }, - ]); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const csv = db.exportToCsv('week'); - - // Verify CSV structure - expect(csv).toContain('id,sessionId,agentType,source,startTime,duration,projectPath,tabId'); - 
expect(csv).toContain('event-1'); - expect(csv).toContain('session-1'); - expect(csv).toContain('claude-code'); - }); - - it('should handle empty data for CSV export', async () => { - mockStatement.all.mockReturnValue([]); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const csv = db.exportToCsv('day'); - - // Should only contain headers - expect(csv).toBe('id,sessionId,agentType,source,startTime,duration,projectPath,tabId'); - }); - }); -}); - -/** - * Interactive session query event recording tests - * - * These tests verify that query events are properly recorded for interactive - * (user-initiated) sessions, which is the core validation for: - * - [ ] Verify query events are recorded for interactive sessions - */ -describe('Query events recorded for interactive sessions', () => { - beforeEach(() => { - vi.clearAllMocks(); - mockDb.pragma.mockReturnValue([{ user_version: 1 }]); - mockDb.prepare.mockReturnValue(mockStatement); - mockStatement.run.mockReturnValue({ changes: 1 }); - mockStatement.all.mockReturnValue([]); - mockFsExistsSync.mockReturnValue(true); - }); - - afterEach(() => { - vi.resetModules(); - }); - - describe('user-initiated interactive session recording', () => { - it('should record query event with source="user" for interactive session', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const startTime = Date.now(); - const eventId = db.insertQueryEvent({ - sessionId: 'interactive-session-1', - agentType: 'claude-code', - source: 'user', // Interactive session is always 'user' - startTime, - duration: 5000, - projectPath: '/Users/test/myproject', - tabId: 'tab-1', - }); - - expect(eventId).toBeDefined(); - expect(typeof eventId).toBe('string'); - - // Verify the INSERT was called with correct parameters - const runCalls = mockStatement.run.mock.calls; - const lastCall = runCalls[runCalls.length - 1]; - - // 
Parameters: id, session_id, agent_type, source, start_time, duration, project_path, tab_id - expect(lastCall[1]).toBe('interactive-session-1'); // session_id - expect(lastCall[2]).toBe('claude-code'); // agent_type - expect(lastCall[3]).toBe('user'); // source - expect(lastCall[4]).toBe(startTime); // start_time - expect(lastCall[5]).toBe(5000); // duration - expect(lastCall[6]).toBe('/Users/test/myproject'); // project_path - expect(lastCall[7]).toBe('tab-1'); // tab_id - }); - - it('should record interactive query without optional fields', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const startTime = Date.now(); - const eventId = db.insertQueryEvent({ - sessionId: 'minimal-session', - agentType: 'claude-code', - source: 'user', - startTime, - duration: 3000, - // projectPath and tabId are optional - }); - - expect(eventId).toBeDefined(); - - // Verify NULL values for optional fields - const runCalls = mockStatement.run.mock.calls; - const lastCall = runCalls[runCalls.length - 1]; - expect(lastCall[6]).toBeNull(); // project_path - expect(lastCall[7]).toBeNull(); // tab_id - }); - - it('should record multiple interactive queries for the same session', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - // Clear mocks after initialize() to count only test operations - mockStatement.run.mockClear(); - - const baseTime = Date.now(); - - // First query - const id1 = db.insertQueryEvent({ - sessionId: 'multi-query-session', - agentType: 'claude-code', - source: 'user', - startTime: baseTime, - duration: 5000, - projectPath: '/project', - tabId: 'tab-1', - }); - - // Second query (same session, different tab) - const id2 = db.insertQueryEvent({ - sessionId: 'multi-query-session', - agentType: 'claude-code', - source: 'user', - startTime: baseTime + 10000, - duration: 3000, - projectPath: '/project', - tabId: 'tab-2', - 
}); - - // Third query (same session, same tab as first) - const id3 = db.insertQueryEvent({ - sessionId: 'multi-query-session', - agentType: 'claude-code', - source: 'user', - startTime: baseTime + 20000, - duration: 7000, - projectPath: '/project', - tabId: 'tab-1', - }); - - // All should have unique IDs - expect(id1).not.toBe(id2); - expect(id2).not.toBe(id3); - expect(id1).not.toBe(id3); - - // All should be recorded (3 INSERT calls after initialization) - expect(mockStatement.run).toHaveBeenCalledTimes(3); - }); - - it('should record interactive queries with different agent types', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - // Clear mocks after initialize() to count only test operations - mockStatement.run.mockClear(); - - const startTime = Date.now(); - - // Claude Code query - const claudeId = db.insertQueryEvent({ - sessionId: 'session-1', - agentType: 'claude-code', - source: 'user', - startTime, - duration: 5000, - }); - - // OpenCode query - const opencodeId = db.insertQueryEvent({ - sessionId: 'session-2', - agentType: 'opencode', - source: 'user', - startTime: startTime + 10000, - duration: 3000, - }); - - // Codex query - const codexId = db.insertQueryEvent({ - sessionId: 'session-3', - agentType: 'codex', - source: 'user', - startTime: startTime + 20000, - duration: 4000, - }); - - expect(claudeId).toBeDefined(); - expect(opencodeId).toBeDefined(); - expect(codexId).toBeDefined(); - - // Verify different agent types were recorded - const runCalls = mockStatement.run.mock.calls; - expect(runCalls[0][2]).toBe('claude-code'); - expect(runCalls[1][2]).toBe('opencode'); - expect(runCalls[2][2]).toBe('codex'); - }); - }); - - describe('retrieval of interactive session query events', () => { - it('should retrieve interactive query events filtered by source=user', async () => { - const now = Date.now(); - mockStatement.all.mockReturnValue([ - { - id: 'event-1', - session_id: 
'session-1', - agent_type: 'claude-code', - source: 'user', - start_time: now - 1000, - duration: 5000, - project_path: '/project', - tab_id: 'tab-1', - }, - { - id: 'event-2', - session_id: 'session-2', - agent_type: 'claude-code', - source: 'user', - start_time: now - 2000, - duration: 3000, - project_path: '/project', - tab_id: 'tab-2', - }, - ]); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - // Filter by source='user' to get only interactive sessions - const events = db.getQueryEvents('day', { source: 'user' }); - - expect(events).toHaveLength(2); - expect(events[0].source).toBe('user'); - expect(events[1].source).toBe('user'); - expect(events[0].sessionId).toBe('session-1'); - expect(events[1].sessionId).toBe('session-2'); - }); - - it('should retrieve interactive query events filtered by sessionId', async () => { - const now = Date.now(); - mockStatement.all.mockReturnValue([ - { - id: 'event-1', - session_id: 'target-session', - agent_type: 'claude-code', - source: 'user', - start_time: now - 1000, - duration: 5000, - project_path: '/project', - tab_id: 'tab-1', - }, - ]); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const events = db.getQueryEvents('week', { sessionId: 'target-session' }); - - expect(events).toHaveLength(1); - expect(events[0].sessionId).toBe('target-session'); - }); - - it('should retrieve interactive query events filtered by projectPath', async () => { - const now = Date.now(); - mockStatement.all.mockReturnValue([ - { - id: 'event-1', - session_id: 'session-1', - agent_type: 'claude-code', - source: 'user', - start_time: now - 1000, - duration: 5000, - project_path: '/specific/project', - tab_id: 'tab-1', - }, - ]); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const events = db.getQueryEvents('month', { projectPath: '/specific/project' }); - 
- expect(events).toHaveLength(1); - expect(events[0].projectPath).toBe('/specific/project'); - }); - - it('should correctly map database columns to QueryEvent interface fields', async () => { - const now = Date.now(); - mockStatement.all.mockReturnValue([ - { - id: 'db-event-id', - session_id: 'db-session-id', - agent_type: 'claude-code', - source: 'user', - start_time: now, - duration: 5000, - project_path: '/project/path', - tab_id: 'tab-123', - }, - ]); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const events = db.getQueryEvents('day'); - - expect(events).toHaveLength(1); - const event = events[0]; - - // Verify snake_case -> camelCase mapping - expect(event.id).toBe('db-event-id'); - expect(event.sessionId).toBe('db-session-id'); - expect(event.agentType).toBe('claude-code'); - expect(event.source).toBe('user'); - expect(event.startTime).toBe(now); - expect(event.duration).toBe(5000); - expect(event.projectPath).toBe('/project/path'); - expect(event.tabId).toBe('tab-123'); - }); - }); - - describe('aggregation includes interactive session data', () => { - it('should include interactive sessions in aggregated stats', async () => { - mockStatement.get.mockReturnValue({ count: 10, total_duration: 50000 }); - - // The aggregation calls mockStatement.all multiple times for different queries - // We return based on the call sequence: byAgent, bySource, byDay - let callCount = 0; - mockStatement.all.mockImplementation(() => { - callCount++; - if (callCount === 1) { - // byAgent breakdown - return [{ agent_type: 'claude-code', count: 10, duration: 50000 }]; - } - if (callCount === 2) { - // bySource breakdown - return [{ source: 'user', count: 10 }]; - } - // byDay breakdown - return [{ date: '2024-12-28', count: 10, duration: 50000 }]; - }); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const stats = db.getAggregatedStats('week'); - - 
expect(stats.totalQueries).toBe(10); - expect(stats.totalDuration).toBe(50000); - expect(stats.avgDuration).toBe(5000); - expect(stats.bySource.user).toBe(10); - expect(stats.bySource.auto).toBe(0); - }); - - it('should correctly separate user vs auto queries in bySource', async () => { - mockStatement.get.mockReturnValue({ count: 15, total_duration: 75000 }); - - // Return by-source breakdown with both user and auto on second call - let callCount = 0; - mockStatement.all.mockImplementation(() => { - callCount++; - if (callCount === 2) { - // bySource breakdown - return [ - { source: 'user', count: 10 }, - { source: 'auto', count: 5 }, - ]; - } - return []; - }); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const stats = db.getAggregatedStats('month'); - - expect(stats.bySource.user).toBe(10); - expect(stats.bySource.auto).toBe(5); - }); - }); - - describe('timing accuracy for interactive sessions', () => { - it('should preserve exact startTime and duration values', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const exactStartTime = 1735344000000; // Specific timestamp - const exactDuration = 12345; // Specific duration in ms - - db.insertQueryEvent({ - sessionId: 'timing-test-session', - agentType: 'claude-code', - source: 'user', - startTime: exactStartTime, - duration: exactDuration, - }); - - const runCalls = mockStatement.run.mock.calls; - const lastCall = runCalls[runCalls.length - 1]; - - expect(lastCall[4]).toBe(exactStartTime); // Exact start_time preserved - expect(lastCall[5]).toBe(exactDuration); // Exact duration preserved - }); - - it('should handle zero duration (immediate responses)', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const eventId = db.insertQueryEvent({ - sessionId: 'zero-duration-session', - agentType: 
'claude-code', - source: 'user', - startTime: Date.now(), - duration: 0, // Zero duration is valid (e.g., cached response) - }); - - expect(eventId).toBeDefined(); - - const runCalls = mockStatement.run.mock.calls; - const lastCall = runCalls[runCalls.length - 1]; - expect(lastCall[5]).toBe(0); - }); - - it('should handle very long durations', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const longDuration = 10 * 60 * 1000; // 10 minutes in ms - - const eventId = db.insertQueryEvent({ - sessionId: 'long-duration-session', - agentType: 'claude-code', - source: 'user', - startTime: Date.now(), - duration: longDuration, - }); - - expect(eventId).toBeDefined(); - - const runCalls = mockStatement.run.mock.calls; - const lastCall = runCalls[runCalls.length - 1]; - expect(lastCall[5]).toBe(longDuration); - }); - }); -}); - -/** - * Comprehensive Auto Run session and task recording verification tests - * - * These tests verify the complete Auto Run tracking workflow: - * 1. Auto Run sessions are properly recorded when batch processing starts - * 2. Individual tasks within sessions are recorded with timing data - * 3. Sessions are updated correctly when batch processing completes - * 4. 
All data can be retrieved with proper field mapping - */ -describe('Auto Run sessions and tasks recorded correctly', () => { - beforeEach(() => { - vi.clearAllMocks(); - mockDb.pragma.mockReturnValue([{ user_version: 1 }]); - mockDb.prepare.mockReturnValue(mockStatement); - mockStatement.run.mockReturnValue({ changes: 1 }); - mockStatement.get.mockReturnValue({ count: 0, total_duration: 0 }); - mockStatement.all.mockReturnValue([]); - mockFsExistsSync.mockReturnValue(true); - }); - - afterEach(() => { - vi.resetModules(); - }); - - describe('Auto Run session lifecycle', () => { - it('should record Auto Run session with all required fields', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const startTime = Date.now(); - const sessionId = db.insertAutoRunSession({ - sessionId: 'maestro-session-123', - agentType: 'claude-code', - documentPath: 'Auto Run Docs/PHASE-1.md', - startTime, - duration: 0, // Duration is 0 at start - tasksTotal: 10, - tasksCompleted: 0, - projectPath: '/Users/test/my-project', - }); - - expect(sessionId).toBeDefined(); - expect(typeof sessionId).toBe('string'); - - // Verify all fields were passed correctly to the INSERT statement - const runCalls = mockStatement.run.mock.calls; - const lastCall = runCalls[runCalls.length - 1]; - - // INSERT parameters: id, session_id, agent_type, document_path, start_time, duration, tasks_total, tasks_completed, project_path - expect(lastCall[1]).toBe('maestro-session-123'); // session_id - expect(lastCall[2]).toBe('claude-code'); // agent_type - expect(lastCall[3]).toBe('Auto Run Docs/PHASE-1.md'); // document_path - expect(lastCall[4]).toBe(startTime); // start_time - expect(lastCall[5]).toBe(0); // duration (0 at start) - expect(lastCall[6]).toBe(10); // tasks_total - expect(lastCall[7]).toBe(0); // tasks_completed (0 at start) - expect(lastCall[8]).toBe('/Users/test/my-project'); // project_path - }); - - it('should record Auto 
Run session with multiple documents (comma-separated)', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const sessionId = db.insertAutoRunSession({ - sessionId: 'multi-doc-session', - agentType: 'claude-code', - documentPath: 'PHASE-1.md, PHASE-2.md, PHASE-3.md', - startTime: Date.now(), - duration: 0, - tasksTotal: 25, - tasksCompleted: 0, - projectPath: '/project', - }); - - expect(sessionId).toBeDefined(); - - const runCalls = mockStatement.run.mock.calls; - const lastCall = runCalls[runCalls.length - 1]; - expect(lastCall[3]).toBe('PHASE-1.md, PHASE-2.md, PHASE-3.md'); - }); - - it('should update Auto Run session duration and tasks on completion', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - // First, insert the session - const autoRunId = db.insertAutoRunSession({ - sessionId: 'session-to-update', - agentType: 'claude-code', - documentPath: 'TASKS.md', - startTime: Date.now() - 60000, // Started 1 minute ago - duration: 0, - tasksTotal: 5, - tasksCompleted: 0, - projectPath: '/project', - }); - - // Now update it with completion data - const updated = db.updateAutoRunSession(autoRunId, { - duration: 60000, // 1 minute - tasksCompleted: 5, - }); - - expect(updated).toBe(true); - - // Verify UPDATE was called - expect(mockStatement.run).toHaveBeenCalled(); - }); - - it('should update Auto Run session with partial completion (some tasks skipped)', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const autoRunId = db.insertAutoRunSession({ - sessionId: 'partial-session', - agentType: 'claude-code', - documentPath: 'COMPLEX-TASKS.md', - startTime: Date.now(), - duration: 0, - tasksTotal: 10, - tasksCompleted: 0, - projectPath: '/project', - }); - - // Update with partial completion (7 of 10 tasks) - const updated = 
db.updateAutoRunSession(autoRunId, { - duration: 120000, // 2 minutes - tasksCompleted: 7, - }); - - expect(updated).toBe(true); - }); - - it('should handle Auto Run session stopped by user (wasStopped)', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const autoRunId = db.insertAutoRunSession({ - sessionId: 'stopped-session', - agentType: 'claude-code', - documentPath: 'TASKS.md', - startTime: Date.now(), - duration: 0, - tasksTotal: 20, - tasksCompleted: 0, - projectPath: '/project', - }); - - // User stopped after 3 tasks - const updated = db.updateAutoRunSession(autoRunId, { - duration: 30000, // 30 seconds - tasksCompleted: 3, - }); - - expect(updated).toBe(true); - }); - }); - - describe('Auto Run task recording', () => { - it('should record individual task with all fields', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const taskStartTime = Date.now() - 5000; - const taskId = db.insertAutoRunTask({ - autoRunSessionId: 'auto-run-session-1', - sessionId: 'maestro-session-1', - agentType: 'claude-code', - taskIndex: 0, - taskContent: 'Implement user authentication module', - startTime: taskStartTime, - duration: 5000, - success: true, - }); - - expect(taskId).toBeDefined(); - - const runCalls = mockStatement.run.mock.calls; - const lastCall = runCalls[runCalls.length - 1]; - - // INSERT parameters: id, auto_run_session_id, session_id, agent_type, task_index, task_content, start_time, duration, success - expect(lastCall[1]).toBe('auto-run-session-1'); // auto_run_session_id - expect(lastCall[2]).toBe('maestro-session-1'); // session_id - expect(lastCall[3]).toBe('claude-code'); // agent_type - expect(lastCall[4]).toBe(0); // task_index - expect(lastCall[5]).toBe('Implement user authentication module'); // task_content - expect(lastCall[6]).toBe(taskStartTime); // start_time - expect(lastCall[7]).toBe(5000); // 
duration - expect(lastCall[8]).toBe(1); // success (true -> 1) - }); - - it('should record failed task with success=false', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - db.insertAutoRunTask({ - autoRunSessionId: 'auto-run-1', - sessionId: 'session-1', - agentType: 'claude-code', - taskIndex: 2, - taskContent: 'Fix complex edge case that requires manual intervention', - startTime: Date.now(), - duration: 10000, - success: false, // Task failed - }); - - const runCalls = mockStatement.run.mock.calls; - const lastCall = runCalls[runCalls.length - 1]; - expect(lastCall[8]).toBe(0); // success (false -> 0) - }); - - it('should record multiple tasks for same Auto Run session', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - // Clear mocks after initialize() to count only test operations - mockStatement.run.mockClear(); - - const autoRunSessionId = 'multi-task-session'; - const baseTime = Date.now(); - - // Task 0 - const task0Id = db.insertAutoRunTask({ - autoRunSessionId, - sessionId: 'session-1', - agentType: 'claude-code', - taskIndex: 0, - taskContent: 'Task 0: Initialize project', - startTime: baseTime, - duration: 3000, - success: true, - }); - - // Task 1 - const task1Id = db.insertAutoRunTask({ - autoRunSessionId, - sessionId: 'session-1', - agentType: 'claude-code', - taskIndex: 1, - taskContent: 'Task 1: Add dependencies', - startTime: baseTime + 3000, - duration: 5000, - success: true, - }); - - // Task 2 - const task2Id = db.insertAutoRunTask({ - autoRunSessionId, - sessionId: 'session-1', - agentType: 'claude-code', - taskIndex: 2, - taskContent: 'Task 2: Configure build system', - startTime: baseTime + 8000, - duration: 7000, - success: true, - }); - - // All tasks should have unique IDs - expect(task0Id).not.toBe(task1Id); - expect(task1Id).not.toBe(task2Id); - expect(task0Id).not.toBe(task2Id); - - // 
All 3 INSERT calls should have happened - expect(mockStatement.run).toHaveBeenCalledTimes(3); - }); - - it('should record task without optional taskContent', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const taskId = db.insertAutoRunTask({ - autoRunSessionId: 'auto-run-1', - sessionId: 'session-1', - agentType: 'claude-code', - taskIndex: 0, - // taskContent is omitted - startTime: Date.now(), - duration: 2000, - success: true, - }); - - expect(taskId).toBeDefined(); - - const runCalls = mockStatement.run.mock.calls; - const lastCall = runCalls[runCalls.length - 1]; - expect(lastCall[5]).toBeNull(); // task_content should be NULL - }); - }); - - describe('Auto Run session and task retrieval', () => { - it('should retrieve Auto Run sessions with proper field mapping', async () => { - const now = Date.now(); - mockStatement.all.mockReturnValue([ - { - id: 'auto-run-id-1', - session_id: 'session-1', - agent_type: 'claude-code', - document_path: 'PHASE-1.md', - start_time: now - 60000, - duration: 60000, - tasks_total: 10, - tasks_completed: 10, - project_path: '/project/path', - }, - { - id: 'auto-run-id-2', - session_id: 'session-2', - agent_type: 'opencode', - document_path: null, // No document path - start_time: now - 120000, - duration: 45000, - tasks_total: 5, - tasks_completed: 4, - project_path: null, - }, - ]); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const sessions = db.getAutoRunSessions('week'); - - expect(sessions).toHaveLength(2); - - // First session - all fields present - expect(sessions[0].id).toBe('auto-run-id-1'); - expect(sessions[0].sessionId).toBe('session-1'); - expect(sessions[0].agentType).toBe('claude-code'); - expect(sessions[0].documentPath).toBe('PHASE-1.md'); - expect(sessions[0].startTime).toBe(now - 60000); - expect(sessions[0].duration).toBe(60000); - 
expect(sessions[0].tasksTotal).toBe(10); - expect(sessions[0].tasksCompleted).toBe(10); - expect(sessions[0].projectPath).toBe('/project/path'); - - // Second session - optional fields are undefined - expect(sessions[1].id).toBe('auto-run-id-2'); - expect(sessions[1].documentPath).toBeUndefined(); - expect(sessions[1].projectPath).toBeUndefined(); - expect(sessions[1].tasksCompleted).toBe(4); - }); - - it('should retrieve tasks for Auto Run session with proper field mapping', async () => { - const now = Date.now(); - mockStatement.all.mockReturnValue([ - { - id: 'task-id-0', - auto_run_session_id: 'auto-run-1', - session_id: 'session-1', - agent_type: 'claude-code', - task_index: 0, - task_content: 'First task description', - start_time: now - 15000, - duration: 5000, - success: 1, - }, - { - id: 'task-id-1', - auto_run_session_id: 'auto-run-1', - session_id: 'session-1', - agent_type: 'claude-code', - task_index: 1, - task_content: null, // No content - start_time: now - 10000, - duration: 5000, - success: 1, - }, - { - id: 'task-id-2', - auto_run_session_id: 'auto-run-1', - session_id: 'session-1', - agent_type: 'claude-code', - task_index: 2, - task_content: 'Failed task', - start_time: now - 5000, - duration: 3000, - success: 0, // Failed - }, - ]); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const tasks = db.getAutoRunTasks('auto-run-1'); - - expect(tasks).toHaveLength(3); - - // First task - expect(tasks[0].id).toBe('task-id-0'); - expect(tasks[0].autoRunSessionId).toBe('auto-run-1'); - expect(tasks[0].sessionId).toBe('session-1'); - expect(tasks[0].agentType).toBe('claude-code'); - expect(tasks[0].taskIndex).toBe(0); - expect(tasks[0].taskContent).toBe('First task description'); - expect(tasks[0].startTime).toBe(now - 15000); - expect(tasks[0].duration).toBe(5000); - expect(tasks[0].success).toBe(true); // 1 -> true - - // Second task - no content - 
expect(tasks[1].taskContent).toBeUndefined(); - expect(tasks[1].success).toBe(true); - - // Third task - failed - expect(tasks[2].success).toBe(false); // 0 -> false - }); - - it('should return tasks ordered by task_index ASC', async () => { - // Return tasks in wrong order to verify sorting - mockStatement.all.mockReturnValue([ - { - id: 't2', - auto_run_session_id: 'ar1', - session_id: 's1', - agent_type: 'claude-code', - task_index: 2, - task_content: 'C', - start_time: 3, - duration: 1, - success: 1, - }, - { - id: 't0', - auto_run_session_id: 'ar1', - session_id: 's1', - agent_type: 'claude-code', - task_index: 0, - task_content: 'A', - start_time: 1, - duration: 1, - success: 1, - }, - { - id: 't1', - auto_run_session_id: 'ar1', - session_id: 's1', - agent_type: 'claude-code', - task_index: 1, - task_content: 'B', - start_time: 2, - duration: 1, - success: 1, - }, - ]); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const tasks = db.getAutoRunTasks('ar1'); - - // Should be returned as-is (the SQL query handles ordering) - // The mock returns them unsorted, but the real DB would sort them - expect(tasks).toHaveLength(3); - }); - }); - - describe('Auto Run time range filtering', () => { - it('should filter Auto Run sessions by day range', async () => { - mockStatement.all.mockReturnValue([]); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - db.getAutoRunSessions('day'); - - // Verify the query was prepared with time filter - const prepareCalls = mockDb.prepare.mock.calls; - const selectCall = prepareCalls.find((call) => - (call[0] as string).includes('SELECT * FROM auto_run_sessions') - ); - expect(selectCall).toBeDefined(); - expect(selectCall![0]).toContain('start_time >= ?'); - }); - - it('should return all Auto Run sessions for "all" time range', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = 
new StatsDB(); - db.initialize(); - - mockStatement.all.mockReturnValue([ - { - id: 'old', - session_id: 's1', - agent_type: 'claude-code', - document_path: null, - start_time: 1000, - duration: 100, - tasks_total: 1, - tasks_completed: 1, - project_path: null, - }, - { - id: 'new', - session_id: 's2', - agent_type: 'claude-code', - document_path: null, - start_time: Date.now(), - duration: 100, - tasks_total: 1, - tasks_completed: 1, - project_path: null, - }, - ]); - - const sessions = db.getAutoRunSessions('all'); - - // With 'all' range, startTime should be 0, so all sessions should be returned - expect(sessions).toHaveLength(2); - }); - }); - - describe('complete Auto Run workflow', () => { - it('should support the full Auto Run lifecycle: start -> record tasks -> end', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - // Clear mocks after initialize() to count only test operations - mockStatement.run.mockClear(); - - const batchStartTime = Date.now(); - - // Step 1: Start Auto Run session - const autoRunId = db.insertAutoRunSession({ - sessionId: 'complete-workflow-session', - agentType: 'claude-code', - documentPath: 'PHASE-1.md, PHASE-2.md', - startTime: batchStartTime, - duration: 0, - tasksTotal: 5, - tasksCompleted: 0, - projectPath: '/test/project', - }); - - expect(autoRunId).toBeDefined(); - - // Step 2: Record individual tasks as they complete - let taskTime = batchStartTime; - - for (let i = 0; i < 5; i++) { - const taskDuration = 2000 + i * 500; // Varying durations - db.insertAutoRunTask({ - autoRunSessionId: autoRunId, - sessionId: 'complete-workflow-session', - agentType: 'claude-code', - taskIndex: i, - taskContent: `Task ${i + 1}: Implementation step ${i + 1}`, - startTime: taskTime, - duration: taskDuration, - success: i !== 3, // Task 4 (index 3) fails - }); - taskTime += taskDuration; - } - - // Step 3: End Auto Run session - const totalDuration = taskTime - 
batchStartTime; - const updated = db.updateAutoRunSession(autoRunId, { - duration: totalDuration, - tasksCompleted: 4, // 4 of 5 succeeded - }); - - expect(updated).toBe(true); - - // Verify the total number of INSERT/UPDATE calls - // 1 session insert + 5 task inserts + 1 session update = 7 calls - expect(mockStatement.run).toHaveBeenCalledTimes(7); - }); - - it('should handle Auto Run with loop mode (multiple passes)', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - // Clear mocks after initialize() to count only test operations - mockStatement.run.mockClear(); - - const startTime = Date.now(); - - // Start session for loop mode run - const autoRunId = db.insertAutoRunSession({ - sessionId: 'loop-mode-session', - agentType: 'claude-code', - documentPath: 'RECURRING-TASKS.md', - startTime, - duration: 0, - tasksTotal: 15, // Initial estimate (may grow with loops) - tasksCompleted: 0, - projectPath: '/project', - }); - - // Record tasks from multiple loop iterations - // Loop 1: 5 tasks - for (let i = 0; i < 5; i++) { - db.insertAutoRunTask({ - autoRunSessionId: autoRunId, - sessionId: 'loop-mode-session', - agentType: 'claude-code', - taskIndex: i, - taskContent: `Loop 1, Task ${i + 1}`, - startTime: startTime + i * 3000, - duration: 3000, - success: true, - }); - } - - // Loop 2: 5 more tasks - for (let i = 0; i < 5; i++) { - db.insertAutoRunTask({ - autoRunSessionId: autoRunId, - sessionId: 'loop-mode-session', - agentType: 'claude-code', - taskIndex: 5 + i, // Continue indexing from where loop 1 ended - taskContent: `Loop 2, Task ${i + 1}`, - startTime: startTime + 15000 + i * 3000, - duration: 3000, - success: true, - }); - } - - // Update with final stats - db.updateAutoRunSession(autoRunId, { - duration: 30000, // 30 seconds total - tasksCompleted: 10, - }); - - // 1 session + 10 tasks + 1 update = 12 calls - expect(mockStatement.run).toHaveBeenCalledTimes(12); - }); - }); - - 
describe('edge cases and error scenarios', () => { - it('should handle very long task content (synopsis)', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const longContent = 'A'.repeat(10000); // 10KB task content - - const taskId = db.insertAutoRunTask({ - autoRunSessionId: 'ar1', - sessionId: 's1', - agentType: 'claude-code', - taskIndex: 0, - taskContent: longContent, - startTime: Date.now(), - duration: 5000, - success: true, - }); - - expect(taskId).toBeDefined(); - - const runCalls = mockStatement.run.mock.calls; - const lastCall = runCalls[runCalls.length - 1]; - expect(lastCall[5]).toBe(longContent); - }); - - it('should handle zero duration tasks', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const taskId = db.insertAutoRunTask({ - autoRunSessionId: 'ar1', - sessionId: 's1', - agentType: 'claude-code', - taskIndex: 0, - taskContent: 'Instant task', - startTime: Date.now(), - duration: 0, // Zero duration (e.g., cached result) - success: true, - }); - - expect(taskId).toBeDefined(); - - const runCalls = mockStatement.run.mock.calls; - const lastCall = runCalls[runCalls.length - 1]; - expect(lastCall[7]).toBe(0); - }); - - it('should handle Auto Run session with zero tasks total', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - // This shouldn't happen in practice, but the database should handle it - const sessionId = db.insertAutoRunSession({ - sessionId: 'empty-session', - agentType: 'claude-code', - documentPath: 'EMPTY.md', - startTime: Date.now(), - duration: 100, - tasksTotal: 0, - tasksCompleted: 0, - projectPath: '/project', - }); - - expect(sessionId).toBeDefined(); - }); - - it('should handle different agent types for Auto Run', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new 
StatsDB(); - db.initialize(); - - // Clear mocks after initialize() to count only test operations - mockStatement.run.mockClear(); - - // Claude Code Auto Run - db.insertAutoRunSession({ - sessionId: 's1', - agentType: 'claude-code', - documentPath: 'TASKS.md', - startTime: Date.now(), - duration: 1000, - tasksTotal: 5, - tasksCompleted: 5, - projectPath: '/project', - }); - - // OpenCode Auto Run - db.insertAutoRunSession({ - sessionId: 's2', - agentType: 'opencode', - documentPath: 'TASKS.md', - startTime: Date.now(), - duration: 2000, - tasksTotal: 3, - tasksCompleted: 3, - projectPath: '/project', - }); - - // Verify both agent types were recorded - const runCalls = mockStatement.run.mock.calls; - expect(runCalls[0][2]).toBe('claude-code'); - expect(runCalls[1][2]).toBe('opencode'); - }); - }); -}); - -/** - * Foreign key relationship verification tests - * - * These tests verify that the foreign key relationship between auto_run_tasks - * and auto_run_sessions is properly defined in the schema, ensuring referential - * integrity can be enforced when foreign key constraints are enabled. 
- */ -describe('Foreign key relationship between tasks and sessions', () => { - beforeEach(() => { - vi.clearAllMocks(); - mockDb.pragma.mockReturnValue([{ user_version: 0 }]); - mockDb.prepare.mockReturnValue(mockStatement); - mockStatement.run.mockReturnValue({ changes: 1 }); - mockStatement.get.mockReturnValue({ count: 0, total_duration: 0 }); - mockStatement.all.mockReturnValue([]); - mockFsExistsSync.mockReturnValue(true); - }); - - afterEach(() => { - vi.resetModules(); - }); - - describe('schema definition', () => { - it('should create auto_run_tasks table with REFERENCES clause to auto_run_sessions', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - // Verify the CREATE TABLE statement includes the foreign key reference - const prepareCalls = mockDb.prepare.mock.calls.map((call) => call[0] as string); - const createTasksTable = prepareCalls.find((sql) => - sql.includes('CREATE TABLE IF NOT EXISTS auto_run_tasks') - ); - - expect(createTasksTable).toBeDefined(); - expect(createTasksTable).toContain( - 'auto_run_session_id TEXT NOT NULL REFERENCES auto_run_sessions(id)' - ); - }); - - it('should have auto_run_session_id column as NOT NULL in auto_run_tasks', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const prepareCalls = mockDb.prepare.mock.calls.map((call) => call[0] as string); - const createTasksTable = prepareCalls.find((sql) => - sql.includes('CREATE TABLE IF NOT EXISTS auto_run_tasks') - ); - - expect(createTasksTable).toBeDefined(); - // Verify NOT NULL constraint is present for auto_run_session_id - expect(createTasksTable).toContain('auto_run_session_id TEXT NOT NULL'); - }); - - it('should create index on auto_run_session_id foreign key column', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const prepareCalls = 
mockDb.prepare.mock.calls.map((call) => call[0] as string); - const indexCreation = prepareCalls.find((sql) => sql.includes('idx_task_auto_session')); - - expect(indexCreation).toBeDefined(); - expect(indexCreation).toContain('ON auto_run_tasks(auto_run_session_id)'); - }); - }); - - describe('referential integrity behavior', () => { - it('should store auto_run_session_id when inserting task', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const autoRunSessionId = 'parent-session-abc-123'; - db.insertAutoRunTask({ - autoRunSessionId, - sessionId: 'maestro-session-1', - agentType: 'claude-code', - taskIndex: 0, - taskContent: 'Test task', - startTime: Date.now(), - duration: 1000, - success: true, - }); - - // Verify the auto_run_session_id was passed to the INSERT - const runCalls = mockStatement.run.mock.calls; - const lastCall = runCalls[runCalls.length - 1]; - - // INSERT parameters: id, auto_run_session_id, session_id, agent_type, task_index, task_content, start_time, duration, success - expect(lastCall[1]).toBe(autoRunSessionId); - }); - - it('should insert task with matching auto_run_session_id from parent session', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - // Clear calls from initialization - mockStatement.run.mockClear(); - - // First insert a session - const autoRunId = db.insertAutoRunSession({ - sessionId: 'session-1', - agentType: 'claude-code', - documentPath: 'PHASE-1.md', - startTime: Date.now(), - duration: 0, - tasksTotal: 5, - tasksCompleted: 0, - projectPath: '/project', - }); - - // Then insert a task referencing that session - const taskId = db.insertAutoRunTask({ - autoRunSessionId: autoRunId, - sessionId: 'session-1', - agentType: 'claude-code', - taskIndex: 0, - taskContent: 'First task', - startTime: Date.now(), - duration: 1000, - success: true, - }); - - 
expect(autoRunId).toBeDefined(); - expect(taskId).toBeDefined(); - - // Both inserts should have succeeded (session + task) - expect(mockStatement.run).toHaveBeenCalledTimes(2); - - // Verify the task INSERT used the session ID returned from the session INSERT - const runCalls = mockStatement.run.mock.calls; - const taskInsertCall = runCalls[1]; - expect(taskInsertCall[1]).toBe(autoRunId); // auto_run_session_id matches - }); - - it('should retrieve tasks only for the specific parent session', async () => { - const now = Date.now(); - - // Mock returns tasks for session 'auto-run-A' only - mockStatement.all.mockReturnValue([ - { - id: 'task-1', - auto_run_session_id: 'auto-run-A', - session_id: 'session-1', - agent_type: 'claude-code', - task_index: 0, - task_content: 'Task for session A', - start_time: now, - duration: 1000, - success: 1, - }, - { - id: 'task-2', - auto_run_session_id: 'auto-run-A', - session_id: 'session-1', - agent_type: 'claude-code', - task_index: 1, - task_content: 'Another task for session A', - start_time: now + 1000, - duration: 2000, - success: 1, - }, - ]); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - // Query tasks for 'auto-run-A' - const tasksA = db.getAutoRunTasks('auto-run-A'); - - expect(tasksA).toHaveLength(2); - expect(tasksA[0].autoRunSessionId).toBe('auto-run-A'); - expect(tasksA[1].autoRunSessionId).toBe('auto-run-A'); - - // Verify the WHERE clause used the correct auto_run_session_id - expect(mockStatement.all).toHaveBeenCalledWith('auto-run-A'); - }); - - it('should return empty array when no tasks exist for a session', async () => { - mockStatement.all.mockReturnValue([]); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const tasks = db.getAutoRunTasks('non-existent-session'); - - expect(tasks).toHaveLength(0); - expect(tasks).toEqual([]); - }); - }); - - describe('data consistency 
verification', () => { - it('should maintain consistent auto_run_session_id across multiple tasks', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - // Clear calls from initialization - mockStatement.run.mockClear(); - - const parentSessionId = 'consistent-parent-session'; - - // Insert multiple tasks for the same parent session - for (let i = 0; i < 5; i++) { - db.insertAutoRunTask({ - autoRunSessionId: parentSessionId, - sessionId: 'maestro-session', - agentType: 'claude-code', - taskIndex: i, - taskContent: `Task ${i + 1}`, - startTime: Date.now() + i * 1000, - duration: 1000, - success: true, - }); - } - - // Verify all 5 tasks used the same parent session ID - const runCalls = mockStatement.run.mock.calls; - expect(runCalls).toHaveLength(5); - - for (const call of runCalls) { - expect(call[1]).toBe(parentSessionId); // auto_run_session_id - } - }); - - it('should allow tasks from different sessions to be inserted independently', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - // Clear calls from initialization - mockStatement.run.mockClear(); - - // Insert tasks for session A - db.insertAutoRunTask({ - autoRunSessionId: 'session-A', - sessionId: 'maestro-1', - agentType: 'claude-code', - taskIndex: 0, - taskContent: 'Task A1', - startTime: Date.now(), - duration: 1000, - success: true, - }); - - // Insert tasks for session B - db.insertAutoRunTask({ - autoRunSessionId: 'session-B', - sessionId: 'maestro-2', - agentType: 'opencode', - taskIndex: 0, - taskContent: 'Task B1', - startTime: Date.now(), - duration: 2000, - success: true, - }); - - // Insert another task for session A - db.insertAutoRunTask({ - autoRunSessionId: 'session-A', - sessionId: 'maestro-1', - agentType: 'claude-code', - taskIndex: 1, - taskContent: 'Task A2', - startTime: Date.now(), - duration: 1500, - success: true, - }); - - const runCalls 
= mockStatement.run.mock.calls; - expect(runCalls).toHaveLength(3); - - // Verify parent session IDs are correctly assigned - expect(runCalls[0][1]).toBe('session-A'); - expect(runCalls[1][1]).toBe('session-B'); - expect(runCalls[2][1]).toBe('session-A'); - }); - - it('should use generated session ID as foreign key when retrieved after insertion', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - // Clear calls from initialization - mockStatement.run.mockClear(); - - // Insert a session and capture the generated ID - const generatedSessionId = db.insertAutoRunSession({ - sessionId: 'maestro-session', - agentType: 'claude-code', - documentPath: 'DOC.md', - startTime: Date.now(), - duration: 0, - tasksTotal: 3, - tasksCompleted: 0, - projectPath: '/project', - }); - - // The generated ID should be a string with timestamp-random format - expect(generatedSessionId).toMatch(/^\d+-[a-z0-9]+$/); - - // Use this generated ID as the foreign key for tasks - db.insertAutoRunTask({ - autoRunSessionId: generatedSessionId, - sessionId: 'maestro-session', - agentType: 'claude-code', - taskIndex: 0, - taskContent: 'First task', - startTime: Date.now(), - duration: 1000, - success: true, - }); - - const runCalls = mockStatement.run.mock.calls; - const taskInsert = runCalls[1]; // Second call is the task insert (first is session insert) - - // Verify the task uses the exact same ID that was generated for the session - expect(taskInsert[1]).toBe(generatedSessionId); - }); - }); - - describe('query filtering by foreign key', () => { - it('should filter tasks using WHERE auto_run_session_id clause', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - db.getAutoRunTasks('specific-session-id'); - - // Verify the SQL query includes proper WHERE clause for foreign key - const prepareCalls = mockDb.prepare.mock.calls; - const selectTasksCall = 
prepareCalls.find( - (call) => - (call[0] as string).includes('SELECT * FROM auto_run_tasks') && - (call[0] as string).includes('WHERE auto_run_session_id = ?') - ); - - expect(selectTasksCall).toBeDefined(); - }); - - it('should order tasks by task_index within a session', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - db.getAutoRunTasks('any-session'); - - // Verify the query includes ORDER BY task_index - const prepareCalls = mockDb.prepare.mock.calls; - const selectTasksCall = prepareCalls.find((call) => - (call[0] as string).includes('ORDER BY task_index ASC') - ); - - expect(selectTasksCall).toBeDefined(); - }); - }); -}); - -/** - * Time-range filtering verification tests - * - * These tests verify that time-range filtering works correctly for all supported - * ranges: 'day', 'week', 'month', 'year', and 'all'. Each range should correctly - * calculate the start timestamp and use it to filter database queries. 
- */ -describe('Time-range filtering works correctly for all ranges', () => { - beforeEach(() => { - vi.clearAllMocks(); - mockDb.pragma.mockReturnValue([{ user_version: 1 }]); - mockDb.prepare.mockReturnValue(mockStatement); - mockStatement.run.mockReturnValue({ changes: 1 }); - mockStatement.get.mockReturnValue({ count: 0, total_duration: 0 }); - mockStatement.all.mockReturnValue([]); - mockFsExistsSync.mockReturnValue(true); - }); - - afterEach(() => { - vi.resetModules(); - }); - - describe('getQueryEvents time range calculations', () => { - it('should filter by "day" range (last 24 hours)', async () => { - const now = Date.now(); - const oneDayMs = 24 * 60 * 60 * 1000; - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - db.getQueryEvents('day'); - - // Verify the start_time parameter is approximately 24 hours ago - const allCalls = mockStatement.all.mock.calls; - expect(allCalls.length).toBeGreaterThan(0); - - const lastCall = allCalls[allCalls.length - 1]; - const startTimeParam = lastCall[0] as number; - - // The start time should be approximately now - 24 hours (within a few seconds tolerance) - expect(startTimeParam).toBeGreaterThanOrEqual(now - oneDayMs - 5000); - expect(startTimeParam).toBeLessThanOrEqual(now - oneDayMs + 5000); - }); - - it('should filter by "week" range (last 7 days)', async () => { - const now = Date.now(); - const oneWeekMs = 7 * 24 * 60 * 60 * 1000; - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - db.getQueryEvents('week'); - - const allCalls = mockStatement.all.mock.calls; - expect(allCalls.length).toBeGreaterThan(0); - - const lastCall = allCalls[allCalls.length - 1]; - const startTimeParam = lastCall[0] as number; - - // The start time should be approximately now - 7 days (within a few seconds tolerance) - expect(startTimeParam).toBeGreaterThanOrEqual(now - oneWeekMs - 5000); - 
expect(startTimeParam).toBeLessThanOrEqual(now - oneWeekMs + 5000); - }); - - it('should filter by "month" range (last 30 days)', async () => { - const now = Date.now(); - const oneMonthMs = 30 * 24 * 60 * 60 * 1000; - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - db.getQueryEvents('month'); - - const allCalls = mockStatement.all.mock.calls; - expect(allCalls.length).toBeGreaterThan(0); - - const lastCall = allCalls[allCalls.length - 1]; - const startTimeParam = lastCall[0] as number; - - // The start time should be approximately now - 30 days (within a few seconds tolerance) - expect(startTimeParam).toBeGreaterThanOrEqual(now - oneMonthMs - 5000); - expect(startTimeParam).toBeLessThanOrEqual(now - oneMonthMs + 5000); - }); - - it('should filter by "year" range (last 365 days)', async () => { - const now = Date.now(); - const oneYearMs = 365 * 24 * 60 * 60 * 1000; - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - db.getQueryEvents('year'); - - const allCalls = mockStatement.all.mock.calls; - expect(allCalls.length).toBeGreaterThan(0); - - const lastCall = allCalls[allCalls.length - 1]; - const startTimeParam = lastCall[0] as number; - - // The start time should be approximately now - 365 days (within a few seconds tolerance) - expect(startTimeParam).toBeGreaterThanOrEqual(now - oneYearMs - 5000); - expect(startTimeParam).toBeLessThanOrEqual(now - oneYearMs + 5000); - }); - - it('should filter by "all" range (from epoch/timestamp 0)', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - db.getQueryEvents('all'); - - const allCalls = mockStatement.all.mock.calls; - expect(allCalls.length).toBeGreaterThan(0); - - const lastCall = allCalls[allCalls.length - 1]; - const startTimeParam = lastCall[0] as number; - - // For 'all' range, start time should be 0 (epoch) - 
expect(startTimeParam).toBe(0); - }); - }); - - describe('getAutoRunSessions time range calculations', () => { - it('should filter Auto Run sessions by "day" range', async () => { - const now = Date.now(); - const oneDayMs = 24 * 60 * 60 * 1000; - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - db.getAutoRunSessions('day'); - - const allCalls = mockStatement.all.mock.calls; - expect(allCalls.length).toBeGreaterThan(0); - - const lastCall = allCalls[allCalls.length - 1]; - const startTimeParam = lastCall[0] as number; - - expect(startTimeParam).toBeGreaterThanOrEqual(now - oneDayMs - 5000); - expect(startTimeParam).toBeLessThanOrEqual(now - oneDayMs + 5000); - }); - - it('should filter Auto Run sessions by "week" range', async () => { - const now = Date.now(); - const oneWeekMs = 7 * 24 * 60 * 60 * 1000; - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - db.getAutoRunSessions('week'); - - const allCalls = mockStatement.all.mock.calls; - expect(allCalls.length).toBeGreaterThan(0); - - const lastCall = allCalls[allCalls.length - 1]; - const startTimeParam = lastCall[0] as number; - - expect(startTimeParam).toBeGreaterThanOrEqual(now - oneWeekMs - 5000); - expect(startTimeParam).toBeLessThanOrEqual(now - oneWeekMs + 5000); - }); - - it('should filter Auto Run sessions by "month" range', async () => { - const now = Date.now(); - const oneMonthMs = 30 * 24 * 60 * 60 * 1000; - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - db.getAutoRunSessions('month'); - - const allCalls = mockStatement.all.mock.calls; - expect(allCalls.length).toBeGreaterThan(0); - - const lastCall = allCalls[allCalls.length - 1]; - const startTimeParam = lastCall[0] as number; - - expect(startTimeParam).toBeGreaterThanOrEqual(now - oneMonthMs - 5000); - expect(startTimeParam).toBeLessThanOrEqual(now - oneMonthMs + 
5000); - }); - - it('should filter Auto Run sessions by "year" range', async () => { - const now = Date.now(); - const oneYearMs = 365 * 24 * 60 * 60 * 1000; - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - db.getAutoRunSessions('year'); - - const allCalls = mockStatement.all.mock.calls; - expect(allCalls.length).toBeGreaterThan(0); - - const lastCall = allCalls[allCalls.length - 1]; - const startTimeParam = lastCall[0] as number; - - expect(startTimeParam).toBeGreaterThanOrEqual(now - oneYearMs - 5000); - expect(startTimeParam).toBeLessThanOrEqual(now - oneYearMs + 5000); - }); - - it('should filter Auto Run sessions by "all" range', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - db.getAutoRunSessions('all'); - - const allCalls = mockStatement.all.mock.calls; - expect(allCalls.length).toBeGreaterThan(0); - - const lastCall = allCalls[allCalls.length - 1]; - const startTimeParam = lastCall[0] as number; - - expect(startTimeParam).toBe(0); - }); - }); - - describe('getAggregatedStats time range calculations', () => { - it('should aggregate stats for "day" range', async () => { - const now = Date.now(); - const oneDayMs = 24 * 60 * 60 * 1000; - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - db.getAggregatedStats('day'); - - // getAggregatedStats calls multiple queries, verify the totals query used correct time range - const getCalls = mockStatement.get.mock.calls; - expect(getCalls.length).toBeGreaterThan(0); - - const firstCall = getCalls[0]; - const startTimeParam = firstCall[0] as number; - - expect(startTimeParam).toBeGreaterThanOrEqual(now - oneDayMs - 5000); - expect(startTimeParam).toBeLessThanOrEqual(now - oneDayMs + 5000); - }); - - it('should aggregate stats for "week" range', async () => { - const now = Date.now(); - const oneWeekMs = 7 * 24 * 60 * 60 * 
1000; - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - db.getAggregatedStats('week'); - - const getCalls = mockStatement.get.mock.calls; - expect(getCalls.length).toBeGreaterThan(0); - - const firstCall = getCalls[0]; - const startTimeParam = firstCall[0] as number; - - expect(startTimeParam).toBeGreaterThanOrEqual(now - oneWeekMs - 5000); - expect(startTimeParam).toBeLessThanOrEqual(now - oneWeekMs + 5000); - }); - - it('should aggregate stats for "month" range', async () => { - const now = Date.now(); - const oneMonthMs = 30 * 24 * 60 * 60 * 1000; - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - db.getAggregatedStats('month'); - - const getCalls = mockStatement.get.mock.calls; - expect(getCalls.length).toBeGreaterThan(0); - - const firstCall = getCalls[0]; - const startTimeParam = firstCall[0] as number; - - expect(startTimeParam).toBeGreaterThanOrEqual(now - oneMonthMs - 5000); - expect(startTimeParam).toBeLessThanOrEqual(now - oneMonthMs + 5000); - }); - - it('should aggregate stats for "year" range', async () => { - const now = Date.now(); - const oneYearMs = 365 * 24 * 60 * 60 * 1000; - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - db.getAggregatedStats('year'); - - const getCalls = mockStatement.get.mock.calls; - expect(getCalls.length).toBeGreaterThan(0); - - const firstCall = getCalls[0]; - const startTimeParam = firstCall[0] as number; - - expect(startTimeParam).toBeGreaterThanOrEqual(now - oneYearMs - 5000); - expect(startTimeParam).toBeLessThanOrEqual(now - oneYearMs + 5000); - }); - - it('should aggregate stats for "all" range', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - db.getAggregatedStats('all'); - - const getCalls = mockStatement.get.mock.calls; - 
expect(getCalls.length).toBeGreaterThan(0); - - const firstCall = getCalls[0]; - const startTimeParam = firstCall[0] as number; - - expect(startTimeParam).toBe(0); - }); - }); - - describe('exportToCsv time range calculations', () => { - it('should export CSV for "day" range only', async () => { - const now = Date.now(); - const oneDayMs = 24 * 60 * 60 * 1000; - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - db.exportToCsv('day'); - - const allCalls = mockStatement.all.mock.calls; - expect(allCalls.length).toBeGreaterThan(0); - - const lastCall = allCalls[allCalls.length - 1]; - const startTimeParam = lastCall[0] as number; - - expect(startTimeParam).toBeGreaterThanOrEqual(now - oneDayMs - 5000); - expect(startTimeParam).toBeLessThanOrEqual(now - oneDayMs + 5000); - }); - - it('should export CSV for "all" range', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - db.exportToCsv('all'); - - const allCalls = mockStatement.all.mock.calls; - expect(allCalls.length).toBeGreaterThan(0); - - const lastCall = allCalls[allCalls.length - 1]; - const startTimeParam = lastCall[0] as number; - - expect(startTimeParam).toBe(0); - }); - }); - - describe('SQL query structure verification', () => { - it('should include start_time >= ? in getQueryEvents SQL', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - db.getQueryEvents('week'); - - const prepareCalls = mockDb.prepare.mock.calls; - const selectCall = prepareCalls.find((call) => - (call[0] as string).includes('SELECT * FROM query_events') - ); - - expect(selectCall).toBeDefined(); - expect(selectCall![0]).toContain('start_time >= ?'); - }); - - it('should include start_time >= ? 
in getAutoRunSessions SQL', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - db.getAutoRunSessions('month'); - - const prepareCalls = mockDb.prepare.mock.calls; - const selectCall = prepareCalls.find((call) => - (call[0] as string).includes('SELECT * FROM auto_run_sessions') - ); - - expect(selectCall).toBeDefined(); - expect(selectCall![0]).toContain('start_time >= ?'); - }); - - it('should include start_time >= ? in aggregation queries', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - db.getAggregatedStats('year'); - - const prepareCalls = mockDb.prepare.mock.calls; - - // Verify the totals query includes the filter - const totalsCall = prepareCalls.find( - (call) => - (call[0] as string).includes('COUNT(*)') && (call[0] as string).includes('SUM(duration)') - ); - expect(totalsCall).toBeDefined(); - expect(totalsCall![0]).toContain('WHERE start_time >= ?'); - - // Verify the byAgent query includes the filter - const byAgentCall = prepareCalls.find((call) => - (call[0] as string).includes('GROUP BY agent_type') - ); - expect(byAgentCall).toBeDefined(); - expect(byAgentCall![0]).toContain('WHERE start_time >= ?'); - - // Verify the bySource query includes the filter - const bySourceCall = prepareCalls.find((call) => - (call[0] as string).includes('GROUP BY source') - ); - expect(bySourceCall).toBeDefined(); - expect(bySourceCall![0]).toContain('WHERE start_time >= ?'); - - // Verify the byDay query includes the filter - const byDayCall = prepareCalls.find((call) => (call[0] as string).includes('GROUP BY date(')); - expect(byDayCall).toBeDefined(); - expect(byDayCall![0]).toContain('WHERE start_time >= ?'); - }); - }); - - describe('time range boundary behavior', () => { - it('should include events exactly at the range boundary', async () => { - const now = Date.now(); - const oneDayMs = 24 * 60 * 60 * 1000; - const 
boundaryTime = now - oneDayMs; - - // Mock event exactly at the boundary - mockStatement.all.mockReturnValue([ - { - id: 'boundary-event', - session_id: 'session-1', - agent_type: 'claude-code', - source: 'user', - start_time: boundaryTime, - duration: 1000, - project_path: null, - tab_id: null, - }, - ]); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const events = db.getQueryEvents('day'); - - // Event at the boundary should be included (start_time >= boundary) - expect(events).toHaveLength(1); - expect(events[0].id).toBe('boundary-event'); - }); - - it('should exclude events before the range boundary', async () => { - // The actual filtering happens in the SQL query via WHERE clause - // We verify this by checking the SQL structure - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - db.getQueryEvents('day'); - - const prepareCalls = mockDb.prepare.mock.calls; - const selectCall = prepareCalls.find((call) => - (call[0] as string).includes('SELECT * FROM query_events') - ); - - // Verify it uses >= (greater than or equal), not just > (greater than) - expect(selectCall![0]).toContain('start_time >= ?'); - }); - - it('should return consistent results for multiple calls with same range', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - // Call twice in quick succession - db.getQueryEvents('week'); - db.getQueryEvents('week'); - - const allCalls = mockStatement.all.mock.calls; - expect(allCalls.length).toBe(2); - - // Both calls should have very close (within a few ms) start times - const firstStartTime = allCalls[0][0] as number; - const secondStartTime = allCalls[1][0] as number; - - // Difference should be minimal (test executes quickly) - expect(Math.abs(secondStartTime - firstStartTime)).toBeLessThan(1000); - }); - }); - - describe('combined filters with time range', 
() => { - it('should combine time range with agentType filter', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - db.getQueryEvents('week', { agentType: 'claude-code' }); - - const allCalls = mockStatement.all.mock.calls; - expect(allCalls.length).toBeGreaterThan(0); - - const lastCall = allCalls[allCalls.length - 1]; - // Should have 2 parameters: start_time and agentType - expect(lastCall).toHaveLength(2); - expect(lastCall[1]).toBe('claude-code'); - }); - - it('should combine time range with source filter', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - db.getQueryEvents('month', { source: 'auto' }); - - const allCalls = mockStatement.all.mock.calls; - expect(allCalls.length).toBeGreaterThan(0); - - const lastCall = allCalls[allCalls.length - 1]; - // Should have 2 parameters: start_time and source - expect(lastCall).toHaveLength(2); - expect(lastCall[1]).toBe('auto'); - }); - - it('should combine time range with multiple filters', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - db.getQueryEvents('year', { - agentType: 'opencode', - source: 'user', - projectPath: '/test/path', - sessionId: 'session-123', - }); - - const allCalls = mockStatement.all.mock.calls; - expect(allCalls.length).toBeGreaterThan(0); - - const lastCall = allCalls[allCalls.length - 1]; - // Should have 5 parameters: start_time + 4 filters - expect(lastCall).toHaveLength(5); - expect(lastCall[1]).toBe('opencode'); - expect(lastCall[2]).toBe('user'); - expect(lastCall[3]).toBe('/test/path'); - expect(lastCall[4]).toBe('session-123'); - }); - }); -}); - -/** - * Comprehensive tests for aggregation query calculations - * - * These tests verify that the getAggregatedStats method returns correct calculations: - * - Total queries count - * - Total duration sum - * - Average 
duration calculation - * - Breakdown by agent type (count and duration) - * - Breakdown by source (user vs auto) - * - Daily breakdown for charts - */ -describe('Aggregation queries return correct calculations', () => { - beforeEach(() => { - vi.clearAllMocks(); - mockDb.pragma.mockReturnValue([{ user_version: 1 }]); - mockDb.prepare.mockReturnValue(mockStatement); - mockStatement.run.mockReturnValue({ changes: 1 }); - mockFsExistsSync.mockReturnValue(true); - }); - - afterEach(() => { - vi.resetModules(); - }); - - describe('totalQueries and totalDuration calculations', () => { - it('should return correct totalQueries count from database', async () => { - // Mock the totals query result - mockStatement.get.mockReturnValue({ count: 42, total_duration: 126000 }); - mockStatement.all.mockReturnValue([]); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const stats = db.getAggregatedStats('week'); - - expect(stats.totalQueries).toBe(42); - }); - - it('should return correct totalDuration sum from database', async () => { - mockStatement.get.mockReturnValue({ count: 10, total_duration: 50000 }); - mockStatement.all.mockReturnValue([]); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const stats = db.getAggregatedStats('month'); - - expect(stats.totalDuration).toBe(50000); - }); - - it('should handle zero queries correctly', async () => { - mockStatement.get.mockReturnValue({ count: 0, total_duration: 0 }); - mockStatement.all.mockReturnValue([]); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const stats = db.getAggregatedStats('day'); - - expect(stats.totalQueries).toBe(0); - expect(stats.totalDuration).toBe(0); - }); - - it('should handle large query counts correctly', async () => { - mockStatement.get.mockReturnValue({ count: 10000, total_duration: 5000000 }); - 
mockStatement.all.mockReturnValue([]); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const stats = db.getAggregatedStats('year'); - - expect(stats.totalQueries).toBe(10000); - expect(stats.totalDuration).toBe(5000000); - }); - - it('should handle very large durations correctly', async () => { - // 1 day of continuous usage = 86400000ms - const largeDuration = 86400000; - mockStatement.get.mockReturnValue({ count: 100, total_duration: largeDuration }); - mockStatement.all.mockReturnValue([]); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const stats = db.getAggregatedStats('all'); - - expect(stats.totalDuration).toBe(largeDuration); - }); - }); - - describe('avgDuration calculation', () => { - it('should calculate correct average duration', async () => { - // 100 queries, 500000ms total = 5000ms average - mockStatement.get.mockReturnValue({ count: 100, total_duration: 500000 }); - mockStatement.all.mockReturnValue([]); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const stats = db.getAggregatedStats('week'); - - expect(stats.avgDuration).toBe(5000); - }); - - it('should return 0 average duration when no queries', async () => { - mockStatement.get.mockReturnValue({ count: 0, total_duration: 0 }); - mockStatement.all.mockReturnValue([]); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const stats = db.getAggregatedStats('day'); - - // Avoid division by zero - should return 0 - expect(stats.avgDuration).toBe(0); - }); - - it('should round average duration to nearest integer', async () => { - // 3 queries, 10000ms total = 3333.33... 
average, should round to 3333 - mockStatement.get.mockReturnValue({ count: 3, total_duration: 10000 }); - mockStatement.all.mockReturnValue([]); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const stats = db.getAggregatedStats('month'); - - // Math.round(10000 / 3) = 3333 - expect(stats.avgDuration).toBe(3333); - }); - - it('should handle single query average correctly', async () => { - mockStatement.get.mockReturnValue({ count: 1, total_duration: 12345 }); - mockStatement.all.mockReturnValue([]); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const stats = db.getAggregatedStats('day'); - - expect(stats.avgDuration).toBe(12345); - }); - - it('should handle edge case of tiny durations', async () => { - // 5 queries with 1ms each = 5ms total, 1ms average - mockStatement.get.mockReturnValue({ count: 5, total_duration: 5 }); - mockStatement.all.mockReturnValue([]); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const stats = db.getAggregatedStats('day'); - - expect(stats.avgDuration).toBe(1); - }); - }); - - describe('byAgent breakdown calculations', () => { - it('should return correct breakdown by single agent type', async () => { - mockStatement.get.mockReturnValue({ count: 50, total_duration: 250000 }); - mockStatement.all - .mockReturnValueOnce([]) // First all() call (we handle this below) - .mockReturnValueOnce([{ agent_type: 'claude-code', count: 50, duration: 250000 }]) - .mockReturnValueOnce([{ source: 'user', count: 50 }]) - .mockReturnValueOnce([]); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - // Reset to control exact mock responses for getAggregatedStats - mockStatement.all.mockReset(); - mockStatement.all - .mockReturnValueOnce([{ agent_type: 'claude-code', count: 50, duration: 250000 }]) - 
.mockReturnValueOnce([{ source: 'user', count: 50 }]) - .mockReturnValueOnce([]); - - const stats = db.getAggregatedStats('week'); - - expect(stats.byAgent).toHaveProperty('claude-code'); - expect(stats.byAgent['claude-code'].count).toBe(50); - expect(stats.byAgent['claude-code'].duration).toBe(250000); - }); - - it('should return correct breakdown for multiple agent types', async () => { - mockStatement.get.mockReturnValue({ count: 150, total_duration: 750000 }); - mockStatement.all - .mockReturnValueOnce([ - { agent_type: 'claude-code', count: 100, duration: 500000 }, - { agent_type: 'opencode', count: 30, duration: 150000 }, - { agent_type: 'gemini-cli', count: 20, duration: 100000 }, - ]) - .mockReturnValueOnce([ - { source: 'user', count: 120 }, - { source: 'auto', count: 30 }, - ]) - .mockReturnValueOnce([]); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const stats = db.getAggregatedStats('month'); - - // Verify all agents are present - expect(Object.keys(stats.byAgent)).toHaveLength(3); - - // Verify claude-code stats - expect(stats.byAgent['claude-code'].count).toBe(100); - expect(stats.byAgent['claude-code'].duration).toBe(500000); - - // Verify opencode stats - expect(stats.byAgent['opencode'].count).toBe(30); - expect(stats.byAgent['opencode'].duration).toBe(150000); - - // Verify gemini-cli stats - expect(stats.byAgent['gemini-cli'].count).toBe(20); - expect(stats.byAgent['gemini-cli'].duration).toBe(100000); - }); - - it('should return empty byAgent object when no queries exist', async () => { - mockStatement.get.mockReturnValue({ count: 0, total_duration: 0 }); - mockStatement.all.mockReturnValue([]); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const stats = db.getAggregatedStats('day'); - - expect(stats.byAgent).toEqual({}); - expect(Object.keys(stats.byAgent)).toHaveLength(0); - }); - - it('should maintain correct 
duration per agent when durations vary', async () => { - mockStatement.get.mockReturnValue({ count: 4, total_duration: 35000 }); - mockStatement.all - .mockReturnValueOnce([ - { agent_type: 'claude-code', count: 3, duration: 30000 }, // Avg 10000 - { agent_type: 'opencode', count: 1, duration: 5000 }, // Avg 5000 - ]) - .mockReturnValueOnce([{ source: 'user', count: 4 }]) - .mockReturnValueOnce([]); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const stats = db.getAggregatedStats('week'); - - // Verify duration totals per agent are preserved - expect(stats.byAgent['claude-code'].duration).toBe(30000); - expect(stats.byAgent['opencode'].duration).toBe(5000); - - // Total should match sum of all agents - const totalAgentDuration = Object.values(stats.byAgent).reduce( - (sum, agent) => sum + agent.duration, - 0 - ); - expect(totalAgentDuration).toBe(35000); - }); - }); - - describe('bySource breakdown calculations', () => { - it('should return correct user vs auto counts', async () => { - mockStatement.get.mockReturnValue({ count: 100, total_duration: 500000 }); - mockStatement.all - .mockReturnValueOnce([{ agent_type: 'claude-code', count: 100, duration: 500000 }]) - .mockReturnValueOnce([ - { source: 'user', count: 70 }, - { source: 'auto', count: 30 }, - ]) - .mockReturnValueOnce([]); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const stats = db.getAggregatedStats('week'); - - expect(stats.bySource.user).toBe(70); - expect(stats.bySource.auto).toBe(30); - }); - - it('should handle all queries from user source', async () => { - mockStatement.get.mockReturnValue({ count: 50, total_duration: 250000 }); - mockStatement.all - .mockReturnValueOnce([{ agent_type: 'claude-code', count: 50, duration: 250000 }]) - .mockReturnValueOnce([{ source: 'user', count: 50 }]) - .mockReturnValueOnce([]); - - const { StatsDB } = await 
import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const stats = db.getAggregatedStats('month'); - - expect(stats.bySource.user).toBe(50); - expect(stats.bySource.auto).toBe(0); - }); - - it('should handle all queries from auto source', async () => { - mockStatement.get.mockReturnValue({ count: 200, total_duration: 1000000 }); - mockStatement.all - .mockReturnValueOnce([{ agent_type: 'claude-code', count: 200, duration: 1000000 }]) - .mockReturnValueOnce([{ source: 'auto', count: 200 }]) - .mockReturnValueOnce([]); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const stats = db.getAggregatedStats('year'); - - expect(stats.bySource.user).toBe(0); - expect(stats.bySource.auto).toBe(200); - }); - - it('should initialize bySource with zeros when no data', async () => { - mockStatement.get.mockReturnValue({ count: 0, total_duration: 0 }); - mockStatement.all.mockReturnValue([]); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const stats = db.getAggregatedStats('day'); - - expect(stats.bySource).toEqual({ user: 0, auto: 0 }); - }); - - it('should sum correctly across source types', async () => { - mockStatement.get.mockReturnValue({ count: 1000, total_duration: 5000000 }); - mockStatement.all - .mockReturnValueOnce([{ agent_type: 'claude-code', count: 1000, duration: 5000000 }]) - .mockReturnValueOnce([ - { source: 'user', count: 650 }, - { source: 'auto', count: 350 }, - ]) - .mockReturnValueOnce([]); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const stats = db.getAggregatedStats('all'); - - // Verify sum equals totalQueries - expect(stats.bySource.user + stats.bySource.auto).toBe(stats.totalQueries); - }); - }); - - describe('byDay breakdown calculations', () => { - it('should return daily breakdown with correct structure', async () => { - 
mockStatement.get.mockReturnValue({ count: 30, total_duration: 150000 }); - mockStatement.all - .mockReturnValueOnce([{ agent_type: 'claude-code', count: 30, duration: 150000 }]) // byAgent - .mockReturnValueOnce([{ source: 'user', count: 30 }]) // bySource - .mockReturnValueOnce([{ is_remote: 0, count: 30 }]) // byLocation - .mockReturnValueOnce([ - { date: '2024-01-01', count: 10, duration: 50000 }, - { date: '2024-01-02', count: 12, duration: 60000 }, - { date: '2024-01-03', count: 8, duration: 40000 }, - ]); // byDay - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const stats = db.getAggregatedStats('week'); - - expect(stats.byDay).toHaveLength(3); - expect(stats.byDay[0]).toEqual({ date: '2024-01-01', count: 10, duration: 50000 }); - expect(stats.byDay[1]).toEqual({ date: '2024-01-02', count: 12, duration: 60000 }); - expect(stats.byDay[2]).toEqual({ date: '2024-01-03', count: 8, duration: 40000 }); - }); - - it('should return empty array when no daily data exists', async () => { - mockStatement.get.mockReturnValue({ count: 0, total_duration: 0 }); - mockStatement.all.mockReturnValue([]); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const stats = db.getAggregatedStats('day'); - - expect(stats.byDay).toEqual([]); - expect(stats.byDay).toHaveLength(0); - }); - - it('should handle single day of data', async () => { - mockStatement.get.mockReturnValue({ count: 5, total_duration: 25000 }); - mockStatement.all - .mockReturnValueOnce([{ agent_type: 'claude-code', count: 5, duration: 25000 }]) // byAgent - .mockReturnValueOnce([{ source: 'user', count: 5 }]) // bySource - .mockReturnValueOnce([{ is_remote: 0, count: 5 }]) // byLocation - .mockReturnValueOnce([{ date: '2024-06-15', count: 5, duration: 25000 }]); // byDay - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const 
stats = db.getAggregatedStats('day'); - - expect(stats.byDay).toHaveLength(1); - expect(stats.byDay[0].date).toBe('2024-06-15'); - expect(stats.byDay[0].count).toBe(5); - expect(stats.byDay[0].duration).toBe(25000); - }); - - it('should order daily data chronologically (ASC)', async () => { - mockStatement.get.mockReturnValue({ count: 15, total_duration: 75000 }); - mockStatement.all - .mockReturnValueOnce([{ agent_type: 'claude-code', count: 15, duration: 75000 }]) // byAgent - .mockReturnValueOnce([{ source: 'user', count: 15 }]) // bySource - .mockReturnValueOnce([{ is_remote: 0, count: 15 }]) // byLocation - .mockReturnValueOnce([ - { date: '2024-03-01', count: 3, duration: 15000 }, - { date: '2024-03-02', count: 5, duration: 25000 }, - { date: '2024-03-03', count: 7, duration: 35000 }, - ]); // byDay - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const stats = db.getAggregatedStats('week'); - - // Verify ASC order (earliest date first) - expect(stats.byDay[0].date).toBe('2024-03-01'); - expect(stats.byDay[1].date).toBe('2024-03-02'); - expect(stats.byDay[2].date).toBe('2024-03-03'); - }); - - it('should sum daily counts equal to totalQueries', async () => { - mockStatement.get.mockReturnValue({ count: 25, total_duration: 125000 }); - mockStatement.all - .mockReturnValueOnce([{ agent_type: 'claude-code', count: 25, duration: 125000 }]) // byAgent - .mockReturnValueOnce([{ source: 'user', count: 25 }]) // bySource - .mockReturnValueOnce([{ is_remote: 0, count: 25 }]) // byLocation - .mockReturnValueOnce([ - { date: '2024-02-01', count: 8, duration: 40000 }, - { date: '2024-02-02', count: 10, duration: 50000 }, - { date: '2024-02-03', count: 7, duration: 35000 }, - ]); // byDay - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const stats = db.getAggregatedStats('week'); - - // Sum of daily counts should equal totalQueries - const 
dailySum = stats.byDay.reduce((sum, day) => sum + day.count, 0); - expect(dailySum).toBe(stats.totalQueries); - }); - - it('should sum daily durations equal to totalDuration', async () => { - mockStatement.get.mockReturnValue({ count: 20, total_duration: 100000 }); - mockStatement.all - .mockReturnValueOnce([{ agent_type: 'opencode', count: 20, duration: 100000 }]) // byAgent - .mockReturnValueOnce([{ source: 'auto', count: 20 }]) // bySource - .mockReturnValueOnce([{ is_remote: 0, count: 20 }]) // byLocation - .mockReturnValueOnce([ - { date: '2024-04-10', count: 5, duration: 25000 }, - { date: '2024-04-11', count: 8, duration: 40000 }, - { date: '2024-04-12', count: 7, duration: 35000 }, - ]); // byDay - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const stats = db.getAggregatedStats('week'); - - // Sum of daily durations should equal totalDuration - const dailyDurationSum = stats.byDay.reduce((sum, day) => sum + day.duration, 0); - expect(dailyDurationSum).toBe(stats.totalDuration); - }); - }); - - describe('aggregation consistency across multiple queries', () => { - it('should return consistent results when called multiple times', async () => { - mockStatement.get.mockReturnValue({ count: 50, total_duration: 250000 }); - mockStatement.all.mockReturnValue([ - { agent_type: 'claude-code', count: 50, duration: 250000 }, - ]); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const stats1 = db.getAggregatedStats('week'); - const stats2 = db.getAggregatedStats('week'); - - expect(stats1.totalQueries).toBe(stats2.totalQueries); - expect(stats1.totalDuration).toBe(stats2.totalDuration); - expect(stats1.avgDuration).toBe(stats2.avgDuration); - }); - - it('should handle concurrent access correctly', async () => { - mockStatement.get.mockReturnValue({ count: 100, total_duration: 500000 }); - mockStatement.all.mockReturnValue([]); - - const { 
StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - // Simulate concurrent calls - const [result1, result2, result3] = [ - db.getAggregatedStats('day'), - db.getAggregatedStats('week'), - db.getAggregatedStats('month'), - ]; - - expect(result1.totalQueries).toBe(100); - expect(result2.totalQueries).toBe(100); - expect(result3.totalQueries).toBe(100); - }); - }); - - describe('SQL query structure verification', () => { - it('should use COALESCE for totalDuration to handle NULL', async () => { - mockStatement.get.mockReturnValue({ count: 0, total_duration: 0 }); - mockStatement.all.mockReturnValue([]); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - db.getAggregatedStats('week'); - - // Verify the SQL query uses COALESCE - const prepareCalls = mockDb.prepare.mock.calls; - const totalsCall = prepareCalls.find((call) => - (call[0] as string).includes('COALESCE(SUM(duration), 0)') - ); - - expect(totalsCall).toBeDefined(); - }); - - it('should GROUP BY agent_type for byAgent breakdown', async () => { - mockStatement.get.mockReturnValue({ count: 0, total_duration: 0 }); - mockStatement.all.mockReturnValue([]); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - db.getAggregatedStats('month'); - - const prepareCalls = mockDb.prepare.mock.calls; - const byAgentCall = prepareCalls.find( - (call) => - (call[0] as string).includes('GROUP BY agent_type') && - (call[0] as string).includes('FROM query_events') - ); - - expect(byAgentCall).toBeDefined(); - }); - - it('should GROUP BY source for bySource breakdown', async () => { - mockStatement.get.mockReturnValue({ count: 0, total_duration: 0 }); - mockStatement.all.mockReturnValue([]); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - db.getAggregatedStats('year'); - - const prepareCalls = 
mockDb.prepare.mock.calls; - const bySourceCall = prepareCalls.find( - (call) => - (call[0] as string).includes('GROUP BY source') && - (call[0] as string).includes('FROM query_events') - ); - - expect(bySourceCall).toBeDefined(); - }); - - it('should use date() function for daily grouping', async () => { - mockStatement.get.mockReturnValue({ count: 0, total_duration: 0 }); - mockStatement.all.mockReturnValue([]); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - db.getAggregatedStats('all'); - - const prepareCalls = mockDb.prepare.mock.calls; - const byDayCall = prepareCalls.find((call) => - (call[0] as string).includes("date(start_time / 1000, 'unixepoch'") - ); - - expect(byDayCall).toBeDefined(); - }); - - it('should ORDER BY date ASC in byDay query', async () => { - mockStatement.get.mockReturnValue({ count: 0, total_duration: 0 }); - mockStatement.all.mockReturnValue([]); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - db.getAggregatedStats('week'); - - const prepareCalls = mockDb.prepare.mock.calls; - const byDayCall = prepareCalls.find( - (call) => - (call[0] as string).includes('ORDER BY date ASC') || - ((call[0] as string).includes('date(start_time') && (call[0] as string).includes('ASC')) - ); - - expect(byDayCall).toBeDefined(); - }); - }); - - describe('edge case calculations', () => { - it('should handle very small average (less than 1ms)', async () => { - // 10 queries, 5ms total = 0.5ms average, should round to 1 (or 0) - mockStatement.get.mockReturnValue({ count: 10, total_duration: 5 }); - mockStatement.all.mockReturnValue([]); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const stats = db.getAggregatedStats('day'); - - // Math.round(5 / 10) = 1 - expect(stats.avgDuration).toBe(1); - }); - - it('should handle maximum JavaScript safe integer values', async () 
=> { - const maxSafe = Number.MAX_SAFE_INTEGER; - // Use a count that divides evenly to avoid rounding issues - mockStatement.get.mockReturnValue({ count: 1, total_duration: maxSafe }); - mockStatement.all.mockReturnValue([]); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const stats = db.getAggregatedStats('all'); - - expect(stats.totalDuration).toBe(maxSafe); - expect(stats.avgDuration).toBe(maxSafe); - }); - - it('should handle mixed zero and non-zero durations in agents', async () => { - mockStatement.get.mockReturnValue({ count: 3, total_duration: 5000 }); - mockStatement.all - .mockReturnValueOnce([ - { agent_type: 'claude-code', count: 2, duration: 5000 }, - { agent_type: 'opencode', count: 1, duration: 0 }, // Zero duration - ]) - .mockReturnValueOnce([{ source: 'user', count: 3 }]) - .mockReturnValueOnce([]); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const stats = db.getAggregatedStats('week'); - - expect(stats.byAgent['claude-code'].duration).toBe(5000); - expect(stats.byAgent['opencode'].duration).toBe(0); - }); - - it('should handle dates spanning year boundaries', async () => { - mockStatement.get.mockReturnValue({ count: 2, total_duration: 10000 }); - mockStatement.all - .mockReturnValueOnce([{ agent_type: 'claude-code', count: 2, duration: 10000 }]) // byAgent - .mockReturnValueOnce([{ source: 'user', count: 2 }]) // bySource - .mockReturnValueOnce([{ is_remote: 0, count: 2 }]) // byLocation - .mockReturnValueOnce([ - { date: '2023-12-31', count: 1, duration: 5000 }, - { date: '2024-01-01', count: 1, duration: 5000 }, - ]); // byDay - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const stats = db.getAggregatedStats('week'); - - expect(stats.byDay).toHaveLength(2); - expect(stats.byDay[0].date).toBe('2023-12-31'); - 
expect(stats.byDay[1].date).toBe('2024-01-01'); - }); - }); -}); - -/** - * Cross-platform database path resolution tests - * - * Tests verify that the stats database file is created at the correct - * platform-appropriate path on macOS, Windows, and Linux. Electron's - * app.getPath('userData') returns: - * - * - macOS: ~/Library/Application Support/Maestro/ - * - Windows: %APPDATA%\Maestro\ (e.g., C:\Users\\AppData\Roaming\Maestro\) - * - Linux: ~/.config/Maestro/ - * - * The stats database is always created at {userData}/stats.db - */ -describe('Cross-platform database path resolution (macOS, Windows, Linux)', () => { - beforeEach(() => { - vi.clearAllMocks(); - lastDbPath = null; - mockDb.pragma.mockReturnValue([{ user_version: 0 }]); - mockDb.prepare.mockReturnValue(mockStatement); - mockStatement.run.mockReturnValue({ changes: 1 }); - mockFsExistsSync.mockReturnValue(true); - mockFsMkdirSync.mockClear(); - }); - - afterEach(() => { - vi.resetModules(); - }); - - describe('macOS path resolution', () => { - it('should use macOS-style userData path: ~/Library/Application Support/Maestro/', async () => { - // Simulate macOS userData path - const macOsUserData = '/Users/testuser/Library/Application Support/Maestro'; - const { app } = await import('electron'); - vi.mocked(app.getPath).mockReturnValue(macOsUserData); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - expect(lastDbPath).toBe(path.join(macOsUserData, 'stats.db')); - }); - - it('should handle macOS path with spaces in Application Support', async () => { - const macOsUserData = '/Users/testuser/Library/Application Support/Maestro'; - const { app } = await import('electron'); - vi.mocked(app.getPath).mockReturnValue(macOsUserData); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - - const dbPath = db.getDbPath(); - expect(dbPath).toContain('Application Support'); - expect(dbPath).toContain('stats.db'); - 
}); - - it('should handle macOS username with special characters', async () => { - const macOsUserData = '/Users/test.user-name/Library/Application Support/Maestro'; - const { app } = await import('electron'); - vi.mocked(app.getPath).mockReturnValue(macOsUserData); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - expect(lastDbPath).toBe(path.join(macOsUserData, 'stats.db')); - }); - - it('should resolve to absolute path on macOS', async () => { - const macOsUserData = '/Users/testuser/Library/Application Support/Maestro'; - const { app } = await import('electron'); - vi.mocked(app.getPath).mockReturnValue(macOsUserData); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - - expect(path.isAbsolute(db.getDbPath())).toBe(true); - }); - }); - - describe('Windows path resolution', () => { - it('should use Windows-style userData path: %APPDATA%\\Maestro\\', async () => { - // Simulate Windows userData path - const windowsUserData = 'C:\\Users\\TestUser\\AppData\\Roaming\\Maestro'; - const { app } = await import('electron'); - vi.mocked(app.getPath).mockReturnValue(windowsUserData); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - // path.join will use the platform's native separator - expect(lastDbPath).toBe(path.join(windowsUserData, 'stats.db')); - }); - - it('should handle Windows path with drive letter', async () => { - const windowsUserData = 'D:\\Users\\TestUser\\AppData\\Roaming\\Maestro'; - const { app } = await import('electron'); - vi.mocked(app.getPath).mockReturnValue(windowsUserData); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - - const dbPath = db.getDbPath(); - expect(dbPath).toContain('stats.db'); - // The path should start with a drive letter pattern when on Windows - // or be a proper path when joined - }); - - it('should handle Windows username 
with spaces', async () => { - const windowsUserData = 'C:\\Users\\Test User\\AppData\\Roaming\\Maestro'; - const { app } = await import('electron'); - vi.mocked(app.getPath).mockReturnValue(windowsUserData); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - expect(lastDbPath).toBe(path.join(windowsUserData, 'stats.db')); - }); - - it('should handle Windows UNC paths (network drives)', async () => { - const windowsUncPath = '\\\\NetworkDrive\\SharedFolder\\AppData\\Maestro'; - const { app } = await import('electron'); - vi.mocked(app.getPath).mockReturnValue(windowsUncPath); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - expect(lastDbPath).toBe(path.join(windowsUncPath, 'stats.db')); - }); - - it('should handle portable Windows installation path', async () => { - // Portable apps might use a different structure - const portablePath = 'E:\\PortableApps\\Maestro\\Data'; - const { app } = await import('electron'); - vi.mocked(app.getPath).mockReturnValue(portablePath); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - expect(lastDbPath).toBe(path.join(portablePath, 'stats.db')); - }); - }); - - describe('Linux path resolution', () => { - it('should use Linux-style userData path: ~/.config/Maestro/', async () => { - // Simulate Linux userData path - const linuxUserData = '/home/testuser/.config/Maestro'; - const { app } = await import('electron'); - vi.mocked(app.getPath).mockReturnValue(linuxUserData); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - expect(lastDbPath).toBe(path.join(linuxUserData, 'stats.db')); - }); - - it('should handle Linux XDG_CONFIG_HOME override', async () => { - // Custom XDG_CONFIG_HOME might result in different path - const customConfigHome = '/custom/config/path/Maestro'; - const { app } = 
await import('electron'); - vi.mocked(app.getPath).mockReturnValue(customConfigHome); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - expect(lastDbPath).toBe(path.join(customConfigHome, 'stats.db')); - }); - - it('should handle Linux username with underscore', async () => { - const linuxUserData = '/home/test_user/.config/Maestro'; - const { app } = await import('electron'); - vi.mocked(app.getPath).mockReturnValue(linuxUserData); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - expect(lastDbPath).toBe(path.join(linuxUserData, 'stats.db')); - }); - - it('should resolve to absolute path on Linux', async () => { - const linuxUserData = '/home/testuser/.config/Maestro'; - const { app } = await import('electron'); - vi.mocked(app.getPath).mockReturnValue(linuxUserData); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - - expect(path.isAbsolute(db.getDbPath())).toBe(true); - }); - - it('should handle Linux Snap/Flatpak sandboxed paths', async () => { - // Snap packages have a different path structure - const snapPath = '/home/testuser/snap/maestro/current/.config/Maestro'; - const { app } = await import('electron'); - vi.mocked(app.getPath).mockReturnValue(snapPath); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - expect(lastDbPath).toBe(path.join(snapPath, 'stats.db')); - }); - }); - - describe('path.join cross-platform behavior', () => { - it('should use path.join to combine userData and stats.db', async () => { - const testUserData = '/test/user/data'; - const { app } = await import('electron'); - vi.mocked(app.getPath).mockReturnValue(testUserData); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - - // path.join should be used (not string concatenation) - 
expect(db.getDbPath()).toBe(path.join(testUserData, 'stats.db')); - }); - - it('should handle trailing slash in userData path', async () => { - const userDataWithSlash = '/test/user/data/'; - const { app } = await import('electron'); - vi.mocked(app.getPath).mockReturnValue(userDataWithSlash); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - - // path.join normalizes trailing slashes - const dbPath = db.getDbPath(); - expect(dbPath.endsWith('stats.db')).toBe(true); - // Should not have double slashes - expect(dbPath).not.toContain('//'); - }); - - it('should result in stats.db as the basename on all platforms', async () => { - const testUserData = '/any/path/structure'; - const { app } = await import('electron'); - vi.mocked(app.getPath).mockReturnValue(testUserData); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - - expect(path.basename(db.getDbPath())).toBe('stats.db'); - }); - - it('should result in userData directory as the parent', async () => { - const testUserData = '/any/path/structure'; - const { app } = await import('electron'); - vi.mocked(app.getPath).mockReturnValue(testUserData); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - - // Normalize expected path to use platform separators - const expectedDir = path.dirname(path.join(testUserData, 'stats.db')); - expect(path.dirname(db.getDbPath())).toBe(expectedDir); - }); - }); - - describe('directory creation cross-platform', () => { - it('should create directory on macOS if it does not exist', async () => { - mockFsExistsSync.mockReturnValue(false); - const macOsUserData = '/Users/testuser/Library/Application Support/Maestro'; - const { app } = await import('electron'); - vi.mocked(app.getPath).mockReturnValue(macOsUserData); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - // Normalize expected path to use platform 
separators - const expectedDir = path.dirname(path.join(macOsUserData, 'stats.db')); - expect(mockFsMkdirSync).toHaveBeenCalledWith(expectedDir, { recursive: true }); - }); - - it('should create directory on Windows if it does not exist', async () => { - mockFsExistsSync.mockReturnValue(false); - const windowsUserData = 'C:\\Users\\TestUser\\AppData\\Roaming\\Maestro'; - const { app } = await import('electron'); - vi.mocked(app.getPath).mockReturnValue(windowsUserData); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - expect(mockFsMkdirSync).toHaveBeenCalledWith(windowsUserData, { recursive: true }); - }); - - it('should create directory on Linux if it does not exist', async () => { - mockFsExistsSync.mockReturnValue(false); - const linuxUserData = '/home/testuser/.config/Maestro'; - const { app } = await import('electron'); - vi.mocked(app.getPath).mockReturnValue(linuxUserData); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - // Normalize expected path to use platform separators - const expectedDir = path.dirname(path.join(linuxUserData, 'stats.db')); - expect(mockFsMkdirSync).toHaveBeenCalledWith(expectedDir, { recursive: true }); - }); - - it('should use recursive option for deeply nested paths', async () => { - mockFsExistsSync.mockReturnValue(false); - const deepPath = '/very/deep/nested/path/structure/Maestro'; - const { app } = await import('electron'); - vi.mocked(app.getPath).mockReturnValue(deepPath); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - // Normalize expected path to use platform separators - const expectedDir = path.normalize(deepPath); - expect(mockFsMkdirSync).toHaveBeenCalledWith(expectedDir, { recursive: true }); - }); - }); - - describe('edge cases for path resolution', () => { - it('should handle unicode characters in path', async () => { - const 
unicodePath = '/Users/用户名/Library/Application Support/Maestro'; - const { app } = await import('electron'); - vi.mocked(app.getPath).mockReturnValue(unicodePath); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - expect(lastDbPath).toBe(path.join(unicodePath, 'stats.db')); - }); - - it('should handle emoji in path (macOS supports this)', async () => { - const emojiPath = '/Users/test/Documents/🎵Music/Maestro'; - const { app } = await import('electron'); - vi.mocked(app.getPath).mockReturnValue(emojiPath); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - expect(lastDbPath).toBe(path.join(emojiPath, 'stats.db')); - }); - - it('should handle very long paths (approaching Windows MAX_PATH)', async () => { - // Windows MAX_PATH is 260 characters by default - const longPath = '/very' + '/long'.repeat(50) + '/path/Maestro'; - const { app } = await import('electron'); - vi.mocked(app.getPath).mockReturnValue(longPath); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - - const dbPath = db.getDbPath(); - expect(dbPath.endsWith('stats.db')).toBe(true); - }); - - it('should handle path with single quotes', async () => { - const quotedPath = "/Users/O'Brien/Library/Application Support/Maestro"; - const { app } = await import('electron'); - vi.mocked(app.getPath).mockReturnValue(quotedPath); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - expect(lastDbPath).toBe(path.join(quotedPath, 'stats.db')); - }); - - it('should handle path with double quotes (Windows allows this)', async () => { - // Note: Double quotes aren't typically valid in Windows paths but path.join handles them - const quotedPath = 'C:\\Users\\Test"User\\AppData\\Roaming\\Maestro'; - const { app } = await import('electron'); - vi.mocked(app.getPath).mockReturnValue(quotedPath); - - const { 
StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - - const dbPath = db.getDbPath(); - expect(path.basename(dbPath)).toBe('stats.db'); - }); - - it('should handle path with ampersand', async () => { - const ampersandPath = '/Users/Smith & Jones/Library/Application Support/Maestro'; - const { app } = await import('electron'); - vi.mocked(app.getPath).mockReturnValue(ampersandPath); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - expect(lastDbPath).toBe(path.join(ampersandPath, 'stats.db')); - }); - }); - - describe('consistency across platform simulations', () => { - it('should always produce a path ending with stats.db regardless of platform', async () => { - const platforms = [ - '/Users/mac/Library/Application Support/Maestro', - 'C:\\Users\\Windows\\AppData\\Roaming\\Maestro', - '/home/linux/.config/Maestro', - ]; - - for (const platformPath of platforms) { - vi.resetModules(); - const { app } = await import('electron'); - vi.mocked(app.getPath).mockReturnValue(platformPath); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - - expect(path.basename(db.getDbPath())).toBe('stats.db'); - } - }); - - it('should always initialize successfully regardless of platform path format', async () => { - const platforms = [ - '/Users/mac/Library/Application Support/Maestro', - 'C:\\Users\\Windows\\AppData\\Roaming\\Maestro', - '/home/linux/.config/Maestro', - ]; - - for (const platformPath of platforms) { - vi.resetModules(); - vi.clearAllMocks(); - mockDb.pragma.mockReturnValue([{ user_version: 0 }]); - mockDb.prepare.mockReturnValue(mockStatement); - mockFsExistsSync.mockReturnValue(true); - - const { app } = await import('electron'); - vi.mocked(app.getPath).mockReturnValue(platformPath); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - expect(db.isReady()).toBe(true); - } - }); - - 
it('should pass correct directory to mkdirSync on all platforms', async () => { - const platforms = [ - '/Users/mac/Library/Application Support/Maestro', - 'C:\\Users\\Windows\\AppData\\Roaming\\Maestro', - '/home/linux/.config/Maestro', - ]; - - for (const platformPath of platforms) { - vi.resetModules(); - vi.clearAllMocks(); - mockDb.pragma.mockReturnValue([{ user_version: 0 }]); - mockDb.prepare.mockReturnValue(mockStatement); - mockFsExistsSync.mockReturnValue(false); - mockFsMkdirSync.mockClear(); - - const { app } = await import('electron'); - vi.mocked(app.getPath).mockReturnValue(platformPath); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - // Normalize expected path to use platform separators - const expectedDir = path.normalize(platformPath); - expect(mockFsMkdirSync).toHaveBeenCalledWith(expectedDir, { recursive: true }); - } - }); - }); - - describe('electron app.getPath integration', () => { - it('should call app.getPath with "userData" argument', async () => { - const { app } = await import('electron'); - - const { StatsDB } = await import('../../main/stats-db'); - new StatsDB(); - - expect(app.getPath).toHaveBeenCalledWith('userData'); - }); - - it('should respect the value returned by app.getPath', async () => { - const customPath = '/custom/electron/user/data/path'; - const { app } = await import('electron'); - vi.mocked(app.getPath).mockReturnValue(customPath); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - - expect(db.getDbPath()).toBe(path.join(customPath, 'stats.db')); - }); - - it('should use userData path at construction time (not lazily)', async () => { - const { app } = await import('electron'); - const initialPath = '/initial/path'; - vi.mocked(app.getPath).mockReturnValue(initialPath); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - - // Change the mock after construction - 
vi.mocked(app.getPath).mockReturnValue('/different/path'); - - // Should still use the initial path - expect(db.getDbPath()).toBe(path.join(initialPath, 'stats.db')); - }); - }); -}); - -/** - * Concurrent writes and database locking tests - * - * Tests that verify concurrent write operations don't cause database locking issues. - * better-sqlite3 uses synchronous operations and WAL mode for optimal concurrent access. - * - * Key behaviors tested: - * - Rapid sequential writes complete without errors - * - Concurrent write operations all succeed (via Promise.all) - * - Interleaved read/write operations work correctly - * - High-volume concurrent writes complete without data loss - * - WAL mode is properly enabled for concurrent access - */ -describe('Concurrent writes and database locking', () => { - let writeCount: number; - let insertedIds: string[]; - - beforeEach(() => { - vi.clearAllMocks(); - lastDbPath = null; - writeCount = 0; - insertedIds = []; - - // Mock pragma to return version 1 (skip migrations for these tests) - mockDb.pragma.mockImplementation((sql: string) => { - if (sql === 'user_version') return [{ user_version: 1 }]; - if (sql === 'journal_mode') return [{ journal_mode: 'wal' }]; - if (sql === 'journal_mode = WAL') return undefined; - return undefined; - }); - - // Track each write and generate unique IDs - mockStatement.run.mockImplementation(() => { - writeCount++; - return { changes: 1 }; - }); - - mockStatement.get.mockReturnValue({ count: 0, total_duration: 0 }); - mockStatement.all.mockReturnValue([]); - mockFsExistsSync.mockReturnValue(true); - }); - - afterEach(() => { - vi.resetModules(); - }); - - describe('WAL mode for concurrent access', () => { - it('should enable WAL journal mode on initialization', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - expect(mockDb.pragma).toHaveBeenCalledWith('journal_mode = WAL'); - }); - - it('should enable WAL mode before 
running migrations', async () => { - const pragmaCalls: string[] = []; - mockDb.pragma.mockImplementation((sql: string) => { - pragmaCalls.push(sql); - if (sql === 'user_version') return [{ user_version: 0 }]; - return undefined; - }); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - // WAL mode should be set early in initialization - const walIndex = pragmaCalls.indexOf('journal_mode = WAL'); - const versionIndex = pragmaCalls.indexOf('user_version'); - expect(walIndex).toBeGreaterThan(-1); - expect(versionIndex).toBeGreaterThan(-1); - expect(walIndex).toBeLessThan(versionIndex); - }); - }); - - describe('rapid sequential writes', () => { - it('should handle 10 rapid sequential query event inserts', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - // Clear mocks after initialize() to count only test operations - mockStatement.run.mockClear(); - - const ids: string[] = []; - for (let i = 0; i < 10; i++) { - const id = db.insertQueryEvent({ - sessionId: `session-${i}`, - agentType: 'claude-code', - source: 'user', - startTime: Date.now() + i, - duration: 1000 + i, - projectPath: '/test/project', - tabId: `tab-${i}`, - }); - ids.push(id); - } - - expect(ids).toHaveLength(10); - // All IDs should be unique - expect(new Set(ids).size).toBe(10); - expect(mockStatement.run).toHaveBeenCalledTimes(10); - }); - - it('should handle 10 rapid sequential Auto Run session inserts', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - // Clear mocks after initialize() to count only test operations - mockStatement.run.mockClear(); - - const ids: string[] = []; - for (let i = 0; i < 10; i++) { - const id = db.insertAutoRunSession({ - sessionId: `session-${i}`, - agentType: 'claude-code', - documentPath: `/docs/TASK-${i}.md`, - startTime: Date.now() + i, - duration: 0, - 
tasksTotal: 5, - tasksCompleted: 0, - projectPath: '/test/project', - }); - ids.push(id); - } - - expect(ids).toHaveLength(10); - expect(new Set(ids).size).toBe(10); - expect(mockStatement.run).toHaveBeenCalledTimes(10); - }); - - it('should handle 10 rapid sequential task inserts', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - // Clear mocks after initialize() to count only test operations - mockStatement.run.mockClear(); - - const ids: string[] = []; - for (let i = 0; i < 10; i++) { - const id = db.insertAutoRunTask({ - autoRunSessionId: 'auto-run-1', - sessionId: 'session-1', - agentType: 'claude-code', - taskIndex: i, - taskContent: `Task ${i} content`, - startTime: Date.now() + i, - duration: 1000 + i, - success: i % 2 === 0, - }); - ids.push(id); - } - - expect(ids).toHaveLength(10); - expect(new Set(ids).size).toBe(10); - expect(mockStatement.run).toHaveBeenCalledTimes(10); - }); - }); - - describe('concurrent write operations', () => { - it('should handle concurrent writes to different tables via Promise.all', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - // Clear mocks after initialize() to count only test operations - mockStatement.run.mockClear(); - - // Simulate concurrent writes by wrapping synchronous operations in promises - const writeOperations = [ - Promise.resolve().then(() => - db.insertQueryEvent({ - sessionId: 'session-1', - agentType: 'claude-code', - source: 'user', - startTime: Date.now(), - duration: 5000, - }) - ), - Promise.resolve().then(() => - db.insertAutoRunSession({ - sessionId: 'session-2', - agentType: 'claude-code', - startTime: Date.now(), - duration: 0, - tasksTotal: 3, - }) - ), - Promise.resolve().then(() => - db.insertAutoRunTask({ - autoRunSessionId: 'auto-1', - sessionId: 'session-3', - agentType: 'claude-code', - taskIndex: 0, - startTime: Date.now(), - duration: 
1000, - success: true, - }) - ), - ]; - - const results = await Promise.all(writeOperations); - - expect(results).toHaveLength(3); - expect(results.every((id) => typeof id === 'string' && id.length > 0)).toBe(true); - expect(mockStatement.run).toHaveBeenCalledTimes(3); - }); - - it('should handle 20 concurrent query event inserts via Promise.all', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - // Clear mocks after initialize() to count only test operations - mockStatement.run.mockClear(); - - const writeOperations = Array.from({ length: 20 }, (_, i) => - Promise.resolve().then(() => - db.insertQueryEvent({ - sessionId: `session-${i}`, - agentType: i % 2 === 0 ? 'claude-code' : 'opencode', - source: i % 3 === 0 ? 'auto' : 'user', - startTime: Date.now() + i, - duration: 1000 + i * 100, - projectPath: `/project/${i}`, - }) - ) - ); - - const results = await Promise.all(writeOperations); - - expect(results).toHaveLength(20); - expect(new Set(results).size).toBe(20); // All IDs unique - expect(mockStatement.run).toHaveBeenCalledTimes(20); - }); - - it('should handle mixed insert and update operations concurrently', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - // Clear mocks after initialize() to count only test operations - mockStatement.run.mockClear(); - - const operations = [ - Promise.resolve().then(() => - db.insertQueryEvent({ - sessionId: 'session-1', - agentType: 'claude-code', - source: 'user', - startTime: Date.now(), - duration: 5000, - }) - ), - Promise.resolve().then(() => - db.updateAutoRunSession('existing-session', { - duration: 60000, - tasksCompleted: 5, - }) - ), - Promise.resolve().then(() => - db.insertAutoRunTask({ - autoRunSessionId: 'auto-1', - sessionId: 'session-2', - agentType: 'claude-code', - taskIndex: 0, - startTime: Date.now(), - duration: 1000, - success: true, - }) - ), - ]; - - 
const results = await Promise.all(operations); - - expect(results).toHaveLength(3); - // First and third return IDs, second returns boolean - expect(typeof results[0]).toBe('string'); - expect(typeof results[1]).toBe('boolean'); - expect(typeof results[2]).toBe('string'); - expect(mockStatement.run).toHaveBeenCalledTimes(3); - }); - }); - - describe('interleaved read/write operations', () => { - it('should handle reads during writes without blocking', async () => { - mockStatement.all.mockReturnValue([ - { - id: 'event-1', - session_id: 'session-1', - agent_type: 'claude-code', - source: 'user', - start_time: Date.now(), - duration: 5000, - project_path: '/test', - tab_id: null, - }, - ]); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const operations = [ - // Write - Promise.resolve().then(() => - db.insertQueryEvent({ - sessionId: 'session-new', - agentType: 'claude-code', - source: 'user', - startTime: Date.now(), - duration: 3000, - }) - ), - // Read - Promise.resolve().then(() => db.getQueryEvents('day')), - // Write - Promise.resolve().then(() => - db.insertAutoRunSession({ - sessionId: 'session-2', - agentType: 'claude-code', - startTime: Date.now(), - duration: 0, - tasksTotal: 5, - }) - ), - // Read - Promise.resolve().then(() => db.getAutoRunSessions('week')), - ]; - - const results = await Promise.all(operations); - - expect(results).toHaveLength(4); - expect(typeof results[0]).toBe('string'); // Insert ID - expect(Array.isArray(results[1])).toBe(true); // Query events array - expect(typeof results[2]).toBe('string'); // Insert ID - expect(Array.isArray(results[3])).toBe(true); // Auto run sessions array - }); - - it('should allow reads to complete while multiple writes are pending', async () => { - let readCompleted = false; - mockStatement.all.mockImplementation(() => { - readCompleted = true; - return [{ count: 42 }]; - }); - - const { StatsDB } = await import('../../main/stats-db'); - 
const db = new StatsDB(); - db.initialize(); - - // Start multiple writes - const writes = Array.from({ length: 5 }, (_, i) => - Promise.resolve().then(() => - db.insertQueryEvent({ - sessionId: `session-${i}`, - agentType: 'claude-code', - source: 'user', - startTime: Date.now() + i, - duration: 1000, - }) - ) - ); - - // Interleave a read - const read = Promise.resolve().then(() => db.getQueryEvents('day')); - - const [writeResults, readResult] = await Promise.all([Promise.all(writes), read]); - - expect(writeResults).toHaveLength(5); - expect(readCompleted).toBe(true); - }); - }); - - describe('high-volume concurrent writes', () => { - it('should handle 50 concurrent writes without data loss', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - // Reset counter after initialize() to count only test operations - const insertedCount = { value: 0 }; - mockStatement.run.mockImplementation(() => { - insertedCount.value++; - return { changes: 1 }; - }); - - const writeOperations = Array.from({ length: 50 }, (_, i) => - Promise.resolve().then(() => - db.insertQueryEvent({ - sessionId: `session-${i}`, - agentType: 'claude-code', - source: i % 2 === 0 ? 
'user' : 'auto', - startTime: Date.now() + i, - duration: 1000 + i, - }) - ) - ); - - const results = await Promise.all(writeOperations); - - expect(results).toHaveLength(50); - expect(insertedCount.value).toBe(50); // All writes completed - expect(new Set(results).size).toBe(50); // All IDs unique - }); - - it('should handle 100 concurrent writes across all three tables', async () => { - const writesByTable = { query: 0, session: 0, task: 0 }; - - // Track which table each insert goes to based on SQL - mockDb.prepare.mockImplementation((sql: string) => { - const tracker = mockStatement; - if (sql.includes('INSERT INTO query_events')) { - tracker.run = vi.fn(() => { - writesByTable.query++; - return { changes: 1 }; - }); - } else if (sql.includes('INSERT INTO auto_run_sessions')) { - tracker.run = vi.fn(() => { - writesByTable.session++; - return { changes: 1 }; - }); - } else if (sql.includes('INSERT INTO auto_run_tasks')) { - tracker.run = vi.fn(() => { - writesByTable.task++; - return { changes: 1 }; - }); - } - return tracker; - }); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - // 40 query events + 30 sessions + 30 tasks = 100 writes - const queryWrites = Array.from({ length: 40 }, (_, i) => - Promise.resolve().then(() => - db.insertQueryEvent({ - sessionId: `query-session-${i}`, - agentType: 'claude-code', - source: 'user', - startTime: Date.now() + i, - duration: 1000, - }) - ) - ); - - const sessionWrites = Array.from({ length: 30 }, (_, i) => - Promise.resolve().then(() => - db.insertAutoRunSession({ - sessionId: `autorun-session-${i}`, - agentType: 'claude-code', - startTime: Date.now() + i, - duration: 0, - tasksTotal: 5, - }) - ) - ); - - const taskWrites = Array.from({ length: 30 }, (_, i) => - Promise.resolve().then(() => - db.insertAutoRunTask({ - autoRunSessionId: `auto-${i}`, - sessionId: `task-session-${i}`, - agentType: 'claude-code', - taskIndex: i, - startTime: Date.now() + i, - 
duration: 1000, - success: true, - }) - ) - ); - - const allResults = await Promise.all([...queryWrites, ...sessionWrites, ...taskWrites]); - - expect(allResults).toHaveLength(100); - expect(allResults.every((id) => typeof id === 'string' && id.length > 0)).toBe(true); - expect(writesByTable.query).toBe(40); - expect(writesByTable.session).toBe(30); - expect(writesByTable.task).toBe(30); - }); - }); - - describe('unique ID generation under concurrent load', () => { - it('should generate unique IDs even with high-frequency calls', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - // Generate 100 IDs as fast as possible - const ids: string[] = []; - for (let i = 0; i < 100; i++) { - const id = db.insertQueryEvent({ - sessionId: 'session-1', - agentType: 'claude-code', - source: 'user', - startTime: Date.now(), - duration: 1000, - }); - ids.push(id); - } - - // All IDs must be unique - expect(new Set(ids).size).toBe(100); - }); - - it('should generate IDs with timestamp-random format', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const id = db.insertQueryEvent({ - sessionId: 'session-1', - agentType: 'claude-code', - source: 'user', - startTime: Date.now(), - duration: 1000, - }); - - // ID format: timestamp-randomString - expect(id).toMatch(/^\d+-[a-z0-9]+$/); - }); - }); - - describe('database connection stability', () => { - it('should maintain stable connection during intensive operations', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - // Perform many operations - for (let i = 0; i < 30; i++) { - db.insertQueryEvent({ - sessionId: `session-${i}`, - agentType: 'claude-code', - source: 'user', - startTime: Date.now() + i, - duration: 1000, - }); - } - - // Database should still be ready - expect(db.isReady()).toBe(true); - }); - - 
it('should handle operations after previous operations complete', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - // Track call count manually since we're testing sequential batches - // Set up tracking AFTER initialize() to count only test operations - let runCallCount = 0; - const trackingStatement = { - run: vi.fn(() => { - runCallCount++; - return { changes: 1 }; - }), - get: vi.fn(() => ({ count: 0, total_duration: 0 })), - all: vi.fn(() => []), - }; - mockDb.prepare.mockReturnValue(trackingStatement); - - // First batch - for (let i = 0; i < 10; i++) { - db.insertQueryEvent({ - sessionId: `batch1-${i}`, - agentType: 'claude-code', - source: 'user', - startTime: Date.now() + i, - duration: 1000, - }); - } - - // Second batch (should work without issues) - const secondBatchIds: string[] = []; - for (let i = 0; i < 10; i++) { - const id = db.insertQueryEvent({ - sessionId: `batch2-${i}`, - agentType: 'claude-code', - source: 'user', - startTime: Date.now() + 100 + i, - duration: 2000, - }); - secondBatchIds.push(id); - } - - expect(secondBatchIds).toHaveLength(10); - expect(runCallCount).toBe(20); - }); - }); -}); - -/** - * electron-rebuild verification tests - * - * These tests verify that better-sqlite3 is correctly configured to be built - * via electron-rebuild on all platforms (macOS, Windows, Linux). The native - * module must be compiled against Electron's Node.js headers to work correctly - * in the Electron runtime. - * - * Key verification points: - * 1. postinstall script is configured to run electron-rebuild - * 2. better-sqlite3 is excluded from asar packaging (must be unpacked) - * 3. Native module paths are platform-appropriate - * 4. CI/CD workflow includes architecture verification - * - * Note: These tests verify the configuration and mock the build process. - * Actual native module compilation is tested in CI/CD workflows. 
- */ -describe('electron-rebuild verification for better-sqlite3', () => { - describe('package.json configuration', () => { - it('should have postinstall script that runs electron-rebuild for better-sqlite3', async () => { - // Use node:fs to bypass the mock and access the real filesystem - const fs = await import('node:fs'); - const path = await import('node:path'); - - // Find package.json relative to the test file - let packageJsonPath = path.join(__dirname, '..', '..', '..', 'package.json'); - - // The package.json should exist and contain electron-rebuild for better-sqlite3 - const packageJsonContent = fs.readFileSync(packageJsonPath, 'utf8'); - const packageJson = JSON.parse(packageJsonContent); - - expect(packageJson.scripts).toBeDefined(); - expect(packageJson.scripts.postinstall).toBeDefined(); - expect(packageJson.scripts.postinstall).toContain('electron-rebuild'); - expect(packageJson.scripts.postinstall).toContain('better-sqlite3'); - }); - - it('should have better-sqlite3 in dependencies', async () => { - const fs = await import('node:fs'); - const path = await import('node:path'); - - let packageJsonPath = path.join(__dirname, '..', '..', '..', 'package.json'); - const packageJsonContent = fs.readFileSync(packageJsonPath, 'utf8'); - const packageJson = JSON.parse(packageJsonContent); - - expect(packageJson.dependencies).toBeDefined(); - expect(packageJson.dependencies['better-sqlite3']).toBeDefined(); - }); - - it('should have electron-rebuild in devDependencies', async () => { - const fs = await import('node:fs'); - const path = await import('node:path'); - - let packageJsonPath = path.join(__dirname, '..', '..', '..', 'package.json'); - const packageJsonContent = fs.readFileSync(packageJsonPath, 'utf8'); - const packageJson = JSON.parse(packageJsonContent); - - expect(packageJson.devDependencies).toBeDefined(); - expect(packageJson.devDependencies['electron-rebuild']).toBeDefined(); - }); - - it('should have @types/better-sqlite3 in 
devDependencies', async () => { - const fs = await import('node:fs'); - const path = await import('node:path'); - - let packageJsonPath = path.join(__dirname, '..', '..', '..', 'package.json'); - const packageJsonContent = fs.readFileSync(packageJsonPath, 'utf8'); - const packageJson = JSON.parse(packageJsonContent); - - expect(packageJson.devDependencies).toBeDefined(); - expect(packageJson.devDependencies['@types/better-sqlite3']).toBeDefined(); - }); - - it('should configure asarUnpack for better-sqlite3 (native modules must be unpacked)', async () => { - const fs = await import('node:fs'); - const path = await import('node:path'); - - let packageJsonPath = path.join(__dirname, '..', '..', '..', 'package.json'); - const packageJsonContent = fs.readFileSync(packageJsonPath, 'utf8'); - const packageJson = JSON.parse(packageJsonContent); - - // electron-builder config should unpack native modules from asar - expect(packageJson.build).toBeDefined(); - expect(packageJson.build.asarUnpack).toBeDefined(); - expect(Array.isArray(packageJson.build.asarUnpack)).toBe(true); - expect(packageJson.build.asarUnpack).toContain('node_modules/better-sqlite3/**/*'); - }); - - it('should disable npmRebuild in electron-builder (we use postinstall instead)', async () => { - const fs = await import('node:fs'); - const path = await import('node:path'); - - let packageJsonPath = path.join(__dirname, '..', '..', '..', 'package.json'); - const packageJsonContent = fs.readFileSync(packageJsonPath, 'utf8'); - const packageJson = JSON.parse(packageJsonContent); - - // npmRebuild should be false because we explicitly run electron-rebuild - // in postinstall and CI/CD workflows - expect(packageJson.build).toBeDefined(); - expect(packageJson.build.npmRebuild).toBe(false); - }); - }); - - describe('CI/CD workflow configuration', () => { - it('should have release workflow that rebuilds native modules', async () => { - const fs = await import('node:fs'); - const path = await import('node:path'); - 
- const workflowPath = path.join( - __dirname, - '..', - '..', - '..', - '.github', - 'workflows', - 'release.yml' - ); - const workflowContent = fs.readFileSync(workflowPath, 'utf8'); - - // Workflow should run postinstall which triggers electron-rebuild - expect(workflowContent).toContain('npm run postinstall'); - expect(workflowContent).toContain('npm_config_build_from_source'); - }); - - it('should configure builds for all target platforms', async () => { - const fs = await import('node:fs'); - const path = await import('node:path'); - - const workflowPath = path.join( - __dirname, - '..', - '..', - '..', - '.github', - 'workflows', - 'release.yml' - ); - const workflowContent = fs.readFileSync(workflowPath, 'utf8'); - - // Verify all platforms are configured - expect(workflowContent).toContain('macos-latest'); - expect(workflowContent).toContain('ubuntu-latest'); - expect(workflowContent).toContain('ubuntu-24.04-arm'); // ARM64 Linux - expect(workflowContent).toContain('windows-latest'); - }); - - it('should have architecture verification for native modules', async () => { - const fs = await import('node:fs'); - const path = await import('node:path'); - - const workflowPath = path.join( - __dirname, - '..', - '..', - '..', - '.github', - 'workflows', - 'release.yml' - ); - const workflowContent = fs.readFileSync(workflowPath, 'utf8'); - - // Workflow should verify native module architecture before packaging - expect(workflowContent).toContain('Verify'); - expect(workflowContent).toContain('electron-rebuild'); - }); - - it('should use --force flag for electron-rebuild', async () => { - const fs = await import('node:fs'); - const path = await import('node:path'); - - let packageJsonPath = path.join(__dirname, '..', '..', '..', 'package.json'); - const packageJsonContent = fs.readFileSync(packageJsonPath, 'utf8'); - const packageJson = JSON.parse(packageJsonContent); - - // The -f (force) flag ensures rebuild even if binaries exist - 
expect(packageJson.scripts.postinstall).toContain('-f'); - }); - }); - - describe('native module structure (macOS verification)', () => { - it('should have better-sqlite3 native binding in expected location', async () => { - const fs = await import('node:fs'); - const path = await import('node:path'); - - // Check if the native binding exists in build/Release (compiled location) - const nativeModulePath = path.join( - __dirname, - '..', - '..', - '..', - 'node_modules', - 'better-sqlite3', - 'build', - 'Release', - 'better_sqlite3.node' - ); - - // The native module should exist after electron-rebuild - // This test will pass on dev machines where npm install was run - const exists = fs.existsSync(nativeModulePath); - - // If the native module doesn't exist, check if there's a prebuilt binary - if (!exists) { - // Check for prebuilt binaries in the bin directory - const binDir = path.join( - __dirname, - '..', - '..', - '..', - 'node_modules', - 'better-sqlite3', - 'bin' - ); - - if (fs.existsSync(binDir)) { - const binContents = fs.readdirSync(binDir); - // Should have platform-specific prebuilt binaries - expect(binContents.length).toBeGreaterThan(0); - } else { - // Neither compiled nor prebuilt binary exists - fail - expect(exists).toBe(true); - } - } - }); - - it('should verify binding.gyp exists for native compilation', async () => { - const fs = await import('node:fs'); - const path = await import('node:path'); - - const bindingGypPath = path.join( - __dirname, - '..', - '..', - '..', - 'node_modules', - 'better-sqlite3', - 'binding.gyp' - ); - - // binding.gyp is required for node-gyp compilation - expect(fs.existsSync(bindingGypPath)).toBe(true); - }); - }); - - describe('platform-specific build paths', () => { - it('should verify macOS native module extension is .node', () => { - // On macOS, native modules have .node extension (Mach-O bundle) - const platform = process.platform; - if (platform === 'darwin') { - expect('.node').toBe('.node'); - } - }); - 
- it('should verify Windows native module extension is .node', () => { - // On Windows, native modules have .node extension (DLL) - const platform = process.platform; - if (platform === 'win32') { - expect('.node').toBe('.node'); - } - }); - - it('should verify Linux native module extension is .node', () => { - // On Linux, native modules have .node extension (shared object) - const platform = process.platform; - if (platform === 'linux') { - expect('.node').toBe('.node'); - } - }); - - it('should verify electron target is specified in postinstall', async () => { - const fs = await import('node:fs'); - const path = await import('node:path'); - - let packageJsonPath = path.join(__dirname, '..', '..', '..', 'package.json'); - const packageJsonContent = fs.readFileSync(packageJsonPath, 'utf8'); - const packageJson = JSON.parse(packageJsonContent); - - // postinstall uses electron-rebuild which automatically detects electron version - expect(packageJson.scripts.postinstall).toContain('electron-rebuild'); - // The -w flag specifies which modules to rebuild - expect(packageJson.scripts.postinstall).toContain('-w'); - }); - }); - - describe('database import verification', () => { - it('should be able to mock better-sqlite3 for testing', async () => { - // This test verifies our mock setup is correct - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - - // Should be able to initialize with mocked database - expect(() => db.initialize()).not.toThrow(); - expect(db.isReady()).toBe(true); - }); - - it('should verify StatsDB uses better-sqlite3 correctly', async () => { - // Reset mocks to track this specific test - vi.clearAllMocks(); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - // Database should be initialized and ready - expect(db.isReady()).toBe(true); - - // Verify WAL mode is enabled for concurrent access - expect(mockDb.pragma).toHaveBeenCalled(); - }); - }); -}); - 
-/** - * File path normalization tests - * - * These tests verify that file paths are normalized to use forward slashes - * consistently across platforms. This ensures: - * 1. Windows-style paths (backslashes) are converted to forward slashes - * 2. Paths stored in the database are platform-independent - * 3. Filtering by project path works regardless of input path format - * 4. Cross-platform data portability is maintained - */ -describe('File path normalization in database (forward slashes consistently)', () => { - beforeEach(() => { - vi.clearAllMocks(); - lastDbPath = null; - mockDb.pragma.mockReturnValue([{ user_version: 1 }]); - mockDb.prepare.mockReturnValue(mockStatement); - mockStatement.run.mockReturnValue({ changes: 1 }); - mockStatement.all.mockReturnValue([]); - mockFsExistsSync.mockReturnValue(true); - mockFsMkdirSync.mockClear(); - }); - - afterEach(() => { - vi.resetModules(); - }); - - describe('normalizePath utility function', () => { - it('should convert Windows backslashes to forward slashes', async () => { - const { normalizePath } = await import('../../main/stats-db'); - expect(normalizePath('C:\\Users\\TestUser\\Projects\\MyApp')).toBe( - 'C:/Users/TestUser/Projects/MyApp' - ); - }); - - it('should preserve Unix-style forward slashes unchanged', async () => { - const { normalizePath } = await import('../../main/stats-db'); - expect(normalizePath('/Users/testuser/Projects/MyApp')).toBe( - '/Users/testuser/Projects/MyApp' - ); - }); - - it('should handle mixed slashes (normalize to forward slashes)', async () => { - const { normalizePath } = await import('../../main/stats-db'); - expect(normalizePath('C:\\Users/TestUser\\Projects/MyApp')).toBe( - 'C:/Users/TestUser/Projects/MyApp' - ); - }); - - it('should handle UNC paths (Windows network shares)', async () => { - const { normalizePath } = await import('../../main/stats-db'); - expect(normalizePath('\\\\NetworkServer\\Share\\Folder\\File.md')).toBe( - '//NetworkServer/Share/Folder/File.md' - 
); - }); - - it('should return null for null input', async () => { - const { normalizePath } = await import('../../main/stats-db'); - expect(normalizePath(null)).toBeNull(); - }); - - it('should return null for undefined input', async () => { - const { normalizePath } = await import('../../main/stats-db'); - expect(normalizePath(undefined)).toBeNull(); - }); - - it('should handle empty string', async () => { - const { normalizePath } = await import('../../main/stats-db'); - expect(normalizePath('')).toBe(''); - }); - - it('should handle path with spaces', async () => { - const { normalizePath } = await import('../../main/stats-db'); - expect(normalizePath('C:\\Users\\Test User\\My Documents\\Project')).toBe( - 'C:/Users/Test User/My Documents/Project' - ); - }); - - it('should handle path with special characters', async () => { - const { normalizePath } = await import('../../main/stats-db'); - expect(normalizePath('C:\\Users\\test.user-name\\Projects\\[MyApp]')).toBe( - 'C:/Users/test.user-name/Projects/[MyApp]' - ); - }); - - it('should handle consecutive backslashes', async () => { - const { normalizePath } = await import('../../main/stats-db'); - expect(normalizePath('C:\\\\Users\\\\TestUser')).toBe('C://Users//TestUser'); - }); - - it('should handle path ending with backslash', async () => { - const { normalizePath } = await import('../../main/stats-db'); - expect(normalizePath('C:\\Users\\TestUser\\')).toBe('C:/Users/TestUser/'); - }); - - it('should handle Japanese/CJK characters in path', async () => { - const { normalizePath } = await import('../../main/stats-db'); - expect(normalizePath('C:\\Users\\ユーザー\\プロジェクト')).toBe( - 'C:/Users/ユーザー/プロジェクト' - ); - }); - }); - - describe('insertQueryEvent path normalization', () => { - it('should normalize Windows projectPath to forward slashes', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - db.insertQueryEvent({ - sessionId: 'session-1', - 
agentType: 'claude-code', - source: 'user', - startTime: Date.now(), - duration: 5000, - projectPath: 'C:\\Users\\TestUser\\Projects\\MyApp', - tabId: 'tab-1', - }); - - // Verify that the statement was called with normalized path - // insertQueryEvent now has 9 parameters: id, sessionId, agentType, source, startTime, duration, projectPath, tabId, isRemote - expect(mockStatement.run).toHaveBeenCalledWith( - expect.any(String), // id - 'session-1', - 'claude-code', - 'user', - expect.any(Number), // startTime - 5000, - 'C:/Users/TestUser/Projects/MyApp', // normalized path - 'tab-1', - null // isRemote (undefined → null) - ); - }); - - it('should preserve Unix projectPath unchanged', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - db.insertQueryEvent({ - sessionId: 'session-1', - agentType: 'claude-code', - source: 'user', - startTime: Date.now(), - duration: 5000, - projectPath: '/Users/testuser/Projects/MyApp', - tabId: 'tab-1', - }); - - // insertQueryEvent now has 9 parameters including isRemote - expect(mockStatement.run).toHaveBeenCalledWith( - expect.any(String), - 'session-1', - 'claude-code', - 'user', - expect.any(Number), - 5000, - '/Users/testuser/Projects/MyApp', // unchanged - 'tab-1', - null // isRemote (undefined → null) - ); - }); - - it('should store null for undefined projectPath', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - db.insertQueryEvent({ - sessionId: 'session-1', - agentType: 'claude-code', - source: 'user', - startTime: Date.now(), - duration: 5000, - // projectPath is undefined - }); - - // insertQueryEvent now has 9 parameters including isRemote - expect(mockStatement.run).toHaveBeenCalledWith( - expect.any(String), - 'session-1', - 'claude-code', - 'user', - expect.any(Number), - 5000, - null, // undefined becomes null - null, // tabId undefined → null - null // isRemote undefined 
→ null - ); - }); - }); - - describe('getQueryEvents filter path normalization', () => { - it('should normalize Windows filter projectPath for matching', async () => { - // Setup: database returns events with normalized paths - mockStatement.all.mockReturnValue([ - { - id: 'event-1', - session_id: 'session-1', - agent_type: 'claude-code', - source: 'user', - start_time: Date.now(), - duration: 5000, - project_path: 'C:/Users/TestUser/Projects/MyApp', // normalized in DB - tab_id: 'tab-1', - }, - ]); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - // Query with Windows-style path (backslashes) - const events = db.getQueryEvents('day', { - projectPath: 'C:\\Users\\TestUser\\Projects\\MyApp', // Windows style - }); - - // Verify the prepared statement was called with normalized path - expect(mockDb.prepare).toHaveBeenCalledWith(expect.stringContaining('project_path = ?')); - - // The filter should be normalized to forward slashes for matching - const prepareCallArgs = mockStatement.all.mock.calls[0]; - expect(prepareCallArgs).toContain('C:/Users/TestUser/Projects/MyApp'); - }); - - it('should preserve Unix filter projectPath unchanged', async () => { - mockStatement.all.mockReturnValue([]); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - db.getQueryEvents('week', { - projectPath: '/Users/testuser/Projects/MyApp', - }); - - const prepareCallArgs = mockStatement.all.mock.calls[0]; - expect(prepareCallArgs).toContain('/Users/testuser/Projects/MyApp'); - }); - }); - - describe('insertAutoRunSession path normalization', () => { - it('should normalize Windows documentPath and projectPath', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - db.insertAutoRunSession({ - sessionId: 'session-1', - agentType: 'claude-code', - documentPath: 'C:\\Users\\TestUser\\Docs\\task.md', - 
startTime: Date.now(), - duration: 60000, - tasksTotal: 5, - tasksCompleted: 3, - projectPath: 'C:\\Users\\TestUser\\Projects\\MyApp', - }); - - expect(mockStatement.run).toHaveBeenCalledWith( - expect.any(String), - 'session-1', - 'claude-code', - 'C:/Users/TestUser/Docs/task.md', // normalized documentPath - expect.any(Number), - 60000, - 5, - 3, - 'C:/Users/TestUser/Projects/MyApp' // normalized projectPath - ); - }); - - it('should handle null paths correctly', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - db.insertAutoRunSession({ - sessionId: 'session-1', - agentType: 'claude-code', - startTime: Date.now(), - duration: 60000, - // documentPath and projectPath are undefined - }); - - expect(mockStatement.run).toHaveBeenCalledWith( - expect.any(String), - 'session-1', - 'claude-code', - null, // undefined documentPath becomes null - expect.any(Number), - 60000, - null, - null, - null // undefined projectPath becomes null - ); - }); - }); - - describe('updateAutoRunSession path normalization', () => { - it('should normalize Windows documentPath on update', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - db.updateAutoRunSession('auto-run-1', { - duration: 120000, - documentPath: 'D:\\Projects\\NewDocs\\updated.md', - }); - - // The SQL should include document_path update with normalized path - expect(mockDb.prepare).toHaveBeenCalledWith(expect.stringContaining('document_path = ?')); - expect(mockStatement.run).toHaveBeenCalled(); - }); - - it('should handle undefined documentPath in update (no change)', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - db.updateAutoRunSession('auto-run-1', { - duration: 120000, - tasksCompleted: 5, - // documentPath not included - }); - - // The SQL should NOT include document_path - const prepareCalls = 
mockDb.prepare.mock.calls; - const updateCall = prepareCalls.find((call) => call[0]?.includes?.('UPDATE')); - if (updateCall) { - expect(updateCall[0]).not.toContain('document_path'); - } - }); - }); - - describe('cross-platform path consistency', () => { - it('should produce identical normalized paths from Windows and Unix inputs for same logical path', async () => { - const { normalizePath } = await import('../../main/stats-db'); - - const windowsPath = 'C:\\Users\\Test\\project'; - const unixPath = 'C:/Users/Test/project'; - - expect(normalizePath(windowsPath)).toBe(normalizePath(unixPath)); - }); - - it('should allow filtering by either path style and match stored normalized path', async () => { - // Setup: database returns events with normalized paths - const storedPath = 'C:/Users/TestUser/Projects/MyApp'; - mockStatement.all.mockReturnValue([ - { - id: 'event-1', - session_id: 'session-1', - agent_type: 'claude-code', - source: 'user', - start_time: Date.now(), - duration: 5000, - project_path: storedPath, - tab_id: 'tab-1', - }, - ]); - - const { StatsDB, normalizePath } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - // Both Windows and Unix style filters should normalize to the same value - const windowsFilter = 'C:\\Users\\TestUser\\Projects\\MyApp'; - const unixFilter = 'C:/Users/TestUser/Projects/MyApp'; - - expect(normalizePath(windowsFilter)).toBe(storedPath); - expect(normalizePath(unixFilter)).toBe(storedPath); - }); - - it('should handle Linux paths correctly', async () => { - const { normalizePath } = await import('../../main/stats-db'); - expect(normalizePath('/home/user/.config/maestro')).toBe('/home/user/.config/maestro'); - }); - - it('should handle macOS Application Support paths correctly', async () => { - const { normalizePath } = await import('../../main/stats-db'); - expect(normalizePath('/Users/test/Library/Application Support/Maestro')).toBe( - '/Users/test/Library/Application Support/Maestro' 
- ); - }); - }); - - describe('edge cases and special characters', () => { - it('should handle paths with unicode characters', async () => { - const { normalizePath } = await import('../../main/stats-db'); - expect(normalizePath('C:\\Users\\用户\\项目')).toBe('C:/Users/用户/项目'); - }); - - it('should handle paths with emoji (if supported by filesystem)', async () => { - const { normalizePath } = await import('../../main/stats-db'); - expect(normalizePath('C:\\Users\\Test\\📁Projects\\MyApp')).toBe( - 'C:/Users/Test/📁Projects/MyApp' - ); - }); - - it('should handle very long paths', async () => { - const { normalizePath } = await import('../../main/stats-db'); - const longPath = - 'C:\\Users\\TestUser\\' + 'VeryLongDirectoryName\\'.repeat(20) + 'FinalFile.md'; - const normalizedPath = normalizePath(longPath); - expect(normalizedPath).not.toContain('\\'); - expect(normalizedPath).toContain('/'); - }); - - it('should handle root paths', async () => { - const { normalizePath } = await import('../../main/stats-db'); - expect(normalizePath('C:\\')).toBe('C:/'); - expect(normalizePath('/')).toBe('/'); - }); - - it('should handle drive letter only', async () => { - const { normalizePath } = await import('../../main/stats-db'); - expect(normalizePath('D:')).toBe('D:'); - }); - - it('should handle paths with dots', async () => { - const { normalizePath } = await import('../../main/stats-db'); - expect(normalizePath('C:\\Users\\..\\TestUser\\.hidden\\file.txt')).toBe( - 'C:/Users/../TestUser/.hidden/file.txt' - ); - }); - }); -}); - -/** - * Database VACUUM functionality tests - * - * Tests for the automatic database vacuum feature that runs on startup - * when the database exceeds 100MB to maintain performance. 
- */ -describe('Database VACUUM functionality', () => { - beforeEach(() => { - vi.clearAllMocks(); - lastDbPath = null; - mockDb.pragma.mockReturnValue([{ user_version: 0 }]); - mockDb.prepare.mockReturnValue(mockStatement); - mockStatement.run.mockReturnValue({ changes: 1 }); - mockFsExistsSync.mockReturnValue(true); - // Reset statSync to throw by default (simulates file not existing) - mockFsStatSync.mockImplementation(() => { - throw new Error('ENOENT: no such file or directory'); - }); - }); - - afterEach(() => { - vi.resetModules(); - }); - - describe('getDatabaseSize', () => { - it('should return 0 when statSync throws (file missing)', async () => { - // The mock fs.statSync is not configured to return size by default - // so getDatabaseSize will catch the error and return 0 - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - // Since mockFsExistsSync.mockReturnValue(true) is set but statSync is not mocked, - // getDatabaseSize will try to call the real statSync on a non-existent path - // and catch the error, returning 0 - const size = db.getDatabaseSize(); - - // The mock environment doesn't have actual file, so expect 0 - expect(size).toBe(0); - }); - - it('should handle statSync gracefully when file does not exist', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - // getDatabaseSize should not throw - expect(() => db.getDatabaseSize()).not.toThrow(); - }); - }); - - describe('vacuum', () => { - it('should execute VACUUM SQL command', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - // Clear mocks from initialization - mockStatement.run.mockClear(); - mockDb.prepare.mockClear(); - - const result = db.vacuum(); - - expect(result.success).toBe(true); - expect(mockDb.prepare).toHaveBeenCalledWith('VACUUM'); - expect(mockStatement.run).toHaveBeenCalled(); 
- }); - - it('should return success true when vacuum completes', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const result = db.vacuum(); - - expect(result.success).toBe(true); - expect(result.error).toBeUndefined(); - }); - - it('should return bytesFreed of 0 when sizes are equal (mocked)', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const result = db.vacuum(); - - // With mock fs, both before and after sizes will be 0 - expect(result.bytesFreed).toBe(0); - }); - - it('should return error if database not initialized', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - // Don't initialize - - const result = db.vacuum(); - - expect(result.success).toBe(false); - expect(result.bytesFreed).toBe(0); - expect(result.error).toBe('Database not initialized'); - }); - - it('should handle VACUUM failure gracefully', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - // Make VACUUM fail - mockDb.prepare.mockImplementation((sql: string) => { - if (sql === 'VACUUM') { - return { - run: vi.fn().mockImplementation(() => { - throw new Error('database is locked'); - }), - }; - } - return mockStatement; - }); - - const result = db.vacuum(); - - expect(result.success).toBe(false); - expect(result.error).toContain('database is locked'); - }); - - it('should log vacuum progress with size information', async () => { - const { logger } = await import('../../main/utils/logger'); - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - // Clear logger mocks from initialization - vi.mocked(logger.info).mockClear(); - - db.vacuum(); - - // Check that logger was called with vacuum-related messages - expect(logger.info).toHaveBeenCalledWith( - 
expect.stringContaining('Starting VACUUM'), - expect.any(String) - ); - expect(logger.info).toHaveBeenCalledWith( - expect.stringContaining('VACUUM completed'), - expect.any(String) - ); - }); - }); - - describe('vacuumIfNeeded', () => { - it('should skip vacuum if database size is 0 (below threshold)', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - // Clear mocks from initialization - mockStatement.run.mockClear(); - mockDb.prepare.mockClear(); - - const result = db.vacuumIfNeeded(); - - // Size is 0 (mock fs), which is below 100MB threshold - expect(result.vacuumed).toBe(false); - expect(result.databaseSize).toBe(0); - expect(result.result).toBeUndefined(); - }); - - it('should return correct databaseSize in result', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const result = db.vacuumIfNeeded(); - - // Size property should be present - expect(typeof result.databaseSize).toBe('number'); - }); - - it('should use default 100MB threshold when not specified', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - // With 0 byte size (mocked), should skip vacuum - const result = db.vacuumIfNeeded(); - - expect(result.vacuumed).toBe(false); - }); - - it('should not vacuum with threshold 0 and size 0 since 0 is not > 0', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - // Clear mocks from initialization - mockStatement.run.mockClear(); - mockDb.prepare.mockClear(); - - // With 0 threshold and 0 byte file: 0 is NOT greater than 0 - const result = db.vacuumIfNeeded(0); - - // The condition is: databaseSize < thresholdBytes - // 0 < 0 is false, so vacuumed should be true (it tries to vacuum) - expect(result.databaseSize).toBe(0); - // Since 0 is NOT less than 0, it proceeds to 
vacuum - expect(result.vacuumed).toBe(true); - }); - - it('should log appropriate message when skipping vacuum', async () => { - const { logger } = await import('../../main/utils/logger'); - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - // Clear logger mocks from initialization - vi.mocked(logger.debug).mockClear(); - - db.vacuumIfNeeded(); - - expect(logger.debug).toHaveBeenCalledWith( - expect.stringContaining('below vacuum threshold'), - expect.any(String) - ); - }); - }); - - describe('vacuumIfNeeded with custom thresholds', () => { - it('should respect custom threshold parameter (threshold = -1 means always vacuum)', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - // Clear mocks from initialization - mockStatement.run.mockClear(); - mockDb.prepare.mockClear(); - - // With -1 threshold, 0 > -1 is true, so should vacuum - const result = db.vacuumIfNeeded(-1); - - expect(result.vacuumed).toBe(true); - expect(mockDb.prepare).toHaveBeenCalledWith('VACUUM'); - }); - - it('should not vacuum with very large threshold', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - // Clear mocks from initialization - mockStatement.run.mockClear(); - mockDb.prepare.mockClear(); - - // With 1TB threshold, should NOT trigger vacuum - const result = db.vacuumIfNeeded(1024 * 1024 * 1024 * 1024); - - expect(result.vacuumed).toBe(false); - expect(mockDb.prepare).not.toHaveBeenCalledWith('VACUUM'); - }); - }); - - describe('initialize with vacuumIfNeeded integration', () => { - it('should call vacuumIfNeededWeekly during initialization', async () => { - const { logger } = await import('../../main/utils/logger'); - - // Clear logger mocks before test - vi.mocked(logger.debug).mockClear(); - - // Mock timestamp file as old (0 = epoch, triggers vacuum check) - 
mockFsReadFileSync.mockReturnValue('0'); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - - db.initialize(); - - // With old timestamp, vacuumIfNeededWeekly should proceed to call vacuumIfNeeded - // which logs "below vacuum threshold" for small databases (mocked as 1024 bytes) - expect(logger.debug).toHaveBeenCalledWith( - expect.stringContaining('below vacuum threshold'), - expect.any(String) - ); - }); - - it('should complete initialization even if vacuum would fail', async () => { - // Make VACUUM fail if called - mockDb.prepare.mockImplementation((sql: string) => { - if (sql === 'VACUUM') { - return { - run: vi.fn().mockImplementation(() => { - throw new Error('VACUUM failed: database is locked'); - }), - }; - } - return mockStatement; - }); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - - // Initialize should not throw (vacuum is skipped due to 0 size anyway) - expect(() => db.initialize()).not.toThrow(); - - // Database should still be ready - expect(db.isReady()).toBe(true); - }); - - it('should not block initialization for small databases', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - - // Time the initialization (should be fast for mock) - const start = Date.now(); - db.initialize(); - const elapsed = Date.now() - start; - - expect(db.isReady()).toBe(true); - expect(elapsed).toBeLessThan(1000); // Should be fast in mock environment - }); - }); - - describe('vacuum return types', () => { - it('vacuum should return object with success, bytesFreed, and optional error', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const result = db.vacuum(); - - expect(typeof result.success).toBe('boolean'); - expect(typeof result.bytesFreed).toBe('number'); - expect(result.error === undefined || typeof result.error === 'string').toBe(true); - }); - - 
it('vacuumIfNeeded should return object with vacuumed, databaseSize, and optional result', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const result = db.vacuumIfNeeded(); - - expect(typeof result.vacuumed).toBe('boolean'); - expect(typeof result.databaseSize).toBe('number'); - expect(result.result === undefined || typeof result.result === 'object').toBe(true); - }); - - it('vacuumIfNeeded should include result when vacuum is performed', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - // Use -1 threshold to force vacuum - const result = db.vacuumIfNeeded(-1); - - expect(result.vacuumed).toBe(true); - expect(result.result).toBeDefined(); - expect(result.result?.success).toBe(true); - }); - }); - - describe('clearOldData method', () => { - beforeEach(() => { - vi.clearAllMocks(); - vi.resetModules(); - }); - - it('should return error when database is not initialized', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - // Don't initialize - - const result = db.clearOldData(30); - - expect(result.success).toBe(false); - expect(result.deletedQueryEvents).toBe(0); - expect(result.deletedAutoRunSessions).toBe(0); - expect(result.deletedAutoRunTasks).toBe(0); - expect(result.error).toBe('Database not initialized'); - }); - - it('should return error when olderThanDays is 0 or negative', async () => { - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const resultZero = db.clearOldData(0); - expect(resultZero.success).toBe(false); - expect(resultZero.error).toBe('olderThanDays must be greater than 0'); - - const resultNegative = db.clearOldData(-10); - expect(resultNegative.success).toBe(false); - expect(resultNegative.error).toBe('olderThanDays must be greater than 0'); - }); - - it('should successfully clear old 
data with valid parameters', async () => { - // Mock prepare to return statements with expected behavior - mockStatement.all.mockReturnValue([{ id: 'session-1' }, { id: 'session-2' }]); - mockStatement.run.mockReturnValue({ changes: 5 }); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const result = db.clearOldData(30); - - expect(result.success).toBe(true); - expect(result.deletedQueryEvents).toBe(5); - expect(result.deletedAutoRunSessions).toBe(5); - expect(result.deletedAutoRunTasks).toBe(5); - expect(result.error).toBeUndefined(); - }); - - it('should handle empty results (no old data)', async () => { - mockStatement.all.mockReturnValue([]); - mockStatement.run.mockReturnValue({ changes: 0 }); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const result = db.clearOldData(365); - - expect(result.success).toBe(true); - expect(result.deletedQueryEvents).toBe(0); - expect(result.deletedAutoRunSessions).toBe(0); - expect(result.deletedAutoRunTasks).toBe(0); - expect(result.error).toBeUndefined(); - }); - - it('should calculate correct cutoff time based on days', async () => { - let capturedCutoffTime: number | null = null; - - mockDb.prepare.mockImplementation((sql: string) => { - return { - run: vi.fn((cutoff: number) => { - if (sql.includes('DELETE FROM query_events')) { - capturedCutoffTime = cutoff; - } - return { changes: 0 }; - }), - get: mockStatement.get, - all: vi.fn(() => []), - }; - }); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const beforeCall = Date.now(); - db.clearOldData(7); - const afterCall = Date.now(); - - // Cutoff should be approximately 7 days ago - const expectedCutoff = beforeCall - 7 * 24 * 60 * 60 * 1000; - expect(capturedCutoffTime).not.toBeNull(); - expect(capturedCutoffTime!).toBeGreaterThanOrEqual(expectedCutoff - 1000); - 
expect(capturedCutoffTime!).toBeLessThanOrEqual(afterCall - 7 * 24 * 60 * 60 * 1000 + 1000); - }); - - it('should handle database errors gracefully', async () => { - mockDb.prepare.mockImplementation((sql: string) => { - if (sql.includes('DELETE FROM query_events')) { - return { - run: vi.fn(() => { - throw new Error('Database locked'); - }), - }; - } - return mockStatement; - }); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - const result = db.clearOldData(30); - - expect(result.success).toBe(false); - expect(result.error).toBe('Database locked'); - expect(result.deletedQueryEvents).toBe(0); - expect(result.deletedAutoRunSessions).toBe(0); - expect(result.deletedAutoRunTasks).toBe(0); - }); - - it('should support various time periods', async () => { - mockStatement.all.mockReturnValue([]); - mockStatement.run.mockReturnValue({ changes: 0 }); - - const { StatsDB } = await import('../../main/stats-db'); - const db = new StatsDB(); - db.initialize(); - - // Test common time periods from Settings UI - const periods = [7, 30, 90, 180, 365]; - for (const days of periods) { - const result = db.clearOldData(days); - expect(result.success).toBe(true); - } - }); - }); - - // ===================================================================== -}); diff --git a/src/__tests__/main/stats/aggregations.test.ts b/src/__tests__/main/stats/aggregations.test.ts new file mode 100644 index 00000000..875cf671 --- /dev/null +++ b/src/__tests__/main/stats/aggregations.test.ts @@ -0,0 +1,1385 @@ +/** + * Tests for time range filtering and aggregation calculations. + * + * Note: better-sqlite3 is a native module compiled for Electron's Node version. + * Direct testing with the native module in vitest is not possible without + * electron-rebuild for the vitest runtime. These tests use mocked database + * operations to verify the logic without requiring the actual native module. 
+ * + * For full integration testing of the SQLite database, use the Electron test + * environment (e2e tests) where the native module is properly loaded. + */ + +import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest'; +import * as path from 'path'; +import * as os from 'os'; + +// Track Database constructor calls to verify file path +let lastDbPath: string | null = null; + +// Store mock references so they can be accessed in tests +const mockStatement = { + run: vi.fn(() => ({ changes: 1 })), + get: vi.fn(() => ({ count: 0, total_duration: 0 })), + all: vi.fn(() => []), +}; + +const mockDb = { + pragma: vi.fn(() => [{ user_version: 0 }]), + prepare: vi.fn(() => mockStatement), + close: vi.fn(), + // Transaction mock that immediately executes the function + transaction: vi.fn((fn: () => void) => { + return () => fn(); + }), +}; + +// Mock better-sqlite3 as a class +vi.mock('better-sqlite3', () => { + return { + default: class MockDatabase { + constructor(dbPath: string) { + lastDbPath = dbPath; + } + pragma = mockDb.pragma; + prepare = mockDb.prepare; + close = mockDb.close; + transaction = mockDb.transaction; + }, + }; +}); + +// Mock electron's app module with trackable userData path +const mockUserDataPath = path.join(os.tmpdir(), 'maestro-test-stats-db'); +vi.mock('electron', () => ({ + app: { + getPath: vi.fn((name: string) => { + if (name === 'userData') return mockUserDataPath; + return os.tmpdir(); + }), + }, +})); + +// Track fs calls +const mockFsExistsSync = vi.fn(() => true); +const mockFsMkdirSync = vi.fn(); +const mockFsCopyFileSync = vi.fn(); +const mockFsUnlinkSync = vi.fn(); +const mockFsRenameSync = vi.fn(); +const mockFsStatSync = vi.fn(() => ({ size: 1024 })); +const mockFsReadFileSync = vi.fn(() => '0'); // Default: old timestamp (triggers vacuum check) +const mockFsWriteFileSync = vi.fn(); + +// Mock fs +vi.mock('fs', () => ({ + existsSync: (...args: unknown[]) => mockFsExistsSync(...args), + mkdirSync: (...args: unknown[]) 
=> mockFsMkdirSync(...args), + copyFileSync: (...args: unknown[]) => mockFsCopyFileSync(...args), + unlinkSync: (...args: unknown[]) => mockFsUnlinkSync(...args), + renameSync: (...args: unknown[]) => mockFsRenameSync(...args), + statSync: (...args: unknown[]) => mockFsStatSync(...args), + readFileSync: (...args: unknown[]) => mockFsReadFileSync(...args), + writeFileSync: (...args: unknown[]) => mockFsWriteFileSync(...args), +})); + +// Mock logger +vi.mock('../../../main/utils/logger', () => ({ + logger: { + info: vi.fn(), + warn: vi.fn(), + error: vi.fn(), + debug: vi.fn(), + }, +})); + +// Import types only - we'll test the type definitions +import type { + QueryEvent, + AutoRunSession, + AutoRunTask, + SessionLifecycleEvent, + StatsTimeRange, + StatsFilters, + StatsAggregation, +} from '../../../shared/stats-types'; + +describe('Time-range filtering works correctly for all ranges', () => { + beforeEach(() => { + vi.clearAllMocks(); + mockDb.pragma.mockReturnValue([{ user_version: 1 }]); + mockDb.prepare.mockReturnValue(mockStatement); + mockStatement.run.mockReturnValue({ changes: 1 }); + mockStatement.get.mockReturnValue({ count: 0, total_duration: 0 }); + mockStatement.all.mockReturnValue([]); + mockFsExistsSync.mockReturnValue(true); + }); + + afterEach(() => { + vi.resetModules(); + }); + + describe('getQueryEvents time range calculations', () => { + it('should filter by "day" range (last 24 hours)', async () => { + const now = Date.now(); + const oneDayMs = 24 * 60 * 60 * 1000; + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + db.getQueryEvents('day'); + + // Verify the start_time parameter is approximately 24 hours ago + const allCalls = mockStatement.all.mock.calls; + expect(allCalls.length).toBeGreaterThan(0); + + const lastCall = allCalls[allCalls.length - 1]; + const startTimeParam = lastCall[0] as number; + + // The start time should be approximately now - 24 hours (within a few seconds 
tolerance) + expect(startTimeParam).toBeGreaterThanOrEqual(now - oneDayMs - 5000); + expect(startTimeParam).toBeLessThanOrEqual(now - oneDayMs + 5000); + }); + + it('should filter by "week" range (last 7 days)', async () => { + const now = Date.now(); + const oneWeekMs = 7 * 24 * 60 * 60 * 1000; + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + db.getQueryEvents('week'); + + const allCalls = mockStatement.all.mock.calls; + expect(allCalls.length).toBeGreaterThan(0); + + const lastCall = allCalls[allCalls.length - 1]; + const startTimeParam = lastCall[0] as number; + + // The start time should be approximately now - 7 days (within a few seconds tolerance) + expect(startTimeParam).toBeGreaterThanOrEqual(now - oneWeekMs - 5000); + expect(startTimeParam).toBeLessThanOrEqual(now - oneWeekMs + 5000); + }); + + it('should filter by "month" range (last 30 days)', async () => { + const now = Date.now(); + const oneMonthMs = 30 * 24 * 60 * 60 * 1000; + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + db.getQueryEvents('month'); + + const allCalls = mockStatement.all.mock.calls; + expect(allCalls.length).toBeGreaterThan(0); + + const lastCall = allCalls[allCalls.length - 1]; + const startTimeParam = lastCall[0] as number; + + // The start time should be approximately now - 30 days (within a few seconds tolerance) + expect(startTimeParam).toBeGreaterThanOrEqual(now - oneMonthMs - 5000); + expect(startTimeParam).toBeLessThanOrEqual(now - oneMonthMs + 5000); + }); + + it('should filter by "year" range (last 365 days)', async () => { + const now = Date.now(); + const oneYearMs = 365 * 24 * 60 * 60 * 1000; + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + db.getQueryEvents('year'); + + const allCalls = mockStatement.all.mock.calls; + expect(allCalls.length).toBeGreaterThan(0); + + const lastCall = 
allCalls[allCalls.length - 1]; + const startTimeParam = lastCall[0] as number; + + // The start time should be approximately now - 365 days (within a few seconds tolerance) + expect(startTimeParam).toBeGreaterThanOrEqual(now - oneYearMs - 5000); + expect(startTimeParam).toBeLessThanOrEqual(now - oneYearMs + 5000); + }); + + it('should filter by "all" range (from epoch/timestamp 0)', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + db.getQueryEvents('all'); + + const allCalls = mockStatement.all.mock.calls; + expect(allCalls.length).toBeGreaterThan(0); + + const lastCall = allCalls[allCalls.length - 1]; + const startTimeParam = lastCall[0] as number; + + // For 'all' range, start time should be 0 (epoch) + expect(startTimeParam).toBe(0); + }); + }); + + describe('getAutoRunSessions time range calculations', () => { + it('should filter Auto Run sessions by "day" range', async () => { + const now = Date.now(); + const oneDayMs = 24 * 60 * 60 * 1000; + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + db.getAutoRunSessions('day'); + + const allCalls = mockStatement.all.mock.calls; + expect(allCalls.length).toBeGreaterThan(0); + + const lastCall = allCalls[allCalls.length - 1]; + const startTimeParam = lastCall[0] as number; + + expect(startTimeParam).toBeGreaterThanOrEqual(now - oneDayMs - 5000); + expect(startTimeParam).toBeLessThanOrEqual(now - oneDayMs + 5000); + }); + + it('should filter Auto Run sessions by "week" range', async () => { + const now = Date.now(); + const oneWeekMs = 7 * 24 * 60 * 60 * 1000; + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + db.getAutoRunSessions('week'); + + const allCalls = mockStatement.all.mock.calls; + expect(allCalls.length).toBeGreaterThan(0); + + const lastCall = allCalls[allCalls.length - 1]; + const startTimeParam = lastCall[0] as 
number; + + expect(startTimeParam).toBeGreaterThanOrEqual(now - oneWeekMs - 5000); + expect(startTimeParam).toBeLessThanOrEqual(now - oneWeekMs + 5000); + }); + + it('should filter Auto Run sessions by "month" range', async () => { + const now = Date.now(); + const oneMonthMs = 30 * 24 * 60 * 60 * 1000; + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + db.getAutoRunSessions('month'); + + const allCalls = mockStatement.all.mock.calls; + expect(allCalls.length).toBeGreaterThan(0); + + const lastCall = allCalls[allCalls.length - 1]; + const startTimeParam = lastCall[0] as number; + + expect(startTimeParam).toBeGreaterThanOrEqual(now - oneMonthMs - 5000); + expect(startTimeParam).toBeLessThanOrEqual(now - oneMonthMs + 5000); + }); + + it('should filter Auto Run sessions by "year" range', async () => { + const now = Date.now(); + const oneYearMs = 365 * 24 * 60 * 60 * 1000; + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + db.getAutoRunSessions('year'); + + const allCalls = mockStatement.all.mock.calls; + expect(allCalls.length).toBeGreaterThan(0); + + const lastCall = allCalls[allCalls.length - 1]; + const startTimeParam = lastCall[0] as number; + + expect(startTimeParam).toBeGreaterThanOrEqual(now - oneYearMs - 5000); + expect(startTimeParam).toBeLessThanOrEqual(now - oneYearMs + 5000); + }); + + it('should filter Auto Run sessions by "all" range', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + db.getAutoRunSessions('all'); + + const allCalls = mockStatement.all.mock.calls; + expect(allCalls.length).toBeGreaterThan(0); + + const lastCall = allCalls[allCalls.length - 1]; + const startTimeParam = lastCall[0] as number; + + expect(startTimeParam).toBe(0); + }); + }); + + describe('getAggregatedStats time range calculations', () => { + it('should aggregate stats for "day" 
range', async () => { + const now = Date.now(); + const oneDayMs = 24 * 60 * 60 * 1000; + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + mockStatement.get.mockClear(); + + db.getAggregatedStats('day'); + + // getAggregatedStats calls multiple queries, verify the totals query used correct time range + const getCalls = mockStatement.get.mock.calls; + expect(getCalls.length).toBeGreaterThan(0); + + const firstCall = getCalls[0]; + const startTimeParam = firstCall[0] as number; + + expect(startTimeParam).toBeGreaterThanOrEqual(now - oneDayMs - 5000); + expect(startTimeParam).toBeLessThanOrEqual(now - oneDayMs + 5000); + }); + + it('should aggregate stats for "week" range', async () => { + const now = Date.now(); + const oneWeekMs = 7 * 24 * 60 * 60 * 1000; + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + mockStatement.get.mockClear(); + + db.getAggregatedStats('week'); + + const getCalls = mockStatement.get.mock.calls; + expect(getCalls.length).toBeGreaterThan(0); + + const firstCall = getCalls[0]; + const startTimeParam = firstCall[0] as number; + + expect(startTimeParam).toBeGreaterThanOrEqual(now - oneWeekMs - 5000); + expect(startTimeParam).toBeLessThanOrEqual(now - oneWeekMs + 5000); + }); + + it('should aggregate stats for "month" range', async () => { + const now = Date.now(); + const oneMonthMs = 30 * 24 * 60 * 60 * 1000; + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + mockStatement.get.mockClear(); + + db.getAggregatedStats('month'); + + const getCalls = mockStatement.get.mock.calls; + expect(getCalls.length).toBeGreaterThan(0); + + const firstCall = getCalls[0]; + const startTimeParam = firstCall[0] as number; + + expect(startTimeParam).toBeGreaterThanOrEqual(now - oneMonthMs - 5000); + expect(startTimeParam).toBeLessThanOrEqual(now - oneMonthMs + 5000); + }); + + it('should 
aggregate stats for "year" range', async () => { + const now = Date.now(); + const oneYearMs = 365 * 24 * 60 * 60 * 1000; + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + mockStatement.get.mockClear(); + + db.getAggregatedStats('year'); + + const getCalls = mockStatement.get.mock.calls; + expect(getCalls.length).toBeGreaterThan(0); + + const firstCall = getCalls[0]; + const startTimeParam = firstCall[0] as number; + + expect(startTimeParam).toBeGreaterThanOrEqual(now - oneYearMs - 5000); + expect(startTimeParam).toBeLessThanOrEqual(now - oneYearMs + 5000); + }); + + it('should aggregate stats for "all" range', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + mockStatement.get.mockClear(); + + db.getAggregatedStats('all'); + + const getCalls = mockStatement.get.mock.calls; + expect(getCalls.length).toBeGreaterThan(0); + + const firstCall = getCalls[0]; + const startTimeParam = firstCall[0] as number; + + expect(startTimeParam).toBe(0); + }); + }); + + describe('exportToCsv time range calculations', () => { + it('should export CSV for "day" range only', async () => { + const now = Date.now(); + const oneDayMs = 24 * 60 * 60 * 1000; + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + db.exportToCsv('day'); + + const allCalls = mockStatement.all.mock.calls; + expect(allCalls.length).toBeGreaterThan(0); + + const lastCall = allCalls[allCalls.length - 1]; + const startTimeParam = lastCall[0] as number; + + expect(startTimeParam).toBeGreaterThanOrEqual(now - oneDayMs - 5000); + expect(startTimeParam).toBeLessThanOrEqual(now - oneDayMs + 5000); + }); + + it('should export CSV for "all" range', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + db.exportToCsv('all'); + + const allCalls = 
mockStatement.all.mock.calls; + expect(allCalls.length).toBeGreaterThan(0); + + const lastCall = allCalls[allCalls.length - 1]; + const startTimeParam = lastCall[0] as number; + + expect(startTimeParam).toBe(0); + }); + }); + + describe('SQL query structure verification', () => { + it('should include start_time >= ? in getQueryEvents SQL', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + db.getQueryEvents('week'); + + const prepareCalls = mockDb.prepare.mock.calls; + const selectCall = prepareCalls.find((call) => + (call[0] as string).includes('SELECT * FROM query_events') + ); + + expect(selectCall).toBeDefined(); + expect(selectCall![0]).toContain('start_time >= ?'); + }); + + it('should include start_time >= ? in getAutoRunSessions SQL', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + db.getAutoRunSessions('month'); + + const prepareCalls = mockDb.prepare.mock.calls; + const selectCall = prepareCalls.find((call) => + (call[0] as string).includes('SELECT * FROM auto_run_sessions') + ); + + expect(selectCall).toBeDefined(); + expect(selectCall![0]).toContain('start_time >= ?'); + }); + + it('should include start_time >= ? 
in aggregation queries', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + db.getAggregatedStats('year'); + + const prepareCalls = mockDb.prepare.mock.calls; + + // Verify the totals query includes the filter + const totalsCall = prepareCalls.find( + (call) => + (call[0] as string).includes('COUNT(*)') && (call[0] as string).includes('SUM(duration)') + ); + expect(totalsCall).toBeDefined(); + expect(totalsCall![0]).toContain('WHERE start_time >= ?'); + + // Verify the byAgent query includes the filter + const byAgentCall = prepareCalls.find((call) => + (call[0] as string).includes('GROUP BY agent_type') + ); + expect(byAgentCall).toBeDefined(); + expect(byAgentCall![0]).toContain('WHERE start_time >= ?'); + + // Verify the bySource query includes the filter + const bySourceCall = prepareCalls.find((call) => + (call[0] as string).includes('GROUP BY source') + ); + expect(bySourceCall).toBeDefined(); + expect(bySourceCall![0]).toContain('WHERE start_time >= ?'); + + // Verify the byDay query includes the filter + const byDayCall = prepareCalls.find((call) => (call[0] as string).includes('GROUP BY date(')); + expect(byDayCall).toBeDefined(); + expect(byDayCall![0]).toContain('WHERE start_time >= ?'); + }); + }); + + describe('time range boundary behavior', () => { + it('should include events exactly at the range boundary', async () => { + const now = Date.now(); + const oneDayMs = 24 * 60 * 60 * 1000; + const boundaryTime = now - oneDayMs; + + // Mock event exactly at the boundary + mockStatement.all.mockReturnValue([ + { + id: 'boundary-event', + session_id: 'session-1', + agent_type: 'claude-code', + source: 'user', + start_time: boundaryTime, + duration: 1000, + project_path: null, + tab_id: null, + }, + ]); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const events = db.getQueryEvents('day'); + + // Event at the boundary 
should be included (start_time >= boundary) + expect(events).toHaveLength(1); + expect(events[0].id).toBe('boundary-event'); + }); + + it('should exclude events before the range boundary', async () => { + // The actual filtering happens in the SQL query via WHERE clause + // We verify this by checking the SQL structure + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + db.getQueryEvents('day'); + + const prepareCalls = mockDb.prepare.mock.calls; + const selectCall = prepareCalls.find((call) => + (call[0] as string).includes('SELECT * FROM query_events') + ); + + // Verify it uses >= (greater than or equal), not just > (greater than) + expect(selectCall![0]).toContain('start_time >= ?'); + }); + + it('should return consistent results for multiple calls with same range', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + // Call twice in quick succession + db.getQueryEvents('week'); + db.getQueryEvents('week'); + + const allCalls = mockStatement.all.mock.calls; + expect(allCalls.length).toBe(2); + + // Both calls should have very close (within a few ms) start times + const firstStartTime = allCalls[0][0] as number; + const secondStartTime = allCalls[1][0] as number; + + // Difference should be minimal (test executes quickly) + expect(Math.abs(secondStartTime - firstStartTime)).toBeLessThan(1000); + }); + }); + + describe('combined filters with time range', () => { + it('should combine time range with agentType filter', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + db.getQueryEvents('week', { agentType: 'claude-code' }); + + const allCalls = mockStatement.all.mock.calls; + expect(allCalls.length).toBeGreaterThan(0); + + const lastCall = allCalls[allCalls.length - 1]; + // Should have 2 parameters: start_time and agentType + expect(lastCall).toHaveLength(2); + 
expect(lastCall[1]).toBe('claude-code'); + }); + + it('should combine time range with source filter', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + db.getQueryEvents('month', { source: 'auto' }); + + const allCalls = mockStatement.all.mock.calls; + expect(allCalls.length).toBeGreaterThan(0); + + const lastCall = allCalls[allCalls.length - 1]; + // Should have 2 parameters: start_time and source + expect(lastCall).toHaveLength(2); + expect(lastCall[1]).toBe('auto'); + }); + + it('should combine time range with multiple filters', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + db.getQueryEvents('year', { + agentType: 'opencode', + source: 'user', + projectPath: '/test/path', + sessionId: 'session-123', + }); + + const allCalls = mockStatement.all.mock.calls; + expect(allCalls.length).toBeGreaterThan(0); + + const lastCall = allCalls[allCalls.length - 1]; + // Should have 5 parameters: start_time + 4 filters + expect(lastCall).toHaveLength(5); + expect(lastCall[1]).toBe('opencode'); + expect(lastCall[2]).toBe('user'); + expect(lastCall[3]).toBe('/test/path'); + expect(lastCall[4]).toBe('session-123'); + }); + }); +}); + +/** + * Comprehensive tests for aggregation query calculations + * + * These tests verify that the getAggregatedStats method returns correct calculations: + * - Total queries count + * - Total duration sum + * - Average duration calculation + * - Breakdown by agent type (count and duration) + * - Breakdown by source (user vs auto) + * - Daily breakdown for charts + */ +describe('Aggregation queries return correct calculations', () => { + beforeEach(() => { + vi.clearAllMocks(); + mockDb.pragma.mockReturnValue([{ user_version: 1 }]); + mockDb.prepare.mockReturnValue(mockStatement); + mockStatement.run.mockReturnValue({ changes: 1 }); + mockFsExistsSync.mockReturnValue(true); + }); + + afterEach(() => 
{ + vi.resetModules(); + }); + + describe('totalQueries and totalDuration calculations', () => { + it('should return correct totalQueries count from database', async () => { + // Mock the totals query result + mockStatement.get.mockReturnValue({ count: 42, total_duration: 126000 }); + mockStatement.all.mockReturnValue([]); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const stats = db.getAggregatedStats('week'); + + expect(stats.totalQueries).toBe(42); + }); + + it('should return correct totalDuration sum from database', async () => { + mockStatement.get.mockReturnValue({ count: 10, total_duration: 50000 }); + mockStatement.all.mockReturnValue([]); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const stats = db.getAggregatedStats('month'); + + expect(stats.totalDuration).toBe(50000); + }); + + it('should handle zero queries correctly', async () => { + mockStatement.get.mockReturnValue({ count: 0, total_duration: 0 }); + mockStatement.all.mockReturnValue([]); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const stats = db.getAggregatedStats('day'); + + expect(stats.totalQueries).toBe(0); + expect(stats.totalDuration).toBe(0); + }); + + it('should handle large query counts correctly', async () => { + mockStatement.get.mockReturnValue({ count: 10000, total_duration: 5000000 }); + mockStatement.all.mockReturnValue([]); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const stats = db.getAggregatedStats('year'); + + expect(stats.totalQueries).toBe(10000); + expect(stats.totalDuration).toBe(5000000); + }); + + it('should handle very large durations correctly', async () => { + // 1 day of continuous usage = 86400000ms + const largeDuration = 86400000; + mockStatement.get.mockReturnValue({ count: 100, total_duration: 
largeDuration }); + mockStatement.all.mockReturnValue([]); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const stats = db.getAggregatedStats('all'); + + expect(stats.totalDuration).toBe(largeDuration); + }); + }); + + describe('avgDuration calculation', () => { + it('should calculate correct average duration', async () => { + // 100 queries, 500000ms total = 5000ms average + mockStatement.get.mockReturnValue({ count: 100, total_duration: 500000 }); + mockStatement.all.mockReturnValue([]); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const stats = db.getAggregatedStats('week'); + + expect(stats.avgDuration).toBe(5000); + }); + + it('should return 0 average duration when no queries', async () => { + mockStatement.get.mockReturnValue({ count: 0, total_duration: 0 }); + mockStatement.all.mockReturnValue([]); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const stats = db.getAggregatedStats('day'); + + // Avoid division by zero - should return 0 + expect(stats.avgDuration).toBe(0); + }); + + it('should round average duration to nearest integer', async () => { + // 3 queries, 10000ms total = 3333.33... 
average, should round to 3333 + mockStatement.get.mockReturnValue({ count: 3, total_duration: 10000 }); + mockStatement.all.mockReturnValue([]); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const stats = db.getAggregatedStats('month'); + + // Math.round(10000 / 3) = 3333 + expect(stats.avgDuration).toBe(3333); + }); + + it('should handle single query average correctly', async () => { + mockStatement.get.mockReturnValue({ count: 1, total_duration: 12345 }); + mockStatement.all.mockReturnValue([]); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const stats = db.getAggregatedStats('day'); + + expect(stats.avgDuration).toBe(12345); + }); + + it('should handle edge case of tiny durations', async () => { + // 5 queries with 1ms each = 5ms total, 1ms average + mockStatement.get.mockReturnValue({ count: 5, total_duration: 5 }); + mockStatement.all.mockReturnValue([]); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const stats = db.getAggregatedStats('day'); + + expect(stats.avgDuration).toBe(1); + }); + }); + + describe('byAgent breakdown calculations', () => { + it('should return correct breakdown by single agent type', async () => { + mockStatement.get.mockReturnValue({ count: 50, total_duration: 250000 }); + mockStatement.all + .mockReturnValueOnce([]) // First all() call (we handle this below) + .mockReturnValueOnce([{ agent_type: 'claude-code', count: 50, duration: 250000 }]) + .mockReturnValueOnce([{ source: 'user', count: 50 }]) + .mockReturnValueOnce([]); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + // Reset to control exact mock responses for getAggregatedStats + mockStatement.all.mockReset(); + mockStatement.all + .mockReturnValueOnce([{ agent_type: 'claude-code', count: 50, duration: 250000 }]) + 
.mockReturnValueOnce([{ source: 'user', count: 50 }]) + .mockReturnValueOnce([]); + + const stats = db.getAggregatedStats('week'); + + expect(stats.byAgent).toHaveProperty('claude-code'); + expect(stats.byAgent['claude-code'].count).toBe(50); + expect(stats.byAgent['claude-code'].duration).toBe(250000); + }); + + it('should return correct breakdown for multiple agent types', async () => { + mockStatement.get.mockReturnValue({ count: 150, total_duration: 750000 }); + mockStatement.all + .mockReturnValueOnce([ + { agent_type: 'claude-code', count: 100, duration: 500000 }, + { agent_type: 'opencode', count: 30, duration: 150000 }, + { agent_type: 'gemini-cli', count: 20, duration: 100000 }, + ]) + .mockReturnValueOnce([ + { source: 'user', count: 120 }, + { source: 'auto', count: 30 }, + ]) + .mockReturnValueOnce([]); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const stats = db.getAggregatedStats('month'); + + // Verify all agents are present + expect(Object.keys(stats.byAgent)).toHaveLength(3); + + // Verify claude-code stats + expect(stats.byAgent['claude-code'].count).toBe(100); + expect(stats.byAgent['claude-code'].duration).toBe(500000); + + // Verify opencode stats + expect(stats.byAgent['opencode'].count).toBe(30); + expect(stats.byAgent['opencode'].duration).toBe(150000); + + // Verify gemini-cli stats + expect(stats.byAgent['gemini-cli'].count).toBe(20); + expect(stats.byAgent['gemini-cli'].duration).toBe(100000); + }); + + it('should return empty byAgent object when no queries exist', async () => { + mockStatement.get.mockReturnValue({ count: 0, total_duration: 0 }); + mockStatement.all.mockReturnValue([]); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const stats = db.getAggregatedStats('day'); + + expect(stats.byAgent).toEqual({}); + expect(Object.keys(stats.byAgent)).toHaveLength(0); + }); + + it('should maintain correct 
duration per agent when durations vary', async () => { + mockStatement.get.mockReturnValue({ count: 4, total_duration: 35000 }); + mockStatement.all + .mockReturnValueOnce([ + { agent_type: 'claude-code', count: 3, duration: 30000 }, // Avg 10000 + { agent_type: 'opencode', count: 1, duration: 5000 }, // Avg 5000 + ]) + .mockReturnValueOnce([{ source: 'user', count: 4 }]) + .mockReturnValueOnce([]); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const stats = db.getAggregatedStats('week'); + + // Verify duration totals per agent are preserved + expect(stats.byAgent['claude-code'].duration).toBe(30000); + expect(stats.byAgent['opencode'].duration).toBe(5000); + + // Total should match sum of all agents + const totalAgentDuration = Object.values(stats.byAgent).reduce( + (sum, agent) => sum + agent.duration, + 0 + ); + expect(totalAgentDuration).toBe(35000); + }); + }); + + describe('bySource breakdown calculations', () => { + it('should return correct user vs auto counts', async () => { + mockStatement.get.mockReturnValue({ count: 100, total_duration: 500000 }); + mockStatement.all + .mockReturnValueOnce([{ agent_type: 'claude-code', count: 100, duration: 500000 }]) + .mockReturnValueOnce([ + { source: 'user', count: 70 }, + { source: 'auto', count: 30 }, + ]) + .mockReturnValueOnce([]); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const stats = db.getAggregatedStats('week'); + + expect(stats.bySource.user).toBe(70); + expect(stats.bySource.auto).toBe(30); + }); + + it('should handle all queries from user source', async () => { + mockStatement.get.mockReturnValue({ count: 50, total_duration: 250000 }); + mockStatement.all + .mockReturnValueOnce([{ agent_type: 'claude-code', count: 50, duration: 250000 }]) + .mockReturnValueOnce([{ source: 'user', count: 50 }]) + .mockReturnValueOnce([]); + + const { StatsDB } = await 
import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const stats = db.getAggregatedStats('month'); + + expect(stats.bySource.user).toBe(50); + expect(stats.bySource.auto).toBe(0); + }); + + it('should handle all queries from auto source', async () => { + mockStatement.get.mockReturnValue({ count: 200, total_duration: 1000000 }); + mockStatement.all + .mockReturnValueOnce([{ agent_type: 'claude-code', count: 200, duration: 1000000 }]) + .mockReturnValueOnce([{ source: 'auto', count: 200 }]) + .mockReturnValueOnce([]); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const stats = db.getAggregatedStats('year'); + + expect(stats.bySource.user).toBe(0); + expect(stats.bySource.auto).toBe(200); + }); + + it('should initialize bySource with zeros when no data', async () => { + mockStatement.get.mockReturnValue({ count: 0, total_duration: 0 }); + mockStatement.all.mockReturnValue([]); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const stats = db.getAggregatedStats('day'); + + expect(stats.bySource).toEqual({ user: 0, auto: 0 }); + }); + + it('should sum correctly across source types', async () => { + mockStatement.get.mockReturnValue({ count: 1000, total_duration: 5000000 }); + mockStatement.all + .mockReturnValueOnce([{ agent_type: 'claude-code', count: 1000, duration: 5000000 }]) + .mockReturnValueOnce([ + { source: 'user', count: 650 }, + { source: 'auto', count: 350 }, + ]) + .mockReturnValueOnce([]); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const stats = db.getAggregatedStats('all'); + + // Verify sum equals totalQueries + expect(stats.bySource.user + stats.bySource.auto).toBe(stats.totalQueries); + }); + }); + + describe('byDay breakdown calculations', () => { + it('should return daily breakdown with correct structure', async () => { + 
mockStatement.get.mockReturnValue({ count: 30, total_duration: 150000 }); + mockStatement.all + .mockReturnValueOnce([{ agent_type: 'claude-code', count: 30, duration: 150000 }]) // byAgent + .mockReturnValueOnce([{ source: 'user', count: 30 }]) // bySource + .mockReturnValueOnce([{ is_remote: 0, count: 30 }]) // byLocation + .mockReturnValueOnce([ + { date: '2024-01-01', count: 10, duration: 50000 }, + { date: '2024-01-02', count: 12, duration: 60000 }, + { date: '2024-01-03', count: 8, duration: 40000 }, + ]); // byDay + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const stats = db.getAggregatedStats('week'); + + expect(stats.byDay).toHaveLength(3); + expect(stats.byDay[0]).toEqual({ date: '2024-01-01', count: 10, duration: 50000 }); + expect(stats.byDay[1]).toEqual({ date: '2024-01-02', count: 12, duration: 60000 }); + expect(stats.byDay[2]).toEqual({ date: '2024-01-03', count: 8, duration: 40000 }); + }); + + it('should return empty array when no daily data exists', async () => { + mockStatement.get.mockReturnValue({ count: 0, total_duration: 0 }); + mockStatement.all.mockReturnValue([]); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const stats = db.getAggregatedStats('day'); + + expect(stats.byDay).toEqual([]); + expect(stats.byDay).toHaveLength(0); + }); + + it('should handle single day of data', async () => { + mockStatement.get.mockReturnValue({ count: 5, total_duration: 25000 }); + mockStatement.all + .mockReturnValueOnce([{ agent_type: 'claude-code', count: 5, duration: 25000 }]) // byAgent + .mockReturnValueOnce([{ source: 'user', count: 5 }]) // bySource + .mockReturnValueOnce([{ is_remote: 0, count: 5 }]) // byLocation + .mockReturnValueOnce([{ date: '2024-06-15', count: 5, duration: 25000 }]); // byDay + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const 
stats = db.getAggregatedStats('day'); + + expect(stats.byDay).toHaveLength(1); + expect(stats.byDay[0].date).toBe('2024-06-15'); + expect(stats.byDay[0].count).toBe(5); + expect(stats.byDay[0].duration).toBe(25000); + }); + + it('should order daily data chronologically (ASC)', async () => { + mockStatement.get.mockReturnValue({ count: 15, total_duration: 75000 }); + mockStatement.all + .mockReturnValueOnce([{ agent_type: 'claude-code', count: 15, duration: 75000 }]) // byAgent + .mockReturnValueOnce([{ source: 'user', count: 15 }]) // bySource + .mockReturnValueOnce([{ is_remote: 0, count: 15 }]) // byLocation + .mockReturnValueOnce([ + { date: '2024-03-01', count: 3, duration: 15000 }, + { date: '2024-03-02', count: 5, duration: 25000 }, + { date: '2024-03-03', count: 7, duration: 35000 }, + ]); // byDay + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const stats = db.getAggregatedStats('week'); + + // Verify ASC order (earliest date first) + expect(stats.byDay[0].date).toBe('2024-03-01'); + expect(stats.byDay[1].date).toBe('2024-03-02'); + expect(stats.byDay[2].date).toBe('2024-03-03'); + }); + + it('should sum daily counts equal to totalQueries', async () => { + mockStatement.get.mockReturnValue({ count: 25, total_duration: 125000 }); + mockStatement.all + .mockReturnValueOnce([{ agent_type: 'claude-code', count: 25, duration: 125000 }]) // byAgent + .mockReturnValueOnce([{ source: 'user', count: 25 }]) // bySource + .mockReturnValueOnce([{ is_remote: 0, count: 25 }]) // byLocation + .mockReturnValueOnce([ + { date: '2024-02-01', count: 8, duration: 40000 }, + { date: '2024-02-02', count: 10, duration: 50000 }, + { date: '2024-02-03', count: 7, duration: 35000 }, + ]); // byDay + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const stats = db.getAggregatedStats('week'); + + // Sum of daily counts should equal totalQueries + const 
dailySum = stats.byDay.reduce((sum, day) => sum + day.count, 0); + expect(dailySum).toBe(stats.totalQueries); + }); + + it('should sum daily durations equal to totalDuration', async () => { + mockStatement.get.mockReturnValue({ count: 20, total_duration: 100000 }); + mockStatement.all + .mockReturnValueOnce([{ agent_type: 'opencode', count: 20, duration: 100000 }]) // byAgent + .mockReturnValueOnce([{ source: 'auto', count: 20 }]) // bySource + .mockReturnValueOnce([{ is_remote: 0, count: 20 }]) // byLocation + .mockReturnValueOnce([ + { date: '2024-04-10', count: 5, duration: 25000 }, + { date: '2024-04-11', count: 8, duration: 40000 }, + { date: '2024-04-12', count: 7, duration: 35000 }, + ]); // byDay + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const stats = db.getAggregatedStats('week'); + + // Sum of daily durations should equal totalDuration + const dailyDurationSum = stats.byDay.reduce((sum, day) => sum + day.duration, 0); + expect(dailyDurationSum).toBe(stats.totalDuration); + }); + }); + + describe('aggregation consistency across multiple queries', () => { + it('should return consistent results when called multiple times', async () => { + mockStatement.get.mockReturnValue({ count: 50, total_duration: 250000 }); + mockStatement.all.mockReturnValue([ + { agent_type: 'claude-code', count: 50, duration: 250000 }, + ]); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const stats1 = db.getAggregatedStats('week'); + const stats2 = db.getAggregatedStats('week'); + + expect(stats1.totalQueries).toBe(stats2.totalQueries); + expect(stats1.totalDuration).toBe(stats2.totalDuration); + expect(stats1.avgDuration).toBe(stats2.avgDuration); + }); + + it('should handle concurrent access correctly', async () => { + mockStatement.get.mockReturnValue({ count: 100, total_duration: 500000 }); + mockStatement.all.mockReturnValue([]); + + const { 
StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + // Simulate concurrent calls + const [result1, result2, result3] = [ + db.getAggregatedStats('day'), + db.getAggregatedStats('week'), + db.getAggregatedStats('month'), + ]; + + expect(result1.totalQueries).toBe(100); + expect(result2.totalQueries).toBe(100); + expect(result3.totalQueries).toBe(100); + }); + }); + + describe('SQL query structure verification', () => { + it('should use COALESCE for totalDuration to handle NULL', async () => { + mockStatement.get.mockReturnValue({ count: 0, total_duration: 0 }); + mockStatement.all.mockReturnValue([]); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + db.getAggregatedStats('week'); + + // Verify the SQL query uses COALESCE + const prepareCalls = mockDb.prepare.mock.calls; + const totalsCall = prepareCalls.find((call) => + (call[0] as string).includes('COALESCE(SUM(duration), 0)') + ); + + expect(totalsCall).toBeDefined(); + }); + + it('should GROUP BY agent_type for byAgent breakdown', async () => { + mockStatement.get.mockReturnValue({ count: 0, total_duration: 0 }); + mockStatement.all.mockReturnValue([]); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + db.getAggregatedStats('month'); + + const prepareCalls = mockDb.prepare.mock.calls; + const byAgentCall = prepareCalls.find( + (call) => + (call[0] as string).includes('GROUP BY agent_type') && + (call[0] as string).includes('FROM query_events') + ); + + expect(byAgentCall).toBeDefined(); + }); + + it('should GROUP BY source for bySource breakdown', async () => { + mockStatement.get.mockReturnValue({ count: 0, total_duration: 0 }); + mockStatement.all.mockReturnValue([]); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + db.getAggregatedStats('year'); + + const prepareCalls = 
mockDb.prepare.mock.calls; + const bySourceCall = prepareCalls.find( + (call) => + (call[0] as string).includes('GROUP BY source') && + (call[0] as string).includes('FROM query_events') + ); + + expect(bySourceCall).toBeDefined(); + }); + + it('should use date() function for daily grouping', async () => { + mockStatement.get.mockReturnValue({ count: 0, total_duration: 0 }); + mockStatement.all.mockReturnValue([]); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + db.getAggregatedStats('all'); + + const prepareCalls = mockDb.prepare.mock.calls; + const byDayCall = prepareCalls.find((call) => + (call[0] as string).includes("date(start_time / 1000, 'unixepoch'") + ); + + expect(byDayCall).toBeDefined(); + }); + + it('should ORDER BY date ASC in byDay query', async () => { + mockStatement.get.mockReturnValue({ count: 0, total_duration: 0 }); + mockStatement.all.mockReturnValue([]); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + db.getAggregatedStats('week'); + + const prepareCalls = mockDb.prepare.mock.calls; + const byDayCall = prepareCalls.find( + (call) => + (call[0] as string).includes('ORDER BY date ASC') || + ((call[0] as string).includes('date(start_time') && (call[0] as string).includes('ASC')) + ); + + expect(byDayCall).toBeDefined(); + }); + }); + + describe('edge case calculations', () => { + it('should handle very small average (less than 1ms)', async () => { + // 10 queries, 5ms total = 0.5ms average, should round to 1 (or 0) + mockStatement.get.mockReturnValue({ count: 10, total_duration: 5 }); + mockStatement.all.mockReturnValue([]); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const stats = db.getAggregatedStats('day'); + + // Math.round(5 / 10) = 1 + expect(stats.avgDuration).toBe(1); + }); + + it('should handle maximum JavaScript safe integer values', async () 
=> { + const maxSafe = Number.MAX_SAFE_INTEGER; + // Use a count that divides evenly to avoid rounding issues + mockStatement.get.mockReturnValue({ count: 1, total_duration: maxSafe }); + mockStatement.all.mockReturnValue([]); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const stats = db.getAggregatedStats('all'); + + expect(stats.totalDuration).toBe(maxSafe); + expect(stats.avgDuration).toBe(maxSafe); + }); + + it('should handle mixed zero and non-zero durations in agents', async () => { + mockStatement.get.mockReturnValue({ count: 3, total_duration: 5000 }); + mockStatement.all + .mockReturnValueOnce([ + { agent_type: 'claude-code', count: 2, duration: 5000 }, + { agent_type: 'opencode', count: 1, duration: 0 }, // Zero duration + ]) + .mockReturnValueOnce([{ source: 'user', count: 3 }]) + .mockReturnValueOnce([]); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const stats = db.getAggregatedStats('week'); + + expect(stats.byAgent['claude-code'].duration).toBe(5000); + expect(stats.byAgent['opencode'].duration).toBe(0); + }); + + it('should handle dates spanning year boundaries', async () => { + mockStatement.get.mockReturnValue({ count: 2, total_duration: 10000 }); + mockStatement.all + .mockReturnValueOnce([{ agent_type: 'claude-code', count: 2, duration: 10000 }]) // byAgent + .mockReturnValueOnce([{ source: 'user', count: 2 }]) // bySource + .mockReturnValueOnce([{ is_remote: 0, count: 2 }]) // byLocation + .mockReturnValueOnce([ + { date: '2023-12-31', count: 1, duration: 5000 }, + { date: '2024-01-01', count: 1, duration: 5000 }, + ]); // byDay + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const stats = db.getAggregatedStats('week'); + + expect(stats.byDay).toHaveLength(2); + expect(stats.byDay[0].date).toBe('2023-12-31'); + 
expect(stats.byDay[1].date).toBe('2024-01-01'); + }); + }); +}); + +/** + * Cross-platform database path resolution tests + * + * Tests verify that the stats database file is created at the correct + * platform-appropriate path on macOS, Windows, and Linux. Electron's + * app.getPath('userData') returns: + * + * - macOS: ~/Library/Application Support/Maestro/ + * - Windows: %APPDATA%\Maestro\ (e.g., C:\Users\<username>\AppData\Roaming\Maestro\) + * - Linux: ~/.config/Maestro/ + * + * The stats database is always created at {userData}/stats.db + */ diff --git a/src/__tests__/main/stats/auto-run.test.ts b/src/__tests__/main/stats/auto-run.test.ts new file mode 100644 index 00000000..51ab6f41 --- /dev/null +++ b/src/__tests__/main/stats/auto-run.test.ts @@ -0,0 +1,1385 @@ +/** + * Tests for Auto Run session and task CRUD operations. + * + * Note: better-sqlite3 is a native module compiled for Electron's Node version. + * Direct testing with the native module in vitest is not possible without + * electron-rebuild for the vitest runtime. These tests use mocked database + * operations to verify the logic without requiring the actual native module. + * + * For full integration testing of the SQLite database, use the Electron test + * environment (e2e tests) where the native module is properly loaded. 
+ */ + +import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest'; +import * as path from 'path'; +import * as os from 'os'; + +// Track Database constructor calls to verify file path +let lastDbPath: string | null = null; + +// Store mock references so they can be accessed in tests +const mockStatement = { + run: vi.fn(() => ({ changes: 1 })), + get: vi.fn(() => ({ count: 0, total_duration: 0 })), + all: vi.fn(() => []), +}; + +const mockDb = { + pragma: vi.fn(() => [{ user_version: 0 }]), + prepare: vi.fn(() => mockStatement), + close: vi.fn(), + // Transaction mock that immediately executes the function + transaction: vi.fn((fn: () => void) => { + return () => fn(); + }), +}; + +// Mock better-sqlite3 as a class +vi.mock('better-sqlite3', () => { + return { + default: class MockDatabase { + constructor(dbPath: string) { + lastDbPath = dbPath; + } + pragma = mockDb.pragma; + prepare = mockDb.prepare; + close = mockDb.close; + transaction = mockDb.transaction; + }, + }; +}); + +// Mock electron's app module with trackable userData path +const mockUserDataPath = path.join(os.tmpdir(), 'maestro-test-stats-db'); +vi.mock('electron', () => ({ + app: { + getPath: vi.fn((name: string) => { + if (name === 'userData') return mockUserDataPath; + return os.tmpdir(); + }), + }, +})); + +// Track fs calls +const mockFsExistsSync = vi.fn(() => true); +const mockFsMkdirSync = vi.fn(); +const mockFsCopyFileSync = vi.fn(); +const mockFsUnlinkSync = vi.fn(); +const mockFsRenameSync = vi.fn(); +const mockFsStatSync = vi.fn(() => ({ size: 1024 })); +const mockFsReadFileSync = vi.fn(() => '0'); // Default: old timestamp (triggers vacuum check) +const mockFsWriteFileSync = vi.fn(); + +// Mock fs +vi.mock('fs', () => ({ + existsSync: (...args: unknown[]) => mockFsExistsSync(...args), + mkdirSync: (...args: unknown[]) => mockFsMkdirSync(...args), + copyFileSync: (...args: unknown[]) => mockFsCopyFileSync(...args), + unlinkSync: (...args: unknown[]) => 
mockFsUnlinkSync(...args), + renameSync: (...args: unknown[]) => mockFsRenameSync(...args), + statSync: (...args: unknown[]) => mockFsStatSync(...args), + readFileSync: (...args: unknown[]) => mockFsReadFileSync(...args), + writeFileSync: (...args: unknown[]) => mockFsWriteFileSync(...args), +})); + +// Mock logger +vi.mock('../../../main/utils/logger', () => ({ + logger: { + info: vi.fn(), + warn: vi.fn(), + error: vi.fn(), + debug: vi.fn(), + }, +})); + +// Import types only - we'll test the type definitions +import type { + QueryEvent, + AutoRunSession, + AutoRunTask, + SessionLifecycleEvent, + StatsTimeRange, + StatsFilters, + StatsAggregation, +} from '../../../shared/stats-types'; + +describe('Auto Run session and task recording', () => { + beforeEach(() => { + vi.clearAllMocks(); + lastDbPath = null; + mockDb.pragma.mockReturnValue([{ user_version: 0 }]); + mockDb.prepare.mockReturnValue(mockStatement); + mockStatement.run.mockReturnValue({ changes: 1 }); + mockFsExistsSync.mockReturnValue(true); + }); + + afterEach(() => { + vi.resetModules(); + }); + + describe('Auto Run sessions', () => { + it('should insert Auto Run session and return id', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const sessionId = db.insertAutoRunSession({ + sessionId: 'session-1', + agentType: 'claude-code', + documentPath: '/docs/TASK-1.md', + startTime: Date.now(), + duration: 0, + tasksTotal: 5, + tasksCompleted: 0, + projectPath: '/project', + }); + + expect(sessionId).toBeDefined(); + expect(typeof sessionId).toBe('string'); + expect(mockStatement.run).toHaveBeenCalled(); + }); + + it('should update Auto Run session on completion', async () => { + mockStatement.run.mockReturnValue({ changes: 1 }); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const updated = db.updateAutoRunSession('session-id', { + duration: 60000, + tasksCompleted: 
5, + }); + + expect(updated).toBe(true); + expect(mockStatement.run).toHaveBeenCalled(); + }); + + it('should retrieve Auto Run sessions within time range', async () => { + mockStatement.all.mockReturnValue([ + { + id: 'auto-1', + session_id: 'session-1', + agent_type: 'claude-code', + document_path: '/docs/TASK-1.md', + start_time: Date.now(), + duration: 60000, + tasks_total: 5, + tasks_completed: 5, + project_path: '/project', + }, + ]); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const sessions = db.getAutoRunSessions('week'); + + expect(sessions).toHaveLength(1); + expect(sessions[0].sessionId).toBe('session-1'); + expect(sessions[0].tasksTotal).toBe(5); + }); + }); + + describe('Auto Run tasks', () => { + it('should insert Auto Run task with success=true', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const taskId = db.insertAutoRunTask({ + autoRunSessionId: 'auto-1', + sessionId: 'session-1', + agentType: 'claude-code', + taskIndex: 0, + taskContent: 'First task', + startTime: Date.now(), + duration: 10000, + success: true, + }); + + expect(taskId).toBeDefined(); + + // Verify success was converted to 1 for SQLite + const runCall = mockStatement.run.mock.calls[mockStatement.run.mock.calls.length - 1]; + expect(runCall[8]).toBe(1); // success parameter (last one) + }); + + it('should insert Auto Run task with success=false', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + db.insertAutoRunTask({ + autoRunSessionId: 'auto-1', + sessionId: 'session-1', + agentType: 'claude-code', + taskIndex: 1, + taskContent: 'Failed task', + startTime: Date.now(), + duration: 5000, + success: false, + }); + + // Verify success was converted to 0 for SQLite + const runCall = mockStatement.run.mock.calls[mockStatement.run.mock.calls.length - 1]; + 
expect(runCall[8]).toBe(0); // success parameter (last one) + }); + + it('should retrieve tasks for Auto Run session ordered by task_index', async () => { + mockStatement.all.mockReturnValue([ + { + id: 'task-1', + auto_run_session_id: 'auto-1', + session_id: 'session-1', + agent_type: 'claude-code', + task_index: 0, + task_content: 'First task', + start_time: Date.now(), + duration: 10000, + success: 1, + }, + { + id: 'task-2', + auto_run_session_id: 'auto-1', + session_id: 'session-1', + agent_type: 'claude-code', + task_index: 1, + task_content: 'Second task', + start_time: Date.now(), + duration: 15000, + success: 1, + }, + ]); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const tasks = db.getAutoRunTasks('auto-1'); + + expect(tasks).toHaveLength(2); + expect(tasks[0].taskIndex).toBe(0); + expect(tasks[1].taskIndex).toBe(1); + expect(tasks[0].success).toBe(true); + }); + }); +}); + +/** + * Aggregation and filtering tests + */ + +describe('Auto Run sessions and tasks recorded correctly', () => { + beforeEach(() => { + vi.clearAllMocks(); + mockDb.pragma.mockReturnValue([{ user_version: 1 }]); + mockDb.prepare.mockReturnValue(mockStatement); + mockStatement.run.mockReturnValue({ changes: 1 }); + mockStatement.get.mockReturnValue({ count: 0, total_duration: 0 }); + mockStatement.all.mockReturnValue([]); + mockFsExistsSync.mockReturnValue(true); + }); + + afterEach(() => { + vi.resetModules(); + }); + + describe('Auto Run session lifecycle', () => { + it('should record Auto Run session with all required fields', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const startTime = Date.now(); + const sessionId = db.insertAutoRunSession({ + sessionId: 'maestro-session-123', + agentType: 'claude-code', + documentPath: 'Auto Run Docs/PHASE-1.md', + startTime, + duration: 0, // Duration is 0 at start + tasksTotal: 10, + 
tasksCompleted: 0, + projectPath: '/Users/test/my-project', + }); + + expect(sessionId).toBeDefined(); + expect(typeof sessionId).toBe('string'); + + // Verify all fields were passed correctly to the INSERT statement + const runCalls = mockStatement.run.mock.calls; + const lastCall = runCalls[runCalls.length - 1]; + + // INSERT parameters: id, session_id, agent_type, document_path, start_time, duration, tasks_total, tasks_completed, project_path + expect(lastCall[1]).toBe('maestro-session-123'); // session_id + expect(lastCall[2]).toBe('claude-code'); // agent_type + expect(lastCall[3]).toBe('Auto Run Docs/PHASE-1.md'); // document_path + expect(lastCall[4]).toBe(startTime); // start_time + expect(lastCall[5]).toBe(0); // duration (0 at start) + expect(lastCall[6]).toBe(10); // tasks_total + expect(lastCall[7]).toBe(0); // tasks_completed (0 at start) + expect(lastCall[8]).toBe('/Users/test/my-project'); // project_path + }); + + it('should record Auto Run session with multiple documents (comma-separated)', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const sessionId = db.insertAutoRunSession({ + sessionId: 'multi-doc-session', + agentType: 'claude-code', + documentPath: 'PHASE-1.md, PHASE-2.md, PHASE-3.md', + startTime: Date.now(), + duration: 0, + tasksTotal: 25, + tasksCompleted: 0, + projectPath: '/project', + }); + + expect(sessionId).toBeDefined(); + + const runCalls = mockStatement.run.mock.calls; + const lastCall = runCalls[runCalls.length - 1]; + expect(lastCall[3]).toBe('PHASE-1.md, PHASE-2.md, PHASE-3.md'); + }); + + it('should update Auto Run session duration and tasks on completion', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + // First, insert the session + const autoRunId = db.insertAutoRunSession({ + sessionId: 'session-to-update', + agentType: 'claude-code', + documentPath: 'TASKS.md', + 
startTime: Date.now() - 60000, // Started 1 minute ago + duration: 0, + tasksTotal: 5, + tasksCompleted: 0, + projectPath: '/project', + }); + + // Now update it with completion data + const updated = db.updateAutoRunSession(autoRunId, { + duration: 60000, // 1 minute + tasksCompleted: 5, + }); + + expect(updated).toBe(true); + + // Verify UPDATE was called + expect(mockStatement.run).toHaveBeenCalled(); + }); + + it('should update Auto Run session with partial completion (some tasks skipped)', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const autoRunId = db.insertAutoRunSession({ + sessionId: 'partial-session', + agentType: 'claude-code', + documentPath: 'COMPLEX-TASKS.md', + startTime: Date.now(), + duration: 0, + tasksTotal: 10, + tasksCompleted: 0, + projectPath: '/project', + }); + + // Update with partial completion (7 of 10 tasks) + const updated = db.updateAutoRunSession(autoRunId, { + duration: 120000, // 2 minutes + tasksCompleted: 7, + }); + + expect(updated).toBe(true); + }); + + it('should handle Auto Run session stopped by user (wasStopped)', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const autoRunId = db.insertAutoRunSession({ + sessionId: 'stopped-session', + agentType: 'claude-code', + documentPath: 'TASKS.md', + startTime: Date.now(), + duration: 0, + tasksTotal: 20, + tasksCompleted: 0, + projectPath: '/project', + }); + + // User stopped after 3 tasks + const updated = db.updateAutoRunSession(autoRunId, { + duration: 30000, // 30 seconds + tasksCompleted: 3, + }); + + expect(updated).toBe(true); + }); + }); + + describe('Auto Run task recording', () => { + it('should record individual task with all fields', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const taskStartTime = Date.now() - 5000; + const taskId = 
db.insertAutoRunTask({ + autoRunSessionId: 'auto-run-session-1', + sessionId: 'maestro-session-1', + agentType: 'claude-code', + taskIndex: 0, + taskContent: 'Implement user authentication module', + startTime: taskStartTime, + duration: 5000, + success: true, + }); + + expect(taskId).toBeDefined(); + + const runCalls = mockStatement.run.mock.calls; + const lastCall = runCalls[runCalls.length - 1]; + + // INSERT parameters: id, auto_run_session_id, session_id, agent_type, task_index, task_content, start_time, duration, success + expect(lastCall[1]).toBe('auto-run-session-1'); // auto_run_session_id + expect(lastCall[2]).toBe('maestro-session-1'); // session_id + expect(lastCall[3]).toBe('claude-code'); // agent_type + expect(lastCall[4]).toBe(0); // task_index + expect(lastCall[5]).toBe('Implement user authentication module'); // task_content + expect(lastCall[6]).toBe(taskStartTime); // start_time + expect(lastCall[7]).toBe(5000); // duration + expect(lastCall[8]).toBe(1); // success (true -> 1) + }); + + it('should record failed task with success=false', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + db.insertAutoRunTask({ + autoRunSessionId: 'auto-run-1', + sessionId: 'session-1', + agentType: 'claude-code', + taskIndex: 2, + taskContent: 'Fix complex edge case that requires manual intervention', + startTime: Date.now(), + duration: 10000, + success: false, // Task failed + }); + + const runCalls = mockStatement.run.mock.calls; + const lastCall = runCalls[runCalls.length - 1]; + expect(lastCall[8]).toBe(0); // success (false -> 0) + }); + + it('should record multiple tasks for same Auto Run session', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + // Clear mocks after initialize() to count only test operations + mockStatement.run.mockClear(); + + const autoRunSessionId = 'multi-task-session'; + const baseTime = 
Date.now(); + + // Task 0 + const task0Id = db.insertAutoRunTask({ + autoRunSessionId, + sessionId: 'session-1', + agentType: 'claude-code', + taskIndex: 0, + taskContent: 'Task 0: Initialize project', + startTime: baseTime, + duration: 3000, + success: true, + }); + + // Task 1 + const task1Id = db.insertAutoRunTask({ + autoRunSessionId, + sessionId: 'session-1', + agentType: 'claude-code', + taskIndex: 1, + taskContent: 'Task 1: Add dependencies', + startTime: baseTime + 3000, + duration: 5000, + success: true, + }); + + // Task 2 + const task2Id = db.insertAutoRunTask({ + autoRunSessionId, + sessionId: 'session-1', + agentType: 'claude-code', + taskIndex: 2, + taskContent: 'Task 2: Configure build system', + startTime: baseTime + 8000, + duration: 7000, + success: true, + }); + + // All tasks should have unique IDs + expect(task0Id).not.toBe(task1Id); + expect(task1Id).not.toBe(task2Id); + expect(task0Id).not.toBe(task2Id); + + // All 3 INSERT calls should have happened + expect(mockStatement.run).toHaveBeenCalledTimes(3); + }); + + it('should record task without optional taskContent', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const taskId = db.insertAutoRunTask({ + autoRunSessionId: 'auto-run-1', + sessionId: 'session-1', + agentType: 'claude-code', + taskIndex: 0, + // taskContent is omitted + startTime: Date.now(), + duration: 2000, + success: true, + }); + + expect(taskId).toBeDefined(); + + const runCalls = mockStatement.run.mock.calls; + const lastCall = runCalls[runCalls.length - 1]; + expect(lastCall[5]).toBeNull(); // task_content should be NULL + }); + }); + + describe('Auto Run session and task retrieval', () => { + it('should retrieve Auto Run sessions with proper field mapping', async () => { + const now = Date.now(); + mockStatement.all.mockReturnValue([ + { + id: 'auto-run-id-1', + session_id: 'session-1', + agent_type: 'claude-code', + document_path: 'PHASE-1.md', + 
start_time: now - 60000, + duration: 60000, + tasks_total: 10, + tasks_completed: 10, + project_path: '/project/path', + }, + { + id: 'auto-run-id-2', + session_id: 'session-2', + agent_type: 'opencode', + document_path: null, // No document path + start_time: now - 120000, + duration: 45000, + tasks_total: 5, + tasks_completed: 4, + project_path: null, + }, + ]); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const sessions = db.getAutoRunSessions('week'); + + expect(sessions).toHaveLength(2); + + // First session - all fields present + expect(sessions[0].id).toBe('auto-run-id-1'); + expect(sessions[0].sessionId).toBe('session-1'); + expect(sessions[0].agentType).toBe('claude-code'); + expect(sessions[0].documentPath).toBe('PHASE-1.md'); + expect(sessions[0].startTime).toBe(now - 60000); + expect(sessions[0].duration).toBe(60000); + expect(sessions[0].tasksTotal).toBe(10); + expect(sessions[0].tasksCompleted).toBe(10); + expect(sessions[0].projectPath).toBe('/project/path'); + + // Second session - optional fields are undefined + expect(sessions[1].id).toBe('auto-run-id-2'); + expect(sessions[1].documentPath).toBeUndefined(); + expect(sessions[1].projectPath).toBeUndefined(); + expect(sessions[1].tasksCompleted).toBe(4); + }); + + it('should retrieve tasks for Auto Run session with proper field mapping', async () => { + const now = Date.now(); + mockStatement.all.mockReturnValue([ + { + id: 'task-id-0', + auto_run_session_id: 'auto-run-1', + session_id: 'session-1', + agent_type: 'claude-code', + task_index: 0, + task_content: 'First task description', + start_time: now - 15000, + duration: 5000, + success: 1, + }, + { + id: 'task-id-1', + auto_run_session_id: 'auto-run-1', + session_id: 'session-1', + agent_type: 'claude-code', + task_index: 1, + task_content: null, // No content + start_time: now - 10000, + duration: 5000, + success: 1, + }, + { + id: 'task-id-2', + auto_run_session_id: 
'auto-run-1', + session_id: 'session-1', + agent_type: 'claude-code', + task_index: 2, + task_content: 'Failed task', + start_time: now - 5000, + duration: 3000, + success: 0, // Failed + }, + ]); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const tasks = db.getAutoRunTasks('auto-run-1'); + + expect(tasks).toHaveLength(3); + + // First task + expect(tasks[0].id).toBe('task-id-0'); + expect(tasks[0].autoRunSessionId).toBe('auto-run-1'); + expect(tasks[0].sessionId).toBe('session-1'); + expect(tasks[0].agentType).toBe('claude-code'); + expect(tasks[0].taskIndex).toBe(0); + expect(tasks[0].taskContent).toBe('First task description'); + expect(tasks[0].startTime).toBe(now - 15000); + expect(tasks[0].duration).toBe(5000); + expect(tasks[0].success).toBe(true); // 1 -> true + + // Second task - no content + expect(tasks[1].taskContent).toBeUndefined(); + expect(tasks[1].success).toBe(true); + + // Third task - failed + expect(tasks[2].success).toBe(false); // 0 -> false + }); + + it('should return tasks ordered by task_index ASC', async () => { + // Return tasks in wrong order to verify sorting + mockStatement.all.mockReturnValue([ + { + id: 't2', + auto_run_session_id: 'ar1', + session_id: 's1', + agent_type: 'claude-code', + task_index: 2, + task_content: 'C', + start_time: 3, + duration: 1, + success: 1, + }, + { + id: 't0', + auto_run_session_id: 'ar1', + session_id: 's1', + agent_type: 'claude-code', + task_index: 0, + task_content: 'A', + start_time: 1, + duration: 1, + success: 1, + }, + { + id: 't1', + auto_run_session_id: 'ar1', + session_id: 's1', + agent_type: 'claude-code', + task_index: 1, + task_content: 'B', + start_time: 2, + duration: 1, + success: 1, + }, + ]); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const tasks = db.getAutoRunTasks('ar1'); + + // Should be returned as-is (the SQL query handles ordering) + // The 
mock returns them unsorted, but the real DB would sort them + expect(tasks).toHaveLength(3); + }); + }); + + describe('Auto Run time range filtering', () => { + it('should filter Auto Run sessions by day range', async () => { + mockStatement.all.mockReturnValue([]); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + db.getAutoRunSessions('day'); + + // Verify the query was prepared with time filter + const prepareCalls = mockDb.prepare.mock.calls; + const selectCall = prepareCalls.find((call) => + (call[0] as string).includes('SELECT * FROM auto_run_sessions') + ); + expect(selectCall).toBeDefined(); + expect(selectCall![0]).toContain('start_time >= ?'); + }); + + it('should return all Auto Run sessions for "all" time range', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + mockStatement.all.mockReturnValue([ + { + id: 'old', + session_id: 's1', + agent_type: 'claude-code', + document_path: null, + start_time: 1000, + duration: 100, + tasks_total: 1, + tasks_completed: 1, + project_path: null, + }, + { + id: 'new', + session_id: 's2', + agent_type: 'claude-code', + document_path: null, + start_time: Date.now(), + duration: 100, + tasks_total: 1, + tasks_completed: 1, + project_path: null, + }, + ]); + + const sessions = db.getAutoRunSessions('all'); + + // With 'all' range, startTime should be 0, so all sessions should be returned + expect(sessions).toHaveLength(2); + }); + }); + + describe('complete Auto Run workflow', () => { + it('should support the full Auto Run lifecycle: start -> record tasks -> end', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + // Clear mocks after initialize() to count only test operations + mockStatement.run.mockClear(); + + const batchStartTime = Date.now(); + + // Step 1: Start Auto Run session + const autoRunId = 
db.insertAutoRunSession({ + sessionId: 'complete-workflow-session', + agentType: 'claude-code', + documentPath: 'PHASE-1.md, PHASE-2.md', + startTime: batchStartTime, + duration: 0, + tasksTotal: 5, + tasksCompleted: 0, + projectPath: '/test/project', + }); + + expect(autoRunId).toBeDefined(); + + // Step 2: Record individual tasks as they complete + let taskTime = batchStartTime; + + for (let i = 0; i < 5; i++) { + const taskDuration = 2000 + i * 500; // Varying durations + db.insertAutoRunTask({ + autoRunSessionId: autoRunId, + sessionId: 'complete-workflow-session', + agentType: 'claude-code', + taskIndex: i, + taskContent: `Task ${i + 1}: Implementation step ${i + 1}`, + startTime: taskTime, + duration: taskDuration, + success: i !== 3, // Task 4 (index 3) fails + }); + taskTime += taskDuration; + } + + // Step 3: End Auto Run session + const totalDuration = taskTime - batchStartTime; + const updated = db.updateAutoRunSession(autoRunId, { + duration: totalDuration, + tasksCompleted: 4, // 4 of 5 succeeded + }); + + expect(updated).toBe(true); + + // Verify the total number of INSERT/UPDATE calls + // 1 session insert + 5 task inserts + 1 session update = 7 calls + expect(mockStatement.run).toHaveBeenCalledTimes(7); + }); + + it('should handle Auto Run with loop mode (multiple passes)', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + // Clear mocks after initialize() to count only test operations + mockStatement.run.mockClear(); + + const startTime = Date.now(); + + // Start session for loop mode run + const autoRunId = db.insertAutoRunSession({ + sessionId: 'loop-mode-session', + agentType: 'claude-code', + documentPath: 'RECURRING-TASKS.md', + startTime, + duration: 0, + tasksTotal: 15, // Initial estimate (may grow with loops) + tasksCompleted: 0, + projectPath: '/project', + }); + + // Record tasks from multiple loop iterations + // Loop 1: 5 tasks + for (let i = 0; i < 5; i++) { + 
db.insertAutoRunTask({ + autoRunSessionId: autoRunId, + sessionId: 'loop-mode-session', + agentType: 'claude-code', + taskIndex: i, + taskContent: `Loop 1, Task ${i + 1}`, + startTime: startTime + i * 3000, + duration: 3000, + success: true, + }); + } + + // Loop 2: 5 more tasks + for (let i = 0; i < 5; i++) { + db.insertAutoRunTask({ + autoRunSessionId: autoRunId, + sessionId: 'loop-mode-session', + agentType: 'claude-code', + taskIndex: 5 + i, // Continue indexing from where loop 1 ended + taskContent: `Loop 2, Task ${i + 1}`, + startTime: startTime + 15000 + i * 3000, + duration: 3000, + success: true, + }); + } + + // Update with final stats + db.updateAutoRunSession(autoRunId, { + duration: 30000, // 30 seconds total + tasksCompleted: 10, + }); + + // 1 session + 10 tasks + 1 update = 12 calls + expect(mockStatement.run).toHaveBeenCalledTimes(12); + }); + }); + + describe('edge cases and error scenarios', () => { + it('should handle very long task content (synopsis)', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const longContent = 'A'.repeat(10000); // 10KB task content + + const taskId = db.insertAutoRunTask({ + autoRunSessionId: 'ar1', + sessionId: 's1', + agentType: 'claude-code', + taskIndex: 0, + taskContent: longContent, + startTime: Date.now(), + duration: 5000, + success: true, + }); + + expect(taskId).toBeDefined(); + + const runCalls = mockStatement.run.mock.calls; + const lastCall = runCalls[runCalls.length - 1]; + expect(lastCall[5]).toBe(longContent); + }); + + it('should handle zero duration tasks', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const taskId = db.insertAutoRunTask({ + autoRunSessionId: 'ar1', + sessionId: 's1', + agentType: 'claude-code', + taskIndex: 0, + taskContent: 'Instant task', + startTime: Date.now(), + duration: 0, // Zero duration (e.g., cached result) + success: 
true, + }); + + expect(taskId).toBeDefined(); + + const runCalls = mockStatement.run.mock.calls; + const lastCall = runCalls[runCalls.length - 1]; + expect(lastCall[7]).toBe(0); + }); + + it('should handle Auto Run session with zero tasks total', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + // This shouldn't happen in practice, but the database should handle it + const sessionId = db.insertAutoRunSession({ + sessionId: 'empty-session', + agentType: 'claude-code', + documentPath: 'EMPTY.md', + startTime: Date.now(), + duration: 100, + tasksTotal: 0, + tasksCompleted: 0, + projectPath: '/project', + }); + + expect(sessionId).toBeDefined(); + }); + + it('should handle different agent types for Auto Run', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + // Clear mocks after initialize() to count only test operations + mockStatement.run.mockClear(); + + // Claude Code Auto Run + db.insertAutoRunSession({ + sessionId: 's1', + agentType: 'claude-code', + documentPath: 'TASKS.md', + startTime: Date.now(), + duration: 1000, + tasksTotal: 5, + tasksCompleted: 5, + projectPath: '/project', + }); + + // OpenCode Auto Run + db.insertAutoRunSession({ + sessionId: 's2', + agentType: 'opencode', + documentPath: 'TASKS.md', + startTime: Date.now(), + duration: 2000, + tasksTotal: 3, + tasksCompleted: 3, + projectPath: '/project', + }); + + // Verify both agent types were recorded + const runCalls = mockStatement.run.mock.calls; + expect(runCalls[0][2]).toBe('claude-code'); + expect(runCalls[1][2]).toBe('opencode'); + }); + }); +}); + +/** + * Foreign key relationship verification tests + * + * These tests verify that the foreign key relationship between auto_run_tasks + * and auto_run_sessions is properly defined in the schema, ensuring referential + * integrity can be enforced when foreign key constraints are enabled. 
+ */ +describe('Foreign key relationship between tasks and sessions', () => { + beforeEach(() => { + vi.clearAllMocks(); + mockDb.pragma.mockReturnValue([{ user_version: 0 }]); + mockDb.prepare.mockReturnValue(mockStatement); + mockStatement.run.mockReturnValue({ changes: 1 }); + mockStatement.get.mockReturnValue({ count: 0, total_duration: 0 }); + mockStatement.all.mockReturnValue([]); + mockFsExistsSync.mockReturnValue(true); + }); + + afterEach(() => { + vi.resetModules(); + }); + + describe('schema definition', () => { + it('should create auto_run_tasks table with REFERENCES clause to auto_run_sessions', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + // Verify the CREATE TABLE statement includes the foreign key reference + const prepareCalls = mockDb.prepare.mock.calls.map((call) => call[0] as string); + const createTasksTable = prepareCalls.find((sql) => + sql.includes('CREATE TABLE IF NOT EXISTS auto_run_tasks') + ); + + expect(createTasksTable).toBeDefined(); + expect(createTasksTable).toContain( + 'auto_run_session_id TEXT NOT NULL REFERENCES auto_run_sessions(id)' + ); + }); + + it('should have auto_run_session_id column as NOT NULL in auto_run_tasks', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const prepareCalls = mockDb.prepare.mock.calls.map((call) => call[0] as string); + const createTasksTable = prepareCalls.find((sql) => + sql.includes('CREATE TABLE IF NOT EXISTS auto_run_tasks') + ); + + expect(createTasksTable).toBeDefined(); + // Verify NOT NULL constraint is present for auto_run_session_id + expect(createTasksTable).toContain('auto_run_session_id TEXT NOT NULL'); + }); + + it('should create index on auto_run_session_id foreign key column', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const prepareCalls = 
mockDb.prepare.mock.calls.map((call) => call[0] as string); + const indexCreation = prepareCalls.find((sql) => sql.includes('idx_task_auto_session')); + + expect(indexCreation).toBeDefined(); + expect(indexCreation).toContain('ON auto_run_tasks(auto_run_session_id)'); + }); + }); + + describe('referential integrity behavior', () => { + it('should store auto_run_session_id when inserting task', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const autoRunSessionId = 'parent-session-abc-123'; + db.insertAutoRunTask({ + autoRunSessionId, + sessionId: 'maestro-session-1', + agentType: 'claude-code', + taskIndex: 0, + taskContent: 'Test task', + startTime: Date.now(), + duration: 1000, + success: true, + }); + + // Verify the auto_run_session_id was passed to the INSERT + const runCalls = mockStatement.run.mock.calls; + const lastCall = runCalls[runCalls.length - 1]; + + // INSERT parameters: id, auto_run_session_id, session_id, agent_type, task_index, task_content, start_time, duration, success + expect(lastCall[1]).toBe(autoRunSessionId); + }); + + it('should insert task with matching auto_run_session_id from parent session', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + // Clear calls from initialization + mockStatement.run.mockClear(); + + // First insert a session + const autoRunId = db.insertAutoRunSession({ + sessionId: 'session-1', + agentType: 'claude-code', + documentPath: 'PHASE-1.md', + startTime: Date.now(), + duration: 0, + tasksTotal: 5, + tasksCompleted: 0, + projectPath: '/project', + }); + + // Then insert a task referencing that session + const taskId = db.insertAutoRunTask({ + autoRunSessionId: autoRunId, + sessionId: 'session-1', + agentType: 'claude-code', + taskIndex: 0, + taskContent: 'First task', + startTime: Date.now(), + duration: 1000, + success: true, + }); + + 
expect(autoRunId).toBeDefined(); + expect(taskId).toBeDefined(); + + // Both inserts should have succeeded (session + task) + expect(mockStatement.run).toHaveBeenCalledTimes(2); + + // Verify the task INSERT used the session ID returned from the session INSERT + const runCalls = mockStatement.run.mock.calls; + const taskInsertCall = runCalls[1]; + expect(taskInsertCall[1]).toBe(autoRunId); // auto_run_session_id matches + }); + + it('should retrieve tasks only for the specific parent session', async () => { + const now = Date.now(); + + // Mock returns tasks for session 'auto-run-A' only + mockStatement.all.mockReturnValue([ + { + id: 'task-1', + auto_run_session_id: 'auto-run-A', + session_id: 'session-1', + agent_type: 'claude-code', + task_index: 0, + task_content: 'Task for session A', + start_time: now, + duration: 1000, + success: 1, + }, + { + id: 'task-2', + auto_run_session_id: 'auto-run-A', + session_id: 'session-1', + agent_type: 'claude-code', + task_index: 1, + task_content: 'Another task for session A', + start_time: now + 1000, + duration: 2000, + success: 1, + }, + ]); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + // Query tasks for 'auto-run-A' + const tasksA = db.getAutoRunTasks('auto-run-A'); + + expect(tasksA).toHaveLength(2); + expect(tasksA[0].autoRunSessionId).toBe('auto-run-A'); + expect(tasksA[1].autoRunSessionId).toBe('auto-run-A'); + + // Verify the WHERE clause used the correct auto_run_session_id + expect(mockStatement.all).toHaveBeenCalledWith('auto-run-A'); + }); + + it('should return empty array when no tasks exist for a session', async () => { + mockStatement.all.mockReturnValue([]); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const tasks = db.getAutoRunTasks('non-existent-session'); + + expect(tasks).toHaveLength(0); + expect(tasks).toEqual([]); + }); + }); + + describe('data consistency 
verification', () => { + it('should maintain consistent auto_run_session_id across multiple tasks', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + // Clear calls from initialization + mockStatement.run.mockClear(); + + const parentSessionId = 'consistent-parent-session'; + + // Insert multiple tasks for the same parent session + for (let i = 0; i < 5; i++) { + db.insertAutoRunTask({ + autoRunSessionId: parentSessionId, + sessionId: 'maestro-session', + agentType: 'claude-code', + taskIndex: i, + taskContent: `Task ${i + 1}`, + startTime: Date.now() + i * 1000, + duration: 1000, + success: true, + }); + } + + // Verify all 5 tasks used the same parent session ID + const runCalls = mockStatement.run.mock.calls; + expect(runCalls).toHaveLength(5); + + for (const call of runCalls) { + expect(call[1]).toBe(parentSessionId); // auto_run_session_id + } + }); + + it('should allow tasks from different sessions to be inserted independently', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + // Clear calls from initialization + mockStatement.run.mockClear(); + + // Insert tasks for session A + db.insertAutoRunTask({ + autoRunSessionId: 'session-A', + sessionId: 'maestro-1', + agentType: 'claude-code', + taskIndex: 0, + taskContent: 'Task A1', + startTime: Date.now(), + duration: 1000, + success: true, + }); + + // Insert tasks for session B + db.insertAutoRunTask({ + autoRunSessionId: 'session-B', + sessionId: 'maestro-2', + agentType: 'opencode', + taskIndex: 0, + taskContent: 'Task B1', + startTime: Date.now(), + duration: 2000, + success: true, + }); + + // Insert another task for session A + db.insertAutoRunTask({ + autoRunSessionId: 'session-A', + sessionId: 'maestro-1', + agentType: 'claude-code', + taskIndex: 1, + taskContent: 'Task A2', + startTime: Date.now(), + duration: 1500, + success: true, + }); + + const runCalls 
= mockStatement.run.mock.calls; + expect(runCalls).toHaveLength(3); + + // Verify parent session IDs are correctly assigned + expect(runCalls[0][1]).toBe('session-A'); + expect(runCalls[1][1]).toBe('session-B'); + expect(runCalls[2][1]).toBe('session-A'); + }); + + it('should use generated session ID as foreign key when retrieved after insertion', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + // Clear calls from initialization + mockStatement.run.mockClear(); + + // Insert a session and capture the generated ID + const generatedSessionId = db.insertAutoRunSession({ + sessionId: 'maestro-session', + agentType: 'claude-code', + documentPath: 'DOC.md', + startTime: Date.now(), + duration: 0, + tasksTotal: 3, + tasksCompleted: 0, + projectPath: '/project', + }); + + // The generated ID should be a string with timestamp-random format + expect(generatedSessionId).toMatch(/^\d+-[a-z0-9]+$/); + + // Use this generated ID as the foreign key for tasks + db.insertAutoRunTask({ + autoRunSessionId: generatedSessionId, + sessionId: 'maestro-session', + agentType: 'claude-code', + taskIndex: 0, + taskContent: 'First task', + startTime: Date.now(), + duration: 1000, + success: true, + }); + + const runCalls = mockStatement.run.mock.calls; + const taskInsert = runCalls[1]; // Second call is the task insert (first is session insert) + + // Verify the task uses the exact same ID that was generated for the session + expect(taskInsert[1]).toBe(generatedSessionId); + }); + }); + + describe('query filtering by foreign key', () => { + it('should filter tasks using WHERE auto_run_session_id clause', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + db.getAutoRunTasks('specific-session-id'); + + // Verify the SQL query includes proper WHERE clause for foreign key + const prepareCalls = mockDb.prepare.mock.calls; + const selectTasksCall = 
prepareCalls.find( + (call) => + (call[0] as string).includes('SELECT * FROM auto_run_tasks') && + (call[0] as string).includes('WHERE auto_run_session_id = ?') + ); + + expect(selectTasksCall).toBeDefined(); + }); + + it('should order tasks by task_index within a session', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + db.getAutoRunTasks('any-session'); + + // Verify the query includes ORDER BY task_index + const prepareCalls = mockDb.prepare.mock.calls; + const selectTasksCall = prepareCalls.find((call) => + (call[0] as string).includes('ORDER BY task_index ASC') + ); + + expect(selectTasksCall).toBeDefined(); + }); + }); +}); + +/** + * Time-range filtering verification tests + * + * These tests verify that time-range filtering works correctly for all supported + * ranges: 'day', 'week', 'month', 'year', and 'all'. Each range should correctly + * calculate the start timestamp and use it to filter database queries. + */ diff --git a/src/__tests__/main/stats/data-management.test.ts b/src/__tests__/main/stats/data-management.test.ts new file mode 100644 index 00000000..954e38c1 --- /dev/null +++ b/src/__tests__/main/stats/data-management.test.ts @@ -0,0 +1,601 @@ +/** + * Tests for VACUUM scheduling, clearOldData, and database maintenance. + * + * Note: better-sqlite3 is a native module compiled for Electron's Node version. + * Direct testing with the native module in vitest is not possible without + * electron-rebuild for the vitest runtime. These tests use mocked database + * operations to verify the logic without requiring the actual native module. + * + * For full integration testing of the SQLite database, use the Electron test + * environment (e2e tests) where the native module is properly loaded. 
+ */ + +import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest'; +import * as path from 'path'; +import * as os from 'os'; + +// Track Database constructor calls to verify file path +let lastDbPath: string | null = null; + +// Store mock references so they can be accessed in tests +const mockStatement = { + run: vi.fn(() => ({ changes: 1 })), + get: vi.fn(() => ({ count: 0, total_duration: 0 })), + all: vi.fn(() => []), +}; + +const mockDb = { + pragma: vi.fn(() => [{ user_version: 0 }]), + prepare: vi.fn(() => mockStatement), + close: vi.fn(), + // Transaction mock that immediately executes the function + transaction: vi.fn((fn: () => void) => { + return () => fn(); + }), +}; + +// Mock better-sqlite3 as a class +vi.mock('better-sqlite3', () => { + return { + default: class MockDatabase { + constructor(dbPath: string) { + lastDbPath = dbPath; + } + pragma = mockDb.pragma; + prepare = mockDb.prepare; + close = mockDb.close; + transaction = mockDb.transaction; + }, + }; +}); + +// Mock electron's app module with trackable userData path +const mockUserDataPath = path.join(os.tmpdir(), 'maestro-test-stats-db'); +vi.mock('electron', () => ({ + app: { + getPath: vi.fn((name: string) => { + if (name === 'userData') return mockUserDataPath; + return os.tmpdir(); + }), + }, +})); + +// Track fs calls +const mockFsExistsSync = vi.fn(() => true); +const mockFsMkdirSync = vi.fn(); +const mockFsCopyFileSync = vi.fn(); +const mockFsUnlinkSync = vi.fn(); +const mockFsRenameSync = vi.fn(); +const mockFsStatSync = vi.fn(() => ({ size: 1024 })); +const mockFsReadFileSync = vi.fn(() => '0'); // Default: old timestamp (triggers vacuum check) +const mockFsWriteFileSync = vi.fn(); + +// Mock fs +vi.mock('fs', () => ({ + existsSync: (...args: unknown[]) => mockFsExistsSync(...args), + mkdirSync: (...args: unknown[]) => mockFsMkdirSync(...args), + copyFileSync: (...args: unknown[]) => mockFsCopyFileSync(...args), + unlinkSync: (...args: unknown[]) => 
mockFsUnlinkSync(...args), + renameSync: (...args: unknown[]) => mockFsRenameSync(...args), + statSync: (...args: unknown[]) => mockFsStatSync(...args), + readFileSync: (...args: unknown[]) => mockFsReadFileSync(...args), + writeFileSync: (...args: unknown[]) => mockFsWriteFileSync(...args), +})); + +// Mock logger +vi.mock('../../../main/utils/logger', () => ({ + logger: { + info: vi.fn(), + warn: vi.fn(), + error: vi.fn(), + debug: vi.fn(), + }, +})); + +// Import types only - we'll test the type definitions +import type { + QueryEvent, + AutoRunSession, + AutoRunTask, + SessionLifecycleEvent, + StatsTimeRange, + StatsFilters, + StatsAggregation, +} from '../../../shared/stats-types'; + +describe('Database VACUUM functionality', () => { + beforeEach(() => { + vi.clearAllMocks(); + lastDbPath = null; + mockDb.pragma.mockReturnValue([{ user_version: 0 }]); + mockDb.prepare.mockReturnValue(mockStatement); + mockStatement.run.mockReturnValue({ changes: 1 }); + mockFsExistsSync.mockReturnValue(true); + // Reset statSync to throw by default (simulates file not existing) + mockFsStatSync.mockImplementation(() => { + throw new Error('ENOENT: no such file or directory'); + }); + }); + + afterEach(() => { + vi.resetModules(); + }); + + describe('getDatabaseSize', () => { + it('should return 0 when statSync throws (file missing)', async () => { + // The mock fs.statSync is not configured to return size by default + // so getDatabaseSize will catch the error and return 0 + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + // Since mockFsExistsSync.mockReturnValue(true) is set but statSync is not mocked, + // getDatabaseSize will try to call the real statSync on a non-existent path + // and catch the error, returning 0 + const size = db.getDatabaseSize(); + + // The mock environment doesn't have actual file, so expect 0 + expect(size).toBe(0); + }); + + it('should handle statSync gracefully when file does not exist', 
async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + // getDatabaseSize should not throw + expect(() => db.getDatabaseSize()).not.toThrow(); + }); + }); + + describe('vacuum', () => { + it('should execute VACUUM SQL command', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + // Clear mocks from initialization + mockStatement.run.mockClear(); + mockDb.prepare.mockClear(); + + const result = db.vacuum(); + + expect(result.success).toBe(true); + expect(mockDb.prepare).toHaveBeenCalledWith('VACUUM'); + expect(mockStatement.run).toHaveBeenCalled(); + }); + + it('should return success true when vacuum completes', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const result = db.vacuum(); + + expect(result.success).toBe(true); + expect(result.error).toBeUndefined(); + }); + + it('should return bytesFreed of 0 when sizes are equal (mocked)', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const result = db.vacuum(); + + // With mock fs, both before and after sizes will be 0 + expect(result.bytesFreed).toBe(0); + }); + + it('should return error if database not initialized', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + // Don't initialize + + const result = db.vacuum(); + + expect(result.success).toBe(false); + expect(result.bytesFreed).toBe(0); + expect(result.error).toBe('Database not initialized'); + }); + + it('should handle VACUUM failure gracefully', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + // Make VACUUM fail + mockDb.prepare.mockImplementation((sql: string) => { + if (sql === 'VACUUM') { + return { + run: vi.fn().mockImplementation(() => 
{ + throw new Error('database is locked'); + }), + }; + } + return mockStatement; + }); + + const result = db.vacuum(); + + expect(result.success).toBe(false); + expect(result.error).toContain('database is locked'); + }); + + it('should log vacuum progress with size information', async () => { + const { logger } = await import('../../../main/utils/logger'); + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + // Clear logger mocks from initialization + vi.mocked(logger.info).mockClear(); + + db.vacuum(); + + // Check that logger was called with vacuum-related messages + expect(logger.info).toHaveBeenCalledWith( + expect.stringContaining('Starting VACUUM'), + expect.any(String) + ); + expect(logger.info).toHaveBeenCalledWith( + expect.stringContaining('VACUUM completed'), + expect.any(String) + ); + }); + }); + + describe('vacuumIfNeeded', () => { + it('should skip vacuum if database size is 0 (below threshold)', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + // Clear mocks from initialization + mockStatement.run.mockClear(); + mockDb.prepare.mockClear(); + + const result = db.vacuumIfNeeded(); + + // Size is 0 (mock fs), which is below 100MB threshold + expect(result.vacuumed).toBe(false); + expect(result.databaseSize).toBe(0); + expect(result.result).toBeUndefined(); + }); + + it('should return correct databaseSize in result', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const result = db.vacuumIfNeeded(); + + // Size property should be present + expect(typeof result.databaseSize).toBe('number'); + }); + + it('should use default 100MB threshold when not specified', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + // With 0 byte size (mocked), should skip vacuum + const result = 
db.vacuumIfNeeded(); + + expect(result.vacuumed).toBe(false); + }); + + it('should not vacuum with threshold 0 and size 0 since 0 is not > 0', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + // Clear mocks from initialization + mockStatement.run.mockClear(); + mockDb.prepare.mockClear(); + + // With 0 threshold and 0 byte file: 0 is NOT greater than 0 + const result = db.vacuumIfNeeded(0); + + // The condition is: databaseSize < thresholdBytes + // 0 < 0 is false, so vacuumed should be true (it tries to vacuum) + expect(result.databaseSize).toBe(0); + // Since 0 is NOT less than 0, it proceeds to vacuum + expect(result.vacuumed).toBe(true); + }); + + it('should log appropriate message when skipping vacuum', async () => { + const { logger } = await import('../../../main/utils/logger'); + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + // Clear logger mocks from initialization + vi.mocked(logger.debug).mockClear(); + + db.vacuumIfNeeded(); + + expect(logger.debug).toHaveBeenCalledWith( + expect.stringContaining('below vacuum threshold'), + expect.any(String) + ); + }); + }); + + describe('vacuumIfNeeded with custom thresholds', () => { + it('should respect custom threshold parameter (threshold = -1 means always vacuum)', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + // Clear mocks from initialization + mockStatement.run.mockClear(); + mockDb.prepare.mockClear(); + + // With -1 threshold, 0 > -1 is true, so should vacuum + const result = db.vacuumIfNeeded(-1); + + expect(result.vacuumed).toBe(true); + expect(mockDb.prepare).toHaveBeenCalledWith('VACUUM'); + }); + + it('should not vacuum with very large threshold', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + // Clear mocks from 
initialization + mockStatement.run.mockClear(); + mockDb.prepare.mockClear(); + + // With 1TB threshold, should NOT trigger vacuum + const result = db.vacuumIfNeeded(1024 * 1024 * 1024 * 1024); + + expect(result.vacuumed).toBe(false); + expect(mockDb.prepare).not.toHaveBeenCalledWith('VACUUM'); + }); + }); + + describe('initialize with vacuumIfNeeded integration', () => { + it('should call vacuumIfNeededWeekly during initialization', async () => { + const { logger } = await import('../../../main/utils/logger'); + + // Clear logger mocks before test + vi.mocked(logger.debug).mockClear(); + + // Mock timestamp file as old (0 = epoch, triggers vacuum check) + mockFsReadFileSync.mockReturnValue('0'); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + + db.initialize(); + + // With old timestamp, vacuumIfNeededWeekly should proceed to call vacuumIfNeeded + // which logs "below vacuum threshold" for small databases (mocked as 1024 bytes) + expect(logger.debug).toHaveBeenCalledWith( + expect.stringContaining('below vacuum threshold'), + expect.any(String) + ); + }); + + it('should complete initialization even if vacuum would fail', async () => { + // Make VACUUM fail if called + mockDb.prepare.mockImplementation((sql: string) => { + if (sql === 'VACUUM') { + return { + run: vi.fn().mockImplementation(() => { + throw new Error('VACUUM failed: database is locked'); + }), + }; + } + return mockStatement; + }); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + + // Initialize should not throw (vacuum is skipped due to 0 size anyway) + expect(() => db.initialize()).not.toThrow(); + + // Database should still be ready + expect(db.isReady()).toBe(true); + }); + + it('should not block initialization for small databases', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + + // Time the initialization (should be fast for mock) + const start = 
Date.now(); + db.initialize(); + const elapsed = Date.now() - start; + + expect(db.isReady()).toBe(true); + expect(elapsed).toBeLessThan(1000); // Should be fast in mock environment + }); + }); + + describe('vacuum return types', () => { + it('vacuum should return object with success, bytesFreed, and optional error', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const result = db.vacuum(); + + expect(typeof result.success).toBe('boolean'); + expect(typeof result.bytesFreed).toBe('number'); + expect(result.error === undefined || typeof result.error === 'string').toBe(true); + }); + + it('vacuumIfNeeded should return object with vacuumed, databaseSize, and optional result', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const result = db.vacuumIfNeeded(); + + expect(typeof result.vacuumed).toBe('boolean'); + expect(typeof result.databaseSize).toBe('number'); + expect(result.result === undefined || typeof result.result === 'object').toBe(true); + }); + + it('vacuumIfNeeded should include result when vacuum is performed', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + // Use -1 threshold to force vacuum + const result = db.vacuumIfNeeded(-1); + + expect(result.vacuumed).toBe(true); + expect(result.result).toBeDefined(); + expect(result.result?.success).toBe(true); + }); + }); + + describe('clearOldData method', () => { + beforeEach(() => { + vi.clearAllMocks(); + vi.resetModules(); + }); + + it('should return error when database is not initialized', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + // Don't initialize + + const result = db.clearOldData(30); + + expect(result.success).toBe(false); + expect(result.deletedQueryEvents).toBe(0); + expect(result.deletedAutoRunSessions).toBe(0); 
+ expect(result.deletedAutoRunTasks).toBe(0); + expect(result.error).toBe('Database not initialized'); + }); + + it('should return error when olderThanDays is 0 or negative', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const resultZero = db.clearOldData(0); + expect(resultZero.success).toBe(false); + expect(resultZero.error).toBe('olderThanDays must be greater than 0'); + + const resultNegative = db.clearOldData(-10); + expect(resultNegative.success).toBe(false); + expect(resultNegative.error).toBe('olderThanDays must be greater than 0'); + }); + + it('should successfully clear old data with valid parameters', async () => { + // Mock prepare to return statements with expected behavior + mockStatement.all.mockReturnValue([{ id: 'session-1' }, { id: 'session-2' }]); + mockStatement.run.mockReturnValue({ changes: 5 }); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const result = db.clearOldData(30); + + expect(result.success).toBe(true); + expect(result.deletedQueryEvents).toBe(5); + expect(result.deletedAutoRunSessions).toBe(5); + expect(result.deletedAutoRunTasks).toBe(5); + expect(result.error).toBeUndefined(); + }); + + it('should handle empty results (no old data)', async () => { + mockStatement.all.mockReturnValue([]); + mockStatement.run.mockReturnValue({ changes: 0 }); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const result = db.clearOldData(365); + + expect(result.success).toBe(true); + expect(result.deletedQueryEvents).toBe(0); + expect(result.deletedAutoRunSessions).toBe(0); + expect(result.deletedAutoRunTasks).toBe(0); + expect(result.error).toBeUndefined(); + }); + + it('should calculate correct cutoff time based on days', async () => { + let capturedCutoffTime: number | null = null; + + mockDb.prepare.mockImplementation((sql: string) => { + 
return { + run: vi.fn((cutoff: number) => { + if (sql.includes('DELETE FROM query_events')) { + capturedCutoffTime = cutoff; + } + return { changes: 0 }; + }), + get: mockStatement.get, + all: vi.fn(() => []), + }; + }); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const beforeCall = Date.now(); + db.clearOldData(7); + const afterCall = Date.now(); + + // Cutoff should be approximately 7 days ago + const expectedCutoff = beforeCall - 7 * 24 * 60 * 60 * 1000; + expect(capturedCutoffTime).not.toBeNull(); + expect(capturedCutoffTime!).toBeGreaterThanOrEqual(expectedCutoff - 1000); + expect(capturedCutoffTime!).toBeLessThanOrEqual(afterCall - 7 * 24 * 60 * 60 * 1000 + 1000); + }); + + it('should handle database errors gracefully', async () => { + mockDb.prepare.mockImplementation((sql: string) => { + if (sql.includes('DELETE FROM query_events')) { + return { + run: vi.fn(() => { + throw new Error('Database locked'); + }), + }; + } + return mockStatement; + }); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const result = db.clearOldData(30); + + expect(result.success).toBe(false); + expect(result.error).toBe('Database locked'); + expect(result.deletedQueryEvents).toBe(0); + expect(result.deletedAutoRunSessions).toBe(0); + expect(result.deletedAutoRunTasks).toBe(0); + }); + + it('should support various time periods', async () => { + mockStatement.all.mockReturnValue([]); + mockStatement.run.mockReturnValue({ changes: 0 }); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + // Test common time periods from Settings UI + const periods = [7, 30, 90, 180, 365]; + for (const days of periods) { + const result = db.clearOldData(days); + expect(result.success).toBe(true); + } + }); + }); + + // ===================================================================== +}); diff --git 
a/src/__tests__/main/stats/integration.test.ts b/src/__tests__/main/stats/integration.test.ts new file mode 100644 index 00000000..37be73a1 --- /dev/null +++ b/src/__tests__/main/stats/integration.test.ts @@ -0,0 +1,1015 @@ +/** + * Tests for concurrent database access and native module verification. + * + * Note: better-sqlite3 is a native module compiled for Electron's Node version. + * Direct testing with the native module in vitest is not possible without + * electron-rebuild for the vitest runtime. These tests use mocked database + * operations to verify the logic without requiring the actual native module. + * + * For full integration testing of the SQLite database, use the Electron test + * environment (e2e tests) where the native module is properly loaded. + */ + +import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest'; +import * as path from 'path'; +import * as os from 'os'; + +// Track Database constructor calls to verify file path +let lastDbPath: string | null = null; + +// Store mock references so they can be accessed in tests +const mockStatement = { + run: vi.fn(() => ({ changes: 1 })), + get: vi.fn(() => ({ count: 0, total_duration: 0 })), + all: vi.fn(() => []), +}; + +const mockDb = { + pragma: vi.fn(() => [{ user_version: 0 }]), + prepare: vi.fn(() => mockStatement), + close: vi.fn(), + // Transaction mock that immediately executes the function + transaction: vi.fn((fn: () => void) => { + return () => fn(); + }), +}; + +// Mock better-sqlite3 as a class +vi.mock('better-sqlite3', () => { + return { + default: class MockDatabase { + constructor(dbPath: string) { + lastDbPath = dbPath; + } + pragma = mockDb.pragma; + prepare = mockDb.prepare; + close = mockDb.close; + transaction = mockDb.transaction; + }, + }; +}); + +// Mock electron's app module with trackable userData path +const mockUserDataPath = path.join(os.tmpdir(), 'maestro-test-stats-db'); +vi.mock('electron', () => ({ + app: { + getPath: vi.fn((name: string) => { + if 
(name === 'userData') return mockUserDataPath; + return os.tmpdir(); + }), + }, +})); + +// Track fs calls +const mockFsExistsSync = vi.fn(() => true); +const mockFsMkdirSync = vi.fn(); +const mockFsCopyFileSync = vi.fn(); +const mockFsUnlinkSync = vi.fn(); +const mockFsRenameSync = vi.fn(); +const mockFsStatSync = vi.fn(() => ({ size: 1024 })); +const mockFsReadFileSync = vi.fn(() => '0'); // Default: old timestamp (triggers vacuum check) +const mockFsWriteFileSync = vi.fn(); + +// Mock fs +vi.mock('fs', () => ({ + existsSync: (...args: unknown[]) => mockFsExistsSync(...args), + mkdirSync: (...args: unknown[]) => mockFsMkdirSync(...args), + copyFileSync: (...args: unknown[]) => mockFsCopyFileSync(...args), + unlinkSync: (...args: unknown[]) => mockFsUnlinkSync(...args), + renameSync: (...args: unknown[]) => mockFsRenameSync(...args), + statSync: (...args: unknown[]) => mockFsStatSync(...args), + readFileSync: (...args: unknown[]) => mockFsReadFileSync(...args), + writeFileSync: (...args: unknown[]) => mockFsWriteFileSync(...args), +})); + +// Mock logger +vi.mock('../../../main/utils/logger', () => ({ + logger: { + info: vi.fn(), + warn: vi.fn(), + error: vi.fn(), + debug: vi.fn(), + }, +})); + +// Import types only - we'll test the type definitions +import type { + QueryEvent, + AutoRunSession, + AutoRunTask, + SessionLifecycleEvent, + StatsTimeRange, + StatsFilters, + StatsAggregation, +} from '../../../shared/stats-types'; + +describe('Concurrent writes and database locking', () => { + let writeCount: number; + let insertedIds: string[]; + + beforeEach(() => { + vi.clearAllMocks(); + lastDbPath = null; + writeCount = 0; + insertedIds = []; + + // Mock pragma to return version 1 (skip migrations for these tests) + mockDb.pragma.mockImplementation((sql: string) => { + if (sql === 'user_version') return [{ user_version: 1 }]; + if (sql === 'journal_mode') return [{ journal_mode: 'wal' }]; + if (sql === 'journal_mode = WAL') return undefined; + return undefined; + 
}); + + // Track each write and generate unique IDs + mockStatement.run.mockImplementation(() => { + writeCount++; + return { changes: 1 }; + }); + + mockStatement.get.mockReturnValue({ count: 0, total_duration: 0 }); + mockStatement.all.mockReturnValue([]); + mockFsExistsSync.mockReturnValue(true); + }); + + afterEach(() => { + vi.resetModules(); + }); + + describe('WAL mode for concurrent access', () => { + it('should enable WAL journal mode on initialization', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + expect(mockDb.pragma).toHaveBeenCalledWith('journal_mode = WAL'); + }); + + it('should enable WAL mode before running migrations', async () => { + const pragmaCalls: string[] = []; + mockDb.pragma.mockImplementation((sql: string) => { + pragmaCalls.push(sql); + if (sql === 'user_version') return [{ user_version: 0 }]; + return undefined; + }); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + // WAL mode should be set early in initialization + const walIndex = pragmaCalls.indexOf('journal_mode = WAL'); + const versionIndex = pragmaCalls.indexOf('user_version'); + expect(walIndex).toBeGreaterThan(-1); + expect(versionIndex).toBeGreaterThan(-1); + expect(walIndex).toBeLessThan(versionIndex); + }); + }); + + describe('rapid sequential writes', () => { + it('should handle 10 rapid sequential query event inserts', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + // Clear mocks after initialize() to count only test operations + mockStatement.run.mockClear(); + + const ids: string[] = []; + for (let i = 0; i < 10; i++) { + const id = db.insertQueryEvent({ + sessionId: `session-${i}`, + agentType: 'claude-code', + source: 'user', + startTime: Date.now() + i, + duration: 1000 + i, + projectPath: '/test/project', + tabId: `tab-${i}`, + }); + ids.push(id); + } 
+ + expect(ids).toHaveLength(10); + // All IDs should be unique + expect(new Set(ids).size).toBe(10); + expect(mockStatement.run).toHaveBeenCalledTimes(10); + }); + + it('should handle 10 rapid sequential Auto Run session inserts', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + // Clear mocks after initialize() to count only test operations + mockStatement.run.mockClear(); + + const ids: string[] = []; + for (let i = 0; i < 10; i++) { + const id = db.insertAutoRunSession({ + sessionId: `session-${i}`, + agentType: 'claude-code', + documentPath: `/docs/TASK-${i}.md`, + startTime: Date.now() + i, + duration: 0, + tasksTotal: 5, + tasksCompleted: 0, + projectPath: '/test/project', + }); + ids.push(id); + } + + expect(ids).toHaveLength(10); + expect(new Set(ids).size).toBe(10); + expect(mockStatement.run).toHaveBeenCalledTimes(10); + }); + + it('should handle 10 rapid sequential task inserts', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + // Clear mocks after initialize() to count only test operations + mockStatement.run.mockClear(); + + const ids: string[] = []; + for (let i = 0; i < 10; i++) { + const id = db.insertAutoRunTask({ + autoRunSessionId: 'auto-run-1', + sessionId: 'session-1', + agentType: 'claude-code', + taskIndex: i, + taskContent: `Task ${i} content`, + startTime: Date.now() + i, + duration: 1000 + i, + success: i % 2 === 0, + }); + ids.push(id); + } + + expect(ids).toHaveLength(10); + expect(new Set(ids).size).toBe(10); + expect(mockStatement.run).toHaveBeenCalledTimes(10); + }); + }); + + describe('concurrent write operations', () => { + it('should handle concurrent writes to different tables via Promise.all', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + // Clear mocks after initialize() to count only test operations + 
mockStatement.run.mockClear(); + + // Simulate concurrent writes by wrapping synchronous operations in promises + const writeOperations = [ + Promise.resolve().then(() => + db.insertQueryEvent({ + sessionId: 'session-1', + agentType: 'claude-code', + source: 'user', + startTime: Date.now(), + duration: 5000, + }) + ), + Promise.resolve().then(() => + db.insertAutoRunSession({ + sessionId: 'session-2', + agentType: 'claude-code', + startTime: Date.now(), + duration: 0, + tasksTotal: 3, + }) + ), + Promise.resolve().then(() => + db.insertAutoRunTask({ + autoRunSessionId: 'auto-1', + sessionId: 'session-3', + agentType: 'claude-code', + taskIndex: 0, + startTime: Date.now(), + duration: 1000, + success: true, + }) + ), + ]; + + const results = await Promise.all(writeOperations); + + expect(results).toHaveLength(3); + expect(results.every((id) => typeof id === 'string' && id.length > 0)).toBe(true); + expect(mockStatement.run).toHaveBeenCalledTimes(3); + }); + + it('should handle 20 concurrent query event inserts via Promise.all', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + // Clear mocks after initialize() to count only test operations + mockStatement.run.mockClear(); + + const writeOperations = Array.from({ length: 20 }, (_, i) => + Promise.resolve().then(() => + db.insertQueryEvent({ + sessionId: `session-${i}`, + agentType: i % 2 === 0 ? 'claude-code' : 'opencode', + source: i % 3 === 0 ? 
'auto' : 'user', + startTime: Date.now() + i, + duration: 1000 + i * 100, + projectPath: `/project/${i}`, + }) + ) + ); + + const results = await Promise.all(writeOperations); + + expect(results).toHaveLength(20); + expect(new Set(results).size).toBe(20); // All IDs unique + expect(mockStatement.run).toHaveBeenCalledTimes(20); + }); + + it('should handle mixed insert and update operations concurrently', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + // Clear mocks after initialize() to count only test operations + mockStatement.run.mockClear(); + + const operations = [ + Promise.resolve().then(() => + db.insertQueryEvent({ + sessionId: 'session-1', + agentType: 'claude-code', + source: 'user', + startTime: Date.now(), + duration: 5000, + }) + ), + Promise.resolve().then(() => + db.updateAutoRunSession('existing-session', { + duration: 60000, + tasksCompleted: 5, + }) + ), + Promise.resolve().then(() => + db.insertAutoRunTask({ + autoRunSessionId: 'auto-1', + sessionId: 'session-2', + agentType: 'claude-code', + taskIndex: 0, + startTime: Date.now(), + duration: 1000, + success: true, + }) + ), + ]; + + const results = await Promise.all(operations); + + expect(results).toHaveLength(3); + // First and third return IDs, second returns boolean + expect(typeof results[0]).toBe('string'); + expect(typeof results[1]).toBe('boolean'); + expect(typeof results[2]).toBe('string'); + expect(mockStatement.run).toHaveBeenCalledTimes(3); + }); + }); + + describe('interleaved read/write operations', () => { + it('should handle reads during writes without blocking', async () => { + mockStatement.all.mockReturnValue([ + { + id: 'event-1', + session_id: 'session-1', + agent_type: 'claude-code', + source: 'user', + start_time: Date.now(), + duration: 5000, + project_path: '/test', + tab_id: null, + }, + ]); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + 
db.initialize(); + + const operations = [ + // Write + Promise.resolve().then(() => + db.insertQueryEvent({ + sessionId: 'session-new', + agentType: 'claude-code', + source: 'user', + startTime: Date.now(), + duration: 3000, + }) + ), + // Read + Promise.resolve().then(() => db.getQueryEvents('day')), + // Write + Promise.resolve().then(() => + db.insertAutoRunSession({ + sessionId: 'session-2', + agentType: 'claude-code', + startTime: Date.now(), + duration: 0, + tasksTotal: 5, + }) + ), + // Read + Promise.resolve().then(() => db.getAutoRunSessions('week')), + ]; + + const results = await Promise.all(operations); + + expect(results).toHaveLength(4); + expect(typeof results[0]).toBe('string'); // Insert ID + expect(Array.isArray(results[1])).toBe(true); // Query events array + expect(typeof results[2]).toBe('string'); // Insert ID + expect(Array.isArray(results[3])).toBe(true); // Auto run sessions array + }); + + it('should allow reads to complete while multiple writes are pending', async () => { + let readCompleted = false; + mockStatement.all.mockImplementation(() => { + readCompleted = true; + return [{ count: 42 }]; + }); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + // Start multiple writes + const writes = Array.from({ length: 5 }, (_, i) => + Promise.resolve().then(() => + db.insertQueryEvent({ + sessionId: `session-${i}`, + agentType: 'claude-code', + source: 'user', + startTime: Date.now() + i, + duration: 1000, + }) + ) + ); + + // Interleave a read + const read = Promise.resolve().then(() => db.getQueryEvents('day')); + + const [writeResults, readResult] = await Promise.all([Promise.all(writes), read]); + + expect(writeResults).toHaveLength(5); + expect(readCompleted).toBe(true); + }); + }); + + describe('high-volume concurrent writes', () => { + it('should handle 50 concurrent writes without data loss', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db 
= new StatsDB(); + db.initialize(); + + // Reset counter after initialize() to count only test operations + const insertedCount = { value: 0 }; + mockStatement.run.mockImplementation(() => { + insertedCount.value++; + return { changes: 1 }; + }); + + const writeOperations = Array.from({ length: 50 }, (_, i) => + Promise.resolve().then(() => + db.insertQueryEvent({ + sessionId: `session-${i}`, + agentType: 'claude-code', + source: i % 2 === 0 ? 'user' : 'auto', + startTime: Date.now() + i, + duration: 1000 + i, + }) + ) + ); + + const results = await Promise.all(writeOperations); + + expect(results).toHaveLength(50); + expect(insertedCount.value).toBe(50); // All writes completed + expect(new Set(results).size).toBe(50); // All IDs unique + }); + + it('should handle 100 concurrent writes across all three tables', async () => { + const writesByTable = { query: 0, session: 0, task: 0 }; + + // Track which table each insert goes to based on SQL + mockDb.prepare.mockImplementation((sql: string) => { + const tracker = mockStatement; + if (sql.includes('INSERT INTO query_events')) { + tracker.run = vi.fn(() => { + writesByTable.query++; + return { changes: 1 }; + }); + } else if (sql.includes('INSERT INTO auto_run_sessions')) { + tracker.run = vi.fn(() => { + writesByTable.session++; + return { changes: 1 }; + }); + } else if (sql.includes('INSERT INTO auto_run_tasks')) { + tracker.run = vi.fn(() => { + writesByTable.task++; + return { changes: 1 }; + }); + } + return tracker; + }); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + // 40 query events + 30 sessions + 30 tasks = 100 writes + const queryWrites = Array.from({ length: 40 }, (_, i) => + Promise.resolve().then(() => + db.insertQueryEvent({ + sessionId: `query-session-${i}`, + agentType: 'claude-code', + source: 'user', + startTime: Date.now() + i, + duration: 1000, + }) + ) + ); + + const sessionWrites = Array.from({ length: 30 }, (_, i) => + 
Promise.resolve().then(() => + db.insertAutoRunSession({ + sessionId: `autorun-session-${i}`, + agentType: 'claude-code', + startTime: Date.now() + i, + duration: 0, + tasksTotal: 5, + }) + ) + ); + + const taskWrites = Array.from({ length: 30 }, (_, i) => + Promise.resolve().then(() => + db.insertAutoRunTask({ + autoRunSessionId: `auto-${i}`, + sessionId: `task-session-${i}`, + agentType: 'claude-code', + taskIndex: i, + startTime: Date.now() + i, + duration: 1000, + success: true, + }) + ) + ); + + const allResults = await Promise.all([...queryWrites, ...sessionWrites, ...taskWrites]); + + expect(allResults).toHaveLength(100); + expect(allResults.every((id) => typeof id === 'string' && id.length > 0)).toBe(true); + expect(writesByTable.query).toBe(40); + expect(writesByTable.session).toBe(30); + expect(writesByTable.task).toBe(30); + }); + }); + + describe('unique ID generation under concurrent load', () => { + it('should generate unique IDs even with high-frequency calls', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + // Generate 100 IDs as fast as possible + const ids: string[] = []; + for (let i = 0; i < 100; i++) { + const id = db.insertQueryEvent({ + sessionId: 'session-1', + agentType: 'claude-code', + source: 'user', + startTime: Date.now(), + duration: 1000, + }); + ids.push(id); + } + + // All IDs must be unique + expect(new Set(ids).size).toBe(100); + }); + + it('should generate IDs with timestamp-random format', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const id = db.insertQueryEvent({ + sessionId: 'session-1', + agentType: 'claude-code', + source: 'user', + startTime: Date.now(), + duration: 1000, + }); + + // ID format: timestamp-randomString + expect(id).toMatch(/^\d+-[a-z0-9]+$/); + }); + }); + + describe('database connection stability', () => { + it('should maintain stable connection during 
intensive operations', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + // Perform many operations + for (let i = 0; i < 30; i++) { + db.insertQueryEvent({ + sessionId: `session-${i}`, + agentType: 'claude-code', + source: 'user', + startTime: Date.now() + i, + duration: 1000, + }); + } + + // Database should still be ready + expect(db.isReady()).toBe(true); + }); + + it('should handle operations after previous operations complete', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + // Track call count manually since we're testing sequential batches + // Set up tracking AFTER initialize() to count only test operations + let runCallCount = 0; + const trackingStatement = { + run: vi.fn(() => { + runCallCount++; + return { changes: 1 }; + }), + get: vi.fn(() => ({ count: 0, total_duration: 0 })), + all: vi.fn(() => []), + }; + mockDb.prepare.mockReturnValue(trackingStatement); + + // First batch + for (let i = 0; i < 10; i++) { + db.insertQueryEvent({ + sessionId: `batch1-${i}`, + agentType: 'claude-code', + source: 'user', + startTime: Date.now() + i, + duration: 1000, + }); + } + + // Second batch (should work without issues) + const secondBatchIds: string[] = []; + for (let i = 0; i < 10; i++) { + const id = db.insertQueryEvent({ + sessionId: `batch2-${i}`, + agentType: 'claude-code', + source: 'user', + startTime: Date.now() + 100 + i, + duration: 2000, + }); + secondBatchIds.push(id); + } + + expect(secondBatchIds).toHaveLength(10); + expect(runCallCount).toBe(20); + }); + }); +}); + +/** + * electron-rebuild verification tests + * + * These tests verify that better-sqlite3 is correctly configured to be built + * via electron-rebuild on all platforms (macOS, Windows, Linux). The native + * module must be compiled against Electron's Node.js headers to work correctly + * in the Electron runtime. 
+ * + * Key verification points: + * 1. postinstall script is configured to run electron-rebuild + * 2. better-sqlite3 is excluded from asar packaging (must be unpacked) + * 3. Native module paths are platform-appropriate + * 4. CI/CD workflow includes architecture verification + * + * Note: These tests verify the configuration and mock the build process. + * Actual native module compilation is tested in CI/CD workflows. + */ +describe('electron-rebuild verification for better-sqlite3', () => { + describe('package.json configuration', () => { + it('should have postinstall script that runs electron-rebuild for better-sqlite3', async () => { + // Use node:fs to bypass the mock and access the real filesystem + const fs = await import('node:fs'); + const path = await import('node:path'); + + // Find package.json relative to the test file + let packageJsonPath = path.join(__dirname, '..', '..', '..', '..', 'package.json'); + + // The package.json should exist and contain electron-rebuild for better-sqlite3 + const packageJsonContent = fs.readFileSync(packageJsonPath, 'utf8'); + const packageJson = JSON.parse(packageJsonContent); + + expect(packageJson.scripts).toBeDefined(); + expect(packageJson.scripts.postinstall).toBeDefined(); + expect(packageJson.scripts.postinstall).toContain('electron-rebuild'); + expect(packageJson.scripts.postinstall).toContain('better-sqlite3'); + }); + + it('should have better-sqlite3 in dependencies', async () => { + const fs = await import('node:fs'); + const path = await import('node:path'); + + let packageJsonPath = path.join(__dirname, '..', '..', '..', '..', 'package.json'); + const packageJsonContent = fs.readFileSync(packageJsonPath, 'utf8'); + const packageJson = JSON.parse(packageJsonContent); + + expect(packageJson.dependencies).toBeDefined(); + expect(packageJson.dependencies['better-sqlite3']).toBeDefined(); + }); + + it('should have electron-rebuild in devDependencies', async () => { + const fs = await import('node:fs'); + const 
path = await import('node:path');

      // Repo root is four levels up from src/__tests__/main/stats/.
      // Never reassigned, so const, not let.
      const packageJsonPath = path.join(__dirname, '..', '..', '..', '..', 'package.json');
      const packageJsonContent = fs.readFileSync(packageJsonPath, 'utf8');
      const packageJson = JSON.parse(packageJsonContent);

      expect(packageJson.devDependencies).toBeDefined();
      expect(packageJson.devDependencies['electron-rebuild']).toBeDefined();
    });

    it('should have @types/better-sqlite3 in devDependencies', async () => {
      const fs = await import('node:fs');
      const path = await import('node:path');

      const packageJsonPath = path.join(__dirname, '..', '..', '..', '..', 'package.json');
      const packageJsonContent = fs.readFileSync(packageJsonPath, 'utf8');
      const packageJson = JSON.parse(packageJsonContent);

      expect(packageJson.devDependencies).toBeDefined();
      expect(packageJson.devDependencies['@types/better-sqlite3']).toBeDefined();
    });

    it('should configure asarUnpack for better-sqlite3 (native modules must be unpacked)', async () => {
      const fs = await import('node:fs');
      const path = await import('node:path');

      const packageJsonPath = path.join(__dirname, '..', '..', '..', '..', 'package.json');
      const packageJsonContent = fs.readFileSync(packageJsonPath, 'utf8');
      const packageJson = JSON.parse(packageJsonContent);

      // electron-builder config should unpack native modules from asar
      expect(packageJson.build).toBeDefined();
      expect(packageJson.build.asarUnpack).toBeDefined();
      expect(Array.isArray(packageJson.build.asarUnpack)).toBe(true);
      expect(packageJson.build.asarUnpack).toContain('node_modules/better-sqlite3/**/*');
    });

    it('should disable npmRebuild in electron-builder (we use postinstall instead)', async () => {
      const fs = await import('node:fs');
      const path = await import('node:path');

      const packageJsonPath = path.join(__dirname, '..', '..', '..', '..', 'package.json');
      const packageJsonContent = fs.readFileSync(packageJsonPath, 'utf8');
      const packageJson =
JSON.parse(packageJsonContent); + + // npmRebuild should be false because we explicitly run electron-rebuild + // in postinstall and CI/CD workflows + expect(packageJson.build).toBeDefined(); + expect(packageJson.build.npmRebuild).toBe(false); + }); + }); + + describe('CI/CD workflow configuration', () => { + it('should have release workflow that rebuilds native modules', async () => { + const fs = await import('node:fs'); + const path = await import('node:path'); + + const workflowPath = path.join( + __dirname, + '..', + '..', + '..', + '..', + '.github', + 'workflows', + 'release.yml' + ); + const workflowContent = fs.readFileSync(workflowPath, 'utf8'); + + // Workflow should run postinstall which triggers electron-rebuild + expect(workflowContent).toContain('npm run postinstall'); + expect(workflowContent).toContain('npm_config_build_from_source'); + }); + + it('should configure builds for all target platforms', async () => { + const fs = await import('node:fs'); + const path = await import('node:path'); + + const workflowPath = path.join( + __dirname, + '..', + '..', + '..', + '..', + '.github', + 'workflows', + 'release.yml' + ); + const workflowContent = fs.readFileSync(workflowPath, 'utf8'); + + // Verify all platforms are configured + expect(workflowContent).toContain('macos-latest'); + expect(workflowContent).toContain('ubuntu-latest'); + expect(workflowContent).toContain('ubuntu-24.04-arm'); // ARM64 Linux + expect(workflowContent).toContain('windows-latest'); + }); + + it('should have architecture verification for native modules', async () => { + const fs = await import('node:fs'); + const path = await import('node:path'); + + const workflowPath = path.join( + __dirname, + '..', + '..', + '..', + '..', + '.github', + 'workflows', + 'release.yml' + ); + const workflowContent = fs.readFileSync(workflowPath, 'utf8'); + + // Workflow should verify native module architecture before packaging + expect(workflowContent).toContain('Verify'); + 
expect(workflowContent).toContain('electron-rebuild'); + }); + + it('should use --force flag for electron-rebuild', async () => { + const fs = await import('node:fs'); + const path = await import('node:path'); + + let packageJsonPath = path.join(__dirname, '..', '..', '..', '..', 'package.json'); + const packageJsonContent = fs.readFileSync(packageJsonPath, 'utf8'); + const packageJson = JSON.parse(packageJsonContent); + + // The -f (force) flag ensures rebuild even if binaries exist + expect(packageJson.scripts.postinstall).toContain('-f'); + }); + }); + + describe('native module structure (macOS verification)', () => { + it('should have better-sqlite3 native binding in expected location', async () => { + const fs = await import('node:fs'); + const path = await import('node:path'); + + // Check if the native binding exists in build/Release (compiled location) + const nativeModulePath = path.join( + __dirname, + '..', + '..', + '..', + '..', + 'node_modules', + 'better-sqlite3', + 'build', + 'Release', + 'better_sqlite3.node' + ); + + // The native module should exist after electron-rebuild + // This test will pass on dev machines where npm install was run + const exists = fs.existsSync(nativeModulePath); + + // If the native module doesn't exist, check if there's a prebuilt binary + if (!exists) { + // Check for prebuilt binaries in the bin directory + const binDir = path.join( + __dirname, + '..', + '..', + '..', + 'node_modules', + 'better-sqlite3', + 'bin' + ); + + if (fs.existsSync(binDir)) { + const binContents = fs.readdirSync(binDir); + // Should have platform-specific prebuilt binaries + expect(binContents.length).toBeGreaterThan(0); + } else { + // Neither compiled nor prebuilt binary exists - fail + expect(exists).toBe(true); + } + } + }); + + it('should verify binding.gyp exists for native compilation', async () => { + const fs = await import('node:fs'); + const path = await import('node:path'); + + const bindingGypPath = path.join( + __dirname, + '..', 
+ '..', + '..', + '..', + 'node_modules', + 'better-sqlite3', + 'binding.gyp' + ); + + // binding.gyp is required for node-gyp compilation + expect(fs.existsSync(bindingGypPath)).toBe(true); + }); + }); + + describe('platform-specific build paths', () => { + it('should verify macOS native module extension is .node', () => { + // On macOS, native modules have .node extension (Mach-O bundle) + const platform = process.platform; + if (platform === 'darwin') { + expect('.node').toBe('.node'); + } + }); + + it('should verify Windows native module extension is .node', () => { + // On Windows, native modules have .node extension (DLL) + const platform = process.platform; + if (platform === 'win32') { + expect('.node').toBe('.node'); + } + }); + + it('should verify Linux native module extension is .node', () => { + // On Linux, native modules have .node extension (shared object) + const platform = process.platform; + if (platform === 'linux') { + expect('.node').toBe('.node'); + } + }); + + it('should verify electron target is specified in postinstall', async () => { + const fs = await import('node:fs'); + const path = await import('node:path'); + + let packageJsonPath = path.join(__dirname, '..', '..', '..', '..', 'package.json'); + const packageJsonContent = fs.readFileSync(packageJsonPath, 'utf8'); + const packageJson = JSON.parse(packageJsonContent); + + // postinstall uses electron-rebuild which automatically detects electron version + expect(packageJson.scripts.postinstall).toContain('electron-rebuild'); + // The -w flag specifies which modules to rebuild + expect(packageJson.scripts.postinstall).toContain('-w'); + }); + }); + + describe('database import verification', () => { + it('should be able to mock better-sqlite3 for testing', async () => { + // This test verifies our mock setup is correct + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + + // Should be able to initialize with mocked database + expect(() => 
db.initialize()).not.toThrow(); + expect(db.isReady()).toBe(true); + }); + + it('should verify StatsDB uses better-sqlite3 correctly', async () => { + // Reset mocks to track this specific test + vi.clearAllMocks(); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + // Database should be initialized and ready + expect(db.isReady()).toBe(true); + + // Verify WAL mode is enabled for concurrent access + expect(mockDb.pragma).toHaveBeenCalled(); + }); + }); +}); + +/** + * File path normalization tests + * + * These tests verify that file paths are normalized to use forward slashes + * consistently across platforms. This ensures: + * 1. Windows-style paths (backslashes) are converted to forward slashes + * 2. Paths stored in the database are platform-independent + * 3. Filtering by project path works regardless of input path format + * 4. Cross-platform data portability is maintained + */ diff --git a/src/__tests__/main/stats/paths.test.ts b/src/__tests__/main/stats/paths.test.ts new file mode 100644 index 00000000..6e94cfc6 --- /dev/null +++ b/src/__tests__/main/stats/paths.test.ts @@ -0,0 +1,1029 @@ +/** + * Tests for cross-platform path resolution and normalization. + * + * Note: better-sqlite3 is a native module compiled for Electron's Node version. + * Direct testing with the native module in vitest is not possible without + * electron-rebuild for the vitest runtime. These tests use mocked database + * operations to verify the logic without requiring the actual native module. + * + * For full integration testing of the SQLite database, use the Electron test + * environment (e2e tests) where the native module is properly loaded. 
+ */ + +import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest'; +import * as path from 'path'; +import * as os from 'os'; + +// Track Database constructor calls to verify file path +let lastDbPath: string | null = null; + +// Store mock references so they can be accessed in tests +const mockStatement = { + run: vi.fn(() => ({ changes: 1 })), + get: vi.fn(() => ({ count: 0, total_duration: 0 })), + all: vi.fn(() => []), +}; + +const mockDb = { + pragma: vi.fn(() => [{ user_version: 0 }]), + prepare: vi.fn(() => mockStatement), + close: vi.fn(), + // Transaction mock that immediately executes the function + transaction: vi.fn((fn: () => void) => { + return () => fn(); + }), +}; + +// Mock better-sqlite3 as a class +vi.mock('better-sqlite3', () => { + return { + default: class MockDatabase { + constructor(dbPath: string) { + lastDbPath = dbPath; + } + pragma = mockDb.pragma; + prepare = mockDb.prepare; + close = mockDb.close; + transaction = mockDb.transaction; + }, + }; +}); + +// Mock electron's app module with trackable userData path +const mockUserDataPath = path.join(os.tmpdir(), 'maestro-test-stats-db'); +vi.mock('electron', () => ({ + app: { + getPath: vi.fn((name: string) => { + if (name === 'userData') return mockUserDataPath; + return os.tmpdir(); + }), + }, +})); + +// Track fs calls +const mockFsExistsSync = vi.fn(() => true); +const mockFsMkdirSync = vi.fn(); +const mockFsCopyFileSync = vi.fn(); +const mockFsUnlinkSync = vi.fn(); +const mockFsRenameSync = vi.fn(); +const mockFsStatSync = vi.fn(() => ({ size: 1024 })); +const mockFsReadFileSync = vi.fn(() => '0'); // Default: old timestamp (triggers vacuum check) +const mockFsWriteFileSync = vi.fn(); + +// Mock fs +vi.mock('fs', () => ({ + existsSync: (...args: unknown[]) => mockFsExistsSync(...args), + mkdirSync: (...args: unknown[]) => mockFsMkdirSync(...args), + copyFileSync: (...args: unknown[]) => mockFsCopyFileSync(...args), + unlinkSync: (...args: unknown[]) => 
mockFsUnlinkSync(...args), + renameSync: (...args: unknown[]) => mockFsRenameSync(...args), + statSync: (...args: unknown[]) => mockFsStatSync(...args), + readFileSync: (...args: unknown[]) => mockFsReadFileSync(...args), + writeFileSync: (...args: unknown[]) => mockFsWriteFileSync(...args), +})); + +// Mock logger +vi.mock('../../../main/utils/logger', () => ({ + logger: { + info: vi.fn(), + warn: vi.fn(), + error: vi.fn(), + debug: vi.fn(), + }, +})); + +// Import types only - we'll test the type definitions +import type { + QueryEvent, + AutoRunSession, + AutoRunTask, + SessionLifecycleEvent, + StatsTimeRange, + StatsFilters, + StatsAggregation, +} from '../../../shared/stats-types'; + +describe('Cross-platform database path resolution (macOS, Windows, Linux)', () => { + beforeEach(() => { + vi.clearAllMocks(); + lastDbPath = null; + mockDb.pragma.mockReturnValue([{ user_version: 0 }]); + mockDb.prepare.mockReturnValue(mockStatement); + mockStatement.run.mockReturnValue({ changes: 1 }); + mockFsExistsSync.mockReturnValue(true); + mockFsMkdirSync.mockClear(); + }); + + afterEach(() => { + vi.resetModules(); + }); + + describe('macOS path resolution', () => { + it('should use macOS-style userData path: ~/Library/Application Support/Maestro/', async () => { + // Simulate macOS userData path + const macOsUserData = '/Users/testuser/Library/Application Support/Maestro'; + const { app } = await import('electron'); + vi.mocked(app.getPath).mockReturnValue(macOsUserData); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + expect(lastDbPath).toBe(path.join(macOsUserData, 'stats.db')); + }); + + it('should handle macOS path with spaces in Application Support', async () => { + const macOsUserData = '/Users/testuser/Library/Application Support/Maestro'; + const { app } = await import('electron'); + vi.mocked(app.getPath).mockReturnValue(macOsUserData); + + const { StatsDB } = await import('../../../main/stats'); + 
const db = new StatsDB(); + + const dbPath = db.getDbPath(); + expect(dbPath).toContain('Application Support'); + expect(dbPath).toContain('stats.db'); + }); + + it('should handle macOS username with special characters', async () => { + const macOsUserData = '/Users/test.user-name/Library/Application Support/Maestro'; + const { app } = await import('electron'); + vi.mocked(app.getPath).mockReturnValue(macOsUserData); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + expect(lastDbPath).toBe(path.join(macOsUserData, 'stats.db')); + }); + + it('should resolve to absolute path on macOS', async () => { + const macOsUserData = '/Users/testuser/Library/Application Support/Maestro'; + const { app } = await import('electron'); + vi.mocked(app.getPath).mockReturnValue(macOsUserData); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + + expect(path.isAbsolute(db.getDbPath())).toBe(true); + }); + }); + + describe('Windows path resolution', () => { + it('should use Windows-style userData path: %APPDATA%\\Maestro\\', async () => { + // Simulate Windows userData path + const windowsUserData = 'C:\\Users\\TestUser\\AppData\\Roaming\\Maestro'; + const { app } = await import('electron'); + vi.mocked(app.getPath).mockReturnValue(windowsUserData); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + // path.join will use the platform's native separator + expect(lastDbPath).toBe(path.join(windowsUserData, 'stats.db')); + }); + + it('should handle Windows path with drive letter', async () => { + const windowsUserData = 'D:\\Users\\TestUser\\AppData\\Roaming\\Maestro'; + const { app } = await import('electron'); + vi.mocked(app.getPath).mockReturnValue(windowsUserData); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + + const dbPath = db.getDbPath(); + 
expect(dbPath).toContain('stats.db'); + // The path should start with a drive letter pattern when on Windows + // or be a proper path when joined + }); + + it('should handle Windows username with spaces', async () => { + const windowsUserData = 'C:\\Users\\Test User\\AppData\\Roaming\\Maestro'; + const { app } = await import('electron'); + vi.mocked(app.getPath).mockReturnValue(windowsUserData); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + expect(lastDbPath).toBe(path.join(windowsUserData, 'stats.db')); + }); + + it('should handle Windows UNC paths (network drives)', async () => { + const windowsUncPath = '\\\\NetworkDrive\\SharedFolder\\AppData\\Maestro'; + const { app } = await import('electron'); + vi.mocked(app.getPath).mockReturnValue(windowsUncPath); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + expect(lastDbPath).toBe(path.join(windowsUncPath, 'stats.db')); + }); + + it('should handle portable Windows installation path', async () => { + // Portable apps might use a different structure + const portablePath = 'E:\\PortableApps\\Maestro\\Data'; + const { app } = await import('electron'); + vi.mocked(app.getPath).mockReturnValue(portablePath); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + expect(lastDbPath).toBe(path.join(portablePath, 'stats.db')); + }); + }); + + describe('Linux path resolution', () => { + it('should use Linux-style userData path: ~/.config/Maestro/', async () => { + // Simulate Linux userData path + const linuxUserData = '/home/testuser/.config/Maestro'; + const { app } = await import('electron'); + vi.mocked(app.getPath).mockReturnValue(linuxUserData); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + expect(lastDbPath).toBe(path.join(linuxUserData, 'stats.db')); + }); + + it('should 
handle Linux XDG_CONFIG_HOME override', async () => { + // Custom XDG_CONFIG_HOME might result in different path + const customConfigHome = '/custom/config/path/Maestro'; + const { app } = await import('electron'); + vi.mocked(app.getPath).mockReturnValue(customConfigHome); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + expect(lastDbPath).toBe(path.join(customConfigHome, 'stats.db')); + }); + + it('should handle Linux username with underscore', async () => { + const linuxUserData = '/home/test_user/.config/Maestro'; + const { app } = await import('electron'); + vi.mocked(app.getPath).mockReturnValue(linuxUserData); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + expect(lastDbPath).toBe(path.join(linuxUserData, 'stats.db')); + }); + + it('should resolve to absolute path on Linux', async () => { + const linuxUserData = '/home/testuser/.config/Maestro'; + const { app } = await import('electron'); + vi.mocked(app.getPath).mockReturnValue(linuxUserData); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + + expect(path.isAbsolute(db.getDbPath())).toBe(true); + }); + + it('should handle Linux Snap/Flatpak sandboxed paths', async () => { + // Snap packages have a different path structure + const snapPath = '/home/testuser/snap/maestro/current/.config/Maestro'; + const { app } = await import('electron'); + vi.mocked(app.getPath).mockReturnValue(snapPath); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + expect(lastDbPath).toBe(path.join(snapPath, 'stats.db')); + }); + }); + + describe('path.join cross-platform behavior', () => { + it('should use path.join to combine userData and stats.db', async () => { + const testUserData = '/test/user/data'; + const { app } = await import('electron'); + vi.mocked(app.getPath).mockReturnValue(testUserData); + + 
const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + + // path.join should be used (not string concatenation) + expect(db.getDbPath()).toBe(path.join(testUserData, 'stats.db')); + }); + + it('should handle trailing slash in userData path', async () => { + const userDataWithSlash = '/test/user/data/'; + const { app } = await import('electron'); + vi.mocked(app.getPath).mockReturnValue(userDataWithSlash); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + + // path.join normalizes trailing slashes + const dbPath = db.getDbPath(); + expect(dbPath.endsWith('stats.db')).toBe(true); + // Should not have double slashes + expect(dbPath).not.toContain('//'); + }); + + it('should result in stats.db as the basename on all platforms', async () => { + const testUserData = '/any/path/structure'; + const { app } = await import('electron'); + vi.mocked(app.getPath).mockReturnValue(testUserData); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + + expect(path.basename(db.getDbPath())).toBe('stats.db'); + }); + + it('should result in userData directory as the parent', async () => { + const testUserData = '/any/path/structure'; + const { app } = await import('electron'); + vi.mocked(app.getPath).mockReturnValue(testUserData); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + + expect(path.dirname(db.getDbPath())).toBe(testUserData); + }); + }); + + describe('directory creation cross-platform', () => { + it('should create directory on macOS if it does not exist', async () => { + mockFsExistsSync.mockReturnValue(false); + const macOsUserData = '/Users/testuser/Library/Application Support/Maestro'; + const { app } = await import('electron'); + vi.mocked(app.getPath).mockReturnValue(macOsUserData); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + 
expect(mockFsMkdirSync).toHaveBeenCalledWith(macOsUserData, { recursive: true }); + }); + + it('should create directory on Windows if it does not exist', async () => { + mockFsExistsSync.mockReturnValue(false); + const windowsUserData = 'C:\\Users\\TestUser\\AppData\\Roaming\\Maestro'; + const { app } = await import('electron'); + vi.mocked(app.getPath).mockReturnValue(windowsUserData); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + expect(mockFsMkdirSync).toHaveBeenCalledWith(windowsUserData, { recursive: true }); + }); + + it('should create directory on Linux if it does not exist', async () => { + mockFsExistsSync.mockReturnValue(false); + const linuxUserData = '/home/testuser/.config/Maestro'; + const { app } = await import('electron'); + vi.mocked(app.getPath).mockReturnValue(linuxUserData); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + expect(mockFsMkdirSync).toHaveBeenCalledWith(linuxUserData, { recursive: true }); + }); + + it('should use recursive option for deeply nested paths', async () => { + mockFsExistsSync.mockReturnValue(false); + const deepPath = '/very/deep/nested/path/structure/Maestro'; + const { app } = await import('electron'); + vi.mocked(app.getPath).mockReturnValue(deepPath); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + expect(mockFsMkdirSync).toHaveBeenCalledWith(deepPath, { recursive: true }); + }); + }); + + describe('edge cases for path resolution', () => { + it('should handle unicode characters in path', async () => { + const unicodePath = '/Users/用户名/Library/Application Support/Maestro'; + const { app } = await import('electron'); + vi.mocked(app.getPath).mockReturnValue(unicodePath); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + expect(lastDbPath).toBe(path.join(unicodePath, 
'stats.db')); + }); + + it('should handle emoji in path (macOS supports this)', async () => { + const emojiPath = '/Users/test/Documents/🎵Music/Maestro'; + const { app } = await import('electron'); + vi.mocked(app.getPath).mockReturnValue(emojiPath); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + expect(lastDbPath).toBe(path.join(emojiPath, 'stats.db')); + }); + + it('should handle very long paths (approaching Windows MAX_PATH)', async () => { + // Windows MAX_PATH is 260 characters by default + const longPath = '/very' + '/long'.repeat(50) + '/path/Maestro'; + const { app } = await import('electron'); + vi.mocked(app.getPath).mockReturnValue(longPath); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + + const dbPath = db.getDbPath(); + expect(dbPath.endsWith('stats.db')).toBe(true); + }); + + it('should handle path with single quotes', async () => { + const quotedPath = "/Users/O'Brien/Library/Application Support/Maestro"; + const { app } = await import('electron'); + vi.mocked(app.getPath).mockReturnValue(quotedPath); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + expect(lastDbPath).toBe(path.join(quotedPath, 'stats.db')); + }); + + it('should handle path with double quotes (Windows allows this)', async () => { + // Note: Double quotes aren't typically valid in Windows paths but path.join handles them + const quotedPath = 'C:\\Users\\Test"User\\AppData\\Roaming\\Maestro'; + const { app } = await import('electron'); + vi.mocked(app.getPath).mockReturnValue(quotedPath); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + + const dbPath = db.getDbPath(); + expect(path.basename(dbPath)).toBe('stats.db'); + }); + + it('should handle path with ampersand', async () => { + const ampersandPath = '/Users/Smith & Jones/Library/Application Support/Maestro'; + const { 
app } = await import('electron'); + vi.mocked(app.getPath).mockReturnValue(ampersandPath); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + expect(lastDbPath).toBe(path.join(ampersandPath, 'stats.db')); + }); + }); + + describe('consistency across platform simulations', () => { + it('should always produce a path ending with stats.db regardless of platform', async () => { + const platforms = [ + '/Users/mac/Library/Application Support/Maestro', + 'C:\\Users\\Windows\\AppData\\Roaming\\Maestro', + '/home/linux/.config/Maestro', + ]; + + for (const platformPath of platforms) { + vi.resetModules(); + const { app } = await import('electron'); + vi.mocked(app.getPath).mockReturnValue(platformPath); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + + expect(path.basename(db.getDbPath())).toBe('stats.db'); + } + }); + + it('should always initialize successfully regardless of platform path format', async () => { + const platforms = [ + '/Users/mac/Library/Application Support/Maestro', + 'C:\\Users\\Windows\\AppData\\Roaming\\Maestro', + '/home/linux/.config/Maestro', + ]; + + for (const platformPath of platforms) { + vi.resetModules(); + vi.clearAllMocks(); + mockDb.pragma.mockReturnValue([{ user_version: 0 }]); + mockDb.prepare.mockReturnValue(mockStatement); + mockFsExistsSync.mockReturnValue(true); + + const { app } = await import('electron'); + vi.mocked(app.getPath).mockReturnValue(platformPath); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + expect(db.isReady()).toBe(true); + } + }); + + it('should pass correct directory to mkdirSync on all platforms', async () => { + const platforms = [ + '/Users/mac/Library/Application Support/Maestro', + 'C:\\Users\\Windows\\AppData\\Roaming\\Maestro', + '/home/linux/.config/Maestro', + ]; + + for (const platformPath of platforms) { + vi.resetModules(); + 
vi.clearAllMocks(); + mockDb.pragma.mockReturnValue([{ user_version: 0 }]); + mockDb.prepare.mockReturnValue(mockStatement); + mockFsExistsSync.mockReturnValue(false); + mockFsMkdirSync.mockClear(); + + const { app } = await import('electron'); + vi.mocked(app.getPath).mockReturnValue(platformPath); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + expect(mockFsMkdirSync).toHaveBeenCalledWith(platformPath, { recursive: true }); + } + }); + }); + + describe('electron app.getPath integration', () => { + it('should call app.getPath with "userData" argument', async () => { + const { app } = await import('electron'); + + const { StatsDB } = await import('../../../main/stats'); + new StatsDB(); + + expect(app.getPath).toHaveBeenCalledWith('userData'); + }); + + it('should respect the value returned by app.getPath', async () => { + const customPath = '/custom/electron/user/data/path'; + const { app } = await import('electron'); + vi.mocked(app.getPath).mockReturnValue(customPath); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + + expect(db.getDbPath()).toBe(path.join(customPath, 'stats.db')); + }); + + it('should use userData path at construction time (not lazily)', async () => { + const { app } = await import('electron'); + const initialPath = '/initial/path'; + vi.mocked(app.getPath).mockReturnValue(initialPath); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + + // Change the mock after construction + vi.mocked(app.getPath).mockReturnValue('/different/path'); + + // Should still use the initial path + expect(db.getDbPath()).toBe(path.join(initialPath, 'stats.db')); + }); + }); +}); + +/** + * Concurrent writes and database locking tests + * + * Tests that verify concurrent write operations don't cause database locking issues. + * better-sqlite3 uses synchronous operations and WAL mode for optimal concurrent access. 
+ * + * Key behaviors tested: + * - Rapid sequential writes complete without errors + * - Concurrent write operations all succeed (via Promise.all) + * - Interleaved read/write operations work correctly + * - High-volume concurrent writes complete without data loss + * - WAL mode is properly enabled for concurrent access + */ + +describe('File path normalization in database (forward slashes consistently)', () => { + beforeEach(() => { + vi.clearAllMocks(); + lastDbPath = null; + mockDb.pragma.mockReturnValue([{ user_version: 1 }]); + mockDb.prepare.mockReturnValue(mockStatement); + mockStatement.run.mockReturnValue({ changes: 1 }); + mockStatement.all.mockReturnValue([]); + mockFsExistsSync.mockReturnValue(true); + mockFsMkdirSync.mockClear(); + }); + + afterEach(() => { + vi.resetModules(); + }); + + describe('normalizePath utility function', () => { + it('should convert Windows backslashes to forward slashes', async () => { + const { normalizePath } = await import('../../../main/stats'); + expect(normalizePath('C:\\Users\\TestUser\\Projects\\MyApp')).toBe( + 'C:/Users/TestUser/Projects/MyApp' + ); + }); + + it('should preserve Unix-style forward slashes unchanged', async () => { + const { normalizePath } = await import('../../../main/stats'); + expect(normalizePath('/Users/testuser/Projects/MyApp')).toBe( + '/Users/testuser/Projects/MyApp' + ); + }); + + it('should handle mixed slashes (normalize to forward slashes)', async () => { + const { normalizePath } = await import('../../../main/stats'); + expect(normalizePath('C:\\Users/TestUser\\Projects/MyApp')).toBe( + 'C:/Users/TestUser/Projects/MyApp' + ); + }); + + it('should handle UNC paths (Windows network shares)', async () => { + const { normalizePath } = await import('../../../main/stats'); + expect(normalizePath('\\\\NetworkServer\\Share\\Folder\\File.md')).toBe( + '//NetworkServer/Share/Folder/File.md' + ); + }); + + it('should return null for null input', async () => { + const { normalizePath } = await 
import('../../../main/stats'); + expect(normalizePath(null)).toBeNull(); + }); + + it('should return null for undefined input', async () => { + const { normalizePath } = await import('../../../main/stats'); + expect(normalizePath(undefined)).toBeNull(); + }); + + it('should handle empty string', async () => { + const { normalizePath } = await import('../../../main/stats'); + expect(normalizePath('')).toBe(''); + }); + + it('should handle path with spaces', async () => { + const { normalizePath } = await import('../../../main/stats'); + expect(normalizePath('C:\\Users\\Test User\\My Documents\\Project')).toBe( + 'C:/Users/Test User/My Documents/Project' + ); + }); + + it('should handle path with special characters', async () => { + const { normalizePath } = await import('../../../main/stats'); + expect(normalizePath('C:\\Users\\test.user-name\\Projects\\[MyApp]')).toBe( + 'C:/Users/test.user-name/Projects/[MyApp]' + ); + }); + + it('should handle consecutive backslashes', async () => { + const { normalizePath } = await import('../../../main/stats'); + expect(normalizePath('C:\\\\Users\\\\TestUser')).toBe('C://Users//TestUser'); + }); + + it('should handle path ending with backslash', async () => { + const { normalizePath } = await import('../../../main/stats'); + expect(normalizePath('C:\\Users\\TestUser\\')).toBe('C:/Users/TestUser/'); + }); + + it('should handle Japanese/CJK characters in path', async () => { + const { normalizePath } = await import('../../../main/stats'); + expect(normalizePath('C:\\Users\\ユーザー\\プロジェクト')).toBe( + 'C:/Users/ユーザー/プロジェクト' + ); + }); + }); + + describe('insertQueryEvent path normalization', () => { + it('should normalize Windows projectPath to forward slashes', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + db.insertQueryEvent({ + sessionId: 'session-1', + agentType: 'claude-code', + source: 'user', + startTime: Date.now(), + duration: 5000, + projectPath: 
'C:\\Users\\TestUser\\Projects\\MyApp', + tabId: 'tab-1', + }); + + // Verify that the statement was called with normalized path + // insertQueryEvent now has 9 parameters: id, sessionId, agentType, source, startTime, duration, projectPath, tabId, isRemote + expect(mockStatement.run).toHaveBeenCalledWith( + expect.any(String), // id + 'session-1', + 'claude-code', + 'user', + expect.any(Number), // startTime + 5000, + 'C:/Users/TestUser/Projects/MyApp', // normalized path + 'tab-1', + null // isRemote (undefined → null) + ); + }); + + it('should preserve Unix projectPath unchanged', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + db.insertQueryEvent({ + sessionId: 'session-1', + agentType: 'claude-code', + source: 'user', + startTime: Date.now(), + duration: 5000, + projectPath: '/Users/testuser/Projects/MyApp', + tabId: 'tab-1', + }); + + // insertQueryEvent now has 9 parameters including isRemote + expect(mockStatement.run).toHaveBeenCalledWith( + expect.any(String), + 'session-1', + 'claude-code', + 'user', + expect.any(Number), + 5000, + '/Users/testuser/Projects/MyApp', // unchanged + 'tab-1', + null // isRemote (undefined → null) + ); + }); + + it('should store null for undefined projectPath', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + db.insertQueryEvent({ + sessionId: 'session-1', + agentType: 'claude-code', + source: 'user', + startTime: Date.now(), + duration: 5000, + // projectPath is undefined + }); + + // insertQueryEvent now has 9 parameters including isRemote + expect(mockStatement.run).toHaveBeenCalledWith( + expect.any(String), + 'session-1', + 'claude-code', + 'user', + expect.any(Number), + 5000, + null, // undefined becomes null + null, // tabId undefined → null + null // isRemote undefined → null + ); + }); + }); + + describe('getQueryEvents filter path normalization', () => { + it('should 
normalize Windows filter projectPath for matching', async () => { + // Setup: database returns events with normalized paths + mockStatement.all.mockReturnValue([ + { + id: 'event-1', + session_id: 'session-1', + agent_type: 'claude-code', + source: 'user', + start_time: Date.now(), + duration: 5000, + project_path: 'C:/Users/TestUser/Projects/MyApp', // normalized in DB + tab_id: 'tab-1', + }, + ]); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + // Query with Windows-style path (backslashes) + const events = db.getQueryEvents('day', { + projectPath: 'C:\\Users\\TestUser\\Projects\\MyApp', // Windows style + }); + + // Verify the prepared statement was called with normalized path + expect(mockDb.prepare).toHaveBeenCalledWith(expect.stringContaining('project_path = ?')); + + // The filter should be normalized to forward slashes for matching + const prepareCallArgs = mockStatement.all.mock.calls[0]; + expect(prepareCallArgs).toContain('C:/Users/TestUser/Projects/MyApp'); + }); + + it('should preserve Unix filter projectPath unchanged', async () => { + mockStatement.all.mockReturnValue([]); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + db.getQueryEvents('week', { + projectPath: '/Users/testuser/Projects/MyApp', + }); + + const prepareCallArgs = mockStatement.all.mock.calls[0]; + expect(prepareCallArgs).toContain('/Users/testuser/Projects/MyApp'); + }); + }); + + describe('insertAutoRunSession path normalization', () => { + it('should normalize Windows documentPath and projectPath', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + db.insertAutoRunSession({ + sessionId: 'session-1', + agentType: 'claude-code', + documentPath: 'C:\\Users\\TestUser\\Docs\\task.md', + startTime: Date.now(), + duration: 60000, + tasksTotal: 5, + tasksCompleted: 3, + projectPath: 
'C:\\Users\\TestUser\\Projects\\MyApp', + }); + + expect(mockStatement.run).toHaveBeenCalledWith( + expect.any(String), + 'session-1', + 'claude-code', + 'C:/Users/TestUser/Docs/task.md', // normalized documentPath + expect.any(Number), + 60000, + 5, + 3, + 'C:/Users/TestUser/Projects/MyApp' // normalized projectPath + ); + }); + + it('should handle null paths correctly', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + db.insertAutoRunSession({ + sessionId: 'session-1', + agentType: 'claude-code', + startTime: Date.now(), + duration: 60000, + // documentPath and projectPath are undefined + }); + + expect(mockStatement.run).toHaveBeenCalledWith( + expect.any(String), + 'session-1', + 'claude-code', + null, // undefined documentPath becomes null + expect.any(Number), + 60000, + null, + null, + null // undefined projectPath becomes null + ); + }); + }); + + describe('updateAutoRunSession path normalization', () => { + it('should normalize Windows documentPath on update', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + db.updateAutoRunSession('auto-run-1', { + duration: 120000, + documentPath: 'D:\\Projects\\NewDocs\\updated.md', + }); + + // The SQL should include document_path update with normalized path + expect(mockDb.prepare).toHaveBeenCalledWith(expect.stringContaining('document_path = ?')); + expect(mockStatement.run).toHaveBeenCalled(); + }); + + it('should handle undefined documentPath in update (no change)', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + db.updateAutoRunSession('auto-run-1', { + duration: 120000, + tasksCompleted: 5, + // documentPath not included + }); + + // The SQL should NOT include document_path + const prepareCalls = mockDb.prepare.mock.calls; + const updateCall = prepareCalls.find((call) => 
call[0]?.includes?.('UPDATE')); + if (updateCall) { + expect(updateCall[0]).not.toContain('document_path'); + } + }); + }); + + describe('cross-platform path consistency', () => { + it('should produce identical normalized paths from Windows and Unix inputs for same logical path', async () => { + const { normalizePath } = await import('../../../main/stats'); + + const windowsPath = 'C:\\Users\\Test\\project'; + const unixPath = 'C:/Users/Test/project'; + + expect(normalizePath(windowsPath)).toBe(normalizePath(unixPath)); + }); + + it('should allow filtering by either path style and match stored normalized path', async () => { + // Setup: database returns events with normalized paths + const storedPath = 'C:/Users/TestUser/Projects/MyApp'; + mockStatement.all.mockReturnValue([ + { + id: 'event-1', + session_id: 'session-1', + agent_type: 'claude-code', + source: 'user', + start_time: Date.now(), + duration: 5000, + project_path: storedPath, + tab_id: 'tab-1', + }, + ]); + + const { StatsDB, normalizePath } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + // Both Windows and Unix style filters should normalize to the same value + const windowsFilter = 'C:\\Users\\TestUser\\Projects\\MyApp'; + const unixFilter = 'C:/Users/TestUser/Projects/MyApp'; + + expect(normalizePath(windowsFilter)).toBe(storedPath); + expect(normalizePath(unixFilter)).toBe(storedPath); + }); + + it('should handle Linux paths correctly', async () => { + const { normalizePath } = await import('../../../main/stats'); + expect(normalizePath('/home/user/.config/maestro')).toBe('/home/user/.config/maestro'); + }); + + it('should handle macOS Application Support paths correctly', async () => { + const { normalizePath } = await import('../../../main/stats'); + expect(normalizePath('/Users/test/Library/Application Support/Maestro')).toBe( + '/Users/test/Library/Application Support/Maestro' + ); + }); + }); + + describe('edge cases and special characters', () => { + 
it('should handle paths with unicode characters', async () => { + const { normalizePath } = await import('../../../main/stats'); + expect(normalizePath('C:\\Users\\用户\\项目')).toBe('C:/Users/用户/项目'); + }); + + it('should handle paths with emoji (if supported by filesystem)', async () => { + const { normalizePath } = await import('../../../main/stats'); + expect(normalizePath('C:\\Users\\Test\\📁Projects\\MyApp')).toBe( + 'C:/Users/Test/📁Projects/MyApp' + ); + }); + + it('should handle very long paths', async () => { + const { normalizePath } = await import('../../../main/stats'); + const longPath = + 'C:\\Users\\TestUser\\' + 'VeryLongDirectoryName\\'.repeat(20) + 'FinalFile.md'; + const normalizedPath = normalizePath(longPath); + expect(normalizedPath).not.toContain('\\'); + expect(normalizedPath).toContain('/'); + }); + + it('should handle root paths', async () => { + const { normalizePath } = await import('../../../main/stats'); + expect(normalizePath('C:\\')).toBe('C:/'); + expect(normalizePath('/')).toBe('/'); + }); + + it('should handle drive letter only', async () => { + const { normalizePath } = await import('../../../main/stats'); + expect(normalizePath('D:')).toBe('D:'); + }); + + it('should handle paths with dots', async () => { + const { normalizePath } = await import('../../../main/stats'); + expect(normalizePath('C:\\Users\\..\\TestUser\\.hidden\\file.txt')).toBe( + 'C:/Users/../TestUser/.hidden/file.txt' + ); + }); + }); +}); + +/** + * Database VACUUM functionality tests + * + * Tests for the automatic database vacuum feature that runs on startup + * when the database exceeds 100MB to maintain performance. + */ diff --git a/src/__tests__/main/stats/query-events.test.ts b/src/__tests__/main/stats/query-events.test.ts new file mode 100644 index 00000000..8ff8bc39 --- /dev/null +++ b/src/__tests__/main/stats/query-events.test.ts @@ -0,0 +1,732 @@ +/** + * Tests for query event CRUD operations, filtering, and CSV export. 
+ * + * Note: better-sqlite3 is a native module compiled for Electron's Node version. + * Direct testing with the native module in vitest is not possible without + * electron-rebuild for the vitest runtime. These tests use mocked database + * operations to verify the logic without requiring the actual native module. + * + * For full integration testing of the SQLite database, use the Electron test + * environment (e2e tests) where the native module is properly loaded. + */ + +import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest'; +import * as path from 'path'; +import * as os from 'os'; + +// Track Database constructor calls to verify file path +let lastDbPath: string | null = null; + +// Store mock references so they can be accessed in tests +const mockStatement = { + run: vi.fn(() => ({ changes: 1 })), + get: vi.fn(() => ({ count: 0, total_duration: 0 })), + all: vi.fn(() => []), +}; + +const mockDb = { + pragma: vi.fn(() => [{ user_version: 0 }]), + prepare: vi.fn(() => mockStatement), + close: vi.fn(), + // Transaction mock that immediately executes the function + transaction: vi.fn((fn: () => void) => { + return () => fn(); + }), +}; + +// Mock better-sqlite3 as a class +vi.mock('better-sqlite3', () => { + return { + default: class MockDatabase { + constructor(dbPath: string) { + lastDbPath = dbPath; + } + pragma = mockDb.pragma; + prepare = mockDb.prepare; + close = mockDb.close; + transaction = mockDb.transaction; + }, + }; +}); + +// Mock electron's app module with trackable userData path +const mockUserDataPath = path.join(os.tmpdir(), 'maestro-test-stats-db'); +vi.mock('electron', () => ({ + app: { + getPath: vi.fn((name: string) => { + if (name === 'userData') return mockUserDataPath; + return os.tmpdir(); + }), + }, +})); + +// Track fs calls +const mockFsExistsSync = vi.fn(() => true); +const mockFsMkdirSync = vi.fn(); +const mockFsCopyFileSync = vi.fn(); +const mockFsUnlinkSync = vi.fn(); +const mockFsRenameSync = vi.fn(); +const 
mockFsStatSync = vi.fn(() => ({ size: 1024 })); +const mockFsReadFileSync = vi.fn(() => '0'); // Default: old timestamp (triggers vacuum check) +const mockFsWriteFileSync = vi.fn(); + +// Mock fs +vi.mock('fs', () => ({ + existsSync: (...args: unknown[]) => mockFsExistsSync(...args), + mkdirSync: (...args: unknown[]) => mockFsMkdirSync(...args), + copyFileSync: (...args: unknown[]) => mockFsCopyFileSync(...args), + unlinkSync: (...args: unknown[]) => mockFsUnlinkSync(...args), + renameSync: (...args: unknown[]) => mockFsRenameSync(...args), + statSync: (...args: unknown[]) => mockFsStatSync(...args), + readFileSync: (...args: unknown[]) => mockFsReadFileSync(...args), + writeFileSync: (...args: unknown[]) => mockFsWriteFileSync(...args), +})); + +// Mock logger +vi.mock('../../../main/utils/logger', () => ({ + logger: { + info: vi.fn(), + warn: vi.fn(), + error: vi.fn(), + debug: vi.fn(), + }, +})); + +// Import types only - we'll test the type definitions +import type { + QueryEvent, + AutoRunSession, + AutoRunTask, + SessionLifecycleEvent, + StatsTimeRange, + StatsFilters, + StatsAggregation, +} from '../../../shared/stats-types'; + +describe('Stats aggregation and filtering', () => { + beforeEach(() => { + vi.clearAllMocks(); + mockDb.pragma.mockReturnValue([{ user_version: 0 }]); + mockDb.prepare.mockReturnValue(mockStatement); + mockStatement.run.mockReturnValue({ changes: 1 }); + mockFsExistsSync.mockReturnValue(true); + }); + + afterEach(() => { + vi.resetModules(); + }); + + describe('time range filtering', () => { + it('should filter query events by day range', async () => { + mockStatement.all.mockReturnValue([]); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + db.getQueryEvents('day'); + + // Verify the SQL includes time filter + const prepareCall = mockDb.prepare.mock.calls.find((call) => + (call[0] as string).includes('SELECT * FROM query_events') + ); + expect(prepareCall).toBeDefined(); 
+ }); + + it('should filter with agentType filter', async () => { + mockStatement.all.mockReturnValue([]); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + db.getQueryEvents('week', { agentType: 'claude-code' }); + + // Verify the SQL includes agent_type filter + expect(mockStatement.all).toHaveBeenCalled(); + }); + + it('should filter with source filter', async () => { + mockStatement.all.mockReturnValue([]); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + db.getQueryEvents('month', { source: 'auto' }); + + // Verify the SQL includes source filter + expect(mockStatement.all).toHaveBeenCalled(); + }); + + it('should filter with projectPath filter', async () => { + mockStatement.all.mockReturnValue([]); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + db.getQueryEvents('year', { projectPath: '/test/project' }); + + // Verify the SQL includes project_path filter + expect(mockStatement.all).toHaveBeenCalled(); + }); + + it('should filter with sessionId filter', async () => { + mockStatement.all.mockReturnValue([]); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + db.getQueryEvents('all', { sessionId: 'session-123' }); + + // Verify the SQL includes session_id filter + expect(mockStatement.all).toHaveBeenCalled(); + }); + + it('should combine multiple filters', async () => { + mockStatement.all.mockReturnValue([]); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + db.getQueryEvents('week', { + agentType: 'claude-code', + source: 'user', + projectPath: '/test', + sessionId: 'session-1', + }); + + // Verify all parameters were passed + expect(mockStatement.all).toHaveBeenCalled(); + }); + }); + + describe('aggregation queries', () => { + it('should compute 
aggregated stats correctly', async () => { + mockStatement.get.mockReturnValue({ count: 100, total_duration: 500000 }); + mockStatement.all.mockReturnValue([ + { agent_type: 'claude-code', count: 70, duration: 350000 }, + { agent_type: 'opencode', count: 30, duration: 150000 }, + ]); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const stats = db.getAggregatedStats('week'); + + expect(stats.totalQueries).toBe(100); + expect(stats.totalDuration).toBe(500000); + expect(stats.avgDuration).toBe(5000); + }); + + it('should handle empty results for aggregation', async () => { + mockStatement.get.mockReturnValue({ count: 0, total_duration: 0 }); + mockStatement.all.mockReturnValue([]); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const stats = db.getAggregatedStats('day'); + + expect(stats.totalQueries).toBe(0); + expect(stats.avgDuration).toBe(0); + expect(stats.byAgent).toEqual({}); + }); + }); + + describe('CSV export', () => { + it('should export query events to CSV format', async () => { + const now = Date.now(); + mockStatement.all.mockReturnValue([ + { + id: 'event-1', + session_id: 'session-1', + agent_type: 'claude-code', + source: 'user', + start_time: now, + duration: 5000, + project_path: '/test', + tab_id: 'tab-1', + }, + ]); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const csv = db.exportToCsv('week'); + + // Verify CSV structure + expect(csv).toContain('id,sessionId,agentType,source,startTime,duration,projectPath,tabId'); + expect(csv).toContain('event-1'); + expect(csv).toContain('session-1'); + expect(csv).toContain('claude-code'); + }); + + it('should handle empty data for CSV export', async () => { + mockStatement.all.mockReturnValue([]); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const 
csv = db.exportToCsv('day'); + + // Should only contain headers + expect(csv).toBe( + 'id,sessionId,agentType,source,startTime,duration,projectPath,tabId,isRemote' + ); + }); + }); +}); + +/** + * Interactive session query event recording tests + * + * These tests verify that query events are properly recorded for interactive + * (user-initiated) sessions, which is the core validation for: + * - [ ] Verify query events are recorded for interactive sessions + */ +describe('Query events recorded for interactive sessions', () => { + beforeEach(() => { + vi.clearAllMocks(); + mockDb.pragma.mockReturnValue([{ user_version: 1 }]); + mockDb.prepare.mockReturnValue(mockStatement); + mockStatement.run.mockReturnValue({ changes: 1 }); + mockStatement.all.mockReturnValue([]); + mockFsExistsSync.mockReturnValue(true); + }); + + afterEach(() => { + vi.resetModules(); + }); + + describe('user-initiated interactive session recording', () => { + it('should record query event with source="user" for interactive session', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const startTime = Date.now(); + const eventId = db.insertQueryEvent({ + sessionId: 'interactive-session-1', + agentType: 'claude-code', + source: 'user', // Interactive session is always 'user' + startTime, + duration: 5000, + projectPath: '/Users/test/myproject', + tabId: 'tab-1', + }); + + expect(eventId).toBeDefined(); + expect(typeof eventId).toBe('string'); + + // Verify the INSERT was called with correct parameters + const runCalls = mockStatement.run.mock.calls; + const lastCall = runCalls[runCalls.length - 1]; + + // Parameters: id, session_id, agent_type, source, start_time, duration, project_path, tab_id + expect(lastCall[1]).toBe('interactive-session-1'); // session_id + expect(lastCall[2]).toBe('claude-code'); // agent_type + expect(lastCall[3]).toBe('user'); // source + expect(lastCall[4]).toBe(startTime); // start_time + 
expect(lastCall[5]).toBe(5000); // duration + expect(lastCall[6]).toBe('/Users/test/myproject'); // project_path + expect(lastCall[7]).toBe('tab-1'); // tab_id + }); + + it('should record interactive query without optional fields', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const startTime = Date.now(); + const eventId = db.insertQueryEvent({ + sessionId: 'minimal-session', + agentType: 'claude-code', + source: 'user', + startTime, + duration: 3000, + // projectPath and tabId are optional + }); + + expect(eventId).toBeDefined(); + + // Verify NULL values for optional fields + const runCalls = mockStatement.run.mock.calls; + const lastCall = runCalls[runCalls.length - 1]; + expect(lastCall[6]).toBeNull(); // project_path + expect(lastCall[7]).toBeNull(); // tab_id + }); + + it('should record multiple interactive queries for the same session', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + // Clear mocks after initialize() to count only test operations + mockStatement.run.mockClear(); + + const baseTime = Date.now(); + + // First query + const id1 = db.insertQueryEvent({ + sessionId: 'multi-query-session', + agentType: 'claude-code', + source: 'user', + startTime: baseTime, + duration: 5000, + projectPath: '/project', + tabId: 'tab-1', + }); + + // Second query (same session, different tab) + const id2 = db.insertQueryEvent({ + sessionId: 'multi-query-session', + agentType: 'claude-code', + source: 'user', + startTime: baseTime + 10000, + duration: 3000, + projectPath: '/project', + tabId: 'tab-2', + }); + + // Third query (same session, same tab as first) + const id3 = db.insertQueryEvent({ + sessionId: 'multi-query-session', + agentType: 'claude-code', + source: 'user', + startTime: baseTime + 20000, + duration: 7000, + projectPath: '/project', + tabId: 'tab-1', + }); + + // All should have unique IDs + 
expect(id1).not.toBe(id2); + expect(id2).not.toBe(id3); + expect(id1).not.toBe(id3); + + // All should be recorded (3 INSERT calls after initialization) + expect(mockStatement.run).toHaveBeenCalledTimes(3); + }); + + it('should record interactive queries with different agent types', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + // Clear mocks after initialize() to count only test operations + mockStatement.run.mockClear(); + + const startTime = Date.now(); + + // Claude Code query + const claudeId = db.insertQueryEvent({ + sessionId: 'session-1', + agentType: 'claude-code', + source: 'user', + startTime, + duration: 5000, + }); + + // OpenCode query + const opencodeId = db.insertQueryEvent({ + sessionId: 'session-2', + agentType: 'opencode', + source: 'user', + startTime: startTime + 10000, + duration: 3000, + }); + + // Codex query + const codexId = db.insertQueryEvent({ + sessionId: 'session-3', + agentType: 'codex', + source: 'user', + startTime: startTime + 20000, + duration: 4000, + }); + + expect(claudeId).toBeDefined(); + expect(opencodeId).toBeDefined(); + expect(codexId).toBeDefined(); + + // Verify different agent types were recorded + const runCalls = mockStatement.run.mock.calls; + expect(runCalls[0][2]).toBe('claude-code'); + expect(runCalls[1][2]).toBe('opencode'); + expect(runCalls[2][2]).toBe('codex'); + }); + }); + + describe('retrieval of interactive session query events', () => { + it('should retrieve interactive query events filtered by source=user', async () => { + const now = Date.now(); + mockStatement.all.mockReturnValue([ + { + id: 'event-1', + session_id: 'session-1', + agent_type: 'claude-code', + source: 'user', + start_time: now - 1000, + duration: 5000, + project_path: '/project', + tab_id: 'tab-1', + }, + { + id: 'event-2', + session_id: 'session-2', + agent_type: 'claude-code', + source: 'user', + start_time: now - 2000, + duration: 3000, + project_path: 
'/project', + tab_id: 'tab-2', + }, + ]); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + // Filter by source='user' to get only interactive sessions + const events = db.getQueryEvents('day', { source: 'user' }); + + expect(events).toHaveLength(2); + expect(events[0].source).toBe('user'); + expect(events[1].source).toBe('user'); + expect(events[0].sessionId).toBe('session-1'); + expect(events[1].sessionId).toBe('session-2'); + }); + + it('should retrieve interactive query events filtered by sessionId', async () => { + const now = Date.now(); + mockStatement.all.mockReturnValue([ + { + id: 'event-1', + session_id: 'target-session', + agent_type: 'claude-code', + source: 'user', + start_time: now - 1000, + duration: 5000, + project_path: '/project', + tab_id: 'tab-1', + }, + ]); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const events = db.getQueryEvents('week', { sessionId: 'target-session' }); + + expect(events).toHaveLength(1); + expect(events[0].sessionId).toBe('target-session'); + }); + + it('should retrieve interactive query events filtered by projectPath', async () => { + const now = Date.now(); + mockStatement.all.mockReturnValue([ + { + id: 'event-1', + session_id: 'session-1', + agent_type: 'claude-code', + source: 'user', + start_time: now - 1000, + duration: 5000, + project_path: '/specific/project', + tab_id: 'tab-1', + }, + ]); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const events = db.getQueryEvents('month', { projectPath: '/specific/project' }); + + expect(events).toHaveLength(1); + expect(events[0].projectPath).toBe('/specific/project'); + }); + + it('should correctly map database columns to QueryEvent interface fields', async () => { + const now = Date.now(); + mockStatement.all.mockReturnValue([ + { + id: 'db-event-id', + session_id: 
'db-session-id', + agent_type: 'claude-code', + source: 'user', + start_time: now, + duration: 5000, + project_path: '/project/path', + tab_id: 'tab-123', + }, + ]); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const events = db.getQueryEvents('day'); + + expect(events).toHaveLength(1); + const event = events[0]; + + // Verify snake_case -> camelCase mapping + expect(event.id).toBe('db-event-id'); + expect(event.sessionId).toBe('db-session-id'); + expect(event.agentType).toBe('claude-code'); + expect(event.source).toBe('user'); + expect(event.startTime).toBe(now); + expect(event.duration).toBe(5000); + expect(event.projectPath).toBe('/project/path'); + expect(event.tabId).toBe('tab-123'); + }); + }); + + describe('aggregation includes interactive session data', () => { + it('should include interactive sessions in aggregated stats', async () => { + mockStatement.get.mockReturnValue({ count: 10, total_duration: 50000 }); + + // The aggregation calls mockStatement.all multiple times for different queries + // We return based on the call sequence: byAgent, bySource, byDay + let callCount = 0; + mockStatement.all.mockImplementation(() => { + callCount++; + if (callCount === 1) { + // byAgent breakdown + return [{ agent_type: 'claude-code', count: 10, duration: 50000 }]; + } + if (callCount === 2) { + // bySource breakdown + return [{ source: 'user', count: 10 }]; + } + // byDay breakdown + return [{ date: '2024-12-28', count: 10, duration: 50000 }]; + }); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const stats = db.getAggregatedStats('week'); + + expect(stats.totalQueries).toBe(10); + expect(stats.totalDuration).toBe(50000); + expect(stats.avgDuration).toBe(5000); + expect(stats.bySource.user).toBe(10); + expect(stats.bySource.auto).toBe(0); + }); + + it('should correctly separate user vs auto queries in bySource', async () => { + 
mockStatement.get.mockReturnValue({ count: 15, total_duration: 75000 }); + + // Return by-source breakdown with both user and auto on second call + let callCount = 0; + mockStatement.all.mockImplementation(() => { + callCount++; + if (callCount === 2) { + // bySource breakdown + return [ + { source: 'user', count: 10 }, + { source: 'auto', count: 5 }, + ]; + } + return []; + }); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const stats = db.getAggregatedStats('month'); + + expect(stats.bySource.user).toBe(10); + expect(stats.bySource.auto).toBe(5); + }); + }); + + describe('timing accuracy for interactive sessions', () => { + it('should preserve exact startTime and duration values', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const exactStartTime = 1735344000000; // Specific timestamp + const exactDuration = 12345; // Specific duration in ms + + db.insertQueryEvent({ + sessionId: 'timing-test-session', + agentType: 'claude-code', + source: 'user', + startTime: exactStartTime, + duration: exactDuration, + }); + + const runCalls = mockStatement.run.mock.calls; + const lastCall = runCalls[runCalls.length - 1]; + + expect(lastCall[4]).toBe(exactStartTime); // Exact start_time preserved + expect(lastCall[5]).toBe(exactDuration); // Exact duration preserved + }); + + it('should handle zero duration (immediate responses)', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const eventId = db.insertQueryEvent({ + sessionId: 'zero-duration-session', + agentType: 'claude-code', + source: 'user', + startTime: Date.now(), + duration: 0, // Zero duration is valid (e.g., cached response) + }); + + expect(eventId).toBeDefined(); + + const runCalls = mockStatement.run.mock.calls; + const lastCall = runCalls[runCalls.length - 1]; + expect(lastCall[5]).toBe(0); + }); 
+ + it('should handle very long durations', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const longDuration = 10 * 60 * 1000; // 10 minutes in ms + + const eventId = db.insertQueryEvent({ + sessionId: 'long-duration-session', + agentType: 'claude-code', + source: 'user', + startTime: Date.now(), + duration: longDuration, + }); + + expect(eventId).toBeDefined(); + + const runCalls = mockStatement.run.mock.calls; + const lastCall = runCalls[runCalls.length - 1]; + expect(lastCall[5]).toBe(longDuration); + }); + }); +}); + +/** + * Comprehensive Auto Run session and task recording verification tests + * + * These tests verify the complete Auto Run tracking workflow: + * 1. Auto Run sessions are properly recorded when batch processing starts + * 2. Individual tasks within sessions are recorded with timing data + * 3. Sessions are updated correctly when batch processing completes + * 4. All data can be retrieved with proper field mapping + */ diff --git a/src/__tests__/main/stats/stats-db.test.ts b/src/__tests__/main/stats/stats-db.test.ts new file mode 100644 index 00000000..adc3b94f --- /dev/null +++ b/src/__tests__/main/stats/stats-db.test.ts @@ -0,0 +1,682 @@ +/** + * Tests for StatsDB core class, initialization, and singleton. + * + * Note: better-sqlite3 is a native module compiled for Electron's Node version. + * Direct testing with the native module in vitest is not possible without + * electron-rebuild for the vitest runtime. These tests use mocked database + * operations to verify the logic without requiring the actual native module. + * + * For full integration testing of the SQLite database, use the Electron test + * environment (e2e tests) where the native module is properly loaded. 
+ */ + +import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest'; +import * as path from 'path'; +import * as os from 'os'; + +// Track Database constructor calls to verify file path +let lastDbPath: string | null = null; + +// Store mock references so they can be accessed in tests +const mockStatement = { + run: vi.fn(() => ({ changes: 1 })), + get: vi.fn(() => ({ count: 0, total_duration: 0 })), + all: vi.fn(() => []), +}; + +const mockDb = { + pragma: vi.fn(() => [{ user_version: 0 }]), + prepare: vi.fn(() => mockStatement), + close: vi.fn(), + // Transaction mock that immediately executes the function + transaction: vi.fn((fn: () => void) => { + return () => fn(); + }), +}; + +// Mock better-sqlite3 as a class +vi.mock('better-sqlite3', () => { + return { + default: class MockDatabase { + constructor(dbPath: string) { + lastDbPath = dbPath; + } + pragma = mockDb.pragma; + prepare = mockDb.prepare; + close = mockDb.close; + transaction = mockDb.transaction; + }, + }; +}); + +// Mock electron's app module with trackable userData path +const mockUserDataPath = path.join(os.tmpdir(), 'maestro-test-stats-db'); +vi.mock('electron', () => ({ + app: { + getPath: vi.fn((name: string) => { + if (name === 'userData') return mockUserDataPath; + return os.tmpdir(); + }), + }, +})); + +// Track fs calls +const mockFsExistsSync = vi.fn(() => true); +const mockFsMkdirSync = vi.fn(); +const mockFsCopyFileSync = vi.fn(); +const mockFsUnlinkSync = vi.fn(); +const mockFsRenameSync = vi.fn(); +const mockFsStatSync = vi.fn(() => ({ size: 1024 })); +const mockFsReadFileSync = vi.fn(() => '0'); // Default: old timestamp (triggers vacuum check) +const mockFsWriteFileSync = vi.fn(); + +// Mock fs +vi.mock('fs', () => ({ + existsSync: (...args: unknown[]) => mockFsExistsSync(...args), + mkdirSync: (...args: unknown[]) => mockFsMkdirSync(...args), + copyFileSync: (...args: unknown[]) => mockFsCopyFileSync(...args), + unlinkSync: (...args: unknown[]) => 
mockFsUnlinkSync(...args), + renameSync: (...args: unknown[]) => mockFsRenameSync(...args), + statSync: (...args: unknown[]) => mockFsStatSync(...args), + readFileSync: (...args: unknown[]) => mockFsReadFileSync(...args), + writeFileSync: (...args: unknown[]) => mockFsWriteFileSync(...args), +})); + +// Mock logger +vi.mock('../../../main/utils/logger', () => ({ + logger: { + info: vi.fn(), + warn: vi.fn(), + error: vi.fn(), + debug: vi.fn(), + }, +})); + +// Import types only - we'll test the type definitions +import type { + QueryEvent, + AutoRunSession, + AutoRunTask, + SessionLifecycleEvent, + StatsTimeRange, + StatsFilters, + StatsAggregation, +} from '../../../shared/stats-types'; + +describe('StatsDB class (mocked)', () => { + beforeEach(() => { + vi.clearAllMocks(); + lastDbPath = null; + mockDb.pragma.mockReturnValue([{ user_version: 0 }]); + mockDb.prepare.mockReturnValue(mockStatement); + mockStatement.run.mockReturnValue({ changes: 1 }); + mockStatement.get.mockReturnValue({ count: 0, total_duration: 0 }); + mockStatement.all.mockReturnValue([]); + mockFsExistsSync.mockReturnValue(true); + mockFsMkdirSync.mockClear(); + }); + + afterEach(() => { + vi.resetModules(); + }); + + describe('module exports', () => { + it('should export StatsDB class', async () => { + const { StatsDB } = await import('../../../main/stats'); + expect(StatsDB).toBeDefined(); + expect(typeof StatsDB).toBe('function'); + }); + + it('should export singleton functions', async () => { + const { getStatsDB, initializeStatsDB, closeStatsDB } = await import('../../../main/stats'); + expect(getStatsDB).toBeDefined(); + expect(initializeStatsDB).toBeDefined(); + expect(closeStatsDB).toBeDefined(); + }); + }); + + describe('StatsDB instantiation', () => { + it('should create instance without initialization', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + + expect(db).toBeDefined(); + expect(db.isReady()).toBe(false); + }); + + 
it('should return database path', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + + expect(db.getDbPath()).toContain('stats.db'); + }); + }); + + describe('initialization', () => { + it('should initialize database and set isReady to true', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + + db.initialize(); + + expect(db.isReady()).toBe(true); + }); + + it('should enable WAL mode', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + + db.initialize(); + + expect(mockDb.pragma).toHaveBeenCalledWith('journal_mode = WAL'); + }); + + it('should run v1 migration for fresh database', async () => { + mockDb.pragma.mockImplementation((sql: string) => { + if (sql === 'user_version') return [{ user_version: 0 }]; + return undefined; + }); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + // Should set user_version to 1 + expect(mockDb.pragma).toHaveBeenCalledWith('user_version = 1'); + }); + + it('should skip migration for already migrated database', async () => { + mockDb.pragma.mockImplementation((sql: string) => { + if (sql === 'user_version') return [{ user_version: 1 }]; + return undefined; + }); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + // Should NOT set user_version (no migration needed) + expect(mockDb.pragma).not.toHaveBeenCalledWith('user_version = 1'); + }); + + it('should create _migrations table on initialization', async () => { + mockDb.pragma.mockImplementation((sql: string) => { + if (sql === 'user_version') return [{ user_version: 0 }]; + return undefined; + }); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + // Should have prepared the CREATE TABLE IF NOT EXISTS _migrations statement + 
expect(mockDb.prepare).toHaveBeenCalledWith( + expect.stringContaining('CREATE TABLE IF NOT EXISTS _migrations') + ); + }); + + it('should record successful migration in _migrations table', async () => { + mockDb.pragma.mockImplementation((sql: string) => { + if (sql === 'user_version') return [{ user_version: 0 }]; + return undefined; + }); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + // Should have inserted a success record into _migrations + expect(mockDb.prepare).toHaveBeenCalledWith( + expect.stringContaining('INSERT OR REPLACE INTO _migrations') + ); + }); + + it('should use transaction for migration atomicity', async () => { + mockDb.pragma.mockImplementation((sql: string) => { + if (sql === 'user_version') return [{ user_version: 0 }]; + return undefined; + }); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + // Should have used transaction + expect(mockDb.transaction).toHaveBeenCalled(); + }); + }); + + describe('migration system API', () => { + beforeEach(() => { + vi.clearAllMocks(); + mockDb.pragma.mockImplementation((sql: string) => { + if (sql === 'user_version') return [{ user_version: 1 }]; + return undefined; + }); + mockDb.prepare.mockReturnValue(mockStatement); + mockStatement.run.mockReturnValue({ changes: 1 }); + mockStatement.get.mockReturnValue(null); + mockStatement.all.mockReturnValue([]); + mockFsExistsSync.mockReturnValue(true); + }); + + afterEach(() => { + vi.resetModules(); + }); + + it('should return current version via getCurrentVersion()', async () => { + mockDb.pragma.mockImplementation((sql: string) => { + if (sql === 'user_version') return [{ user_version: 1 }]; + return undefined; + }); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + expect(db.getCurrentVersion()).toBe(1); + }); + + it('should return target version via 
getTargetVersion()', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + // Currently we have version 3 migration (v1: initial schema, v2: is_remote column, v3: session_lifecycle table) + expect(db.getTargetVersion()).toBe(3); + }); + + it('should return false from hasPendingMigrations() when up to date', async () => { + mockDb.pragma.mockImplementation((sql: string) => { + if (sql === 'user_version') return [{ user_version: 3 }]; + return undefined; + }); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + expect(db.hasPendingMigrations()).toBe(false); + }); + + it('should correctly identify pending migrations based on version difference', async () => { + // This test verifies the hasPendingMigrations() logic + // by checking current version < target version + + // Simulate a database that's already at version 3 (target version) + let currentVersion = 3; + mockDb.pragma.mockImplementation((sql: string) => { + if (sql === 'user_version') return [{ user_version: currentVersion }]; + // Handle version updates from migration + if (sql.startsWith('user_version = ')) { + currentVersion = parseInt(sql.replace('user_version = ', '')); + } + return undefined; + }); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + // At version 3, target is 3, so no pending migrations + expect(db.getCurrentVersion()).toBe(3); + expect(db.getTargetVersion()).toBe(3); + expect(db.hasPendingMigrations()).toBe(false); + }); + + it('should return empty array from getMigrationHistory() when no _migrations table', async () => { + mockStatement.get.mockReturnValue(null); // No table exists + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const history = db.getMigrationHistory(); + expect(history).toEqual([]); + }); + + it('should return 
migration records from getMigrationHistory()', async () => { + const mockMigrationRows = [ + { + version: 1, + description: 'Initial schema', + applied_at: 1704067200000, + status: 'success' as const, + error_message: null, + }, + ]; + + mockStatement.get.mockReturnValue({ name: '_migrations' }); // Table exists + mockStatement.all.mockReturnValue(mockMigrationRows); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const history = db.getMigrationHistory(); + expect(history).toHaveLength(1); + expect(history[0]).toEqual({ + version: 1, + description: 'Initial schema', + appliedAt: 1704067200000, + status: 'success', + errorMessage: undefined, + }); + }); + + it('should include errorMessage in migration history for failed migrations', async () => { + const mockMigrationRows = [ + { + version: 2, + description: 'Add new column', + applied_at: 1704067200000, + status: 'failed' as const, + error_message: 'SQLITE_ERROR: duplicate column name', + }, + ]; + + mockStatement.get.mockReturnValue({ name: '_migrations' }); + mockStatement.all.mockReturnValue(mockMigrationRows); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const history = db.getMigrationHistory(); + expect(history[0].status).toBe('failed'); + expect(history[0].errorMessage).toBe('SQLITE_ERROR: duplicate column name'); + }); + }); + + describe('error handling', () => { + it('should throw when calling insertQueryEvent before initialization', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + + expect(() => + db.insertQueryEvent({ + sessionId: 'test', + agentType: 'claude-code', + source: 'user', + startTime: Date.now(), + duration: 1000, + }) + ).toThrow('Database not initialized'); + }); + + it('should throw when calling getQueryEvents before initialization', async () => { + const { StatsDB } = await import('../../../main/stats'); + 
const db = new StatsDB(); + + expect(() => db.getQueryEvents('day')).toThrow('Database not initialized'); + }); + + it('should throw when calling getAggregatedStats before initialization', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + + expect(() => db.getAggregatedStats('week')).toThrow('Database not initialized'); + }); + }); + + describe('query events', () => { + it('should insert a query event and return an id', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const eventId = db.insertQueryEvent({ + sessionId: 'session-1', + agentType: 'claude-code', + source: 'user', + startTime: Date.now(), + duration: 5000, + projectPath: '/test/project', + tabId: 'tab-1', + }); + + expect(eventId).toBeDefined(); + expect(typeof eventId).toBe('string'); + expect(mockStatement.run).toHaveBeenCalled(); + }); + + it('should retrieve query events within time range', async () => { + mockStatement.all.mockReturnValue([ + { + id: 'event-1', + session_id: 'session-1', + agent_type: 'claude-code', + source: 'user', + start_time: Date.now(), + duration: 5000, + project_path: '/test', + tab_id: 'tab-1', + }, + ]); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + const events = db.getQueryEvents('day'); + + expect(events).toHaveLength(1); + expect(events[0].sessionId).toBe('session-1'); + expect(events[0].agentType).toBe('claude-code'); + }); + }); + + describe('close', () => { + it('should close the database connection', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + db.close(); + + expect(mockDb.close).toHaveBeenCalled(); + expect(db.isReady()).toBe(false); + }); + }); +}); + +/** + * Database file creation verification tests + * + * These tests verify that the database file is created at the correct path + * in the 
user's application data directory on first launch. + */ +describe('Database file creation on first launch', () => { + beforeEach(() => { + vi.clearAllMocks(); + lastDbPath = null; + mockDb.pragma.mockReturnValue([{ user_version: 0 }]); + mockDb.prepare.mockReturnValue(mockStatement); + mockFsExistsSync.mockReturnValue(true); + mockFsMkdirSync.mockClear(); + }); + + afterEach(() => { + vi.resetModules(); + }); + + describe('database path computation', () => { + it('should compute database path using electron app.getPath("userData")', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + + const dbPath = db.getDbPath(); + + // Verify the path is in the userData directory + expect(dbPath).toContain(mockUserDataPath); + expect(dbPath).toContain('stats.db'); + }); + + it('should create database file at userData/stats.db path', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + // Verify better-sqlite3 was called with the correct path + expect(lastDbPath).toBe(path.join(mockUserDataPath, 'stats.db')); + }); + + it('should use platform-appropriate userData path', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + + // The path should be absolute and contain stats.db + const dbPath = db.getDbPath(); + expect(path.isAbsolute(dbPath)).toBe(true); + expect(path.basename(dbPath)).toBe('stats.db'); + }); + }); + + describe('directory creation', () => { + it('should create userData directory if it does not exist', async () => { + // Simulate directory not existing + mockFsExistsSync.mockReturnValue(false); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + // Verify mkdirSync was called with recursive option + expect(mockFsMkdirSync).toHaveBeenCalledWith(mockUserDataPath, { recursive: true }); + }); + + it('should not create directory if it already 
exists', async () => { + // Simulate directory already existing + mockFsExistsSync.mockReturnValue(true); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + // Verify mkdirSync was NOT called + expect(mockFsMkdirSync).not.toHaveBeenCalled(); + }); + }); + + describe('database initialization', () => { + it('should open database connection on initialize', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + + expect(db.isReady()).toBe(false); + db.initialize(); + expect(db.isReady()).toBe(true); + }); + + it('should only initialize once (idempotent)', async () => { + mockDb.pragma.mockClear(); + + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + + db.initialize(); + const firstCallCount = mockDb.pragma.mock.calls.length; + + db.initialize(); // Second call should be a no-op + const secondCallCount = mockDb.pragma.mock.calls.length; + + expect(secondCallCount).toBe(firstCallCount); + }); + + it('should create all three tables on fresh database', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new StatsDB(); + db.initialize(); + + // Verify prepare was called with CREATE TABLE statements + const prepareCalls = mockDb.prepare.mock.calls.map((call) => call[0]); + + // Check for query_events table + expect( + prepareCalls.some((sql: string) => sql.includes('CREATE TABLE IF NOT EXISTS query_events')) + ).toBe(true); + + // Check for auto_run_sessions table + expect( + prepareCalls.some((sql: string) => + sql.includes('CREATE TABLE IF NOT EXISTS auto_run_sessions') + ) + ).toBe(true); + + // Check for auto_run_tasks table + expect( + prepareCalls.some((sql: string) => + sql.includes('CREATE TABLE IF NOT EXISTS auto_run_tasks') + ) + ).toBe(true); + }); + + it('should create all required indexes', async () => { + const { StatsDB } = await import('../../../main/stats'); + const db = new 
StatsDB(); + db.initialize(); + + const prepareCalls = mockDb.prepare.mock.calls.map((call) => call[0]); + + // Verify all 7 indexes are created + const expectedIndexes = [ + 'idx_query_start_time', + 'idx_query_agent_type', + 'idx_query_source', + 'idx_query_session', + 'idx_auto_session_start', + 'idx_task_auto_session', + 'idx_task_start', + ]; + + for (const indexName of expectedIndexes) { + expect(prepareCalls.some((sql: string) => sql.includes(indexName))).toBe(true); + } + }); + }); + + describe('singleton pattern', () => { + it('should return same instance from getStatsDB', async () => { + const { getStatsDB, closeStatsDB } = await import('../../../main/stats'); + + const instance1 = getStatsDB(); + const instance2 = getStatsDB(); + + expect(instance1).toBe(instance2); + + // Cleanup + closeStatsDB(); + }); + + it('should initialize database via initializeStatsDB', async () => { + const { initializeStatsDB, getStatsDB, closeStatsDB } = await import('../../../main/stats'); + + initializeStatsDB(); + const db = getStatsDB(); + + expect(db.isReady()).toBe(true); + + // Cleanup + closeStatsDB(); + }); + + it('should close database and reset singleton via closeStatsDB', async () => { + const { initializeStatsDB, getStatsDB, closeStatsDB } = await import('../../../main/stats'); + + initializeStatsDB(); + const dbBefore = getStatsDB(); + expect(dbBefore.isReady()).toBe(true); + + closeStatsDB(); + + // After close, a new instance should be returned + const dbAfter = getStatsDB(); + expect(dbAfter).not.toBe(dbBefore); + expect(dbAfter.isReady()).toBe(false); + }); + }); +}); + +/** + * Auto Run session and task recording tests + */ diff --git a/src/__tests__/main/stats/types.test.ts b/src/__tests__/main/stats/types.test.ts new file mode 100644 index 00000000..b506f0c8 --- /dev/null +++ b/src/__tests__/main/stats/types.test.ts @@ -0,0 +1,319 @@ +/** + * Tests for shared stats type definitions. 
+ * + * Note: better-sqlite3 is a native module compiled for Electron's Node version. + * Direct testing with the native module in vitest is not possible without + * electron-rebuild for the vitest runtime. These tests use mocked database + * operations to verify the logic without requiring the actual native module. + * + * For full integration testing of the SQLite database, use the Electron test + * environment (e2e tests) where the native module is properly loaded. + */ + +import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest'; +import * as path from 'path'; +import * as os from 'os'; + +// Track Database constructor calls to verify file path +let lastDbPath: string | null = null; + +// Store mock references so they can be accessed in tests +const mockStatement = { + run: vi.fn(() => ({ changes: 1 })), + get: vi.fn(() => ({ count: 0, total_duration: 0 })), + all: vi.fn(() => []), +}; + +const mockDb = { + pragma: vi.fn(() => [{ user_version: 0 }]), + prepare: vi.fn(() => mockStatement), + close: vi.fn(), + // Transaction mock that immediately executes the function + transaction: vi.fn((fn: () => void) => { + return () => fn(); + }), +}; + +// Mock better-sqlite3 as a class +vi.mock('better-sqlite3', () => { + return { + default: class MockDatabase { + constructor(dbPath: string) { + lastDbPath = dbPath; + } + pragma = mockDb.pragma; + prepare = mockDb.prepare; + close = mockDb.close; + transaction = mockDb.transaction; + }, + }; +}); + +// Mock electron's app module with trackable userData path +const mockUserDataPath = path.join(os.tmpdir(), 'maestro-test-stats-db'); +vi.mock('electron', () => ({ + app: { + getPath: vi.fn((name: string) => { + if (name === 'userData') return mockUserDataPath; + return os.tmpdir(); + }), + }, +})); + +// Track fs calls +const mockFsExistsSync = vi.fn(() => true); +const mockFsMkdirSync = vi.fn(); +const mockFsCopyFileSync = vi.fn(); +const mockFsUnlinkSync = vi.fn(); +const mockFsRenameSync = vi.fn(); +const 
mockFsStatSync = vi.fn(() => ({ size: 1024 })); +const mockFsReadFileSync = vi.fn(() => '0'); // Default: old timestamp (triggers vacuum check) +const mockFsWriteFileSync = vi.fn(); + +// Mock fs +vi.mock('fs', () => ({ + existsSync: (...args: unknown[]) => mockFsExistsSync(...args), + mkdirSync: (...args: unknown[]) => mockFsMkdirSync(...args), + copyFileSync: (...args: unknown[]) => mockFsCopyFileSync(...args), + unlinkSync: (...args: unknown[]) => mockFsUnlinkSync(...args), + renameSync: (...args: unknown[]) => mockFsRenameSync(...args), + statSync: (...args: unknown[]) => mockFsStatSync(...args), + readFileSync: (...args: unknown[]) => mockFsReadFileSync(...args), + writeFileSync: (...args: unknown[]) => mockFsWriteFileSync(...args), +})); + +// Mock logger +vi.mock('../../../main/utils/logger', () => ({ + logger: { + info: vi.fn(), + warn: vi.fn(), + error: vi.fn(), + debug: vi.fn(), + }, +})); + +// Import types only - we'll test the type definitions +import type { + QueryEvent, + AutoRunSession, + AutoRunTask, + SessionLifecycleEvent, + StatsTimeRange, + StatsFilters, + StatsAggregation, +} from '../../../shared/stats-types'; + +describe('stats-types.ts', () => { + describe('QueryEvent interface', () => { + it('should define proper QueryEvent structure', () => { + const event: QueryEvent = { + id: 'test-id', + sessionId: 'session-1', + agentType: 'claude-code', + source: 'user', + startTime: Date.now(), + duration: 5000, + projectPath: '/test/project', + tabId: 'tab-1', + }; + + expect(event.id).toBe('test-id'); + expect(event.sessionId).toBe('session-1'); + expect(event.source).toBe('user'); + }); + + it('should allow optional fields to be undefined', () => { + const event: QueryEvent = { + id: 'test-id', + sessionId: 'session-1', + agentType: 'claude-code', + source: 'auto', + startTime: Date.now(), + duration: 3000, + }; + + expect(event.projectPath).toBeUndefined(); + expect(event.tabId).toBeUndefined(); + }); + }); + + describe('AutoRunSession 
interface', () => { + it('should define proper AutoRunSession structure', () => { + const session: AutoRunSession = { + id: 'auto-run-1', + sessionId: 'session-1', + agentType: 'claude-code', + documentPath: '/docs/task.md', + startTime: Date.now(), + duration: 60000, + tasksTotal: 5, + tasksCompleted: 3, + projectPath: '/test/project', + }; + + expect(session.id).toBe('auto-run-1'); + expect(session.tasksTotal).toBe(5); + expect(session.tasksCompleted).toBe(3); + }); + }); + + describe('AutoRunTask interface', () => { + it('should define proper AutoRunTask structure', () => { + const task: AutoRunTask = { + id: 'task-1', + autoRunSessionId: 'auto-run-1', + sessionId: 'session-1', + agentType: 'claude-code', + taskIndex: 0, + taskContent: 'First task content', + startTime: Date.now(), + duration: 10000, + success: true, + }; + + expect(task.id).toBe('task-1'); + expect(task.taskIndex).toBe(0); + expect(task.success).toBe(true); + }); + + it('should handle failed tasks', () => { + const task: AutoRunTask = { + id: 'task-2', + autoRunSessionId: 'auto-run-1', + sessionId: 'session-1', + agentType: 'claude-code', + taskIndex: 1, + startTime: Date.now(), + duration: 5000, + success: false, + }; + + expect(task.success).toBe(false); + expect(task.taskContent).toBeUndefined(); + }); + }); + + describe('SessionLifecycleEvent interface', () => { + it('should define proper SessionLifecycleEvent structure for created session', () => { + const event: SessionLifecycleEvent = { + id: 'lifecycle-1', + sessionId: 'session-1', + agentType: 'claude-code', + projectPath: '/test/project', + createdAt: Date.now(), + isRemote: false, + }; + + expect(event.id).toBe('lifecycle-1'); + expect(event.sessionId).toBe('session-1'); + expect(event.agentType).toBe('claude-code'); + expect(event.closedAt).toBeUndefined(); + expect(event.duration).toBeUndefined(); + }); + + it('should define proper SessionLifecycleEvent structure for closed session', () => { + // Use fixed timestamps to avoid race 
conditions from multiple Date.now() calls + const createdAt = 1700000000000; // Fixed timestamp + const closedAt = 1700003600000; // Exactly 1 hour later + const event: SessionLifecycleEvent = { + id: 'lifecycle-2', + sessionId: 'session-2', + agentType: 'claude-code', + projectPath: '/test/project', + createdAt, + closedAt, + duration: closedAt - createdAt, + isRemote: true, + }; + + expect(event.closedAt).toBe(closedAt); + expect(event.duration).toBe(3600000); + expect(event.isRemote).toBe(true); + }); + + it('should allow optional fields to be undefined', () => { + const event: SessionLifecycleEvent = { + id: 'lifecycle-3', + sessionId: 'session-3', + agentType: 'opencode', + createdAt: Date.now(), + }; + + expect(event.projectPath).toBeUndefined(); + expect(event.closedAt).toBeUndefined(); + expect(event.duration).toBeUndefined(); + expect(event.isRemote).toBeUndefined(); + }); + }); + + describe('StatsTimeRange type', () => { + it('should accept valid time ranges', () => { + const ranges: StatsTimeRange[] = ['day', 'week', 'month', 'year', 'all']; + + expect(ranges).toHaveLength(5); + expect(ranges).toContain('day'); + expect(ranges).toContain('all'); + }); + }); + + describe('StatsFilters interface', () => { + it('should allow partial filters', () => { + const filters1: StatsFilters = { agentType: 'claude-code' }; + const filters2: StatsFilters = { source: 'user' }; + const filters3: StatsFilters = { + agentType: 'opencode', + source: 'auto', + projectPath: '/test', + }; + + expect(filters1.agentType).toBe('claude-code'); + expect(filters2.source).toBe('user'); + expect(filters3.projectPath).toBe('/test'); + }); + }); + + describe('StatsAggregation interface', () => { + it('should define proper aggregation structure', () => { + const aggregation: StatsAggregation = { + totalQueries: 100, + totalDuration: 500000, + avgDuration: 5000, + byAgent: { + 'claude-code': { count: 70, duration: 350000 }, + opencode: { count: 30, duration: 150000 }, + }, + bySource: { 
user: 60, auto: 40 }, + byLocation: { local: 80, remote: 20 }, + byDay: [ + { date: '2024-01-01', count: 10, duration: 50000 }, + { date: '2024-01-02', count: 15, duration: 75000 }, + ], + byHour: [ + { hour: 9, count: 20, duration: 100000 }, + { hour: 10, count: 25, duration: 125000 }, + ], + // Session lifecycle fields + totalSessions: 15, + sessionsByAgent: { + 'claude-code': 10, + opencode: 5, + }, + sessionsByDay: [ + { date: '2024-01-01', count: 3 }, + { date: '2024-01-02', count: 5 }, + ], + avgSessionDuration: 1800000, + }; + + expect(aggregation.totalQueries).toBe(100); + expect(aggregation.byAgent['claude-code'].count).toBe(70); + expect(aggregation.bySource.user).toBe(60); + expect(aggregation.byDay).toHaveLength(2); + // Session lifecycle assertions + expect(aggregation.totalSessions).toBe(15); + expect(aggregation.sessionsByAgent['claude-code']).toBe(10); + expect(aggregation.sessionsByDay).toHaveLength(2); + expect(aggregation.avgSessionDuration).toBe(1800000); + }); + }); +}); diff --git a/src/__tests__/main/storage/claude-session-storage.test.ts b/src/__tests__/main/storage/claude-session-storage.test.ts new file mode 100644 index 00000000..b34496e8 --- /dev/null +++ b/src/__tests__/main/storage/claude-session-storage.test.ts @@ -0,0 +1,416 @@ +/** + * Tests for ClaudeSessionStorage + * + * Verifies: + * - Session origin registration and retrieval + * - Session naming and starring + * - Context usage tracking + * - Origin info attachment to sessions + */ + +import { describe, it, expect, vi, beforeEach } from 'vitest'; +import { ClaudeSessionStorage } from '../../../main/storage/claude-session-storage'; +import type { SshRemoteConfig } from '../../../shared/types'; +import type Store from 'electron-store'; +import type { ClaudeSessionOriginsData } from '../../../main/storage/claude-session-storage'; + +// Mock electron-store +const mockStoreData: Record<string, unknown> = {}; +vi.mock('electron-store', () => { + return { + default: vi.fn().mockImplementation(() => 
({ + get: vi.fn((key: string, defaultValue?: unknown) => { + return mockStoreData[key] ?? defaultValue; + }), + set: vi.fn((key: string, value: unknown) => { + mockStoreData[key] = value; + }), + store: mockStoreData, + })), + }; +}); + +// Mock logger +vi.mock('../../../main/utils/logger', () => ({ + logger: { + info: vi.fn(), + debug: vi.fn(), + warn: vi.fn(), + error: vi.fn(), + }, +})); + +// Mock fs/promises +vi.mock('fs/promises', () => ({ + default: { + access: vi.fn(), + readdir: vi.fn(), + stat: vi.fn(), + readFile: vi.fn(), + writeFile: vi.fn(), + }, +})); + +// Mock remote-fs utilities +vi.mock('../../../main/utils/remote-fs', () => ({ + readDirRemote: vi.fn(), + readFileRemote: vi.fn(), + statRemote: vi.fn(), +})); + +// Mock statsCache +vi.mock('../../../main/utils/statsCache', () => ({ + encodeClaudeProjectPath: vi.fn((projectPath: string) => { + // Simple encoding for tests - replace / with - + return projectPath.replace(/\//g, '-').replace(/^-/, ''); + }), +})); + +// Mock pricing +vi.mock('../../../main/utils/pricing', () => ({ + calculateClaudeCost: vi.fn(() => 0.05), +})); + +describe('ClaudeSessionStorage', () => { + let storage: ClaudeSessionStorage; + let mockStore: { + get: ReturnType<typeof vi.fn>; + set: ReturnType<typeof vi.fn>; + store: Record<string, unknown>; + }; + + beforeEach(() => { + vi.clearAllMocks(); + + // Reset mock store data + Object.keys(mockStoreData).forEach((key) => delete mockStoreData[key]); + mockStoreData['origins'] = {}; + + mockStore = { + get: vi.fn((key: string, defaultValue?: unknown) => { + return mockStoreData[key] ?? 
defaultValue; + }), + set: vi.fn((key: string, value: unknown) => { + mockStoreData[key] = value; + }), + store: mockStoreData, + }; + + // Create storage with mock store + storage = new ClaudeSessionStorage(mockStore as unknown as Store); + }); + + describe('Origin Management', () => { + describe('registerSessionOrigin', () => { + it('should register a user session origin', () => { + storage.registerSessionOrigin('/project/path', 'session-123', 'user'); + + const origins = storage.getSessionOrigins('/project/path'); + expect(origins['session-123']).toEqual({ origin: 'user' }); + }); + + it('should register an auto session origin', () => { + storage.registerSessionOrigin('/project/path', 'session-456', 'auto'); + + const origins = storage.getSessionOrigins('/project/path'); + expect(origins['session-456']).toEqual({ origin: 'auto' }); + }); + + it('should register origin with session name', () => { + storage.registerSessionOrigin('/project/path', 'session-789', 'user', 'My Session'); + + const origins = storage.getSessionOrigins('/project/path'); + expect(origins['session-789']).toEqual({ + origin: 'user', + sessionName: 'My Session', + }); + }); + + it('should handle multiple sessions for same project', () => { + storage.registerSessionOrigin('/project/path', 'session-1', 'user'); + storage.registerSessionOrigin('/project/path', 'session-2', 'auto'); + storage.registerSessionOrigin('/project/path', 'session-3', 'user', 'Named'); + + const origins = storage.getSessionOrigins('/project/path'); + expect(Object.keys(origins)).toHaveLength(3); + }); + + it('should handle multiple projects', () => { + storage.registerSessionOrigin('/project/a', 'session-a', 'user'); + storage.registerSessionOrigin('/project/b', 'session-b', 'auto'); + + expect(storage.getSessionOrigins('/project/a')['session-a']).toBeDefined(); + expect(storage.getSessionOrigins('/project/b')['session-b']).toBeDefined(); + expect(storage.getSessionOrigins('/project/a')['session-b']).toBeUndefined(); + 
}); + + it('should persist to store', () => { + storage.registerSessionOrigin('/project/path', 'session-123', 'user'); + + expect(mockStore.set).toHaveBeenCalledWith( + 'origins', + expect.objectContaining({ + '/project/path': expect.objectContaining({ + 'session-123': 'user', + }), + }) + ); + }); + }); + + describe('updateSessionName', () => { + it('should update name for existing session with string origin', () => { + storage.registerSessionOrigin('/project/path', 'session-123', 'user'); + storage.updateSessionName('/project/path', 'session-123', 'New Name'); + + const origins = storage.getSessionOrigins('/project/path'); + expect(origins['session-123']).toEqual({ + origin: 'user', + sessionName: 'New Name', + }); + }); + + it('should update name for existing session with object origin', () => { + storage.registerSessionOrigin('/project/path', 'session-123', 'user', 'Old Name'); + storage.updateSessionName('/project/path', 'session-123', 'New Name'); + + const origins = storage.getSessionOrigins('/project/path'); + expect(origins['session-123'].sessionName).toBe('New Name'); + }); + + it('should create origin entry if session not registered', () => { + storage.updateSessionName('/project/path', 'new-session', 'Session Name'); + + const origins = storage.getSessionOrigins('/project/path'); + expect(origins['new-session']).toEqual({ + origin: 'user', + sessionName: 'Session Name', + }); + }); + + it('should preserve existing starred status', () => { + storage.registerSessionOrigin('/project/path', 'session-123', 'user'); + storage.updateSessionStarred('/project/path', 'session-123', true); + storage.updateSessionName('/project/path', 'session-123', 'Named'); + + const origins = storage.getSessionOrigins('/project/path'); + expect(origins['session-123'].starred).toBe(true); + expect(origins['session-123'].sessionName).toBe('Named'); + }); + }); + + describe('updateSessionStarred', () => { + it('should star a session', () => { + 
storage.registerSessionOrigin('/project/path', 'session-123', 'user'); + storage.updateSessionStarred('/project/path', 'session-123', true); + + const origins = storage.getSessionOrigins('/project/path'); + expect(origins['session-123'].starred).toBe(true); + }); + + it('should unstar a session', () => { + storage.registerSessionOrigin('/project/path', 'session-123', 'user'); + storage.updateSessionStarred('/project/path', 'session-123', true); + storage.updateSessionStarred('/project/path', 'session-123', false); + + const origins = storage.getSessionOrigins('/project/path'); + expect(origins['session-123'].starred).toBe(false); + }); + + it('should create origin entry if session not registered', () => { + storage.updateSessionStarred('/project/path', 'new-session', true); + + const origins = storage.getSessionOrigins('/project/path'); + expect(origins['new-session']).toEqual({ + origin: 'user', + starred: true, + }); + }); + + it('should preserve existing session name', () => { + storage.registerSessionOrigin('/project/path', 'session-123', 'user', 'My Session'); + storage.updateSessionStarred('/project/path', 'session-123', true); + + const origins = storage.getSessionOrigins('/project/path'); + expect(origins['session-123'].sessionName).toBe('My Session'); + expect(origins['session-123'].starred).toBe(true); + }); + }); + + describe('updateSessionContextUsage', () => { + it('should store context usage percentage', () => { + storage.registerSessionOrigin('/project/path', 'session-123', 'user'); + storage.updateSessionContextUsage('/project/path', 'session-123', 75); + + const origins = storage.getSessionOrigins('/project/path'); + expect(origins['session-123'].contextUsage).toBe(75); + }); + + it('should create origin entry if session not registered', () => { + storage.updateSessionContextUsage('/project/path', 'new-session', 50); + + const origins = storage.getSessionOrigins('/project/path'); + expect(origins['new-session']).toEqual({ + origin: 'user', + 
contextUsage: 50, + }); + }); + + it('should preserve existing origin data', () => { + storage.registerSessionOrigin('/project/path', 'session-123', 'auto', 'Named'); + storage.updateSessionStarred('/project/path', 'session-123', true); + storage.updateSessionContextUsage('/project/path', 'session-123', 80); + + const origins = storage.getSessionOrigins('/project/path'); + expect(origins['session-123']).toEqual({ + origin: 'auto', + sessionName: 'Named', + starred: true, + contextUsage: 80, + }); + }); + + it('should update context usage on subsequent calls', () => { + storage.registerSessionOrigin('/project/path', 'session-123', 'user'); + storage.updateSessionContextUsage('/project/path', 'session-123', 25); + storage.updateSessionContextUsage('/project/path', 'session-123', 50); + storage.updateSessionContextUsage('/project/path', 'session-123', 75); + + const origins = storage.getSessionOrigins('/project/path'); + expect(origins['session-123'].contextUsage).toBe(75); + }); + }); + + describe('getSessionOrigins', () => { + it('should return empty object for project with no sessions', () => { + const origins = storage.getSessionOrigins('/nonexistent/project'); + expect(origins).toEqual({}); + }); + + it('should normalize string origins to SessionOriginInfo format', () => { + // Simulate legacy string-only origin stored directly + mockStoreData['origins'] = { + '/project/path': { + 'session-123': 'user', + }, + }; + + const origins = storage.getSessionOrigins('/project/path'); + expect(origins['session-123']).toEqual({ origin: 'user' }); + }); + + it('should return full SessionOriginInfo for object origins', () => { + storage.registerSessionOrigin('/project/path', 'session-123', 'user', 'Named'); + storage.updateSessionStarred('/project/path', 'session-123', true); + storage.updateSessionContextUsage('/project/path', 'session-123', 60); + + const origins = storage.getSessionOrigins('/project/path'); + expect(origins['session-123']).toEqual({ + origin: 'user', + 
sessionName: 'Named', + starred: true, + contextUsage: 60, + }); + }); + }); + }); + + describe('Session Path', () => { + describe('getSessionPath', () => { + it('should return correct local path', () => { + const sessionPath = storage.getSessionPath('/project/path', 'session-123'); + + expect(sessionPath).toBeDefined(); + expect(sessionPath).toContain('session-123.jsonl'); + expect(sessionPath).toContain('.claude'); + expect(sessionPath).toContain('projects'); + }); + + it('should return remote path when sshConfig provided', () => { + const sshConfig: SshRemoteConfig = { + id: 'test-remote', + name: 'Test Remote', + host: 'remote.example.com', + port: 22, + username: 'testuser', + privateKeyPath: '~/.ssh/id_rsa', + enabled: true, + useSshConfig: false, + }; + const sessionPath = storage.getSessionPath('/project/path', 'session-123', sshConfig); + + expect(sessionPath).toBeDefined(); + expect(sessionPath).toContain('session-123.jsonl'); + expect(sessionPath).toContain('~/.claude/projects'); + }); + }); + }); + + describe('Agent ID', () => { + it('should have correct agent ID', () => { + expect(storage.agentId).toBe('claude-code'); + }); + }); + + describe('Edge Cases', () => { + it('should handle special characters in project path', () => { + storage.registerSessionOrigin('/path/with spaces/and-dashes', 'session-1', 'user'); + + const origins = storage.getSessionOrigins('/path/with spaces/and-dashes'); + expect(origins['session-1']).toBeDefined(); + }); + + it('should handle special characters in session ID', () => { + storage.registerSessionOrigin('/project', 'session-with-dashes-123', 'user'); + storage.registerSessionOrigin('/project', 'session_with_underscores', 'auto'); + + const origins = storage.getSessionOrigins('/project'); + expect(origins['session-with-dashes-123']).toBeDefined(); + expect(origins['session_with_underscores']).toBeDefined(); + }); + + it('should handle empty session name', () => { + storage.registerSessionOrigin('/project', 'session-123', 
'user', ''); + + const origins = storage.getSessionOrigins('/project'); + // Empty string is falsy, so sessionName is not stored when empty + expect(origins['session-123']).toEqual({ origin: 'user' }); + }); + + it('should handle zero context usage', () => { + storage.updateSessionContextUsage('/project', 'session-123', 0); + + const origins = storage.getSessionOrigins('/project'); + expect(origins['session-123'].contextUsage).toBe(0); + }); + + it('should handle 100% context usage', () => { + storage.updateSessionContextUsage('/project', 'session-123', 100); + + const origins = storage.getSessionOrigins('/project'); + expect(origins['session-123'].contextUsage).toBe(100); + }); + }); + + describe('Storage Persistence', () => { + it('should call store.set on every origin update', () => { + storage.registerSessionOrigin('/project', 'session-1', 'user'); + expect(mockStore.set).toHaveBeenCalledTimes(1); + + storage.updateSessionName('/project', 'session-1', 'Name'); + expect(mockStore.set).toHaveBeenCalledTimes(2); + + storage.updateSessionStarred('/project', 'session-1', true); + expect(mockStore.set).toHaveBeenCalledTimes(3); + + storage.updateSessionContextUsage('/project', 'session-1', 50); + expect(mockStore.set).toHaveBeenCalledTimes(4); + }); + + it('should always call store.set with origins key', () => { + storage.registerSessionOrigin('/project', 'session-1', 'user'); + + expect(mockStore.set).toHaveBeenCalledWith('origins', expect.any(Object)); + }); + }); +}); diff --git a/src/__tests__/main/utils/agent-args.test.ts b/src/__tests__/main/utils/agent-args.test.ts index 10bd4d5b..c4951b42 100644 --- a/src/__tests__/main/utils/agent-args.test.ts +++ b/src/__tests__/main/utils/agent-args.test.ts @@ -10,7 +10,7 @@ import { applyAgentConfigOverrides, getContextWindowValue, } from '../../../main/utils/agent-args'; -import type { AgentConfig } from '../../../main/agent-detector'; +import type { AgentConfig } from '../../../main/agents'; /** * Helper to create a 
minimal AgentConfig for testing. diff --git a/src/__tests__/main/web-server/managers/LiveSessionManager.test.ts b/src/__tests__/main/web-server/managers/LiveSessionManager.test.ts new file mode 100644 index 00000000..7aef1aff --- /dev/null +++ b/src/__tests__/main/web-server/managers/LiveSessionManager.test.ts @@ -0,0 +1,476 @@ +/** + * Tests for LiveSessionManager + * + * Verifies: + * - Live session tracking (setLive, setOffline, isLive) + * - AutoRun state management + * - Broadcast callback integration + * - Memory leak prevention (cleanup on offline) + */ + +import { describe, it, expect, vi, beforeEach } from 'vitest'; +import { + LiveSessionManager, + LiveSessionBroadcastCallbacks, +} from '../../../../main/web-server/managers/LiveSessionManager'; + +// Mock the logger +vi.mock('../../../../main/utils/logger', () => ({ + logger: { + info: vi.fn(), + debug: vi.fn(), + warn: vi.fn(), + error: vi.fn(), + }, +})); + +describe('LiveSessionManager', () => { + let manager: LiveSessionManager; + let mockBroadcastCallbacks: LiveSessionBroadcastCallbacks; + + beforeEach(() => { + vi.clearAllMocks(); + manager = new LiveSessionManager(); + mockBroadcastCallbacks = { + broadcastSessionLive: vi.fn(), + broadcastSessionOffline: vi.fn(), + broadcastAutoRunState: vi.fn(), + }; + }); + + describe('Live Session Tracking', () => { + describe('setSessionLive', () => { + it('should mark a session as live', () => { + manager.setSessionLive('session-123'); + + expect(manager.isSessionLive('session-123')).toBe(true); + }); + + it('should store agent session ID when provided', () => { + manager.setSessionLive('session-123', 'agent-session-abc'); + + const info = manager.getLiveSessionInfo('session-123'); + expect(info?.agentSessionId).toBe('agent-session-abc'); + }); + + it('should record enabledAt timestamp', () => { + const before = Date.now(); + manager.setSessionLive('session-123'); + const after = Date.now(); + + const info = manager.getLiveSessionInfo('session-123'); + 
expect(info?.enabledAt).toBeGreaterThanOrEqual(before); + expect(info?.enabledAt).toBeLessThanOrEqual(after); + }); + + it('should broadcast session live when callbacks set', () => { + manager.setBroadcastCallbacks(mockBroadcastCallbacks); + manager.setSessionLive('session-123', 'agent-session-abc'); + + expect(mockBroadcastCallbacks.broadcastSessionLive).toHaveBeenCalledWith( + 'session-123', + 'agent-session-abc' + ); + }); + + it('should not broadcast when callbacks not set', () => { + // No error should occur when broadcasting without callbacks + manager.setSessionLive('session-123'); + expect(manager.isSessionLive('session-123')).toBe(true); + }); + + it('should update existing session info when called again', () => { + manager.setSessionLive('session-123', 'agent-1'); + const firstInfo = manager.getLiveSessionInfo('session-123'); + + manager.setSessionLive('session-123', 'agent-2'); + const secondInfo = manager.getLiveSessionInfo('session-123'); + + expect(secondInfo?.agentSessionId).toBe('agent-2'); + expect(secondInfo?.enabledAt).toBeGreaterThanOrEqual(firstInfo!.enabledAt); + }); + }); + + describe('setSessionOffline', () => { + it('should mark a session as offline', () => { + manager.setSessionLive('session-123'); + expect(manager.isSessionLive('session-123')).toBe(true); + + manager.setSessionOffline('session-123'); + expect(manager.isSessionLive('session-123')).toBe(false); + }); + + it('should broadcast session offline when callbacks set', () => { + manager.setBroadcastCallbacks(mockBroadcastCallbacks); + manager.setSessionLive('session-123'); + manager.setSessionOffline('session-123'); + + expect(mockBroadcastCallbacks.broadcastSessionOffline).toHaveBeenCalledWith('session-123'); + }); + + it('should not broadcast if session was not live', () => { + manager.setBroadcastCallbacks(mockBroadcastCallbacks); + manager.setSessionOffline('never-existed'); + + expect(mockBroadcastCallbacks.broadcastSessionOffline).not.toHaveBeenCalled(); + }); + + it('should 
clean up associated AutoRun state (memory leak prevention)', () => { + manager.setSessionLive('session-123'); + manager.setAutoRunState('session-123', { + isRunning: true, + totalTasks: 10, + completedTasks: 5, + currentTask: 'Task 5', + }); + + expect(manager.getAutoRunState('session-123')).toBeDefined(); + + manager.setSessionOffline('session-123'); + + expect(manager.getAutoRunState('session-123')).toBeUndefined(); + }); + }); + + describe('isSessionLive', () => { + it('should return false for non-existent session', () => { + expect(manager.isSessionLive('non-existent')).toBe(false); + }); + + it('should return true for live session', () => { + manager.setSessionLive('session-123'); + expect(manager.isSessionLive('session-123')).toBe(true); + }); + + it('should return false after session goes offline', () => { + manager.setSessionLive('session-123'); + manager.setSessionOffline('session-123'); + expect(manager.isSessionLive('session-123')).toBe(false); + }); + }); + + describe('getLiveSessionInfo', () => { + it('should return undefined for non-existent session', () => { + expect(manager.getLiveSessionInfo('non-existent')).toBeUndefined(); + }); + + it('should return complete session info', () => { + manager.setSessionLive('session-123', 'agent-session-abc'); + + const info = manager.getLiveSessionInfo('session-123'); + + expect(info).toEqual({ + sessionId: 'session-123', + agentSessionId: 'agent-session-abc', + enabledAt: expect.any(Number), + }); + }); + }); + + describe('getLiveSessions', () => { + it('should return empty array when no sessions', () => { + expect(manager.getLiveSessions()).toEqual([]); + }); + + it('should return all live sessions', () => { + manager.setSessionLive('session-1'); + manager.setSessionLive('session-2'); + manager.setSessionLive('session-3'); + + const sessions = manager.getLiveSessions(); + + expect(sessions).toHaveLength(3); + expect(sessions.map((s) => s.sessionId)).toContain('session-1'); + expect(sessions.map((s) => 
s.sessionId)).toContain('session-2'); + expect(sessions.map((s) => s.sessionId)).toContain('session-3'); + }); + + it('should not include offline sessions', () => { + manager.setSessionLive('session-1'); + manager.setSessionLive('session-2'); + manager.setSessionOffline('session-1'); + + const sessions = manager.getLiveSessions(); + + expect(sessions).toHaveLength(1); + expect(sessions[0].sessionId).toBe('session-2'); + }); + }); + + describe('getLiveSessionIds', () => { + it('should return iterable of session IDs', () => { + manager.setSessionLive('session-1'); + manager.setSessionLive('session-2'); + + const ids = Array.from(manager.getLiveSessionIds()); + + expect(ids).toHaveLength(2); + expect(ids).toContain('session-1'); + expect(ids).toContain('session-2'); + }); + }); + + describe('getLiveSessionCount', () => { + it('should return 0 when no sessions', () => { + expect(manager.getLiveSessionCount()).toBe(0); + }); + + it('should return correct count', () => { + manager.setSessionLive('session-1'); + manager.setSessionLive('session-2'); + manager.setSessionLive('session-3'); + + expect(manager.getLiveSessionCount()).toBe(3); + + manager.setSessionOffline('session-2'); + + expect(manager.getLiveSessionCount()).toBe(2); + }); + }); + }); + + describe('AutoRun State Management', () => { + describe('setAutoRunState', () => { + it('should store running AutoRun state', () => { + const state = { + isRunning: true, + totalTasks: 10, + completedTasks: 3, + currentTask: 'Task 3', + }; + + manager.setAutoRunState('session-123', state); + + expect(manager.getAutoRunState('session-123')).toEqual(state); + }); + + it('should remove state when isRunning is false', () => { + manager.setAutoRunState('session-123', { + isRunning: true, + totalTasks: 10, + completedTasks: 3, + currentTask: 'Task 3', + }); + + manager.setAutoRunState('session-123', { + isRunning: false, + totalTasks: 10, + completedTasks: 10, + currentTask: 'Complete', + }); + + 
expect(manager.getAutoRunState('session-123')).toBeUndefined(); + }); + + it('should remove state when null is passed', () => { + manager.setAutoRunState('session-123', { + isRunning: true, + totalTasks: 10, + completedTasks: 3, + currentTask: 'Task 3', + }); + + manager.setAutoRunState('session-123', null); + + expect(manager.getAutoRunState('session-123')).toBeUndefined(); + }); + + it('should broadcast AutoRun state when callbacks set', () => { + manager.setBroadcastCallbacks(mockBroadcastCallbacks); + const state = { + isRunning: true, + totalTasks: 10, + completedTasks: 3, + currentTask: 'Task 3', + }; + + manager.setAutoRunState('session-123', state); + + expect(mockBroadcastCallbacks.broadcastAutoRunState).toHaveBeenCalledWith( + 'session-123', + state + ); + }); + + it('should broadcast null state when clearing', () => { + manager.setBroadcastCallbacks(mockBroadcastCallbacks); + manager.setAutoRunState('session-123', { + isRunning: true, + totalTasks: 10, + completedTasks: 3, + currentTask: 'Task 3', + }); + + manager.setAutoRunState('session-123', null); + + expect(mockBroadcastCallbacks.broadcastAutoRunState).toHaveBeenLastCalledWith( + 'session-123', + null + ); + }); + }); + + describe('getAutoRunState', () => { + it('should return undefined for non-existent state', () => { + expect(manager.getAutoRunState('non-existent')).toBeUndefined(); + }); + + it('should return stored state', () => { + const state = { + isRunning: true, + totalTasks: 5, + completedTasks: 2, + currentTask: 'Task 2', + }; + manager.setAutoRunState('session-123', state); + + expect(manager.getAutoRunState('session-123')).toEqual(state); + }); + }); + + describe('getAutoRunStates', () => { + it('should return empty map when no states', () => { + const states = manager.getAutoRunStates(); + expect(states.size).toBe(0); + }); + + it('should return all stored states', () => { + manager.setAutoRunState('session-1', { + isRunning: true, + totalTasks: 5, + completedTasks: 1, + currentTask: 
'Task 1', + }); + manager.setAutoRunState('session-2', { + isRunning: true, + totalTasks: 10, + completedTasks: 5, + currentTask: 'Task 5', + }); + + const states = manager.getAutoRunStates(); + + expect(states.size).toBe(2); + expect(states.get('session-1')?.totalTasks).toBe(5); + expect(states.get('session-2')?.totalTasks).toBe(10); + }); + }); + }); + + describe('clearAll', () => { + it('should mark all live sessions as offline', () => { + manager.setBroadcastCallbacks(mockBroadcastCallbacks); + manager.setSessionLive('session-1'); + manager.setSessionLive('session-2'); + manager.setSessionLive('session-3'); + + manager.clearAll(); + + expect(manager.getLiveSessionCount()).toBe(0); + expect(mockBroadcastCallbacks.broadcastSessionOffline).toHaveBeenCalledTimes(3); + }); + + it('should clear all AutoRun states', () => { + manager.setSessionLive('session-1'); + manager.setAutoRunState('session-1', { + isRunning: true, + totalTasks: 5, + completedTasks: 1, + currentTask: 'Task 1', + }); + manager.setSessionLive('session-2'); + manager.setAutoRunState('session-2', { + isRunning: true, + totalTasks: 10, + completedTasks: 5, + currentTask: 'Task 5', + }); + + manager.clearAll(); + + expect(manager.getAutoRunStates().size).toBe(0); + }); + + it('should handle being called when already empty', () => { + // Should not throw + manager.clearAll(); + expect(manager.getLiveSessionCount()).toBe(0); + }); + }); + + describe('Integration Scenarios', () => { + it('should handle full session lifecycle', () => { + manager.setBroadcastCallbacks(mockBroadcastCallbacks); + + // Session comes online + manager.setSessionLive('session-123', 'agent-abc'); + expect(manager.isSessionLive('session-123')).toBe(true); + expect(mockBroadcastCallbacks.broadcastSessionLive).toHaveBeenCalled(); + + // AutoRun starts + manager.setAutoRunState('session-123', { + isRunning: true, + totalTasks: 5, + completedTasks: 0, + currentTask: 'Task 1', + }); + 
expect(mockBroadcastCallbacks.broadcastAutoRunState).toHaveBeenCalled(); + + // AutoRun progresses + manager.setAutoRunState('session-123', { + isRunning: true, + totalTasks: 5, + completedTasks: 3, + currentTask: 'Task 4', + }); + + // AutoRun completes + manager.setAutoRunState('session-123', { + isRunning: false, + totalTasks: 5, + completedTasks: 5, + currentTask: 'Complete', + }); + expect(manager.getAutoRunState('session-123')).toBeUndefined(); + + // Session goes offline + manager.setSessionOffline('session-123'); + expect(manager.isSessionLive('session-123')).toBe(false); + expect(mockBroadcastCallbacks.broadcastSessionOffline).toHaveBeenCalled(); + }); + + it('should handle multiple concurrent sessions', () => { + manager.setSessionLive('session-1', 'agent-1'); + manager.setSessionLive('session-2', 'agent-2'); + manager.setSessionLive('session-3', 'agent-3'); + + manager.setAutoRunState('session-1', { + isRunning: true, + totalTasks: 3, + completedTasks: 1, + currentTask: 'Task 1', + }); + manager.setAutoRunState('session-3', { + isRunning: true, + totalTasks: 5, + completedTasks: 2, + currentTask: 'Task 2', + }); + + expect(manager.getLiveSessionCount()).toBe(3); + expect(manager.getAutoRunStates().size).toBe(2); + + // Session 2 goes offline (no AutoRun state to clean) + manager.setSessionOffline('session-2'); + expect(manager.getLiveSessionCount()).toBe(2); + expect(manager.getAutoRunStates().size).toBe(2); + + // Session 1 goes offline (has AutoRun state) + manager.setSessionOffline('session-1'); + expect(manager.getLiveSessionCount()).toBe(1); + expect(manager.getAutoRunStates().size).toBe(1); + expect(manager.getAutoRunState('session-1')).toBeUndefined(); + expect(manager.getAutoRunState('session-3')).toBeDefined(); + }); + }); +}); diff --git a/src/__tests__/renderer/components/HistoryDetailModal.test.tsx b/src/__tests__/renderer/components/HistoryDetailModal.test.tsx index ed0e829e..1db065af 100644 --- 
a/src/__tests__/renderer/components/HistoryDetailModal.test.tsx +++ b/src/__tests__/renderer/components/HistoryDetailModal.test.tsx @@ -460,7 +460,7 @@ describe('HistoryDetailModal', () => { usageStats: { inputTokens: 5000, outputTokens: 1000, - cacheReadInputTokens: 2000, // Included in calculation (occupies context) + cacheReadInputTokens: 2000, // Included in calculation (occupies context window) cacheCreationInputTokens: 5000, contextWindow: 100000, totalCostUsd: 0.1, @@ -470,8 +470,8 @@ describe('HistoryDetailModal', () => { /> ); - // Context = (inputTokens + cacheCreationInputTokens + cacheReadInputTokens) / contextWindow - // (5000 + 5000 + 2000) / 100000 = 12% + // Context = (inputTokens + cacheReadInputTokens + cacheCreationInputTokens) / contextWindow + // (5000 + 2000 + 5000) / 100000 = 12% expect(screen.getByText('12%')).toBeInTheDocument(); }); diff --git a/src/__tests__/renderer/components/MainPanel.test.tsx b/src/__tests__/renderer/components/MainPanel.test.tsx index 3ddf0639..3808e781 100644 --- a/src/__tests__/renderer/components/MainPanel.test.tsx +++ b/src/__tests__/renderer/components/MainPanel.test.tsx @@ -336,6 +336,7 @@ describe('MainPanel', () => { slashCommandOpen: false, slashCommands: [], selectedSlashCommandIndex: 0, + previewFile: null, markdownEditMode: false, shortcuts: defaultShortcuts, rightPanelOpen: true, @@ -634,8 +635,100 @@ describe('MainPanel', () => { }); }); - // Note: Legacy previewFile tests removed - file preview is now handled via the tab system - // File tabs have their own content rendering and closing behavior + describe('File Preview mode', () => { + it('should render FilePreview when previewFile is set', () => { + const previewFile = { name: 'test.ts', content: 'test content', path: '/test/test.ts' }; + render(); + + expect(screen.getByTestId('file-preview')).toBeInTheDocument(); + expect(screen.getByText('File Preview: test.ts')).toBeInTheDocument(); + }); + + it('should hide TabBar when file preview is open', () => { 
const previewFile = { name: 'test.ts', content: 'test content', path: '/test/test.ts' }; + render(); + + expect(screen.queryByTestId('tab-bar')).not.toBeInTheDocument(); + }); + + it('should call setPreviewFile(null) and setActiveFocus when closing preview', () => { + const setPreviewFile = vi.fn(); + const setActiveFocus = vi.fn(); + const previewFile = { name: 'test.ts', content: 'test content', path: '/test/test.ts' }; + + render( + + ); + + fireEvent.click(screen.getByTestId('file-preview-close')); + + expect(setPreviewFile).toHaveBeenCalledWith(null); + expect(setActiveFocus).toHaveBeenCalledWith('right'); + }); + + it('should focus file tree container when closing preview (setTimeout callback)', async () => { + vi.useFakeTimers(); + const setPreviewFile = vi.fn(); + const setActiveFocus = vi.fn(); + const previewFile = { name: 'test.ts', content: 'test content', path: '/test/test.ts' }; + const fileTreeContainerRef = { current: { focus: vi.fn() } }; + + render( + + ); + + fireEvent.click(screen.getByTestId('file-preview-close')); + + // Run the setTimeout callback + await act(async () => { + vi.advanceTimersByTime(1); + }); + + expect(fileTreeContainerRef.current.focus).toHaveBeenCalled(); + vi.useRealTimers(); + }); + + it('should focus file tree filter input when closing preview with filter open', async () => { + vi.useFakeTimers(); + const setPreviewFile = vi.fn(); + const setActiveFocus = vi.fn(); + const previewFile = { name: 'test.ts', content: 'test content', path: '/test/test.ts' }; + const fileTreeFilterInputRef = { current: { focus: vi.fn() } }; + + render( + + ); + + fireEvent.click(screen.getByTestId('file-preview-close')); + + // Run the setTimeout callback + await act(async () => { + vi.advanceTimersByTime(1); + }); + + expect(fileTreeFilterInputRef.current.focus).toHaveBeenCalled(); + vi.useRealTimers(); + }); + }); describe('Tab Bar', () => { it('should render TabBar in AI mode with tabs', () => { @@ -1862,7 +1955,7 @@ describe('MainPanel', () 
=> { ); - // Context usage should be (50000 + 25000) / 200000 * 100 = 37.5% -> 38% + // Context usage: (50000 + 25000 + 0) / 200000 * 100 = 38% (input + cacheRead + cacheCreation) expect(getContextColor).toHaveBeenCalledWith(38, theme); }); }); @@ -2143,41 +2236,6 @@ describe('MainPanel', () => { expect(writeText).toHaveBeenCalledWith('https://github.com/user/repo.git'); }); - - it('should open remote URL in system browser when clicked', async () => { - setMockGitStatus('session-1', { - fileCount: 0, - branch: 'main', - remote: 'https://github.com/user/repo.git', - ahead: 0, - behind: 0, - totalAdditions: 0, - totalDeletions: 0, - modifiedCount: 0, - fileChanges: [], - lastUpdated: Date.now(), - }); - - const session = createSession({ isGitRepo: true }); - render(); - - await waitFor(() => { - expect(screen.getByText(/main|GIT/)).toBeInTheDocument(); - }); - - const gitBadge = screen.getByText(/main|GIT/); - fireEvent.mouseEnter(gitBadge.parentElement!); - - await waitFor(() => { - expect(screen.getByText('github.com/user/repo')).toBeInTheDocument(); - }); - - // Click the remote URL link - const remoteLink = screen.getByText('github.com/user/repo'); - fireEvent.click(remoteLink); - - expect(window.maestro.shell.openExternal).toHaveBeenCalledWith('https://github.com/user/repo'); - }); }); describe('Edge cases', () => { @@ -2315,9 +2373,10 @@ describe('MainPanel', () => { expect(screen.queryByText('Context Window')).not.toBeInTheDocument(); }); - it('should cap context usage at 100%', () => { - const getContextColor = vi.fn().mockReturnValue('#ef4444'); + it('should use preserved session.contextUsage when accumulated values exceed window', () => { + const getContextColor = vi.fn().mockReturnValue('#22c55e'); const session = createSession({ + contextUsage: 45, // Preserved valid percentage from last non-accumulated update aiTabs: [ { id: 'tab-1', @@ -2328,8 +2387,8 @@ describe('MainPanel', () => { usageStats: { inputTokens: 150000, outputTokens: 100000, - 
cacheReadInputTokens: 100000, // Excluded from calculation (cumulative) - cacheCreationInputTokens: 100000, // Included in calculation + cacheReadInputTokens: 100000, // Accumulated from multi-tool turn + cacheCreationInputTokens: 100000, // Accumulated from multi-tool turn totalCostUsd: 0.05, contextWindow: 200000, }, @@ -2342,8 +2401,9 @@ describe('MainPanel', () => { ); - // Context usage: (150000 + 100000) / 200000 = 125% -> capped at 100% - expect(getContextColor).toHaveBeenCalledWith(100, theme); + // raw = 150000 + 100000 + 100000 = 350000 > 200000 (accumulated) + // Falls back to session.contextUsage = 45% + expect(getContextColor).toHaveBeenCalledWith(45, theme); }); }); @@ -2930,8 +2990,32 @@ describe('MainPanel', () => { expect(screen.getByText(longMessage)).toBeInTheDocument(); }); - // Note: Legacy test for previewFile removed - file preview is now handled via the tab system - // The error banner still displays above file tabs when activeFileTabId is set + it('should still display error banner when previewFile is open', () => { + // The error banner appears above file preview in the layout hierarchy + // This ensures users see critical errors even while previewing files + const previewFile = { name: 'test.ts', content: 'test content', path: '/test/test.ts' }; + const session = createSession({ + inputMode: 'ai', + aiTabs: [ + { + id: 'tab-1', + name: 'Tab 1', + isUnread: false, + createdAt: Date.now(), + agentError: createAgentError(), + }, + ], + activeTabId: 'tab-1', + }); + + render(); + + // Both error banner and file preview should be visible + expect( + screen.getByText('Authentication token has expired. 
Please re-authenticate.') + ).toBeInTheDocument(); + expect(screen.getByTestId('file-preview')).toBeInTheDocument(); + }); it('should handle error with empty message gracefully', () => { const session = createSession({ @@ -2993,7 +3077,7 @@ describe('MainPanel', () => { previousUIState: { readOnlyMode: false, saveToHistory: true, - showThinking: 'off', + showThinking: false, }, }); @@ -3022,7 +3106,7 @@ describe('MainPanel', () => { previousUIState: { readOnlyMode: false, saveToHistory: true, - showThinking: 'off', + showThinking: false, }, }); @@ -3044,7 +3128,7 @@ describe('MainPanel', () => { previousUIState: { readOnlyMode: false, saveToHistory: true, - showThinking: 'off', + showThinking: false, }, }); @@ -3063,7 +3147,7 @@ describe('MainPanel', () => { previousUIState: { readOnlyMode: false, saveToHistory: true, - showThinking: 'off', + showThinking: false, }, }); @@ -3088,7 +3172,7 @@ describe('MainPanel', () => { previousUIState: { readOnlyMode: false, saveToHistory: true, - showThinking: 'off', + showThinking: false, }, }, { @@ -3102,151 +3186,4 @@ describe('MainPanel', () => { expect(screen.getByTestId('wizard-conversation-view')).toBeInTheDocument(); }); }); - - describe('File Tab Loading State (SSH Remote Files)', () => { - // Helper to create a file preview tab - const createFileTab = ( - overrides: Partial = {} - ): import('../../../renderer/types').FilePreviewTab => ({ - id: 'file-tab-1', - path: '/remote/path/file.ts', - name: 'file', - extension: '.ts', - content: '', - scrollTop: 0, - searchQuery: '', - editMode: false, - editContent: undefined, - createdAt: Date.now(), - lastModified: 0, - ...overrides, - }); - - it('should display loading spinner when file tab isLoading is true', () => { - const fileTab = createFileTab({ - sshRemoteId: 'ssh-remote-1', - isLoading: true, // SSH remote file loading - }); - - const session = createSession({ - inputMode: 'ai', - filePreviewTabs: [fileTab], - activeFileTabId: 'file-tab-1', - unifiedTabOrder: [ - { 
type: 'ai' as const, id: 'tab-1' }, - { type: 'file' as const, id: 'file-tab-1' }, - ], - }); - - render( - - ); - - // Should display loading text with file name - expect(screen.getByText('Loading file.ts')).toBeInTheDocument(); - // Should display "Fetching from remote server..." subtitle - expect(screen.getByText('Fetching from remote server...')).toBeInTheDocument(); - }); - - it('should render FilePreview when file tab isLoading is false', () => { - const fileTab = createFileTab({ - content: 'const x = 1;', - lastModified: Date.now(), - sshRemoteId: 'ssh-remote-1', - isLoading: false, // Loading complete - }); - - const session = createSession({ - inputMode: 'ai', - filePreviewTabs: [fileTab], - activeFileTabId: 'file-tab-1', - unifiedTabOrder: [ - { type: 'ai' as const, id: 'tab-1' }, - { type: 'file' as const, id: 'file-tab-1' }, - ], - }); - - render( - - ); - - // Should render file preview (mocked component) - expect(screen.getByTestId('file-preview')).toBeInTheDocument(); - // Should NOT display loading state - expect(screen.queryByText('Fetching from remote server...')).not.toBeInTheDocument(); - }); - - it('should display loading state for file tab without sshRemoteId (local file loading)', () => { - const fileTab = createFileTab({ - path: '/local/path/config.json', - name: 'config', - extension: '.json', - isLoading: true, // Even local files can show loading briefly - }); - - const session = createSession({ - inputMode: 'ai', - filePreviewTabs: [fileTab], - activeFileTabId: 'file-tab-1', - unifiedTabOrder: [ - { type: 'ai' as const, id: 'tab-1' }, - { type: 'file' as const, id: 'file-tab-1' }, - ], - }); - - render( - - ); - - // Should display loading text with file name - expect(screen.getByText('Loading config.json')).toBeInTheDocument(); - }); - - it('should not show loading state when AI tab is active', () => { - const fileTab = createFileTab({ - sshRemoteId: 'ssh-remote-1', - isLoading: true, // Loading but not active - }); - - const session = 
createSession({ - inputMode: 'ai', - filePreviewTabs: [fileTab], - activeFileTabId: null, // AI tab is active, not file tab - activeTabId: 'tab-1', - unifiedTabOrder: [ - { type: 'ai' as const, id: 'tab-1' }, - { type: 'file' as const, id: 'file-tab-1' }, - ], - }); - - render( - - ); - - // Should NOT display loading state (file tab is not active) - expect(screen.queryByText('Fetching from remote server...')).not.toBeInTheDocument(); - // Should display terminal output (default for AI tab) - expect(screen.getByTestId('terminal-output')).toBeInTheDocument(); - }); - }); }); diff --git a/src/__tests__/renderer/hooks/useAtMentionCompletion.test.ts b/src/__tests__/renderer/hooks/useAtMentionCompletion.test.ts index 28b2aa01..601a7984 100644 --- a/src/__tests__/renderer/hooks/useAtMentionCompletion.test.ts +++ b/src/__tests__/renderer/hooks/useAtMentionCompletion.test.ts @@ -860,4 +860,73 @@ describe('useAtMentionCompletion', () => { expect(matchingFiles.length).toBeGreaterThan(0); }); }); + + // ============================================================================= + // PERFORMANCE OPTIMIZATION TESTS + // ============================================================================= + + describe('performance optimizations', () => { + it('caps file tree traversal at MAX_FILE_TREE_ENTRIES', () => { + // Generate a tree with more than 50k files + const largeFolder: FileNode[] = []; + for (let i = 0; i < 200; i++) { + const children: FileNode[] = []; + for (let j = 0; j < 300; j++) { + children.push(createFile(`file_${i}_${j}.ts`)); + } + largeFolder.push(createFolder(`dir_${i}`, children)); + } + // This tree has 200 folders + 60,000 files = 60,200 nodes total + + const session = createMockSession(largeFolder); + const { result } = renderHook(() => useAtMentionCompletion(session)); + + // With empty filter, should return at most 15 suggestions + const suggestions = result.current.getSuggestions(''); + expect(suggestions.length).toBeLessThanOrEqual(15); + + // With 
a filter that would match many files, should still return max 15 + const filtered = result.current.getSuggestions('file'); + expect(filtered.length).toBeLessThanOrEqual(15); + }); + + it('empty filter skips fuzzy matching and returns sorted results', () => { + const session = createMockSession([ + createFolder('zebra'), + createFile('banana.ts'), + createFile('apple.ts'), + ]); + const { result } = renderHook(() => useAtMentionCompletion(session)); + + const suggestions = result.current.getSuggestions(''); + // Files should come before folders, then alphabetical + expect(suggestions[0].displayText).toBe('apple.ts'); + expect(suggestions[1].displayText).toBe('banana.ts'); + expect(suggestions[2].displayText).toBe('zebra'); + // All scores should be 0 (no fuzzy matching performed) + expect(suggestions.every((s) => s.score === 0)).toBe(true); + }); + + it('early exits after enough exact substring matches', () => { + // Create 200 files that contain "match" in their name (exact substring matches) + // plus files that would only fuzzy-match + const files: FileNode[] = []; + for (let i = 0; i < 200; i++) { + files.push(createFile(`match_${i}.ts`)); + } + // Add some files that would only fuzzy match (no "match" substring) + for (let i = 0; i < 100; i++) { + files.push(createFile(`m_a_t_c_h_${i}.ts`)); + } + + const session = createMockSession(files); + const { result } = renderHook(() => useAtMentionCompletion(session)); + + const suggestions = result.current.getSuggestions('match'); + // Should still return valid results with max 15 + expect(suggestions.length).toBe(15); + // Top results should be exact substring matches (higher score) + expect(suggestions[0].displayText).toContain('match'); + }); + }); }); diff --git a/src/__tests__/renderer/hooks/useGitStatusPolling.test.ts b/src/__tests__/renderer/hooks/useGitStatusPolling.test.ts index 850ac3a2..30674c0f 100644 --- a/src/__tests__/renderer/hooks/useGitStatusPolling.test.ts +++ 
b/src/__tests__/renderer/hooks/useGitStatusPolling.test.ts @@ -9,7 +9,7 @@ import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest'; import { renderHook, act, waitFor } from '@testing-library/react'; -import { useGitStatusPolling } from '../../../renderer/hooks'; +import { useGitStatusPolling, getScaledPollInterval } from '../../../renderer/hooks'; import type { Session } from '../../../renderer/types'; import { gitService } from '../../../renderer/services/git'; @@ -109,4 +109,37 @@ describe('useGitStatusPolling', () => { expect(gitService.getStatus).toHaveBeenCalledTimes(1); }); }); + + describe('getScaledPollInterval', () => { + it('returns default 30s for 1-3 git sessions', () => { + expect(getScaledPollInterval(30000, 1)).toBe(30000); + expect(getScaledPollInterval(30000, 2)).toBe(30000); + expect(getScaledPollInterval(30000, 3)).toBe(30000); + }); + + it('returns 45s for 4-7 git sessions', () => { + expect(getScaledPollInterval(30000, 4)).toBe(45000); + expect(getScaledPollInterval(30000, 7)).toBe(45000); + }); + + it('returns 60s for 8-12 git sessions', () => { + expect(getScaledPollInterval(30000, 8)).toBe(60000); + expect(getScaledPollInterval(30000, 12)).toBe(60000); + }); + + it('returns 90s for 13+ git sessions', () => { + expect(getScaledPollInterval(30000, 13)).toBe(90000); + expect(getScaledPollInterval(30000, 50)).toBe(90000); + }); + + it('does not scale custom (non-default) poll intervals', () => { + // A user-configured interval of 10s should not be scaled + expect(getScaledPollInterval(10000, 10)).toBe(10000); + expect(getScaledPollInterval(60000, 20)).toBe(60000); + }); + + it('returns 30s for zero git sessions', () => { + expect(getScaledPollInterval(30000, 0)).toBe(30000); + }); + }); }); diff --git a/src/__tests__/renderer/hooks/useTabCompletion.test.ts b/src/__tests__/renderer/hooks/useTabCompletion.test.ts index 17230419..0631f48e 100644 --- a/src/__tests__/renderer/hooks/useTabCompletion.test.ts +++ 
b/src/__tests__/renderer/hooks/useTabCompletion.test.ts @@ -1088,4 +1088,30 @@ describe('useTabCompletion', () => { expect(filters.length).toBe(5); }); }); + + describe('performance optimizations', () => { + it('caps file tree traversal at MAX_FILE_TREE_ENTRIES', () => { + // Generate a tree with more than 50k files + const largeTree: FileNode[] = []; + for (let i = 0; i < 200; i++) { + const children: FileNode[] = []; + for (let j = 0; j < 300; j++) { + children.push({ name: `file_${i}_${j}.ts`, type: 'file' }); + } + largeTree.push({ name: `dir_${i}`, type: 'folder', children }); + } + // 200 folders + 60,000 files = 60,200 nodes total + + const session = createMockSession({ + fileTree: largeTree, + shellCwd: '/project', + }); + const { result } = renderHook(() => useTabCompletion(session)); + + // Even with 60k+ files, getSuggestions should work without hanging + // and return at most 15 results + const suggestions = result.current.getSuggestions('file', 'file'); + expect(suggestions.length).toBeLessThanOrEqual(15); + }); + }); }); diff --git a/src/__tests__/renderer/utils/contextExtractor.test.ts b/src/__tests__/renderer/utils/contextExtractor.test.ts index b9ad8c42..149cd1aa 100644 --- a/src/__tests__/renderer/utils/contextExtractor.test.ts +++ b/src/__tests__/renderer/utils/contextExtractor.test.ts @@ -650,9 +650,8 @@ describe('calculateTotalTokens', () => { const total = calculateTotalTokens(contexts); - // Per Anthropic docs: input + cacheRead + cacheCreation for each context - // (100+50+25) + (300+75+25) = 575 - expect(total).toBe(575); + // input + cacheRead + cacheCreation for each context + expect(total).toBe(575); // (100+50+25) + (300+75+25) }); }); @@ -695,7 +694,7 @@ describe('getContextSummary', () => { expect(summary.totalSources).toBe(2); expect(summary.totalLogs).toBe(5); - // Per Anthropic docs: (100+50+25) + (200+75+25) = 475 + // (100+50+25) + (200+75+25) = 475 (input + cacheRead + cacheCreation) expect(summary.estimatedTokens).toBe(475); 
expect(summary.byAgent['claude-code']).toBe(1); expect(summary.byAgent['opencode']).toBe(1); diff --git a/src/__tests__/renderer/utils/contextUsage.test.ts b/src/__tests__/renderer/utils/contextUsage.test.ts index a31d894a..b3865c22 100644 --- a/src/__tests__/renderer/utils/contextUsage.test.ts +++ b/src/__tests__/renderer/utils/contextUsage.test.ts @@ -1,13 +1,5 @@ /** * Tests for context usage estimation utilities - * - * Claude Code reports per-turn context window usage directly (no normalization needed). - * Codex reports cumulative session totals, which are normalized in StdoutHandler. - * - * Per Anthropic documentation: - * total_context = input_tokens + cache_read_input_tokens + cache_creation_input_tokens - * - * @see https://platform.claude.com/docs/en/build-with-claude/prompt-caching */ import { @@ -36,13 +28,11 @@ describe('estimateContextUsage', () => { expect(result).toBe(10); }); - it('should include cacheReadInputTokens in context calculation (per Anthropic docs)', () => { - // Per Anthropic docs: total_context = input + cacheRead + cacheCreation - // Claude Code reports per-turn values directly, Codex is normalized in StdoutHandler + it('should include cacheReadInputTokens in calculation (part of total input context)', () => { const stats = createStats({ inputTokens: 1000, outputTokens: 500, - cacheReadInputTokens: 50000, // INCLUDED - represents cached context for this turn + cacheReadInputTokens: 50000, cacheCreationInputTokens: 5000, contextWindow: 100000, }); @@ -51,17 +41,17 @@ describe('estimateContextUsage', () => { expect(result).toBe(56); }); - it('should cap at 100%', () => { + it('should return null when accumulated tokens exceed context window', () => { const stats = createStats({ inputTokens: 50000, outputTokens: 50000, - cacheReadInputTokens: 100000, // Large cached context - cacheCreationInputTokens: 100000, // Large new cache + cacheReadInputTokens: 150000, + cacheCreationInputTokens: 200000, contextWindow: 200000, }); const result 
= estimateContextUsage(stats, 'claude-code'); - // (50000 + 100000 + 100000) / 200000 = 125% -> capped at 100% - expect(result).toBe(100); + // (50000 + 150000 + 200000) = 400000 > 200000 -> null (accumulated values) + expect(result).toBeNull(); }); it('should round to nearest integer', () => { @@ -85,10 +75,16 @@ describe('estimateContextUsage', () => { expect(result).toBe(5); }); + it('should use claude default context window (200k)', () => { + const stats = createStats({ contextWindow: 0 }); + const result = estimateContextUsage(stats, 'claude'); + expect(result).toBe(5); + }); + it('should use codex default context window (200k) and include output tokens', () => { const stats = createStats({ contextWindow: 0 }); const result = estimateContextUsage(stats, 'codex'); - // Codex includes output tokens: (10000 + 5000 + 0 + 0) / 200000 = 7.5% -> 8% + // Codex includes output tokens: (10000 + 5000 + 0) / 200000 = 7.5% -> 8% expect(result).toBe(8); }); @@ -99,11 +95,10 @@ describe('estimateContextUsage', () => { expect(result).toBe(8); }); - it('should use factory-droid default context window (200k)', () => { + it('should use aider default context window (128k)', () => { const stats = createStats({ contextWindow: 0 }); - const result = estimateContextUsage(stats, 'factory-droid'); - // (10000 + 0 + 0) / 200000 = 5% - expect(result).toBe(5); + const result = estimateContextUsage(stats, 'aider'); + expect(result).toBe(8); }); it('should return null for terminal agent', () => { @@ -140,23 +135,24 @@ describe('estimateContextUsage', () => { // @ts-expect-error - testing undefined case stats.cacheReadInputTokens = undefined; const result = estimateContextUsage(stats, 'claude-code'); - // (10000 + 0 + 0) / 100000 = 10% + // (10000 + 0) / 100000 = 10% expect(result).toBe(10); }); - it('should include cache read tokens in context (represents context window usage)', () => { - // Per Anthropic docs, cacheRead represents tokens retrieved from cache - // and DOES occupy context 
window space for this turn. + it('should return null when accumulated cacheRead tokens cause total to exceed context window', () => { + // During multi-tool turns, Claude Code accumulates token values across + // internal API calls. When accumulated total exceeds context window, + // return null to signal callers should preserve previous valid percentage. const stats = createStats({ inputTokens: 500, outputTokens: 1000, - cacheReadInputTokens: 100000, // Large cached context for this turn + cacheReadInputTokens: 758000, // accumulated across multi-tool turn cacheCreationInputTokens: 50000, contextWindow: 200000, }); const result = estimateContextUsage(stats, 'claude-code'); - // (500 + 100000 + 50000) / 200000 = 75% - expect(result).toBe(75); + // (500 + 758000 + 50000) = 808500 > 200000 -> null (accumulated values) + expect(result).toBeNull(); }); }); @@ -177,17 +173,17 @@ describe('estimateContextUsage', () => { expect(result).toBe(5); }); - it('should handle very large token counts', () => { + it('should return null for very large accumulated token counts', () => { const stats = createStats({ inputTokens: 250000, outputTokens: 500000, - cacheReadInputTokens: 50000, - cacheCreationInputTokens: 50000, + cacheReadInputTokens: 500000, + cacheCreationInputTokens: 250000, contextWindow: 0, }); const result = estimateContextUsage(stats, 'claude-code'); - // (250000 + 50000 + 50000) / 200000 = 175% -> capped at 100% - expect(result).toBe(100); + // (250000 + 500000 + 250000) = 1000000 > 200000 -> null (accumulated values) + expect(result).toBeNull(); }); it('should handle very small percentages', () => { @@ -198,7 +194,7 @@ describe('estimateContextUsage', () => { contextWindow: 0, }); const result = estimateContextUsage(stats, 'claude-code'); - // (100 + 0 + 0) / 200000 = 0.05% -> 0% (output excluded for Claude) + // (100 + 0) / 200000 = 0.05% -> 0% (output excluded for Claude) expect(result).toBe(0); }); }); @@ -218,28 +214,34 @@ describe('calculateContextTokens', () 
=> { ...overrides, }); - describe('Claude agents (per Anthropic formula: input + cacheRead + cacheCreation)', () => { - it('should include all input-related tokens for claude-code', () => { + describe('Claude agents (input + cacheRead + cacheCreation)', () => { + it('should include input, cacheRead, and cacheCreation tokens for claude-code', () => { const stats = createStats(); const result = calculateContextTokens(stats, 'claude-code'); - // Per Anthropic docs: 10000 + 2000 + 1000 = 13000 + // 10000 + 2000 + 1000 = 13000 (excludes output only) expect(result).toBe(13000); }); - it('should include all input-related tokens when agent is undefined (defaults to Claude)', () => { + it('should include input, cacheRead, and cacheCreation tokens for claude', () => { + const stats = createStats(); + const result = calculateContextTokens(stats, 'claude'); + expect(result).toBe(13000); + }); + + it('should include input, cacheRead, and cacheCreation tokens when agent is undefined', () => { const stats = createStats(); const result = calculateContextTokens(stats); - // Defaults to Claude behavior: input + cacheRead + cacheCreation + // Defaults to Claude behavior expect(result).toBe(13000); }); }); - describe('OpenAI agents (includes output tokens in combined limit)', () => { - it('should include output tokens for codex', () => { + describe('OpenAI agents (includes output tokens)', () => { + it('should include input, output, and cacheCreation tokens for codex', () => { const stats = createStats(); const result = calculateContextTokens(stats, 'codex'); - // 10000 + 2000 + 1000 + 5000 = 18000 (input + cacheRead + cacheCreation + output) - expect(result).toBe(18000); + // 10000 + 5000 + 1000 = 16000 (input + output + cacheCreation, excludes cacheRead) + expect(result).toBe(16000); }); }); @@ -266,18 +268,19 @@ describe('calculateContextTokens', () => { expect(result).toBe(10000); }); - it('should include cacheRead in context calculation (per Anthropic docs)', () => { - // Per 
Anthropic documentation, total_context = input + cacheRead + cacheCreation - // All three components occupy context window space. + it('should include cacheRead in raw calculation (callers detect accumulated values)', () => { + // calculateContextTokens returns the raw total including cacheRead. + // Callers (estimateContextUsage) detect when total > contextWindow + // and return null to signal accumulated values from multi-tool turns. const stats = createStats({ inputTokens: 50000, outputTokens: 9000, - cacheReadInputTokens: 100000, // INCLUDED - represents cached context - cacheCreationInputTokens: 25000, + cacheReadInputTokens: 758000, + cacheCreationInputTokens: 75000, }); const result = calculateContextTokens(stats, 'claude-code'); - // 50000 + 100000 + 25000 = 175000 - expect(result).toBe(175000); + // 50000 + 758000 + 75000 = 883000 (raw total, callers check against window) + expect(result).toBe(883000); }); }); }); @@ -285,9 +288,10 @@ describe('calculateContextTokens', () => { describe('DEFAULT_CONTEXT_WINDOWS', () => { it('should have context windows defined for all known agent types', () => { expect(DEFAULT_CONTEXT_WINDOWS['claude-code']).toBe(200000); + expect(DEFAULT_CONTEXT_WINDOWS['claude']).toBe(200000); expect(DEFAULT_CONTEXT_WINDOWS['codex']).toBe(200000); expect(DEFAULT_CONTEXT_WINDOWS['opencode']).toBe(128000); - expect(DEFAULT_CONTEXT_WINDOWS['factory-droid']).toBe(200000); + expect(DEFAULT_CONTEXT_WINDOWS['aider']).toBe(128000); expect(DEFAULT_CONTEXT_WINDOWS['terminal']).toBe(0); }); }); diff --git a/src/__tests__/shared/contextUsage.test.ts b/src/__tests__/shared/contextUsage.test.ts deleted file mode 100644 index 6c8020dc..00000000 --- a/src/__tests__/shared/contextUsage.test.ts +++ /dev/null @@ -1,313 +0,0 @@ -/** - * Tests for the Context Usage Estimation Utilities. 
- * - * These tests verify: - * - DEFAULT_CONTEXT_WINDOWS constant values - * - COMBINED_CONTEXT_AGENTS membership - * - calculateContextTokens() with various agent types and token fields - * - estimateContextUsage() percentage calculation, fallback logic, and capping - */ - -import { describe, it, expect } from 'vitest'; -import { - DEFAULT_CONTEXT_WINDOWS, - COMBINED_CONTEXT_AGENTS, - calculateContextTokens, - estimateContextUsage, - type ContextUsageStats, -} from '../../shared/contextUsage'; - -describe('DEFAULT_CONTEXT_WINDOWS', () => { - it('should have the correct context window for claude-code', () => { - expect(DEFAULT_CONTEXT_WINDOWS['claude-code']).toBe(200000); - }); - - it('should have the correct context window for codex', () => { - expect(DEFAULT_CONTEXT_WINDOWS['codex']).toBe(200000); - }); - - it('should have the correct context window for opencode', () => { - expect(DEFAULT_CONTEXT_WINDOWS['opencode']).toBe(128000); - }); - - it('should have the correct context window for factory-droid', () => { - expect(DEFAULT_CONTEXT_WINDOWS['factory-droid']).toBe(200000); - }); - - it('should have zero context window for terminal', () => { - expect(DEFAULT_CONTEXT_WINDOWS['terminal']).toBe(0); - }); - - it('should have entries for all expected agent types', () => { - const expectedKeys = ['claude-code', 'codex', 'opencode', 'factory-droid', 'terminal']; - expect(Object.keys(DEFAULT_CONTEXT_WINDOWS).sort()).toEqual(expectedKeys.sort()); - }); -}); - -describe('COMBINED_CONTEXT_AGENTS', () => { - it('should contain codex', () => { - expect(COMBINED_CONTEXT_AGENTS.has('codex')).toBe(true); - }); - - it('should not contain claude-code', () => { - expect(COMBINED_CONTEXT_AGENTS.has('claude-code')).toBe(false); - }); - - it('should not contain opencode', () => { - expect(COMBINED_CONTEXT_AGENTS.has('opencode')).toBe(false); - }); - - it('should not contain factory-droid', () => { - expect(COMBINED_CONTEXT_AGENTS.has('factory-droid')).toBe(false); - }); - - 
it('should not contain terminal', () => { - expect(COMBINED_CONTEXT_AGENTS.has('terminal')).toBe(false); - }); - - it('should have exactly one member', () => { - expect(COMBINED_CONTEXT_AGENTS.size).toBe(1); - }); -}); - -describe('calculateContextTokens', () => { - it('should calculate Claude-style tokens: input + cacheRead + cacheCreation (no output)', () => { - const stats: ContextUsageStats = { - inputTokens: 1000, - cacheReadInputTokens: 5000, - cacheCreationInputTokens: 2000, - outputTokens: 3000, - }; - const result = calculateContextTokens(stats, 'claude-code'); - expect(result).toBe(8000); // 1000 + 5000 + 2000, output excluded - }); - - it('should calculate Codex tokens: input + cacheRead + cacheCreation + output (combined)', () => { - const stats: ContextUsageStats = { - inputTokens: 1000, - cacheReadInputTokens: 5000, - cacheCreationInputTokens: 2000, - outputTokens: 3000, - }; - const result = calculateContextTokens(stats, 'codex'); - expect(result).toBe(11000); // 1000 + 5000 + 2000 + 3000 - }); - - it('should default missing token fields to 0', () => { - const stats: ContextUsageStats = { - inputTokens: 500, - }; - const result = calculateContextTokens(stats, 'claude-code'); - expect(result).toBe(500); // 500 + 0 + 0 - }); - - it('should handle all undefined token fields', () => { - const stats: ContextUsageStats = {}; - const result = calculateContextTokens(stats, 'claude-code'); - expect(result).toBe(0); - }); - - it('should use base formula for terminal agent', () => { - const stats: ContextUsageStats = { - inputTokens: 100, - cacheReadInputTokens: 200, - cacheCreationInputTokens: 300, - outputTokens: 400, - }; - const result = calculateContextTokens(stats, 'terminal'); - expect(result).toBe(600); // 100 + 200 + 300, no output - }); - - it('should use base formula when no agentId is provided', () => { - const stats: ContextUsageStats = { - inputTokens: 100, - cacheReadInputTokens: 200, - cacheCreationInputTokens: 300, - outputTokens: 400, - }; - 
const result = calculateContextTokens(stats); - expect(result).toBe(600); // 100 + 200 + 300, no output - }); - - it('should return 0 when all tokens are zero', () => { - const stats: ContextUsageStats = { - inputTokens: 0, - cacheReadInputTokens: 0, - cacheCreationInputTokens: 0, - outputTokens: 0, - }; - const result = calculateContextTokens(stats, 'claude-code'); - expect(result).toBe(0); - }); - - it('should use base formula for opencode agent', () => { - const stats: ContextUsageStats = { - inputTokens: 1000, - cacheReadInputTokens: 2000, - cacheCreationInputTokens: 500, - outputTokens: 1500, - }; - const result = calculateContextTokens(stats, 'opencode'); - expect(result).toBe(3500); // 1000 + 2000 + 500, output excluded - }); - - it('should use base formula for factory-droid agent', () => { - const stats: ContextUsageStats = { - inputTokens: 1000, - outputTokens: 2000, - }; - const result = calculateContextTokens(stats, 'factory-droid'); - expect(result).toBe(1000); // only input, no cacheRead or cacheCreation - }); - - it('should default outputTokens to 0 for codex when undefined', () => { - const stats: ContextUsageStats = { - inputTokens: 1000, - }; - const result = calculateContextTokens(stats, 'codex'); - expect(result).toBe(1000); // 1000 + 0 + 0 + 0 - }); -}); - -describe('estimateContextUsage', () => { - it('should use contextWindow from stats when provided', () => { - const stats: ContextUsageStats = { - inputTokens: 5000, - cacheReadInputTokens: 0, - cacheCreationInputTokens: 0, - contextWindow: 10000, - }; - const result = estimateContextUsage(stats, 'claude-code'); - expect(result).toBe(50); // 5000 / 10000 * 100 = 50% - }); - - it('should fall back to DEFAULT_CONTEXT_WINDOWS when no contextWindow in stats', () => { - const stats: ContextUsageStats = { - inputTokens: 100000, - cacheReadInputTokens: 0, - cacheCreationInputTokens: 0, - }; - const result = estimateContextUsage(stats, 'claude-code'); - // 100000 / 200000 * 100 = 50% - 
expect(result).toBe(50); - }); - - it('should return null for terminal agent', () => { - const stats: ContextUsageStats = { - inputTokens: 100, - }; - const result = estimateContextUsage(stats, 'terminal'); - expect(result).toBeNull(); - }); - - it('should return null when no agentId and no contextWindow', () => { - const stats: ContextUsageStats = { - inputTokens: 100, - }; - const result = estimateContextUsage(stats); - expect(result).toBeNull(); - }); - - it('should return 0 when all tokens are 0', () => { - const stats: ContextUsageStats = { - inputTokens: 0, - cacheReadInputTokens: 0, - cacheCreationInputTokens: 0, - outputTokens: 0, - }; - const result = estimateContextUsage(stats, 'claude-code'); - expect(result).toBe(0); - }); - - it('should cap at 100% when tokens exceed context window', () => { - const stats: ContextUsageStats = { - inputTokens: 300000, - cacheReadInputTokens: 0, - cacheCreationInputTokens: 0, - }; - const result = estimateContextUsage(stats, 'claude-code'); - // 300000 / 200000 * 100 = 150%, capped at 100 - expect(result).toBe(100); - }); - - it('should cap at 100% when using stats contextWindow', () => { - const stats: ContextUsageStats = { - inputTokens: 15000, - contextWindow: 10000, - }; - const result = estimateContextUsage(stats, 'claude-code'); - expect(result).toBe(100); - }); - - it('should calculate ~50% usage for claude-code agent', () => { - const stats: ContextUsageStats = { - inputTokens: 50000, - cacheReadInputTokens: 30000, - cacheCreationInputTokens: 20000, - }; - const result = estimateContextUsage(stats, 'claude-code'); - // (50000 + 30000 + 20000) / 200000 * 100 = 50% - expect(result).toBe(50); - }); - - it('should include output tokens in calculation for codex agent', () => { - const stats: ContextUsageStats = { - inputTokens: 50000, - cacheReadInputTokens: 0, - cacheCreationInputTokens: 0, - outputTokens: 50000, - }; - const result = estimateContextUsage(stats, 'codex'); - // (50000 + 0 + 0 + 50000) / 200000 * 100 = 
50% - expect(result).toBe(50); - }); - - it('should use contextWindow from stats even without agentId', () => { - const stats: ContextUsageStats = { - inputTokens: 5000, - contextWindow: 10000, - }; - const result = estimateContextUsage(stats); - // 5000 / 10000 * 100 = 50% - expect(result).toBe(50); - }); - - it('should round the percentage to nearest integer', () => { - const stats: ContextUsageStats = { - inputTokens: 33333, - contextWindow: 100000, - }; - const result = estimateContextUsage(stats, 'claude-code'); - // 33333 / 100000 * 100 = 33.333 => rounded to 33 - expect(result).toBe(33); - }); - - it('should use opencode default context window of 128000', () => { - const stats: ContextUsageStats = { - inputTokens: 64000, - }; - const result = estimateContextUsage(stats, 'opencode'); - // 64000 / 128000 * 100 = 50% - expect(result).toBe(50); - }); - - it('should return null for unknown agent without contextWindow', () => { - const stats: ContextUsageStats = { - inputTokens: 100, - }; - // Cast to bypass type checking for an unknown agent - const result = estimateContextUsage(stats, 'unknown-agent' as any); - expect(result).toBeNull(); - }); - - it('should handle contextWindow of 0 by falling back to defaults', () => { - const stats: ContextUsageStats = { - inputTokens: 100000, - contextWindow: 0, - }; - const result = estimateContextUsage(stats, 'claude-code'); - // contextWindow is 0 (falsy), falls back to default 200000 - // 100000 / 200000 * 100 = 50% - expect(result).toBe(50); - }); -}); diff --git a/src/main/agent-detector.ts b/src/main/agent-detector.ts deleted file mode 100644 index 1d2d2fe2..00000000 --- a/src/main/agent-detector.ts +++ /dev/null @@ -1,859 +0,0 @@ -import { execFileNoThrow } from './utils/execFile'; -import { logger } from './utils/logger'; -import * as os from 'os'; -import * as fs from 'fs'; -import * as path from 'path'; -import { AgentCapabilities, getAgentCapabilities } from './agent-capabilities'; -import { expandTilde, 
detectNodeVersionManagerBinPaths, buildExpandedEnv } from '../shared/pathUtils'; - -// Re-export AgentCapabilities for convenience -export { AgentCapabilities } from './agent-capabilities'; - -// Configuration option types for agent-specific settings -export interface AgentConfigOption { - key: string; // Storage key - type: 'checkbox' | 'text' | 'number' | 'select'; - label: string; // UI label - description: string; // Help text - default: any; // Default value - options?: string[]; // For select type - argBuilder?: (value: any) => string[]; // Converts config value to CLI args -} - -export interface AgentConfig { - id: string; - name: string; - binaryName: string; - command: string; - args: string[]; // Base args always included (excludes batch mode prefix) - available: boolean; - path?: string; - customPath?: string; // User-specified custom path (shown in UI even if not available) - requiresPty?: boolean; // Whether this agent needs a pseudo-terminal - configOptions?: AgentConfigOption[]; // Agent-specific configuration - hidden?: boolean; // If true, agent is hidden from UI (internal use only) - capabilities: AgentCapabilities; // Agent feature capabilities - - // Argument builders for dynamic CLI construction - // These are optional - agents that don't have them use hardcoded behavior - batchModePrefix?: string[]; // Args added before base args for batch mode (e.g., ['run'] for OpenCode) - batchModeArgs?: string[]; // Args only applied in batch mode (e.g., ['--skip-git-repo-check'] for Codex exec) - jsonOutputArgs?: string[]; // Args for JSON output format (e.g., ['--format', 'json']) - resumeArgs?: (sessionId: string) => string[]; // Function to build resume args - readOnlyArgs?: string[]; // Args for read-only/plan mode (e.g., ['--agent', 'plan']) - modelArgs?: (modelId: string) => string[]; // Function to build model selection args (e.g., ['--model', modelId]) - yoloModeArgs?: string[]; // Args for YOLO/full-access mode (e.g., 
['--dangerously-bypass-approvals-and-sandbox']) - workingDirArgs?: (dir: string) => string[]; // Function to build working directory args (e.g., ['-C', dir]) - imageArgs?: (imagePath: string) => string[]; // Function to build image attachment args (e.g., ['-i', imagePath] for Codex) - promptArgs?: (prompt: string) => string[]; // Function to build prompt args (e.g., ['-p', prompt] for OpenCode) - noPromptSeparator?: boolean; // If true, don't add '--' before the prompt in batch mode (OpenCode doesn't support it) - defaultEnvVars?: Record; // Default environment variables for this agent (merged with user customEnvVars) -} - -export const AGENT_DEFINITIONS: Omit[] = [ - { - id: 'terminal', - name: 'Terminal', - // Use platform-appropriate default shell - binaryName: process.platform === 'win32' ? 'powershell.exe' : 'bash', - command: process.platform === 'win32' ? 'powershell.exe' : 'bash', - args: [], - requiresPty: true, - hidden: true, // Internal agent, not shown in UI - }, - { - id: 'claude-code', - name: 'Claude Code', - binaryName: 'claude', - command: 'claude', - // YOLO mode (--dangerously-skip-permissions) is always enabled - Maestro requires it - args: [ - '--print', - '--verbose', - '--output-format', - 'stream-json', - '--dangerously-skip-permissions', - ], - resumeArgs: (sessionId: string) => ['--resume', sessionId], // Resume with session ID - readOnlyArgs: ['--permission-mode', 'plan'], // Read-only/plan mode - }, - { - id: 'codex', - name: 'Codex', - binaryName: 'codex', - command: 'codex', - // Base args for interactive mode (no flags that are exec-only) - args: [], - // Codex CLI argument builders - // Batch mode: codex exec --json --dangerously-bypass-approvals-and-sandbox --skip-git-repo-check [--sandbox read-only] [-C dir] [resume ] -- "prompt" - // Sandbox modes: - // - Default (YOLO): --dangerously-bypass-approvals-and-sandbox (full system access, required by Maestro) - // - Read-only: --sandbox read-only (can only read files, overrides YOLO) 
- batchModePrefix: ['exec'], // Codex uses 'exec' subcommand for batch mode - batchModeArgs: ['--dangerously-bypass-approvals-and-sandbox', '--skip-git-repo-check'], // Args only valid on 'exec' subcommand - jsonOutputArgs: ['--json'], // JSON output format (must come before resume subcommand) - resumeArgs: (sessionId: string) => ['resume', sessionId], // Resume with session/thread ID - readOnlyArgs: ['--sandbox', 'read-only'], // Read-only/plan mode - yoloModeArgs: ['--dangerously-bypass-approvals-and-sandbox'], // Full access mode - workingDirArgs: (dir: string) => ['-C', dir], // Set working directory - imageArgs: (imagePath: string) => ['-i', imagePath], // Image attachment: codex exec -i /path/to/image.png - // Agent-specific configuration options shown in UI - configOptions: [ - { - key: 'contextWindow', - type: 'number', - label: 'Context Window Size', - description: - 'Maximum context window size in tokens. Required for context usage display. Common values: 400000 (GPT-5.2), 128000 (GPT-4o).', - default: 400000, // Default for GPT-5.2 models - }, - ], - }, - { - id: 'gemini-cli', - name: 'Gemini CLI', - binaryName: 'gemini', - command: 'gemini', - args: [], - }, - { - id: 'qwen3-coder', - name: 'Qwen3 Coder', - binaryName: 'qwen3-coder', - command: 'qwen3-coder', - args: [], - }, - { - id: 'opencode', - name: 'OpenCode', - binaryName: 'opencode', - command: 'opencode', - args: [], // Base args (none for OpenCode - batch mode uses 'run' subcommand) - // OpenCode CLI argument builders - // Batch mode: opencode run --format json [--model provider/model] [--session ] [--agent plan] "prompt" - // YOLO mode (auto-approve all permissions) is enabled via OPENCODE_CONFIG_CONTENT env var. - // This prevents OpenCode from prompting for permission on external_directory access, which would hang in batch mode. 
- batchModePrefix: ['run'], // OpenCode uses 'run' subcommand for batch mode - jsonOutputArgs: ['--format', 'json'], // JSON output format - resumeArgs: (sessionId: string) => ['--session', sessionId], // Resume with session ID - readOnlyArgs: ['--agent', 'plan'], // Read-only/plan mode - modelArgs: (modelId: string) => ['--model', modelId], // Model selection (e.g., 'ollama/qwen3:8b') - imageArgs: (imagePath: string) => ['-f', imagePath], // Image/file attachment: opencode run -f /path/to/image.png -- "prompt" - noPromptSeparator: true, // OpenCode doesn't need '--' before prompt - yargs handles positional args - // Default env vars: enable YOLO mode (allow all permissions including external_directory) - // Users can override by setting customEnvVars in agent config - defaultEnvVars: { - OPENCODE_CONFIG_CONTENT: '{"permission":{"*":"allow","external_directory":"allow"}}', - }, - // Agent-specific configuration options shown in UI - configOptions: [ - { - key: 'model', - type: 'text', - label: 'Model', - description: - 'Model to use (e.g., "ollama/qwen3:8b", "anthropic/claude-sonnet-4-20250514"). Leave empty for default.', - default: '', // Empty string means use OpenCode's default model - argBuilder: (value: string) => { - // Only add --model arg if a model is specified - if (value && value.trim()) { - return ['--model', value.trim()]; - } - return []; - }, - }, - { - key: 'contextWindow', - type: 'number', - label: 'Context Window Size', - description: - 'Maximum context window size in tokens. Required for context usage display. Varies by model (e.g., 400000 for Claude/GPT-5.2, 128000 for GPT-4o).', - default: 128000, // Default for common models (GPT-4, etc.) 
- }, - ], - }, - { - id: 'factory-droid', - name: 'Factory Droid', - binaryName: 'droid', - command: 'droid', - args: [], // Base args for interactive mode (none) - requiresPty: false, // Batch mode uses child process - - // Batch mode: droid exec [options] "prompt" - batchModePrefix: ['exec'], - // Always skip permissions in batch mode (like Claude Code's --dangerously-skip-permissions) - // Maestro requires full access to work properly - batchModeArgs: ['--skip-permissions-unsafe'], - - // JSON output for parsing - jsonOutputArgs: ['-o', 'stream-json'], - - // Session resume: -s (requires a prompt) - resumeArgs: (sessionId: string) => ['-s', sessionId], - - // Read-only mode is DEFAULT in droid exec (no flag needed) - readOnlyArgs: [], - - // YOLO mode (same as batchModeArgs, kept for explicit yoloMode requests) - yoloModeArgs: ['--skip-permissions-unsafe'], - - // Model selection is handled by configOptions.model.argBuilder below - // Don't define modelArgs here to avoid duplicate -m flags - - // Working directory - workingDirArgs: (dir: string) => ['--cwd', dir], - - // File/image input - imageArgs: (imagePath: string) => ['-f', imagePath], - - // Prompt is positional argument (no separator needed) - noPromptSeparator: true, - - // Default env vars - don't set NO_COLOR as it conflicts with FORCE_COLOR - defaultEnvVars: {}, - - // UI config options - // Model IDs from droid CLI (exact IDs required) - // NOTE: autonomyLevel is NOT configurable - Maestro always uses --skip-permissions-unsafe - // which conflicts with --auto. This matches Claude Code's behavior. 
- configOptions: [ - { - key: 'model', - type: 'select', - label: 'Model', - description: 'Model to use for Factory Droid', - // Model IDs from `droid exec --help` (2026-01-22) - options: [ - '', // Empty = use droid's default (claude-opus-4-5-20251101) - // OpenAI models - 'gpt-5.1', - 'gpt-5.1-codex', - 'gpt-5.1-codex-max', - 'gpt-5.2', - // Claude models - 'claude-sonnet-4-5-20250929', - 'claude-opus-4-5-20251101', - 'claude-haiku-4-5-20251001', - // Google models - 'gemini-3-pro-preview', - ], - default: '', // Empty = use droid's default (claude-opus-4-5-20251101) - argBuilder: (value: string) => (value && value.trim() ? ['-m', value.trim()] : []), - }, - { - key: 'reasoningEffort', - type: 'select', - label: 'Reasoning Effort', - description: 'How much the model should reason before responding', - options: ['', 'low', 'medium', 'high'], - default: '', // Empty = use droid's default reasoning - argBuilder: (value: string) => (value && value.trim() ? ['-r', value.trim()] : []), - }, - { - key: 'contextWindow', - type: 'number', - label: 'Context Window Size', - description: 'Maximum context window in tokens (for UI display)', - default: 200000, - }, - ], - }, -]; - -export class AgentDetector { - private cachedAgents: AgentConfig[] | null = null; - private detectionInProgress: Promise | null = null; - private customPaths: Record = {}; - // Cache for model discovery results: agentId -> { models, timestamp } - private modelCache: Map = new Map(); - // Cache TTL: 5 minutes (model lists don't change frequently) - private readonly MODEL_CACHE_TTL_MS = 5 * 60 * 1000; - - /** - * Set custom paths for agents (from user configuration) - */ - setCustomPaths(paths: Record): void { - this.customPaths = paths; - // Clear cache when custom paths change - this.cachedAgents = null; - } - - /** - * Get the current custom paths - */ - getCustomPaths(): Record { - return { ...this.customPaths }; - } - - /** - * Detect which agents are available on the system - * Uses promise 
deduplication to prevent parallel detection when multiple calls arrive simultaneously - */ - async detectAgents(): Promise { - if (this.cachedAgents) { - return this.cachedAgents; - } - - // If detection is already in progress, return the same promise to avoid parallel runs - if (this.detectionInProgress) { - return this.detectionInProgress; - } - - // Start detection and track the promise - this.detectionInProgress = this.doDetectAgents(); - try { - return await this.detectionInProgress; - } finally { - this.detectionInProgress = null; - } - } - - /** - * Internal method that performs the actual agent detection - */ - private async doDetectAgents(): Promise { - const agents: AgentConfig[] = []; - const expandedEnv = this.getExpandedEnv(); - - logger.info(`Agent detection starting. PATH: ${expandedEnv.PATH}`, 'AgentDetector'); - - for (const agentDef of AGENT_DEFINITIONS) { - const customPath = this.customPaths[agentDef.id]; - let detection: { exists: boolean; path?: string }; - - // If user has specified a custom path, check that first - if (customPath) { - detection = await this.checkCustomPath(customPath); - if (detection.exists) { - logger.info( - `Agent "${agentDef.name}" found at custom path: ${detection.path}`, - 'AgentDetector' - ); - } else { - logger.warn( - `Agent "${agentDef.name}" custom path not valid: ${customPath}`, - 'AgentDetector' - ); - // Fall back to PATH detection - detection = await this.checkBinaryExists(agentDef.binaryName); - if (detection.exists) { - logger.info( - `Agent "${agentDef.name}" found in PATH at: ${detection.path}`, - 'AgentDetector' - ); - } - } - } else { - detection = await this.checkBinaryExists(agentDef.binaryName); - - if (detection.exists) { - logger.info(`Agent "${agentDef.name}" found at: ${detection.path}`, 'AgentDetector'); - } else if (agentDef.binaryName !== 'bash') { - // Don't log bash as missing since it's always present, log others as warnings - logger.warn( - `Agent "${agentDef.name}" (binary: 
${agentDef.binaryName}) not found. ` + - `Searched in PATH: ${expandedEnv.PATH}`, - 'AgentDetector' - ); - } - } - - agents.push({ - ...agentDef, - available: detection.exists, - path: detection.path, - customPath: customPath || undefined, - capabilities: getAgentCapabilities(agentDef.id), - }); - } - - const availableAgents = agents.filter((a) => a.available); - const isWindows = process.platform === 'win32'; - - // On Windows, log detailed path info to help debug shell execution issues - if (isWindows) { - logger.info(`Agent detection complete (Windows)`, 'AgentDetector', { - platform: process.platform, - agents: availableAgents.map((a) => ({ - id: a.id, - name: a.name, - path: a.path, - pathExtension: a.path ? path.extname(a.path) : 'none', - // .exe = direct execution, .cmd = requires shell - willUseShell: a.path - ? a.path.toLowerCase().endsWith('.cmd') || - a.path.toLowerCase().endsWith('.bat') || - !path.extname(a.path) - : true, - })), - }); - } else { - logger.info( - `Agent detection complete. 
Available: ${availableAgents.map((a) => a.name).join(', ') || 'none'}`, - 'AgentDetector' - ); - } - - this.cachedAgents = agents; - return agents; - } - - /** - * Check if a custom path points to a valid executable - * On Windows, also tries .cmd and .exe extensions if the path doesn't exist as-is - */ - private async checkCustomPath(customPath: string): Promise<{ exists: boolean; path?: string }> { - const isWindows = process.platform === 'win32'; - - // Expand tilde to home directory (Node.js fs doesn't understand ~) - const expandedPath = expandTilde(customPath); - - // Helper to check if a specific path exists and is a file - const checkPath = async (pathToCheck: string): Promise => { - try { - const stats = await fs.promises.stat(pathToCheck); - return stats.isFile(); - } catch { - return false; - } - }; - - try { - // First, try the exact path provided (with tilde expanded) - if (await checkPath(expandedPath)) { - // Check if file is executable (on Unix systems) - if (!isWindows) { - try { - await fs.promises.access(expandedPath, fs.constants.X_OK); - } catch { - logger.warn(`Custom path exists but is not executable: ${customPath}`, 'AgentDetector'); - return { exists: false }; - } - } - // Return the expanded path so it can be used directly - return { exists: true, path: expandedPath }; - } - - // On Windows, if the exact path doesn't exist, try with .cmd and .exe extensions - if (isWindows) { - const lowerPath = expandedPath.toLowerCase(); - // Only try extensions if the path doesn't already have one - if (!lowerPath.endsWith('.cmd') && !lowerPath.endsWith('.exe')) { - // Try .exe first (preferred), then .cmd - const exePath = expandedPath + '.exe'; - if (await checkPath(exePath)) { - logger.debug(`Custom path resolved with .exe extension`, 'AgentDetector', { - original: customPath, - resolved: exePath, - }); - return { exists: true, path: exePath }; - } - - const cmdPath = expandedPath + '.cmd'; - if (await checkPath(cmdPath)) { - logger.debug(`Custom 
path resolved with .cmd extension`, 'AgentDetector', { - original: customPath, - resolved: cmdPath, - }); - return { exists: true, path: cmdPath }; - } - } - } - - return { exists: false }; - } catch { - return { exists: false }; - } - } - - /** - * Build an expanded PATH that includes common binary installation locations. - * This is necessary because packaged Electron apps don't inherit shell environment. - */ - private getExpandedEnv(): NodeJS.ProcessEnv { - return buildExpandedEnv(); - } - - /** - * On Windows, directly probe known installation paths for a binary. - * This is more reliable than `where` command which may fail in packaged Electron apps. - * Returns the first existing path found, preferring .exe over .cmd. - */ - private async probeWindowsPaths(binaryName: string): Promise { - const home = os.homedir(); - const appData = process.env.APPDATA || path.join(home, 'AppData', 'Roaming'); - const localAppData = process.env.LOCALAPPDATA || path.join(home, 'AppData', 'Local'); - const programFiles = process.env.ProgramFiles || 'C:\\Program Files'; - - // Define known installation paths for each binary, in priority order - // Prefer .exe (standalone installers) over .cmd (npm wrappers) - const knownPaths: Record = { - claude: [ - // PowerShell installer (primary method) - installs claude.exe - path.join(home, '.local', 'bin', 'claude.exe'), - // Winget installation - path.join(localAppData, 'Microsoft', 'WinGet', 'Links', 'claude.exe'), - path.join(programFiles, 'WinGet', 'Links', 'claude.exe'), - // npm global installation - creates .cmd wrapper - path.join(appData, 'npm', 'claude.cmd'), - path.join(localAppData, 'npm', 'claude.cmd'), - // WindowsApps (Microsoft Store style) - path.join(localAppData, 'Microsoft', 'WindowsApps', 'claude.exe'), - ], - codex: [ - // npm global installation (primary method for Codex) - path.join(appData, 'npm', 'codex.cmd'), - path.join(localAppData, 'npm', 'codex.cmd'), - // Possible standalone in future - path.join(home, 
'.local', 'bin', 'codex.exe'), - ], - opencode: [ - // Scoop installation (recommended for OpenCode) - path.join(home, 'scoop', 'shims', 'opencode.exe'), - path.join(home, 'scoop', 'apps', 'opencode', 'current', 'opencode.exe'), - // Chocolatey installation - path.join( - process.env.ChocolateyInstall || 'C:\\ProgramData\\chocolatey', - 'bin', - 'opencode.exe' - ), - // Go install - path.join(home, 'go', 'bin', 'opencode.exe'), - // npm (has known issues on Windows, but check anyway) - path.join(appData, 'npm', 'opencode.cmd'), - ], - gemini: [ - // npm global installation - path.join(appData, 'npm', 'gemini.cmd'), - path.join(localAppData, 'npm', 'gemini.cmd'), - ], - droid: [ - // Factory Droid installation paths - path.join(home, '.factory', 'bin', 'droid.exe'), - path.join(localAppData, 'Factory', 'droid.exe'), - path.join(appData, 'Factory', 'droid.exe'), - path.join(home, '.local', 'bin', 'droid.exe'), - // npm global installation - path.join(appData, 'npm', 'droid.cmd'), - path.join(localAppData, 'npm', 'droid.cmd'), - ], - }; - - const pathsToCheck = knownPaths[binaryName] || []; - - for (const probePath of pathsToCheck) { - try { - await fs.promises.access(probePath, fs.constants.F_OK); - logger.debug(`Direct probe found ${binaryName}`, 'AgentDetector', { path: probePath }); - return probePath; - } catch { - // Path doesn't exist, continue to next - } - } - - return null; - } - - /** - * On macOS/Linux, directly probe known installation paths for a binary. - * This is necessary because packaged Electron apps don't inherit shell aliases, - * and 'which' may fail to find binaries in non-standard locations. - * Returns the first existing executable path found. - */ - private async probeUnixPaths(binaryName: string): Promise { - const home = os.homedir(); - - // Get dynamic paths from Node version managers (nvm, fnm, volta, etc.) 
- const versionManagerPaths = detectNodeVersionManagerBinPaths(); - - // Define known installation paths for each binary, in priority order - const knownPaths: Record = { - claude: [ - // Claude Code default installation location (irm https://claude.ai/install.ps1 equivalent on macOS) - path.join(home, '.claude', 'local', 'claude'), - // User local bin (pip, manual installs) - path.join(home, '.local', 'bin', 'claude'), - // Homebrew on Apple Silicon - '/opt/homebrew/bin/claude', - // Homebrew on Intel Mac - '/usr/local/bin/claude', - // npm global with custom prefix - path.join(home, '.npm-global', 'bin', 'claude'), - // User bin directory - path.join(home, 'bin', 'claude'), - // Add paths from Node version managers (nvm, fnm, volta, etc.) - ...versionManagerPaths.map((p) => path.join(p, 'claude')), - ], - codex: [ - // User local bin - path.join(home, '.local', 'bin', 'codex'), - // Homebrew paths - '/opt/homebrew/bin/codex', - '/usr/local/bin/codex', - // npm global - path.join(home, '.npm-global', 'bin', 'codex'), - // Add paths from Node version managers (nvm, fnm, volta, etc.) - ...versionManagerPaths.map((p) => path.join(p, 'codex')), - ], - opencode: [ - // OpenCode installer default location - path.join(home, '.opencode', 'bin', 'opencode'), - // Go install location - path.join(home, 'go', 'bin', 'opencode'), - // User local bin - path.join(home, '.local', 'bin', 'opencode'), - // Homebrew paths - '/opt/homebrew/bin/opencode', - '/usr/local/bin/opencode', - // Add paths from Node version managers (nvm, fnm, volta, etc.) - ...versionManagerPaths.map((p) => path.join(p, 'opencode')), - ], - gemini: [ - // npm global paths - path.join(home, '.npm-global', 'bin', 'gemini'), - '/opt/homebrew/bin/gemini', - '/usr/local/bin/gemini', - // Add paths from Node version managers (nvm, fnm, volta, etc.) 
- ...versionManagerPaths.map((p) => path.join(p, 'gemini')), - ], - droid: [ - // Factory Droid installation paths - path.join(home, '.factory', 'bin', 'droid'), - path.join(home, '.local', 'bin', 'droid'), - '/opt/homebrew/bin/droid', - '/usr/local/bin/droid', - // Add paths from Node version managers (in case installed via npm) - ...versionManagerPaths.map((p) => path.join(p, 'droid')), - ], - }; - - const pathsToCheck = knownPaths[binaryName] || []; - - for (const probePath of pathsToCheck) { - try { - // Check both existence and executability - await fs.promises.access(probePath, fs.constants.F_OK | fs.constants.X_OK); - logger.debug(`Direct probe found ${binaryName}`, 'AgentDetector', { path: probePath }); - return probePath; - } catch { - // Path doesn't exist or isn't executable, continue to next - } - } - - return null; - } - - /** - * Check if a binary exists in PATH - * On Windows, this also handles .cmd and .exe extensions properly - */ - private async checkBinaryExists(binaryName: string): Promise<{ exists: boolean; path?: string }> { - const isWindows = process.platform === 'win32'; - - // First try direct file probing of known installation paths - // This is more reliable than which/where in packaged Electron apps - if (isWindows) { - const probedPath = await this.probeWindowsPaths(binaryName); - if (probedPath) { - return { exists: true, path: probedPath }; - } - logger.debug(`Direct probe failed for ${binaryName}, falling back to where`, 'AgentDetector'); - } else { - // macOS/Linux: probe known paths first - const probedPath = await this.probeUnixPaths(binaryName); - if (probedPath) { - return { exists: true, path: probedPath }; - } - logger.debug(`Direct probe failed for ${binaryName}, falling back to which`, 'AgentDetector'); - } - - try { - // Use 'which' on Unix-like systems, 'where' on Windows - const command = isWindows ? 
'where' : 'which'; - - // Use expanded PATH to find binaries in common installation locations - // This is critical for packaged Electron apps which don't inherit shell env - const env = this.getExpandedEnv(); - const result = await execFileNoThrow(command, [binaryName], undefined, env); - - if (result.exitCode === 0 && result.stdout.trim()) { - // Get all matches (Windows 'where' can return multiple) - // Handle both Unix (\n) and Windows (\r\n) line endings - const matches = result.stdout - .trim() - .split(/\r?\n/) - .map((p) => p.trim()) - .filter((p) => p); - - if (process.platform === 'win32' && matches.length > 0) { - // On Windows, prefer .exe over .cmd over extensionless - // This helps with proper execution handling - const exeMatch = matches.find((p) => p.toLowerCase().endsWith('.exe')); - const cmdMatch = matches.find((p) => p.toLowerCase().endsWith('.cmd')); - - // Return the best match: .exe > .cmd > first result - let bestMatch = exeMatch || cmdMatch || matches[0]; - - // If the first match doesn't have an extension, check if .cmd or .exe version exists - // This handles cases where 'where' returns a path without extension - if ( - !bestMatch.toLowerCase().endsWith('.exe') && - !bestMatch.toLowerCase().endsWith('.cmd') - ) { - const cmdPath = bestMatch + '.cmd'; - const exePath = bestMatch + '.exe'; - - // Check if the .exe or .cmd version exists - try { - await fs.promises.access(exePath, fs.constants.F_OK); - bestMatch = exePath; - logger.debug(`Found .exe version of ${binaryName}`, 'AgentDetector', { - path: exePath, - }); - } catch { - try { - await fs.promises.access(cmdPath, fs.constants.F_OK); - bestMatch = cmdPath; - logger.debug(`Found .cmd version of ${binaryName}`, 'AgentDetector', { - path: cmdPath, - }); - } catch { - // Neither .exe nor .cmd exists, use the original path - } - } - } - - logger.debug(`Windows binary detection for ${binaryName}`, 'AgentDetector', { - allMatches: matches, - selectedMatch: bestMatch, - isCmd: 
bestMatch.toLowerCase().endsWith('.cmd'), - isExe: bestMatch.toLowerCase().endsWith('.exe'), - }); - - return { - exists: true, - path: bestMatch, - }; - } - - return { - exists: true, - path: matches[0], // First match for Unix - }; - } - - return { exists: false }; - } catch { - return { exists: false }; - } - } - - /** - * Get a specific agent by ID - */ - async getAgent(agentId: string): Promise { - const agents = await this.detectAgents(); - return agents.find((a) => a.id === agentId) || null; - } - - /** - * Clear the cache (useful if PATH changes) - */ - clearCache(): void { - this.cachedAgents = null; - } - - /** - * Clear the model cache for a specific agent or all agents - */ - clearModelCache(agentId?: string): void { - if (agentId) { - this.modelCache.delete(agentId); - } else { - this.modelCache.clear(); - } - } - - /** - * Discover available models for an agent that supports model selection. - * Returns cached results if available and not expired. - * - * @param agentId - The agent identifier (e.g., 'opencode') - * @param forceRefresh - If true, bypass cache and fetch fresh model list - * @returns Array of model names, or empty array if agent doesn't support model discovery - */ - async discoverModels(agentId: string, forceRefresh = false): Promise { - const agent = await this.getAgent(agentId); - - if (!agent || !agent.available) { - logger.warn(`Cannot discover models: agent ${agentId} not available`, 'AgentDetector'); - return []; - } - - // Check if agent supports model selection - if (!agent.capabilities.supportsModelSelection) { - logger.debug(`Agent ${agentId} does not support model selection`, 'AgentDetector'); - return []; - } - - // Check cache unless force refresh - if (!forceRefresh) { - const cached = this.modelCache.get(agentId); - if (cached && Date.now() - cached.timestamp < this.MODEL_CACHE_TTL_MS) { - logger.debug(`Returning cached models for ${agentId}`, 'AgentDetector'); - return cached.models; - } - } - - // Run agent-specific 
model discovery command - const models = await this.runModelDiscovery(agentId, agent); - - // Cache the results - this.modelCache.set(agentId, { models, timestamp: Date.now() }); - - return models; - } - - /** - * Run the agent-specific model discovery command. - * Each agent may have a different way to list available models. - */ - private async runModelDiscovery(agentId: string, agent: AgentConfig): Promise { - const env = this.getExpandedEnv(); - const command = agent.path || agent.command; - - // Agent-specific model discovery commands - switch (agentId) { - case 'opencode': { - // OpenCode: `opencode models` returns one model per line - const result = await execFileNoThrow(command, ['models'], undefined, env); - - if (result.exitCode !== 0) { - logger.warn( - `Model discovery failed for ${agentId}: exit code ${result.exitCode}`, - 'AgentDetector', - { stderr: result.stderr } - ); - return []; - } - - // Parse output: one model per line (e.g., "opencode/gpt-5-nano", "ollama/gpt-oss:latest") - const models = result.stdout - .split('\n') - .map((line) => line.trim()) - .filter((line) => line.length > 0); - - logger.info(`Discovered ${models.length} models for ${agentId}`, 'AgentDetector', { - models, - }); - return models; - } - - default: - // For agents without model discovery implemented, return empty array - logger.debug(`No model discovery implemented for ${agentId}`, 'AgentDetector'); - return []; - } - } -} diff --git a/src/main/agent-capabilities.ts b/src/main/agents/capabilities.ts similarity index 100% rename from src/main/agent-capabilities.ts rename to src/main/agents/capabilities.ts diff --git a/src/main/agents/definitions.ts b/src/main/agents/definitions.ts new file mode 100644 index 00000000..b0653551 --- /dev/null +++ b/src/main/agents/definitions.ts @@ -0,0 +1,346 @@ +/** + * Agent Definitions + * + * Contains the configuration definitions for all supported AI agents. + * This includes CLI arguments, configuration options, and default settings. 
+ */ + +import type { AgentCapabilities } from './capabilities'; + +// ============ Configuration Types ============ + +/** + * Base configuration option fields shared by all types + */ +interface BaseConfigOption { + key: string; // Storage key + label: string; // UI label + description: string; // Help text +} + +/** + * Checkbox configuration option (boolean value) + */ +interface CheckboxConfigOption extends BaseConfigOption { + type: 'checkbox'; + default: boolean; + argBuilder?: (value: boolean) => string[]; +} + +/** + * Text configuration option (string value) + */ +interface TextConfigOption extends BaseConfigOption { + type: 'text'; + default: string; + argBuilder?: (value: string) => string[]; +} + +/** + * Number configuration option (numeric value) + */ +interface NumberConfigOption extends BaseConfigOption { + type: 'number'; + default: number; + argBuilder?: (value: number) => string[]; +} + +/** + * Select configuration option (string value from predefined options) + */ +interface SelectConfigOption extends BaseConfigOption { + type: 'select'; + default: string; + options: string[]; + argBuilder?: (value: string) => string[]; +} + +/** + * Configuration option types for agent-specific settings. + * Uses discriminated union for full type safety. 
+ */ +export type AgentConfigOption = + | CheckboxConfigOption + | TextConfigOption + | NumberConfigOption + | SelectConfigOption; + +/** + * Full agent configuration including runtime detection state + */ +export interface AgentConfig { + id: string; + name: string; + binaryName: string; + command: string; + args: string[]; // Base args always included (excludes batch mode prefix) + available: boolean; + path?: string; + customPath?: string; // User-specified custom path (shown in UI even if not available) + requiresPty?: boolean; // Whether this agent needs a pseudo-terminal + configOptions?: AgentConfigOption[]; // Agent-specific configuration + hidden?: boolean; // If true, agent is hidden from UI (internal use only) + capabilities: AgentCapabilities; // Agent feature capabilities + + // Argument builders for dynamic CLI construction + // These are optional - agents that don't have them use hardcoded behavior + batchModePrefix?: string[]; // Args added before base args for batch mode (e.g., ['run'] for OpenCode) + batchModeArgs?: string[]; // Args only applied in batch mode (e.g., ['--skip-git-repo-check'] for Codex exec) + jsonOutputArgs?: string[]; // Args for JSON output format (e.g., ['--format', 'json']) + resumeArgs?: (sessionId: string) => string[]; // Function to build resume args + readOnlyArgs?: string[]; // Args for read-only/plan mode (e.g., ['--agent', 'plan']) + modelArgs?: (modelId: string) => string[]; // Function to build model selection args (e.g., ['--model', modelId]) + yoloModeArgs?: string[]; // Args for YOLO/full-access mode (e.g., ['--dangerously-bypass-approvals-and-sandbox']) + workingDirArgs?: (dir: string) => string[]; // Function to build working directory args (e.g., ['-C', dir]) + imageArgs?: (imagePath: string) => string[]; // Function to build image attachment args (e.g., ['-i', imagePath] for Codex) + promptArgs?: (prompt: string) => string[]; // Function to build prompt args (e.g., ['-p', prompt] for OpenCode) + 
noPromptSeparator?: boolean; // If true, don't add '--' before the prompt in batch mode (OpenCode doesn't support it) + defaultEnvVars?: Record; // Default environment variables for this agent (merged with user customEnvVars) +} + +/** + * Agent definition without runtime detection state (used for static definitions) + */ +export type AgentDefinition = Omit; + +// ============ Agent Definitions ============ + +/** + * Static definitions for all supported agents. + * These are the base configurations before runtime detection adds availability info. + */ +export const AGENT_DEFINITIONS: AgentDefinition[] = [ + { + id: 'terminal', + name: 'Terminal', + // Use platform-appropriate default shell + binaryName: process.platform === 'win32' ? 'powershell.exe' : 'bash', + command: process.platform === 'win32' ? 'powershell.exe' : 'bash', + args: [], + requiresPty: true, + hidden: true, // Internal agent, not shown in UI + }, + { + id: 'claude-code', + name: 'Claude Code', + binaryName: 'claude', + command: 'claude', + // YOLO mode (--dangerously-skip-permissions) is always enabled - Maestro requires it + args: [ + '--print', + '--verbose', + '--output-format', + 'stream-json', + '--dangerously-skip-permissions', + ], + resumeArgs: (sessionId: string) => ['--resume', sessionId], // Resume with session ID + readOnlyArgs: ['--permission-mode', 'plan'], // Read-only/plan mode + }, + { + id: 'codex', + name: 'Codex', + binaryName: 'codex', + command: 'codex', + // Base args for interactive mode (no flags that are exec-only) + args: [], + // Codex CLI argument builders + // Batch mode: codex exec --json --dangerously-bypass-approvals-and-sandbox --skip-git-repo-check [--sandbox read-only] [-C dir] [resume ] -- "prompt" + // Sandbox modes: + // - Default (YOLO): --dangerously-bypass-approvals-and-sandbox (full system access, required by Maestro) + // - Read-only: --sandbox read-only (can only read files, overrides YOLO) + batchModePrefix: ['exec'], // Codex uses 'exec' subcommand 
for batch mode + batchModeArgs: ['--dangerously-bypass-approvals-and-sandbox', '--skip-git-repo-check'], // Args only valid on 'exec' subcommand + jsonOutputArgs: ['--json'], // JSON output format (must come before resume subcommand) + resumeArgs: (sessionId: string) => ['resume', sessionId], // Resume with session/thread ID + readOnlyArgs: ['--sandbox', 'read-only'], // Read-only/plan mode + yoloModeArgs: ['--dangerously-bypass-approvals-and-sandbox'], // Full access mode + workingDirArgs: (dir: string) => ['-C', dir], // Set working directory + imageArgs: (imagePath: string) => ['-i', imagePath], // Image attachment: codex exec -i /path/to/image.png + // Agent-specific configuration options shown in UI + configOptions: [ + { + key: 'contextWindow', + type: 'number', + label: 'Context Window Size', + description: + 'Maximum context window size in tokens. Required for context usage display. Common values: 400000 (GPT-5.2), 128000 (GPT-4o).', + default: 400000, // Default for GPT-5.2 models + }, + ], + }, + { + id: 'gemini-cli', + name: 'Gemini CLI', + binaryName: 'gemini', + command: 'gemini', + args: [], + }, + { + id: 'qwen3-coder', + name: 'Qwen3 Coder', + binaryName: 'qwen3-coder', + command: 'qwen3-coder', + args: [], + }, + { + id: 'opencode', + name: 'OpenCode', + binaryName: 'opencode', + command: 'opencode', + args: [], // Base args (none for OpenCode - batch mode uses 'run' subcommand) + // OpenCode CLI argument builders + // Batch mode: opencode run --format json [--model provider/model] [--session ] [--agent plan] "prompt" + // YOLO mode (auto-approve all permissions) is enabled via OPENCODE_CONFIG_CONTENT env var. + // This prevents OpenCode from prompting for permission on external_directory access, which would hang in batch mode. 
+ batchModePrefix: ['run'], // OpenCode uses 'run' subcommand for batch mode + jsonOutputArgs: ['--format', 'json'], // JSON output format + resumeArgs: (sessionId: string) => ['--session', sessionId], // Resume with session ID + readOnlyArgs: ['--agent', 'plan'], // Read-only/plan mode + modelArgs: (modelId: string) => ['--model', modelId], // Model selection (e.g., 'ollama/qwen3:8b') + imageArgs: (imagePath: string) => ['-f', imagePath], // Image/file attachment: opencode run -f /path/to/image.png -- "prompt" + noPromptSeparator: true, // OpenCode doesn't need '--' before prompt - yargs handles positional args + // Default env vars: enable YOLO mode (allow all permissions including external_directory) + // Users can override by setting customEnvVars in agent config + defaultEnvVars: { + OPENCODE_CONFIG_CONTENT: '{"permission":{"*":"allow","external_directory":"allow"}}', + }, + // Agent-specific configuration options shown in UI + configOptions: [ + { + key: 'model', + type: 'text', + label: 'Model', + description: + 'Model to use (e.g., "ollama/qwen3:8b", "anthropic/claude-sonnet-4-20250514"). Leave empty for default.', + default: '', // Empty string means use OpenCode's default model + argBuilder: (value: string) => { + // Only add --model arg if a model is specified + if (value && value.trim()) { + return ['--model', value.trim()]; + } + return []; + }, + }, + { + key: 'contextWindow', + type: 'number', + label: 'Context Window Size', + description: + 'Maximum context window size in tokens. Required for context usage display. Varies by model (e.g., 400000 for Claude/GPT-5.2, 128000 for GPT-4o).', + default: 128000, // Default for common models (GPT-4, etc.) 
+ }, + ], + }, + { + id: 'factory-droid', + name: 'Factory Droid', + binaryName: 'droid', + command: 'droid', + args: [], // Base args for interactive mode (none) + requiresPty: false, // Batch mode uses child process + + // Batch mode: droid exec [options] "prompt" + batchModePrefix: ['exec'], + // Always skip permissions in batch mode (like Claude Code's --dangerously-skip-permissions) + // Maestro requires full access to work properly + batchModeArgs: ['--skip-permissions-unsafe'], + + // JSON output for parsing + jsonOutputArgs: ['-o', 'stream-json'], + + // Session resume: -s (requires a prompt) + resumeArgs: (sessionId: string) => ['-s', sessionId], + + // Read-only mode is DEFAULT in droid exec (no flag needed) + readOnlyArgs: [], + + // YOLO mode (same as batchModeArgs, kept for explicit yoloMode requests) + yoloModeArgs: ['--skip-permissions-unsafe'], + + // Working directory + workingDirArgs: (dir: string) => ['--cwd', dir], + + // File/image input + imageArgs: (imagePath: string) => ['-f', imagePath], + + // Prompt is positional argument (no separator needed) + noPromptSeparator: true, + + // Default env vars - don't set NO_COLOR as it conflicts with FORCE_COLOR + defaultEnvVars: {}, + + // UI config options + // Model IDs from droid CLI (exact IDs required) + // NOTE: autonomyLevel is NOT configurable - Maestro always uses --skip-permissions-unsafe + // which conflicts with --auto. This matches Claude Code's behavior. 
+ configOptions: [ + { + key: 'model', + type: 'select', + label: 'Model', + description: 'Model to use for Factory Droid', + // Model IDs from `droid exec --help` + options: [ + '', // Empty = use droid's default + // OpenAI models + 'gpt-5.1', + 'gpt-5.1-codex', + 'gpt-5.1-codex-max', + 'gpt-5.2', + // Claude models + 'claude-sonnet-4-5-20250929', + 'claude-opus-4-5-20251101', + 'claude-haiku-4-5-20251001', + // Google models + 'gemini-3-pro-preview', + ], + default: '', // Empty = use droid's default + argBuilder: (value: string) => (value && value.trim() ? ['-m', value.trim()] : []), + }, + { + key: 'reasoningEffort', + type: 'select', + label: 'Reasoning Effort', + description: 'How much the model should reason before responding', + options: ['', 'low', 'medium', 'high'], + default: '', // Empty = use droid's default reasoning + argBuilder: (value: string) => (value && value.trim() ? ['-r', value.trim()] : []), + }, + { + key: 'contextWindow', + type: 'number', + label: 'Context Window Size', + description: 'Maximum context window in tokens (for UI display)', + default: 200000, + }, + ], + }, + { + id: 'aider', + name: 'Aider', + binaryName: 'aider', + command: 'aider', + args: [], // Base args (placeholder - to be configured when implemented) + }, +]; + +/** + * Get an agent definition by ID (without runtime detection state) + */ +export function getAgentDefinition(agentId: string): AgentDefinition | undefined { + return AGENT_DEFINITIONS.find((def) => def.id === agentId); +} + +/** + * Get all agent IDs + */ +export function getAgentIds(): string[] { + return AGENT_DEFINITIONS.map((def) => def.id); +} + +/** + * Get all visible (non-hidden) agent definitions + */ +export function getVisibleAgentDefinitions(): AgentDefinition[] { + return AGENT_DEFINITIONS.filter((def) => !def.hidden); +} diff --git a/src/main/agents/detector.ts b/src/main/agents/detector.ts new file mode 100644 index 00000000..98c09478 --- /dev/null +++ b/src/main/agents/detector.ts @@ -0,0 
+1,288 @@ +/** + * Agent Detection and Configuration Manager + * + * Responsibilities: + * - Detects installed agents via file system probing and PATH resolution + * - Manages agent configuration and capability metadata + * - Caches detection results for performance + * - Discovers available models for agents that support model selection + * + * Model Discovery: + * - Model lists are cached for 5 minutes (configurable) to balance freshness and performance + * - Each agent implements its own model discovery command + * - Cache can be manually cleared or bypassed with forceRefresh flag + */ + +import * as path from 'path'; +import { execFileNoThrow } from '../utils/execFile'; +import { logger } from '../utils/logger'; +import { getAgentCapabilities } from './capabilities'; +import { checkBinaryExists, checkCustomPath, getExpandedEnv } from './path-prober'; +import { AGENT_DEFINITIONS, type AgentConfig } from './definitions'; + +const LOG_CONTEXT = 'AgentDetector'; + +// ============ Agent Detector Class ============ + +/** Default cache TTL: 5 minutes (model lists don't change frequently) */ +const DEFAULT_MODEL_CACHE_TTL_MS = 5 * 60 * 1000; + +export class AgentDetector { + private cachedAgents: AgentConfig[] | null = null; + private detectionInProgress: Promise<AgentConfig[]> | null = null; + private customPaths: Record<string, string> = {}; + // Cache for model discovery results: agentId -> { models, timestamp } + private modelCache: Map<string, { models: string[]; timestamp: number }> = new Map(); + // Configurable cache TTL (useful for testing or different environments) + private readonly modelCacheTtlMs: number; + + /** + * Create an AgentDetector instance + * @param modelCacheTtlMs - Model cache TTL in milliseconds (default: 5 minutes) + */ + constructor(modelCacheTtlMs: number = DEFAULT_MODEL_CACHE_TTL_MS) { + this.modelCacheTtlMs = modelCacheTtlMs; + } + + /** + * Set custom paths for agents (from user configuration) + */ + setCustomPaths(paths: Record<string, string>): void { + this.customPaths = paths; + // Clear cache when custom paths change +
this.cachedAgents = null; + } + + /** + * Get the current custom paths + */ + getCustomPaths(): Record<string, string> { + return { ...this.customPaths }; + } + + /** + * Detect which agents are available on the system + * Uses promise deduplication to prevent parallel detection when multiple calls arrive simultaneously + */ + async detectAgents(): Promise<AgentConfig[]> { + if (this.cachedAgents) { + return this.cachedAgents; + } + + // If detection is already in progress, return the same promise to avoid parallel runs + if (this.detectionInProgress) { + return this.detectionInProgress; + } + + // Start detection and track the promise + this.detectionInProgress = this.doDetectAgents(); + try { + return await this.detectionInProgress; + } finally { + this.detectionInProgress = null; + } + } + + /** + * Internal method that performs the actual agent detection + */ + private async doDetectAgents(): Promise<AgentConfig[]> { + const agents: AgentConfig[] = []; + const expandedEnv = getExpandedEnv(); + + logger.info(`Agent detection starting.
PATH: ${expandedEnv.PATH}`, LOG_CONTEXT); + + for (const agentDef of AGENT_DEFINITIONS) { + const customPath = this.customPaths[agentDef.id]; + let detection: { exists: boolean; path?: string }; + + // If user has specified a custom path, check that first + if (customPath) { + detection = await checkCustomPath(customPath); + if (detection.exists) { + logger.info( + `Agent "${agentDef.name}" found at custom path: ${detection.path}`, + LOG_CONTEXT + ); + } else { + logger.warn(`Agent "${agentDef.name}" custom path not valid: ${customPath}`, LOG_CONTEXT); + // Fall back to PATH detection + detection = await checkBinaryExists(agentDef.binaryName); + if (detection.exists) { + logger.info( + `Agent "${agentDef.name}" found in PATH at: ${detection.path}`, + LOG_CONTEXT + ); + } + } + } else { + detection = await checkBinaryExists(agentDef.binaryName); + + if (detection.exists) { + logger.info(`Agent "${agentDef.name}" found at: ${detection.path}`, LOG_CONTEXT); + } else if (agentDef.binaryName !== 'bash') { + // Don't log bash as missing since it's always present, log others as warnings + logger.warn( + `Agent "${agentDef.name}" (binary: ${agentDef.binaryName}) not found. ` + + `Searched in PATH: ${expandedEnv.PATH}`, + LOG_CONTEXT + ); + } + } + + agents.push({ + ...agentDef, + available: detection.exists, + path: detection.path, + customPath: customPath || undefined, + capabilities: getAgentCapabilities(agentDef.id), + }); + } + + const availableAgents = agents.filter((a) => a.available); + const isWindows = process.platform === 'win32'; + + // On Windows, log detailed path info to help debug shell execution issues + if (isWindows) { + logger.info(`Agent detection complete (Windows)`, LOG_CONTEXT, { + platform: process.platform, + agents: availableAgents.map((a) => ({ + id: a.id, + name: a.name, + path: a.path, + pathExtension: a.path ? path.extname(a.path) : 'none', + // .exe = direct execution, .cmd = requires shell + willUseShell: a.path + ? 
a.path.toLowerCase().endsWith('.cmd') || + a.path.toLowerCase().endsWith('.bat') || + !path.extname(a.path) + : true, + })), + }); + } else { + logger.info( + `Agent detection complete. Available: ${availableAgents.map((a) => a.name).join(', ') || 'none'}`, + LOG_CONTEXT + ); + } + + this.cachedAgents = agents; + return agents; + } + + /** + * Get a specific agent by ID + */ + async getAgent(agentId: string): Promise<AgentConfig | null> { + const agents = await this.detectAgents(); + return agents.find((a) => a.id === agentId) || null; + } + + /** + * Clear the cache (useful if PATH changes) + */ + clearCache(): void { + this.cachedAgents = null; + } + + /** + * Clear the model cache for a specific agent or all agents + */ + clearModelCache(agentId?: string): void { + if (agentId) { + this.modelCache.delete(agentId); + } else { + this.modelCache.clear(); + } + } + + /** + * Discover available models for an agent that supports model selection. + * Returns cached results if available and not expired. + * + * @param agentId - The agent identifier (e.g., 'opencode') + * @param forceRefresh - If true, bypass cache and fetch fresh model list + * @returns Array of model names, or empty array if agent doesn't support model discovery + */ + async discoverModels(agentId: string, forceRefresh = false): Promise<string[]> { + const agent = await this.getAgent(agentId); + + if (!agent || !agent.available) { + logger.warn(`Cannot discover models: agent ${agentId} not available`, LOG_CONTEXT); + return []; + } + + // Check if agent supports model selection + if (!agent.capabilities.supportsModelSelection) { + logger.debug(`Agent ${agentId} does not support model selection`, LOG_CONTEXT); + return []; + } + + // Check cache unless force refresh + if (!forceRefresh) { + const cached = this.modelCache.get(agentId); + if (cached && Date.now() - cached.timestamp < this.modelCacheTtlMs) { + logger.debug(`Returning cached models for ${agentId}`, LOG_CONTEXT); + return cached.models; + } + } + + // Run agent-specific
model discovery command + const models = await this.runModelDiscovery(agentId, agent); + + // Cache the results + this.modelCache.set(agentId, { models, timestamp: Date.now() }); + + return models; + } + + /** + * Run the agent-specific model discovery command. + * Each agent may have a different way to list available models. + * + * This method catches all exceptions to ensure graceful degradation + * when model discovery fails for any reason. + */ + private async runModelDiscovery(agentId: string, agent: AgentConfig): Promise { + const env = getExpandedEnv(); + const command = agent.path || agent.command; + + try { + // Agent-specific model discovery commands + switch (agentId) { + case 'opencode': { + // OpenCode: `opencode models` returns one model per line + const result = await execFileNoThrow(command, ['models'], undefined, env); + + if (result.exitCode !== 0) { + logger.warn( + `Model discovery failed for ${agentId}: exit code ${result.exitCode}`, + LOG_CONTEXT, + { stderr: result.stderr } + ); + return []; + } + + // Parse output: one model per line (e.g., "opencode/gpt-5-nano", "ollama/gpt-oss:latest") + const models = result.stdout + .split('\n') + .map((line) => line.trim()) + .filter((line) => line.length > 0); + + logger.info(`Discovered ${models.length} models for ${agentId}`, LOG_CONTEXT, { + models, + }); + return models; + } + + default: + // For agents without model discovery implemented, return empty array + logger.debug(`No model discovery implemented for ${agentId}`, LOG_CONTEXT); + return []; + } + } catch (error) { + logger.error(`Model discovery threw exception for ${agentId}`, LOG_CONTEXT, { error }); + return []; + } + } +} diff --git a/src/main/agents/index.ts b/src/main/agents/index.ts new file mode 100644 index 00000000..4748060d --- /dev/null +++ b/src/main/agents/index.ts @@ -0,0 +1,68 @@ +/** + * Agents Module + * + * This module consolidates all agent-related functionality: + * - Agent detection and configuration + * - Agent 
definitions and types + * - Agent capabilities + * - Session storage interface + * - Binary path probing + * + * Usage: + * ```typescript + * import { AgentDetector, AGENT_DEFINITIONS, getAgentCapabilities } from './agents'; + * ``` + */ + +// ============ Capabilities ============ +export { + type AgentCapabilities, + DEFAULT_CAPABILITIES, + AGENT_CAPABILITIES, + getAgentCapabilities, + hasCapability, +} from './capabilities'; + +// ============ Definitions ============ +export { + type AgentConfigOption, + type AgentConfig, + type AgentDefinition, + AGENT_DEFINITIONS, + getAgentDefinition, + getAgentIds, + getVisibleAgentDefinitions, +} from './definitions'; + +// ============ Detector ============ +export { AgentDetector } from './detector'; + +// ============ Path Prober ============ +export { + type BinaryDetectionResult, + getExpandedEnv, + checkCustomPath, + probeWindowsPaths, + probeUnixPaths, + checkBinaryExists, +} from './path-prober'; + +// ============ Session Storage ============ +export { + type AgentSessionOrigin, + type SessionMessage, + type AgentSessionInfo, + type PaginatedSessionsResult, + type SessionMessagesResult, + type SessionSearchResult, + type SessionSearchMode, + type SessionListOptions, + type SessionReadOptions, + type SessionOriginInfo, + type AgentSessionStorage, + registerSessionStorage, + getSessionStorage, + hasSessionStorage, + getAllSessionStorages, + clearStorageRegistry, +} from './session-storage'; diff --git a/src/main/agents/path-prober.ts b/src/main/agents/path-prober.ts new file mode 100644 index 00000000..07c3278d --- /dev/null +++ b/src/main/agents/path-prober.ts @@ -0,0 +1,534 @@ +/** + * Binary Path Detection Utilities + * + * Packaged Electron apps don't inherit shell environment, so we need to + * probe known installation paths directly. + * + * Detection Strategy: + * 1. Direct file system probing of known installation paths (fastest, most reliable) + * 2. 
Fall back to which/where command with expanded PATH + * + * This two-tier approach ensures we find binaries even when: + * - PATH is not inherited correctly + * - Binaries are in non-standard locations + * - Shell initialization files (.bashrc, .zshrc) aren't sourced + */ + +import * as os from 'os'; +import * as fs from 'fs'; +import * as path from 'path'; +import { execFileNoThrow } from '../utils/execFile'; +import { logger } from '../utils/logger'; +import { expandTilde, detectNodeVersionManagerBinPaths } from '../../shared/pathUtils'; + +const LOG_CONTEXT = 'PathProber'; + +// ============ Types ============ + +export interface BinaryDetectionResult { + exists: boolean; + path?: string; +} + +// ============ Environment Expansion ============ + +/** + * Build an expanded PATH that includes common binary installation locations. + * This is necessary because packaged Electron apps don't inherit shell environment. + */ +export function getExpandedEnv(): NodeJS.ProcessEnv { + const home = os.homedir(); + const env = { ...process.env }; + const isWindows = process.platform === 'win32'; + + // Platform-specific paths + let additionalPaths: string[]; + + if (isWindows) { + // Windows-specific paths + const appData = process.env.APPDATA || path.join(home, 'AppData', 'Roaming'); + const localAppData = process.env.LOCALAPPDATA || path.join(home, 'AppData', 'Local'); + const programFiles = process.env.ProgramFiles || 'C:\\Program Files'; + const programFilesX86 = process.env['ProgramFiles(x86)'] || 'C:\\Program Files (x86)'; + + additionalPaths = [ + // Claude Code PowerShell installer (irm https://claude.ai/install.ps1 | iex) + // This is the primary installation method - installs claude.exe to ~/.local/bin + path.join(home, '.local', 'bin'), + // Claude Code winget install (winget install --id Anthropic.ClaudeCode) + path.join(localAppData, 'Microsoft', 'WinGet', 'Links'), + path.join(programFiles, 'WinGet', 'Links'), + path.join(localAppData, 'Microsoft', 'WinGet', 
'Packages'), + path.join(programFiles, 'WinGet', 'Packages'), + // npm global installs (Claude Code, Codex CLI, Gemini CLI) + path.join(appData, 'npm'), + path.join(localAppData, 'npm'), + // Claude Code CLI install location (npm global) + path.join(appData, 'npm', 'node_modules', '@anthropic-ai', 'claude-code', 'cli'), + // Codex CLI install location (npm global) + path.join(appData, 'npm', 'node_modules', '@openai', 'codex', 'bin'), + // User local programs + path.join(localAppData, 'Programs'), + path.join(localAppData, 'Microsoft', 'WindowsApps'), + // Python/pip user installs (for Aider) + path.join(appData, 'Python', 'Scripts'), + path.join(localAppData, 'Programs', 'Python', 'Python312', 'Scripts'), + path.join(localAppData, 'Programs', 'Python', 'Python311', 'Scripts'), + path.join(localAppData, 'Programs', 'Python', 'Python310', 'Scripts'), + // Git for Windows (provides bash, common tools) + path.join(programFiles, 'Git', 'cmd'), + path.join(programFiles, 'Git', 'bin'), + path.join(programFiles, 'Git', 'usr', 'bin'), + path.join(programFilesX86, 'Git', 'cmd'), + path.join(programFilesX86, 'Git', 'bin'), + // Node.js + path.join(programFiles, 'nodejs'), + path.join(localAppData, 'Programs', 'node'), + // Scoop package manager (OpenCode, other tools) + path.join(home, 'scoop', 'shims'), + path.join(home, 'scoop', 'apps', 'opencode', 'current'), + // Chocolatey (OpenCode, other tools) + path.join(process.env.ChocolateyInstall || 'C:\\ProgramData\\chocolatey', 'bin'), + // Go binaries (some tools installed via 'go install') + path.join(home, 'go', 'bin'), + // Windows system paths + path.join(process.env.SystemRoot || 'C:\\Windows', 'System32'), + path.join(process.env.SystemRoot || 'C:\\Windows'), + ]; + } else { + // Unix-like paths (macOS/Linux) + additionalPaths = [ + '/opt/homebrew/bin', // Homebrew on Apple Silicon + '/opt/homebrew/sbin', + '/usr/local/bin', // Homebrew on Intel, common install location + '/usr/local/sbin', + `${home}/.local/bin`, // 
User local installs (pip, etc.) + `${home}/.npm-global/bin`, // npm global with custom prefix + `${home}/bin`, // User bin directory + `${home}/.claude/local`, // Claude local install location + `${home}/.opencode/bin`, // OpenCode installer default location + '/usr/bin', + '/bin', + '/usr/sbin', + '/sbin', + ]; + } + + const currentPath = env.PATH || ''; + // Use platform-appropriate path delimiter + const pathParts = currentPath.split(path.delimiter); + + // Add paths that aren't already present + for (const p of additionalPaths) { + if (!pathParts.includes(p)) { + pathParts.unshift(p); + } + } + + env.PATH = pathParts.join(path.delimiter); + return env; +} + +// ============ Custom Path Validation ============ + +/** + * Check if a custom path points to a valid executable + * On Windows, also tries .cmd and .exe extensions if the path doesn't exist as-is + */ +export async function checkCustomPath(customPath: string): Promise { + const isWindows = process.platform === 'win32'; + + // Expand tilde to home directory (Node.js fs doesn't understand ~) + const expandedPath = expandTilde(customPath); + + // Helper to check if a specific path exists and is a file + const checkPath = async (pathToCheck: string): Promise => { + try { + const stats = await fs.promises.stat(pathToCheck); + return stats.isFile(); + } catch { + return false; + } + }; + + try { + // First, try the exact path provided (with tilde expanded) + if (await checkPath(expandedPath)) { + // Check if file is executable (on Unix systems) + if (!isWindows) { + try { + await fs.promises.access(expandedPath, fs.constants.X_OK); + } catch { + logger.warn(`Custom path exists but is not executable: ${customPath}`, LOG_CONTEXT); + return { exists: false }; + } + } + // Return the expanded path so it can be used directly + return { exists: true, path: expandedPath }; + } + + // On Windows, if the exact path doesn't exist, try with .cmd and .exe extensions + if (isWindows) { + const lowerPath = 
expandedPath.toLowerCase(); + // Only try extensions if the path doesn't already have one + if (!lowerPath.endsWith('.cmd') && !lowerPath.endsWith('.exe')) { + // Try .exe first (preferred), then .cmd + const exePath = expandedPath + '.exe'; + if (await checkPath(exePath)) { + logger.debug(`Custom path resolved with .exe extension`, LOG_CONTEXT, { + original: customPath, + resolved: exePath, + }); + return { exists: true, path: exePath }; + } + + const cmdPath = expandedPath + '.cmd'; + if (await checkPath(cmdPath)) { + logger.debug(`Custom path resolved with .cmd extension`, LOG_CONTEXT, { + original: customPath, + resolved: cmdPath, + }); + return { exists: true, path: cmdPath }; + } + } + } + + return { exists: false }; + } catch (error) { + logger.debug(`Error checking custom path: ${customPath}`, LOG_CONTEXT, { error }); + return { exists: false }; + } +} + +// ============ Windows Path Probing ============ + +/** + * Known installation paths for binaries on Windows + */ +function getWindowsKnownPaths(binaryName: string): string[] { + const home = os.homedir(); + const appData = process.env.APPDATA || path.join(home, 'AppData', 'Roaming'); + const localAppData = process.env.LOCALAPPDATA || path.join(home, 'AppData', 'Local'); + const programFiles = process.env.ProgramFiles || 'C:\\Program Files'; + + // Common path builders to reduce duplication across binary definitions + const npmGlobal = (bin: string) => [ + path.join(appData, 'npm', `${bin}.cmd`), + path.join(localAppData, 'npm', `${bin}.cmd`), + ]; + const localBin = (bin: string) => [path.join(home, '.local', 'bin', `${bin}.exe`)]; + const wingetLinks = (bin: string) => [ + path.join(localAppData, 'Microsoft', 'WinGet', 'Links', `${bin}.exe`), + path.join(programFiles, 'WinGet', 'Links', `${bin}.exe`), + ]; + const goBin = (bin: string) => [path.join(home, 'go', 'bin', `${bin}.exe`)]; + const pythonScripts = (bin: string) => [ + path.join(appData, 'Python', 'Scripts', `${bin}.exe`), + 
path.join(localAppData, 'Programs', 'Python', 'Python312', 'Scripts', `${bin}.exe`), + path.join(localAppData, 'Programs', 'Python', 'Python311', 'Scripts', `${bin}.exe`), + path.join(localAppData, 'Programs', 'Python', 'Python310', 'Scripts', `${bin}.exe`), + ]; + + // Define known installation paths for each binary, in priority order + // Prefer .exe (standalone installers) over .cmd (npm wrappers) + const knownPaths: Record = { + claude: [ + // PowerShell installer (primary method) - installs claude.exe + ...localBin('claude'), + // Winget installation + ...wingetLinks('claude'), + // npm global installation - creates .cmd wrapper + ...npmGlobal('claude'), + // WindowsApps (Microsoft Store style) + path.join(localAppData, 'Microsoft', 'WindowsApps', 'claude.exe'), + ], + codex: [ + // npm global installation (primary method for Codex) + ...npmGlobal('codex'), + // Possible standalone in future + ...localBin('codex'), + ], + opencode: [ + // Scoop installation (recommended for OpenCode) + path.join(home, 'scoop', 'shims', 'opencode.exe'), + path.join(home, 'scoop', 'apps', 'opencode', 'current', 'opencode.exe'), + // Chocolatey installation + path.join( + process.env.ChocolateyInstall || 'C:\\ProgramData\\chocolatey', + 'bin', + 'opencode.exe' + ), + // Go install + ...goBin('opencode'), + // npm (has known issues on Windows, but check anyway) + ...npmGlobal('opencode'), + ], + gemini: [ + // npm global installation + ...npmGlobal('gemini'), + ], + aider: [ + // pip installation + ...pythonScripts('aider'), + ], + }; + + return knownPaths[binaryName] || []; +} + +/** + * On Windows, directly probe known installation paths for a binary. + * This is more reliable than `where` command which may fail in packaged Electron apps. + * Returns the first existing path found (in priority order), preferring .exe over .cmd. + * + * Uses parallel probing for performance on slow file systems. 
+ */ +export async function probeWindowsPaths(binaryName: string): Promise<string | null> { + const pathsToCheck = getWindowsKnownPaths(binaryName); + + if (pathsToCheck.length === 0) { + return null; + } + + // Check all paths in parallel for performance + const results = await Promise.allSettled( + pathsToCheck.map(async (probePath) => { + await fs.promises.access(probePath, fs.constants.F_OK); + return probePath; + }) + ); + + // Return the first successful result (maintains priority order from pathsToCheck) + for (let i = 0; i < results.length; i++) { + const result = results[i]; + if (result.status === 'fulfilled') { + logger.debug(`Direct probe found ${binaryName}`, LOG_CONTEXT, { path: result.value }); + return result.value; + } + } + + return null; +} + +// ============ Unix Path Probing ============ + +/** + * Known installation paths for binaries on Unix-like systems + */ +function getUnixKnownPaths(binaryName: string): string[] { + const home = os.homedir(); + + // Get dynamic paths from Node version managers (nvm, fnm, volta, etc.)
+ const versionManagerPaths = detectNodeVersionManagerBinPaths(); + + // Common path builders to reduce duplication across binary definitions + const homebrew = (bin: string) => [`/opt/homebrew/bin/${bin}`, `/usr/local/bin/${bin}`]; + const localBin = (bin: string) => [path.join(home, '.local', 'bin', bin)]; + const npmGlobal = (bin: string) => [path.join(home, '.npm-global', 'bin', bin)]; + const nodeVersionManagers = (bin: string) => versionManagerPaths.map((p) => path.join(p, bin)); + + // Define known installation paths for each binary, in priority order + const knownPaths: Record = { + claude: [ + // Claude Code default installation location + path.join(home, '.claude', 'local', 'claude'), + // User local bin (pip, manual installs) + ...localBin('claude'), + // Homebrew (Apple Silicon + Intel) + ...homebrew('claude'), + // npm global with custom prefix + ...npmGlobal('claude'), + // User bin directory + path.join(home, 'bin', 'claude'), + // Node version managers (nvm, fnm, volta, etc.) + ...nodeVersionManagers('claude'), + ], + codex: [ + // User local bin + ...localBin('codex'), + // Homebrew paths + ...homebrew('codex'), + // npm global + ...npmGlobal('codex'), + // Node version managers (nvm, fnm, volta, etc.) + ...nodeVersionManagers('codex'), + ], + opencode: [ + // OpenCode installer default location + path.join(home, '.opencode', 'bin', 'opencode'), + // Go install location + path.join(home, 'go', 'bin', 'opencode'), + // User local bin + ...localBin('opencode'), + // Homebrew paths + ...homebrew('opencode'), + // Node version managers (nvm, fnm, volta, etc.) + ...nodeVersionManagers('opencode'), + ], + gemini: [ + // npm global paths + ...npmGlobal('gemini'), + // Homebrew paths + ...homebrew('gemini'), + // Node version managers (nvm, fnm, volta, etc.) 
+ ...nodeVersionManagers('gemini'), + ], + aider: [ + // pip installation + ...localBin('aider'), + // Homebrew paths + ...homebrew('aider'), + // Node version managers (in case installed via npm) + ...nodeVersionManagers('aider'), + ], + }; + + return knownPaths[binaryName] || []; +} + +/** + * On macOS/Linux, directly probe known installation paths for a binary. + * This is necessary because packaged Electron apps don't inherit shell aliases, + * and 'which' may fail to find binaries in non-standard locations. + * Returns the first existing executable path found (in priority order). + * + * Uses parallel probing for performance on slow file systems. + */ +export async function probeUnixPaths(binaryName: string): Promise { + const pathsToCheck = getUnixKnownPaths(binaryName); + + if (pathsToCheck.length === 0) { + return null; + } + + // Check all paths in parallel for performance + const results = await Promise.allSettled( + pathsToCheck.map(async (probePath) => { + // Check both existence and executability + await fs.promises.access(probePath, fs.constants.F_OK | fs.constants.X_OK); + return probePath; + }) + ); + + // Return the first successful result (maintains priority order from pathsToCheck) + for (let i = 0; i < results.length; i++) { + const result = results[i]; + if (result.status === 'fulfilled') { + logger.debug(`Direct probe found ${binaryName}`, LOG_CONTEXT, { path: result.value }); + return result.value; + } + } + + return null; +} + +// ============ Binary Detection ============ + +/** + * Check if a binary exists in PATH or known installation locations. + * On Windows, this also handles .cmd and .exe extensions properly. + * + * Detection order: + * 1. Direct probe of known installation paths (most reliable) + * 2. 
Fall back to which/where command with expanded PATH + */ +export async function checkBinaryExists(binaryName: string): Promise { + const isWindows = process.platform === 'win32'; + + // First try direct file probing of known installation paths + // This is more reliable than which/where in packaged Electron apps + if (isWindows) { + const probedPath = await probeWindowsPaths(binaryName); + if (probedPath) { + return { exists: true, path: probedPath }; + } + logger.debug(`Direct probe failed for ${binaryName}, falling back to where`, LOG_CONTEXT); + } else { + // macOS/Linux: probe known paths first + const probedPath = await probeUnixPaths(binaryName); + if (probedPath) { + return { exists: true, path: probedPath }; + } + logger.debug(`Direct probe failed for ${binaryName}, falling back to which`, LOG_CONTEXT); + } + + try { + // Use 'which' on Unix-like systems, 'where' on Windows + const command = isWindows ? 'where' : 'which'; + + // Use expanded PATH to find binaries in common installation locations + // This is critical for packaged Electron apps which don't inherit shell env + const env = getExpandedEnv(); + const result = await execFileNoThrow(command, [binaryName], undefined, env); + + if (result.exitCode === 0 && result.stdout.trim()) { + // Get all matches (Windows 'where' can return multiple) + // Handle both Unix (\n) and Windows (\r\n) line endings + const matches = result.stdout + .trim() + .split(/\r?\n/) + .map((p) => p.trim()) + .filter((p) => p); + + if (process.platform === 'win32' && matches.length > 0) { + // On Windows, prefer .exe over .cmd over extensionless + // This helps with proper execution handling + const exeMatch = matches.find((p) => p.toLowerCase().endsWith('.exe')); + const cmdMatch = matches.find((p) => p.toLowerCase().endsWith('.cmd')); + + // Return the best match: .exe > .cmd > first result + let bestMatch = exeMatch || cmdMatch || matches[0]; + + // If the first match doesn't have an extension, check if .cmd or .exe version 
exists + // This handles cases where 'where' returns a path without extension + if ( + !bestMatch.toLowerCase().endsWith('.exe') && + !bestMatch.toLowerCase().endsWith('.cmd') + ) { + const cmdPath = bestMatch + '.cmd'; + const exePath = bestMatch + '.exe'; + + // Check if the .exe or .cmd version exists + try { + await fs.promises.access(exePath, fs.constants.F_OK); + bestMatch = exePath; + logger.debug(`Found .exe version of ${binaryName}`, LOG_CONTEXT, { + path: exePath, + }); + } catch { + try { + await fs.promises.access(cmdPath, fs.constants.F_OK); + bestMatch = cmdPath; + logger.debug(`Found .cmd version of ${binaryName}`, LOG_CONTEXT, { + path: cmdPath, + }); + } catch { + // Neither .exe nor .cmd exists, use the original path + } + } + } + + logger.debug(`Windows binary detection for ${binaryName}`, LOG_CONTEXT, { + allMatches: matches, + selectedMatch: bestMatch, + isCmd: bestMatch.toLowerCase().endsWith('.cmd'), + isExe: bestMatch.toLowerCase().endsWith('.exe'), + }); + + return { + exists: true, + path: bestMatch, + }; + } + + return { + exists: true, + path: matches[0], // First match for Unix + }; + } + + return { exists: false }; + } catch { + return { exists: false }; + } +} diff --git a/src/main/agent-session-storage.ts b/src/main/agents/session-storage.ts similarity index 98% rename from src/main/agent-session-storage.ts rename to src/main/agents/session-storage.ts index 8ffc7fee..79c438f8 100644 --- a/src/main/agent-session-storage.ts +++ b/src/main/agents/session-storage.ts @@ -14,8 +14,8 @@ * ``` */ -import type { ToolType, SshRemoteConfig } from '../shared/types'; -import { logger } from './utils/logger'; +import type { ToolType, SshRemoteConfig } from '../../shared/types'; +import { logger } from '../utils/logger'; const LOG_CONTEXT = '[AgentSessionStorage]'; diff --git a/src/main/debug-package/collectors/agents.ts b/src/main/debug-package/collectors/agents.ts index 22e21b42..7ba24c72 100644 --- a/src/main/debug-package/collectors/agents.ts 
+++ b/src/main/debug-package/collectors/agents.ts @@ -6,7 +6,7 @@ * - Custom args/env vars show only whether they're set, not values */ -import { AgentDetector, AgentCapabilities } from '../../agent-detector'; +import { AgentDetector, type AgentCapabilities } from '../../agents'; import { sanitizePath } from './settings'; export interface AgentInfo { diff --git a/src/main/debug-package/index.ts b/src/main/debug-package/index.ts index 5e49dc44..37a16700 100644 --- a/src/main/debug-package/index.ts +++ b/src/main/debug-package/index.ts @@ -29,7 +29,7 @@ import { } from './collectors/windows-diagnostics'; import { createZipPackage, PackageContents } from './packager'; import { logger } from '../utils/logger'; -import { AgentDetector } from '../agent-detector'; +import { AgentDetector } from '../agents'; import { ProcessManager } from '../process-manager'; import { WebServer } from '../web-server'; import Store from 'electron-store'; diff --git a/src/main/group-chat/group-chat-agent.ts b/src/main/group-chat/group-chat-agent.ts index dc211ae1..9afaaca0 100644 --- a/src/main/group-chat/group-chat-agent.ts +++ b/src/main/group-chat/group-chat-agent.ts @@ -19,7 +19,7 @@ import { } from './group-chat-storage'; import { appendToLog } from './group-chat-log'; import { IProcessManager, isModeratorActive } from './group-chat-moderator'; -import type { AgentDetector } from '../agent-detector'; +import type { AgentDetector } from '../agents'; import { buildAgentArgs, applyAgentConfigOverrides, diff --git a/src/main/group-chat/group-chat-router.ts b/src/main/group-chat/group-chat-router.ts index 8dc2d93f..032a5062 100644 --- a/src/main/group-chat/group-chat-router.ts +++ b/src/main/group-chat/group-chat-router.ts @@ -31,7 +31,7 @@ import { getModeratorSynthesisPrompt, } from './group-chat-moderator'; import { addParticipant } from './group-chat-agent'; -import { AgentDetector } from '../agent-detector'; +import { AgentDetector } from '../agents'; import { powerManager } from 
'../power-manager'; import { buildAgentArgs, diff --git a/src/main/index.ts b/src/main/index.ts index f9192f38..0ed72a9c 100644 --- a/src/main/index.ts +++ b/src/main/index.ts @@ -6,7 +6,7 @@ import crypto from 'crypto'; // which causes "Cannot read properties of undefined (reading 'getAppPath')" errors import { ProcessManager } from './process-manager'; import { WebServer } from './web-server'; -import { AgentDetector } from './agent-detector'; +import { AgentDetector } from './agents'; import { logger } from './utils/logger'; import { tunnelManager } from './tunnel-manager'; import { powerManager } from './power-manager'; @@ -53,7 +53,7 @@ import { cleanupAllGroomingSessions, getActiveGroomingSessionCount, } from './ipc/handlers'; -import { initializeStatsDB, closeStatsDB, getStatsDB } from './stats-db'; +import { initializeStatsDB, closeStatsDB, getStatsDB } from './stats'; import { groupChatEmitters } from './ipc/handlers/groupChat'; import { routeModeratorResponse, diff --git a/src/main/ipc/handlers/agentSessions.ts b/src/main/ipc/handlers/agentSessions.ts index 30505624..7a231983 100644 --- a/src/main/ipc/handlers/agentSessions.ts +++ b/src/main/ipc/handlers/agentSessions.ts @@ -26,7 +26,7 @@ import { getSessionStorage, hasSessionStorage, getAllSessionStorages, -} from '../../agent-session-storage'; +} from '../../agents'; import { calculateClaudeCost } from '../../utils/pricing'; import { loadGlobalStatsCache, @@ -43,7 +43,7 @@ import type { SessionSearchMode, SessionListOptions, SessionReadOptions, -} from '../../agent-session-storage'; +} from '../../agents'; import type { GlobalAgentStats, ProviderStats, SshRemoteConfig } from '../../../shared/types'; import type { MaestroSettings } from './persistence'; diff --git a/src/main/ipc/handlers/agents.ts b/src/main/ipc/handlers/agents.ts index c98630da..be35ae94 100644 --- a/src/main/ipc/handlers/agents.ts +++ b/src/main/ipc/handlers/agents.ts @@ -1,8 +1,7 @@ import { ipcMain } from 'electron'; import Store 
from 'electron-store'; import * as fs from 'fs'; -import { AgentDetector, AGENT_DEFINITIONS } from '../../agent-detector'; -import { getAgentCapabilities } from '../../agent-capabilities'; +import { AgentDetector, AGENT_DEFINITIONS, getAgentCapabilities } from '../../agents'; import { execFileNoThrow } from '../../utils/execFile'; import { logger } from '../../utils/logger'; import { diff --git a/src/main/ipc/handlers/context.ts b/src/main/ipc/handlers/context.ts index 0db8a85c..c427e818 100644 --- a/src/main/ipc/handlers/context.ts +++ b/src/main/ipc/handlers/context.ts @@ -20,10 +20,10 @@ import { requireDependency, CreateHandlerOptions, } from '../../utils/ipcHandler'; -import { getSessionStorage, type SessionMessagesResult } from '../../agent-session-storage'; +import { getSessionStorage, type SessionMessagesResult } from '../../agents'; import { groomContext, cancelAllGroomingSessions } from '../../utils/context-groomer'; import type { ProcessManager } from '../../process-manager'; -import type { AgentDetector } from '../../agent-detector'; +import type { AgentDetector } from '../../agents'; const LOG_CONTEXT = '[ContextMerge]'; diff --git a/src/main/ipc/handlers/debug.ts b/src/main/ipc/handlers/debug.ts index e6ef1a0c..f546a5ce 100644 --- a/src/main/ipc/handlers/debug.ts +++ b/src/main/ipc/handlers/debug.ts @@ -16,7 +16,7 @@ import { DebugPackageOptions, DebugPackageDependencies, } from '../../debug-package'; -import { AgentDetector } from '../../agent-detector'; +import { AgentDetector } from '../../agents'; import { ProcessManager } from '../../process-manager'; import { WebServer } from '../../web-server'; diff --git a/src/main/ipc/handlers/groupChat.ts b/src/main/ipc/handlers/groupChat.ts index 181f26d2..9becd893 100644 --- a/src/main/ipc/handlers/groupChat.ts +++ b/src/main/ipc/handlers/groupChat.ts @@ -64,7 +64,7 @@ import { import { routeUserMessage } from '../../group-chat/group-chat-router'; // Agent detector import -import { AgentDetector } from 
'../../agent-detector'; +import { AgentDetector } from '../../agents'; import { groomContext } from '../../utils/context-groomer'; import { v4 as uuidv4 } from 'uuid'; diff --git a/src/main/ipc/handlers/index.ts b/src/main/ipc/handlers/index.ts index f7e1b18e..d26c5310 100644 --- a/src/main/ipc/handlers/index.ts +++ b/src/main/ipc/handlers/index.ts @@ -51,7 +51,7 @@ import { registerNotificationsHandlers } from './notifications'; import { registerSymphonyHandlers, SymphonyHandlerDependencies } from './symphony'; import { registerAgentErrorHandlers } from './agent-error'; import { registerTabNamingHandlers, TabNamingHandlerDependencies } from './tabNaming'; -import { AgentDetector } from '../../agent-detector'; +import { AgentDetector } from '../../agents'; import { ProcessManager } from '../../process-manager'; import { WebServer } from '../../web-server'; import { tunnelManager as tunnelManagerInstance } from '../../tunnel-manager'; diff --git a/src/main/ipc/handlers/process.ts b/src/main/ipc/handlers/process.ts index bb79a1d2..b4c3cbc4 100644 --- a/src/main/ipc/handlers/process.ts +++ b/src/main/ipc/handlers/process.ts @@ -2,7 +2,7 @@ import { ipcMain, BrowserWindow } from 'electron'; import Store from 'electron-store'; import * as os from 'os'; import { ProcessManager } from '../../process-manager'; -import { AgentDetector } from '../../agent-detector'; +import { AgentDetector } from '../../agents'; import { logger } from '../../utils/logger'; import { isWebContentsAvailable } from '../../utils/safe-send'; import { diff --git a/src/main/ipc/handlers/stats.ts b/src/main/ipc/handlers/stats.ts index f9b3068e..7f962d0e 100644 --- a/src/main/ipc/handlers/stats.ts +++ b/src/main/ipc/handlers/stats.ts @@ -15,8 +15,7 @@ import { ipcMain, BrowserWindow } from 'electron'; import { logger } from '../../utils/logger'; import { withIpcErrorLogging, CreateHandlerOptions } from '../../utils/ipcHandler'; -import { getStatsDB, getInitializationResult, clearInitializationResult } 
from '../../stats-db'; -import { isWebContentsAvailable } from '../../utils/safe-send'; +import { getStatsDB } from '../../stats'; import { QueryEvent, AutoRunSession, @@ -59,7 +58,7 @@ function isStatsCollectionEnabled(settingsStore?: { get: (key: string) => unknow */ function broadcastStatsUpdate(getMainWindow: () => BrowserWindow | null): void { const mainWindow = getMainWindow(); - if (isWebContentsAvailable(mainWindow)) { + if (mainWindow && !mainWindow.isDestroyed()) { mainWindow.webContents.send('stats:updated'); } } @@ -244,15 +243,6 @@ export function registerStatsHandlers(deps: StatsHandlerDependencies): void { }) ); - // Get earliest stat timestamp (for UI display) - ipcMain.handle( - 'stats:get-earliest-timestamp', - withIpcErrorLogging(handlerOpts('getEarliestTimestamp'), async () => { - const db = getStatsDB(); - return db.getEarliestStatTimestamp(); - }) - ); - // Record session creation (launched) ipcMain.handle( 'stats:record-session-created', @@ -302,22 +292,4 @@ export function registerStatsHandlers(deps: StatsHandlerDependencies): void { return db.getSessionLifecycleEvents(range); }) ); - - // Get initialization result (for showing database reset notification) - // This returns info about whether the database was reset due to corruption - ipcMain.handle( - 'stats:get-initialization-result', - withIpcErrorLogging(handlerOpts('getInitializationResult'), async () => { - return getInitializationResult(); - }) - ); - - // Clear initialization result (after user has acknowledged the notification) - ipcMain.handle( - 'stats:clear-initialization-result', - withIpcErrorLogging(handlerOpts('clearInitializationResult'), async () => { - clearInitializationResult(); - return true; - }) - ); } diff --git a/src/main/ipc/handlers/tabNaming.ts b/src/main/ipc/handlers/tabNaming.ts index 8501046f..5d2dcc99 100644 --- a/src/main/ipc/handlers/tabNaming.ts +++ b/src/main/ipc/handlers/tabNaming.ts @@ -19,7 +19,7 @@ import { getSshRemoteConfig, 
createSshRemoteStoreAdapter } from '../../utils/ssh import { buildSshCommand } from '../../utils/ssh-command-builder'; import { tabNamingPrompt } from '../../../prompts'; import type { ProcessManager } from '../../process-manager'; -import type { AgentDetector } from '../../agent-detector'; +import type { AgentDetector } from '../../agents'; import type { MaestroSettings } from './persistence'; const LOG_CONTEXT = '[TabNaming]'; diff --git a/src/main/parsers/usage-aggregator.ts b/src/main/parsers/usage-aggregator.ts index 067dba06..fdc1ad4e 100644 --- a/src/main/parsers/usage-aggregator.ts +++ b/src/main/parsers/usage-aggregator.ts @@ -4,20 +4,9 @@ * Utility functions for aggregating token usage statistics from AI agents. * This module is separate from process-manager to avoid circular dependencies * and allow parsers to use it without importing node-pty dependencies. - * - * SYNC: Context calculation utilities are re-exported from shared/contextUsage.ts. - * See that file for the canonical formula and all locations that must stay in sync. - * This module provides the re-exports for the main process. */ -// Re-export context utilities from shared module -// SYNC: See shared/contextUsage.ts for the canonical calculation -export { - DEFAULT_CONTEXT_WINDOWS, - COMBINED_CONTEXT_AGENTS, - calculateContextTokens, - estimateContextUsage, -} from '../../shared/contextUsage'; +import type { ToolType } from '../../shared/types'; /** * Model statistics from Claude Code modelUsage response @@ -48,6 +37,122 @@ export interface UsageStats { reasoningTokens?: number; } +/** + * Default context window sizes for different agents. + * Used as fallback when the agent doesn't report its context window size. 
+ */ +export const DEFAULT_CONTEXT_WINDOWS: Record = { + 'claude-code': 200000, // Claude 3.5 Sonnet/Claude 4 default context + codex: 200000, // OpenAI o3/o4-mini context window + opencode: 128000, // OpenCode (depends on model, 128k is conservative default) + 'factory-droid': 200000, // Factory Droid (varies by model, defaults to Claude Opus) + terminal: 0, // Terminal has no context window +}; + +/** + * Agents that use combined input+output context windows. + * OpenAI models (Codex, o3, o4-mini) have a single context window that includes + * both input and output tokens, unlike Claude which has separate limits. + */ +const COMBINED_CONTEXT_AGENTS: Set = new Set(['codex']); + +/** + * Calculate total context tokens based on agent-specific semantics. + * + * For a single Anthropic API call, the total input context is the sum of: + * inputTokens + cacheReadInputTokens + cacheCreationInputTokens + * These three fields partition the input into uncached, cache-hit, and newly-cached segments. + * + * CAVEAT: When Claude Code performs multi-tool turns (many internal API calls), + * the reported values may be accumulated across all internal calls within the turn. + * In that case the total can exceed the context window. Callers should check for + * this and skip the update (see estimateContextUsage). 
+ * + * Claude models: Context = input + cacheRead + cacheCreation + * OpenAI models: Context = input + output (combined limit) + * + * @param stats - The usage statistics containing token counts + * @param agentId - The agent identifier for agent-specific calculation + * @returns Total context tokens used + */ +export function calculateContextTokens( + stats: Pick< + UsageStats, + 'inputTokens' | 'outputTokens' | 'cacheReadInputTokens' | 'cacheCreationInputTokens' + >, + agentId?: ToolType +): number { + // OpenAI models have combined input+output context limits + if (agentId && COMBINED_CONTEXT_AGENTS.has(agentId)) { + return stats.inputTokens + (stats.cacheCreationInputTokens || 0) + stats.outputTokens; + } + + // Claude models: total input = uncached + cache-hit + newly-cached + // Output tokens don't consume the input context window + return ( + stats.inputTokens + (stats.cacheReadInputTokens || 0) + (stats.cacheCreationInputTokens || 0) + ); +} + +/** + * Estimate context usage percentage when the agent doesn't provide it directly. + * Uses agent-specific default context window sizes for accurate estimation. + * + * Context calculation varies by agent: + * - Claude models: inputTokens + cacheReadInputTokens + cacheCreationInputTokens + * - OpenAI models (Codex): inputTokens + outputTokens (combined limit) + * + * Returns null when the calculated total exceeds the context window, which indicates + * accumulated values from multi-tool turns (many internal API calls within one turn). + * A single API call's total input can never exceed the context window, so values + * above it are definitely accumulated. Callers should preserve the previous valid + * percentage when this returns null. 
+ * + * @param stats - The usage statistics containing token counts + * @param agentId - The agent identifier for agent-specific context window size + * @returns Estimated context usage percentage (0-100), or null if cannot be estimated + */ +export function estimateContextUsage( + stats: Pick< + UsageStats, + | 'inputTokens' + | 'outputTokens' + | 'cacheReadInputTokens' + | 'cacheCreationInputTokens' + | 'contextWindow' + >, + agentId?: ToolType +): number | null { + // Calculate total context using agent-specific semantics + const totalContextTokens = calculateContextTokens(stats, agentId); + + // Determine effective context window + const effectiveContextWindow = + stats.contextWindow && stats.contextWindow > 0 + ? stats.contextWindow + : agentId && agentId !== 'terminal' + ? DEFAULT_CONTEXT_WINDOWS[agentId] || 0 + : 0; + + if (!effectiveContextWindow || effectiveContextWindow <= 0) { + return null; + } + + // If total exceeds context window, the values are accumulated across multiple + // internal API calls within a complex turn (tool use chains). A single API call's + // total input cannot exceed the context window. Return null to signal callers + // should keep the previous valid percentage. + if (totalContextTokens > effectiveContextWindow) { + return null; + } + + if (totalContextTokens <= 0) { + return 0; + } + + return Math.round((totalContextTokens / effectiveContextWindow) * 100); +} + /** * Aggregate token counts from modelUsage for accurate context tracking. * modelUsage contains per-model breakdown with actual context tokens (including cache hits). 
@@ -89,7 +194,6 @@ export function aggregateModelUsage( modelStats.cacheCreationInputTokens || 0 ); // Use the highest context window from any model - // This ensures we track the maximum context limit across multi-model turns if (modelStats.contextWindow && modelStats.contextWindow > contextWindow) { contextWindow = modelStats.contextWindow; } diff --git a/src/main/process-listeners/__tests__/stats-listener.test.ts b/src/main/process-listeners/__tests__/stats-listener.test.ts index ff2a0a30..8d064860 100644 --- a/src/main/process-listeners/__tests__/stats-listener.test.ts +++ b/src/main/process-listeners/__tests__/stats-listener.test.ts @@ -8,7 +8,7 @@ import { setupStatsListener } from '../stats-listener'; import type { ProcessManager } from '../../process-manager'; import type { SafeSendFn } from '../../utils/safe-send'; import type { QueryCompleteData } from '../../process-manager/types'; -import type { StatsDB } from '../../stats-db'; +import type { StatsDB } from '../../stats'; import type { ProcessListenerDependencies } from '../types'; describe('Stats Listener', () => { diff --git a/src/main/process-listeners/types.ts b/src/main/process-listeners/types.ts index b80f267d..7d0c71cf 100644 --- a/src/main/process-listeners/types.ts +++ b/src/main/process-listeners/types.ts @@ -5,9 +5,9 @@ import type { ProcessManager } from '../process-manager'; import type { WebServer } from '../web-server'; -import type { AgentDetector } from '../agent-detector'; +import type { AgentDetector } from '../agents'; import type { SafeSendFn } from '../utils/safe-send'; -import type { StatsDB } from '../stats-db'; +import type { StatsDB } from '../stats'; import type { GroupChat, GroupChatParticipant } from '../group-chat/group-chat-storage'; import type { GroupChatState } from '../../shared/group-chat-types'; import type { ParticipantState } from '../ipc/handlers/groupChat'; diff --git a/src/main/process-listeners/usage-listener.ts b/src/main/process-listeners/usage-listener.ts 
index 17dfbef4..26c2a042 100644 --- a/src/main/process-listeners/usage-listener.ts +++ b/src/main/process-listeners/usage-listener.ts @@ -1,10 +1,6 @@ /** * Usage statistics listener. * Handles usage stats from AI responses, including group chat participant/moderator updates. - * - * SYNC: Context calculations use usageAggregator.calculateContextTokens() which wraps - * the shared calculateContextTokens() function from shared/contextUsage.ts. - * See that file for the canonical formula and all locations that must stay in sync. */ import type { ProcessManager } from '../process-manager'; @@ -56,18 +52,29 @@ export function setupUsageListener( // Calculate context usage percentage using agent-specific logic // Note: For group chat, we don't have agent type here, defaults to Claude behavior const totalContextTokens = usageAggregator.calculateContextTokens(usageStats); - const contextUsage = - usageStats.contextWindow > 0 - ? Math.round((totalContextTokens / usageStats.contextWindow) * 100) - : 0; + const effectiveWindow = usageStats.contextWindow > 0 ? usageStats.contextWindow : 200000; + + // Skip update if values are accumulated (total > window) from multi-tool turns + const contextUsage = + totalContextTokens <= effectiveWindow + ? 
Math.round((totalContextTokens / effectiveWindow) * 100) + : -1; // -1 signals "skip update" + + // Update participant with usage stats (skip context update if accumulated) + const updateData: { + contextUsage?: number; + tokenCount?: number; + totalCost: number; + } = { + totalCost: usageStats.totalCostUsd, + }; + if (contextUsage >= 0) { + updateData.contextUsage = contextUsage; + updateData.tokenCount = totalContextTokens; + } - // Update participant with usage stats groupChatStorage - .updateParticipant(groupChatId, participantName, { - contextUsage, - tokenCount: totalContextTokens, - totalCost: usageStats.totalCostUsd, - }) + .updateParticipant(groupChatId, participantName, updateData) .then((updatedChat) => { // Emit participants changed so UI updates // Note: updateParticipant returns the updated chat, avoiding extra DB read @@ -91,17 +98,25 @@ export function setupUsageListener( // Calculate context usage percentage using agent-specific logic // Note: Moderator is typically Claude, defaults to Claude behavior const totalContextTokens = usageAggregator.calculateContextTokens(usageStats); - const contextUsage = - usageStats.contextWindow > 0 - ? Math.round((totalContextTokens / usageStats.contextWindow) * 100) - : 0; + const effectiveWindow = usageStats.contextWindow > 0 ? usageStats.contextWindow : 200000; - // Emit moderator usage for the moderator card - groupChatEmitters.emitModeratorUsage?.(groupChatId, { - contextUsage, - totalCost: usageStats.totalCostUsd, - tokenCount: totalContextTokens, - }); + // Skip context update if values are accumulated (total > window) from multi-tool turns. + // When accumulated, emit with contextUsage/tokenCount as -1 so the handler + // knows to preserve the previous values. Cost is always updated. 
+ if (totalContextTokens <= effectiveWindow) { + const contextUsage = Math.round((totalContextTokens / effectiveWindow) * 100); + groupChatEmitters.emitModeratorUsage?.(groupChatId, { + contextUsage, + totalCost: usageStats.totalCostUsd, + tokenCount: totalContextTokens, + }); + } else { + groupChatEmitters.emitModeratorUsage?.(groupChatId, { + contextUsage: -1, + totalCost: usageStats.totalCostUsd, + tokenCount: -1, + }); + } } safeSend('process:usage', sessionId, usageStats); diff --git a/src/main/process-manager/spawners/ChildProcessSpawner.ts b/src/main/process-manager/spawners/ChildProcessSpawner.ts index c557b1e5..5bfad408 100644 --- a/src/main/process-manager/spawners/ChildProcessSpawner.ts +++ b/src/main/process-manager/spawners/ChildProcessSpawner.ts @@ -5,7 +5,7 @@ import { EventEmitter } from 'events'; import * as path from 'path'; import { logger } from '../../utils/logger'; import { getOutputParser } from '../../parsers'; -import { getAgentCapabilities } from '../../agent-capabilities'; +import { getAgentCapabilities } from '../../agents'; import type { ProcessConfig, ManagedProcess, SpawnResult } from '../types'; import type { DataBufferManager } from '../handlers/DataBufferManager'; import { StdoutHandler } from '../handlers/StdoutHandler'; diff --git a/src/main/stats-db.ts b/src/main/stats-db.ts deleted file mode 100644 index d0b4bd2a..00000000 --- a/src/main/stats-db.ts +++ /dev/null @@ -1,2079 +0,0 @@ -/** - * Stats Database Service - * - * SQLite-based storage for tracking all AI interactions across Maestro. - * Uses better-sqlite3 for synchronous, fast database operations. - * - * Database location: ~/Library/Application Support/Maestro/stats.db - * (platform-appropriate path resolved via app.getPath('userData')) - * - * ## Migration System - * - * This module uses a versioned migration system to manage schema changes: - * - * 1. **Version Tracking**: Uses SQLite's `user_version` pragma for fast version checks - * 2. 
**Migrations Table**: Stores detailed migration history with timestamps and status - * 3. **Sequential Execution**: Migrations run in order, skipping already-applied ones - * - * ### Adding New Migrations - * - * To add a new migration: - * 1. Create a new migration function following the pattern: `migrateVN()` - * 2. Add it to the `MIGRATIONS` array with version number and description - * 3. Update `STATS_DB_VERSION` in `../shared/stats-types.ts` - * - * Example: - * ```typescript - * // In MIGRATIONS array: - * { version: 2, description: 'Add token_count column', up: () => this.migrateV2() } - * - * // Migration function: - * private migrateV2(): void { - * this.db.prepare('ALTER TABLE query_events ADD COLUMN token_count INTEGER').run(); - * } - * ``` - */ - -import Database from 'better-sqlite3'; -import * as path from 'path'; -import * as fs from 'fs'; -import { app } from 'electron'; -import { logger } from './utils/logger'; -import { captureException, captureMessage } from './utils/sentry'; -import { - QueryEvent, - AutoRunSession, - AutoRunTask, - SessionLifecycleEvent, - StatsTimeRange, - StatsFilters, - StatsAggregation, -} from '../shared/stats-types'; -import { PerformanceMetrics, PERFORMANCE_THRESHOLDS } from '../shared/performance-metrics'; - -const LOG_CONTEXT = '[StatsDB]'; - -/** - * Performance metrics logger for StatsDB operations. - * - * Disabled by default - enable via setPerformanceLoggingEnabled(true). - * Logs at debug level through the main process logger. - */ -const perfMetrics = new PerformanceMetrics( - 'StatsDB', - (message, context) => logger.debug(message, context ?? 
LOG_CONTEXT), - false // Disabled by default - enable for debugging -); - -/** - * Result of a database integrity check - */ -export interface IntegrityCheckResult { - /** Whether the database passed the integrity check */ - ok: boolean; - /** Error messages from the integrity check (empty if ok is true) */ - errors: string[]; -} - -/** - * Result of a database backup operation - */ -export interface BackupResult { - /** Whether the backup succeeded */ - success: boolean; - /** Path to the backup file (if success is true) */ - backupPath?: string; - /** Error message (if success is false) */ - error?: string; -} - -/** - * Result of corruption recovery - */ -export interface CorruptionRecoveryResult { - /** Whether recovery was performed */ - recovered: boolean; - /** Path to the backup of the corrupted database */ - backupPath?: string; - /** Error during recovery (if any) */ - error?: string; -} - -/** - * Result of database initialization - */ -export interface InitializationResult { - /** Whether initialization succeeded */ - success: boolean; - /** Whether the database was reset due to corruption */ - wasReset: boolean; - /** Path to the backup of the corrupted database (if reset) */ - backupPath?: string; - /** Error message if initialization failed */ - error?: string; - /** User-friendly message about what happened */ - userMessage?: string; -} - -// ============================================================================ -// Migration System Types -// ============================================================================ - -/** - * Represents a single database migration - */ -export interface Migration { - /** Version number (must be sequential starting from 1) */ - version: number; - /** Human-readable description of the migration */ - description: string; - /** Function to apply the migration */ - up: () => void; -} - -/** - * Record of an applied migration stored in the migrations table - */ -export interface MigrationRecord { - version: 
number; - description: string; - appliedAt: number; - status: 'success' | 'failed'; - errorMessage?: string; -} - -/** - * SQL for creating the migrations tracking table - */ -const CREATE_MIGRATIONS_TABLE_SQL = ` - CREATE TABLE IF NOT EXISTS _migrations ( - version INTEGER PRIMARY KEY, - description TEXT NOT NULL, - applied_at INTEGER NOT NULL, - status TEXT NOT NULL CHECK(status IN ('success', 'failed')), - error_message TEXT - ) -`; - -/** - * Generate a unique ID for database entries - */ -function generateId(): string { - return `${Date.now()}-${Math.random().toString(36).substring(2, 11)}`; -} - -/** - * Get timestamp for start of time range - */ -function getTimeRangeStart(range: StatsTimeRange): number { - const now = Date.now(); - const day = 24 * 60 * 60 * 1000; - - switch (range) { - case 'day': - return now - day; - case 'week': - return now - 7 * day; - case 'month': - return now - 30 * day; - case 'quarter': - return now - 90 * day; - case 'year': - return now - 365 * day; - case 'all': - return 0; - } -} - -/** - * Normalize file paths to use forward slashes consistently across platforms. - * - * This ensures that paths stored in the database use a consistent format - * regardless of the operating system, enabling cross-platform data portability - * and consistent filtering by project path. 
- * - * - Converts Windows-style backslashes to forward slashes - * - Preserves UNC paths (\\server\share → //server/share) - * - Handles null/undefined by returning null - * - * @param filePath - The file path to normalize (may be Windows or Unix style) - * @returns The normalized path with forward slashes, or null if input is null/undefined - */ -export function normalizePath(filePath: string | null | undefined): string | null { - if (filePath == null) { - return null; - } - // Replace all backslashes with forward slashes - return filePath.replace(/\\/g, '/'); -} - -/** - * SQL for creating query_events table - */ -const CREATE_QUERY_EVENTS_SQL = ` - CREATE TABLE IF NOT EXISTS query_events ( - id TEXT PRIMARY KEY, - session_id TEXT NOT NULL, - agent_type TEXT NOT NULL, - source TEXT NOT NULL CHECK(source IN ('user', 'auto')), - start_time INTEGER NOT NULL, - duration INTEGER NOT NULL, - project_path TEXT, - tab_id TEXT - ) -`; - -const CREATE_QUERY_EVENTS_INDEXES_SQL = ` - CREATE INDEX IF NOT EXISTS idx_query_start_time ON query_events(start_time); - CREATE INDEX IF NOT EXISTS idx_query_agent_type ON query_events(agent_type); - CREATE INDEX IF NOT EXISTS idx_query_source ON query_events(source); - CREATE INDEX IF NOT EXISTS idx_query_session ON query_events(session_id); - CREATE INDEX IF NOT EXISTS idx_query_project_path ON query_events(project_path); - CREATE INDEX IF NOT EXISTS idx_query_agent_time ON query_events(agent_type, start_time) -`; - -/** - * SQL for creating auto_run_sessions table - */ -const CREATE_AUTO_RUN_SESSIONS_SQL = ` - CREATE TABLE IF NOT EXISTS auto_run_sessions ( - id TEXT PRIMARY KEY, - session_id TEXT NOT NULL, - agent_type TEXT NOT NULL, - document_path TEXT, - start_time INTEGER NOT NULL, - duration INTEGER NOT NULL, - tasks_total INTEGER, - tasks_completed INTEGER, - project_path TEXT - ) -`; - -const CREATE_AUTO_RUN_SESSIONS_INDEXES_SQL = ` - CREATE INDEX IF NOT EXISTS idx_auto_session_start ON auto_run_sessions(start_time) -`; - 
-/** - * SQL for creating auto_run_tasks table - */ -const CREATE_AUTO_RUN_TASKS_SQL = ` - CREATE TABLE IF NOT EXISTS auto_run_tasks ( - id TEXT PRIMARY KEY, - auto_run_session_id TEXT NOT NULL REFERENCES auto_run_sessions(id), - session_id TEXT NOT NULL, - agent_type TEXT NOT NULL, - task_index INTEGER NOT NULL, - task_content TEXT, - start_time INTEGER NOT NULL, - duration INTEGER NOT NULL, - success INTEGER NOT NULL CHECK(success IN (0, 1)) - ) -`; - -const CREATE_AUTO_RUN_TASKS_INDEXES_SQL = ` - CREATE INDEX IF NOT EXISTS idx_task_auto_session ON auto_run_tasks(auto_run_session_id); - CREATE INDEX IF NOT EXISTS idx_task_start ON auto_run_tasks(start_time) -`; - -/** - * SQL for creating session_lifecycle table - */ -const CREATE_SESSION_LIFECYCLE_SQL = ` - CREATE TABLE IF NOT EXISTS session_lifecycle ( - id TEXT PRIMARY KEY, - session_id TEXT NOT NULL UNIQUE, - agent_type TEXT NOT NULL, - project_path TEXT, - created_at INTEGER NOT NULL, - closed_at INTEGER, - duration INTEGER, - is_remote INTEGER - ) -`; - -const CREATE_SESSION_LIFECYCLE_INDEXES_SQL = ` - CREATE INDEX IF NOT EXISTS idx_session_created_at ON session_lifecycle(created_at); - CREATE INDEX IF NOT EXISTS idx_session_agent_type ON session_lifecycle(agent_type) -`; - -/** - * StatsDB manages the SQLite database for usage statistics. - * Implements singleton pattern for database connection management. - */ -export class StatsDB { - private db: Database.Database | null = null; - private dbPath: string; - private initialized = false; - - /** - * Registry of all database migrations. - * Migrations must be sequential starting from version 1. - * Each migration is run exactly once and recorded in the _migrations table. 
- */ - private getMigrations(): Migration[] { - return [ - { - version: 1, - description: 'Initial schema: query_events, auto_run_sessions, auto_run_tasks tables', - up: () => this.migrateV1(), - }, - { - version: 2, - description: 'Add is_remote column to query_events for tracking SSH sessions', - up: () => this.migrateV2(), - }, - { - version: 3, - description: 'Add session_lifecycle table for tracking session creation and closure', - up: () => this.migrateV3(), - }, - ]; - } - - constructor() { - this.dbPath = path.join(app.getPath('userData'), 'stats.db'); - } - - /** - * Initialize the database - create file, tables, and indexes. - * Also runs VACUUM if the database exceeds 100MB to maintain performance. - * - * If the database is corrupted, this method will: - * 1. Backup the corrupted database file - * 2. Delete the corrupted file and any associated WAL/SHM files - * 3. Create a fresh database - * - * The backup is preserved for potential manual recovery with specialized tools. - * - * @returns Result object indicating success/failure and whether database was reset - */ - initialize(): InitializationResult { - if (this.initialized) { - return { success: true, wasReset: false }; - } - - let wasReset = false; - let backupPath: string | undefined; - - try { - // Ensure the directory exists - const dir = path.dirname(this.dbPath); - if (!fs.existsSync(dir)) { - fs.mkdirSync(dir, { recursive: true }); - } - - // Check if database file exists - const dbExists = fs.existsSync(this.dbPath); - - if (dbExists) { - // Open with corruption handling for existing databases - const openResult = this.openWithCorruptionHandling(); - if (!openResult.db) { - // If we still can't open the database, try one more time to reset it - logger.error('Failed to open database even after recovery attempt', LOG_CONTEXT); - const lastChanceRecovery = this.recoverFromCorruption(); - if (lastChanceRecovery.recovered) { - wasReset = true; - backupPath = lastChanceRecovery.backupPath; - try { 
- this.db = new Database(this.dbPath); - } catch (finalError) { - const errorMessage = - finalError instanceof Error ? finalError.message : String(finalError); - logger.error( - `Failed to create fresh database after recovery: ${errorMessage}`, - LOG_CONTEXT - ); - return { - success: false, - wasReset: true, - backupPath, - error: errorMessage, - userMessage: - 'Failed to create fresh database after corruption recovery. The statistics database is unavailable.', - }; - } - } else { - return { - success: false, - wasReset: false, - error: 'Failed to open or recover database', - userMessage: - 'The statistics database could not be opened or recovered. Usage statistics are unavailable.', - }; - } - } else { - this.db = openResult.db; - wasReset = openResult.wasReset; - backupPath = openResult.backupPath; - } - } else { - // Create new database - try { - this.db = new Database(this.dbPath); - } catch (createError) { - // This can happen if the native module fails to load - const errorMessage = createError instanceof Error ? 
createError.message : String(createError); - logger.error(`Failed to create database: ${errorMessage}`, LOG_CONTEXT); - - // Report to Sentry - void captureException(createError, { - context: 'initialize:createNewDatabase', - dbPath: this.dbPath, - }); - - return { - success: false, - wasReset: false, - error: errorMessage, - userMessage: this.getNativeModuleErrorMessage(errorMessage), - }; - } - } - - // Enable WAL mode for better concurrent access - this.db.pragma('journal_mode = WAL'); - - // Run migrations - this.runMigrations(); - - this.initialized = true; - logger.info(`Stats database initialized at ${this.dbPath}`, LOG_CONTEXT); - - // Schedule VACUUM to run weekly instead of on every startup - // This avoids blocking the main process during initialization - this.vacuumIfNeededWeekly(); - - // Return success with reset info - if (wasReset) { - return { - success: true, - wasReset: true, - backupPath, - userMessage: - 'The statistics database was corrupted and has been reset. Your usage history has been cleared, but a backup of the old data was saved. This only affects usage statistics - no session data was lost.', - }; - } - - return { success: true, wasReset: false }; - } catch (error) { - const errorMessage = error instanceof Error ? error.message : String(error); - logger.error(`Failed to initialize stats database: ${errorMessage}`, LOG_CONTEXT); - - // Report to Sentry - void captureException(error, { - context: 'initialize:outerCatch', - dbPath: this.dbPath, - wasReset, - }); - - return { - success: false, - wasReset, - backupPath, - error: errorMessage, - userMessage: this.getNativeModuleErrorMessage(errorMessage), - }; - } - } - - /** - * Get a user-friendly error message for native module loading failures. 
- */ - private getNativeModuleErrorMessage(errorMessage: string): string { - // Check for common native module loading issues - if (errorMessage.includes('dlopen') || errorMessage.includes('better_sqlite3.node')) { - return 'The statistics database module failed to load. This may happen after an app update. Try restarting Maestro, and if the issue persists, try reinstalling the application.'; - } - if (errorMessage.includes('SQLITE_CORRUPT') || errorMessage.includes('malformed')) { - return 'The statistics database was corrupted and could not be recovered. Usage statistics are unavailable.'; - } - return `Failed to initialize statistics database: ${errorMessage}`; - } - - // ============================================================================ - // Migration System - // ============================================================================ - - /** - * Run all pending database migrations. - * - * The migration system: - * 1. Creates the _migrations table if it doesn't exist - * 2. Gets the current schema version from user_version pragma - * 3. Runs each pending migration in a transaction - * 4. Records each migration in the _migrations table - * 5. Updates the user_version pragma - * - * If a migration fails, it is recorded as 'failed' with an error message, - * and the error is re-thrown to prevent the app from starting with an - * inconsistent database state. - */ - private runMigrations(): void { - if (!this.db) throw new Error('Database not initialized'); - - // Create migrations table (this is the only table created outside the migration system) - this.db.prepare(CREATE_MIGRATIONS_TABLE_SQL).run(); - - // Get current version (0 if fresh database) - const versionResult = this.db.pragma('user_version') as Array<{ user_version: number }>; - const currentVersion = versionResult[0]?.user_version ?? 
0; - - const migrations = this.getMigrations(); - const pendingMigrations = migrations.filter((m) => m.version > currentVersion); - - if (pendingMigrations.length === 0) { - logger.debug(`Database is up to date (version ${currentVersion})`, LOG_CONTEXT); - return; - } - - // Sort by version to ensure sequential execution - pendingMigrations.sort((a, b) => a.version - b.version); - - logger.info( - `Running ${pendingMigrations.length} pending migration(s) (current version: ${currentVersion})`, - LOG_CONTEXT - ); - - for (const migration of pendingMigrations) { - this.applyMigration(migration); - } - } - - /** - * Apply a single migration within a transaction. - * Records the migration in the _migrations table with success/failure status. - */ - private applyMigration(migration: Migration): void { - if (!this.db) throw new Error('Database not initialized'); - - const startTime = Date.now(); - logger.info(`Applying migration v${migration.version}: ${migration.description}`, LOG_CONTEXT); - - try { - // Run migration in a transaction for atomicity - const runMigration = this.db.transaction(() => { - // Execute the migration - migration.up(); - - // Record success in _migrations table - this.db!.prepare( - ` - INSERT OR REPLACE INTO _migrations (version, description, applied_at, status, error_message) - VALUES (?, ?, ?, 'success', NULL) - ` - ).run(migration.version, migration.description, Date.now()); - - // Update user_version pragma - this.db!.pragma(`user_version = ${migration.version}`); - }); - - runMigration(); - - const duration = Date.now() - startTime; - logger.info(`Migration v${migration.version} completed in ${duration}ms`, LOG_CONTEXT); - } catch (error) { - // Record failure in _migrations table (outside transaction since it was rolled back) - const errorMessage = error instanceof Error ? 
error.message : String(error); - - this.db - .prepare( - ` - INSERT OR REPLACE INTO _migrations (version, description, applied_at, status, error_message) - VALUES (?, ?, ?, 'failed', ?) - ` - ) - .run(migration.version, migration.description, Date.now(), errorMessage); - - logger.error(`Migration v${migration.version} failed: ${errorMessage}`, LOG_CONTEXT); - - // Re-throw to prevent app from starting with inconsistent state - throw error; - } - } - - /** - * Get the list of applied migrations from the _migrations table. - * Useful for debugging and diagnostics. - */ - getMigrationHistory(): MigrationRecord[] { - if (!this.db) throw new Error('Database not initialized'); - - // Check if _migrations table exists - const tableExists = this.db - .prepare( - ` - SELECT name FROM sqlite_master WHERE type='table' AND name='_migrations' - ` - ) - .get(); - - if (!tableExists) { - return []; - } - - const rows = this.db - .prepare( - ` - SELECT version, description, applied_at, status, error_message - FROM _migrations - ORDER BY version ASC - ` - ) - .all() as Array<{ - version: number; - description: string; - applied_at: number; - status: 'success' | 'failed'; - error_message: string | null; - }>; - - return rows.map((row) => ({ - version: row.version, - description: row.description, - appliedAt: row.applied_at, - status: row.status, - errorMessage: row.error_message ?? undefined, - })); - } - - /** - * Get the current database schema version. - */ - getCurrentVersion(): number { - if (!this.db) throw new Error('Database not initialized'); - - const versionResult = this.db.pragma('user_version') as Array<{ user_version: number }>; - return versionResult[0]?.user_version ?? 0; - } - - /** - * Get the target version (highest version in migrations registry). 
- */ - getTargetVersion(): number { - const migrations = this.getMigrations(); - if (migrations.length === 0) return 0; - return Math.max(...migrations.map((m) => m.version)); - } - - /** - * Check if any migrations are pending. - */ - hasPendingMigrations(): boolean { - return this.getCurrentVersion() < this.getTargetVersion(); - } - - // ============================================================================ - // Individual Migration Functions - // ============================================================================ - - /** - * Migration v1: Initial schema creation - * - * Creates the core tables for tracking AI interactions: - * - query_events: Individual AI query/response cycles - * - auto_run_sessions: Batch processing runs - * - auto_run_tasks: Individual tasks within batch runs - */ - private migrateV1(): void { - if (!this.db) throw new Error('Database not initialized'); - - // Create query_events table and indexes - this.db.prepare(CREATE_QUERY_EVENTS_SQL).run(); - for (const indexSql of CREATE_QUERY_EVENTS_INDEXES_SQL.split(';').filter((s) => s.trim())) { - this.db.prepare(indexSql).run(); - } - - // Create auto_run_sessions table and indexes - this.db.prepare(CREATE_AUTO_RUN_SESSIONS_SQL).run(); - for (const indexSql of CREATE_AUTO_RUN_SESSIONS_INDEXES_SQL.split(';').filter((s) => - s.trim() - )) { - this.db.prepare(indexSql).run(); - } - - // Create auto_run_tasks table and indexes - this.db.prepare(CREATE_AUTO_RUN_TASKS_SQL).run(); - for (const indexSql of CREATE_AUTO_RUN_TASKS_INDEXES_SQL.split(';').filter((s) => s.trim())) { - this.db.prepare(indexSql).run(); - } - - logger.debug('Created stats database tables and indexes', LOG_CONTEXT); - } - - /** - * Migration v2: Add is_remote column for SSH session tracking - * - * Adds a new column to track whether queries were executed on remote SSH sessions - * vs local sessions. This enables usage analytics broken down by session location. 
- */ - private migrateV2(): void { - if (!this.db) throw new Error('Database not initialized'); - - // Add is_remote column (0 = local, 1 = remote, NULL = unknown/legacy data) - this.db.prepare('ALTER TABLE query_events ADD COLUMN is_remote INTEGER').run(); - - // Add index for efficient filtering by location - this.db - .prepare('CREATE INDEX IF NOT EXISTS idx_query_is_remote ON query_events(is_remote)') - .run(); - - logger.debug('Added is_remote column to query_events table', LOG_CONTEXT); - } - - /** - * Migration v3: Add session_lifecycle table for tracking session creation and closure - * - * This enables tracking of unique sessions launched over time, session duration, - * and session lifecycle metrics in the Usage Dashboard. - */ - private migrateV3(): void { - if (!this.db) throw new Error('Database not initialized'); - - // Create session_lifecycle table - this.db.prepare(CREATE_SESSION_LIFECYCLE_SQL).run(); - - // Create indexes - for (const indexSql of CREATE_SESSION_LIFECYCLE_INDEXES_SQL.split(';').filter((s) => - s.trim() - )) { - this.db.prepare(indexSql).run(); - } - - logger.debug('Created session_lifecycle table', LOG_CONTEXT); - } - - // ============================================================================ - // Database Lifecycle - // ============================================================================ - - /** - * Close the database connection - */ - close(): void { - if (this.db) { - this.db.close(); - this.db = null; - this.initialized = false; - logger.info('Stats database closed', LOG_CONTEXT); - } - } - - /** - * Check if database is initialized and ready - */ - isReady(): boolean { - return this.initialized && this.db !== null; - } - - /** - * Get the database file path - */ - getDbPath(): string { - return this.dbPath; - } - - /** - * Get the database file size in bytes. - * Returns 0 if the file doesn't exist or can't be read. 
- */ - getDatabaseSize(): number { - try { - const stats = fs.statSync(this.dbPath); - return stats.size; - } catch { - return 0; - } - } - - /** - * Get the timestamp of the earliest stat entry in the database. - * Checks query_events, auto_run_sessions, and session_lifecycle tables. - * Returns null if no entries exist. - */ - getEarliestStatTimestamp(): number | null { - if (!this.db) throw new Error('Database not initialized'); - - // Query minimum start_time from each table and find the overall minimum - const queryEventsMin = this.db - .prepare('SELECT MIN(start_time) as min_time FROM query_events') - .get() as { min_time: number | null } | undefined; - - const autoRunMin = this.db - .prepare('SELECT MIN(start_time) as min_time FROM auto_run_sessions') - .get() as { min_time: number | null } | undefined; - - const lifecycleMin = this.db - .prepare('SELECT MIN(created_at) as min_time FROM session_lifecycle') - .get() as { min_time: number | null } | undefined; - - const timestamps = [ - queryEventsMin?.min_time, - autoRunMin?.min_time, - lifecycleMin?.min_time, - ].filter((t): t is number => t !== null && t !== undefined); - - if (timestamps.length === 0) { - return null; - } - - return Math.min(...timestamps); - } - - /** - * Run VACUUM on the database to reclaim unused space and optimize structure. - * - * VACUUM rebuilds the database file, repacking it into a minimal amount of disk space. - * This is useful after many deletes or updates that leave fragmented space. - * - * Note: VACUUM requires exclusive access and may take a few seconds for large databases. - * It also temporarily requires up to 2x the database size in disk space. 
- * - * @returns Object with success status, bytes freed, and any error message - */ - vacuum(): { success: boolean; bytesFreed: number; error?: string } { - if (!this.db) { - return { success: false, bytesFreed: 0, error: 'Database not initialized' }; - } - - try { - const sizeBefore = this.getDatabaseSize(); - logger.info( - `Starting VACUUM (current size: ${(sizeBefore / 1024 / 1024).toFixed(2)} MB)`, - LOG_CONTEXT - ); - - // Use prepare().run() for VACUUM - consistent with better-sqlite3 patterns - this.db.prepare('VACUUM').run(); - - const sizeAfter = this.getDatabaseSize(); - const bytesFreed = sizeBefore - sizeAfter; - - logger.info( - `VACUUM completed: ${(sizeBefore / 1024 / 1024).toFixed(2)} MB → ${(sizeAfter / 1024 / 1024).toFixed(2)} MB (freed ${(bytesFreed / 1024 / 1024).toFixed(2)} MB)`, - LOG_CONTEXT - ); - - return { success: true, bytesFreed }; - } catch (error) { - const errorMessage = error instanceof Error ? error.message : String(error); - logger.error(`VACUUM failed: ${errorMessage}`, LOG_CONTEXT); - return { success: false, bytesFreed: 0, error: errorMessage }; - } - } - - /** - * Conditionally vacuum the database if it exceeds a size threshold. - * - * This method is designed to be called on app startup to maintain database health. - * It only runs VACUUM if the database exceeds the specified threshold (default: 100MB), - * avoiding unnecessary work for smaller databases. 
- * - * @param thresholdBytes - Size threshold in bytes (default: 100MB = 104857600 bytes) - * @returns Object with vacuumed flag, database size, and vacuum result if performed - */ - vacuumIfNeeded(thresholdBytes: number = 100 * 1024 * 1024): { - vacuumed: boolean; - databaseSize: number; - result?: { success: boolean; bytesFreed: number; error?: string }; - } { - const databaseSize = this.getDatabaseSize(); - - if (databaseSize < thresholdBytes) { - logger.debug( - `Database size (${(databaseSize / 1024 / 1024).toFixed(2)} MB) below vacuum threshold (${(thresholdBytes / 1024 / 1024).toFixed(2)} MB), skipping VACUUM`, - LOG_CONTEXT - ); - return { vacuumed: false, databaseSize }; - } - - logger.info( - `Database size (${(databaseSize / 1024 / 1024).toFixed(2)} MB) exceeds vacuum threshold (${(thresholdBytes / 1024 / 1024).toFixed(2)} MB), running VACUUM`, - LOG_CONTEXT - ); - - const result = this.vacuum(); - return { vacuumed: true, databaseSize, result }; - } - - /** - * Run VACUUM only if it hasn't been run in the last 7 days. - * - * This avoids blocking startup on every app launch. The last vacuum timestamp - * is stored in a separate file alongside the database. 
- * - * @param intervalMs - Minimum time between vacuums (default: 7 days) - */ - private vacuumIfNeededWeekly(intervalMs: number = 7 * 24 * 60 * 60 * 1000): void { - const vacuumTimestampPath = path.join(path.dirname(this.dbPath), 'stats-vacuum-timestamp'); - - try { - // Check when we last ran VACUUM - let lastVacuum = 0; - if (fs.existsSync(vacuumTimestampPath)) { - const content = fs.readFileSync(vacuumTimestampPath, 'utf-8').trim(); - lastVacuum = parseInt(content, 10) || 0; - } - - const now = Date.now(); - const timeSinceLastVacuum = now - lastVacuum; - - if (timeSinceLastVacuum < intervalMs) { - const daysRemaining = ((intervalMs - timeSinceLastVacuum) / (24 * 60 * 60 * 1000)).toFixed( - 1 - ); - logger.debug( - `Skipping VACUUM (last run ${((now - lastVacuum) / (24 * 60 * 60 * 1000)).toFixed(1)} days ago, next in ${daysRemaining} days)`, - LOG_CONTEXT - ); - return; - } - - // Run VACUUM if database is large enough - const result = this.vacuumIfNeeded(); - - if (result.vacuumed) { - // Update timestamp only if we actually ran VACUUM - fs.writeFileSync(vacuumTimestampPath, String(now), 'utf-8'); - logger.info('Updated VACUUM timestamp for weekly scheduling', LOG_CONTEXT); - } - } catch (error) { - // Non-fatal - log and continue - logger.warn(`Failed to check/update VACUUM schedule: ${error}`, LOG_CONTEXT); - } - } - - // ============================================================================ - // Database Integrity & Corruption Handling - // ============================================================================ - - /** - * Check the integrity of the database using SQLite's PRAGMA integrity_check. - * - * This runs a full integrity check on the database, verifying that: - * - All pages are accessible - * - All indexes are properly formed - * - All constraints are satisfied - * - * For large databases this may take a few seconds. 
- * - * @returns Object with ok flag and any error messages - */ - checkIntegrity(): IntegrityCheckResult { - if (!this.db) { - return { ok: false, errors: ['Database not initialized'] }; - } - - try { - // PRAGMA integrity_check returns 'ok' if the database is valid, - // otherwise it returns a list of error messages - const result = this.db.pragma('integrity_check') as Array<{ integrity_check: string }>; - - if (result.length === 1 && result[0].integrity_check === 'ok') { - return { ok: true, errors: [] }; - } - - // Collect all error messages - const errors = result.map((row) => row.integrity_check); - return { ok: false, errors }; - } catch (error) { - const errorMessage = error instanceof Error ? error.message : String(error); - return { ok: false, errors: [errorMessage] }; - } - } - - /** - * Create a backup of the current database file. - * - * The backup is created with a timestamp suffix to avoid overwriting previous backups. - * Format: stats.db.backup.{timestamp} - * - * @returns Object with success flag, backup path, and any error message - */ - backupDatabase(): BackupResult { - try { - // Check if the database file exists - if (!fs.existsSync(this.dbPath)) { - return { success: false, error: 'Database file does not exist' }; - } - - // Generate backup path with timestamp - const timestamp = Date.now(); - const backupPath = `${this.dbPath}.backup.${timestamp}`; - - // Copy the database file - fs.copyFileSync(this.dbPath, backupPath); - - logger.info(`Created database backup at ${backupPath}`, LOG_CONTEXT); - return { success: true, backupPath }; - } catch (error) { - const errorMessage = error instanceof Error ? error.message : String(error); - logger.error(`Failed to create database backup: ${errorMessage}`, LOG_CONTEXT); - return { success: false, error: errorMessage }; - } - } - - /** - * Handle a corrupted database by backing it up and recreating a fresh database. - * - * This is the nuclear option when the database is unrecoverable: - * 1. 
Close the current database connection - * 2. Backup the corrupted database file - * 3. Delete the corrupted database file - * 4. Create a fresh database - * - * Note: This will result in loss of historical data, but preserves a backup - * that could potentially be recovered with specialized SQLite tools. - * - * @returns Object with recovery status, backup path, and any error - */ - private recoverFromCorruption(): CorruptionRecoveryResult { - logger.warn('Attempting to recover from database corruption...', LOG_CONTEXT); - - try { - // Close current connection if open - if (this.db) { - try { - this.db.close(); - } catch { - // Ignore errors closing corrupted database - } - this.db = null; - this.initialized = false; - } - - // Backup the corrupted database - const backupResult = this.backupDatabase(); - if (!backupResult.success) { - // If backup fails but file exists, try to rename it - if (fs.existsSync(this.dbPath)) { - const timestamp = Date.now(); - const emergencyBackupPath = `${this.dbPath}.corrupted.${timestamp}`; - try { - fs.renameSync(this.dbPath, emergencyBackupPath); - logger.warn(`Emergency backup created at ${emergencyBackupPath}`, LOG_CONTEXT); - } catch { - // If we can't even rename, just delete and lose the data - logger.error('Failed to backup corrupted database, data will be lost', LOG_CONTEXT); - fs.unlinkSync(this.dbPath); - } - } - } - - // Delete WAL and SHM files if they exist (they're associated with the corrupted db) - const walPath = `${this.dbPath}-wal`; - const shmPath = `${this.dbPath}-shm`; - if (fs.existsSync(walPath)) { - fs.unlinkSync(walPath); - } - if (fs.existsSync(shmPath)) { - fs.unlinkSync(shmPath); - } - - // Delete the main database file if it still exists - if (fs.existsSync(this.dbPath)) { - fs.unlinkSync(this.dbPath); - } - - logger.info('Corrupted database removed, will create fresh database', LOG_CONTEXT); - - return { - recovered: true, - backupPath: backupResult.backupPath, - }; - } catch (error) { - const 
errorMessage = error instanceof Error ? error.message : String(error); - logger.error(`Failed to recover from database corruption: ${errorMessage}`, LOG_CONTEXT); - return { - recovered: false, - error: errorMessage, - }; - } - } - - /** - * Attempt to open and validate a database, handling corruption if detected. - * - * This method: - * 1. Tries to open the database file - * 2. Runs a quick integrity check - * 3. If corrupted, backs up and recreates the database - * 4. Returns whether the database is now usable - * - * @returns Object with database instance, whether it was reset, and backup path - */ - private openWithCorruptionHandling(): { - db: Database.Database | null; - wasReset: boolean; - backupPath?: string; - } { - // First attempt: try to open normally - try { - const db = new Database(this.dbPath); - - // Quick integrity check on the existing database - const result = db.pragma('integrity_check') as Array<{ integrity_check: string }>; - if (result.length === 1 && result[0].integrity_check === 'ok') { - return { db, wasReset: false }; - } - - // Database is corrupted - const errors = result.map((row) => row.integrity_check); - logger.error(`Database integrity check failed: ${errors.join(', ')}`, LOG_CONTEXT); - - // Report corruption to Sentry for monitoring - void captureMessage('Stats database corruption detected', 'error', { - integrityErrors: errors, - dbPath: this.dbPath, - }); - - // Close before recovery - db.close(); - } catch (error) { - // Failed to open database - likely severely corrupted, locked, or native module issue - const errorMessage = error instanceof Error ? 
error.message : String(error); - logger.error(`Failed to open database: ${errorMessage}`, LOG_CONTEXT); - - // Report failure to Sentry - void captureException(error, { - context: 'openWithCorruptionHandling', - dbPath: this.dbPath, - isNativeModuleError: - errorMessage.includes('dlopen') || errorMessage.includes('better_sqlite3.node'), - }); - - // Check if this is a native module loading issue (not recoverable by reset) - if (errorMessage.includes('dlopen') || errorMessage.includes('better_sqlite3.node')) { - logger.error('Native SQLite module failed to load - cannot recover', LOG_CONTEXT); - return { db: null, wasReset: false }; - } - } - - // Recovery attempt - const recoveryResult = this.recoverFromCorruption(); - if (!recoveryResult.recovered) { - logger.error('Database corruption recovery failed', LOG_CONTEXT); - return { db: null, wasReset: false }; - } - - // Second attempt: create fresh database - try { - const db = new Database(this.dbPath); - logger.info('Fresh database created after corruption recovery', LOG_CONTEXT); - return { db, wasReset: true, backupPath: recoveryResult.backupPath }; - } catch (error) { - logger.error(`Failed to create fresh database after recovery: ${error}`, LOG_CONTEXT); - return { db: null, wasReset: true, backupPath: recoveryResult.backupPath }; - } - } - - // ============================================================================ - // Query Events - // ============================================================================ - - /** - * Insert a new query event - */ - insertQueryEvent(event: Omit): string { - if (!this.db) throw new Error('Database not initialized'); - - const id = generateId(); - const stmt = this.db.prepare(` - INSERT INTO query_events (id, session_id, agent_type, source, start_time, duration, project_path, tab_id, is_remote) - VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?) 
- `); - - stmt.run( - id, - event.sessionId, - event.agentType, - event.source, - event.startTime, - event.duration, - normalizePath(event.projectPath), - event.tabId ?? null, - event.isRemote !== undefined ? (event.isRemote ? 1 : 0) : null - ); - - logger.debug(`Inserted query event ${id}`, LOG_CONTEXT); - return id; - } - - /** - * Get query events within a time range with optional filters - */ - getQueryEvents(range: StatsTimeRange, filters?: StatsFilters): QueryEvent[] { - if (!this.db) throw new Error('Database not initialized'); - - const startTime = getTimeRangeStart(range); - let sql = 'SELECT * FROM query_events WHERE start_time >= ?'; - const params: (string | number)[] = [startTime]; - - if (filters?.agentType) { - sql += ' AND agent_type = ?'; - params.push(filters.agentType); - } - if (filters?.source) { - sql += ' AND source = ?'; - params.push(filters.source); - } - if (filters?.projectPath) { - sql += ' AND project_path = ?'; - // Normalize filter path to match stored format - params.push(normalizePath(filters.projectPath) ?? ''); - } - if (filters?.sessionId) { - sql += ' AND session_id = ?'; - params.push(filters.sessionId); - } - - sql += ' ORDER BY start_time DESC'; - - const stmt = this.db.prepare(sql); - const rows = stmt.all(...params) as Array<{ - id: string; - session_id: string; - agent_type: string; - source: 'user' | 'auto'; - start_time: number; - duration: number; - project_path: string | null; - tab_id: string | null; - is_remote: number | null; - }>; - - return rows.map((row) => ({ - id: row.id, - sessionId: row.session_id, - agentType: row.agent_type, - source: row.source, - startTime: row.start_time, - duration: row.duration, - projectPath: row.project_path ?? undefined, - tabId: row.tab_id ?? undefined, - isRemote: row.is_remote !== null ? 
row.is_remote === 1 : undefined, - })); - } - - // ============================================================================ - // Auto Run Sessions - // ============================================================================ - - /** - * Insert a new Auto Run session - */ - insertAutoRunSession(session: Omit): string { - if (!this.db) throw new Error('Database not initialized'); - - const id = generateId(); - const stmt = this.db.prepare(` - INSERT INTO auto_run_sessions (id, session_id, agent_type, document_path, start_time, duration, tasks_total, tasks_completed, project_path) - VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?) - `); - - stmt.run( - id, - session.sessionId, - session.agentType, - normalizePath(session.documentPath), - session.startTime, - session.duration, - session.tasksTotal ?? null, - session.tasksCompleted ?? null, - normalizePath(session.projectPath) - ); - - logger.debug(`Inserted Auto Run session ${id}`, LOG_CONTEXT); - return id; - } - - /** - * Update an existing Auto Run session (e.g., when it completes) - */ - updateAutoRunSession(id: string, updates: Partial): boolean { - if (!this.db) throw new Error('Database not initialized'); - - const setClauses: string[] = []; - const params: (string | number | null)[] = []; - - if (updates.duration !== undefined) { - setClauses.push('duration = ?'); - params.push(updates.duration); - } - if (updates.tasksTotal !== undefined) { - setClauses.push('tasks_total = ?'); - params.push(updates.tasksTotal ?? null); - } - if (updates.tasksCompleted !== undefined) { - setClauses.push('tasks_completed = ?'); - params.push(updates.tasksCompleted ?? 
null); - } - if (updates.documentPath !== undefined) { - setClauses.push('document_path = ?'); - params.push(normalizePath(updates.documentPath)); - } - - if (setClauses.length === 0) { - return false; - } - - params.push(id); - const sql = `UPDATE auto_run_sessions SET ${setClauses.join(', ')} WHERE id = ?`; - const stmt = this.db.prepare(sql); - const result = stmt.run(...params); - - logger.debug(`Updated Auto Run session ${id}`, LOG_CONTEXT); - return result.changes > 0; - } - - /** - * Get Auto Run sessions within a time range - */ - getAutoRunSessions(range: StatsTimeRange): AutoRunSession[] { - if (!this.db) throw new Error('Database not initialized'); - - const startTime = getTimeRangeStart(range); - const stmt = this.db.prepare(` - SELECT * FROM auto_run_sessions - WHERE start_time >= ? - ORDER BY start_time DESC - `); - - const rows = stmt.all(startTime) as Array<{ - id: string; - session_id: string; - agent_type: string; - document_path: string | null; - start_time: number; - duration: number; - tasks_total: number | null; - tasks_completed: number | null; - project_path: string | null; - }>; - - return rows.map((row) => ({ - id: row.id, - sessionId: row.session_id, - agentType: row.agent_type, - documentPath: row.document_path ?? undefined, - startTime: row.start_time, - duration: row.duration, - tasksTotal: row.tasks_total ?? undefined, - tasksCompleted: row.tasks_completed ?? undefined, - projectPath: row.project_path ?? 
undefined, - })); - } - - // ============================================================================ - // Auto Run Tasks - // ============================================================================ - - /** - * Insert a new Auto Run task - */ - insertAutoRunTask(task: Omit): string { - if (!this.db) throw new Error('Database not initialized'); - - const id = generateId(); - const stmt = this.db.prepare(` - INSERT INTO auto_run_tasks (id, auto_run_session_id, session_id, agent_type, task_index, task_content, start_time, duration, success) - VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?) - `); - - stmt.run( - id, - task.autoRunSessionId, - task.sessionId, - task.agentType, - task.taskIndex, - task.taskContent ?? null, - task.startTime, - task.duration, - task.success ? 1 : 0 - ); - - logger.debug(`Inserted Auto Run task ${id}`, LOG_CONTEXT); - return id; - } - - /** - * Get all tasks for a specific Auto Run session - */ - getAutoRunTasks(autoRunSessionId: string): AutoRunTask[] { - if (!this.db) throw new Error('Database not initialized'); - - const stmt = this.db.prepare(` - SELECT * FROM auto_run_tasks - WHERE auto_run_session_id = ? - ORDER BY task_index ASC - `); - - const rows = stmt.all(autoRunSessionId) as Array<{ - id: string; - auto_run_session_id: string; - session_id: string; - agent_type: string; - task_index: number; - task_content: string | null; - start_time: number; - duration: number; - success: number; - }>; - - return rows.map((row) => ({ - id: row.id, - autoRunSessionId: row.auto_run_session_id, - sessionId: row.session_id, - agentType: row.agent_type, - taskIndex: row.task_index, - taskContent: row.task_content ?? 
undefined, - startTime: row.start_time, - duration: row.duration, - success: row.success === 1, - })); - } - - // ============================================================================ - // Session Lifecycle - // ============================================================================ - - /** - * Record a session being created (launched) - */ - recordSessionCreated(event: Omit): string { - if (!this.db) throw new Error('Database not initialized'); - - const id = generateId(); - const stmt = this.db.prepare(` - INSERT INTO session_lifecycle (id, session_id, agent_type, project_path, created_at, is_remote) - VALUES (?, ?, ?, ?, ?, ?) - `); - - stmt.run( - id, - event.sessionId, - event.agentType, - normalizePath(event.projectPath), - event.createdAt, - event.isRemote !== undefined ? (event.isRemote ? 1 : 0) : null - ); - - logger.debug(`Recorded session created: ${event.sessionId}`, LOG_CONTEXT); - return id; - } - - /** - * Record a session being closed - */ - recordSessionClosed(sessionId: string, closedAt: number): boolean { - if (!this.db) throw new Error('Database not initialized'); - - // Get the session's created_at time to calculate duration - const session = this.db - .prepare( - ` - SELECT created_at FROM session_lifecycle WHERE session_id = ? - ` - ) - .get(sessionId) as { created_at: number } | undefined; - - if (!session) { - logger.debug(`Session not found for closure: ${sessionId}`, LOG_CONTEXT); - return false; - } - - const duration = closedAt - session.created_at; - - const stmt = this.db.prepare(` - UPDATE session_lifecycle - SET closed_at = ?, duration = ? - WHERE session_id = ? 
- `); - - const result = stmt.run(closedAt, duration, sessionId); - logger.debug(`Recorded session closed: ${sessionId}, duration: ${duration}ms`, LOG_CONTEXT); - return result.changes > 0; - } - - /** - * Get session lifecycle events within a time range - */ - getSessionLifecycleEvents(range: StatsTimeRange): SessionLifecycleEvent[] { - if (!this.db) throw new Error('Database not initialized'); - - const startTime = getTimeRangeStart(range); - const stmt = this.db.prepare(` - SELECT * FROM session_lifecycle - WHERE created_at >= ? - ORDER BY created_at DESC - `); - - const rows = stmt.all(startTime) as Array<{ - id: string; - session_id: string; - agent_type: string; - project_path: string | null; - created_at: number; - closed_at: number | null; - duration: number | null; - is_remote: number | null; - }>; - - return rows.map((row) => ({ - id: row.id, - sessionId: row.session_id, - agentType: row.agent_type, - projectPath: row.project_path ?? undefined, - createdAt: row.created_at, - closedAt: row.closed_at ?? undefined, - duration: row.duration ?? undefined, - isRemote: row.is_remote !== null ? row.is_remote === 1 : undefined, - })); - } - - // ============================================================================ - // Aggregations - // ============================================================================ - - /** - * Get aggregated statistics for a time range - */ - getAggregatedStats(range: StatsTimeRange): StatsAggregation { - if (!this.db) throw new Error('Database not initialized'); - - const perfStart = perfMetrics.start(); - const startTime = getTimeRangeStart(range); - - // Total queries and duration - const totalsStart = perfMetrics.start(); - const totalsStmt = this.db.prepare(` - SELECT COUNT(*) as count, COALESCE(SUM(duration), 0) as total_duration - FROM query_events - WHERE start_time >= ? 
- `); - const totals = totalsStmt.get(startTime) as { count: number; total_duration: number }; - perfMetrics.end(totalsStart, 'getAggregatedStats:totals', { range }); - - // By agent type - const byAgentStart = perfMetrics.start(); - const byAgentStmt = this.db.prepare(` - SELECT agent_type, COUNT(*) as count, SUM(duration) as duration - FROM query_events - WHERE start_time >= ? - GROUP BY agent_type - `); - const byAgentRows = byAgentStmt.all(startTime) as Array<{ - agent_type: string; - count: number; - duration: number; - }>; - const byAgent: Record = {}; - for (const row of byAgentRows) { - byAgent[row.agent_type] = { count: row.count, duration: row.duration }; - } - perfMetrics.end(byAgentStart, 'getAggregatedStats:byAgent', { - range, - agentCount: byAgentRows.length, - }); - - // By source (user vs auto) - const bySourceStart = perfMetrics.start(); - const bySourceStmt = this.db.prepare(` - SELECT source, COUNT(*) as count - FROM query_events - WHERE start_time >= ? - GROUP BY source - `); - const bySourceRows = bySourceStmt.all(startTime) as Array<{ - source: 'user' | 'auto'; - count: number; - }>; - const bySource = { user: 0, auto: 0 }; - for (const row of bySourceRows) { - bySource[row.source] = row.count; - } - perfMetrics.end(bySourceStart, 'getAggregatedStats:bySource', { range }); - - // By location (local vs remote SSH) - const byLocationStart = perfMetrics.start(); - const byLocationStmt = this.db.prepare(` - SELECT is_remote, COUNT(*) as count - FROM query_events - WHERE start_time >= ? 
- GROUP BY is_remote - `); - const byLocationRows = byLocationStmt.all(startTime) as Array<{ - is_remote: number | null; - count: number; - }>; - const byLocation = { local: 0, remote: 0 }; - for (const row of byLocationRows) { - if (row.is_remote === 1) { - byLocation.remote = row.count; - } else { - // Treat NULL (legacy data) and 0 as local - byLocation.local += row.count; - } - } - perfMetrics.end(byLocationStart, 'getAggregatedStats:byLocation', { range }); - - // By day (for charts) - const byDayStart = perfMetrics.start(); - const byDayStmt = this.db.prepare(` - SELECT date(start_time / 1000, 'unixepoch', 'localtime') as date, - COUNT(*) as count, - SUM(duration) as duration - FROM query_events - WHERE start_time >= ? - GROUP BY date(start_time / 1000, 'unixepoch', 'localtime') - ORDER BY date ASC - `); - const byDayRows = byDayStmt.all(startTime) as Array<{ - date: string; - count: number; - duration: number; - }>; - perfMetrics.end(byDayStart, 'getAggregatedStats:byDay', { range, dayCount: byDayRows.length }); - - // By agent by day (for provider usage chart) - const byAgentByDayStart = perfMetrics.start(); - const byAgentByDayStmt = this.db.prepare(` - SELECT agent_type, - date(start_time / 1000, 'unixepoch', 'localtime') as date, - COUNT(*) as count, - SUM(duration) as duration - FROM query_events - WHERE start_time >= ? 
- GROUP BY agent_type, date(start_time / 1000, 'unixepoch', 'localtime') - ORDER BY agent_type, date ASC - `); - const byAgentByDayRows = byAgentByDayStmt.all(startTime) as Array<{ - agent_type: string; - date: string; - count: number; - duration: number; - }>; - // Group by agent type - const byAgentByDay: Record< - string, - Array<{ date: string; count: number; duration: number }> - > = {}; - for (const row of byAgentByDayRows) { - if (!byAgentByDay[row.agent_type]) { - byAgentByDay[row.agent_type] = []; - } - byAgentByDay[row.agent_type].push({ - date: row.date, - count: row.count, - duration: row.duration, - }); - } - perfMetrics.end(byAgentByDayStart, 'getAggregatedStats:byAgentByDay', { range }); - - // By hour (for peak hours chart) - const byHourStart = perfMetrics.start(); - const byHourStmt = this.db.prepare(` - SELECT CAST(strftime('%H', start_time / 1000, 'unixepoch', 'localtime') AS INTEGER) as hour, - COUNT(*) as count, - SUM(duration) as duration - FROM query_events - WHERE start_time >= ? - GROUP BY hour - ORDER BY hour ASC - `); - const byHourRows = byHourStmt.all(startTime) as Array<{ - hour: number; - count: number; - duration: number; - }>; - perfMetrics.end(byHourStart, 'getAggregatedStats:byHour', { range }); - - // Session stats (counting unique session IDs from query_events, which includes tab GUIDs) - const sessionsStart = perfMetrics.start(); - - // Total unique sessions with queries (counts tabs that have had at least one query) - const sessionTotalsStmt = this.db.prepare(` - SELECT COUNT(DISTINCT session_id) as count - FROM query_events - WHERE start_time >= ? - `); - const sessionTotals = sessionTotalsStmt.get(startTime) as { count: number }; - - // Average session duration from lifecycle table (for sessions that have been closed) - const avgSessionDurationStmt = this.db.prepare(` - SELECT COALESCE(AVG(duration), 0) as avg_duration - FROM session_lifecycle - WHERE created_at >= ? 
AND duration IS NOT NULL - `); - const avgSessionDurationResult = avgSessionDurationStmt.get(startTime) as { - avg_duration: number; - }; - - // Sessions by agent type - const sessionsByAgentStmt = this.db.prepare(` - SELECT agent_type, COUNT(*) as count - FROM session_lifecycle - WHERE created_at >= ? - GROUP BY agent_type - `); - const sessionsByAgentRows = sessionsByAgentStmt.all(startTime) as Array<{ - agent_type: string; - count: number; - }>; - const sessionsByAgent: Record = {}; - for (const row of sessionsByAgentRows) { - sessionsByAgent[row.agent_type] = row.count; - } - - // Sessions by day - const sessionsByDayStmt = this.db.prepare(` - SELECT date(created_at / 1000, 'unixepoch', 'localtime') as date, - COUNT(*) as count - FROM session_lifecycle - WHERE created_at >= ? - GROUP BY date(created_at / 1000, 'unixepoch', 'localtime') - ORDER BY date ASC - `); - const sessionsByDayRows = sessionsByDayStmt.all(startTime) as Array<{ - date: string; - count: number; - }>; - - perfMetrics.end(sessionsStart, 'getAggregatedStats:sessions', { - range, - sessionCount: sessionTotals.count, - }); - - // By session by day (for agent usage chart - shows each Maestro session's usage over time) - const bySessionByDayStart = perfMetrics.start(); - const bySessionByDayStmt = this.db.prepare(` - SELECT session_id, - date(start_time / 1000, 'unixepoch', 'localtime') as date, - COUNT(*) as count, - SUM(duration) as duration - FROM query_events - WHERE start_time >= ? 
- GROUP BY session_id, date(start_time / 1000, 'unixepoch', 'localtime') - ORDER BY session_id, date ASC - `); - const bySessionByDayRows = bySessionByDayStmt.all(startTime) as Array<{ - session_id: string; - date: string; - count: number; - duration: number; - }>; - const bySessionByDay: Record< - string, - Array<{ date: string; count: number; duration: number }> - > = {}; - for (const row of bySessionByDayRows) { - if (!bySessionByDay[row.session_id]) { - bySessionByDay[row.session_id] = []; - } - bySessionByDay[row.session_id].push({ - date: row.date, - count: row.count, - duration: row.duration, - }); - } - perfMetrics.end(bySessionByDayStart, 'getAggregatedStats:bySessionByDay', { range }); - - const totalDuration = perfMetrics.end(perfStart, 'getAggregatedStats:total', { - range, - totalQueries: totals.count, - }); - - // Log warning if the aggregation is slow - if (totalDuration > PERFORMANCE_THRESHOLDS.DASHBOARD_LOAD) { - logger.warn( - `getAggregatedStats took ${totalDuration.toFixed(0)}ms (threshold: ${PERFORMANCE_THRESHOLDS.DASHBOARD_LOAD}ms)`, - LOG_CONTEXT, - { range, totalQueries: totals.count } - ); - } - - return { - totalQueries: totals.count, - totalDuration: totals.total_duration, - avgDuration: totals.count > 0 ? Math.round(totals.total_duration / totals.count) : 0, - byAgent, - bySource, - byDay: byDayRows, - byLocation, - byHour: byHourRows, - totalSessions: sessionTotals.count, - sessionsByAgent, - sessionsByDay: sessionsByDayRows, - avgSessionDuration: Math.round(avgSessionDurationResult.avg_duration), - byAgentByDay, - bySessionByDay, - }; - } - - // ============================================================================ - // Data Management - // ============================================================================ - - /** - * Clear old data from the database. - * - * Deletes query_events, auto_run_sessions, auto_run_tasks, and session_lifecycle - * records that are older than the specified number of days. 
This is useful for - * managing database size and removing stale historical data. - * - * @param olderThanDays - Delete records older than this many days (e.g., 30, 90, 180, 365) - * @returns Object with success status, number of records deleted from each table, and any error - */ - clearOldData(olderThanDays: number): { - success: boolean; - deletedQueryEvents: number; - deletedAutoRunSessions: number; - deletedAutoRunTasks: number; - deletedSessionLifecycle: number; - error?: string; - } { - if (!this.db) { - return { - success: false, - deletedQueryEvents: 0, - deletedAutoRunSessions: 0, - deletedAutoRunTasks: 0, - deletedSessionLifecycle: 0, - error: 'Database not initialized', - }; - } - - if (olderThanDays <= 0) { - return { - success: false, - deletedQueryEvents: 0, - deletedAutoRunSessions: 0, - deletedAutoRunTasks: 0, - deletedSessionLifecycle: 0, - error: 'olderThanDays must be greater than 0', - }; - } - - try { - const cutoffTime = Date.now() - olderThanDays * 24 * 60 * 60 * 1000; - - logger.info( - `Clearing stats data older than ${olderThanDays} days (before ${new Date(cutoffTime).toISOString()})`, - LOG_CONTEXT - ); - - // Get IDs of auto_run_sessions to be deleted (for cascading to tasks) - const sessionsToDelete = this.db - .prepare('SELECT id FROM auto_run_sessions WHERE start_time < ?') - .all(cutoffTime) as Array<{ id: string }>; - const sessionIds = sessionsToDelete.map((row) => row.id); - - // Delete auto_run_tasks for the sessions being deleted - let deletedTasks = 0; - if (sessionIds.length > 0) { - // SQLite doesn't support array binding, so we use a subquery - const tasksResult = this.db - .prepare( - 'DELETE FROM auto_run_tasks WHERE auto_run_session_id IN (SELECT id FROM auto_run_sessions WHERE start_time < ?)' - ) - .run(cutoffTime); - deletedTasks = tasksResult.changes; - } - - // Delete auto_run_sessions - const sessionsResult = this.db - .prepare('DELETE FROM auto_run_sessions WHERE start_time < ?') - .run(cutoffTime); - const 
deletedSessions = sessionsResult.changes; - - // Delete query_events - const eventsResult = this.db - .prepare('DELETE FROM query_events WHERE start_time < ?') - .run(cutoffTime); - const deletedEvents = eventsResult.changes; - - // Delete session_lifecycle - const lifecycleResult = this.db - .prepare('DELETE FROM session_lifecycle WHERE created_at < ?') - .run(cutoffTime); - const deletedLifecycle = lifecycleResult.changes; - - const totalDeleted = deletedEvents + deletedSessions + deletedTasks + deletedLifecycle; - logger.info( - `Cleared ${totalDeleted} old stats records (${deletedEvents} query events, ${deletedSessions} auto-run sessions, ${deletedTasks} auto-run tasks, ${deletedLifecycle} session lifecycle)`, - LOG_CONTEXT - ); - - return { - success: true, - deletedQueryEvents: deletedEvents, - deletedAutoRunSessions: deletedSessions, - deletedAutoRunTasks: deletedTasks, - deletedSessionLifecycle: deletedLifecycle, - }; - } catch (error) { - const errorMessage = error instanceof Error ? error.message : String(error); - logger.error(`Failed to clear old stats data: ${errorMessage}`, LOG_CONTEXT); - return { - success: false, - deletedQueryEvents: 0, - deletedAutoRunSessions: 0, - deletedAutoRunTasks: 0, - deletedSessionLifecycle: 0, - error: errorMessage, - }; - } - } - - // ============================================================================ - // Export - // ============================================================================ - - /** - * Export query events to CSV format - */ - exportToCsv(range: StatsTimeRange): string { - const events = this.getQueryEvents(range); - - const headers = [ - 'id', - 'sessionId', - 'agentType', - 'source', - 'startTime', - 'duration', - 'projectPath', - 'tabId', - ]; - const rows = events.map((e) => [ - e.id, - e.sessionId, - e.agentType, - e.source, - new Date(e.startTime).toISOString(), - e.duration.toString(), - e.projectPath ?? '', - e.tabId ?? 
'', - ]); - - const csvContent = [ - headers.join(','), - ...rows.map((row) => row.map((cell) => `"${cell}"`).join(',')), - ].join('\n'); - - return csvContent; - } -} - -// ============================================================================ -// Singleton Instance -// ============================================================================ - -let statsDbInstance: StatsDB | null = null; - -/** Stores the result of the last initialization attempt */ -let lastInitializationResult: InitializationResult | null = null; - -/** - * Get the singleton StatsDB instance - */ -export function getStatsDB(): StatsDB { - if (!statsDbInstance) { - statsDbInstance = new StatsDB(); - } - return statsDbInstance; -} - -/** - * Initialize the stats database (call on app ready) - * - * @returns InitializationResult with success status and reset information - */ -export function initializeStatsDB(): InitializationResult { - const db = getStatsDB(); - const result = db.initialize(); - lastInitializationResult = result; - return result; -} - -/** - * Get the result of the last initialization attempt. - * Used by the renderer to check if the database was reset and show a notification. 
- * - * @returns InitializationResult or null if initialize() hasn't been called - */ -export function getInitializationResult(): InitializationResult | null { - return lastInitializationResult; -} - -/** - * Clear the initialization result (e.g., after the user has acknowledged the notification) - */ -export function clearInitializationResult(): void { - // Only clear the user message, keep the rest for debugging - if (lastInitializationResult) { - lastInitializationResult = { - ...lastInitializationResult, - userMessage: undefined, - }; - } -} - -/** - * Close the stats database (call on app quit) - */ -export function closeStatsDB(): void { - if (statsDbInstance) { - statsDbInstance.close(); - statsDbInstance = null; - } -} - -// ============================================================================ -// Performance Metrics API -// ============================================================================ - -/** - * Enable or disable performance metrics logging for StatsDB operations. - * - * When enabled, detailed timing information is logged at debug level for: - * - Database queries (getAggregatedStats, getQueryEvents, etc.) - * - Individual SQL operations (totals, byAgent, bySource, byDay queries) - * - * Performance warnings are always logged (even when metrics are disabled) - * when operations exceed defined thresholds. - * - * @param enabled - Whether to enable performance metrics logging - */ -export function setPerformanceLoggingEnabled(enabled: boolean): void { - perfMetrics.setEnabled(enabled); - logger.info(`Performance metrics logging ${enabled ? 'enabled' : 'disabled'}`, LOG_CONTEXT); -} - -/** - * Check if performance metrics logging is currently enabled. - * - * @returns true if performance metrics are being logged - */ -export function isPerformanceLoggingEnabled(): boolean { - return perfMetrics.isEnabled(); -} - -/** - * Get collected performance metrics for analysis. - * - * Returns the last 100 recorded metrics (when enabled). 
- * Useful for debugging and performance analysis. - * - * @returns Array of performance metric entries - */ -export function getPerformanceMetrics() { - return perfMetrics.getMetrics(); -} - -/** - * Clear collected performance metrics. - */ -export function clearPerformanceMetrics(): void { - perfMetrics.clearMetrics(); -} diff --git a/src/main/stats/aggregations.ts b/src/main/stats/aggregations.ts new file mode 100644 index 00000000..68c2ddfd --- /dev/null +++ b/src/main/stats/aggregations.ts @@ -0,0 +1,353 @@ +/** + * Stats Aggregation Queries + * + * Decomposes the monolithic getAggregatedStats into focused sub-query functions, + * each independently testable and readable. + */ + +import type Database from 'better-sqlite3'; +import type { StatsTimeRange, StatsAggregation } from '../../shared/stats-types'; +import { PERFORMANCE_THRESHOLDS } from '../../shared/performance-metrics'; +import { getTimeRangeStart, perfMetrics, LOG_CONTEXT } from './utils'; +import { logger } from '../utils/logger'; + +// ============================================================================ +// Sub-query Functions +// ============================================================================ + +function queryTotals( + db: Database.Database, + startTime: number +): { count: number; total_duration: number } { + const perfStart = perfMetrics.start(); + const result = db + .prepare( + ` + SELECT COUNT(*) as count, COALESCE(SUM(duration), 0) as total_duration + FROM query_events + WHERE start_time >= ? + ` + ) + .get(startTime) as { count: number; total_duration: number }; + perfMetrics.end(perfStart, 'getAggregatedStats:totals'); + return result; +} + +function queryByAgent( + db: Database.Database, + startTime: number +): Record { + const perfStart = perfMetrics.start(); + const rows = db + .prepare( + ` + SELECT agent_type, COUNT(*) as count, SUM(duration) as duration + FROM query_events + WHERE start_time >= ? 
+ GROUP BY agent_type + ` + ) + .all(startTime) as Array<{ agent_type: string; count: number; duration: number }>; + + const result: Record = {}; + for (const row of rows) { + result[row.agent_type] = { count: row.count, duration: row.duration }; + } + perfMetrics.end(perfStart, 'getAggregatedStats:byAgent', { agentCount: rows.length }); + return result; +} + +function queryBySource(db: Database.Database, startTime: number): { user: number; auto: number } { + const perfStart = perfMetrics.start(); + const rows = db + .prepare( + ` + SELECT source, COUNT(*) as count + FROM query_events + WHERE start_time >= ? + GROUP BY source + ` + ) + .all(startTime) as Array<{ source: 'user' | 'auto'; count: number }>; + + const result = { user: 0, auto: 0 }; + for (const row of rows) { + result[row.source] = row.count; + } + perfMetrics.end(perfStart, 'getAggregatedStats:bySource'); + return result; +} + +function queryByLocation( + db: Database.Database, + startTime: number +): { local: number; remote: number } { + const perfStart = perfMetrics.start(); + const rows = db + .prepare( + ` + SELECT is_remote, COUNT(*) as count + FROM query_events + WHERE start_time >= ? + GROUP BY is_remote + ` + ) + .all(startTime) as Array<{ is_remote: number | null; count: number }>; + + const result = { local: 0, remote: 0 }; + for (const row of rows) { + if (row.is_remote === 1) { + result.remote = row.count; + } else { + // Treat NULL (legacy data) and 0 as local + result.local += row.count; + } + } + perfMetrics.end(perfStart, 'getAggregatedStats:byLocation'); + return result; +} + +function queryByDay( + db: Database.Database, + startTime: number +): Array<{ date: string; count: number; duration: number }> { + const perfStart = perfMetrics.start(); + const rows = db + .prepare( + ` + SELECT date(start_time / 1000, 'unixepoch', 'localtime') as date, + COUNT(*) as count, + SUM(duration) as duration + FROM query_events + WHERE start_time >= ? 
+ GROUP BY date(start_time / 1000, 'unixepoch', 'localtime') + ORDER BY date ASC + ` + ) + .all(startTime) as Array<{ date: string; count: number; duration: number }>; + perfMetrics.end(perfStart, 'getAggregatedStats:byDay', { dayCount: rows.length }); + return rows; +} + +function queryByAgentByDay( + db: Database.Database, + startTime: number +): Record> { + const perfStart = perfMetrics.start(); + const rows = db + .prepare( + ` + SELECT agent_type, + date(start_time / 1000, 'unixepoch', 'localtime') as date, + COUNT(*) as count, + SUM(duration) as duration + FROM query_events + WHERE start_time >= ? + GROUP BY agent_type, date(start_time / 1000, 'unixepoch', 'localtime') + ORDER BY agent_type, date ASC + ` + ) + .all(startTime) as Array<{ + agent_type: string; + date: string; + count: number; + duration: number; + }>; + + const result: Record> = {}; + for (const row of rows) { + if (!result[row.agent_type]) { + result[row.agent_type] = []; + } + result[row.agent_type].push({ date: row.date, count: row.count, duration: row.duration }); + } + perfMetrics.end(perfStart, 'getAggregatedStats:byAgentByDay'); + return result; +} + +function queryByHour( + db: Database.Database, + startTime: number +): Array<{ hour: number; count: number; duration: number }> { + const perfStart = perfMetrics.start(); + const rows = db + .prepare( + ` + SELECT CAST(strftime('%H', start_time / 1000, 'unixepoch', 'localtime') AS INTEGER) as hour, + COUNT(*) as count, + SUM(duration) as duration + FROM query_events + WHERE start_time >= ? 
+ GROUP BY hour + ORDER BY hour ASC + ` + ) + .all(startTime) as Array<{ hour: number; count: number; duration: number }>; + perfMetrics.end(perfStart, 'getAggregatedStats:byHour'); + return rows; +} + +function querySessionStats( + db: Database.Database, + startTime: number +): { + totalSessions: number; + sessionsByAgent: Record; + sessionsByDay: Array<{ date: string; count: number }>; + avgSessionDuration: number; +} { + const perfStart = perfMetrics.start(); + + // Total unique sessions with queries + const sessionTotals = db + .prepare( + ` + SELECT COUNT(DISTINCT session_id) as count + FROM query_events + WHERE start_time >= ? + ` + ) + .get(startTime) as { count: number }; + + // Average session duration from lifecycle table + const avgResult = db + .prepare( + ` + SELECT COALESCE(AVG(duration), 0) as avg_duration + FROM session_lifecycle + WHERE created_at >= ? AND duration IS NOT NULL + ` + ) + .get(startTime) as { avg_duration: number }; + + // Sessions by agent type + const byAgentRows = db + .prepare( + ` + SELECT agent_type, COUNT(*) as count + FROM session_lifecycle + WHERE created_at >= ? + GROUP BY agent_type + ` + ) + .all(startTime) as Array<{ agent_type: string; count: number }>; + + const sessionsByAgent: Record = {}; + for (const row of byAgentRows) { + sessionsByAgent[row.agent_type] = row.count; + } + + // Sessions by day + const byDayRows = db + .prepare( + ` + SELECT date(created_at / 1000, 'unixepoch', 'localtime') as date, + COUNT(*) as count + FROM session_lifecycle + WHERE created_at >= ? 
+ GROUP BY date(created_at / 1000, 'unixepoch', 'localtime') + ORDER BY date ASC + ` + ) + .all(startTime) as Array<{ date: string; count: number }>; + + perfMetrics.end(perfStart, 'getAggregatedStats:sessions', { + sessionCount: sessionTotals.count, + }); + + return { + totalSessions: sessionTotals.count, + sessionsByAgent, + sessionsByDay: byDayRows, + avgSessionDuration: Math.round(avgResult.avg_duration), + }; +} + +function queryBySessionByDay( + db: Database.Database, + startTime: number +): Record> { + const perfStart = perfMetrics.start(); + const rows = db + .prepare( + ` + SELECT session_id, + date(start_time / 1000, 'unixepoch', 'localtime') as date, + COUNT(*) as count, + SUM(duration) as duration + FROM query_events + WHERE start_time >= ? + GROUP BY session_id, date(start_time / 1000, 'unixepoch', 'localtime') + ORDER BY session_id, date ASC + ` + ) + .all(startTime) as Array<{ + session_id: string; + date: string; + count: number; + duration: number; + }>; + + const result: Record> = {}; + for (const row of rows) { + if (!result[row.session_id]) { + result[row.session_id] = []; + } + result[row.session_id].push({ date: row.date, count: row.count, duration: row.duration }); + } + perfMetrics.end(perfStart, 'getAggregatedStats:bySessionByDay'); + return result; +} + +// ============================================================================ +// Orchestrator +// ============================================================================ + +/** + * Get aggregated statistics for a time range. + * + * Composes results from focused sub-query functions for readability + * and independent testability. 
+ */ +export function getAggregatedStats(db: Database.Database, range: StatsTimeRange): StatsAggregation { + const perfStart = perfMetrics.start(); + const startTime = getTimeRangeStart(range); + + const totals = queryTotals(db, startTime); + const byAgent = queryByAgent(db, startTime); + const bySource = queryBySource(db, startTime); + const byLocation = queryByLocation(db, startTime); + const byDay = queryByDay(db, startTime); + const byAgentByDay = queryByAgentByDay(db, startTime); + const byHour = queryByHour(db, startTime); + const sessionStats = querySessionStats(db, startTime); + const bySessionByDay = queryBySessionByDay(db, startTime); + + const totalDuration = perfMetrics.end(perfStart, 'getAggregatedStats:total', { + range, + totalQueries: totals.count, + }); + + // Log warning if the aggregation is slow + if (totalDuration > PERFORMANCE_THRESHOLDS.DASHBOARD_LOAD) { + logger.warn( + `getAggregatedStats took ${totalDuration.toFixed(0)}ms (threshold: ${PERFORMANCE_THRESHOLDS.DASHBOARD_LOAD}ms)`, + LOG_CONTEXT, + { range, totalQueries: totals.count } + ); + } + + return { + totalQueries: totals.count, + totalDuration: totals.total_duration, + avgDuration: totals.count > 0 ? Math.round(totals.total_duration / totals.count) : 0, + byAgent, + bySource, + byDay, + byLocation, + byHour, + ...sessionStats, + byAgentByDay, + bySessionByDay, + }; +} diff --git a/src/main/stats/auto-run.ts b/src/main/stats/auto-run.ts new file mode 100644 index 00000000..1c784334 --- /dev/null +++ b/src/main/stats/auto-run.ts @@ -0,0 +1,169 @@ +/** + * Auto Run CRUD Operations + * + * Handles insertion, updating, and retrieval of Auto Run sessions and tasks. 
+ */ + +import type Database from 'better-sqlite3'; +import type { AutoRunSession, AutoRunTask, StatsTimeRange } from '../../shared/stats-types'; +import { generateId, getTimeRangeStart, normalizePath, LOG_CONTEXT } from './utils'; +import { + mapAutoRunSessionRow, + mapAutoRunTaskRow, + type AutoRunSessionRow, + type AutoRunTaskRow, +} from './row-mappers'; +import { StatementCache } from './utils'; +import { logger } from '../utils/logger'; + +const stmtCache = new StatementCache(); + +// ============================================================================ +// Auto Run Sessions +// ============================================================================ + +const INSERT_SESSION_SQL = ` + INSERT INTO auto_run_sessions (id, session_id, agent_type, document_path, start_time, duration, tasks_total, tasks_completed, project_path) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?) +`; + +/** + * Insert a new Auto Run session + */ +export function insertAutoRunSession( + db: Database.Database, + session: Omit +): string { + const id = generateId(); + const stmt = stmtCache.get(db, INSERT_SESSION_SQL); + + stmt.run( + id, + session.sessionId, + session.agentType, + normalizePath(session.documentPath), + session.startTime, + session.duration, + session.tasksTotal ?? null, + session.tasksCompleted ?? null, + normalizePath(session.projectPath) + ); + + logger.debug(`Inserted Auto Run session ${id}`, LOG_CONTEXT); + return id; +} + +/** + * Update an existing Auto Run session (e.g., when it completes) + */ +export function updateAutoRunSession( + db: Database.Database, + id: string, + updates: Partial +): boolean { + const setClauses: string[] = []; + const params: (string | number | null)[] = []; + + if (updates.duration !== undefined) { + setClauses.push('duration = ?'); + params.push(updates.duration); + } + if (updates.tasksTotal !== undefined) { + setClauses.push('tasks_total = ?'); + params.push(updates.tasksTotal ?? 
null); + } + if (updates.tasksCompleted !== undefined) { + setClauses.push('tasks_completed = ?'); + params.push(updates.tasksCompleted ?? null); + } + if (updates.documentPath !== undefined) { + setClauses.push('document_path = ?'); + params.push(normalizePath(updates.documentPath)); + } + + if (setClauses.length === 0) { + return false; + } + + params.push(id); + const sql = `UPDATE auto_run_sessions SET ${setClauses.join(', ')} WHERE id = ?`; + const stmt = db.prepare(sql); + const result = stmt.run(...params); + + logger.debug(`Updated Auto Run session ${id}`, LOG_CONTEXT); + return result.changes > 0; +} + +/** + * Get Auto Run sessions within a time range + */ +export function getAutoRunSessions(db: Database.Database, range: StatsTimeRange): AutoRunSession[] { + const startTime = getTimeRangeStart(range); + const stmt = stmtCache.get( + db, + ` + SELECT * FROM auto_run_sessions + WHERE start_time >= ? + ORDER BY start_time DESC + ` + ); + + const rows = stmt.all(startTime) as AutoRunSessionRow[]; + return rows.map(mapAutoRunSessionRow); +} + +// ============================================================================ +// Auto Run Tasks +// ============================================================================ + +const INSERT_TASK_SQL = ` + INSERT INTO auto_run_tasks (id, auto_run_session_id, session_id, agent_type, task_index, task_content, start_time, duration, success) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?) +`; + +/** + * Insert a new Auto Run task + */ +export function insertAutoRunTask(db: Database.Database, task: Omit): string { + const id = generateId(); + const stmt = stmtCache.get(db, INSERT_TASK_SQL); + + stmt.run( + id, + task.autoRunSessionId, + task.sessionId, + task.agentType, + task.taskIndex, + task.taskContent ?? null, + task.startTime, + task.duration, + task.success ? 
1 : 0 + ); + + logger.debug(`Inserted Auto Run task ${id}`, LOG_CONTEXT); + return id; +} + +/** + * Get all tasks for a specific Auto Run session + */ +export function getAutoRunTasks(db: Database.Database, autoRunSessionId: string): AutoRunTask[] { + const stmt = stmtCache.get( + db, + ` + SELECT * FROM auto_run_tasks + WHERE auto_run_session_id = ? + ORDER BY task_index ASC + ` + ); + + const rows = stmt.all(autoRunSessionId) as AutoRunTaskRow[]; + return rows.map(mapAutoRunTaskRow); +} + +/** + * Clear the statement cache (call when database is closed) + */ +export function clearAutoRunCache(): void { + stmtCache.clear(); +} diff --git a/src/main/stats/data-management.ts b/src/main/stats/data-management.ts new file mode 100644 index 00000000..215efd76 --- /dev/null +++ b/src/main/stats/data-management.ts @@ -0,0 +1,170 @@ +/** + * Data Management Operations + * + * Handles data cleanup (with transactional safety) and CSV export + * (with proper escaping and complete field coverage). + */ + +import type Database from 'better-sqlite3'; +import type { StatsTimeRange } from '../../shared/stats-types'; +import { getQueryEvents } from './query-events'; +import { LOG_CONTEXT } from './utils'; +import { logger } from '../utils/logger'; + +// ============================================================================ +// Data Cleanup +// ============================================================================ + +/** + * Clear old data from the database. + * + * Deletes query_events, auto_run_sessions, auto_run_tasks, and session_lifecycle + * records that are older than the specified number of days. + * + * All deletes run within a single transaction for atomicity — either all tables + * are cleaned or none are. 
+ * + * @param olderThanDays - Delete records older than this many days + */ +export function clearOldData( + db: Database.Database, + olderThanDays: number +): { + success: boolean; + deletedQueryEvents: number; + deletedAutoRunSessions: number; + deletedAutoRunTasks: number; + deletedSessionLifecycle: number; + error?: string; +} { + if (olderThanDays <= 0) { + return { + success: false, + deletedQueryEvents: 0, + deletedAutoRunSessions: 0, + deletedAutoRunTasks: 0, + deletedSessionLifecycle: 0, + error: 'olderThanDays must be greater than 0', + }; + } + + try { + const cutoffTime = Date.now() - olderThanDays * 24 * 60 * 60 * 1000; + + logger.info( + `Clearing stats data older than ${olderThanDays} days (before ${new Date(cutoffTime).toISOString()})`, + LOG_CONTEXT + ); + + let deletedEvents = 0; + let deletedSessions = 0; + let deletedTasks = 0; + let deletedLifecycle = 0; + + // Wrap all deletes in a transaction for atomicity + const runCleanup = db.transaction(() => { + // Delete auto_run_tasks for sessions being deleted (cascade) + const tasksResult = db + .prepare( + 'DELETE FROM auto_run_tasks WHERE auto_run_session_id IN (SELECT id FROM auto_run_sessions WHERE start_time < ?)' + ) + .run(cutoffTime); + deletedTasks = tasksResult.changes; + + // Delete auto_run_sessions + const sessionsResult = db + .prepare('DELETE FROM auto_run_sessions WHERE start_time < ?') + .run(cutoffTime); + deletedSessions = sessionsResult.changes; + + // Delete query_events + const eventsResult = db + .prepare('DELETE FROM query_events WHERE start_time < ?') + .run(cutoffTime); + deletedEvents = eventsResult.changes; + + // Delete session_lifecycle + const lifecycleResult = db + .prepare('DELETE FROM session_lifecycle WHERE created_at < ?') + .run(cutoffTime); + deletedLifecycle = lifecycleResult.changes; + }); + + runCleanup(); + + const totalDeleted = deletedEvents + deletedSessions + deletedTasks + deletedLifecycle; + logger.info( + `Cleared ${totalDeleted} old stats records 
(${deletedEvents} query events, ${deletedSessions} auto-run sessions, ${deletedTasks} auto-run tasks, ${deletedLifecycle} session lifecycle)`, + LOG_CONTEXT + ); + + return { + success: true, + deletedQueryEvents: deletedEvents, + deletedAutoRunSessions: deletedSessions, + deletedAutoRunTasks: deletedTasks, + deletedSessionLifecycle: deletedLifecycle, + }; + } catch (error) { + const errorMessage = error instanceof Error ? error.message : String(error); + logger.error(`Failed to clear old stats data: ${errorMessage}`, LOG_CONTEXT); + return { + success: false, + deletedQueryEvents: 0, + deletedAutoRunSessions: 0, + deletedAutoRunTasks: 0, + deletedSessionLifecycle: 0, + error: errorMessage, + }; + } +} + +// ============================================================================ +// CSV Export +// ============================================================================ + +/** + * Escape a value for CSV output. + * + * Wraps the value in double quotes and escapes any embedded double quotes + * by doubling them (RFC 4180 compliant). + */ +function csvEscape(value: string): string { + return `"${value.replace(/"/g, '""')}"`; +} + +/** + * Export query events to CSV format. + * + * Includes all fields (including isRemote added in migration v2) + * with proper CSV escaping for values containing quotes, commas, or newlines. + */ +export function exportToCsv(db: Database.Database, range: StatsTimeRange): string { + const events = getQueryEvents(db, range); + + const headers = [ + 'id', + 'sessionId', + 'agentType', + 'source', + 'startTime', + 'duration', + 'projectPath', + 'tabId', + 'isRemote', + ]; + + const rows = events.map((e) => [ + csvEscape(e.id), + csvEscape(e.sessionId), + csvEscape(e.agentType), + csvEscape(e.source), + csvEscape(new Date(e.startTime).toISOString()), + csvEscape(e.duration.toString()), + csvEscape(e.projectPath ?? ''), + csvEscape(e.tabId ?? ''), + csvEscape(e.isRemote !== undefined ? 
String(e.isRemote) : ''), + ]); + + return [headers.join(','), ...rows.map((row) => row.join(','))].join('\n'); +} diff --git a/src/main/stats/index.ts b/src/main/stats/index.ts new file mode 100644 index 00000000..4a1a145d --- /dev/null +++ b/src/main/stats/index.ts @@ -0,0 +1,44 @@ +/** + * Stats Module + * + * Consolidated module for all stats database functionality: + * - SQLite database lifecycle and integrity management + * - Migration system for schema evolution + * - CRUD operations for query events, auto-run sessions/tasks, and session lifecycle + * - Aggregated statistics for the Usage Dashboard + * - Data management (cleanup, CSV export) + * - Singleton instance management + * - Performance metrics API + * + * Usage: + * ```typescript + * import { getStatsDB, initializeStatsDB, closeStatsDB } from './stats'; + * import type { StatsDB } from './stats'; + * ``` + */ + +// ============ Types ============ +export type { + IntegrityCheckResult, + BackupResult, + CorruptionRecoveryResult, + Migration, + MigrationRecord, +} from './types'; + +// ============ Utilities ============ +export { normalizePath } from './utils'; + +// ============ Core Database ============ +export { StatsDB } from './stats-db'; + +// ============ Singleton & Lifecycle ============ +export { getStatsDB, initializeStatsDB, closeStatsDB } from './singleton'; + +// ============ Performance Metrics API ============ +export { + setPerformanceLoggingEnabled, + isPerformanceLoggingEnabled, + getPerformanceMetrics, + clearPerformanceMetrics, +} from './singleton'; diff --git a/src/main/stats/migrations.ts b/src/main/stats/migrations.ts new file mode 100644 index 00000000..4d356cee --- /dev/null +++ b/src/main/stats/migrations.ts @@ -0,0 +1,234 @@ +/** + * Stats Database Migration System + * + * Manages schema evolution through versioned, sequential migrations. + * Each migration runs exactly once and is recorded in the _migrations table. + * + * ### Adding New Migrations + * + * 1. 
Create a new `migrateVN()` function + * 2. Add it to the `getMigrations()` array with version number and description + * 3. Update `STATS_DB_VERSION` in `../../shared/stats-types.ts` + */ + +import type Database from 'better-sqlite3'; +import type { Migration, MigrationRecord } from './types'; +import { mapMigrationRecordRow, type MigrationRecordRow } from './row-mappers'; +import { + CREATE_MIGRATIONS_TABLE_SQL, + CREATE_QUERY_EVENTS_SQL, + CREATE_QUERY_EVENTS_INDEXES_SQL, + CREATE_AUTO_RUN_SESSIONS_SQL, + CREATE_AUTO_RUN_SESSIONS_INDEXES_SQL, + CREATE_AUTO_RUN_TASKS_SQL, + CREATE_AUTO_RUN_TASKS_INDEXES_SQL, + CREATE_SESSION_LIFECYCLE_SQL, + CREATE_SESSION_LIFECYCLE_INDEXES_SQL, + runStatements, +} from './schema'; +import { LOG_CONTEXT } from './utils'; +import { logger } from '../utils/logger'; + +// ============================================================================ +// Migration Registry +// ============================================================================ + +/** + * Registry of all database migrations. + * Migrations must be sequential starting from version 1. + */ +export function getMigrations(): Migration[] { + return [ + { + version: 1, + description: 'Initial schema: query_events, auto_run_sessions, auto_run_tasks tables', + up: (db) => migrateV1(db), + }, + { + version: 2, + description: 'Add is_remote column to query_events for tracking SSH sessions', + up: (db) => migrateV2(db), + }, + { + version: 3, + description: 'Add session_lifecycle table for tracking session creation and closure', + up: (db) => migrateV3(db), + }, + ]; +} + +// ============================================================================ +// Migration Execution +// ============================================================================ + +/** + * Run all pending database migrations. + * + * 1. Creates the _migrations table if it doesn't exist + * 2. Gets the current schema version from user_version pragma + * 3. 
Runs each pending migration in a transaction + * 4. Records each migration in the _migrations table + * 5. Updates the user_version pragma + */ +export function runMigrations(db: Database.Database): void { + // Create migrations table (the only table created outside the migration system) + db.prepare(CREATE_MIGRATIONS_TABLE_SQL).run(); + + // Get current version (0 if fresh database) + const versionResult = db.pragma('user_version') as Array<{ user_version: number }>; + const currentVersion = versionResult[0]?.user_version ?? 0; + + const migrations = getMigrations(); + const pendingMigrations = migrations.filter((m) => m.version > currentVersion); + + if (pendingMigrations.length === 0) { + logger.debug(`Database is up to date (version ${currentVersion})`, LOG_CONTEXT); + return; + } + + // Sort by version to ensure sequential execution + pendingMigrations.sort((a, b) => a.version - b.version); + + logger.info( + `Running ${pendingMigrations.length} pending migration(s) (current version: ${currentVersion})`, + LOG_CONTEXT + ); + + for (const migration of pendingMigrations) { + applyMigration(db, migration); + } +} + +/** + * Apply a single migration within a transaction. + * Records the migration in the _migrations table with success/failure status. 
+ */ +function applyMigration(db: Database.Database, migration: Migration): void { + const startTime = Date.now(); + logger.info(`Applying migration v${migration.version}: ${migration.description}`, LOG_CONTEXT); + + try { + const runMigrationTxn = db.transaction(() => { + migration.up(db); + + db.prepare( + ` + INSERT OR REPLACE INTO _migrations (version, description, applied_at, status, error_message) + VALUES (?, ?, ?, 'success', NULL) + ` + ).run(migration.version, migration.description, Date.now()); + + db.pragma(`user_version = ${migration.version}`); + }); + + runMigrationTxn(); + + const duration = Date.now() - startTime; + logger.info(`Migration v${migration.version} completed in ${duration}ms`, LOG_CONTEXT); + } catch (error) { + const errorMessage = error instanceof Error ? error.message : String(error); + + db.prepare( + ` + INSERT OR REPLACE INTO _migrations (version, description, applied_at, status, error_message) + VALUES (?, ?, ?, 'failed', ?) + ` + ).run(migration.version, migration.description, Date.now(), errorMessage); + + logger.error(`Migration v${migration.version} failed: ${errorMessage}`, LOG_CONTEXT); + throw error; + } +} + +// ============================================================================ +// Migration Queries +// ============================================================================ + +/** + * Get the list of applied migrations from the _migrations table. + */ +export function getMigrationHistory(db: Database.Database): MigrationRecord[] { + const tableExists = db + .prepare("SELECT name FROM sqlite_master WHERE type='table' AND name='_migrations'") + .get(); + + if (!tableExists) { + return []; + } + + const rows = db + .prepare( + ` + SELECT version, description, applied_at, status, error_message + FROM _migrations + ORDER BY version ASC + ` + ) + .all() as MigrationRecordRow[]; + + return rows.map(mapMigrationRecordRow); +} + +/** + * Get the current database schema version. 
+ */ +export function getCurrentVersion(db: Database.Database): number { + const versionResult = db.pragma('user_version') as Array<{ user_version: number }>; + return versionResult[0]?.user_version ?? 0; +} + +/** + * Get the target version (highest version in migrations registry). + */ +export function getTargetVersion(): number { + const migrations = getMigrations(); + if (migrations.length === 0) return 0; + return Math.max(...migrations.map((m) => m.version)); +} + +/** + * Check if any migrations are pending. + */ +export function hasPendingMigrations(db: Database.Database): boolean { + return getCurrentVersion(db) < getTargetVersion(); +} + +// ============================================================================ +// Individual Migration Functions +// ============================================================================ + +/** + * Migration v1: Initial schema creation + */ +function migrateV1(db: Database.Database): void { + db.prepare(CREATE_QUERY_EVENTS_SQL).run(); + runStatements(db, CREATE_QUERY_EVENTS_INDEXES_SQL); + + db.prepare(CREATE_AUTO_RUN_SESSIONS_SQL).run(); + runStatements(db, CREATE_AUTO_RUN_SESSIONS_INDEXES_SQL); + + db.prepare(CREATE_AUTO_RUN_TASKS_SQL).run(); + runStatements(db, CREATE_AUTO_RUN_TASKS_INDEXES_SQL); + + logger.debug('Created stats database tables and indexes', LOG_CONTEXT); +} + +/** + * Migration v2: Add is_remote column for SSH session tracking + */ +function migrateV2(db: Database.Database): void { + db.prepare('ALTER TABLE query_events ADD COLUMN is_remote INTEGER').run(); + db.prepare('CREATE INDEX IF NOT EXISTS idx_query_is_remote ON query_events(is_remote)').run(); + + logger.debug('Added is_remote column to query_events table', LOG_CONTEXT); +} + +/** + * Migration v3: Add session_lifecycle table + */ +function migrateV3(db: Database.Database): void { + db.prepare(CREATE_SESSION_LIFECYCLE_SQL).run(); + runStatements(db, CREATE_SESSION_LIFECYCLE_INDEXES_SQL); + + logger.debug('Created session_lifecycle 
table', LOG_CONTEXT); +} diff --git a/src/main/stats/query-events.ts b/src/main/stats/query-events.ts new file mode 100644 index 00000000..c39d7b36 --- /dev/null +++ b/src/main/stats/query-events.ts @@ -0,0 +1,87 @@ +/** + * Query Event CRUD Operations + * + * Handles insertion and retrieval of individual AI query/response cycle records. + */ + +import type Database from 'better-sqlite3'; +import type { QueryEvent, StatsTimeRange, StatsFilters } from '../../shared/stats-types'; +import { generateId, getTimeRangeStart, normalizePath, LOG_CONTEXT } from './utils'; +import { mapQueryEventRow, type QueryEventRow } from './row-mappers'; +import { StatementCache } from './utils'; +import { logger } from '../utils/logger'; + +const stmtCache = new StatementCache(); + +const INSERT_SQL = ` + INSERT INTO query_events (id, session_id, agent_type, source, start_time, duration, project_path, tab_id, is_remote) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?) +`; + +/** + * Insert a new query event + */ +export function insertQueryEvent(db: Database.Database, event: Omit): string { + const id = generateId(); + const stmt = stmtCache.get(db, INSERT_SQL); + + stmt.run( + id, + event.sessionId, + event.agentType, + event.source, + event.startTime, + event.duration, + normalizePath(event.projectPath), + event.tabId ?? null, + event.isRemote !== undefined ? (event.isRemote ? 
1 : 0) : null + ); + + logger.debug(`Inserted query event ${id}`, LOG_CONTEXT); + return id; +} + +/** + * Get query events within a time range with optional filters + */ +export function getQueryEvents( + db: Database.Database, + range: StatsTimeRange, + filters?: StatsFilters +): QueryEvent[] { + const startTime = getTimeRangeStart(range); + let sql = 'SELECT * FROM query_events WHERE start_time >= ?'; + const params: (string | number)[] = [startTime]; + + if (filters?.agentType) { + sql += ' AND agent_type = ?'; + params.push(filters.agentType); + } + if (filters?.source) { + sql += ' AND source = ?'; + params.push(filters.source); + } + if (filters?.projectPath) { + sql += ' AND project_path = ?'; + // Normalize filter path to match stored format + params.push(normalizePath(filters.projectPath) ?? ''); + } + if (filters?.sessionId) { + sql += ' AND session_id = ?'; + params.push(filters.sessionId); + } + + sql += ' ORDER BY start_time DESC'; + + const stmt = db.prepare(sql); + const rows = stmt.all(...params) as QueryEventRow[]; + + return rows.map(mapQueryEventRow); +} + +/** + * Clear the statement cache (call when database is closed) + */ +export function clearQueryEventCache(): void { + stmtCache.clear(); +} diff --git a/src/main/stats/row-mappers.ts b/src/main/stats/row-mappers.ts new file mode 100644 index 00000000..ad133933 --- /dev/null +++ b/src/main/stats/row-mappers.ts @@ -0,0 +1,142 @@ +/** + * Row Mapper Functions + * + * Converts snake_case SQLite row objects to camelCase TypeScript interfaces. + * Centralizes the mapping logic that was previously duplicated across CRUD methods. 
+ */ + +import type { + QueryEvent, + AutoRunSession, + AutoRunTask, + SessionLifecycleEvent, +} from '../../shared/stats-types'; +import type { MigrationRecord } from './types'; + +// ============================================================================ +// Raw Row Types (snake_case from SQLite) +// ============================================================================ + +export interface QueryEventRow { + id: string; + session_id: string; + agent_type: string; + source: 'user' | 'auto'; + start_time: number; + duration: number; + project_path: string | null; + tab_id: string | null; + is_remote: number | null; +} + +export interface AutoRunSessionRow { + id: string; + session_id: string; + agent_type: string; + document_path: string | null; + start_time: number; + duration: number; + tasks_total: number | null; + tasks_completed: number | null; + project_path: string | null; +} + +export interface AutoRunTaskRow { + id: string; + auto_run_session_id: string; + session_id: string; + agent_type: string; + task_index: number; + task_content: string | null; + start_time: number; + duration: number; + success: number; +} + +export interface SessionLifecycleRow { + id: string; + session_id: string; + agent_type: string; + project_path: string | null; + created_at: number; + closed_at: number | null; + duration: number | null; + is_remote: number | null; +} + +export interface MigrationRecordRow { + version: number; + description: string; + applied_at: number; + status: 'success' | 'failed'; + error_message: string | null; +} + +// ============================================================================ +// Mapper Functions +// ============================================================================ + +export function mapQueryEventRow(row: QueryEventRow): QueryEvent { + return { + id: row.id, + sessionId: row.session_id, + agentType: row.agent_type, + source: row.source, + startTime: row.start_time, + duration: row.duration, + projectPath: 
row.project_path ?? undefined, + tabId: row.tab_id ?? undefined, + isRemote: row.is_remote !== null ? row.is_remote === 1 : undefined, + }; +} + +export function mapAutoRunSessionRow(row: AutoRunSessionRow): AutoRunSession { + return { + id: row.id, + sessionId: row.session_id, + agentType: row.agent_type, + documentPath: row.document_path ?? undefined, + startTime: row.start_time, + duration: row.duration, + tasksTotal: row.tasks_total ?? undefined, + tasksCompleted: row.tasks_completed ?? undefined, + projectPath: row.project_path ?? undefined, + }; +} + +export function mapAutoRunTaskRow(row: AutoRunTaskRow): AutoRunTask { + return { + id: row.id, + autoRunSessionId: row.auto_run_session_id, + sessionId: row.session_id, + agentType: row.agent_type, + taskIndex: row.task_index, + taskContent: row.task_content ?? undefined, + startTime: row.start_time, + duration: row.duration, + success: row.success === 1, + }; +} + +export function mapSessionLifecycleRow(row: SessionLifecycleRow): SessionLifecycleEvent { + return { + id: row.id, + sessionId: row.session_id, + agentType: row.agent_type, + projectPath: row.project_path ?? undefined, + createdAt: row.created_at, + closedAt: row.closed_at ?? undefined, + duration: row.duration ?? undefined, + isRemote: row.is_remote !== null ? row.is_remote === 1 : undefined, + }; +} + +export function mapMigrationRecordRow(row: MigrationRecordRow): MigrationRecord { + return { + version: row.version, + description: row.description, + appliedAt: row.applied_at, + status: row.status, + errorMessage: row.error_message ?? undefined, + }; +} diff --git a/src/main/stats/schema.ts b/src/main/stats/schema.ts new file mode 100644 index 00000000..861b5815 --- /dev/null +++ b/src/main/stats/schema.ts @@ -0,0 +1,141 @@ +/** + * Stats Database Schema + * + * SQL definitions for all tables and indexes, plus helper utilities + * for executing multi-statement SQL strings. 
+ */ + +import type Database from 'better-sqlite3'; + +// ============================================================================ +// Migrations Infrastructure +// ============================================================================ + +export const CREATE_MIGRATIONS_TABLE_SQL = ` + CREATE TABLE IF NOT EXISTS _migrations ( + version INTEGER PRIMARY KEY, + description TEXT NOT NULL, + applied_at INTEGER NOT NULL, + status TEXT NOT NULL CHECK(status IN ('success', 'failed')), + error_message TEXT + ) +`; + +// ============================================================================ +// Metadata Table (for internal key-value storage like vacuum timestamps) +// ============================================================================ + +export const CREATE_META_TABLE_SQL = ` + CREATE TABLE IF NOT EXISTS _meta ( + key TEXT PRIMARY KEY, + value TEXT NOT NULL + ) +`; + +// ============================================================================ +// Query Events (Migration v1) +// ============================================================================ + +export const CREATE_QUERY_EVENTS_SQL = ` + CREATE TABLE IF NOT EXISTS query_events ( + id TEXT PRIMARY KEY, + session_id TEXT NOT NULL, + agent_type TEXT NOT NULL, + source TEXT NOT NULL CHECK(source IN ('user', 'auto')), + start_time INTEGER NOT NULL, + duration INTEGER NOT NULL, + project_path TEXT, + tab_id TEXT + ) +`; + +export const CREATE_QUERY_EVENTS_INDEXES_SQL = ` + CREATE INDEX IF NOT EXISTS idx_query_start_time ON query_events(start_time); + CREATE INDEX IF NOT EXISTS idx_query_agent_type ON query_events(agent_type); + CREATE INDEX IF NOT EXISTS idx_query_source ON query_events(source); + CREATE INDEX IF NOT EXISTS idx_query_session ON query_events(session_id); + CREATE INDEX IF NOT EXISTS idx_query_project_path ON query_events(project_path); + CREATE INDEX IF NOT EXISTS idx_query_agent_time ON query_events(agent_type, start_time) +`; + +// 
============================================================================ +// Auto Run Sessions (Migration v1) +// ============================================================================ + +export const CREATE_AUTO_RUN_SESSIONS_SQL = ` + CREATE TABLE IF NOT EXISTS auto_run_sessions ( + id TEXT PRIMARY KEY, + session_id TEXT NOT NULL, + agent_type TEXT NOT NULL, + document_path TEXT, + start_time INTEGER NOT NULL, + duration INTEGER NOT NULL, + tasks_total INTEGER, + tasks_completed INTEGER, + project_path TEXT + ) +`; + +export const CREATE_AUTO_RUN_SESSIONS_INDEXES_SQL = ` + CREATE INDEX IF NOT EXISTS idx_auto_session_start ON auto_run_sessions(start_time) +`; + +// ============================================================================ +// Auto Run Tasks (Migration v1) +// ============================================================================ + +export const CREATE_AUTO_RUN_TASKS_SQL = ` + CREATE TABLE IF NOT EXISTS auto_run_tasks ( + id TEXT PRIMARY KEY, + auto_run_session_id TEXT NOT NULL REFERENCES auto_run_sessions(id), + session_id TEXT NOT NULL, + agent_type TEXT NOT NULL, + task_index INTEGER NOT NULL, + task_content TEXT, + start_time INTEGER NOT NULL, + duration INTEGER NOT NULL, + success INTEGER NOT NULL CHECK(success IN (0, 1)) + ) +`; + +export const CREATE_AUTO_RUN_TASKS_INDEXES_SQL = ` + CREATE INDEX IF NOT EXISTS idx_task_auto_session ON auto_run_tasks(auto_run_session_id); + CREATE INDEX IF NOT EXISTS idx_task_start ON auto_run_tasks(start_time) +`; + +// ============================================================================ +// Session Lifecycle (Migration v3) +// ============================================================================ + +export const CREATE_SESSION_LIFECYCLE_SQL = ` + CREATE TABLE IF NOT EXISTS session_lifecycle ( + id TEXT PRIMARY KEY, + session_id TEXT NOT NULL UNIQUE, + agent_type TEXT NOT NULL, + project_path TEXT, + created_at INTEGER NOT NULL, + closed_at INTEGER, + duration INTEGER, + 
is_remote INTEGER + ) +`; + +export const CREATE_SESSION_LIFECYCLE_INDEXES_SQL = ` + CREATE INDEX IF NOT EXISTS idx_session_created_at ON session_lifecycle(created_at); + CREATE INDEX IF NOT EXISTS idx_session_agent_type ON session_lifecycle(agent_type) +`; + +// ============================================================================ +// Utilities +// ============================================================================ + +/** + * Execute a multi-statement SQL string by splitting on semicolons. + * + * Useful for running multiple CREATE INDEX statements defined in a single string. + */ +export function runStatements(db: Database.Database, multiStatementSql: string): void { + for (const sql of multiStatementSql.split(';').filter((s) => s.trim())) { + db.prepare(sql).run(); + } +} diff --git a/src/main/stats/session-lifecycle.ts b/src/main/stats/session-lifecycle.ts new file mode 100644 index 00000000..c23c0a71 --- /dev/null +++ b/src/main/stats/session-lifecycle.ts @@ -0,0 +1,105 @@ +/** + * Session Lifecycle CRUD Operations + * + * Tracks when sessions are created (launched) and closed, + * enabling session duration and lifecycle analytics. + */ + +import type Database from 'better-sqlite3'; +import type { SessionLifecycleEvent, StatsTimeRange } from '../../shared/stats-types'; +import { generateId, getTimeRangeStart, normalizePath, LOG_CONTEXT } from './utils'; +import { mapSessionLifecycleRow, type SessionLifecycleRow } from './row-mappers'; +import { StatementCache } from './utils'; +import { logger } from '../utils/logger'; + +const stmtCache = new StatementCache(); + +const INSERT_SQL = ` + INSERT INTO session_lifecycle (id, session_id, agent_type, project_path, created_at, is_remote) + VALUES (?, ?, ?, ?, ?, ?) 
+`; + +/** + * Record a session being created (launched) + */ +export function recordSessionCreated( + db: Database.Database, + event: Omit +): string { + const id = generateId(); + const stmt = stmtCache.get(db, INSERT_SQL); + + stmt.run( + id, + event.sessionId, + event.agentType, + normalizePath(event.projectPath), + event.createdAt, + event.isRemote !== undefined ? (event.isRemote ? 1 : 0) : null + ); + + logger.debug(`Recorded session created: ${event.sessionId}`, LOG_CONTEXT); + return id; +} + +/** + * Record a session being closed + */ +export function recordSessionClosed( + db: Database.Database, + sessionId: string, + closedAt: number +): boolean { + // Get the session's created_at time to calculate duration + const session = db + .prepare('SELECT created_at FROM session_lifecycle WHERE session_id = ?') + .get(sessionId) as { created_at: number } | undefined; + + if (!session) { + logger.debug(`Session not found for closure: ${sessionId}`, LOG_CONTEXT); + return false; + } + + const duration = closedAt - session.created_at; + + const stmt = stmtCache.get( + db, + ` + UPDATE session_lifecycle + SET closed_at = ?, duration = ? + WHERE session_id = ? + ` + ); + + const result = stmt.run(closedAt, duration, sessionId); + logger.debug(`Recorded session closed: ${sessionId}, duration: ${duration}ms`, LOG_CONTEXT); + return result.changes > 0; +} + +/** + * Get session lifecycle events within a time range + */ +export function getSessionLifecycleEvents( + db: Database.Database, + range: StatsTimeRange +): SessionLifecycleEvent[] { + const startTime = getTimeRangeStart(range); + const stmt = stmtCache.get( + db, + ` + SELECT * FROM session_lifecycle + WHERE created_at >= ? 
+ ORDER BY created_at DESC + ` + ); + + const rows = stmt.all(startTime) as SessionLifecycleRow[]; + return rows.map(mapSessionLifecycleRow); +} + +/** + * Clear the statement cache (call when database is closed) + */ +export function clearSessionLifecycleCache(): void { + stmtCache.clear(); +} diff --git a/src/main/stats/singleton.ts b/src/main/stats/singleton.ts new file mode 100644 index 00000000..810888e0 --- /dev/null +++ b/src/main/stats/singleton.ts @@ -0,0 +1,87 @@ +/** + * Stats Database Singleton Management & Performance Metrics API + * + * Provides the global StatsDB instance and performance monitoring utilities. + */ + +import { StatsDB } from './stats-db'; +import { perfMetrics, LOG_CONTEXT } from './utils'; +import { logger } from '../utils/logger'; + +// ============================================================================ +// Singleton Instance +// ============================================================================ + +let statsDbInstance: StatsDB | null = null; + +/** + * Get the singleton StatsDB instance + */ +export function getStatsDB(): StatsDB { + if (!statsDbInstance) { + statsDbInstance = new StatsDB(); + } + return statsDbInstance; +} + +/** + * Initialize the stats database (call on app ready) + */ +export function initializeStatsDB(): void { + const db = getStatsDB(); + db.initialize(); +} + +/** + * Close the stats database (call on app quit) + */ +export function closeStatsDB(): void { + if (statsDbInstance) { + statsDbInstance.close(); + statsDbInstance = null; + } +} + +// ============================================================================ +// Performance Metrics API +// ============================================================================ + +/** + * Enable or disable performance metrics logging for StatsDB operations. + * + * When enabled, detailed timing information is logged at debug level for: + * - Database queries (getAggregatedStats, getQueryEvents, etc.) 
+ * - Individual SQL operations (totals, byAgent, bySource, byDay queries) + * + * Performance warnings are always logged (even when metrics are disabled) + * when operations exceed defined thresholds. + * + * @param enabled - Whether to enable performance metrics logging + */ +export function setPerformanceLoggingEnabled(enabled: boolean): void { + perfMetrics.setEnabled(enabled); + logger.info(`Performance metrics logging ${enabled ? 'enabled' : 'disabled'}`, LOG_CONTEXT); +} + +/** + * Check if performance metrics logging is currently enabled. + */ +export function isPerformanceLoggingEnabled(): boolean { + return perfMetrics.isEnabled(); +} + +/** + * Get collected performance metrics for analysis. + * + * Returns the last 100 recorded metrics (when enabled). + */ +export function getPerformanceMetrics() { + return perfMetrics.getMetrics(); +} + +/** + * Clear collected performance metrics. + */ +export function clearPerformanceMetrics(): void { + perfMetrics.clearMetrics(); +} diff --git a/src/main/stats/stats-db.ts b/src/main/stats/stats-db.ts new file mode 100644 index 00000000..ee940229 --- /dev/null +++ b/src/main/stats/stats-db.ts @@ -0,0 +1,543 @@ +/** + * Stats Database Core Class + * + * Manages the SQLite database lifecycle: initialization, integrity checks, + * corruption recovery, VACUUM scheduling, and connection management. + * + * CRUD operations are delegated to focused modules (query-events, auto-run, + * session-lifecycle, aggregations, data-management). 
+ */ + +import Database from 'better-sqlite3'; +import * as path from 'path'; +import * as fs from 'fs'; +import { app } from 'electron'; +import { logger } from '../utils/logger'; +import type { + QueryEvent, + AutoRunSession, + AutoRunTask, + SessionLifecycleEvent, + StatsTimeRange, + StatsFilters, + StatsAggregation, +} from '../../shared/stats-types'; +import type { + IntegrityCheckResult, + BackupResult, + CorruptionRecoveryResult, + MigrationRecord, +} from './types'; +import { LOG_CONTEXT } from './utils'; +import { CREATE_META_TABLE_SQL } from './schema'; +import { + runMigrations, + getMigrationHistory, + getCurrentVersion, + getTargetVersion, + hasPendingMigrations, +} from './migrations'; +import { insertQueryEvent, getQueryEvents, clearQueryEventCache } from './query-events'; +import { + insertAutoRunSession, + updateAutoRunSession, + getAutoRunSessions, + insertAutoRunTask, + getAutoRunTasks, + clearAutoRunCache, +} from './auto-run'; +import { + recordSessionCreated, + recordSessionClosed, + getSessionLifecycleEvents, + clearSessionLifecycleCache, +} from './session-lifecycle'; +import { getAggregatedStats } from './aggregations'; +import { clearOldData, exportToCsv } from './data-management'; + +/** + * StatsDB manages the SQLite database for usage statistics. + */ +export class StatsDB { + private db: Database.Database | null = null; + private dbPath: string; + private initialized = false; + + constructor() { + this.dbPath = path.join(app.getPath('userData'), 'stats.db'); + } + + // ============================================================================ + // Database Accessor + // ============================================================================ + + /** + * Get the underlying database handle, throwing if not initialized. + * Replaces the repeated `if (!this.db) throw` guard clauses. 
+ */ + get database(): Database.Database { + if (!this.db) throw new Error('Database not initialized'); + return this.db; + } + + // ============================================================================ + // Lifecycle + // ============================================================================ + + /** + * Initialize the database - create file, tables, and indexes. + * + * If the database is corrupted, this method will: + * 1. Backup the corrupted database file + * 2. Delete the corrupted file and any associated WAL/SHM files + * 3. Create a fresh database + */ + initialize(): void { + if (this.initialized) { + return; + } + + try { + const dir = path.dirname(this.dbPath); + if (!fs.existsSync(dir)) { + fs.mkdirSync(dir, { recursive: true }); + } + + const dbExists = fs.existsSync(this.dbPath); + + if (dbExists) { + const db = this.openWithCorruptionHandling(); + if (!db) { + throw new Error('Failed to open or recover database'); + } + this.db = db; + } else { + this.db = new Database(this.dbPath); + } + + // Enable WAL mode for better concurrent access + this.db.pragma('journal_mode = WAL'); + + // Create the _meta table for internal key-value storage + this.db.prepare(CREATE_META_TABLE_SQL).run(); + + // Run migrations + runMigrations(this.db); + + this.initialized = true; + logger.info(`Stats database initialized at ${this.dbPath}`, LOG_CONTEXT); + + // Schedule VACUUM to run weekly instead of on every startup + this.vacuumIfNeededWeekly(); + } catch (error) { + logger.error(`Failed to initialize stats database: ${error}`, LOG_CONTEXT); + throw error; + } + } + + /** + * Close the database connection + */ + close(): void { + if (this.db) { + this.db.close(); + this.db = null; + this.initialized = false; + + // Clear all statement caches + clearQueryEventCache(); + clearAutoRunCache(); + clearSessionLifecycleCache(); + + logger.info('Stats database closed', LOG_CONTEXT); + } + } + + /** + * Check if database is initialized and ready + */ + isReady(): 
boolean { + return this.initialized && this.db !== null; + } + + /** + * Get the database file path + */ + getDbPath(): string { + return this.dbPath; + } + + /** + * Get the database file size in bytes. + */ + getDatabaseSize(): number { + try { + const stats = fs.statSync(this.dbPath); + return stats.size; + } catch { + return 0; + } + } + + // ============================================================================ + // VACUUM + // ============================================================================ + + /** + * Run VACUUM on the database to reclaim unused space and optimize structure. + */ + vacuum(): { success: boolean; bytesFreed: number; error?: string } { + if (!this.db) { + return { success: false, bytesFreed: 0, error: 'Database not initialized' }; + } + + try { + const sizeBefore = this.getDatabaseSize(); + logger.info( + `Starting VACUUM (current size: ${(sizeBefore / 1024 / 1024).toFixed(2)} MB)`, + LOG_CONTEXT + ); + + this.db.prepare('VACUUM').run(); + + const sizeAfter = this.getDatabaseSize(); + const bytesFreed = sizeBefore - sizeAfter; + + logger.info( + `VACUUM completed: ${(sizeBefore / 1024 / 1024).toFixed(2)} MB -> ${(sizeAfter / 1024 / 1024).toFixed(2)} MB (freed ${(bytesFreed / 1024 / 1024).toFixed(2)} MB)`, + LOG_CONTEXT + ); + + return { success: true, bytesFreed }; + } catch (error) { + const errorMessage = error instanceof Error ? error.message : String(error); + logger.error(`VACUUM failed: ${errorMessage}`, LOG_CONTEXT); + return { success: false, bytesFreed: 0, error: errorMessage }; + } + } + + /** + * Conditionally vacuum the database if it exceeds a size threshold. 
+ * + * @param thresholdBytes - Size threshold in bytes (default: 100MB) + */ + vacuumIfNeeded(thresholdBytes: number = 100 * 1024 * 1024): { + vacuumed: boolean; + databaseSize: number; + result?: { success: boolean; bytesFreed: number; error?: string }; + } { + const databaseSize = this.getDatabaseSize(); + + if (databaseSize < thresholdBytes) { + logger.debug( + `Database size (${(databaseSize / 1024 / 1024).toFixed(2)} MB) below vacuum threshold (${(thresholdBytes / 1024 / 1024).toFixed(2)} MB), skipping VACUUM`, + LOG_CONTEXT + ); + return { vacuumed: false, databaseSize }; + } + + logger.info( + `Database size (${(databaseSize / 1024 / 1024).toFixed(2)} MB) exceeds vacuum threshold (${(thresholdBytes / 1024 / 1024).toFixed(2)} MB), running VACUUM`, + LOG_CONTEXT + ); + + const result = this.vacuum(); + return { vacuumed: true, databaseSize, result }; + } + + /** + * Run VACUUM only if it hasn't been run in the last 7 days. + * + * Stores the last vacuum timestamp in the database's _meta table + * instead of an external file. + * + * @param intervalMs - Minimum time between vacuums (default: 7 days) + */ + private vacuumIfNeededWeekly(intervalMs: number = 7 * 24 * 60 * 60 * 1000): void { + try { + // Read last vacuum timestamp from _meta table + const row = this.database + .prepare("SELECT value FROM _meta WHERE key = 'last_vacuum_at'") + .get() as { value: string } | undefined; + + const lastVacuum = row ? 
parseInt(row.value, 10) || 0 : 0; + const now = Date.now(); + const timeSinceLastVacuum = now - lastVacuum; + + if (timeSinceLastVacuum < intervalMs) { + const daysRemaining = ((intervalMs - timeSinceLastVacuum) / (24 * 60 * 60 * 1000)).toFixed( + 1 + ); + logger.debug( + `Skipping VACUUM (last run ${((now - lastVacuum) / (24 * 60 * 60 * 1000)).toFixed(1)} days ago, next in ${daysRemaining} days)`, + LOG_CONTEXT + ); + return; + } + + // Run VACUUM if database is large enough + const result = this.vacuumIfNeeded(); + + if (result.vacuumed) { + // Update timestamp in _meta table + this.database + .prepare("INSERT OR REPLACE INTO _meta (key, value) VALUES ('last_vacuum_at', ?)") + .run(String(now)); + logger.info('Updated VACUUM timestamp in _meta table', LOG_CONTEXT); + } + } catch (error) { + // Non-fatal - log and continue + logger.warn(`Failed to check/update VACUUM schedule: ${error}`, LOG_CONTEXT); + } + } + + // ============================================================================ + // Integrity & Corruption Handling + // ============================================================================ + + /** + * Check the integrity of the database using SQLite's PRAGMA integrity_check. + */ + checkIntegrity(): IntegrityCheckResult { + if (!this.db) { + return { ok: false, errors: ['Database not initialized'] }; + } + + try { + const result = this.db.pragma('integrity_check') as Array<{ integrity_check: string }>; + + if (result.length === 1 && result[0].integrity_check === 'ok') { + return { ok: true, errors: [] }; + } + + const errors = result.map((row) => row.integrity_check); + return { ok: false, errors }; + } catch (error) { + const errorMessage = error instanceof Error ? error.message : String(error); + return { ok: false, errors: [errorMessage] }; + } + } + + /** + * Create a backup of the current database file. 
+ */ + backupDatabase(): BackupResult { + try { + if (!fs.existsSync(this.dbPath)) { + return { success: false, error: 'Database file does not exist' }; + } + + const timestamp = Date.now(); + const backupPath = `${this.dbPath}.backup.${timestamp}`; + + fs.copyFileSync(this.dbPath, backupPath); + + logger.info(`Created database backup at ${backupPath}`, LOG_CONTEXT); + return { success: true, backupPath }; + } catch (error) { + const errorMessage = error instanceof Error ? error.message : String(error); + logger.error(`Failed to create database backup: ${errorMessage}`, LOG_CONTEXT); + return { success: false, error: errorMessage }; + } + } + + /** + * Handle a corrupted database by backing it up and recreating a fresh database. + */ + private recoverFromCorruption(): CorruptionRecoveryResult { + logger.warn('Attempting to recover from database corruption...', LOG_CONTEXT); + + try { + if (this.db) { + try { + this.db.close(); + } catch { + // Ignore errors closing corrupted database + } + this.db = null; + this.initialized = false; + } + + const backupResult = this.backupDatabase(); + if (!backupResult.success) { + if (fs.existsSync(this.dbPath)) { + const timestamp = Date.now(); + const emergencyBackupPath = `${this.dbPath}.corrupted.${timestamp}`; + try { + fs.renameSync(this.dbPath, emergencyBackupPath); + logger.warn(`Emergency backup created at ${emergencyBackupPath}`, LOG_CONTEXT); + } catch { + logger.error('Failed to backup corrupted database, data will be lost', LOG_CONTEXT); + fs.unlinkSync(this.dbPath); + } + } + } + + // Delete WAL and SHM files + const walPath = `${this.dbPath}-wal`; + const shmPath = `${this.dbPath}-shm`; + if (fs.existsSync(walPath)) { + fs.unlinkSync(walPath); + } + if (fs.existsSync(shmPath)) { + fs.unlinkSync(shmPath); + } + + if (fs.existsSync(this.dbPath)) { + fs.unlinkSync(this.dbPath); + } + + logger.info('Corrupted database removed, will create fresh database', LOG_CONTEXT); + + return { + recovered: true, + backupPath: 
backupResult.backupPath, + }; + } catch (error) { + const errorMessage = error instanceof Error ? error.message : String(error); + logger.error(`Failed to recover from database corruption: ${errorMessage}`, LOG_CONTEXT); + return { + recovered: false, + error: errorMessage, + }; + } + } + + /** + * Attempt to open and validate a database, handling corruption if detected. + */ + private openWithCorruptionHandling(): Database.Database | null { + try { + const db = new Database(this.dbPath); + + const result = db.pragma('integrity_check') as Array<{ integrity_check: string }>; + if (result.length === 1 && result[0].integrity_check === 'ok') { + return db; + } + + const errors = result.map((row) => row.integrity_check); + logger.error(`Database integrity check failed: ${errors.join(', ')}`, LOG_CONTEXT); + + db.close(); + } catch (error) { + logger.error(`Failed to open database: ${error}`, LOG_CONTEXT); + } + + const recoveryResult = this.recoverFromCorruption(); + if (!recoveryResult.recovered) { + logger.error('Database corruption recovery failed', LOG_CONTEXT); + return null; + } + + try { + const db = new Database(this.dbPath); + logger.info('Fresh database created after corruption recovery', LOG_CONTEXT); + return db; + } catch (error) { + logger.error(`Failed to create fresh database after recovery: ${error}`, LOG_CONTEXT); + return null; + } + } + + // ============================================================================ + // Migration Queries (delegated) + // ============================================================================ + + getMigrationHistory(): MigrationRecord[] { + return getMigrationHistory(this.database); + } + + getCurrentVersion(): number { + return getCurrentVersion(this.database); + } + + getTargetVersion(): number { + return getTargetVersion(); + } + + hasPendingMigrations(): boolean { + return hasPendingMigrations(this.database); + } + + // ============================================================================ + // Query 
Events (delegated) + // ============================================================================ + + insertQueryEvent(event: Omit): string { + return insertQueryEvent(this.database, event); + } + + getQueryEvents(range: StatsTimeRange, filters?: StatsFilters): QueryEvent[] { + return getQueryEvents(this.database, range, filters); + } + + // ============================================================================ + // Auto Run (delegated) + // ============================================================================ + + insertAutoRunSession(session: Omit): string { + return insertAutoRunSession(this.database, session); + } + + updateAutoRunSession(id: string, updates: Partial): boolean { + return updateAutoRunSession(this.database, id, updates); + } + + getAutoRunSessions(range: StatsTimeRange): AutoRunSession[] { + return getAutoRunSessions(this.database, range); + } + + insertAutoRunTask(task: Omit): string { + return insertAutoRunTask(this.database, task); + } + + getAutoRunTasks(autoRunSessionId: string): AutoRunTask[] { + return getAutoRunTasks(this.database, autoRunSessionId); + } + + // ============================================================================ + // Session Lifecycle (delegated) + // ============================================================================ + + recordSessionCreated(event: Omit): string { + return recordSessionCreated(this.database, event); + } + + recordSessionClosed(sessionId: string, closedAt: number): boolean { + return recordSessionClosed(this.database, sessionId, closedAt); + } + + getSessionLifecycleEvents(range: StatsTimeRange): SessionLifecycleEvent[] { + return getSessionLifecycleEvents(this.database, range); + } + + // ============================================================================ + // Aggregations (delegated) + // ============================================================================ + + getAggregatedStats(range: StatsTimeRange): StatsAggregation { + return 
getAggregatedStats(this.database, range); + } + + // ============================================================================ + // Data Management (delegated) + // ============================================================================ + + clearOldData(olderThanDays: number) { + if (!this.db) { + return { + success: false, + deletedQueryEvents: 0, + deletedAutoRunSessions: 0, + deletedAutoRunTasks: 0, + deletedSessionLifecycle: 0, + error: 'Database not initialized', + }; + } + return clearOldData(this.database, olderThanDays); + } + + exportToCsv(range: StatsTimeRange): string { + return exportToCsv(this.database, range); + } +} diff --git a/src/main/stats/types.ts b/src/main/stats/types.ts new file mode 100644 index 00000000..f8e3502f --- /dev/null +++ b/src/main/stats/types.ts @@ -0,0 +1,65 @@ +/** + * Stats Database Internal Types + * + * These types are specific to the stats database implementation. + * Shared types (QueryEvent, AutoRunSession, etc.) remain in src/shared/stats-types.ts. 
+ */ + +import type Database from 'better-sqlite3'; + +/** + * Result of a database integrity check + */ +export interface IntegrityCheckResult { + /** Whether the database passed the integrity check */ + ok: boolean; + /** Error messages from the integrity check (empty if ok is true) */ + errors: string[]; +} + +/** + * Result of a database backup operation + */ +export interface BackupResult { + /** Whether the backup succeeded */ + success: boolean; + /** Path to the backup file (if success is true) */ + backupPath?: string; + /** Error message (if success is false) */ + error?: string; +} + +/** + * Result of corruption recovery + */ +export interface CorruptionRecoveryResult { + /** Whether recovery was performed */ + recovered: boolean; + /** Path to the backup of the corrupted database */ + backupPath?: string; + /** Error during recovery (if any) */ + error?: string; +} + +/** + * Represents a single database migration + */ +export interface Migration { + /** Version number (must be sequential starting from 1) */ + version: number; + /** Human-readable description of the migration */ + description: string; + /** Function to apply the migration */ + up: (db: Database.Database) => void; +} + +/** + * Record of an applied migration stored in the migrations table + */ +export interface MigrationRecord { + version: number; + description: string; + appliedAt: number; + status: 'success' | 'failed'; + errorMessage?: string; +} diff --git a/src/main/stats/utils.ts b/src/main/stats/utils.ts new file mode 100644 index 00000000..6854e04f --- /dev/null +++ b/src/main/stats/utils.ts @@ -0,0 +1,100 @@ +/** + * Stats Database Utilities + * + * Shared helper functions and constants used across the stats module. 
+ */ + +import type Database from 'better-sqlite3'; +import { logger } from '../utils/logger'; +import { PerformanceMetrics } from '../../shared/performance-metrics'; +import type { StatsTimeRange } from '../../shared/stats-types'; + +export const LOG_CONTEXT = '[StatsDB]'; + +/** + * Performance metrics logger for StatsDB operations. + * + * Disabled by default - enable via setPerformanceLoggingEnabled(true). + * Logs at debug level through the main process logger. + */ +export const perfMetrics = new PerformanceMetrics( + 'StatsDB', + (message, context) => logger.debug(message, context ?? LOG_CONTEXT), + false // Disabled by default - enable for debugging +); + +/** + * Generate a unique ID for database entries + */ +export function generateId(): string { + return `${Date.now()}-${Math.random().toString(36).substring(2, 11)}`; +} + +/** + * Get timestamp for start of time range + */ +export function getTimeRangeStart(range: StatsTimeRange): number { + const now = Date.now(); + const day = 24 * 60 * 60 * 1000; + + switch (range) { + case 'day': + return now - day; + case 'week': + return now - 7 * day; + case 'month': + return now - 30 * day; + case 'year': + return now - 365 * day; + case 'all': + return 0; + default: + // Exhaustive check - should never reach here + return 0; + } +} + +/** + * Normalize file paths to use forward slashes consistently across platforms. + * + * This ensures that paths stored in the database use a consistent format + * regardless of the operating system, enabling cross-platform data portability + * and consistent filtering by project path. 
+ * + * - Converts Windows-style backslashes to forward slashes + * - Preserves UNC paths (\\server\share -> //server/share) + * - Handles null/undefined by returning null + * + * @param filePath - The file path to normalize (may be Windows or Unix style) + * @returns The normalized path with forward slashes, or null if input is null/undefined + */ +export function normalizePath(filePath: string | null | undefined): string | null { + if (filePath == null) { + return null; + } + // Replace all backslashes with forward slashes + return filePath.replace(/\\/g, '/'); +} + +/** + * Cache for prepared SQL statements. + * + * Eliminates repeated `db.prepare()` overhead for frequently executed queries. + * Each cache instance should be cleared when the database connection is closed. + */ +export class StatementCache { + private cache = new Map(); + + get(db: Database.Database, sql: string): Database.Statement { + let stmt = this.cache.get(sql); + if (!stmt) { + stmt = db.prepare(sql); + this.cache.set(sql, stmt); + } + return stmt; + } + + clear(): void { + this.cache.clear(); + } +} diff --git a/src/main/storage/claude-session-storage.ts b/src/main/storage/claude-session-storage.ts index f2383af7..fa53a1ab 100644 --- a/src/main/storage/claude-session-storage.ts +++ b/src/main/storage/claude-session-storage.ts @@ -32,7 +32,7 @@ import type { AgentSessionOrigin, SessionOriginInfo, SessionMessage, -} from '../agent-session-storage'; +} from '../agents'; import type { ToolType, SshRemoteConfig } from '../../shared/types'; const LOG_CONTEXT = '[ClaudeSessionStorage]'; diff --git a/src/main/storage/codex-session-storage.ts b/src/main/storage/codex-session-storage.ts index f6ce7c55..997788e4 100644 --- a/src/main/storage/codex-session-storage.ts +++ b/src/main/storage/codex-session-storage.ts @@ -36,7 +36,7 @@ import type { SessionListOptions, SessionReadOptions, SessionMessage, -} from '../agent-session-storage'; +} from '../agents'; import type { ToolType, SshRemoteConfig } 
from '../../shared/types'; const LOG_CONTEXT = '[CodexSessionStorage]'; diff --git a/src/main/storage/factory-droid-session-storage.ts b/src/main/storage/factory-droid-session-storage.ts index bbfef494..a2195418 100644 --- a/src/main/storage/factory-droid-session-storage.ts +++ b/src/main/storage/factory-droid-session-storage.ts @@ -40,7 +40,7 @@ import type { SessionListOptions, SessionReadOptions, SessionMessage, -} from '../agent-session-storage'; +} from '../agents'; import type { ToolType, SshRemoteConfig } from '../../shared/types'; const LOG_CONTEXT = '[FactoryDroidSessionStorage]'; diff --git a/src/main/storage/index.ts b/src/main/storage/index.ts index e905bc1d..71981b03 100644 --- a/src/main/storage/index.ts +++ b/src/main/storage/index.ts @@ -11,7 +11,7 @@ export { CodexSessionStorage } from './codex-session-storage'; export { FactoryDroidSessionStorage } from './factory-droid-session-storage'; import Store from 'electron-store'; -import { registerSessionStorage } from '../agent-session-storage'; +import { registerSessionStorage } from '../agents'; import { ClaudeSessionStorage, ClaudeSessionOriginsData } from './claude-session-storage'; import { OpenCodeSessionStorage } from './opencode-session-storage'; import { CodexSessionStorage } from './codex-session-storage'; diff --git a/src/main/storage/opencode-session-storage.ts b/src/main/storage/opencode-session-storage.ts index 0ad5dea4..e8523fef 100644 --- a/src/main/storage/opencode-session-storage.ts +++ b/src/main/storage/opencode-session-storage.ts @@ -34,7 +34,7 @@ import type { SessionListOptions, SessionReadOptions, SessionMessage, -} from '../agent-session-storage'; +} from '../agents'; import type { ToolType, SshRemoteConfig } from '../../shared/types'; const LOG_CONTEXT = '[OpenCodeSessionStorage]'; diff --git a/src/main/utils/agent-args.ts b/src/main/utils/agent-args.ts index b66becac..d71c815e 100644 --- a/src/main/utils/agent-args.ts +++ b/src/main/utils/agent-args.ts @@ -1,4 +1,4 @@ -import 
type { AgentConfig } from '../agent-detector'; +import type { AgentConfig } from '../agents'; type BuildAgentArgsOptions = { baseArgs: string[]; @@ -118,7 +118,10 @@ export function applyAgentConfigOverrides( : option.default; } - finalArgs = [...finalArgs, ...option.argBuilder(value)]; + // Type assertion needed because AgentConfigOption is a discriminated union + // and we're handling all types generically here + const argBuilderFn = option.argBuilder as (value: unknown) => string[]; + finalArgs = [...finalArgs, ...argBuilderFn(value)]; } } @@ -179,9 +182,11 @@ export function getContextWindowValue( } // Fall back to agent-level config const contextWindowOption = agent?.configOptions?.find( - (option) => option.key === 'contextWindow' + (option) => option.key === 'contextWindow' && option.type === 'number' ); - const contextWindowDefault = contextWindowOption?.default ?? 0; + // Extract default value, ensuring it's a number (contextWindow should always be a number config) + const defaultValue = contextWindowOption?.default; + const contextWindowDefault = typeof defaultValue === 'number' ? defaultValue : 0; return typeof agentConfigValues.contextWindow === 'number' ? 
agentConfigValues.contextWindow : contextWindowDefault; diff --git a/src/main/utils/context-groomer.ts b/src/main/utils/context-groomer.ts index f3956dd7..e5bc6d06 100644 --- a/src/main/utils/context-groomer.ts +++ b/src/main/utils/context-groomer.ts @@ -15,7 +15,7 @@ import { v4 as uuidv4 } from 'uuid'; import { logger } from './logger'; import { buildAgentArgs } from './agent-args'; -import type { AgentDetector } from '../agent-detector'; +import type { AgentDetector } from '../agents'; const LOG_CONTEXT = '[ContextGroomer]'; diff --git a/src/renderer/App.tsx b/src/renderer/App.tsx index d62260f0..13a1f2e4 100644 --- a/src/renderer/App.tsx +++ b/src/renderer/App.tsx @@ -186,7 +186,7 @@ import { shouldOpenExternally, flattenTree } from './utils/fileExplorer'; import type { FileNode } from './types/fileTree'; import { substituteTemplateVariables } from './utils/templateVariables'; import { validateNewSession, getProviderDisplayName } from './utils/sessionValidation'; -import { estimateContextUsage, calculateContextTokens } from './utils/contextUsage'; +import { estimateContextUsage } from './utils/contextUsage'; import { formatLogsForClipboard } from './utils/contextExtractor'; import { parseSessionId, @@ -2942,90 +2942,24 @@ function MaestroConsoleInner() { const parsed = parseSessionId(sessionId); const { actualSessionId, tabId, baseSessionId } = parsed; - // Calculate context window usage percentage from CURRENT (per-turn) tokens. - // Claude Code usage is normalized to per-turn values in StdoutHandler before reaching here. 
- // - // SYNC: Uses calculateContextTokens() from shared/contextUsage.ts - // This MUST match the calculation used in: - // - contextSummarizer.ts (compaction eligibility) - // - MainPanel.tsx (tab context display) - // - TabSwitcherModal.tsx (tab switcher) - // - HistoryDetailModal.tsx (history view) - // - usage-listener.ts (main process usage events) - // - // @see src/shared/contextUsage.ts for the canonical calculation + // Estimate context usage percentage using agent-specific calculation. + // estimateContextUsage returns null when values are accumulated across multiple + // internal API calls within a complex turn. In that case, the UI may update less + // during tool-heavy turns, but it's always accurate when it does update, + // keeping the compact warning reliable. // Use baseSessionId for lookup to handle synopsis/batch sessions that inherit parent's agent type const sessionForUsage = sessionsRef.current.find((s) => s.id === baseSessionId); const agentToolType = sessionForUsage?.toolType; - const currentContextTokens = calculateContextTokens( - { - inputTokens: usageStats.inputTokens, - outputTokens: usageStats.outputTokens, - cacheReadInputTokens: usageStats.cacheReadInputTokens, - cacheCreationInputTokens: usageStats.cacheCreationInputTokens, - }, - agentToolType - ); - - // Calculate context percentage, falling back to agent-specific defaults if contextWindow not provided - let contextPercentage: number; - const effectiveContextWindow = usageStats.contextWindow > 0 ? usageStats.contextWindow : 200000; - - // Sanity check: if tokens exceed 150% of context window, the data is likely corrupt - // (e.g., accumulated session totals instead of per-turn values). In this case, - // preserve the previous context percentage rather than showing misleading 100%. 
- if (currentContextTokens > effectiveContextWindow * 1.5) { - console.warn('[onUsage] Ignoring anomalous context data - tokens exceed 150% of window', { - sessionId: actualSessionId, - currentContextTokens, - contextWindow: effectiveContextWindow, - inputTokens: usageStats.inputTokens, - cacheReadInputTokens: usageStats.cacheReadInputTokens, - cacheCreationInputTokens: usageStats.cacheCreationInputTokens, - }); - // Keep existing context percentage (don't update) - contextPercentage = sessionForUsage?.contextUsage ?? 0; - // Skip usage updates to avoid polluting UI with cumulative totals - return; - } else if (usageStats.contextWindow > 0) { - contextPercentage = Math.min( - Math.round((currentContextTokens / usageStats.contextWindow) * 100), - 100 - ); - } else { - // Use fallback estimation with agent-specific default context window - const estimated = estimateContextUsage(usageStats, agentToolType); - contextPercentage = estimated ?? 0; - } - - // DEBUG: Log context calculation details - // Uses calculateContextTokens() from shared/contextUsage.ts for consistency - const isCombinedContext = agentToolType === 'codex'; - console.log('[onUsage] Context calculation', { - sessionId: actualSessionId, - agentType: agentToolType, - raw: { - inputTokens: usageStats.inputTokens, - outputTokens: usageStats.outputTokens, - cacheReadInputTokens: usageStats.cacheReadInputTokens, - cacheCreationInputTokens: usageStats.cacheCreationInputTokens, - contextWindow: usageStats.contextWindow, - }, - calculated: { - currentContextTokens, - effectiveContextWindow, - contextPercentage, - formula: isCombinedContext - ? 
'input + output (combined)' - : 'input + cacheRead + cacheCreation', - }, - }); + const contextPercentage = estimateContextUsage(usageStats, agentToolType); // Batch the usage stats update, context percentage, and cycle tokens // The batched updater handles the accumulation logic internally batchedUpdater.updateUsage(actualSessionId, tabId, usageStats); batchedUpdater.updateUsage(actualSessionId, null, usageStats); // Session-level accumulation - batchedUpdater.updateContextUsage(actualSessionId, contextPercentage); + // Only update context percentage if we got a valid value (not accumulated) + if (contextPercentage !== null) { + batchedUpdater.updateContextUsage(actualSessionId, contextPercentage); + } batchedUpdater.updateCycleTokens(actualSessionId, usageStats.outputTokens); // Update persistent global stats (not batched - this is a separate concern) diff --git a/src/renderer/components/MainPanel.tsx b/src/renderer/components/MainPanel.tsx index a908110b..c173b468 100644 --- a/src/renderer/components/MainPanel.tsx +++ b/src/renderer/components/MainPanel.tsx @@ -574,12 +574,14 @@ export const MainPanel = React.memo( return configured > 0 ? configured : reported; }, [configuredContextWindow, activeTab?.usageStats?.contextWindow]); - // Compute context tokens using agent-specific calculation - // SYNC: Uses calculateContextTokens() from shared/contextUsage.ts - // See that file for the canonical formula and all locations that must stay in sync. + // Compute context tokens using agent-specific calculation. + // Claude: input + cacheRead + cacheCreation (total input for the request) + // Codex: input + output (combined limit) + // When values are accumulated from multi-tool turns, total may exceed contextWindow. + // In that case, derive tokens from session.contextUsage (preserved last valid percentage). 
const activeTabContextTokens = useMemo(() => { if (!activeTab?.usageStats) return 0; - return calculateContextTokens( + const raw = calculateContextTokens( { inputTokens: activeTab.usageStats.inputTokens, outputTokens: activeTab.usageStats.outputTokens, @@ -588,43 +590,25 @@ export const MainPanel = React.memo( }, activeSession?.toolType ); - }, [activeTab?.usageStats, activeSession?.toolType]); - // Compute context usage percentage from context tokens and window size + // If raw exceeds window, values are accumulated from multi-tool turns. + // Fall back to deriving from the preserved contextUsage percentage. + const effectiveWindow = activeTabContextWindow || 200000; + if (raw > effectiveWindow && activeSession?.contextUsage != null) { + return Math.round((activeSession.contextUsage / 100) * effectiveWindow); + } + + return raw; + }, [activeTab?.usageStats, activeSession?.toolType, activeTabContextWindow, activeSession?.contextUsage]); + + // Compute context usage percentage from context tokens and window size. + // Since we already handle accumulated values in activeTabContextTokens, + // we just calculate the percentage directly. const activeTabContextUsage = useMemo(() => { if (!activeTabContextWindow || activeTabContextWindow === 0) return 0; if (activeTabContextTokens === 0) return 0; - const percentage = Math.min( - Math.round((activeTabContextTokens / activeTabContextWindow) * 100), - 100 - ); - - // DEBUG: Log MainPanel context display calculation - console.log('[MainPanel] Context display calculation', { - sessionId: activeSession?.id, - tabId: activeTab?.id, - usageStats: activeTab?.usageStats - ? 
{ - inputTokens: activeTab.usageStats.inputTokens, - outputTokens: activeTab.usageStats.outputTokens, - cacheReadInputTokens: activeTab.usageStats.cacheReadInputTokens, - cacheCreationInputTokens: activeTab.usageStats.cacheCreationInputTokens, - contextWindow: activeTab.usageStats.contextWindow, - } - : null, - activeTabContextTokens, - activeTabContextWindow, - displayedPercentage: percentage, - }); - - return percentage; - }, [ - activeTabContextTokens, - activeTabContextWindow, - activeSession?.id, - activeTab?.id, - activeTab?.usageStats, - ]); + return Math.round((activeTabContextTokens / activeTabContextWindow) * 100); + }, [activeTabContextTokens, activeTabContextWindow]); // PERF: Track panel width for responsive widget hiding with threshold-based updates // Only update state when width crosses a meaningful threshold (20px) to prevent diff --git a/src/renderer/hooks/git/index.ts b/src/renderer/hooks/git/index.ts index 50823747..552e9d05 100644 --- a/src/renderer/hooks/git/index.ts +++ b/src/renderer/hooks/git/index.ts @@ -5,7 +5,7 @@ */ // Git status polling -export { useGitStatusPolling } from './useGitStatusPolling'; +export { useGitStatusPolling, getScaledPollInterval } from './useGitStatusPolling'; export type { UseGitStatusPollingReturn, UseGitStatusPollingOptions, diff --git a/src/renderer/hooks/git/useGitStatusPolling.ts b/src/renderer/hooks/git/useGitStatusPolling.ts index 63b1ad15..3aa9d616 100644 --- a/src/renderer/hooks/git/useGitStatusPolling.ts +++ b/src/renderer/hooks/git/useGitStatusPolling.ts @@ -1,4 +1,4 @@ -import { useState, useEffect, useRef, useCallback } from 'react'; +import { useState, useEffect, useRef, useCallback, useMemo } from 'react'; import type { Session } from '../../types'; import { gitService } from '../../services/git'; @@ -90,6 +90,31 @@ export interface UseGitStatusPollingOptions { const DEFAULT_POLL_INTERVAL = 30000; // 30 seconds const DEFAULT_INACTIVITY_TIMEOUT = 60000; // 60 seconds +/** + * PERF: Scale polling 
interval based on the number of git sessions. + * With many sessions, each poll spawns N parallel git processes which creates + * sustained CPU/IO load (especially on large repos where `git status` takes seconds). + * Only applies when using the default poll interval; custom intervals are respected. + */ +const POLL_INTERVAL_SCALE_THRESHOLDS: { maxSessions: number; interval: number }[] = [ + { maxSessions: 3, interval: 30000 }, // 1-3 sessions: 30s (unchanged) + { maxSessions: 7, interval: 45000 }, // 4-7 sessions: 45s + { maxSessions: 12, interval: 60000 }, // 8-12 sessions: 60s + { maxSessions: Infinity, interval: 90000 }, // 13+: 90s +]; + +export function getScaledPollInterval(basePollInterval: number, gitSessionCount: number): number { + // Only scale if using the default interval (user-configured intervals are respected) + if (basePollInterval !== DEFAULT_POLL_INTERVAL) return basePollInterval; + + for (const threshold of POLL_INTERVAL_SCALE_THRESHOLDS) { + if (gitSessionCount <= threshold.maxSessions) { + return threshold.interval; + } + } + return 90000; +} + /** * PERF: Compare two GitStatusData objects for meaningful changes. * Ignores lastUpdated since that always changes and would cause unnecessary re-renders. 
@@ -324,9 +349,16 @@ export function useGitStatusPolling( } }, [pauseWhenHidden]); + // PERF: Track git session count to dynamically scale the polling interval + const gitSessionCount = useMemo(() => sessions.filter((s) => s.isGitRepo).length, [sessions]); + const gitSessionCountRef = useRef(gitSessionCount); + gitSessionCountRef.current = gitSessionCount; + const startPolling = useCallback(() => { if (!intervalRef.current && (!pauseWhenHidden || !document.hidden)) { pollGitStatus(); + // Scale interval based on how many git sessions are active + const scaledInterval = getScaledPollInterval(pollInterval, gitSessionCountRef.current); intervalRef.current = setInterval(() => { const now = Date.now(); const timeSinceLastActivity = now - lastActivityRef.current; @@ -342,7 +374,7 @@ export function useGitStatusPolling( intervalRef.current = null; } } - }, pollInterval); + }, scaledInterval); } }, [pollInterval, inactivityTimeout, pollGitStatus]); @@ -431,6 +463,28 @@ export function useGitStatusPolling( }; }, [pauseWhenHidden, startPolling, stopPolling]); + // PERF: Restart polling when git session count crosses a scaling threshold + // so the interval adapts to the current load level + const prevScaledIntervalRef = useRef(getScaledPollInterval(pollInterval, gitSessionCount)); + useEffect(() => { + // Ensure ref reflects current count before startPolling reads it. + // (The render-phase assignment at line 330 already does this, but being + // explicit here makes the data-flow self-documenting.) 
+ gitSessionCountRef.current = gitSessionCount; + + const newScaledInterval = getScaledPollInterval(pollInterval, gitSessionCount); + if (newScaledInterval !== prevScaledIntervalRef.current) { + prevScaledIntervalRef.current = newScaledInterval; + // Restart with new interval if currently polling + if (intervalRef.current) { + stopPolling(); + if (isActiveRef.current && (!pauseWhenHidden || !document.hidden)) { + startPolling(); + } + } + } + }, [gitSessionCount, pollInterval, stopPolling, startPolling, pauseWhenHidden]); + // Refresh immediately when active session changes to get detailed data useEffect(() => { if (activeSessionId) { diff --git a/src/renderer/hooks/input/useAtMentionCompletion.ts b/src/renderer/hooks/input/useAtMentionCompletion.ts index 0ccb2350..a1c5a2f7 100644 --- a/src/renderer/hooks/input/useAtMentionCompletion.ts +++ b/src/renderer/hooks/input/useAtMentionCompletion.ts @@ -17,6 +17,30 @@ export interface UseAtMentionCompletionReturn { getSuggestions: (filter: string) => AtMentionSuggestion[]; } +/** + * PERF: Maximum number of file tree entries to flatten. + * For repos with 100k+ files, unbounded traversal creates a massive array + * that blocks the main thread. 50k entries is more than enough for + * meaningful @mention suggestions while keeping traversal fast. + * Breadth-first-like order naturally prioritizes shallower (more relevant) files. + */ +const MAX_FILE_TREE_ENTRIES = 50_000; + +/** + * PERF: Maximum number of results to return from fuzzy search. + */ +const MAX_SUGGESTION_RESULTS = 15; + +/** + * PERF: Once this many exact substring matches are found (and we have MAX_SUGGESTION_RESULTS), + * stop searching. Exact matches score highest in fuzzyMatchWithScore (they receive a +50 + * bonus in search.ts), so once we have 50 exact substring matches the top-15 results are + * virtually guaranteed to be optimal — any remaining files would only contribute weaker + * fuzzy-only matches that cannot outscore them. 
50 provides a comfortable margin over + * MAX_SUGGESTION_RESULTS (15) to account for score ties and type-based sorting. + */ +const EARLY_EXIT_EXACT_MATCH_THRESHOLD = 50; + /** * Hook for providing @ mention file completion in AI mode. * Uses fuzzy matching to find files in the project tree and Auto Run folder. @@ -93,6 +117,7 @@ export function useAtMentionCompletion(session: Session | null): UseAtMentionCom }, [autoRunFolderPath, sessionCwd]); // Build a flat list of all files/folders from the file tree + // PERF: Capped at MAX_FILE_TREE_ENTRIES to avoid blocking the main thread on huge repos const projectFiles = useMemo(() => { if (!session?.fileTree) return []; @@ -100,6 +125,8 @@ export function useAtMentionCompletion(session: Session | null): UseAtMentionCom const traverse = (nodes: FileNode[], currentPath = '') => { for (const node of nodes) { + if (files.length >= MAX_FILE_TREE_ENTRIES) return; + const fullPath = currentPath ? `${currentPath}/${node.name}` : node.name; files.push({ name: node.name, @@ -140,7 +167,32 @@ export function useAtMentionCompletion(session: Session | null): UseAtMentionCom // Early return if no files available (allFiles is empty when session is null) if (allFiles.length === 0) return []; + // PERF: When no filter (user just typed @), skip all fuzzy matching + // and return the first N files directly. Avoids 200k+ no-op fuzzyMatchWithScore calls. + if (!filter) { + const results: AtMentionSuggestion[] = []; + for (let i = 0; i < Math.min(allFiles.length, MAX_SUGGESTION_RESULTS); i++) { + const file = allFiles[i]; + results.push({ + value: file.path, + type: file.type, + displayText: file.name, + fullPath: file.path, + score: 0, + source: file.source, + }); + } + // Sort the small result set (sorting 15 items is essentially free) + results.sort((a, b) => { + if (a.type !== b.type) return a.type === 'file' ? 
-1 : 1; + return a.displayText.localeCompare(b.displayText); + }); + return results; + } + const suggestions: AtMentionSuggestion[] = []; + const filterLower = filter.toLowerCase(); + let exactSubstringMatchCount = 0; for (const file of allFiles) { // Match against both file name and full path @@ -150,7 +202,7 @@ export function useAtMentionCompletion(session: Session | null): UseAtMentionCom // Use the better of the two scores const bestMatch = nameMatch.score > pathMatch.score ? nameMatch : pathMatch; - if (bestMatch.matches || !filter) { + if (bestMatch.matches) { suggestions.push({ value: file.path, type: file.type, @@ -159,6 +211,24 @@ export function useAtMentionCompletion(session: Session | null): UseAtMentionCom score: bestMatch.score, source: file.source, }); + + // Track exact substring matches for early exit + if ( + file.name.toLowerCase().includes(filterLower) || + file.path.toLowerCase().includes(filterLower) + ) { + exactSubstringMatchCount++; + } + + // PERF: Early exit - once we have enough high-quality exact substring + // matches and enough total results, further searching through remaining + // files would only yield lower-scoring fuzzy matches. 
+ if ( + exactSubstringMatchCount >= EARLY_EXIT_EXACT_MATCH_THRESHOLD && + suggestions.length >= MAX_SUGGESTION_RESULTS + ) { + break; + } } } @@ -175,7 +245,7 @@ export function useAtMentionCompletion(session: Session | null): UseAtMentionCom }); // Limit to reasonable number - return suggestions.slice(0, 15); + return suggestions.slice(0, MAX_SUGGESTION_RESULTS); }, [allFiles] ); diff --git a/src/renderer/hooks/input/useTabCompletion.ts b/src/renderer/hooks/input/useTabCompletion.ts index 860c1513..03c101d8 100644 --- a/src/renderer/hooks/input/useTabCompletion.ts +++ b/src/renderer/hooks/input/useTabCompletion.ts @@ -10,6 +10,13 @@ export interface TabCompletionSuggestion { export type TabCompletionFilter = 'all' | 'history' | 'branch' | 'tag' | 'file'; +/** + * PERF: Maximum number of file tree entries to flatten. + * Mirrors the cap in useAtMentionCompletion to avoid blocking the main thread + * on repos with 100k+ files. + */ +const MAX_FILE_TREE_ENTRIES = 50_000; + export interface UseTabCompletionReturn { getSuggestions: (input: string, filter?: TabCompletionFilter) => TabCompletionSuggestion[]; } @@ -55,8 +62,11 @@ export function useTabCompletion(session: Session | null): UseTabCompletionRetur const names: { name: string; type: 'file' | 'folder'; path: string }[] = []; + // PERF: Capped at MAX_FILE_TREE_ENTRIES to avoid blocking the main thread on huge repos const traverse = (nodes: FileNode[], currentPath = '') => { for (const node of nodes) { + if (names.length >= MAX_FILE_TREE_ENTRIES) return; + const fullPath = currentPath ? 
`${currentPath}/${node.name}` : node.name; names.push({ name: node.name, diff --git a/src/renderer/hooks/session/useBatchedSessionUpdates.ts b/src/renderer/hooks/session/useBatchedSessionUpdates.ts index 8158d11f..da59f1e6 100644 --- a/src/renderer/hooks/session/useBatchedSessionUpdates.ts +++ b/src/renderer/hooks/session/useBatchedSessionUpdates.ts @@ -163,10 +163,18 @@ export function useBatchedSessionUpdates( hasPendingRef.current = false; setSessions((prev) => { - return prev.map((session) => { + // PERF: Track whether any session was actually modified. + // If no session in prev matched an accumulator entry, return prev + // unchanged to preserve referential identity and skip a React re-render. + // This avoids ~7 unnecessary re-renders/sec when agents stream data for + // sessions that were removed between accumulation and flush. + let anyChanged = false; + + const next = prev.map((session) => { const acc = updates.get(session.id); if (!acc) return session; + anyChanged = true; let updatedSession = { ...session }; // Apply log accumulations @@ -438,6 +446,8 @@ export function useBatchedSessionUpdates( return updatedSession; }); + + return anyChanged ? next : prev; }); }, [setSessions]); diff --git a/src/renderer/utils/contextUsage.ts b/src/renderer/utils/contextUsage.ts index 9d0a2d97..3182de20 100644 --- a/src/renderer/utils/contextUsage.ts +++ b/src/renderer/utils/contextUsage.ts @@ -1,17 +1,126 @@ /** * Context Usage Estimation Utilities * - * SYNC: Re-exports from shared/contextUsage.ts for backward compatibility. - * All context usage logic is centralized there. See that file for: - * - The canonical calculation formula - * - All locations that must stay in sync - * - Provider-specific semantics (Claude vs OpenAI) + * Provides fallback estimation for context window usage when agents + * don't report their context window size directly. 
 */ -export { - DEFAULT_CONTEXT_WINDOWS, - COMBINED_CONTEXT_AGENTS, - calculateContextTokens, - estimateContextUsage, - type ContextUsageStats, -} from '../../shared/contextUsage'; +import type { ToolType } from '../types'; +import type { UsageStats } from '../../shared/types'; + +/** + * Default context window sizes for different agents. + * Used as fallback when the agent doesn't report its context window size. + */ +export const DEFAULT_CONTEXT_WINDOWS: Record<ToolType, number> = { + 'claude-code': 200000, // Claude 3.5 Sonnet/Claude 4 default context + codex: 200000, // OpenAI o3/o4-mini context window + opencode: 128000, // OpenCode (depends on model, 128k is conservative default) + 'factory-droid': 200000, // Factory Droid (varies by model, defaults to Claude Opus) + terminal: 0, // Terminal has no context window +}; + +/** + * Agents that use combined input+output context windows. + * OpenAI models (Codex, o3, o4-mini) have a single context window that includes + * both input and output tokens, unlike Claude which has separate limits. + */ +const COMBINED_CONTEXT_AGENTS: Set<ToolType> = new Set(['codex']); + +/** + * Calculate total context tokens based on agent-specific semantics. + * + * For a single Anthropic API call, the total input context is the sum of: + * inputTokens + cacheReadInputTokens + cacheCreationInputTokens + * These three fields partition the input into uncached, cache-hit, and newly-cached segments. + * + * CAVEAT: When Claude Code performs multi-tool turns (many internal API calls), + * the reported values may be accumulated across all internal calls within the turn. + * In that case the total can exceed the context window. Callers should check for + * this and skip the update (see estimateContextUsage). 
+ * + * Claude models: Context = input + cacheRead + cacheCreation + * OpenAI models: Context = input + output (combined limit) + * + * @param stats - The usage statistics containing token counts + * @param agentId - The agent identifier for agent-specific calculation + * @returns Total context tokens used + */ +export function calculateContextTokens( + stats: { + inputTokens?: number; + outputTokens?: number; + cacheReadInputTokens?: number; + cacheCreationInputTokens?: number; + }, + agentId?: ToolType | string +): number { + // OpenAI models have combined input+output context limits + if (agentId && COMBINED_CONTEXT_AGENTS.has(agentId as ToolType)) { + return (stats.inputTokens || 0) + (stats.cacheCreationInputTokens || 0) + (stats.outputTokens || 0); + } + + // Claude models: total input = uncached + cache-hit + newly-cached + // Output tokens don't consume the input context window + return ( + (stats.inputTokens || 0) + (stats.cacheReadInputTokens || 0) + (stats.cacheCreationInputTokens || 0) + ); +} + +/** + * Estimate context usage percentage when the agent doesn't provide it directly. + * Uses agent-specific default context window sizes for accurate estimation. + * + * Context calculation varies by agent: + * - Claude models: inputTokens + cacheReadInputTokens + cacheCreationInputTokens + * - OpenAI models (Codex): inputTokens + outputTokens (combined limit) + * + * Returns null when the calculated total exceeds the context window, which indicates + * accumulated values from multi-tool turns (many internal API calls within one turn). + * A single API call's total input can never exceed the context window, so values + * above it are definitely accumulated. Callers should preserve the previous valid + * percentage when this returns null. 
+ * + * @param stats - The usage statistics containing token counts + * @param agentId - The agent identifier for agent-specific context window size + * @returns Estimated context usage percentage (0-100), or null if cannot be estimated + */ +export function estimateContextUsage( + stats: { + inputTokens?: number; + outputTokens?: number; + cacheReadInputTokens?: number; + cacheCreationInputTokens?: number; + contextWindow?: number; + }, + agentId?: ToolType | string +): number | null { + // Calculate total context using agent-specific semantics + const totalContextTokens = calculateContextTokens(stats, agentId); + + // Determine effective context window + const effectiveContextWindow = + stats.contextWindow && stats.contextWindow > 0 + ? stats.contextWindow + : agentId && agentId !== 'terminal' + ? DEFAULT_CONTEXT_WINDOWS[agentId as ToolType] || 0 + : 0; + + if (!effectiveContextWindow || effectiveContextWindow <= 0) { + return null; + } + + // If total exceeds context window, the values are accumulated across multiple + // internal API calls within a complex turn (tool use chains). A single API call's + // total input cannot exceed the context window. Return null to signal callers + // should keep the previous valid percentage. + if (totalContextTokens > effectiveContextWindow) { + return null; + } + + if (totalContextTokens <= 0) { + return 0; + } + + return Math.round((totalContextTokens / effectiveContextWindow) * 100); +} diff --git a/src/shared/contextUsage.ts b/src/shared/contextUsage.ts deleted file mode 100644 index 1e93a78b..00000000 --- a/src/shared/contextUsage.ts +++ /dev/null @@ -1,176 +0,0 @@ -/** - * Context Usage Estimation Utilities - * - * ╔══════════════════════════════════════════════════════════════════════════════╗ - * ║ CONTEXT CALCULATION SYNCHRONIZATION ║ - * ╠══════════════════════════════════════════════════════════════════════════════╣ - * ║ This is the SINGLE SOURCE OF TRUTH for context window calculations. 
║ - * ║ ║ - * ║ ALL context calculations in the codebase MUST use these functions: ║ - * ║ - calculateContextTokens() - Calculate total context tokens ║ - * ║ - estimateContextUsage() - Estimate context usage percentage ║ - * ║ ║ - * ║ LOCATIONS THAT USE THESE (keep in sync when modifying): ║ - * ║ 1. src/renderer/App.tsx (line ~2768) - UI context % display ║ - * ║ 2. src/renderer/utils/contextUsage.ts - Re-exports for renderer ║ - * ║ 3. src/renderer/utils/contextExtractor.ts - Token estimation ║ - * ║ 4. src/renderer/components/MainPanel.tsx - Tab context display ║ - * ║ 5. src/renderer/components/TabSwitcherModal.tsx - Tab switcher ║ - * ║ 6. src/renderer/components/HistoryDetailModal.tsx - History view ║ - * ║ 7. src/renderer/services/contextSummarizer.ts - Compaction eligibility ║ - * ║ 8. src/main/parsers/usage-aggregator.ts - Re-exports for main process ║ - * ║ 9. src/main/process-listeners/usage-listener.ts - Usage event handling ║ - * ║ 10. src/web/mobile/App.tsx - Mobile UI ║ - * ║ 11. 
src/web/mobile/SessionStatusBanner.tsx - Mobile status ║ - * ║ ║ - * ║ PROVIDER-SPECIFIC FORMULAS: ║ - * ║ ║ - * ║ Claude-style (separate input/output limits): ║ - * ║ total = inputTokens + cacheReadInputTokens + cacheCreationInputTokens ║ - * ║ Agents: claude-code, factory-droid, opencode ║ - * ║ (OpenCode and Factory Droid can use various models, but they report ║ - * ║ cache tokens in Claude-style format regardless of backend) ║ - * ║ ║ - * ║ OpenAI-style (combined input+output limit): ║ - * ║ total = inputTokens + outputTokens ║ - * ║ Agents: codex ║ - * ║ (COMBINED_CONTEXT_AGENTS set determines which agents use this) ║ - * ║ ║ - * ║ KNOWN ISSUES (as of 2026-01-31): ║ - * ║ - Claude Code usage can be cumulative; normalized to per-turn in StdoutHandler ║ - * ║ - Values fluctuate based on which model (Haiku vs Sonnet) handles turn ║ - * ║ - This causes UI to show inconsistent context % across turns ║ - * ║ - Compaction check may fail when UI shows high but stored value is low ║ - * ╚══════════════════════════════════════════════════════════════════════════════╝ - * - * @see https://platform.claude.com/docs/en/build-with-claude/prompt-caching - * @see https://code.claude.com/docs/en/statusline#context-window-usage - */ - -import type { ToolType } from './types'; - -/** - * Default context window sizes for different agents. - * Used as fallback when the agent doesn't report its context window size. 
- * - * SYNC: When adding a new agent, also update: - * - COMBINED_CONTEXT_AGENTS if it uses combined input+output limits - * - calculateContextTokens() if it has a unique formula - */ -export const DEFAULT_CONTEXT_WINDOWS: Record<ToolType, number> = { - 'claude-code': 200000, // Claude 3.5 Sonnet/Claude 4 default context - codex: 200000, // OpenAI o3/o4-mini context window - opencode: 128000, // OpenCode (depends on model, 128k is conservative default) - 'factory-droid': 200000, // Factory Droid (varies by model, defaults to Claude Opus) - terminal: 0, // Terminal has no context window -}; - -/** - * Agents that use combined input+output context windows. - * OpenAI models (Codex, o3, o4-mini) have a single context window that includes - * both input and output tokens, unlike Claude which has separate limits. - * - * SYNC: When adding a new agent with combined context limits, add it here - * and update calculateContextTokens() to handle it. - */ -export const COMBINED_CONTEXT_AGENTS: Set<string> = new Set(['codex']); - -/** - * Minimal usage stats interface for context calculation. - * All fields are optional to support different sources (web, renderer, main). - */ -export interface ContextUsageStats { - inputTokens?: number; - outputTokens?: number; - cacheReadInputTokens?: number; - cacheCreationInputTokens?: number; - contextWindow?: number; -} - -/** - * Calculate total context tokens based on agent-specific semantics. - * - * ╔══════════════════════════════════════════════════════════════════════════════╗ - * ║ THIS IS THE CANONICAL CONTEXT CALCULATION FUNCTION ║ - * ║ All UI displays, compaction checks, and usage tracking MUST use this. 
║ - * ╚══════════════════════════════════════════════════════════════════════════════╝ - * - * Per Anthropic documentation, the context calculation formula is: - * total_context = input_tokens + cache_read_input_tokens + cache_creation_input_tokens - * - * Where: - * - input_tokens: New uncached tokens AFTER the last cache breakpoint - * - cache_read_input_tokens: Tokens retrieved from cache (entire cached prefix) - * - cache_creation_input_tokens: Tokens being written to cache for the first time - * - * For OpenAI models (Codex), context = input + output (combined limit) - * - * @param stats - The usage statistics containing token counts - * @param agentId - The agent identifier for agent-specific calculation - * @returns Total context tokens used for this turn - * - * @see https://platform.claude.com/docs/en/build-with-claude/prompt-caching - */ -export function calculateContextTokens( - stats: ContextUsageStats, - agentId?: ToolType | string -): number { - // Per Anthropic docs: total_context = input + cacheRead + cacheCreation - // All three components occupy context window space. - const baseTokens = - (stats.inputTokens || 0) + - (stats.cacheReadInputTokens || 0) + - (stats.cacheCreationInputTokens || 0); - - // OpenAI models have combined input+output context limits - if (agentId && COMBINED_CONTEXT_AGENTS.has(agentId)) { - return baseTokens + (stats.outputTokens || 0); - } - - // Claude models: output tokens don't consume context window - return baseTokens; -} - -/** - * Estimate context usage percentage when the agent doesn't provide it directly. - * Uses agent-specific default context window sizes for accurate estimation. 
- * - * Context calculation varies by agent: - * - Claude models: inputTokens + cacheReadInputTokens + cacheCreationInputTokens - * (per Anthropic docs, all three occupy context window space) - * - OpenAI models (Codex): inputTokens + outputTokens - * (combined context window includes both input and output) - * - * @param stats - The usage statistics containing token counts - * @param agentId - The agent identifier for agent-specific context window size - * @returns Estimated context usage percentage (0-100), or null if cannot be estimated - */ -export function estimateContextUsage( - stats: ContextUsageStats, - agentId?: ToolType | string -): number | null { - // Calculate total context using agent-specific semantics - const totalContextTokens = calculateContextTokens(stats, agentId); - - // If context window is provided and valid, use it - if (stats.contextWindow && stats.contextWindow > 0) { - return Math.min(100, Math.round((totalContextTokens / stats.contextWindow) * 100)); - } - - // If no agent specified or terminal, cannot estimate - if (!agentId || agentId === 'terminal') { - return null; - } - - // Use agent-specific default context window - const defaultContextWindow = DEFAULT_CONTEXT_WINDOWS[agentId as ToolType]; - if (!defaultContextWindow || defaultContextWindow <= 0) { - return null; - } - - if (totalContextTokens <= 0) { - return 0; - } - - return Math.min(100, Math.round((totalContextTokens / defaultContextWindow) * 100)); -} diff --git a/src/shared/stats-types.ts b/src/shared/stats-types.ts index 86f0b716..82aa0ebe 100644 --- a/src/shared/stats-types.ts +++ b/src/shared/stats-types.ts @@ -1,7 +1,7 @@ /** * Type definitions for the stats tracking system * - * These types are shared between main process (stats-db.ts) and renderer (dashboard). + * These types are shared between main process (stats/) and renderer (dashboard). 
*/ /** diff --git a/src/web/mobile/App.tsx b/src/web/mobile/App.tsx index 492d589d..46c01f43 100644 --- a/src/web/mobile/App.tsx +++ b/src/web/mobile/App.tsx @@ -23,7 +23,7 @@ import { buildApiUrl } from '../utils/config'; import { formatCost } from '../../shared/formatters'; // SYNC: Uses estimateContextUsage() from shared/contextUsage.ts // See that file for the canonical formula and all locations that must stay in sync. -import { estimateContextUsage } from '../../shared/contextUsage'; +import { estimateContextUsage } from '../../renderer/utils/contextUsage'; import { triggerHaptic, HAPTIC_PATTERNS } from './constants'; import { webLogger } from '../utils/logger'; import { SessionPillBar } from './SessionPillBar'; diff --git a/src/web/mobile/SessionStatusBanner.tsx b/src/web/mobile/SessionStatusBanner.tsx index 3025e75b..d4ad23c2 100644 --- a/src/web/mobile/SessionStatusBanner.tsx +++ b/src/web/mobile/SessionStatusBanner.tsx @@ -33,7 +33,7 @@ import { import { stripAnsiCodes } from '../../shared/stringUtils'; // SYNC: Uses estimateContextUsage() from shared/contextUsage.ts // See that file for the canonical formula and all locations that must stay in sync. -import { estimateContextUsage } from '../../shared/contextUsage'; +import { estimateContextUsage } from '../../renderer/utils/contextUsage'; /** * Props for SessionStatusBanner component