Merge main into 0.15.0-rc: fix context window calculation

Key changes:
- Accept main's fix for context usage calculation (which returns null for
  accumulated multi-tool-turn token values instead of capping at 100%)
- Adopt main's refactored structure:
  - agent-detector.ts → agents/detector.ts + definitions.ts + capabilities.ts
  - stats-db.ts → stats/*.ts modules
  - agent-session-storage types → agents/index.ts
- Port factory-droid agent to new agents/definitions.ts structure
- Remove obsolete shared/contextUsage.ts (logic now in renderer/utils)
- Update all import paths to reference new module locations
- Preserve all RC features: Symphony, File Preview Tabs, TabNaming, etc.

The context window fix is critical: main's approach correctly handles
when Claude Code reports accumulated token values from multi-tool turns
by returning null, causing the UI to preserve the last valid percentage.
RC's approach masked this by capping at 100%, hiding the issue.
This commit is contained in:
Pedram Amini
2026-02-02 18:03:05 -06:00
103 changed files with 15348 additions and 10508 deletions

View File

@@ -26,7 +26,7 @@ import { describe, it, expect, beforeAll } from 'vitest';
import { spawn } from 'child_process';
import { promisify } from 'util';
import { exec } from 'child_process';
import { getAgentCapabilities } from '../../main/agent-capabilities';
import { getAgentCapabilities } from '../../main/agents';
const execAsync = promisify(exec);

View File

@@ -36,7 +36,7 @@ import {
} from '../../main/group-chat/group-chat-moderator';
import { addParticipant } from '../../main/group-chat/group-chat-agent';
import { routeUserMessage } from '../../main/group-chat/group-chat-router';
import { AgentDetector } from '../../main/agent-detector';
import { AgentDetector } from '../../main/agents';
import {
selectTestAgents,
waitForAgentResponse,

View File

@@ -29,7 +29,7 @@ import { exec } from 'child_process';
import * as fs from 'fs';
import * as path from 'path';
import * as os from 'os';
import { getAgentCapabilities } from '../../main/agent-capabilities';
import { getAgentCapabilities } from '../../main/agents';
import { buildSshCommand, buildRemoteCommand } from '../../main/utils/ssh-command-builder';
import type { SshRemoteConfig } from '../../shared/types';

View File

@@ -5,7 +5,7 @@ import {
AGENT_CAPABILITIES,
getAgentCapabilities,
hasCapability,
} from '../../main/agent-capabilities';
} from '../../../main/agents';
describe('agent-capabilities', () => {
describe('AgentCapabilities interface', () => {

View File

@@ -0,0 +1,253 @@
/**
 * Tests for agent-definitions.ts
 *
 * Tests the agent definition data structures and helper functions
 * (the static AGENT_DEFINITIONS registry, lookup helpers, per-agent
 * argument builders, and config-option metadata), all re-exported
 * through the consolidated main/agents module.
 */
import { describe, it, expect } from 'vitest';
import {
AGENT_DEFINITIONS,
getAgentDefinition,
getAgentIds,
getVisibleAgentDefinitions,
type AgentDefinition,
type AgentConfigOption,
} from '../../../main/agents';
describe('agent-definitions', () => {
// Registry contents: every supported agent must be present and carry the
// core properties (id, name, binaryName, command, args) that the rest of
// the app relies on when spawning agents.
describe('AGENT_DEFINITIONS', () => {
it('should contain all expected agents', () => {
const agentIds = AGENT_DEFINITIONS.map((def) => def.id);
expect(agentIds).toContain('terminal');
expect(agentIds).toContain('claude-code');
expect(agentIds).toContain('codex');
expect(agentIds).toContain('opencode');
expect(agentIds).toContain('gemini-cli');
expect(agentIds).toContain('qwen3-coder');
expect(agentIds).toContain('aider');
});
it('should have required properties on all definitions', () => {
for (const def of AGENT_DEFINITIONS) {
expect(def.id).toBeDefined();
expect(def.name).toBeDefined();
expect(def.binaryName).toBeDefined();
expect(def.command).toBeDefined();
expect(def.args).toBeDefined();
expect(Array.isArray(def.args)).toBe(true);
}
});
it('should have terminal as a hidden agent', () => {
const terminal = AGENT_DEFINITIONS.find((def) => def.id === 'terminal');
expect(terminal?.hidden).toBe(true);
});
it('should have claude-code with correct base args', () => {
const claudeCode = AGENT_DEFINITIONS.find((def) => def.id === 'claude-code');
expect(claudeCode).toBeDefined();
expect(claudeCode?.args).toContain('--print');
expect(claudeCode?.args).toContain('--verbose');
expect(claudeCode?.args).toContain('--output-format');
expect(claudeCode?.args).toContain('stream-json');
expect(claudeCode?.args).toContain('--dangerously-skip-permissions');
});
it('should have codex with batch mode configuration', () => {
const codex = AGENT_DEFINITIONS.find((def) => def.id === 'codex');
expect(codex).toBeDefined();
expect(codex?.batchModePrefix).toEqual(['exec']);
expect(codex?.batchModeArgs).toContain('--dangerously-bypass-approvals-and-sandbox');
expect(codex?.jsonOutputArgs).toEqual(['--json']);
});
it('should have opencode with batch mode configuration', () => {
const opencode = AGENT_DEFINITIONS.find((def) => def.id === 'opencode');
expect(opencode).toBeDefined();
expect(opencode?.batchModePrefix).toEqual(['run']);
expect(opencode?.jsonOutputArgs).toEqual(['--format', 'json']);
// opencode takes the prompt directly, without a '--' separator
expect(opencode?.noPromptSeparator).toBe(true);
});
it('should have opencode with default env vars for YOLO mode', () => {
const opencode = AGENT_DEFINITIONS.find((def) => def.id === 'opencode');
expect(opencode?.defaultEnvVars).toBeDefined();
// YOLO-mode config is injected through OPENCODE_CONFIG_CONTENT
expect(opencode?.defaultEnvVars?.OPENCODE_CONFIG_CONTENT).toContain('external_directory');
});
});
// Lookup helper: retrieve a single definition by agent ID.
describe('getAgentDefinition', () => {
it('should return definition for valid agent ID', () => {
const claudeCode = getAgentDefinition('claude-code');
expect(claudeCode).toBeDefined();
expect(claudeCode?.id).toBe('claude-code');
expect(claudeCode?.name).toBe('Claude Code');
});
it('should return undefined for invalid agent ID', () => {
const invalid = getAgentDefinition('non-existent-agent');
expect(invalid).toBeUndefined();
});
it('should return definition for all known agents', () => {
const knownAgents = ['terminal', 'claude-code', 'codex', 'opencode', 'gemini-cli', 'aider'];
for (const agentId of knownAgents) {
const def = getAgentDefinition(agentId);
expect(def).toBeDefined();
expect(def?.id).toBe(agentId);
}
});
});
// getAgentIds must stay in one-to-one correspondence with AGENT_DEFINITIONS.
describe('getAgentIds', () => {
it('should return array of all agent IDs', () => {
const ids = getAgentIds();
expect(Array.isArray(ids)).toBe(true);
expect(ids.length).toBeGreaterThan(0);
expect(ids).toContain('claude-code');
expect(ids).toContain('terminal');
});
it('should match AGENT_DEFINITIONS length', () => {
const ids = getAgentIds();
expect(ids.length).toBe(AGENT_DEFINITIONS.length);
});
});
// Visibility filter: hidden agents (e.g. terminal) are excluded from UI lists.
describe('getVisibleAgentDefinitions', () => {
it('should not include hidden agents', () => {
const visible = getVisibleAgentDefinitions();
const visibleIds = visible.map((def) => def.id);
// Terminal should be hidden
expect(visibleIds).not.toContain('terminal');
});
it('should include visible agents', () => {
const visible = getVisibleAgentDefinitions();
const visibleIds = visible.map((def) => def.id);
expect(visibleIds).toContain('claude-code');
expect(visibleIds).toContain('codex');
expect(visibleIds).toContain('opencode');
});
it('should return fewer items than AGENT_DEFINITIONS', () => {
// At least one definition (terminal) is hidden, so visible < total
const visible = getVisibleAgentDefinitions();
expect(visible.length).toBeLessThan(AGENT_DEFINITIONS.length);
});
});
// Per-agent CLI argument builders: each returns the flag array for a single
// concern (resume session, model selection, working dir, image attachment).
describe('Agent argument builders', () => {
it('should have resumeArgs function for claude-code', () => {
const claudeCode = getAgentDefinition('claude-code');
expect(claudeCode?.resumeArgs).toBeDefined();
expect(typeof claudeCode?.resumeArgs).toBe('function');
const args = claudeCode?.resumeArgs?.('test-session-123');
expect(args).toEqual(['--resume', 'test-session-123']);
});
it('should have resumeArgs function for codex', () => {
// codex uses a positional subcommand rather than a flag
const codex = getAgentDefinition('codex');
expect(codex?.resumeArgs).toBeDefined();
const args = codex?.resumeArgs?.('thread-456');
expect(args).toEqual(['resume', 'thread-456']);
});
it('should have resumeArgs function for opencode', () => {
const opencode = getAgentDefinition('opencode');
expect(opencode?.resumeArgs).toBeDefined();
const args = opencode?.resumeArgs?.('session-789');
expect(args).toEqual(['--session', 'session-789']);
});
it('should have modelArgs function for opencode', () => {
const opencode = getAgentDefinition('opencode');
expect(opencode?.modelArgs).toBeDefined();
const args = opencode?.modelArgs?.('ollama/qwen3:8b');
expect(args).toEqual(['--model', 'ollama/qwen3:8b']);
});
it('should have workingDirArgs function for codex', () => {
const codex = getAgentDefinition('codex');
expect(codex?.workingDirArgs).toBeDefined();
const args = codex?.workingDirArgs?.('/path/to/project');
expect(args).toEqual(['-C', '/path/to/project']);
});
it('should have imageArgs function for codex', () => {
const codex = getAgentDefinition('codex');
expect(codex?.imageArgs).toBeDefined();
const args = codex?.imageArgs?.('/path/to/image.png');
expect(args).toEqual(['-i', '/path/to/image.png']);
});
it('should have imageArgs function for opencode', () => {
const opencode = getAgentDefinition('opencode');
expect(opencode?.imageArgs).toBeDefined();
const args = opencode?.imageArgs?.('/path/to/image.png');
expect(args).toEqual(['-f', '/path/to/image.png']);
});
});
// Config option metadata: user-tunable per-agent settings exposed in the UI.
describe('Agent config options', () => {
it('should have configOptions for codex', () => {
const codex = getAgentDefinition('codex');
expect(codex?.configOptions).toBeDefined();
expect(Array.isArray(codex?.configOptions)).toBe(true);
const contextWindowOption = codex?.configOptions?.find((opt) => opt.key === 'contextWindow');
expect(contextWindowOption).toBeDefined();
expect(contextWindowOption?.type).toBe('number');
expect(contextWindowOption?.default).toBe(400000);
});
it('should have configOptions for opencode', () => {
const opencode = getAgentDefinition('opencode');
expect(opencode?.configOptions).toBeDefined();
const modelOption = opencode?.configOptions?.find((opt) => opt.key === 'model');
expect(modelOption).toBeDefined();
expect(modelOption?.type).toBe('text');
expect(modelOption?.default).toBe('');
// Test argBuilder
// Empty/whitespace-only values must produce no flags at all
expect(modelOption?.argBuilder).toBeDefined();
expect(modelOption?.argBuilder?.('ollama/qwen3:8b')).toEqual(['--model', 'ollama/qwen3:8b']);
expect(modelOption?.argBuilder?.('')).toEqual([]);
expect(modelOption?.argBuilder?.(' ')).toEqual([]);
});
});
// Compile-time checks: the public types must be constructible with only
// their required fields.
describe('Type definitions', () => {
it('should export AgentDefinition type', () => {
const def: AgentDefinition = {
id: 'test',
name: 'Test Agent',
binaryName: 'test',
command: 'test',
args: [],
};
expect(def.id).toBe('test');
});
it('should export AgentConfigOption type', () => {
const option: AgentConfigOption = {
key: 'testKey',
type: 'text',
label: 'Test Label',
description: 'Test description',
default: 'default value',
};
expect(option.key).toBe('testKey');
});
});
});

View File

@@ -4,14 +4,14 @@ import {
AgentConfig,
AgentConfigOption,
AgentCapabilities,
} from '../../main/agent-detector';
} from '../../../main/agents';
// Mock dependencies
vi.mock('../../main/utils/execFile', () => ({
vi.mock('../../../main/utils/execFile', () => ({
execFileNoThrow: vi.fn(),
}));
vi.mock('../../main/utils/logger', () => ({
vi.mock('../../../main/utils/logger', () => ({
logger: {
info: vi.fn(),
warn: vi.fn(),
@@ -21,8 +21,8 @@ vi.mock('../../main/utils/logger', () => ({
}));
// Get mocked modules
import { execFileNoThrow } from '../../main/utils/execFile';
import { logger } from '../../main/utils/logger';
import { execFileNoThrow } from '../../../main/utils/execFile';
import { logger } from '../../../main/utils/logger';
import * as fs from 'fs';
import * as os from 'os';
import * as path from 'path';
@@ -501,7 +501,7 @@ describe('agent-detector', () => {
expect(logger.warn).toHaveBeenCalledWith(
expect.stringContaining('not executable'),
'AgentDetector'
'PathProber'
);
} finally {
Object.defineProperty(process, 'platform', { value: originalPlatform, configurable: true });
@@ -1230,6 +1230,81 @@ describe('agent-detector', () => {
});
});
describe('model cache TTL', () => {
it('should invalidate model cache after TTL expires', async () => {
vi.useFakeTimers();
// Setup: opencode is available
mockExecFileNoThrow.mockImplementation(async (cmd, args) => {
const binaryName = args[0];
if (binaryName === 'opencode') {
return { stdout: '/usr/bin/opencode\n', stderr: '', exitCode: 0 };
}
if (cmd === '/usr/bin/opencode' && args[0] === 'models') {
return {
stdout: 'initial-model\n',
stderr: '',
exitCode: 0,
};
}
return { stdout: '', stderr: 'not found', exitCode: 1 };
});
// Create detector with short TTL for testing (100ms)
const shortTtlDetector = new AgentDetector(100);
await shortTtlDetector.detectAgents();
// First call - should fetch
const models1 = await shortTtlDetector.discoverModels('opencode');
expect(models1).toEqual(['initial-model']);
// Clear mocks to track new calls
mockExecFileNoThrow.mockClear();
// Second call immediately - should use cache
const models2 = await shortTtlDetector.discoverModels('opencode');
expect(models2).toEqual(['initial-model']);
expect(mockExecFileNoThrow).not.toHaveBeenCalledWith(
'/usr/bin/opencode',
['models'],
undefined,
expect.any(Object)
);
// Advance time past TTL
vi.advanceTimersByTime(150);
// Setup new response for after cache expires
mockExecFileNoThrow.mockImplementation(async (cmd, args) => {
if (cmd === '/usr/bin/opencode' && args[0] === 'models') {
return {
stdout: 'new-model-after-ttl\n',
stderr: '',
exitCode: 0,
};
}
return { stdout: '', stderr: '', exitCode: 1 };
});
// Third call after TTL - should re-fetch
const models3 = await shortTtlDetector.discoverModels('opencode');
expect(models3).toEqual(['new-model-after-ttl']);
expect(mockExecFileNoThrow).toHaveBeenCalledWith(
'/usr/bin/opencode',
['models'],
undefined,
expect.any(Object)
);
vi.useRealTimers();
});
it('should accept custom cache TTL in constructor', () => {
const customTtlDetector = new AgentDetector(60000); // 1 minute
expect(customTtlDetector).toBeDefined();
});
});
describe('clearModelCache', () => {
beforeEach(async () => {
mockExecFileNoThrow.mockImplementation(async (cmd, args) => {

View File

@@ -0,0 +1,452 @@
/**
 * Tests for path-prober.ts
 *
 * Tests the platform-specific binary detection logic: PATH expansion,
 * validation of user-supplied binary paths, probing of well-known install
 * locations on Windows/Unix, and the combined probe-then-which/where
 * lookup in checkBinaryExists. Platform behavior is exercised by
 * temporarily overriding process.platform; fs and exec are mocked.
 */
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import * as fs from 'fs';
// Mock dependencies before importing the module
vi.mock('../../../main/utils/execFile', () => ({
execFileNoThrow: vi.fn(),
}));
vi.mock('../../../main/utils/logger', () => ({
logger: {
info: vi.fn(),
warn: vi.fn(),
error: vi.fn(),
debug: vi.fn(),
},
}));
// expandTilde is stubbed to a fixed home dir so tilde tests are deterministic
vi.mock('../../../shared/pathUtils', () => ({
expandTilde: vi.fn((p: string) => p.replace(/^~/, '/Users/testuser')),
detectNodeVersionManagerBinPaths: vi.fn(() => []),
}));
// Import after mocking
import {
getExpandedEnv,
checkCustomPath,
checkBinaryExists,
probeWindowsPaths,
probeUnixPaths,
type BinaryDetectionResult,
} from '../../../main/agents';
import { execFileNoThrow } from '../../../main/utils/execFile';
import { logger } from '../../../main/utils/logger';
describe('path-prober', () => {
beforeEach(() => {
vi.clearAllMocks();
});
// PATH augmentation: the prober must see common install locations even when
// the app inherits a minimal environment (e.g. launched from Finder/Dock).
describe('getExpandedEnv', () => {
it('should return environment with PATH', () => {
const env = getExpandedEnv();
expect(env.PATH).toBeDefined();
expect(typeof env.PATH).toBe('string');
});
it('should include common Unix paths on non-Windows', () => {
const originalPlatform = process.platform;
Object.defineProperty(process, 'platform', { value: 'darwin', configurable: true });
try {
const env = getExpandedEnv();
expect(env.PATH).toContain('/opt/homebrew/bin');
expect(env.PATH).toContain('/usr/local/bin');
} finally {
Object.defineProperty(process, 'platform', { value: originalPlatform, configurable: true });
}
});
it('should preserve existing PATH entries', () => {
const originalPath = process.env.PATH;
const testPath = '/test/custom/path';
process.env.PATH = testPath;
try {
const env = getExpandedEnv();
expect(env.PATH).toContain(testPath);
} finally {
process.env.PATH = originalPath;
}
});
});
// Validation of an explicit, user-configured binary path: tilde expansion,
// Windows .exe/.cmd extension fallback, and the Unix executable-bit check.
describe('checkCustomPath', () => {
let statMock: ReturnType<typeof vi.spyOn>;
let accessMock: ReturnType<typeof vi.spyOn>;
beforeEach(() => {
statMock = vi.spyOn(fs.promises, 'stat');
accessMock = vi.spyOn(fs.promises, 'access');
});
afterEach(() => {
statMock.mockRestore();
accessMock.mockRestore();
});
it('should return exists: true for valid executable path on Unix', async () => {
const originalPlatform = process.platform;
Object.defineProperty(process, 'platform', { value: 'darwin', configurable: true });
try {
statMock.mockResolvedValue({ isFile: () => true } as fs.Stats);
accessMock.mockResolvedValue(undefined);
const result = await checkCustomPath('/usr/local/bin/claude');
expect(result.exists).toBe(true);
expect(result.path).toBe('/usr/local/bin/claude');
} finally {
Object.defineProperty(process, 'platform', { value: originalPlatform, configurable: true });
}
});
it('should return exists: false for non-executable file on Unix', async () => {
const originalPlatform = process.platform;
Object.defineProperty(process, 'platform', { value: 'darwin', configurable: true });
try {
statMock.mockResolvedValue({ isFile: () => true } as fs.Stats);
accessMock.mockRejectedValue(new Error('EACCES'));
const result = await checkCustomPath('/path/to/non-executable');
expect(result.exists).toBe(false);
// Non-executable files are rejected with a warning tagged 'PathProber'
expect(logger.warn).toHaveBeenCalledWith(
expect.stringContaining('not executable'),
'PathProber'
);
} finally {
Object.defineProperty(process, 'platform', { value: originalPlatform, configurable: true });
}
});
it('should return exists: false for non-existent path', async () => {
statMock.mockRejectedValue(new Error('ENOENT'));
const result = await checkCustomPath('/non/existent/path');
expect(result.exists).toBe(false);
});
it('should expand tilde in path', async () => {
const originalPlatform = process.platform;
Object.defineProperty(process, 'platform', { value: 'darwin', configurable: true });
try {
statMock.mockResolvedValue({ isFile: () => true } as fs.Stats);
accessMock.mockResolvedValue(undefined);
const result = await checkCustomPath('~/.local/bin/claude');
expect(result.exists).toBe(true);
// '/Users/testuser' comes from the expandTilde mock above
expect(result.path).toBe('/Users/testuser/.local/bin/claude');
} finally {
Object.defineProperty(process, 'platform', { value: originalPlatform, configurable: true });
}
});
it('should try .exe extension on Windows', async () => {
const originalPlatform = process.platform;
Object.defineProperty(process, 'platform', { value: 'win32', configurable: true });
try {
// First call (exact path) returns false, second call (.exe) returns true
statMock
.mockRejectedValueOnce(new Error('ENOENT'))
.mockResolvedValueOnce({ isFile: () => true } as fs.Stats);
const result = await checkCustomPath('C:\\custom\\claude');
expect(result.exists).toBe(true);
expect(result.path).toBe('C:\\custom\\claude.exe');
} finally {
Object.defineProperty(process, 'platform', { value: originalPlatform, configurable: true });
}
});
it('should try .cmd extension on Windows if .exe not found', async () => {
const originalPlatform = process.platform;
Object.defineProperty(process, 'platform', { value: 'win32', configurable: true });
try {
// First call (exact), second (.exe) return false, third (.cmd) returns true
statMock
.mockRejectedValueOnce(new Error('ENOENT'))
.mockRejectedValueOnce(new Error('ENOENT'))
.mockResolvedValueOnce({ isFile: () => true } as fs.Stats);
const result = await checkCustomPath('C:\\custom\\claude');
expect(result.exists).toBe(true);
expect(result.path).toBe('C:\\custom\\claude.cmd');
} finally {
Object.defineProperty(process, 'platform', { value: originalPlatform, configurable: true });
}
});
it('should skip executable check on Windows', async () => {
const originalPlatform = process.platform;
Object.defineProperty(process, 'platform', { value: 'win32', configurable: true });
try {
statMock.mockResolvedValue({ isFile: () => true } as fs.Stats);
// Don't mock access - it shouldn't be called for X_OK on Windows
const result = await checkCustomPath('C:\\custom\\claude.exe');
expect(result.exists).toBe(true);
// access should not be called with X_OK on Windows
} finally {
Object.defineProperty(process, 'platform', { value: originalPlatform, configurable: true });
}
});
});
// Probing of well-known Windows install locations for known binaries.
describe('probeWindowsPaths', () => {
let accessMock: ReturnType<typeof vi.spyOn>;
beforeEach(() => {
accessMock = vi.spyOn(fs.promises, 'access');
});
afterEach(() => {
accessMock.mockRestore();
});
it('should return null for unknown binary', async () => {
accessMock.mockRejectedValue(new Error('ENOENT'));
const result = await probeWindowsPaths('unknown-binary');
expect(result).toBeNull();
});
it('should probe known paths for claude binary', async () => {
// All paths fail - binary not found
accessMock.mockRejectedValue(new Error('ENOENT'));
const result = await probeWindowsPaths('claude');
// Should return null since all probes fail
expect(result).toBeNull();
// Should have tried multiple paths
expect(accessMock).toHaveBeenCalled();
});
});
// Probing of well-known Unix install locations for known binaries.
describe('probeUnixPaths', () => {
let accessMock: ReturnType<typeof vi.spyOn>;
beforeEach(() => {
accessMock = vi.spyOn(fs.promises, 'access');
});
afterEach(() => {
accessMock.mockRestore();
});
it('should return null for unknown binary', async () => {
accessMock.mockRejectedValue(new Error('ENOENT'));
const result = await probeUnixPaths('unknown-binary');
expect(result).toBeNull();
});
it('should probe known paths for claude binary', async () => {
// All paths fail - binary not found
accessMock.mockRejectedValue(new Error('ENOENT'));
const result = await probeUnixPaths('claude');
// Should return null since all probes fail
expect(result).toBeNull();
// Should have tried multiple paths
expect(accessMock).toHaveBeenCalled();
});
it('should check both existence and executability', async () => {
const originalPlatform = process.platform;
Object.defineProperty(process, 'platform', { value: 'darwin', configurable: true });
try {
accessMock.mockRejectedValue(new Error('ENOENT'));
const result = await probeUnixPaths('claude');
expect(result).toBeNull();
// Verify access was called with F_OK | X_OK
expect(accessMock).toHaveBeenCalled();
} finally {
Object.defineProperty(process, 'platform', { value: originalPlatform, configurable: true });
}
});
});
// Full detection pipeline: direct well-known-path probe first, then fall
// back to `which` (Unix) / `where` (Windows) via the mocked execFileNoThrow.
describe('checkBinaryExists', () => {
let accessMock: ReturnType<typeof vi.spyOn>;
const execMock = vi.mocked(execFileNoThrow);
beforeEach(() => {
accessMock = vi.spyOn(fs.promises, 'access');
});
afterEach(() => {
accessMock.mockRestore();
});
it('should try direct probe first on Unix', async () => {
const originalPlatform = process.platform;
Object.defineProperty(process, 'platform', { value: 'darwin', configurable: true });
try {
// Direct probe finds the binary (first path in the list exists)
accessMock.mockResolvedValueOnce(undefined);
const result = await checkBinaryExists('claude');
expect(result.exists).toBe(true);
expect(result.path).toContain('claude');
// which should not be called if direct probe succeeds
} finally {
Object.defineProperty(process, 'platform', { value: originalPlatform, configurable: true });
}
});
it('should fall back to which on Unix if probe fails', async () => {
const originalPlatform = process.platform;
Object.defineProperty(process, 'platform', { value: 'darwin', configurable: true });
try {
// Direct probe fails
accessMock.mockRejectedValue(new Error('ENOENT'));
// which succeeds
execMock.mockResolvedValue({
exitCode: 0,
stdout: '/usr/local/bin/test-binary\n',
stderr: '',
});
const result = await checkBinaryExists('test-binary');
expect(result.exists).toBe(true);
// stdout is trimmed of the trailing newline
expect(result.path).toBe('/usr/local/bin/test-binary');
expect(execMock).toHaveBeenCalledWith(
'which',
['test-binary'],
undefined,
expect.any(Object)
);
} finally {
Object.defineProperty(process, 'platform', { value: originalPlatform, configurable: true });
}
});
it('should use where on Windows', async () => {
const originalPlatform = process.platform;
Object.defineProperty(process, 'platform', { value: 'win32', configurable: true });
try {
// Direct probe fails
accessMock.mockRejectedValue(new Error('ENOENT'));
// where succeeds
execMock.mockResolvedValue({
exitCode: 0,
stdout: 'C:\\Users\\Test\\AppData\\Roaming\\npm\\test.cmd\r\n',
stderr: '',
});
const result = await checkBinaryExists('test');
expect(result.exists).toBe(true);
expect(execMock).toHaveBeenCalledWith('where', ['test'], undefined, expect.any(Object));
} finally {
Object.defineProperty(process, 'platform', { value: originalPlatform, configurable: true });
}
});
it('should return exists: false if binary not found', async () => {
const originalPlatform = process.platform;
Object.defineProperty(process, 'platform', { value: 'darwin', configurable: true });
try {
// Direct probe fails
accessMock.mockRejectedValue(new Error('ENOENT'));
// which fails
execMock.mockResolvedValue({
exitCode: 1,
stdout: '',
stderr: 'not found',
});
const result = await checkBinaryExists('non-existent');
expect(result.exists).toBe(false);
expect(result.path).toBeUndefined();
} finally {
Object.defineProperty(process, 'platform', { value: originalPlatform, configurable: true });
}
});
it('should prefer .exe over .cmd on Windows', async () => {
const originalPlatform = process.platform;
Object.defineProperty(process, 'platform', { value: 'win32', configurable: true });
try {
// Direct probe fails
accessMock.mockRejectedValue(new Error('ENOENT'));
// where returns both .exe and .cmd
execMock.mockResolvedValue({
exitCode: 0,
stdout: 'C:\\path\\to\\binary.cmd\r\nC:\\path\\to\\binary.exe\r\n',
stderr: '',
});
const result = await checkBinaryExists('binary');
expect(result.exists).toBe(true);
// .exe wins even though .cmd appears first in where's output
expect(result.path).toBe('C:\\path\\to\\binary.exe');
} finally {
Object.defineProperty(process, 'platform', { value: originalPlatform, configurable: true });
}
});
it('should handle Windows CRLF line endings', async () => {
const originalPlatform = process.platform;
Object.defineProperty(process, 'platform', { value: 'win32', configurable: true });
try {
accessMock.mockRejectedValue(new Error('ENOENT'));
execMock.mockResolvedValue({
exitCode: 0,
stdout: 'C:\\path\\to\\binary.exe\r\n',
stderr: '',
});
const result = await checkBinaryExists('binary');
expect(result.exists).toBe(true);
expect(result.path).toBe('C:\\path\\to\\binary.exe');
// Path should not contain \r
expect(result.path).not.toContain('\r');
} finally {
Object.defineProperty(process, 'platform', { value: originalPlatform, configurable: true });
}
});
});
// Compile-time checks: path is optional and only meaningful when exists is true.
describe('BinaryDetectionResult type', () => {
it('should allow exists: true with path', () => {
const result: BinaryDetectionResult = {
exists: true,
path: '/usr/local/bin/claude',
};
expect(result.exists).toBe(true);
expect(result.path).toBeDefined();
});
it('should allow exists: false without path', () => {
const result: BinaryDetectionResult = {
exists: false,
};
expect(result.exists).toBe(false);
expect(result.path).toBeUndefined();
});
});
});

View File

@@ -1,4 +1,6 @@
import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
import type Store from 'electron-store';
import type { ClaudeSessionOriginsData } from '../../../main/storage/claude-session-storage';
import {
AgentSessionStorage,
AgentSessionInfo,
@@ -11,8 +13,8 @@ import {
hasSessionStorage,
getAllSessionStorages,
clearStorageRegistry,
} from '../../main/agent-session-storage';
import type { ToolType } from '../../shared/types';
} from '../../../main/agents';
import type { ToolType } from '../../../shared/types';
// Mock storage implementation for testing
class MockSessionStorage implements AgentSessionStorage {
@@ -198,12 +200,12 @@ describe('ClaudeSessionStorage', () => {
// For now, we test that the class can be imported
it('should be importable', async () => {
// Dynamic import to test module loading
const { ClaudeSessionStorage } = await import('../../main/storage/claude-session-storage');
const { ClaudeSessionStorage } = await import('../../../main/storage/claude-session-storage');
expect(ClaudeSessionStorage).toBeDefined();
});
it('should have claude-code as agentId', async () => {
const { ClaudeSessionStorage } = await import('../../main/storage/claude-session-storage');
const { ClaudeSessionStorage } = await import('../../../main/storage/claude-session-storage');
// Create instance without store (it will create its own)
// Note: In a real test, we'd mock electron-store
@@ -214,18 +216,21 @@ describe('ClaudeSessionStorage', () => {
describe('OpenCodeSessionStorage', () => {
it('should be importable', async () => {
const { OpenCodeSessionStorage } = await import('../../main/storage/opencode-session-storage');
const { OpenCodeSessionStorage } =
await import('../../../main/storage/opencode-session-storage');
expect(OpenCodeSessionStorage).toBeDefined();
});
it('should have opencode as agentId', async () => {
const { OpenCodeSessionStorage } = await import('../../main/storage/opencode-session-storage');
const { OpenCodeSessionStorage } =
await import('../../../main/storage/opencode-session-storage');
const storage = new OpenCodeSessionStorage();
expect(storage.agentId).toBe('opencode');
});
it('should return empty results for non-existent projects', async () => {
const { OpenCodeSessionStorage } = await import('../../main/storage/opencode-session-storage');
const { OpenCodeSessionStorage } =
await import('../../../main/storage/opencode-session-storage');
const storage = new OpenCodeSessionStorage();
// Non-existent project should return empty results
@@ -245,7 +250,8 @@ describe('OpenCodeSessionStorage', () => {
});
it('should return message directory path for getSessionPath', async () => {
const { OpenCodeSessionStorage } = await import('../../main/storage/opencode-session-storage');
const { OpenCodeSessionStorage } =
await import('../../../main/storage/opencode-session-storage');
const storage = new OpenCodeSessionStorage();
// getSessionPath returns the message directory for the session
@@ -257,7 +263,8 @@ describe('OpenCodeSessionStorage', () => {
});
it('should fail gracefully when deleting from non-existent session', async () => {
const { OpenCodeSessionStorage } = await import('../../main/storage/opencode-session-storage');
const { OpenCodeSessionStorage } =
await import('../../../main/storage/opencode-session-storage');
const storage = new OpenCodeSessionStorage();
const deleteResult = await storage.deleteMessagePair(
@@ -272,18 +279,18 @@ describe('OpenCodeSessionStorage', () => {
describe('CodexSessionStorage', () => {
it('should be importable', async () => {
const { CodexSessionStorage } = await import('../../main/storage/codex-session-storage');
const { CodexSessionStorage } = await import('../../../main/storage/codex-session-storage');
expect(CodexSessionStorage).toBeDefined();
});
it('should have codex as agentId', async () => {
const { CodexSessionStorage } = await import('../../main/storage/codex-session-storage');
const { CodexSessionStorage } = await import('../../../main/storage/codex-session-storage');
const storage = new CodexSessionStorage();
expect(storage.agentId).toBe('codex');
});
it('should return empty results for non-existent sessions directory', async () => {
const { CodexSessionStorage } = await import('../../main/storage/codex-session-storage');
const { CodexSessionStorage } = await import('../../../main/storage/codex-session-storage');
const storage = new CodexSessionStorage();
// Non-existent project should return empty results (since ~/.codex/sessions/ likely doesn't exist in test)
@@ -306,7 +313,7 @@ describe('CodexSessionStorage', () => {
});
it('should return null for getSessionPath (async operation required)', async () => {
const { CodexSessionStorage } = await import('../../main/storage/codex-session-storage');
const { CodexSessionStorage } = await import('../../../main/storage/codex-session-storage');
const storage = new CodexSessionStorage();
// getSessionPath is synchronous and always returns null for Codex
@@ -316,7 +323,7 @@ describe('CodexSessionStorage', () => {
});
it('should fail gracefully when deleting from non-existent session', async () => {
const { CodexSessionStorage } = await import('../../main/storage/codex-session-storage');
const { CodexSessionStorage } = await import('../../../main/storage/codex-session-storage');
const storage = new CodexSessionStorage();
const deleteResult = await storage.deleteMessagePair(
@@ -329,7 +336,7 @@ describe('CodexSessionStorage', () => {
});
it('should handle empty search query', async () => {
const { CodexSessionStorage } = await import('../../main/storage/codex-session-storage');
const { CodexSessionStorage } = await import('../../../main/storage/codex-session-storage');
const storage = new CodexSessionStorage();
const search = await storage.searchSessions('/test/project', '', 'all');
@@ -342,12 +349,12 @@ describe('CodexSessionStorage', () => {
describe('Storage Module Initialization', () => {
it('should export initializeSessionStorages function', async () => {
const { initializeSessionStorages } = await import('../../main/storage/index');
const { initializeSessionStorages } = await import('../../../main/storage/index');
expect(typeof initializeSessionStorages).toBe('function');
});
it('should export CodexSessionStorage', async () => {
const { CodexSessionStorage } = await import('../../main/storage/index');
const { CodexSessionStorage } = await import('../../../main/storage/index');
expect(CodexSessionStorage).toBeDefined();
});
@@ -355,7 +362,7 @@ describe('Storage Module Initialization', () => {
// This tests that ClaudeSessionStorage can receive an external store
// This prevents the dual-store bug where IPC handlers and storage class
// use different electron-store instances
const { ClaudeSessionStorage } = await import('../../main/storage/claude-session-storage');
const { ClaudeSessionStorage } = await import('../../../main/storage/claude-session-storage');
// Create a mock store
const mockStore = {
@@ -366,14 +373,14 @@ describe('Storage Module Initialization', () => {
// Should be able to create with external store (no throw)
const storage = new ClaudeSessionStorage(
mockStore as unknown as import('electron-store').default
mockStore as unknown as Store<ClaudeSessionOriginsData>
);
expect(storage.agentId).toBe('claude-code');
});
it('should export InitializeSessionStoragesOptions interface', async () => {
// This tests that the options interface is exported for type-safe initialization
const storageModule = await import('../../main/storage/index');
const storageModule = await import('../../../main/storage/index');
// The function should accept options object
expect(typeof storageModule.initializeSessionStorages).toBe('function');
// Function should accept undefined options (backward compatible)
@@ -383,9 +390,8 @@ describe('Storage Module Initialization', () => {
it('should accept claudeSessionOriginsStore in options', async () => {
// This tests the fix for the dual-store bug
// When a shared store is passed, it should be used instead of creating a new one
const { initializeSessionStorages } = await import('../../main/storage/index');
const { getSessionStorage, clearStorageRegistry } =
await import('../../main/agent-session-storage');
const { initializeSessionStorages } = await import('../../../main/storage/index');
const { getSessionStorage, clearStorageRegistry } = await import('../../../main/agents');
// Clear registry first
clearStorageRegistry();
@@ -402,7 +408,7 @@ describe('Storage Module Initialization', () => {
// Initialize with the shared store
// This mimics what main/index.ts does
initializeSessionStorages({
claudeSessionOriginsStore: mockStore as unknown as import('electron-store').default,
claudeSessionOriginsStore: mockStore as unknown as Store<ClaudeSessionOriginsData>,
});
// Verify ClaudeSessionStorage was registered

View File

@@ -13,7 +13,7 @@
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
import * as path from 'path';
import { createZipPackage, PackageContents } from '../packager';
import { createZipPackage, PackageContents } from '../../../main/debug-package/packager';
import AdmZip from 'adm-zip';
// Use the native node:fs module to avoid any vitest mocks

View File

@@ -51,7 +51,7 @@ describe('Debug Package Sanitization', () => {
describe('sanitizePath', () => {
describe('home directory replacement', () => {
it('should replace home directory with ~', async () => {
const { sanitizePath } = await import('../collectors/settings');
const { sanitizePath } = await import('../../../main/debug-package/collectors/settings');
const homeDir = os.homedir();
const testPath = `${homeDir}/Projects/MyApp`;
@@ -62,7 +62,7 @@ describe('Debug Package Sanitization', () => {
});
it('should replace home directory at any position in path', async () => {
const { sanitizePath } = await import('../collectors/settings');
const { sanitizePath } = await import('../../../main/debug-package/collectors/settings');
const homeDir = os.homedir();
const testPath = `${homeDir}/deeply/nested/folder/file.txt`;
@@ -72,7 +72,7 @@ describe('Debug Package Sanitization', () => {
});
it('should handle home directory with trailing slash', async () => {
const { sanitizePath } = await import('../collectors/settings');
const { sanitizePath } = await import('../../../main/debug-package/collectors/settings');
const homeDir = os.homedir();
const testPath = `${homeDir}/`;
@@ -82,7 +82,7 @@ describe('Debug Package Sanitization', () => {
});
it('should handle path that is exactly the home directory', async () => {
const { sanitizePath } = await import('../collectors/settings');
const { sanitizePath } = await import('../../../main/debug-package/collectors/settings');
const homeDir = os.homedir();
const result = sanitizePath(homeDir);
@@ -91,7 +91,7 @@ describe('Debug Package Sanitization', () => {
});
it('should not modify paths that do not contain home directory', async () => {
const { sanitizePath } = await import('../collectors/settings');
const { sanitizePath } = await import('../../../main/debug-package/collectors/settings');
const testPath = '/usr/local/bin/app';
const result = sanitizePath(testPath);
@@ -100,7 +100,7 @@ describe('Debug Package Sanitization', () => {
});
it('should handle empty string', async () => {
const { sanitizePath } = await import('../collectors/settings');
const { sanitizePath } = await import('../../../main/debug-package/collectors/settings');
const result = sanitizePath('');
@@ -110,7 +110,7 @@ describe('Debug Package Sanitization', () => {
describe('Windows path handling', () => {
it('should normalize backslashes to forward slashes', async () => {
const { sanitizePath } = await import('../collectors/settings');
const { sanitizePath } = await import('../../../main/debug-package/collectors/settings');
const testPath = 'C:\\Users\\testuser\\Documents\\Project';
const result = sanitizePath(testPath);
@@ -120,7 +120,8 @@ describe('Debug Package Sanitization', () => {
});
it('should handle Windows-style home directory', async () => {
const { sanitizePath: _sanitizePath } = await import('../collectors/settings');
const { sanitizePath: _sanitizePath } =
await import('../../../main/debug-package/collectors/settings');
// Mock homedir to return Windows-style path
const originalHomedir = os.homedir();
@@ -128,7 +129,8 @@ describe('Debug Package Sanitization', () => {
// Re-import to get fresh module with mocked homedir
vi.resetModules();
const { sanitizePath: freshSanitizePath } = await import('../collectors/settings');
const { sanitizePath: freshSanitizePath } =
await import('../../../main/debug-package/collectors/settings');
const testPath = 'C:\\Users\\testuser\\Documents\\Project';
const result = freshSanitizePath(testPath);
@@ -139,7 +141,7 @@ describe('Debug Package Sanitization', () => {
});
it('should handle mixed slash styles', async () => {
const { sanitizePath } = await import('../collectors/settings');
const { sanitizePath } = await import('../../../main/debug-package/collectors/settings');
const testPath = '/path/to\\mixed\\slashes/file.txt';
const result = sanitizePath(testPath);
@@ -152,7 +154,7 @@ describe('Debug Package Sanitization', () => {
describe('edge cases and type handling', () => {
it('should return null when given null', async () => {
const { sanitizePath } = await import('../collectors/settings');
const { sanitizePath } = await import('../../../main/debug-package/collectors/settings');
// @ts-expect-error - Testing runtime behavior with wrong type
const result = sanitizePath(null);
@@ -161,7 +163,7 @@ describe('Debug Package Sanitization', () => {
});
it('should return undefined when given undefined', async () => {
const { sanitizePath } = await import('../collectors/settings');
const { sanitizePath } = await import('../../../main/debug-package/collectors/settings');
// @ts-expect-error - Testing runtime behavior with wrong type
const result = sanitizePath(undefined);
@@ -170,7 +172,7 @@ describe('Debug Package Sanitization', () => {
});
it('should return numbers unchanged', async () => {
const { sanitizePath } = await import('../collectors/settings');
const { sanitizePath } = await import('../../../main/debug-package/collectors/settings');
// @ts-expect-error - Testing runtime behavior with wrong type
const result = sanitizePath(12345);
@@ -179,7 +181,7 @@ describe('Debug Package Sanitization', () => {
});
it('should return objects unchanged', async () => {
const { sanitizePath } = await import('../collectors/settings');
const { sanitizePath } = await import('../../../main/debug-package/collectors/settings');
const obj = { path: '/some/path' };
// @ts-expect-error - Testing runtime behavior with wrong type
@@ -189,7 +191,7 @@ describe('Debug Package Sanitization', () => {
});
it('should handle paths with spaces', async () => {
const { sanitizePath } = await import('../collectors/settings');
const { sanitizePath } = await import('../../../main/debug-package/collectors/settings');
const homeDir = os.homedir();
const testPath = `${homeDir}/My Documents/Project Files/app.tsx`;
@@ -199,7 +201,7 @@ describe('Debug Package Sanitization', () => {
});
it('should handle paths with special characters', async () => {
const { sanitizePath } = await import('../collectors/settings');
const { sanitizePath } = await import('../../../main/debug-package/collectors/settings');
const homeDir = os.homedir();
const testPath = `${homeDir}/Projects/@company/app-v2.0#beta`;
@@ -209,7 +211,7 @@ describe('Debug Package Sanitization', () => {
});
it('should handle very long paths', async () => {
const { sanitizePath } = await import('../collectors/settings');
const { sanitizePath } = await import('../../../main/debug-package/collectors/settings');
const homeDir = os.homedir();
const longPath = `${homeDir}/` + 'a/'.repeat(100) + 'file.txt';
@@ -228,7 +230,7 @@ describe('Debug Package Sanitization', () => {
describe('API key redaction', () => {
describe('sensitive key detection', () => {
it('should redact apiKey', async () => {
const { collectSettings } = await import('../collectors/settings');
const { collectSettings } = await import('../../../main/debug-package/collectors/settings');
const mockStore = {
get: vi.fn(),
set: vi.fn(),
@@ -242,7 +244,7 @@ describe('Debug Package Sanitization', () => {
});
it('should redact api_key (snake_case)', async () => {
const { collectSettings } = await import('../collectors/settings');
const { collectSettings } = await import('../../../main/debug-package/collectors/settings');
const mockStore = {
get: vi.fn(),
set: vi.fn(),
@@ -255,7 +257,7 @@ describe('Debug Package Sanitization', () => {
});
it('should redact authToken', async () => {
const { collectSettings } = await import('../collectors/settings');
const { collectSettings } = await import('../../../main/debug-package/collectors/settings');
const mockStore = {
get: vi.fn(),
set: vi.fn(),
@@ -268,7 +270,7 @@ describe('Debug Package Sanitization', () => {
});
it('should redact auth_token (snake_case)', async () => {
const { collectSettings } = await import('../collectors/settings');
const { collectSettings } = await import('../../../main/debug-package/collectors/settings');
const mockStore = {
get: vi.fn(),
set: vi.fn(),
@@ -281,7 +283,7 @@ describe('Debug Package Sanitization', () => {
});
it('should redact clientToken', async () => {
const { collectSettings } = await import('../collectors/settings');
const { collectSettings } = await import('../../../main/debug-package/collectors/settings');
const mockStore = {
get: vi.fn(),
set: vi.fn(),
@@ -294,7 +296,7 @@ describe('Debug Package Sanitization', () => {
});
it('should redact client_token (snake_case)', async () => {
const { collectSettings } = await import('../collectors/settings');
const { collectSettings } = await import('../../../main/debug-package/collectors/settings');
const mockStore = {
get: vi.fn(),
set: vi.fn(),
@@ -307,7 +309,7 @@ describe('Debug Package Sanitization', () => {
});
it('should redact password', async () => {
const { collectSettings } = await import('../collectors/settings');
const { collectSettings } = await import('../../../main/debug-package/collectors/settings');
const mockStore = {
get: vi.fn(),
set: vi.fn(),
@@ -320,7 +322,7 @@ describe('Debug Package Sanitization', () => {
});
it('should redact secret', async () => {
const { collectSettings } = await import('../collectors/settings');
const { collectSettings } = await import('../../../main/debug-package/collectors/settings');
const mockStore = {
get: vi.fn(),
set: vi.fn(),
@@ -333,7 +335,7 @@ describe('Debug Package Sanitization', () => {
});
it('should redact credential', async () => {
const { collectSettings } = await import('../collectors/settings');
const { collectSettings } = await import('../../../main/debug-package/collectors/settings');
const mockStore = {
get: vi.fn(),
set: vi.fn(),
@@ -346,7 +348,7 @@ describe('Debug Package Sanitization', () => {
});
it('should redact accessToken', async () => {
const { collectSettings } = await import('../collectors/settings');
const { collectSettings } = await import('../../../main/debug-package/collectors/settings');
const mockStore = {
get: vi.fn(),
set: vi.fn(),
@@ -359,7 +361,7 @@ describe('Debug Package Sanitization', () => {
});
it('should redact access_token (snake_case)', async () => {
const { collectSettings } = await import('../collectors/settings');
const { collectSettings } = await import('../../../main/debug-package/collectors/settings');
const mockStore = {
get: vi.fn(),
set: vi.fn(),
@@ -372,7 +374,7 @@ describe('Debug Package Sanitization', () => {
});
it('should redact refreshToken', async () => {
const { collectSettings } = await import('../collectors/settings');
const { collectSettings } = await import('../../../main/debug-package/collectors/settings');
const mockStore = {
get: vi.fn(),
set: vi.fn(),
@@ -385,7 +387,7 @@ describe('Debug Package Sanitization', () => {
});
it('should redact refresh_token (snake_case)', async () => {
const { collectSettings } = await import('../collectors/settings');
const { collectSettings } = await import('../../../main/debug-package/collectors/settings');
const mockStore = {
get: vi.fn(),
set: vi.fn(),
@@ -398,7 +400,7 @@ describe('Debug Package Sanitization', () => {
});
it('should redact privateKey', async () => {
const { collectSettings } = await import('../collectors/settings');
const { collectSettings } = await import('../../../main/debug-package/collectors/settings');
const mockStore = {
get: vi.fn(),
set: vi.fn(),
@@ -411,7 +413,7 @@ describe('Debug Package Sanitization', () => {
});
it('should redact private_key (snake_case)', async () => {
const { collectSettings } = await import('../collectors/settings');
const { collectSettings } = await import('../../../main/debug-package/collectors/settings');
const mockStore = {
get: vi.fn(),
set: vi.fn(),
@@ -426,7 +428,7 @@ describe('Debug Package Sanitization', () => {
describe('case insensitivity', () => {
it('should redact APIKEY (uppercase)', async () => {
const { collectSettings } = await import('../collectors/settings');
const { collectSettings } = await import('../../../main/debug-package/collectors/settings');
const mockStore = {
get: vi.fn(),
set: vi.fn(),
@@ -439,7 +441,7 @@ describe('Debug Package Sanitization', () => {
});
it('should redact ApiKey (mixed case)', async () => {
const { collectSettings } = await import('../collectors/settings');
const { collectSettings } = await import('../../../main/debug-package/collectors/settings');
const mockStore = {
get: vi.fn(),
set: vi.fn(),
@@ -452,7 +454,7 @@ describe('Debug Package Sanitization', () => {
});
it('should redact API_KEY (uppercase snake_case)', async () => {
const { collectSettings } = await import('../collectors/settings');
const { collectSettings } = await import('../../../main/debug-package/collectors/settings');
const mockStore = {
get: vi.fn(),
set: vi.fn(),
@@ -465,7 +467,7 @@ describe('Debug Package Sanitization', () => {
});
it('should redact PASSWORD (uppercase)', async () => {
const { collectSettings } = await import('../collectors/settings');
const { collectSettings } = await import('../../../main/debug-package/collectors/settings');
const mockStore = {
get: vi.fn(),
set: vi.fn(),
@@ -478,7 +480,7 @@ describe('Debug Package Sanitization', () => {
});
it('should redact Secret (capitalized)', async () => {
const { collectSettings } = await import('../collectors/settings');
const { collectSettings } = await import('../../../main/debug-package/collectors/settings');
const mockStore = {
get: vi.fn(),
set: vi.fn(),
@@ -493,7 +495,7 @@ describe('Debug Package Sanitization', () => {
describe('key name patterns containing sensitive words', () => {
it('should redact myApiKeyValue (key within name)', async () => {
const { collectSettings } = await import('../collectors/settings');
const { collectSettings } = await import('../../../main/debug-package/collectors/settings');
const mockStore = {
get: vi.fn(),
set: vi.fn(),
@@ -506,7 +508,7 @@ describe('Debug Package Sanitization', () => {
});
it('should redact userPassword (password in name)', async () => {
const { collectSettings } = await import('../collectors/settings');
const { collectSettings } = await import('../../../main/debug-package/collectors/settings');
const mockStore = {
get: vi.fn(),
set: vi.fn(),
@@ -519,7 +521,7 @@ describe('Debug Package Sanitization', () => {
});
it('should redact adminSecret (secret in name)', async () => {
const { collectSettings } = await import('../collectors/settings');
const { collectSettings } = await import('../../../main/debug-package/collectors/settings');
const mockStore = {
get: vi.fn(),
set: vi.fn(),
@@ -532,7 +534,7 @@ describe('Debug Package Sanitization', () => {
});
it('should redact bearerAccessToken (accesstoken in name)', async () => {
const { collectSettings } = await import('../collectors/settings');
const { collectSettings } = await import('../../../main/debug-package/collectors/settings');
const mockStore = {
get: vi.fn(),
set: vi.fn(),
@@ -545,7 +547,7 @@ describe('Debug Package Sanitization', () => {
});
it('should redact dbCredential (credential in name)', async () => {
const { collectSettings } = await import('../collectors/settings');
const { collectSettings } = await import('../../../main/debug-package/collectors/settings');
const mockStore = {
get: vi.fn(),
set: vi.fn(),
@@ -560,7 +562,7 @@ describe('Debug Package Sanitization', () => {
describe('nested object handling', () => {
it('should redact sensitive keys in nested objects', async () => {
const { collectSettings } = await import('../collectors/settings');
const { collectSettings } = await import('../../../main/debug-package/collectors/settings');
const mockStore = {
get: vi.fn(),
set: vi.fn(),
@@ -577,7 +579,7 @@ describe('Debug Package Sanitization', () => {
});
it('should redact deeply nested sensitive keys', async () => {
const { collectSettings } = await import('../collectors/settings');
const { collectSettings } = await import('../../../main/debug-package/collectors/settings');
const mockStore = {
get: vi.fn(),
set: vi.fn(),
@@ -602,7 +604,7 @@ describe('Debug Package Sanitization', () => {
});
it('should track sanitized fields with full path', async () => {
const { collectSettings } = await import('../collectors/settings');
const { collectSettings } = await import('../../../main/debug-package/collectors/settings');
const mockStore = {
get: vi.fn(),
set: vi.fn(),
@@ -621,7 +623,7 @@ describe('Debug Package Sanitization', () => {
});
it('should redact multiple sensitive keys at different levels', async () => {
const { collectSettings } = await import('../collectors/settings');
const { collectSettings } = await import('../../../main/debug-package/collectors/settings');
const mockStore = {
get: vi.fn(),
set: vi.fn(),
@@ -646,7 +648,7 @@ describe('Debug Package Sanitization', () => {
describe('array handling', () => {
it('should process arrays containing objects with sensitive keys', async () => {
const { collectSettings } = await import('../collectors/settings');
const { collectSettings } = await import('../../../main/debug-package/collectors/settings');
const mockStore = {
get: vi.fn(),
set: vi.fn(),
@@ -667,7 +669,7 @@ describe('Debug Package Sanitization', () => {
});
it('should handle empty arrays', async () => {
const { collectSettings } = await import('../collectors/settings');
const { collectSettings } = await import('../../../main/debug-package/collectors/settings');
const mockStore = {
get: vi.fn(),
set: vi.fn(),
@@ -680,7 +682,7 @@ describe('Debug Package Sanitization', () => {
});
it('should handle arrays of primitives', async () => {
const { collectSettings } = await import('../collectors/settings');
const { collectSettings } = await import('../../../main/debug-package/collectors/settings');
const mockStore = {
get: vi.fn(),
set: vi.fn(),
@@ -695,7 +697,7 @@ describe('Debug Package Sanitization', () => {
describe('preservation of non-sensitive data', () => {
it('should preserve boolean values', async () => {
const { collectSettings } = await import('../collectors/settings');
const { collectSettings } = await import('../../../main/debug-package/collectors/settings');
const mockStore = {
get: vi.fn(),
set: vi.fn(),
@@ -709,7 +711,7 @@ describe('Debug Package Sanitization', () => {
});
it('should preserve number values', async () => {
const { collectSettings } = await import('../collectors/settings');
const { collectSettings } = await import('../../../main/debug-package/collectors/settings');
const mockStore = {
get: vi.fn(),
set: vi.fn(),
@@ -724,7 +726,7 @@ describe('Debug Package Sanitization', () => {
});
it('should preserve string values without sensitive keywords', async () => {
const { collectSettings } = await import('../collectors/settings');
const { collectSettings } = await import('../../../main/debug-package/collectors/settings');
const mockStore = {
get: vi.fn(),
set: vi.fn(),
@@ -739,7 +741,7 @@ describe('Debug Package Sanitization', () => {
});
it('should preserve null values', async () => {
const { collectSettings } = await import('../collectors/settings');
const { collectSettings } = await import('../../../main/debug-package/collectors/settings');
const mockStore = {
get: vi.fn(),
set: vi.fn(),
@@ -760,7 +762,7 @@ describe('Debug Package Sanitization', () => {
describe('environment variable filtering', () => {
describe('custom env vars masking', () => {
it('should not expose custom env var values in agents collector', async () => {
const { collectAgents } = await import('../collectors/agents');
const { collectAgents } = await import('../../../main/debug-package/collectors/agents');
const mockAgentDetector = {
detectAgents: vi.fn().mockResolvedValue([
@@ -786,7 +788,7 @@ describe('Debug Package Sanitization', () => {
});
it('should indicate env vars are set without showing values', async () => {
const { collectAgents } = await import('../collectors/agents');
const { collectAgents } = await import('../../../main/debug-package/collectors/agents');
const mockAgentDetector = {
detectAgents: vi.fn().mockResolvedValue([
@@ -812,7 +814,7 @@ describe('Debug Package Sanitization', () => {
describe('custom args masking', () => {
it('should not expose custom args values containing secrets', async () => {
const { collectAgents } = await import('../collectors/agents');
const { collectAgents } = await import('../../../main/debug-package/collectors/agents');
const mockAgentDetector = {
detectAgents: vi.fn().mockResolvedValue([
@@ -836,7 +838,7 @@ describe('Debug Package Sanitization', () => {
describe('path-based environment variables', () => {
it('should sanitize custom path settings', async () => {
const { collectSettings } = await import('../collectors/settings');
const { collectSettings } = await import('../../../main/debug-package/collectors/settings');
const homeDir = os.homedir();
const mockStore = {
@@ -855,7 +857,7 @@ describe('Debug Package Sanitization', () => {
});
it('should sanitize folderPath settings', async () => {
const { collectSettings } = await import('../collectors/settings');
const { collectSettings } = await import('../../../main/debug-package/collectors/settings');
const homeDir = os.homedir();
const mockStore = {
@@ -879,7 +881,7 @@ describe('Debug Package Sanitization', () => {
describe('comprehensive sanitization', () => {
it('should sanitize complex settings object with mixed sensitive data', async () => {
const { collectSettings } = await import('../collectors/settings');
const { collectSettings } = await import('../../../main/debug-package/collectors/settings');
const homeDir = os.homedir();
const mockStore = {
@@ -931,7 +933,7 @@ describe('Debug Package Sanitization', () => {
});
it('should track all sanitized fields', async () => {
const { collectSettings } = await import('../collectors/settings');
const { collectSettings } = await import('../../../main/debug-package/collectors/settings');
const homeDir = os.homedir();
const mockStore = {
@@ -952,7 +954,7 @@ describe('Debug Package Sanitization', () => {
});
it('should produce output that contains no home directory paths for recognized path keys', async () => {
const { collectSettings } = await import('../collectors/settings');
const { collectSettings } = await import('../../../main/debug-package/collectors/settings');
const homeDir = os.homedir();
const mockStore = {
@@ -980,7 +982,7 @@ describe('Debug Package Sanitization', () => {
});
it('should not sanitize paths in array values (by design)', async () => {
const { collectSettings } = await import('../collectors/settings');
const { collectSettings } = await import('../../../main/debug-package/collectors/settings');
const homeDir = os.homedir();
// Note: Arrays of string paths are NOT sanitized by design
@@ -1002,7 +1004,7 @@ describe('Debug Package Sanitization', () => {
});
it('should produce output that contains no API keys or secrets', async () => {
const { collectSettings } = await import('../collectors/settings');
const { collectSettings } = await import('../../../main/debug-package/collectors/settings');
const secrets = [
'sk-1234567890abcdef',

View File

@@ -63,7 +63,7 @@ import {
GroupChatParticipant,
} from '../../../main/group-chat/group-chat-storage';
import { readLog } from '../../../main/group-chat/group-chat-log';
import { AgentDetector } from '../../../main/agent-detector';
import { AgentDetector } from '../../../main/agents';
describe('group-chat-router', () => {
let mockProcessManager: IProcessManager;

View File

@@ -8,7 +8,7 @@
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import { ipcMain } from 'electron';
import { registerAgentSessionsHandlers } from '../../../../main/ipc/handlers/agentSessions';
import * as agentSessionStorage from '../../../../main/agent-session-storage';
import * as agentSessionStorage from '../../../../main/agents';
// Mock electron's ipcMain
vi.mock('electron', () => ({
@@ -18,8 +18,8 @@ vi.mock('electron', () => ({
},
}));
// Mock the agent-session-storage module
vi.mock('../../../../main/agent-session-storage', () => ({
// Mock the agents module (session storage exports)
vi.mock('../../../../main/agents', () => ({
getSessionStorage: vi.fn(),
hasSessionStorage: vi.fn(),
getAllSessionStorages: vi.fn(),

View File

@@ -10,7 +10,7 @@ import {
registerAgentsHandlers,
AgentsHandlerDependencies,
} from '../../../../main/ipc/handlers/agents';
import * as agentCapabilities from '../../../../main/agent-capabilities';
import * as agentCapabilities from '../../../../main/agents';
// Mock electron's ipcMain
vi.mock('electron', () => ({
@@ -20,8 +20,8 @@ vi.mock('electron', () => ({
},
}));
// Mock agent-capabilities module
vi.mock('../../../../main/agent-capabilities', () => ({
// Mock agents module (capabilities exports)
vi.mock('../../../../main/agents', () => ({
getAgentCapabilities: vi.fn(),
DEFAULT_CAPABILITIES: {
supportsResume: false,

View File

@@ -13,7 +13,7 @@ import {
DebugHandlerDependencies,
} from '../../../../main/ipc/handlers/debug';
import * as debugPackage from '../../../../main/debug-package';
import { AgentDetector } from '../../../../main/agent-detector';
import { AgentDetector } from '../../../../main/agents';
import { ProcessManager } from '../../../../main/process-manager';
import { WebServer } from '../../../../main/web-server';

View File

@@ -8,8 +8,8 @@
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import { ipcMain, BrowserWindow } from 'electron';
import { registerStatsHandlers } from '../../../../main/ipc/handlers/stats';
import * as statsDbModule from '../../../../main/stats-db';
import type { StatsDB } from '../../../../main/stats-db';
import * as statsDbModule from '../../../../main/stats';
import type { StatsDB } from '../../../../main/stats';
// Mock electron's ipcMain and BrowserWindow
vi.mock('electron', () => ({
@@ -21,7 +21,7 @@ vi.mock('electron', () => ({
}));
// Mock the stats-db module
vi.mock('../../../../main/stats-db', () => ({
vi.mock('../../../../main/stats', () => ({
getStatsDB: vi.fn(),
getInitializationResult: vi.fn(),
clearInitializationResult: vi.fn(),

View File

@@ -9,7 +9,7 @@ import { describe, it, expect, beforeEach, vi, Mock } from 'vitest';
import { ipcMain } from 'electron';
import { registerTabNamingHandlers } from '../../../../main/ipc/handlers/tabNaming';
import type { ProcessManager } from '../../../../main/process-manager';
import type { AgentDetector, AgentConfig } from '../../../../main/agent-detector';
import type { AgentDetector, AgentConfig } from '../../../../main/agents';
// Mock the logger
vi.mock('../../../../main/utils/logger', () => ({

View File

@@ -2,6 +2,7 @@
* Tests for usage aggregator utilities
*/
import { describe, expect, it } from 'vitest';
import {
aggregateModelUsage,
estimateContextUsage,
@@ -96,15 +97,32 @@ describe('estimateContextUsage', () => {
expect(result).toBe(10);
});
it('should cap at 100%', () => {
it('should correctly calculate for Claude with all token types', () => {
// Simulates a real Claude response: input + cacheRead + cacheCreation = total
const stats = createStats({
inputTokens: 150000,
outputTokens: 100000,
inputTokens: 2,
cacheReadInputTokens: 33541,
cacheCreationInputTokens: 11657,
outputTokens: 12,
contextWindow: 200000,
});
const result = estimateContextUsage(stats, 'claude-code');
// Output tokens excluded; 150k / 200k = 75%
expect(result).toBe(75);
// (2 + 33541 + 11657) / 200000 = 45200 / 200000 = 22.6% -> 23%
expect(result).toBe(23);
});
it('should return null when tokens exceed context window (accumulated values)', () => {
// When Claude Code does complex multi-tool turns, token values accumulate
// across internal API calls and can exceed the context window
const stats = createStats({
inputTokens: 21627,
cacheReadInputTokens: 1079415,
cacheCreationInputTokens: 39734,
contextWindow: 200000,
});
const result = estimateContextUsage(stats, 'claude-code');
// Total = 1,140,776 > 200,000 -> null (accumulated, skip update)
expect(result).toBeNull();
});
});
@@ -112,6 +130,7 @@ describe('estimateContextUsage', () => {
it('should use claude-code default context window (200k)', () => {
const stats = createStats({ contextWindow: 0 });
const result = estimateContextUsage(stats, 'claude-code');
// 10000 + 0 + 0 = 10000 / 200000 = 5%
expect(result).toBe(5);
});
@@ -149,6 +168,18 @@ describe('estimateContextUsage', () => {
const result = estimateContextUsage(stats, 'claude-code');
expect(result).toBe(0);
});
it('should return null when accumulated tokens exceed default window', () => {
const stats = createStats({
inputTokens: 50000,
cacheReadInputTokens: 500000,
cacheCreationInputTokens: 10000,
contextWindow: 0,
});
const result = estimateContextUsage(stats, 'claude-code');
// 560000 > 200000 default -> null
expect(result).toBeNull();
});
});
});
@@ -166,47 +197,62 @@ describe('calculateContextTokens', () => {
...overrides,
});
it('should include input, cacheRead, and cacheCreation tokens for Claude agents', () => {
it('should include input + cacheRead + cacheCreation for Claude agents', () => {
const stats = createStats();
const result = calculateContextTokens(stats, 'claude-code');
// 10000 + 1000 + 2000 = 13000 (all context tokens, excludes output)
// 10000 + 2000 + 1000 = 13000 (all input token types, excludes output)
expect(result).toBe(13000);
});
it('should include output tokens in addition to all context tokens for Codex agents', () => {
it('should include input + cacheCreation + output for Codex agents', () => {
const stats = createStats();
const result = calculateContextTokens(stats, 'codex');
// 10000 + 5000 + 1000 + 2000 = 18000 (includes output and all cache tokens)
expect(result).toBe(18000);
// 10000 + 1000 + 5000 = 16000 (combined input+output window)
expect(result).toBe(16000);
});
it('should default to Claude behavior when agent is undefined', () => {
const stats = createStats();
const result = calculateContextTokens(stats);
// 10000 + 1000 + 2000 = 13000 (includes cacheRead, defaults to Claude behavior)
// 10000 + 2000 + 1000 = 13000 (Claude default: all input token types)
expect(result).toBe(13000);
});
it('should include cacheReadInputTokens in context calculation', () => {
// Cached tokens still occupy context window space - they're just cheaper to process.
// This matches ClawdBot's working implementation: input + cacheRead + cacheWrite
it('should calculate correctly for typical first Claude turn', () => {
// Real-world scenario: first message with system prompt cache
const stats = createStats({
inputTokens: 2,
cacheReadInputTokens: 33541,
cacheCreationInputTokens: 11657,
outputTokens: 12,
});
const result = calculateContextTokens(stats, 'claude-code');
// 2 + 33541 + 11657 = 45200 (total context for the API call)
expect(result).toBe(45200);
});
it('should handle accumulated values from multi-tool turns', () => {
// When values are accumulated across internal API calls,
// the total can exceed the context window. calculateContextTokens
// returns the raw total; callers must check against contextWindow.
const stats = createStats({
inputTokens: 5000,
cacheCreationInputTokens: 1000,
cacheReadInputTokens: 100000, // Represents actual cached context size
cacheReadInputTokens: 500000, // Accumulated from many internal calls
});
const result = calculateContextTokens(stats, 'claude-code');
// 5000 + 1000 + 100000 = 106000 (all context tokens)
expect(result).toBe(106000);
// 5000 + 500000 + 1000 = 506000 (raw total, may exceed window)
expect(result).toBe(506000);
});
});
describe('DEFAULT_CONTEXT_WINDOWS', () => {
  it('should have context windows defined for all known agent types', () => {
    // Pin the fallback window sizes used when a session does not report its own
    // context window, so an agent added without a default fails loudly here.
    expect(DEFAULT_CONTEXT_WINDOWS['claude-code']).toBe(200000);
    expect(DEFAULT_CONTEXT_WINDOWS['claude']).toBe(200000);
    expect(DEFAULT_CONTEXT_WINDOWS['codex']).toBe(200000);
    expect(DEFAULT_CONTEXT_WINDOWS['opencode']).toBe(128000);
    expect(DEFAULT_CONTEXT_WINDOWS['factory-droid']).toBe(200000);
    expect(DEFAULT_CONTEXT_WINDOWS['aider']).toBe(128000);
    // NOTE(review): 'terminal' maps to 0 — presumably "no LLM context window";
    // confirm against the DEFAULT_CONTEXT_WINDOWS definition.
    expect(DEFAULT_CONTEXT_WINDOWS['terminal']).toBe(0);
  });
});

View File

@@ -0,0 +1,324 @@
/**
* Tests for data listener.
* Handles process output data including group chat buffering and web broadcasting.
*/
import { describe, it, expect, vi, beforeEach } from 'vitest';
import { setupDataListener } from '../../../main/process-listeners/data-listener';
import type { ProcessManager } from '../../../main/process-manager';
import type { SafeSendFn } from '../../../main/utils/safe-send';
import type { ProcessListenerDependencies } from '../../../main/process-listeners/types';
describe('Data Listener', () => {
let mockProcessManager: ProcessManager;
let mockSafeSend: SafeSendFn;
let mockGetWebServer: ProcessListenerDependencies['getWebServer'];
let mockWebServer: { broadcastToSessionClients: ReturnType<typeof vi.fn> };
let mockOutputBuffer: ProcessListenerDependencies['outputBuffer'];
let mockOutputParser: ProcessListenerDependencies['outputParser'];
let mockDebugLog: ProcessListenerDependencies['debugLog'];
let mockPatterns: ProcessListenerDependencies['patterns'];
let eventHandlers: Map<string, (...args: unknown[]) => void>;
beforeEach(() => {
vi.clearAllMocks();
eventHandlers = new Map();
mockSafeSend = vi.fn();
mockWebServer = {
broadcastToSessionClients: vi.fn(),
};
mockGetWebServer = vi.fn().mockReturnValue(mockWebServer);
mockOutputBuffer = {
appendToGroupChatBuffer: vi.fn().mockReturnValue(100),
getGroupChatBufferedOutput: vi.fn().mockReturnValue('test output'),
clearGroupChatBuffer: vi.fn(),
};
mockOutputParser = {
extractTextFromStreamJson: vi.fn().mockReturnValue('parsed response'),
parseParticipantSessionId: vi.fn().mockReturnValue(null),
};
mockDebugLog = vi.fn();
mockPatterns = {
REGEX_MODERATOR_SESSION: /^group-chat-(.+)-moderator-/,
REGEX_MODERATOR_SESSION_TIMESTAMP: /^group-chat-(.+)-moderator-\d+$/,
REGEX_AI_SUFFIX: /-ai-[^-]+$/,
REGEX_AI_TAB_ID: /-ai-([^-]+)$/,
REGEX_BATCH_SESSION: /-batch-\d+$/,
REGEX_SYNOPSIS_SESSION: /-synopsis-\d+$/,
};
mockProcessManager = {
on: vi.fn((event: string, handler: (...args: unknown[]) => void) => {
eventHandlers.set(event, handler);
}),
} as unknown as ProcessManager;
});
const setupListener = () => {
setupDataListener(mockProcessManager, {
safeSend: mockSafeSend,
getWebServer: mockGetWebServer,
outputBuffer: mockOutputBuffer,
outputParser: mockOutputParser,
debugLog: mockDebugLog,
patterns: mockPatterns,
});
};
describe('Event Registration', () => {
it('should register the data event listener', () => {
setupListener();
expect(mockProcessManager.on).toHaveBeenCalledWith('data', expect.any(Function));
});
});
describe('Regular Process Data', () => {
it('should forward data to renderer for non-group-chat sessions', () => {
setupListener();
const handler = eventHandlers.get('data');
handler?.('regular-session-123', 'test output');
expect(mockSafeSend).toHaveBeenCalledWith(
'process:data',
'regular-session-123',
'test output'
);
});
it('should broadcast to web clients for AI sessions', () => {
setupListener();
const handler = eventHandlers.get('data');
handler?.('session-123-ai-tab1', 'test output');
expect(mockWebServer.broadcastToSessionClients).toHaveBeenCalledWith(
'session-123',
expect.objectContaining({
type: 'session_output',
sessionId: 'session-123',
tabId: 'tab1',
data: 'test output',
source: 'ai',
})
);
});
it('should extract base session ID correctly', () => {
setupListener();
const handler = eventHandlers.get('data');
handler?.('my-session-ai-mytab', 'test output');
expect(mockWebServer.broadcastToSessionClients).toHaveBeenCalledWith(
'my-session',
expect.objectContaining({
sessionId: 'my-session',
tabId: 'mytab',
})
);
});
});
describe('Moderator Output Buffering', () => {
it('should buffer moderator output instead of forwarding', () => {
setupListener();
const handler = eventHandlers.get('data');
const sessionId = 'group-chat-test-chat-123-moderator-abc123';
handler?.(sessionId, 'moderator output');
expect(mockOutputBuffer.appendToGroupChatBuffer).toHaveBeenCalledWith(
sessionId,
'moderator output'
);
expect(mockSafeSend).not.toHaveBeenCalled();
});
it('should extract group chat ID from moderator session', () => {
setupListener();
const handler = eventHandlers.get('data');
const sessionId = 'group-chat-my-chat-id-moderator-12345';
handler?.(sessionId, 'test');
expect(mockDebugLog).toHaveBeenCalledWith(
'GroupChat:Debug',
expect.stringContaining('my-chat-id')
);
});
it('should warn when buffer size exceeds limit', () => {
mockOutputBuffer.appendToGroupChatBuffer = vi.fn().mockReturnValue(15 * 1024 * 1024); // 15MB
setupListener();
const handler = eventHandlers.get('data');
const sessionId = 'group-chat-test-chat-123-moderator-abc123';
handler?.(sessionId, 'large output');
expect(mockDebugLog).toHaveBeenCalledWith(
'GroupChat:Debug',
expect.stringContaining('WARNING: Buffer size')
);
});
});
describe('Participant Output Buffering', () => {
beforeEach(() => {
mockOutputParser.parseParticipantSessionId = vi.fn().mockReturnValue({
groupChatId: 'test-chat-123',
participantName: 'TestAgent',
});
});
it('should buffer participant output instead of forwarding', () => {
setupListener();
const handler = eventHandlers.get('data');
const sessionId = 'group-chat-test-chat-123-participant-TestAgent-abc123';
handler?.(sessionId, 'participant output');
expect(mockOutputBuffer.appendToGroupChatBuffer).toHaveBeenCalledWith(
sessionId,
'participant output'
);
expect(mockSafeSend).not.toHaveBeenCalled();
});
it('should warn when participant buffer size exceeds limit', () => {
mockOutputBuffer.appendToGroupChatBuffer = vi.fn().mockReturnValue(15 * 1024 * 1024); // 15MB
setupListener();
const handler = eventHandlers.get('data');
const sessionId = 'group-chat-test-chat-123-participant-TestAgent-abc123';
handler?.(sessionId, 'large output');
expect(mockDebugLog).toHaveBeenCalledWith(
'GroupChat:Debug',
expect.stringContaining('WARNING: Buffer size')
);
});
});
describe('Web Broadcast Filtering', () => {
it('should skip PTY terminal output', () => {
setupListener();
const handler = eventHandlers.get('data');
handler?.('session-123-terminal', 'terminal output');
expect(mockWebServer.broadcastToSessionClients).not.toHaveBeenCalled();
// But should still forward to renderer
expect(mockSafeSend).toHaveBeenCalledWith(
'process:data',
'session-123-terminal',
'terminal output'
);
});
it('should skip batch session output using regex pattern', () => {
setupListener();
const handler = eventHandlers.get('data');
handler?.('session-123-batch-1234567890', 'batch output');
expect(mockWebServer.broadcastToSessionClients).not.toHaveBeenCalled();
});
it('should skip synopsis session output using regex pattern', () => {
setupListener();
const handler = eventHandlers.get('data');
handler?.('session-123-synopsis-1234567890', 'synopsis output');
expect(mockWebServer.broadcastToSessionClients).not.toHaveBeenCalled();
});
it('should NOT skip sessions with "batch" in UUID (false positive prevention)', () => {
setupListener();
const handler = eventHandlers.get('data');
// Session ID with "batch" in the UUID but not matching the pattern -batch-{digits}
handler?.('session-batch-uuid-ai-tab1', 'output');
// Should broadcast because it doesn't match the -batch-\d+$ pattern
expect(mockWebServer.broadcastToSessionClients).toHaveBeenCalled();
});
it('should broadcast when no web server is available', () => {
mockGetWebServer = vi.fn().mockReturnValue(null);
setupListener();
const handler = eventHandlers.get('data');
handler?.('session-123-ai-tab1', 'test output');
// Should still forward to renderer
expect(mockSafeSend).toHaveBeenCalledWith(
'process:data',
'session-123-ai-tab1',
'test output'
);
// But not broadcast (no web server)
expect(mockWebServer.broadcastToSessionClients).not.toHaveBeenCalled();
});
});
describe('Message ID Generation', () => {
it('should generate unique message IDs for broadcasts', () => {
setupListener();
const handler = eventHandlers.get('data');
handler?.('session-123-ai-tab1', 'output 1');
handler?.('session-123-ai-tab1', 'output 2');
const calls = mockWebServer.broadcastToSessionClients.mock.calls;
const msgId1 = calls[0][1].msgId;
const msgId2 = calls[1][1].msgId;
expect(msgId1).toBeDefined();
expect(msgId2).toBeDefined();
expect(msgId1).not.toBe(msgId2);
});
it('should include timestamp in message ID', () => {
const beforeTime = Date.now();
setupListener();
const handler = eventHandlers.get('data');
handler?.('session-123-ai-tab1', 'test output');
const msgId = mockWebServer.broadcastToSessionClients.mock.calls[0][1].msgId;
const timestamp = parseInt(msgId.split('-')[0], 10);
expect(timestamp).toBeGreaterThanOrEqual(beforeTime);
expect(timestamp).toBeLessThanOrEqual(Date.now());
});
});
describe('Source Detection', () => {
it('should identify AI source from session ID', () => {
setupListener();
const handler = eventHandlers.get('data');
handler?.('session-123-ai-tab1', 'ai output');
expect(mockWebServer.broadcastToSessionClients).toHaveBeenCalledWith(
expect.anything(),
expect.objectContaining({ source: 'ai' })
);
});
it('should identify terminal source for non-AI sessions', () => {
setupListener();
const handler = eventHandlers.get('data');
handler?.('session-123', 'terminal output');
expect(mockWebServer.broadcastToSessionClients).toHaveBeenCalledWith(
expect.anything(),
expect.objectContaining({ source: 'terminal' })
);
});
});
});

View File

@@ -0,0 +1,118 @@
/**
* Tests for error listener.
* Handles agent errors (auth expired, token exhaustion, rate limits, etc.).
*/
import { describe, it, expect, vi, beforeEach } from 'vitest';
import { setupErrorListener } from '../../../main/process-listeners/error-listener';
import type { ProcessManager } from '../../../main/process-manager';
import type { SafeSendFn } from '../../../main/utils/safe-send';
import type { AgentError } from '../../../shared/types';
import type { ProcessListenerDependencies } from '../../../main/process-listeners/types';
describe('Error Listener', () => {
let mockProcessManager: ProcessManager;
let mockSafeSend: SafeSendFn;
let mockLogger: ProcessListenerDependencies['logger'];
let eventHandlers: Map<string, (...args: unknown[]) => void>;
beforeEach(() => {
vi.clearAllMocks();
eventHandlers = new Map();
mockSafeSend = vi.fn();
mockLogger = {
info: vi.fn(),
error: vi.fn(),
warn: vi.fn(),
debug: vi.fn(),
};
mockProcessManager = {
on: vi.fn((event: string, handler: (...args: unknown[]) => void) => {
eventHandlers.set(event, handler);
}),
} as unknown as ProcessManager;
});
it('should register the agent-error event listener', () => {
setupErrorListener(mockProcessManager, { safeSend: mockSafeSend, logger: mockLogger });
expect(mockProcessManager.on).toHaveBeenCalledWith('agent-error', expect.any(Function));
});
it('should log agent error and forward to renderer', () => {
setupErrorListener(mockProcessManager, { safeSend: mockSafeSend, logger: mockLogger });
const handler = eventHandlers.get('agent-error');
const testSessionId = 'test-session-123';
const testAgentError: AgentError = {
type: 'auth_expired',
agentId: 'claude-code',
message: 'Authentication token has expired',
recoverable: true,
timestamp: Date.now(),
};
handler?.(testSessionId, testAgentError);
expect(mockLogger.info).toHaveBeenCalledWith(
'Agent error detected: auth_expired',
'AgentError',
expect.objectContaining({
sessionId: testSessionId,
agentId: 'claude-code',
errorType: 'auth_expired',
message: 'Authentication token has expired',
recoverable: true,
})
);
expect(mockSafeSend).toHaveBeenCalledWith('agent:error', testSessionId, testAgentError);
});
it('should handle token exhaustion errors', () => {
setupErrorListener(mockProcessManager, { safeSend: mockSafeSend, logger: mockLogger });
const handler = eventHandlers.get('agent-error');
const testSessionId = 'session-456';
const testAgentError: AgentError = {
type: 'token_exhaustion',
agentId: 'codex',
message: 'Token limit exceeded',
recoverable: false,
timestamp: Date.now(),
};
handler?.(testSessionId, testAgentError);
expect(mockSafeSend).toHaveBeenCalledWith('agent:error', testSessionId, testAgentError);
});
it('should handle rate limit errors', () => {
setupErrorListener(mockProcessManager, { safeSend: mockSafeSend, logger: mockLogger });
const handler = eventHandlers.get('agent-error');
const testSessionId = 'session-789';
const testAgentError: AgentError = {
type: 'rate_limited',
agentId: 'opencode',
message: 'Rate limit exceeded, retry after 60 seconds',
recoverable: true,
timestamp: Date.now(),
};
handler?.(testSessionId, testAgentError);
expect(mockLogger.info).toHaveBeenCalledWith(
'Agent error detected: rate_limited',
'AgentError',
expect.objectContaining({
sessionId: testSessionId,
errorType: 'rate_limited',
})
);
expect(mockSafeSend).toHaveBeenCalledWith('agent:error', testSessionId, testAgentError);
});
});

View File

@@ -0,0 +1,420 @@
/**
* Tests for exit listener.
* Handles process exit events including group chat moderator/participant exits.
*/
import { describe, it, expect, vi, beforeEach } from 'vitest';
import { setupExitListener } from '../../../main/process-listeners/exit-listener';
import type { ProcessManager } from '../../../main/process-manager';
import type { ProcessListenerDependencies } from '../../../main/process-listeners/types';
describe('Exit Listener', () => {
  let mockProcessManager: ProcessManager;
  let mockDeps: Parameters<typeof setupExitListener>[1];
  // Handlers captured from processManager.on(event, handler) so tests can
  // fire the 'exit' event directly.
  let eventHandlers: Map<string, (...args: unknown[]) => void>;

  // Create a minimal mock group chat
  const createMockGroupChat = () => ({
    id: 'test-chat-123',
    name: 'Test Chat',
    moderatorAgentId: 'claude-code',
    moderatorSessionId: 'group-chat-test-chat-123-moderator',
    participants: [
      {
        name: 'TestAgent',
        agentId: 'claude-code',
        sessionId: 'group-chat-test-chat-123-participant-TestAgent-abc123',
        addedAt: Date.now(),
      },
    ],
    createdAt: Date.now(),
    updatedAt: Date.now(),
    logPath: '/tmp/test-chat.log',
    imagesDir: '/tmp/test-chat-images',
  });

  beforeEach(() => {
    vi.clearAllMocks();
    eventHandlers = new Map();
    mockProcessManager = {
      on: vi.fn((event: string, handler: (...args: unknown[]) => void) => {
        eventHandlers.set(event, handler);
      }),
    } as unknown as ProcessManager;
    // Full dependency surface of setupExitListener; defaults describe the
    // "happy path" (no recovery needed, buffered output parses cleanly).
    mockDeps = {
      safeSend: vi.fn(),
      powerManager: {
        addBlockReason: vi.fn(),
        removeBlockReason: vi.fn(),
      },
      groupChatEmitters: {
        emitStateChange: vi.fn(),
        emitParticipantState: vi.fn(),
        emitParticipantsChanged: vi.fn(),
        emitModeratorUsage: vi.fn(),
      },
      groupChatRouter: {
        routeModeratorResponse: vi.fn().mockResolvedValue(undefined),
        routeAgentResponse: vi.fn().mockResolvedValue(undefined),
        markParticipantResponded: vi.fn().mockResolvedValue(undefined),
        spawnModeratorSynthesis: vi.fn().mockResolvedValue(undefined),
        getGroupChatReadOnlyState: vi.fn().mockReturnValue(false),
        respawnParticipantWithRecovery: vi.fn().mockResolvedValue(undefined),
      },
      groupChatStorage: {
        loadGroupChat: vi.fn().mockResolvedValue(createMockGroupChat()),
        updateGroupChat: vi.fn().mockResolvedValue(createMockGroupChat()),
        updateParticipant: vi.fn().mockResolvedValue(createMockGroupChat()),
      },
      sessionRecovery: {
        needsSessionRecovery: vi.fn().mockReturnValue(false),
        initiateSessionRecovery: vi.fn().mockResolvedValue(true),
      },
      outputBuffer: {
        appendToGroupChatBuffer: vi.fn().mockReturnValue(100),
        getGroupChatBufferedOutput: vi.fn().mockReturnValue('{"type":"text","text":"test output"}'),
        clearGroupChatBuffer: vi.fn(),
      },
      outputParser: {
        extractTextFromStreamJson: vi.fn().mockReturnValue('parsed response'),
        parseParticipantSessionId: vi.fn().mockReturnValue(null),
      },
      getProcessManager: () => mockProcessManager,
      getAgentDetector: () =>
        ({
          detectAgents: vi.fn(),
        }) as unknown as ReturnType<ProcessListenerDependencies['getAgentDetector']>,
      getWebServer: () => null,
      logger: {
        info: vi.fn(),
        error: vi.fn(),
        warn: vi.fn(),
        debug: vi.fn(),
      },
      debugLog: vi.fn(),
      patterns: {
        REGEX_MODERATOR_SESSION: /^group-chat-(.+)-moderator-/,
        REGEX_MODERATOR_SESSION_TIMESTAMP: /^group-chat-(.+)-moderator-\d+$/,
        REGEX_AI_SUFFIX: /-ai-[^-]+$/,
        REGEX_AI_TAB_ID: /-ai-([^-]+)$/,
        REGEX_BATCH_SESSION: /-batch-\d+$/,
        REGEX_SYNOPSIS_SESSION: /-synopsis-\d+$/,
      },
    };
  });

  /** Wire the exit listener with the current mock dependencies. */
  const setupListener = () => {
    setupExitListener(mockProcessManager, mockDeps);
  };

  describe('Event Registration', () => {
    it('should register the exit event listener', () => {
      setupListener();
      expect(mockProcessManager.on).toHaveBeenCalledWith('exit', expect.any(Function));
    });
  });

  describe('Regular Process Exit', () => {
    it('should forward exit event to renderer for non-group-chat sessions', () => {
      setupListener();
      const handler = eventHandlers.get('exit');
      handler?.('regular-session-123', 0);
      expect(mockDeps.safeSend).toHaveBeenCalledWith('process:exit', 'regular-session-123', 0);
    });

    it('should remove power block for non-group-chat sessions', () => {
      setupListener();
      const handler = eventHandlers.get('exit');
      handler?.('regular-session-123', 0);
      // Exiting sessions must release their sleep-prevention block.
      expect(mockDeps.powerManager.removeBlockReason).toHaveBeenCalledWith(
        'session:regular-session-123'
      );
    });
  });

  describe('Participant Exit', () => {
    beforeEach(() => {
      // Make the parser recognize the session as a group-chat participant.
      mockDeps.outputParser.parseParticipantSessionId = vi.fn().mockReturnValue({
        groupChatId: 'test-chat-123',
        participantName: 'TestAgent',
      });
    });

    it('should parse and route participant response on exit', async () => {
      setupListener();
      const handler = eventHandlers.get('exit');
      const sessionId = 'group-chat-test-chat-123-participant-TestAgent-abc123';
      handler?.(sessionId, 0);
      // Routing happens asynchronously after the exit event fires.
      await vi.waitFor(() => {
        expect(mockDeps.groupChatRouter.routeAgentResponse).toHaveBeenCalledWith(
          'test-chat-123',
          'TestAgent',
          'parsed response',
          expect.anything()
        );
      });
    });

    it('should mark participant as responded after successful routing', async () => {
      setupListener();
      const handler = eventHandlers.get('exit');
      const sessionId = 'group-chat-test-chat-123-participant-TestAgent-abc123';
      handler?.(sessionId, 0);
      await vi.waitFor(() => {
        expect(mockDeps.groupChatRouter.markParticipantResponded).toHaveBeenCalledWith(
          'test-chat-123',
          'TestAgent'
        );
      });
    });

    it('should clear output buffer after processing', async () => {
      setupListener();
      const handler = eventHandlers.get('exit');
      const sessionId = 'group-chat-test-chat-123-participant-TestAgent-abc123';
      handler?.(sessionId, 0);
      await vi.waitFor(() => {
        expect(mockDeps.outputBuffer.clearGroupChatBuffer).toHaveBeenCalledWith(sessionId);
      });
    });

    it('should not route when buffered output is empty', async () => {
      mockDeps.outputBuffer.getGroupChatBufferedOutput = vi.fn().mockReturnValue('');
      setupListener();
      const handler = eventHandlers.get('exit');
      const sessionId = 'group-chat-test-chat-123-participant-TestAgent-abc123';
      handler?.(sessionId, 0);
      // Give async operations time to complete
      await new Promise((resolve) => setTimeout(resolve, 50));
      expect(mockDeps.groupChatRouter.routeAgentResponse).not.toHaveBeenCalled();
    });

    it('should not route when parsed text is empty', async () => {
      // Whitespace-only parse result should be treated the same as empty.
      mockDeps.outputParser.extractTextFromStreamJson = vi.fn().mockReturnValue('   ');
      setupListener();
      const handler = eventHandlers.get('exit');
      const sessionId = 'group-chat-test-chat-123-participant-TestAgent-abc123';
      handler?.(sessionId, 0);
      // Give async operations time to complete
      await new Promise((resolve) => setTimeout(resolve, 50));
      expect(mockDeps.groupChatRouter.routeAgentResponse).not.toHaveBeenCalled();
    });
  });

  describe('Session Recovery', () => {
    beforeEach(() => {
      mockDeps.outputParser.parseParticipantSessionId = vi.fn().mockReturnValue({
        groupChatId: 'test-chat-123',
        participantName: 'TestAgent',
      });
      // Force the recovery path for every test in this section.
      mockDeps.sessionRecovery.needsSessionRecovery = vi.fn().mockReturnValue(true);
    });

    it('should initiate session recovery when needed', async () => {
      setupListener();
      const handler = eventHandlers.get('exit');
      const sessionId = 'group-chat-test-chat-123-participant-TestAgent-abc123';
      handler?.(sessionId, 0);
      await vi.waitFor(() => {
        expect(mockDeps.sessionRecovery.initiateSessionRecovery).toHaveBeenCalledWith(
          'test-chat-123',
          'TestAgent'
        );
      });
    });

    it('should respawn participant after recovery initiation', async () => {
      setupListener();
      const handler = eventHandlers.get('exit');
      const sessionId = 'group-chat-test-chat-123-participant-TestAgent-abc123';
      handler?.(sessionId, 0);
      await vi.waitFor(() => {
        expect(mockDeps.groupChatRouter.respawnParticipantWithRecovery).toHaveBeenCalledWith(
          'test-chat-123',
          'TestAgent',
          expect.anything(),
          expect.anything()
        );
      });
    });

    it('should clear buffer before initiating recovery', async () => {
      setupListener();
      const handler = eventHandlers.get('exit');
      const sessionId = 'group-chat-test-chat-123-participant-TestAgent-abc123';
      handler?.(sessionId, 0);
      await vi.waitFor(() => {
        expect(mockDeps.outputBuffer.clearGroupChatBuffer).toHaveBeenCalledWith(sessionId);
      });
    });

    it('should not mark participant as responded when recovery succeeds', async () => {
      setupListener();
      const handler = eventHandlers.get('exit');
      const sessionId = 'group-chat-test-chat-123-participant-TestAgent-abc123';
      handler?.(sessionId, 0);
      // Wait for async operations
      await new Promise((resolve) => setTimeout(resolve, 50));
      // When recovery succeeds, markParticipantResponded should NOT be called
      // because the recovery spawn will handle that
      expect(mockDeps.groupChatRouter.markParticipantResponded).not.toHaveBeenCalled();
    });

    it('should mark participant as responded when recovery fails', async () => {
      mockDeps.groupChatRouter.respawnParticipantWithRecovery = vi
        .fn()
        .mockRejectedValue(new Error('Recovery failed'));
      setupListener();
      const handler = eventHandlers.get('exit');
      const sessionId = 'group-chat-test-chat-123-participant-TestAgent-abc123';
      handler?.(sessionId, 0);
      await vi.waitFor(() => {
        expect(mockDeps.groupChatRouter.markParticipantResponded).toHaveBeenCalledWith(
          'test-chat-123',
          'TestAgent'
        );
      });
    });
  });

  describe('Moderator Exit', () => {
    it('should route moderator response on exit', async () => {
      setupListener();
      const handler = eventHandlers.get('exit');
      const sessionId = 'group-chat-test-chat-123-moderator-1234567890';
      handler?.(sessionId, 0);
      await vi.waitFor(() => {
        expect(mockDeps.groupChatRouter.routeModeratorResponse).toHaveBeenCalledWith(
          'test-chat-123',
          'parsed response',
          expect.anything(),
          expect.anything(),
          false
        );
      });
    });

    it('should clear moderator buffer after processing', async () => {
      setupListener();
      const handler = eventHandlers.get('exit');
      const sessionId = 'group-chat-test-chat-123-moderator-1234567890';
      handler?.(sessionId, 0);
      await vi.waitFor(() => {
        expect(mockDeps.outputBuffer.clearGroupChatBuffer).toHaveBeenCalledWith(sessionId);
      });
    });

    it('should handle synthesis sessions correctly', async () => {
      setupListener();
      const handler = eventHandlers.get('exit');
      // '-synthesis-' session IDs also route through routeModeratorResponse.
      const sessionId = 'group-chat-test-chat-123-moderator-synthesis-1234567890';
      handler?.(sessionId, 0);
      await vi.waitFor(() => {
        expect(mockDeps.groupChatRouter.routeModeratorResponse).toHaveBeenCalled();
      });
    });
  });

  describe('Error Handling', () => {
    beforeEach(() => {
      mockDeps.outputParser.parseParticipantSessionId = vi.fn().mockReturnValue({
        groupChatId: 'test-chat-123',
        participantName: 'TestAgent',
      });
    });

    it('should log error when routing fails', async () => {
      mockDeps.groupChatRouter.routeAgentResponse = vi
        .fn()
        .mockRejectedValue(new Error('Route failed'));
      setupListener();
      const handler = eventHandlers.get('exit');
      const sessionId = 'group-chat-test-chat-123-participant-TestAgent-abc123';
      handler?.(sessionId, 0);
      await vi.waitFor(() => {
        expect(mockDeps.logger.error).toHaveBeenCalled();
      });
    });

    it('should attempt fallback parsing when primary parsing fails', async () => {
      // First call throws, second call (fallback) succeeds
      mockDeps.outputParser.extractTextFromStreamJson = vi
        .fn()
        .mockImplementationOnce(() => {
          throw new Error('Parse error');
        })
        .mockReturnValueOnce('fallback parsed response');
      setupListener();
      const handler = eventHandlers.get('exit');
      const sessionId = 'group-chat-test-chat-123-participant-TestAgent-abc123';
      handler?.(sessionId, 0);
      await vi.waitFor(() => {
        // Should have been called twice: once with agentType, once without (fallback)
        expect(mockDeps.outputParser.extractTextFromStreamJson).toHaveBeenCalledTimes(2);
      });
    });

    it('should still mark participant as responded after routing error', async () => {
      mockDeps.groupChatRouter.routeAgentResponse = vi
        .fn()
        .mockRejectedValue(new Error('Route failed'));
      mockDeps.outputParser.extractTextFromStreamJson = vi
        .fn()
        .mockReturnValueOnce('parsed response')
        .mockReturnValueOnce('fallback response');
      setupListener();
      const handler = eventHandlers.get('exit');
      const sessionId = 'group-chat-test-chat-123-participant-TestAgent-abc123';
      handler?.(sessionId, 0);
      await vi.waitFor(() => {
        expect(mockDeps.groupChatRouter.markParticipantResponded).toHaveBeenCalledWith(
          'test-chat-123',
          'TestAgent'
        );
      });
    });
  });
});

View File

@@ -0,0 +1,106 @@
/**
* Tests for forwarding listeners.
* These listeners simply forward process events to the renderer via IPC.
*/
import { describe, it, expect, vi, beforeEach } from 'vitest';
import { setupForwardingListeners } from '../../../main/process-listeners/forwarding-listeners';
import type { ProcessManager } from '../../../main/process-manager';
import type { SafeSendFn } from '../../../main/utils/safe-send';
describe('Forwarding Listeners', () => {
  let mockProcessManager: ProcessManager;
  let mockSafeSend: SafeSendFn;
  let eventHandlers: Map<string, (...args: unknown[]) => void>;

  beforeEach(() => {
    vi.clearAllMocks();
    eventHandlers = new Map();
    mockSafeSend = vi.fn();
    mockProcessManager = {
      on: vi.fn((event: string, handler: (...args: unknown[]) => void) => {
        eventHandlers.set(event, handler);
      }),
    } as unknown as ProcessManager;
  });

  /** Register all forwarding listeners on the mock process manager. */
  const wire = () => {
    setupForwardingListeners(mockProcessManager, { safeSend: mockSafeSend });
  };

  /** Invoke the handler captured for `event` with the given arguments. */
  const fire = (event: string, ...args: unknown[]) => {
    eventHandlers.get(event)?.(...args);
  };

  it('should register all forwarding event listeners', () => {
    wire();
    const expectedEvents = [
      'slash-commands',
      'thinking-chunk',
      'tool-execution',
      'stderr',
      'command-exit',
    ];
    for (const event of expectedEvents) {
      expect(mockProcessManager.on).toHaveBeenCalledWith(event, expect.any(Function));
    }
  });

  it('should forward slash-commands events to renderer', () => {
    wire();
    const commands = ['/help', '/clear'];
    fire('slash-commands', 'test-session-123', commands);
    expect(mockSafeSend).toHaveBeenCalledWith(
      'process:slash-commands',
      'test-session-123',
      commands
    );
  });

  it('should forward thinking-chunk events to renderer', () => {
    wire();
    const chunk = { content: 'thinking...' };
    fire('thinking-chunk', 'test-session-123', chunk);
    expect(mockSafeSend).toHaveBeenCalledWith('process:thinking-chunk', 'test-session-123', chunk);
  });

  it('should forward tool-execution events to renderer', () => {
    wire();
    const toolExecution = { tool: 'read_file', status: 'completed' };
    fire('tool-execution', 'test-session-123', toolExecution);
    expect(mockSafeSend).toHaveBeenCalledWith(
      'process:tool-execution',
      'test-session-123',
      toolExecution
    );
  });

  it('should forward stderr events to renderer', () => {
    wire();
    const stderrText = 'Error: something went wrong';
    fire('stderr', 'test-session-123', stderrText);
    expect(mockSafeSend).toHaveBeenCalledWith('process:stderr', 'test-session-123', stderrText);
  });

  it('should forward command-exit events to renderer', () => {
    wire();
    fire('command-exit', 'test-session-123', 0);
    expect(mockSafeSend).toHaveBeenCalledWith('process:command-exit', 'test-session-123', 0);
  });
});

View File

@@ -0,0 +1,402 @@
/**
* Tests for session ID listener.
* Handles agent session ID storage for conversation resume.
*/
import { describe, it, expect, vi, beforeEach } from 'vitest';
import { setupSessionIdListener } from '../../../main/process-listeners/session-id-listener';
import type { ProcessManager } from '../../../main/process-manager';
describe('Session ID Listener', () => {
let mockProcessManager: ProcessManager;
let mockDeps: Parameters<typeof setupSessionIdListener>[1];
let eventHandlers: Map<string, (...args: unknown[]) => void>;
// Create a minimal mock group chat
const createMockGroupChat = () => ({
id: 'test-chat-123',
name: 'Test Chat',
moderatorAgentId: 'claude-code',
moderatorSessionId: 'group-chat-test-chat-123-moderator',
participants: [
{
name: 'TestAgent',
agentId: 'claude-code',
sessionId: 'group-chat-test-chat-123-participant-TestAgent-abc123',
addedAt: Date.now(),
},
],
createdAt: Date.now(),
updatedAt: Date.now(),
logPath: '/tmp/test-chat.log',
imagesDir: '/tmp/test-chat-images',
});
beforeEach(() => {
vi.clearAllMocks();
eventHandlers = new Map();
mockProcessManager = {
on: vi.fn((event: string, handler: (...args: unknown[]) => void) => {
eventHandlers.set(event, handler);
}),
} as unknown as ProcessManager;
mockDeps = {
safeSend: vi.fn(),
logger: {
info: vi.fn(),
error: vi.fn(),
warn: vi.fn(),
debug: vi.fn(),
},
groupChatEmitters: {
emitParticipantsChanged: vi.fn(),
emitModeratorSessionIdChanged: vi.fn(),
},
groupChatStorage: {
loadGroupChat: vi.fn().mockResolvedValue(createMockGroupChat()),
updateGroupChat: vi.fn().mockResolvedValue(createMockGroupChat()),
updateParticipant: vi.fn().mockResolvedValue(createMockGroupChat()),
},
outputParser: {
extractTextFromStreamJson: vi.fn().mockReturnValue('parsed response'),
parseParticipantSessionId: vi.fn().mockReturnValue(null),
},
patterns: {
REGEX_MODERATOR_SESSION: /^group-chat-(.+)-moderator-/,
REGEX_MODERATOR_SESSION_TIMESTAMP: /^group-chat-(.+)-moderator-\d+$/,
REGEX_AI_SUFFIX: /-ai-[^-]+$/,
REGEX_AI_TAB_ID: /-ai-([^-]+)$/,
REGEX_BATCH_SESSION: /-batch-\d+$/,
REGEX_SYNOPSIS_SESSION: /-synopsis-\d+$/,
},
};
});
const setupListener = () => {
setupSessionIdListener(mockProcessManager, mockDeps);
};
describe('Event Registration', () => {
it('should register the session-id event listener', () => {
setupListener();
expect(mockProcessManager.on).toHaveBeenCalledWith('session-id', expect.any(Function));
});
});
describe('Regular Process Session ID', () => {
it('should forward session ID to renderer', () => {
setupListener();
const handler = eventHandlers.get('session-id');
handler?.('regular-session-123', 'agent-session-abc');
expect(mockDeps.safeSend).toHaveBeenCalledWith(
'process:session-id',
'regular-session-123',
'agent-session-abc'
);
});
});
describe('Participant Session ID Storage', () => {
beforeEach(() => {
mockDeps.outputParser.parseParticipantSessionId = vi.fn().mockReturnValue({
groupChatId: 'test-chat-123',
participantName: 'TestAgent',
});
});
it('should store agent session ID for participant', async () => {
setupListener();
const handler = eventHandlers.get('session-id');
handler?.('group-chat-test-chat-123-participant-TestAgent-abc123', 'agent-session-xyz');
await vi.waitFor(() => {
expect(mockDeps.groupChatStorage.updateParticipant).toHaveBeenCalledWith(
'test-chat-123',
'TestAgent',
{ agentSessionId: 'agent-session-xyz' }
);
});
});
it('should emit participants changed after storage', async () => {
setupListener();
const handler = eventHandlers.get('session-id');
handler?.('group-chat-test-chat-123-participant-TestAgent-abc123', 'agent-session-xyz');
await vi.waitFor(() => {
expect(mockDeps.groupChatEmitters.emitParticipantsChanged).toHaveBeenCalledWith(
'test-chat-123',
expect.any(Array)
);
});
});
it('should use updateParticipant return value instead of loading chat again (DB caching)', async () => {
setupListener();
const handler = eventHandlers.get('session-id');
handler?.('group-chat-test-chat-123-participant-TestAgent-abc123', 'agent-session-xyz');
await vi.waitFor(() => {
expect(mockDeps.groupChatEmitters.emitParticipantsChanged).toHaveBeenCalled();
});
// Verify we didn't make a redundant loadGroupChat call
// The code should use the return value from updateParticipant directly
expect(mockDeps.groupChatStorage.loadGroupChat).not.toHaveBeenCalled();
});
it('should pass exact participants from updateParticipant return value', async () => {
const specificParticipants = [
{ name: 'Agent1', agentId: 'claude-code', sessionId: 'session-1', addedAt: 1000 },
{ name: 'Agent2', agentId: 'codex', sessionId: 'session-2', addedAt: 2000 },
];
mockDeps.groupChatStorage.updateParticipant = vi.fn().mockResolvedValue({
...createMockGroupChat(),
participants: specificParticipants,
});
setupListener();
const handler = eventHandlers.get('session-id');
handler?.('group-chat-test-chat-123-participant-TestAgent-abc123', 'agent-session-xyz');
await vi.waitFor(() => {
expect(mockDeps.groupChatEmitters.emitParticipantsChanged).toHaveBeenCalledWith(
'test-chat-123',
specificParticipants
);
});
});
it('should handle empty participants array from updateParticipant', async () => {
mockDeps.groupChatStorage.updateParticipant = vi.fn().mockResolvedValue({
...createMockGroupChat(),
participants: [],
});
setupListener();
const handler = eventHandlers.get('session-id');
handler?.('group-chat-test-chat-123-participant-TestAgent-abc123', 'agent-session-xyz');
await vi.waitFor(() => {
expect(mockDeps.groupChatEmitters.emitParticipantsChanged).toHaveBeenCalledWith(
'test-chat-123',
[]
);
});
});
it('should handle undefined emitParticipantsChanged gracefully (optional chaining)', async () => {
mockDeps.groupChatEmitters.emitParticipantsChanged = undefined;
setupListener();
const handler = eventHandlers.get('session-id');
// Should not throw
handler?.('group-chat-test-chat-123-participant-TestAgent-abc123', 'agent-session-xyz');
await vi.waitFor(() => {
expect(mockDeps.groupChatStorage.updateParticipant).toHaveBeenCalled();
});
// No error should be logged for the optional emitter
expect(mockDeps.logger.error).not.toHaveBeenCalled();
});
it('should log error when storage fails', async () => {
mockDeps.groupChatStorage.updateParticipant = vi
.fn()
.mockRejectedValue(new Error('DB error'));
setupListener();
const handler = eventHandlers.get('session-id');
handler?.('group-chat-test-chat-123-participant-TestAgent-abc123', 'agent-session-xyz');
await vi.waitFor(() => {
expect(mockDeps.logger.error).toHaveBeenCalledWith(
'[GroupChat] Failed to update participant agentSessionId',
'ProcessListener',
expect.objectContaining({
error: 'Error: DB error',
participant: 'TestAgent',
})
);
});
});
it('should still forward to renderer after storage', () => {
setupListener();
const handler = eventHandlers.get('session-id');
handler?.('group-chat-test-chat-123-participant-TestAgent-abc123', 'agent-session-xyz');
expect(mockDeps.safeSend).toHaveBeenCalledWith(
'process:session-id',
'group-chat-test-chat-123-participant-TestAgent-abc123',
'agent-session-xyz'
);
});
});
describe('Moderator Session ID Storage', () => {
it('should store agent session ID for moderator', async () => {
setupListener();
const handler = eventHandlers.get('session-id');
handler?.('group-chat-test-chat-123-moderator-1234567890', 'moderator-session-xyz');
await vi.waitFor(() => {
expect(mockDeps.groupChatStorage.updateGroupChat).toHaveBeenCalledWith('test-chat-123', {
moderatorAgentSessionId: 'moderator-session-xyz',
});
});
});
it('should emit moderator session ID changed after storage', async () => {
setupListener();
const handler = eventHandlers.get('session-id');
handler?.('group-chat-test-chat-123-moderator-1234567890', 'moderator-session-xyz');
await vi.waitFor(() => {
expect(mockDeps.groupChatEmitters.emitModeratorSessionIdChanged).toHaveBeenCalledWith(
'test-chat-123',
'moderator-session-xyz'
);
});
});
it('should log error when moderator storage fails', async () => {
mockDeps.groupChatStorage.updateGroupChat = vi.fn().mockRejectedValue(new Error('DB error'));
setupListener();
const handler = eventHandlers.get('session-id');
handler?.('group-chat-test-chat-123-moderator-1234567890', 'moderator-session-xyz');
await vi.waitFor(() => {
expect(mockDeps.logger.error).toHaveBeenCalledWith(
'[GroupChat] Failed to update moderator agent session ID',
'ProcessListener',
expect.objectContaining({
error: 'Error: DB error',
groupChatId: 'test-chat-123',
})
);
});
});
it('should still forward to renderer for moderator sessions', () => {
setupListener();
const handler = eventHandlers.get('session-id');
handler?.('group-chat-test-chat-123-moderator-1234567890', 'moderator-session-xyz');
expect(mockDeps.safeSend).toHaveBeenCalledWith(
'process:session-id',
'group-chat-test-chat-123-moderator-1234567890',
'moderator-session-xyz'
);
});
it('should NOT store for synthesis moderator sessions (different pattern)', () => {
setupListener();
const handler = eventHandlers.get('session-id');
// Synthesis session ID doesn't match REGEX_MODERATOR_SESSION_TIMESTAMP
// because it has 'synthesis' in it: group-chat-xxx-moderator-synthesis-timestamp
handler?.('group-chat-test-chat-123-moderator-synthesis-1234567890', 'synthesis-session-xyz');
// Should NOT call updateGroupChat for synthesis sessions (doesn't match timestamp pattern)
expect(mockDeps.groupChatStorage.updateGroupChat).not.toHaveBeenCalled();
});
});
describe('Session ID Format Handling', () => {
it('should handle empty agent session ID', () => {
setupListener();
const handler = eventHandlers.get('session-id');
handler?.('regular-session-123', '');
expect(mockDeps.safeSend).toHaveBeenCalledWith(
'process:session-id',
'regular-session-123',
''
);
});
it('should handle UUID format session IDs', () => {
setupListener();
const handler = eventHandlers.get('session-id');
handler?.('regular-session-123', 'a1b2c3d4-e5f6-7890-abcd-ef1234567890');
expect(mockDeps.safeSend).toHaveBeenCalledWith(
'process:session-id',
'regular-session-123',
'a1b2c3d4-e5f6-7890-abcd-ef1234567890'
);
});
it('should handle long session IDs', () => {
setupListener();
const handler = eventHandlers.get('session-id');
const longSessionId = 'a'.repeat(500);
handler?.('regular-session-123', longSessionId);
expect(mockDeps.safeSend).toHaveBeenCalledWith(
'process:session-id',
'regular-session-123',
longSessionId
);
});
});
describe('Performance Optimization', () => {
it('should skip participant parsing for non-group-chat sessions (prefix check)', () => {
setupListener();
const handler = eventHandlers.get('session-id');
// Regular session ID doesn't start with 'group-chat-'
handler?.('regular-session-123', 'agent-session-abc');
// parseParticipantSessionId should NOT be called for non-group-chat sessions
expect(mockDeps.outputParser.parseParticipantSessionId).not.toHaveBeenCalled();
});
it('should only parse participant session ID for group-chat sessions', () => {
mockDeps.outputParser.parseParticipantSessionId = vi.fn().mockReturnValue(null);
setupListener();
const handler = eventHandlers.get('session-id');
// Group chat session ID starts with 'group-chat-'
handler?.('group-chat-test-123-participant-Agent-abc', 'agent-session-xyz');
// parseParticipantSessionId SHOULD be called for group-chat sessions
expect(mockDeps.outputParser.parseParticipantSessionId).toHaveBeenCalledWith(
'group-chat-test-123-participant-Agent-abc'
);
});
it('should skip moderator regex for non-group-chat sessions', () => {
setupListener();
const handler = eventHandlers.get('session-id');
// Process many non-group-chat sessions - should be fast since regex is skipped
for (let i = 0; i < 100; i++) {
handler?.(`regular-session-${i}`, `agent-session-${i}`);
}
// Neither storage method should be called for regular sessions
expect(mockDeps.groupChatStorage.updateParticipant).not.toHaveBeenCalled();
expect(mockDeps.groupChatStorage.updateGroupChat).not.toHaveBeenCalled();
// But all should still forward to renderer
expect(mockDeps.safeSend).toHaveBeenCalledTimes(100);
});
});
});

View File

@@ -0,0 +1,239 @@
/**
* Tests for stats listener.
* Handles query-complete events for usage statistics tracking.
*/
import { describe, it, expect, vi, beforeEach } from 'vitest';
import { setupStatsListener } from '../../../main/process-listeners/stats-listener';
import type { ProcessManager } from '../../../main/process-manager';
import type { SafeSendFn } from '../../../main/utils/safe-send';
import type { QueryCompleteData } from '../../../main/process-manager/types';
import type { StatsDB } from '../../../main/stats';
import type { ProcessListenerDependencies } from '../../../main/process-listeners/types';
describe('Stats Listener', () => {
let mockProcessManager: ProcessManager;
let mockSafeSend: SafeSendFn;
let mockStatsDB: StatsDB;
let mockLogger: ProcessListenerDependencies['logger'];
let eventHandlers: Map<string, (...args: unknown[]) => void>;
beforeEach(() => {
vi.clearAllMocks();
eventHandlers = new Map();
mockSafeSend = vi.fn();
mockLogger = {
info: vi.fn(),
error: vi.fn(),
warn: vi.fn(),
debug: vi.fn(),
};
mockStatsDB = {
isReady: vi.fn(() => true),
insertQueryEvent: vi.fn(() => 'event-id-123'),
} as unknown as StatsDB;
mockProcessManager = {
on: vi.fn((event: string, handler: (...args: unknown[]) => void) => {
eventHandlers.set(event, handler);
}),
} as unknown as ProcessManager;
});
it('should register the query-complete event listener', () => {
setupStatsListener(mockProcessManager, {
safeSend: mockSafeSend,
getStatsDB: () => mockStatsDB,
logger: mockLogger,
});
expect(mockProcessManager.on).toHaveBeenCalledWith('query-complete', expect.any(Function));
});
it('should record query event to stats database when ready', async () => {
setupStatsListener(mockProcessManager, {
safeSend: mockSafeSend,
getStatsDB: () => mockStatsDB,
logger: mockLogger,
});
const handler = eventHandlers.get('query-complete');
const testSessionId = 'test-session-123';
const testQueryData: QueryCompleteData = {
sessionId: testSessionId,
agentType: 'claude-code',
source: 'user',
startTime: Date.now() - 5000,
duration: 5000,
projectPath: '/test/project',
tabId: 'tab-123',
};
handler?.(testSessionId, testQueryData);
// Wait for async processing
await vi.waitFor(() => {
expect(mockStatsDB.isReady).toHaveBeenCalled();
expect(mockStatsDB.insertQueryEvent).toHaveBeenCalledWith({
sessionId: testQueryData.sessionId,
agentType: testQueryData.agentType,
source: testQueryData.source,
startTime: testQueryData.startTime,
duration: testQueryData.duration,
projectPath: testQueryData.projectPath,
tabId: testQueryData.tabId,
});
expect(mockSafeSend).toHaveBeenCalledWith('stats:updated');
});
});
it('should not record event when stats database is not ready', () => {
vi.mocked(mockStatsDB.isReady).mockReturnValue(false);
setupStatsListener(mockProcessManager, {
safeSend: mockSafeSend,
getStatsDB: () => mockStatsDB,
logger: mockLogger,
});
const handler = eventHandlers.get('query-complete');
const testQueryData: QueryCompleteData = {
sessionId: 'session-456',
agentType: 'codex',
source: 'auto',
startTime: Date.now(),
duration: 1000,
projectPath: '/test/project',
tabId: 'tab-456',
};
handler?.('session-456', testQueryData);
expect(mockStatsDB.isReady).toHaveBeenCalled();
expect(mockStatsDB.insertQueryEvent).not.toHaveBeenCalled();
expect(mockSafeSend).not.toHaveBeenCalled();
});
it('should log error when recording fails after retries', async () => {
vi.mocked(mockStatsDB.insertQueryEvent).mockImplementation(() => {
throw new Error('Database error');
});
setupStatsListener(mockProcessManager, {
safeSend: mockSafeSend,
getStatsDB: () => mockStatsDB,
logger: mockLogger,
});
const handler = eventHandlers.get('query-complete');
const testQueryData: QueryCompleteData = {
sessionId: 'session-789',
agentType: 'opencode',
source: 'user',
startTime: Date.now(),
duration: 2000,
projectPath: '/test/project',
tabId: 'tab-789',
};
handler?.('session-789', testQueryData);
// Wait for all retries to complete (100ms + 200ms + final attempt)
await vi.waitFor(
() => {
expect(mockLogger.error).toHaveBeenCalledWith(
expect.stringContaining('Failed to record query event after 3 attempts'),
'[Stats]',
expect.objectContaining({
sessionId: 'session-789',
})
);
},
{ timeout: 1000 }
);
// Should have tried 3 times
expect(mockStatsDB.insertQueryEvent).toHaveBeenCalledTimes(3);
// Should not have broadcasted update on failure
expect(mockSafeSend).not.toHaveBeenCalled();
});
it('should log debug info when recording succeeds', async () => {
setupStatsListener(mockProcessManager, {
safeSend: mockSafeSend,
getStatsDB: () => mockStatsDB,
logger: mockLogger,
});
const handler = eventHandlers.get('query-complete');
const testQueryData: QueryCompleteData = {
sessionId: 'session-abc',
agentType: 'claude-code',
source: 'user',
startTime: Date.now(),
duration: 3000,
projectPath: '/test/project',
tabId: 'tab-abc',
};
handler?.('session-abc', testQueryData);
// Wait for async processing
await vi.waitFor(() => {
expect(mockLogger.debug).toHaveBeenCalledWith(
expect.stringContaining('Recorded query event'),
'[Stats]',
expect.objectContaining({
sessionId: 'session-abc',
agentType: 'claude-code',
source: 'user',
duration: 3000,
})
);
});
});
it('should retry on transient failure and succeed', async () => {
// First call fails, second succeeds
vi.mocked(mockStatsDB.insertQueryEvent)
.mockImplementationOnce(() => {
throw new Error('Transient error');
})
.mockImplementationOnce(() => 'event-id-456');
setupStatsListener(mockProcessManager, {
safeSend: mockSafeSend,
getStatsDB: () => mockStatsDB,
logger: mockLogger,
});
const handler = eventHandlers.get('query-complete');
const testQueryData: QueryCompleteData = {
sessionId: 'session-retry',
agentType: 'claude-code',
source: 'user',
startTime: Date.now(),
duration: 1000,
projectPath: '/test/project',
tabId: 'tab-retry',
};
handler?.('session-retry', testQueryData);
// Wait for retry to complete
await vi.waitFor(
() => {
expect(mockStatsDB.insertQueryEvent).toHaveBeenCalledTimes(2);
expect(mockSafeSend).toHaveBeenCalledWith('stats:updated');
},
{ timeout: 500 }
);
// Should have logged warning for first failure
expect(mockLogger.warn).toHaveBeenCalledWith(
expect.stringContaining('Stats DB insert failed'),
'[Stats]',
expect.any(Object)
);
});
});

View File

@@ -0,0 +1,433 @@
/**
* Tests for usage listener.
* Handles token/cost statistics from AI responses.
*/
import { describe, it, expect, vi, beforeEach } from 'vitest';
import { setupUsageListener } from '../../../main/process-listeners/usage-listener';
import type { ProcessManager } from '../../../main/process-manager';
import type { UsageStats } from '../../../main/process-listeners/types';
describe('Usage Listener', () => {
let mockProcessManager: ProcessManager;
let mockDeps: Parameters<typeof setupUsageListener>[1];
let eventHandlers: Map<string, (...args: unknown[]) => void>;
const createMockUsageStats = (overrides: Partial<UsageStats> = {}): UsageStats => ({
inputTokens: 1000,
outputTokens: 500,
cacheReadInputTokens: 200,
cacheCreationInputTokens: 100,
totalCostUsd: 0.05,
contextWindow: 100000,
...overrides,
});
// Create a minimal mock group chat
const createMockGroupChat = () => ({
id: 'test-chat-123',
name: 'Test Chat',
moderatorAgentId: 'claude-code',
moderatorSessionId: 'group-chat-test-chat-123-moderator',
participants: [
{
name: 'TestAgent',
agentId: 'claude-code',
sessionId: 'group-chat-test-chat-123-participant-TestAgent-abc123',
addedAt: Date.now(),
},
],
createdAt: Date.now(),
updatedAt: Date.now(),
logPath: '/tmp/test-chat.log',
imagesDir: '/tmp/test-chat-images',
});
beforeEach(() => {
vi.clearAllMocks();
eventHandlers = new Map();
mockProcessManager = {
on: vi.fn((event: string, handler: (...args: unknown[]) => void) => {
eventHandlers.set(event, handler);
}),
} as unknown as ProcessManager;
mockDeps = {
safeSend: vi.fn(),
logger: {
info: vi.fn(),
error: vi.fn(),
warn: vi.fn(),
debug: vi.fn(),
},
groupChatEmitters: {
emitParticipantsChanged: vi.fn(),
emitModeratorUsage: vi.fn(),
},
groupChatStorage: {
loadGroupChat: vi.fn().mockResolvedValue(createMockGroupChat()),
updateGroupChat: vi.fn().mockResolvedValue(createMockGroupChat()),
updateParticipant: vi.fn().mockResolvedValue(createMockGroupChat()),
},
outputParser: {
extractTextFromStreamJson: vi.fn().mockReturnValue('parsed response'),
parseParticipantSessionId: vi.fn().mockReturnValue(null),
},
usageAggregator: {
calculateContextTokens: vi.fn().mockReturnValue(1800),
},
patterns: {
REGEX_MODERATOR_SESSION: /^group-chat-(.+)-moderator-/,
REGEX_MODERATOR_SESSION_TIMESTAMP: /^group-chat-(.+)-moderator-\d+$/,
REGEX_AI_SUFFIX: /-ai-[^-]+$/,
REGEX_AI_TAB_ID: /-ai-([^-]+)$/,
REGEX_BATCH_SESSION: /-batch-\d+$/,
REGEX_SYNOPSIS_SESSION: /-synopsis-\d+$/,
},
};
});
const setupListener = () => {
setupUsageListener(mockProcessManager, mockDeps);
};
describe('Event Registration', () => {
it('should register the usage event listener', () => {
setupListener();
expect(mockProcessManager.on).toHaveBeenCalledWith('usage', expect.any(Function));
});
});
describe('Regular Process Usage', () => {
it('should forward usage stats to renderer', () => {
setupListener();
const handler = eventHandlers.get('usage');
const usageStats = createMockUsageStats();
handler?.('regular-session-123', usageStats);
expect(mockDeps.safeSend).toHaveBeenCalledWith(
'process:usage',
'regular-session-123',
usageStats
);
});
});
describe('Participant Usage', () => {
beforeEach(() => {
mockDeps.outputParser.parseParticipantSessionId = vi.fn().mockReturnValue({
groupChatId: 'test-chat-123',
participantName: 'TestAgent',
});
});
it('should update participant with usage stats', async () => {
setupListener();
const handler = eventHandlers.get('usage');
const usageStats = createMockUsageStats();
handler?.('group-chat-test-chat-123-participant-TestAgent-abc123', usageStats);
await vi.waitFor(() => {
expect(mockDeps.groupChatStorage.updateParticipant).toHaveBeenCalledWith(
'test-chat-123',
'TestAgent',
expect.objectContaining({
contextUsage: expect.any(Number),
tokenCount: 1800,
totalCost: 0.05,
})
);
});
});
it('should calculate context usage percentage correctly', async () => {
mockDeps.usageAggregator.calculateContextTokens = vi.fn().mockReturnValue(50000); // 50% of 100000
setupListener();
const handler = eventHandlers.get('usage');
const usageStats = createMockUsageStats({ contextWindow: 100000 });
handler?.('group-chat-test-chat-123-participant-TestAgent-abc123', usageStats);
await vi.waitFor(() => {
expect(mockDeps.groupChatStorage.updateParticipant).toHaveBeenCalledWith(
'test-chat-123',
'TestAgent',
expect.objectContaining({
contextUsage: 50,
})
);
});
});
it('should handle zero context window gracefully (falls back to 200k default)', async () => {
setupListener();
const handler = eventHandlers.get('usage');
const usageStats = createMockUsageStats({ contextWindow: 0 });
handler?.('group-chat-test-chat-123-participant-TestAgent-abc123', usageStats);
// With contextWindow 0, falls back to 200k default
// 1800 / 200000 = 0.9% -> rounds to 1%
await vi.waitFor(() => {
expect(mockDeps.groupChatStorage.updateParticipant).toHaveBeenCalledWith(
'test-chat-123',
'TestAgent',
expect.objectContaining({
contextUsage: 1,
})
);
});
});
it('should emit participants changed after update', async () => {
setupListener();
const handler = eventHandlers.get('usage');
const usageStats = createMockUsageStats();
handler?.('group-chat-test-chat-123-participant-TestAgent-abc123', usageStats);
await vi.waitFor(() => {
expect(mockDeps.groupChatEmitters.emitParticipantsChanged).toHaveBeenCalledWith(
'test-chat-123',
expect.any(Array)
);
});
});
it('should use updateParticipant return value instead of loading chat again (DB caching)', async () => {
setupListener();
const handler = eventHandlers.get('usage');
const usageStats = createMockUsageStats();
handler?.('group-chat-test-chat-123-participant-TestAgent-abc123', usageStats);
await vi.waitFor(() => {
expect(mockDeps.groupChatEmitters.emitParticipantsChanged).toHaveBeenCalled();
});
// Verify we didn't make a redundant loadGroupChat call
// The code should use the return value from updateParticipant directly
expect(mockDeps.groupChatStorage.loadGroupChat).not.toHaveBeenCalled();
});
it('should pass exact participants from updateParticipant return value', async () => {
const specificParticipants = [
{ name: 'Agent1', agentId: 'claude-code', sessionId: 'session-1', addedAt: 1000 },
{ name: 'Agent2', agentId: 'codex', sessionId: 'session-2', addedAt: 2000 },
];
mockDeps.groupChatStorage.updateParticipant = vi.fn().mockResolvedValue({
...createMockGroupChat(),
participants: specificParticipants,
});
setupListener();
const handler = eventHandlers.get('usage');
const usageStats = createMockUsageStats();
handler?.('group-chat-test-chat-123-participant-TestAgent-abc123', usageStats);
await vi.waitFor(() => {
expect(mockDeps.groupChatEmitters.emitParticipantsChanged).toHaveBeenCalledWith(
'test-chat-123',
specificParticipants
);
});
});
it('should handle empty participants array from updateParticipant', async () => {
mockDeps.groupChatStorage.updateParticipant = vi.fn().mockResolvedValue({
...createMockGroupChat(),
participants: [],
});
setupListener();
const handler = eventHandlers.get('usage');
const usageStats = createMockUsageStats();
handler?.('group-chat-test-chat-123-participant-TestAgent-abc123', usageStats);
await vi.waitFor(() => {
expect(mockDeps.groupChatEmitters.emitParticipantsChanged).toHaveBeenCalledWith(
'test-chat-123',
[]
);
});
});
it('should handle undefined emitParticipantsChanged gracefully (optional chaining)', async () => {
mockDeps.groupChatEmitters.emitParticipantsChanged = undefined;
setupListener();
const handler = eventHandlers.get('usage');
const usageStats = createMockUsageStats();
// Should not throw
handler?.('group-chat-test-chat-123-participant-TestAgent-abc123', usageStats);
await vi.waitFor(() => {
expect(mockDeps.groupChatStorage.updateParticipant).toHaveBeenCalled();
});
// No error should be logged for the optional emitter
expect(mockDeps.logger.error).not.toHaveBeenCalled();
});
it('should log error when participant update fails', async () => {
mockDeps.groupChatStorage.updateParticipant = vi
.fn()
.mockRejectedValue(new Error('DB error'));
setupListener();
const handler = eventHandlers.get('usage');
const usageStats = createMockUsageStats();
handler?.('group-chat-test-chat-123-participant-TestAgent-abc123', usageStats);
await vi.waitFor(() => {
expect(mockDeps.logger.error).toHaveBeenCalledWith(
'[GroupChat] Failed to update participant usage',
'ProcessListener',
expect.objectContaining({
error: 'Error: DB error',
participant: 'TestAgent',
})
);
});
});
it('should still forward to renderer for participant usage', () => {
setupListener();
const handler = eventHandlers.get('usage');
const usageStats = createMockUsageStats();
handler?.('group-chat-test-chat-123-participant-TestAgent-abc123', usageStats);
expect(mockDeps.safeSend).toHaveBeenCalledWith(
'process:usage',
'group-chat-test-chat-123-participant-TestAgent-abc123',
usageStats
);
});
});
describe('Moderator Usage', () => {
it('should emit moderator usage for moderator sessions', () => {
setupListener();
const handler = eventHandlers.get('usage');
const usageStats = createMockUsageStats();
handler?.('group-chat-test-chat-123-moderator-1234567890', usageStats);
expect(mockDeps.groupChatEmitters.emitModeratorUsage).toHaveBeenCalledWith(
'test-chat-123',
expect.objectContaining({
contextUsage: expect.any(Number),
totalCost: 0.05,
tokenCount: 1800,
})
);
});
it('should calculate moderator context usage correctly', () => {
mockDeps.usageAggregator.calculateContextTokens = vi.fn().mockReturnValue(25000); // 25% of 100000
setupListener();
const handler = eventHandlers.get('usage');
const usageStats = createMockUsageStats({ contextWindow: 100000 });
handler?.('group-chat-test-chat-123-moderator-1234567890', usageStats);
expect(mockDeps.groupChatEmitters.emitModeratorUsage).toHaveBeenCalledWith(
'test-chat-123',
expect.objectContaining({
contextUsage: 25,
})
);
});
it('should still forward to renderer for moderator usage', () => {
setupListener();
const handler = eventHandlers.get('usage');
const usageStats = createMockUsageStats();
handler?.('group-chat-test-chat-123-moderator-1234567890', usageStats);
expect(mockDeps.safeSend).toHaveBeenCalledWith(
'process:usage',
'group-chat-test-chat-123-moderator-1234567890',
usageStats
);
});
it('should handle synthesis moderator sessions', () => {
setupListener();
const handler = eventHandlers.get('usage');
const usageStats = createMockUsageStats();
handler?.('group-chat-test-chat-123-moderator-synthesis-1234567890', usageStats);
expect(mockDeps.groupChatEmitters.emitModeratorUsage).toHaveBeenCalledWith(
'test-chat-123',
expect.any(Object)
);
});
});
describe('Usage with Reasoning Tokens', () => {
it('should handle usage stats with reasoning tokens', () => {
setupListener();
const handler = eventHandlers.get('usage');
const usageStats = createMockUsageStats({ reasoningTokens: 1000 });
handler?.('regular-session-123', usageStats);
expect(mockDeps.safeSend).toHaveBeenCalledWith(
'process:usage',
'regular-session-123',
expect.objectContaining({ reasoningTokens: 1000 })
);
});
});
describe('Performance Optimization', () => {
it('should skip participant parsing for non-group-chat sessions (prefix check)', () => {
setupListener();
const handler = eventHandlers.get('usage');
const usageStats = createMockUsageStats();
// Regular session ID doesn't start with 'group-chat-'
handler?.('regular-session-123', usageStats);
// parseParticipantSessionId should NOT be called for non-group-chat sessions
expect(mockDeps.outputParser.parseParticipantSessionId).not.toHaveBeenCalled();
});
it('should only parse participant session ID for group-chat sessions', () => {
mockDeps.outputParser.parseParticipantSessionId = vi.fn().mockReturnValue(null);
setupListener();
const handler = eventHandlers.get('usage');
const usageStats = createMockUsageStats();
// Group chat session ID starts with 'group-chat-'
handler?.('group-chat-test-123-participant-Agent-abc', usageStats);
// parseParticipantSessionId SHOULD be called for group-chat sessions
expect(mockDeps.outputParser.parseParticipantSessionId).toHaveBeenCalledWith(
'group-chat-test-123-participant-Agent-abc'
);
});
it('should skip moderator regex for non-group-chat sessions', () => {
setupListener();
const handler = eventHandlers.get('usage');
const usageStats = createMockUsageStats();
// Process many non-group-chat sessions - should be fast since regex is skipped
for (let i = 0; i < 100; i++) {
handler?.(`regular-session-${i}`, usageStats);
}
// Moderator usage should NOT be emitted for any regular sessions
expect(mockDeps.groupChatEmitters.emitModeratorUsage).not.toHaveBeenCalled();
// But all should still forward to renderer
expect(mockDeps.safeSend).toHaveBeenCalledTimes(100);
});
});
});

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,601 @@
/**
* Tests for VACUUM scheduling, clearOldData, and database maintenance.
*
* Note: better-sqlite3 is a native module compiled for Electron's Node version.
* Direct testing with the native module in vitest is not possible without
* electron-rebuild for the vitest runtime. These tests use mocked database
* operations to verify the logic without requiring the actual native module.
*
* For full integration testing of the SQLite database, use the Electron test
* environment (e2e tests) where the native module is properly loaded.
*/
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import * as path from 'path';
import * as os from 'os';
// Track Database constructor calls to verify file path
let lastDbPath: string | null = null;
// Store mock references so they can be accessed in tests.
// Note: prepare() always returns this single shared statement object, so
// tests shape query results by overriding run/get/all per test case.
const mockStatement = {
  run: vi.fn(() => ({ changes: 1 })),
  get: vi.fn(() => ({ count: 0, total_duration: 0 })),
  all: vi.fn(() => []),
};
// Database-level mock surface wired into the MockDatabase class below.
const mockDb = {
  pragma: vi.fn(() => [{ user_version: 0 }]),
  prepare: vi.fn(() => mockStatement),
  close: vi.fn(),
  // Transaction mock that immediately executes the function
  transaction: vi.fn((fn: () => void) => {
    return () => fn();
  }),
};
// Mock better-sqlite3 as a class. vi.mock() factories are hoisted by vitest
// but run lazily at import time, so referencing mockDb here is safe.
vi.mock('better-sqlite3', () => {
  return {
    default: class MockDatabase {
      constructor(dbPath: string) {
        lastDbPath = dbPath;
      }
      pragma = mockDb.pragma;
      prepare = mockDb.prepare;
      close = mockDb.close;
      transaction = mockDb.transaction;
    },
  };
});
// Mock electron's app module with trackable userData path
const mockUserDataPath = path.join(os.tmpdir(), 'maestro-test-stats-db');
vi.mock('electron', () => ({
  app: {
    getPath: vi.fn((name: string) => {
      if (name === 'userData') return mockUserDataPath;
      return os.tmpdir();
    }),
  },
}));
// Track fs calls. Each fs function delegates to one of these vi.fn()s so
// individual tests can override behavior (e.g. make statSync throw).
const mockFsExistsSync = vi.fn(() => true);
const mockFsMkdirSync = vi.fn();
const mockFsCopyFileSync = vi.fn();
const mockFsUnlinkSync = vi.fn();
const mockFsRenameSync = vi.fn();
const mockFsStatSync = vi.fn(() => ({ size: 1024 }));
const mockFsReadFileSync = vi.fn(() => '0'); // Default: old timestamp (triggers vacuum check)
const mockFsWriteFileSync = vi.fn();
// Mock fs. NOTE(review): the zero-arg vi.fn implementations ignore the
// spread args, but call arguments are still recorded for assertions.
vi.mock('fs', () => ({
  existsSync: (...args: unknown[]) => mockFsExistsSync(...args),
  mkdirSync: (...args: unknown[]) => mockFsMkdirSync(...args),
  copyFileSync: (...args: unknown[]) => mockFsCopyFileSync(...args),
  unlinkSync: (...args: unknown[]) => mockFsUnlinkSync(...args),
  renameSync: (...args: unknown[]) => mockFsRenameSync(...args),
  statSync: (...args: unknown[]) => mockFsStatSync(...args),
  readFileSync: (...args: unknown[]) => mockFsReadFileSync(...args),
  writeFileSync: (...args: unknown[]) => mockFsWriteFileSync(...args),
}));
// Mock logger so tests can assert on info/warn/error/debug calls.
vi.mock('../../../main/utils/logger', () => ({
  logger: {
    info: vi.fn(),
    warn: vi.fn(),
    error: vi.fn(),
    debug: vi.fn(),
  },
}));
// Import types only - we'll test the type definitions
import type {
QueryEvent,
AutoRunSession,
AutoRunTask,
SessionLifecycleEvent,
StatsTimeRange,
StatsFilters,
StatsAggregation,
} from '../../../shared/stats-types';
describe('Database VACUUM functionality', () => {
// Reset all shared mock state before each test so suites don't leak
// configuration into each other.
beforeEach(() => {
  vi.clearAllMocks();
  lastDbPath = null;
  mockDb.pragma.mockReturnValue([{ user_version: 0 }]);
  mockDb.prepare.mockReturnValue(mockStatement);
  mockStatement.run.mockReturnValue({ changes: 1 });
  mockFsExistsSync.mockReturnValue(true);
  // Reset statSync to throw by default (simulates file not existing),
  // which makes getDatabaseSize() report 0 bytes in these tests.
  mockFsStatSync.mockImplementation(() => {
    throw new Error('ENOENT: no such file or directory');
  });
});
// resetModules forces a fresh dynamic import of '../../../main/stats'
// in the next test, discarding module-level state.
afterEach(() => {
  vi.resetModules();
});
// getDatabaseSize must degrade gracefully (return 0, never throw) when the
// database file cannot be stat'ed.
describe('getDatabaseSize', () => {
  it('should return 0 when statSync throws (file missing)', async () => {
    // The mock fs.statSync is not configured to return size by default
    // so getDatabaseSize will catch the error and return 0
    const { StatsDB } = await import('../../../main/stats');
    const db = new StatsDB();
    db.initialize();
    // Since mockFsExistsSync.mockReturnValue(true) is set but statSync is not mocked,
    // getDatabaseSize will try to call the real statSync on a non-existent path
    // and catch the error, returning 0
    const size = db.getDatabaseSize();
    // The mock environment doesn't have actual file, so expect 0
    expect(size).toBe(0);
  });
  it('should handle statSync gracefully when file does not exist', async () => {
    const { StatsDB } = await import('../../../main/stats');
    const db = new StatsDB();
    db.initialize();
    // getDatabaseSize should not throw
    expect(() => db.getDatabaseSize()).not.toThrow();
  });
});
// vacuum(): runs the SQL VACUUM command, reports bytes freed, surfaces
// failures via { success: false, error } instead of throwing.
describe('vacuum', () => {
  it('should execute VACUUM SQL command', async () => {
    const { StatsDB } = await import('../../../main/stats');
    const db = new StatsDB();
    db.initialize();
    // Clear mocks from initialization so assertions count only vacuum() work
    mockStatement.run.mockClear();
    mockDb.prepare.mockClear();
    const result = db.vacuum();
    expect(result.success).toBe(true);
    expect(mockDb.prepare).toHaveBeenCalledWith('VACUUM');
    expect(mockStatement.run).toHaveBeenCalled();
  });
  it('should return success true when vacuum completes', async () => {
    const { StatsDB } = await import('../../../main/stats');
    const db = new StatsDB();
    db.initialize();
    const result = db.vacuum();
    expect(result.success).toBe(true);
    expect(result.error).toBeUndefined();
  });
  it('should return bytesFreed of 0 when sizes are equal (mocked)', async () => {
    const { StatsDB } = await import('../../../main/stats');
    const db = new StatsDB();
    db.initialize();
    const result = db.vacuum();
    // With mock fs, both before and after sizes will be 0
    expect(result.bytesFreed).toBe(0);
  });
  it('should return error if database not initialized', async () => {
    const { StatsDB } = await import('../../../main/stats');
    const db = new StatsDB();
    // Don't initialize — vacuum must fail fast with a descriptive error
    const result = db.vacuum();
    expect(result.success).toBe(false);
    expect(result.bytesFreed).toBe(0);
    expect(result.error).toBe('Database not initialized');
  });
  it('should handle VACUUM failure gracefully', async () => {
    const { StatsDB } = await import('../../../main/stats');
    const db = new StatsDB();
    db.initialize();
    // Make VACUUM fail while leaving every other statement working
    mockDb.prepare.mockImplementation((sql: string) => {
      if (sql === 'VACUUM') {
        return {
          run: vi.fn().mockImplementation(() => {
            throw new Error('database is locked');
          }),
        };
      }
      return mockStatement;
    });
    const result = db.vacuum();
    expect(result.success).toBe(false);
    expect(result.error).toContain('database is locked');
  });
  it('should log vacuum progress with size information', async () => {
    const { logger } = await import('../../../main/utils/logger');
    const { StatsDB } = await import('../../../main/stats');
    const db = new StatsDB();
    db.initialize();
    // Clear logger mocks from initialization
    vi.mocked(logger.info).mockClear();
    db.vacuum();
    // Check that logger was called with vacuum-related messages
    expect(logger.info).toHaveBeenCalledWith(
      expect.stringContaining('Starting VACUUM'),
      expect.any(String)
    );
    expect(logger.info).toHaveBeenCalledWith(
      expect.stringContaining('VACUUM completed'),
      expect.any(String)
    );
  });
});
// vacuumIfNeeded(threshold?): skips VACUUM when the database size is below
// the threshold (default 100MB); the skip condition is
// `databaseSize < thresholdBytes`.
describe('vacuumIfNeeded', () => {
  it('should skip vacuum if database size is 0 (below threshold)', async () => {
    const { StatsDB } = await import('../../../main/stats');
    const db = new StatsDB();
    db.initialize();
    // Clear mocks from initialization
    mockStatement.run.mockClear();
    mockDb.prepare.mockClear();
    const result = db.vacuumIfNeeded();
    // Size is 0 (mock fs), which is below 100MB threshold
    expect(result.vacuumed).toBe(false);
    expect(result.databaseSize).toBe(0);
    expect(result.result).toBeUndefined();
  });
  it('should return correct databaseSize in result', async () => {
    const { StatsDB } = await import('../../../main/stats');
    const db = new StatsDB();
    db.initialize();
    const result = db.vacuumIfNeeded();
    // Size property should be present
    expect(typeof result.databaseSize).toBe('number');
  });
  it('should use default 100MB threshold when not specified', async () => {
    const { StatsDB } = await import('../../../main/stats');
    const db = new StatsDB();
    db.initialize();
    // With 0 byte size (mocked), should skip vacuum
    const result = db.vacuumIfNeeded();
    expect(result.vacuumed).toBe(false);
  });
  // Fixed: the previous title claimed "should not vacuum" while the
  // assertion expects vacuumed === true. With threshold 0 and size 0,
  // the skip condition (0 < 0) is false, so the vacuum DOES run.
  it('should vacuum with threshold 0 and size 0 since size is not below threshold', async () => {
    const { StatsDB } = await import('../../../main/stats');
    const db = new StatsDB();
    db.initialize();
    // Clear mocks from initialization
    mockStatement.run.mockClear();
    mockDb.prepare.mockClear();
    // Skip condition is databaseSize < thresholdBytes; 0 < 0 is false,
    // so vacuumIfNeeded proceeds to vacuum.
    const result = db.vacuumIfNeeded(0);
    expect(result.databaseSize).toBe(0);
    expect(result.vacuumed).toBe(true);
  });
  it('should log appropriate message when skipping vacuum', async () => {
    const { logger } = await import('../../../main/utils/logger');
    const { StatsDB } = await import('../../../main/stats');
    const db = new StatsDB();
    db.initialize();
    // Clear logger mocks from initialization
    vi.mocked(logger.debug).mockClear();
    db.vacuumIfNeeded();
    expect(logger.debug).toHaveBeenCalledWith(
      expect.stringContaining('below vacuum threshold'),
      expect.any(String)
    );
  });
});
// Custom threshold handling: a negative threshold can never exceed the
// (non-negative) database size, so it effectively forces a vacuum.
describe('vacuumIfNeeded with custom thresholds', () => {
  it('should respect custom threshold parameter (threshold = -1 means always vacuum)', async () => {
    const { StatsDB } = await import('../../../main/stats');
    const db = new StatsDB();
    db.initialize();
    // Clear mocks from initialization
    mockStatement.run.mockClear();
    mockDb.prepare.mockClear();
    // Skip condition is databaseSize < thresholdBytes; 0 < -1 is false,
    // so vacuum runs.
    const result = db.vacuumIfNeeded(-1);
    expect(result.vacuumed).toBe(true);
    expect(mockDb.prepare).toHaveBeenCalledWith('VACUUM');
  });
  it('should not vacuum with very large threshold', async () => {
    const { StatsDB } = await import('../../../main/stats');
    const db = new StatsDB();
    db.initialize();
    // Clear mocks from initialization
    mockStatement.run.mockClear();
    mockDb.prepare.mockClear();
    // With 1TB threshold, should NOT trigger vacuum
    const result = db.vacuumIfNeeded(1024 * 1024 * 1024 * 1024);
    expect(result.vacuumed).toBe(false);
    expect(mockDb.prepare).not.toHaveBeenCalledWith('VACUUM');
  });
});
// initialize() integration: the weekly vacuum check runs during init but
// must never block or break initialization.
describe('initialize with vacuumIfNeeded integration', () => {
  it('should call vacuumIfNeededWeekly during initialization', async () => {
    const { logger } = await import('../../../main/utils/logger');
    // Clear logger mocks before test
    vi.mocked(logger.debug).mockClear();
    // Mock timestamp file as old (0 = epoch, triggers vacuum check)
    mockFsReadFileSync.mockReturnValue('0');
    const { StatsDB } = await import('../../../main/stats');
    const db = new StatsDB();
    db.initialize();
    // With old timestamp, vacuumIfNeededWeekly should proceed to call vacuumIfNeeded
    // which logs "below vacuum threshold" for small databases (mocked as 1024 bytes)
    expect(logger.debug).toHaveBeenCalledWith(
      expect.stringContaining('below vacuum threshold'),
      expect.any(String)
    );
  });
  it('should complete initialization even if vacuum would fail', async () => {
    // Make VACUUM fail if called
    mockDb.prepare.mockImplementation((sql: string) => {
      if (sql === 'VACUUM') {
        return {
          run: vi.fn().mockImplementation(() => {
            throw new Error('VACUUM failed: database is locked');
          }),
        };
      }
      return mockStatement;
    });
    const { StatsDB } = await import('../../../main/stats');
    const db = new StatsDB();
    // Initialize should not throw (vacuum is skipped due to 0 size anyway)
    expect(() => db.initialize()).not.toThrow();
    // Database should still be ready
    expect(db.isReady()).toBe(true);
  });
  it('should not block initialization for small databases', async () => {
    const { StatsDB } = await import('../../../main/stats');
    const db = new StatsDB();
    // Time the initialization (should be fast for mock)
    const start = Date.now();
    db.initialize();
    const elapsed = Date.now() - start;
    expect(db.isReady()).toBe(true);
    expect(elapsed).toBeLessThan(1000); // Should be fast in mock environment
  });
});
// Shape contracts for the two public vacuum APIs.
describe('vacuum return types', () => {
  it('vacuum should return object with success, bytesFreed, and optional error', async () => {
    const { StatsDB } = await import('../../../main/stats');
    const db = new StatsDB();
    db.initialize();
    const result = db.vacuum();
    expect(typeof result.success).toBe('boolean');
    expect(typeof result.bytesFreed).toBe('number');
    expect(result.error === undefined || typeof result.error === 'string').toBe(true);
  });
  it('vacuumIfNeeded should return object with vacuumed, databaseSize, and optional result', async () => {
    const { StatsDB } = await import('../../../main/stats');
    const db = new StatsDB();
    db.initialize();
    const result = db.vacuumIfNeeded();
    expect(typeof result.vacuumed).toBe('boolean');
    expect(typeof result.databaseSize).toBe('number');
    expect(result.result === undefined || typeof result.result === 'object').toBe(true);
  });
  it('vacuumIfNeeded should include result when vacuum is performed', async () => {
    const { StatsDB } = await import('../../../main/stats');
    const db = new StatsDB();
    db.initialize();
    // Use -1 threshold to force vacuum
    const result = db.vacuumIfNeeded(-1);
    expect(result.vacuumed).toBe(true);
    expect(result.result).toBeDefined();
    expect(result.result?.success).toBe(true);
  });
});
// clearOldData(olderThanDays): deletes query events, auto-run sessions and
// tasks older than the cutoff; returns per-table delete counts and never
// throws (errors are reported in the result).
describe('clearOldData method', () => {
  beforeEach(() => {
    vi.clearAllMocks();
    vi.resetModules();
  });
  it('should return error when database is not initialized', async () => {
    const { StatsDB } = await import('../../../main/stats');
    const db = new StatsDB();
    // Don't initialize — all counts must stay 0
    const result = db.clearOldData(30);
    expect(result.success).toBe(false);
    expect(result.deletedQueryEvents).toBe(0);
    expect(result.deletedAutoRunSessions).toBe(0);
    expect(result.deletedAutoRunTasks).toBe(0);
    expect(result.error).toBe('Database not initialized');
  });
  it('should return error when olderThanDays is 0 or negative', async () => {
    const { StatsDB } = await import('../../../main/stats');
    const db = new StatsDB();
    db.initialize();
    const resultZero = db.clearOldData(0);
    expect(resultZero.success).toBe(false);
    expect(resultZero.error).toBe('olderThanDays must be greater than 0');
    const resultNegative = db.clearOldData(-10);
    expect(resultNegative.success).toBe(false);
    expect(resultNegative.error).toBe('olderThanDays must be greater than 0');
  });
  it('should successfully clear old data with valid parameters', async () => {
    // Mock prepare to return statements with expected behavior.
    // Every DELETE reports 5 changed rows via the shared mockStatement.
    mockStatement.all.mockReturnValue([{ id: 'session-1' }, { id: 'session-2' }]);
    mockStatement.run.mockReturnValue({ changes: 5 });
    const { StatsDB } = await import('../../../main/stats');
    const db = new StatsDB();
    db.initialize();
    const result = db.clearOldData(30);
    expect(result.success).toBe(true);
    expect(result.deletedQueryEvents).toBe(5);
    expect(result.deletedAutoRunSessions).toBe(5);
    expect(result.deletedAutoRunTasks).toBe(5);
    expect(result.error).toBeUndefined();
  });
  it('should handle empty results (no old data)', async () => {
    mockStatement.all.mockReturnValue([]);
    mockStatement.run.mockReturnValue({ changes: 0 });
    const { StatsDB } = await import('../../../main/stats');
    const db = new StatsDB();
    db.initialize();
    const result = db.clearOldData(365);
    expect(result.success).toBe(true);
    expect(result.deletedQueryEvents).toBe(0);
    expect(result.deletedAutoRunSessions).toBe(0);
    expect(result.deletedAutoRunTasks).toBe(0);
    expect(result.error).toBeUndefined();
  });
  it('should calculate correct cutoff time based on days', async () => {
    // Capture the cutoff timestamp bound to the query_events DELETE so we
    // can verify the days->milliseconds conversion.
    let capturedCutoffTime: number | null = null;
    mockDb.prepare.mockImplementation((sql: string) => {
      return {
        run: vi.fn((cutoff: number) => {
          if (sql.includes('DELETE FROM query_events')) {
            capturedCutoffTime = cutoff;
          }
          return { changes: 0 };
        }),
        get: mockStatement.get,
        all: vi.fn(() => []),
      };
    });
    const { StatsDB } = await import('../../../main/stats');
    const db = new StatsDB();
    db.initialize();
    const beforeCall = Date.now();
    db.clearOldData(7);
    const afterCall = Date.now();
    // Cutoff should be approximately 7 days ago (±1s slack for test runtime)
    const expectedCutoff = beforeCall - 7 * 24 * 60 * 60 * 1000;
    expect(capturedCutoffTime).not.toBeNull();
    expect(capturedCutoffTime!).toBeGreaterThanOrEqual(expectedCutoff - 1000);
    expect(capturedCutoffTime!).toBeLessThanOrEqual(afterCall - 7 * 24 * 60 * 60 * 1000 + 1000);
  });
  it('should handle database errors gracefully', async () => {
    // Fail only the query_events DELETE; the error must be reported, not thrown
    mockDb.prepare.mockImplementation((sql: string) => {
      if (sql.includes('DELETE FROM query_events')) {
        return {
          run: vi.fn(() => {
            throw new Error('Database locked');
          }),
        };
      }
      return mockStatement;
    });
    const { StatsDB } = await import('../../../main/stats');
    const db = new StatsDB();
    db.initialize();
    const result = db.clearOldData(30);
    expect(result.success).toBe(false);
    expect(result.error).toBe('Database locked');
    expect(result.deletedQueryEvents).toBe(0);
    expect(result.deletedAutoRunSessions).toBe(0);
    expect(result.deletedAutoRunTasks).toBe(0);
  });
  it('should support various time periods', async () => {
    mockStatement.all.mockReturnValue([]);
    mockStatement.run.mockReturnValue({ changes: 0 });
    const { StatsDB } = await import('../../../main/stats');
    const db = new StatsDB();
    db.initialize();
    // Test common time periods from Settings UI
    const periods = [7, 30, 90, 180, 365];
    for (const days of periods) {
      const result = db.clearOldData(days);
      expect(result.success).toBe(true);
    }
  });
});
// =====================================================================
});

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,732 @@
/**
* Tests for query event CRUD operations, filtering, and CSV export.
*
* Note: better-sqlite3 is a native module compiled for Electron's Node version.
* Direct testing with the native module in vitest is not possible without
* electron-rebuild for the vitest runtime. These tests use mocked database
* operations to verify the logic without requiring the actual native module.
*
* For full integration testing of the SQLite database, use the Electron test
* environment (e2e tests) where the native module is properly loaded.
*/
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import * as path from 'path';
import * as os from 'os';
// Track Database constructor calls to verify file path
// NOTE(review): this mock scaffolding duplicates the VACUUM test file's
// setup verbatim — consider extracting a shared test helper.
let lastDbPath: string | null = null;
// Store mock references so they can be accessed in tests.
// prepare() always returns this single shared statement, so tests shape
// results by overriding run/get/all.
const mockStatement = {
  run: vi.fn(() => ({ changes: 1 })),
  get: vi.fn(() => ({ count: 0, total_duration: 0 })),
  all: vi.fn(() => []),
};
// Database-level mock surface wired into the MockDatabase class below.
const mockDb = {
  pragma: vi.fn(() => [{ user_version: 0 }]),
  prepare: vi.fn(() => mockStatement),
  close: vi.fn(),
  // Transaction mock that immediately executes the function
  transaction: vi.fn((fn: () => void) => {
    return () => fn();
  }),
};
// Mock better-sqlite3 as a class (vi.mock factories are hoisted by vitest
// but run lazily, so referencing mockDb here is safe).
vi.mock('better-sqlite3', () => {
  return {
    default: class MockDatabase {
      constructor(dbPath: string) {
        lastDbPath = dbPath;
      }
      pragma = mockDb.pragma;
      prepare = mockDb.prepare;
      close = mockDb.close;
      transaction = mockDb.transaction;
    },
  };
});
// Mock electron's app module with trackable userData path
const mockUserDataPath = path.join(os.tmpdir(), 'maestro-test-stats-db');
vi.mock('electron', () => ({
  app: {
    getPath: vi.fn((name: string) => {
      if (name === 'userData') return mockUserDataPath;
      return os.tmpdir();
    }),
  },
}));
// Track fs calls; each fs function delegates to a vi.fn so tests can
// override behavior per case.
const mockFsExistsSync = vi.fn(() => true);
const mockFsMkdirSync = vi.fn();
const mockFsCopyFileSync = vi.fn();
const mockFsUnlinkSync = vi.fn();
const mockFsRenameSync = vi.fn();
const mockFsStatSync = vi.fn(() => ({ size: 1024 }));
const mockFsReadFileSync = vi.fn(() => '0'); // Default: old timestamp (triggers vacuum check)
const mockFsWriteFileSync = vi.fn();
// Mock fs
vi.mock('fs', () => ({
  existsSync: (...args: unknown[]) => mockFsExistsSync(...args),
  mkdirSync: (...args: unknown[]) => mockFsMkdirSync(...args),
  copyFileSync: (...args: unknown[]) => mockFsCopyFileSync(...args),
  unlinkSync: (...args: unknown[]) => mockFsUnlinkSync(...args),
  renameSync: (...args: unknown[]) => mockFsRenameSync(...args),
  statSync: (...args: unknown[]) => mockFsStatSync(...args),
  readFileSync: (...args: unknown[]) => mockFsReadFileSync(...args),
  writeFileSync: (...args: unknown[]) => mockFsWriteFileSync(...args),
}));
// Mock logger so tests can assert on info/warn/error/debug calls.
vi.mock('../../../main/utils/logger', () => ({
  logger: {
    info: vi.fn(),
    warn: vi.fn(),
    error: vi.fn(),
    debug: vi.fn(),
  },
}));
// Import types only - we'll test the type definitions
import type {
QueryEvent,
AutoRunSession,
AutoRunTask,
SessionLifecycleEvent,
StatsTimeRange,
StatsFilters,
StatsAggregation,
} from '../../../shared/stats-types';
describe('Stats aggregation and filtering', () => {
// Reset shared mock state before each test; resetModules afterwards forces
// a fresh dynamic import of '../../../main/stats' in the next test.
beforeEach(() => {
  vi.clearAllMocks();
  mockDb.pragma.mockReturnValue([{ user_version: 0 }]);
  mockDb.prepare.mockReturnValue(mockStatement);
  mockStatement.run.mockReturnValue({ changes: 1 });
  mockFsExistsSync.mockReturnValue(true);
});
afterEach(() => {
  vi.resetModules();
});
// getQueryEvents(range, filters): smoke tests that each supported filter
// combination executes without error.
// NOTE(review): several assertions only check that all() was called — they
// do not actually verify the generated SQL contains the filter named in the
// comments. Consider asserting on mockDb.prepare's SQL argument.
describe('time range filtering', () => {
  it('should filter query events by day range', async () => {
    mockStatement.all.mockReturnValue([]);
    const { StatsDB } = await import('../../../main/stats');
    const db = new StatsDB();
    db.initialize();
    db.getQueryEvents('day');
    // Verify the SQL includes time filter
    const prepareCall = mockDb.prepare.mock.calls.find((call) =>
      (call[0] as string).includes('SELECT * FROM query_events')
    );
    expect(prepareCall).toBeDefined();
  });
  it('should filter with agentType filter', async () => {
    mockStatement.all.mockReturnValue([]);
    const { StatsDB } = await import('../../../main/stats');
    const db = new StatsDB();
    db.initialize();
    db.getQueryEvents('week', { agentType: 'claude-code' });
    // Verify the SQL includes agent_type filter
    expect(mockStatement.all).toHaveBeenCalled();
  });
  it('should filter with source filter', async () => {
    mockStatement.all.mockReturnValue([]);
    const { StatsDB } = await import('../../../main/stats');
    const db = new StatsDB();
    db.initialize();
    db.getQueryEvents('month', { source: 'auto' });
    // Verify the SQL includes source filter
    expect(mockStatement.all).toHaveBeenCalled();
  });
  it('should filter with projectPath filter', async () => {
    mockStatement.all.mockReturnValue([]);
    const { StatsDB } = await import('../../../main/stats');
    const db = new StatsDB();
    db.initialize();
    db.getQueryEvents('year', { projectPath: '/test/project' });
    // Verify the SQL includes project_path filter
    expect(mockStatement.all).toHaveBeenCalled();
  });
  it('should filter with sessionId filter', async () => {
    mockStatement.all.mockReturnValue([]);
    const { StatsDB } = await import('../../../main/stats');
    const db = new StatsDB();
    db.initialize();
    db.getQueryEvents('all', { sessionId: 'session-123' });
    // Verify the SQL includes session_id filter
    expect(mockStatement.all).toHaveBeenCalled();
  });
  it('should combine multiple filters', async () => {
    mockStatement.all.mockReturnValue([]);
    const { StatsDB } = await import('../../../main/stats');
    const db = new StatsDB();
    db.initialize();
    db.getQueryEvents('week', {
      agentType: 'claude-code',
      source: 'user',
      projectPath: '/test',
      sessionId: 'session-1',
    });
    // Verify all parameters were passed
    expect(mockStatement.all).toHaveBeenCalled();
  });
});
// getAggregatedStats: totals, average duration, and per-agent breakdown.
describe('aggregation queries', () => {
  it('should compute aggregated stats correctly', async () => {
    mockStatement.get.mockReturnValue({ count: 100, total_duration: 500000 });
    mockStatement.all.mockReturnValue([
      { agent_type: 'claude-code', count: 70, duration: 350000 },
      { agent_type: 'opencode', count: 30, duration: 150000 },
    ]);
    const { StatsDB } = await import('../../../main/stats');
    const db = new StatsDB();
    db.initialize();
    const stats = db.getAggregatedStats('week');
    expect(stats.totalQueries).toBe(100);
    expect(stats.totalDuration).toBe(500000);
    // avgDuration = totalDuration / totalQueries = 500000 / 100
    expect(stats.avgDuration).toBe(5000);
  });
  it('should handle empty results for aggregation', async () => {
    mockStatement.get.mockReturnValue({ count: 0, total_duration: 0 });
    mockStatement.all.mockReturnValue([]);
    const { StatsDB } = await import('../../../main/stats');
    const db = new StatsDB();
    db.initialize();
    const stats = db.getAggregatedStats('day');
    // No division-by-zero: avgDuration is 0 when there are no queries
    expect(stats.totalQueries).toBe(0);
    expect(stats.avgDuration).toBe(0);
    expect(stats.byAgent).toEqual({});
  });
});
// exportToCsv: header row plus one line per query event.
// Fixed: the first test previously asserted a header WITHOUT the isRemote
// column while the empty-export test pins the exact header WITH isRemote;
// both tests now agree on the full column list.
describe('CSV export', () => {
  it('should export query events to CSV format', async () => {
    const now = Date.now();
    mockStatement.all.mockReturnValue([
      {
        id: 'event-1',
        session_id: 'session-1',
        agent_type: 'claude-code',
        source: 'user',
        start_time: now,
        duration: 5000,
        project_path: '/test',
        tab_id: 'tab-1',
      },
    ]);
    const { StatsDB } = await import('../../../main/stats');
    const db = new StatsDB();
    db.initialize();
    const csv = db.exportToCsv('week');
    // Verify CSV structure: full header (including isRemote) and row data
    expect(csv).toContain(
      'id,sessionId,agentType,source,startTime,duration,projectPath,tabId,isRemote'
    );
    expect(csv).toContain('event-1');
    expect(csv).toContain('session-1');
    expect(csv).toContain('claude-code');
  });
  it('should handle empty data for CSV export', async () => {
    mockStatement.all.mockReturnValue([]);
    const { StatsDB } = await import('../../../main/stats');
    const db = new StatsDB();
    db.initialize();
    const csv = db.exportToCsv('day');
    // Should only contain headers
    expect(csv).toBe(
      'id,sessionId,agentType,source,startTime,duration,projectPath,tabId,isRemote'
    );
  });
});
});
/**
* Interactive session query event recording tests
*
* These tests verify that query events are properly recorded for interactive
* (user-initiated) sessions, which is the core validation for:
* - [ ] Verify query events are recorded for interactive sessions
*/
describe('Query events recorded for interactive sessions', () => {
// Reset shared mocks before each test.
// NOTE(review): this suite sets user_version to 1 (other suites use 0) —
// presumably to skip migrations during initialize(); confirm against StatsDB.
beforeEach(() => {
  vi.clearAllMocks();
  mockDb.pragma.mockReturnValue([{ user_version: 1 }]);
  mockDb.prepare.mockReturnValue(mockStatement);
  mockStatement.run.mockReturnValue({ changes: 1 });
  mockStatement.all.mockReturnValue([]);
  mockFsExistsSync.mockReturnValue(true);
});
afterEach(() => {
  vi.resetModules();
});
// insertQueryEvent for interactive (source='user') sessions: verifies the
// column-by-column INSERT bindings, NULL handling for optional fields,
// unique event IDs, and multi-agent recording.
describe('user-initiated interactive session recording', () => {
  it('should record query event with source="user" for interactive session', async () => {
    const { StatsDB } = await import('../../../main/stats');
    const db = new StatsDB();
    db.initialize();
    const startTime = Date.now();
    const eventId = db.insertQueryEvent({
      sessionId: 'interactive-session-1',
      agentType: 'claude-code',
      source: 'user', // Interactive session is always 'user'
      startTime,
      duration: 5000,
      projectPath: '/Users/test/myproject',
      tabId: 'tab-1',
    });
    expect(eventId).toBeDefined();
    expect(typeof eventId).toBe('string');
    // Verify the INSERT was called with correct parameters
    const runCalls = mockStatement.run.mock.calls;
    const lastCall = runCalls[runCalls.length - 1];
    // Parameters: id, session_id, agent_type, source, start_time, duration, project_path, tab_id
    expect(lastCall[1]).toBe('interactive-session-1'); // session_id
    expect(lastCall[2]).toBe('claude-code'); // agent_type
    expect(lastCall[3]).toBe('user'); // source
    expect(lastCall[4]).toBe(startTime); // start_time
    expect(lastCall[5]).toBe(5000); // duration
    expect(lastCall[6]).toBe('/Users/test/myproject'); // project_path
    expect(lastCall[7]).toBe('tab-1'); // tab_id
  });
  it('should record interactive query without optional fields', async () => {
    const { StatsDB } = await import('../../../main/stats');
    const db = new StatsDB();
    db.initialize();
    const startTime = Date.now();
    const eventId = db.insertQueryEvent({
      sessionId: 'minimal-session',
      agentType: 'claude-code',
      source: 'user',
      startTime,
      duration: 3000,
      // projectPath and tabId are optional
    });
    expect(eventId).toBeDefined();
    // Verify NULL values for optional fields
    const runCalls = mockStatement.run.mock.calls;
    const lastCall = runCalls[runCalls.length - 1];
    expect(lastCall[6]).toBeNull(); // project_path
    expect(lastCall[7]).toBeNull(); // tab_id
  });
  it('should record multiple interactive queries for the same session', async () => {
    const { StatsDB } = await import('../../../main/stats');
    const db = new StatsDB();
    db.initialize();
    // Clear mocks after initialize() to count only test operations
    mockStatement.run.mockClear();
    const baseTime = Date.now();
    // First query
    const id1 = db.insertQueryEvent({
      sessionId: 'multi-query-session',
      agentType: 'claude-code',
      source: 'user',
      startTime: baseTime,
      duration: 5000,
      projectPath: '/project',
      tabId: 'tab-1',
    });
    // Second query (same session, different tab)
    const id2 = db.insertQueryEvent({
      sessionId: 'multi-query-session',
      agentType: 'claude-code',
      source: 'user',
      startTime: baseTime + 10000,
      duration: 3000,
      projectPath: '/project',
      tabId: 'tab-2',
    });
    // Third query (same session, same tab as first)
    const id3 = db.insertQueryEvent({
      sessionId: 'multi-query-session',
      agentType: 'claude-code',
      source: 'user',
      startTime: baseTime + 20000,
      duration: 7000,
      projectPath: '/project',
      tabId: 'tab-1',
    });
    // All should have unique IDs
    expect(id1).not.toBe(id2);
    expect(id2).not.toBe(id3);
    expect(id1).not.toBe(id3);
    // All should be recorded (3 INSERT calls after initialization)
    expect(mockStatement.run).toHaveBeenCalledTimes(3);
  });
  it('should record interactive queries with different agent types', async () => {
    const { StatsDB } = await import('../../../main/stats');
    const db = new StatsDB();
    db.initialize();
    // Clear mocks after initialize() to count only test operations
    mockStatement.run.mockClear();
    const startTime = Date.now();
    // Claude Code query
    const claudeId = db.insertQueryEvent({
      sessionId: 'session-1',
      agentType: 'claude-code',
      source: 'user',
      startTime,
      duration: 5000,
    });
    // OpenCode query
    const opencodeId = db.insertQueryEvent({
      sessionId: 'session-2',
      agentType: 'opencode',
      source: 'user',
      startTime: startTime + 10000,
      duration: 3000,
    });
    // Codex query
    const codexId = db.insertQueryEvent({
      sessionId: 'session-3',
      agentType: 'codex',
      source: 'user',
      startTime: startTime + 20000,
      duration: 4000,
    });
    expect(claudeId).toBeDefined();
    expect(opencodeId).toBeDefined();
    expect(codexId).toBeDefined();
    // Verify different agent types were recorded (arg index 2 = agent_type)
    const runCalls = mockStatement.run.mock.calls;
    expect(runCalls[0][2]).toBe('claude-code');
    expect(runCalls[1][2]).toBe('opencode');
    expect(runCalls[2][2]).toBe('codex');
  });
});
// getQueryEvents retrieval paths: filtering by source/sessionId/projectPath
// and the snake_case (DB) -> camelCase (QueryEvent) column mapping.
describe('retrieval of interactive session query events', () => {
  it('should retrieve interactive query events filtered by source=user', async () => {
    const now = Date.now();
    mockStatement.all.mockReturnValue([
      {
        id: 'event-1',
        session_id: 'session-1',
        agent_type: 'claude-code',
        source: 'user',
        start_time: now - 1000,
        duration: 5000,
        project_path: '/project',
        tab_id: 'tab-1',
      },
      {
        id: 'event-2',
        session_id: 'session-2',
        agent_type: 'claude-code',
        source: 'user',
        start_time: now - 2000,
        duration: 3000,
        project_path: '/project',
        tab_id: 'tab-2',
      },
    ]);
    const { StatsDB } = await import('../../../main/stats');
    const db = new StatsDB();
    db.initialize();
    // Filter by source='user' to get only interactive sessions
    const events = db.getQueryEvents('day', { source: 'user' });
    expect(events).toHaveLength(2);
    expect(events[0].source).toBe('user');
    expect(events[1].source).toBe('user');
    expect(events[0].sessionId).toBe('session-1');
    expect(events[1].sessionId).toBe('session-2');
  });
  it('should retrieve interactive query events filtered by sessionId', async () => {
    const now = Date.now();
    mockStatement.all.mockReturnValue([
      {
        id: 'event-1',
        session_id: 'target-session',
        agent_type: 'claude-code',
        source: 'user',
        start_time: now - 1000,
        duration: 5000,
        project_path: '/project',
        tab_id: 'tab-1',
      },
    ]);
    const { StatsDB } = await import('../../../main/stats');
    const db = new StatsDB();
    db.initialize();
    const events = db.getQueryEvents('week', { sessionId: 'target-session' });
    expect(events).toHaveLength(1);
    expect(events[0].sessionId).toBe('target-session');
  });
  it('should retrieve interactive query events filtered by projectPath', async () => {
    const now = Date.now();
    mockStatement.all.mockReturnValue([
      {
        id: 'event-1',
        session_id: 'session-1',
        agent_type: 'claude-code',
        source: 'user',
        start_time: now - 1000,
        duration: 5000,
        project_path: '/specific/project',
        tab_id: 'tab-1',
      },
    ]);
    const { StatsDB } = await import('../../../main/stats');
    const db = new StatsDB();
    db.initialize();
    const events = db.getQueryEvents('month', { projectPath: '/specific/project' });
    expect(events).toHaveLength(1);
    expect(events[0].projectPath).toBe('/specific/project');
  });
  it('should correctly map database columns to QueryEvent interface fields', async () => {
    const now = Date.now();
    mockStatement.all.mockReturnValue([
      {
        id: 'db-event-id',
        session_id: 'db-session-id',
        agent_type: 'claude-code',
        source: 'user',
        start_time: now,
        duration: 5000,
        project_path: '/project/path',
        tab_id: 'tab-123',
      },
    ]);
    const { StatsDB } = await import('../../../main/stats');
    const db = new StatsDB();
    db.initialize();
    const events = db.getQueryEvents('day');
    expect(events).toHaveLength(1);
    const event = events[0];
    // Verify snake_case -> camelCase mapping, field by field
    expect(event.id).toBe('db-event-id');
    expect(event.sessionId).toBe('db-session-id');
    expect(event.agentType).toBe('claude-code');
    expect(event.source).toBe('user');
    expect(event.startTime).toBe(now);
    expect(event.duration).toBe(5000);
    expect(event.projectPath).toBe('/project/path');
    expect(event.tabId).toBe('tab-123');
  });
});
// getAggregatedStats with interactive data: the bySource breakdown must
// separate 'user' and 'auto' counts. The mocks key off call ORDER of
// statement.all() (1st=byAgent, 2nd=bySource, 3rd=byDay), so these tests
// are coupled to the query sequence inside getAggregatedStats.
describe('aggregation includes interactive session data', () => {
  it('should include interactive sessions in aggregated stats', async () => {
    mockStatement.get.mockReturnValue({ count: 10, total_duration: 50000 });
    // The aggregation calls mockStatement.all multiple times for different queries
    // We return based on the call sequence: byAgent, bySource, byDay
    let callCount = 0;
    mockStatement.all.mockImplementation(() => {
      callCount++;
      if (callCount === 1) {
        // byAgent breakdown
        return [{ agent_type: 'claude-code', count: 10, duration: 50000 }];
      }
      if (callCount === 2) {
        // bySource breakdown
        return [{ source: 'user', count: 10 }];
      }
      // byDay breakdown
      return [{ date: '2024-12-28', count: 10, duration: 50000 }];
    });
    const { StatsDB } = await import('../../../main/stats');
    const db = new StatsDB();
    db.initialize();
    const stats = db.getAggregatedStats('week');
    expect(stats.totalQueries).toBe(10);
    expect(stats.totalDuration).toBe(50000);
    expect(stats.avgDuration).toBe(5000);
    // 'auto' defaults to 0 when absent from the breakdown rows
    expect(stats.bySource.user).toBe(10);
    expect(stats.bySource.auto).toBe(0);
  });
  it('should correctly separate user vs auto queries in bySource', async () => {
    mockStatement.get.mockReturnValue({ count: 15, total_duration: 75000 });
    // Return by-source breakdown with both user and auto on second call
    let callCount = 0;
    mockStatement.all.mockImplementation(() => {
      callCount++;
      if (callCount === 2) {
        // bySource breakdown
        return [
          { source: 'user', count: 10 },
          { source: 'auto', count: 5 },
        ];
      }
      return [];
    });
    const { StatsDB } = await import('../../../main/stats');
    const db = new StatsDB();
    db.initialize();
    const stats = db.getAggregatedStats('month');
    expect(stats.bySource.user).toBe(10);
    expect(stats.bySource.auto).toBe(5);
  });
});
describe('timing accuracy for interactive sessions', () => {
  // Returns the positional args of the most recent prepared-statement run() call.
  const lastRunArgs = () => {
    const calls = mockStatement.run.mock.calls;
    return calls[calls.length - 1];
  };
  // Imports a fresh StatsDB module and returns an initialized instance.
  const freshDb = async () => {
    const { StatsDB } = await import('../../../main/stats');
    const db = new StatsDB();
    db.initialize();
    return db;
  };
  it('should preserve exact startTime and duration values', async () => {
    const db = await freshDb();
    const exactStartTime = 1735344000000; // Specific timestamp
    const exactDuration = 12345; // Specific duration in ms
    db.insertQueryEvent({
      sessionId: 'timing-test-session',
      agentType: 'claude-code',
      source: 'user',
      startTime: exactStartTime,
      duration: exactDuration,
    });
    // Positions 4/5 of the INSERT bind list are start_time and duration.
    expect(lastRunArgs()[4]).toBe(exactStartTime); // Exact start_time preserved
    expect(lastRunArgs()[5]).toBe(exactDuration); // Exact duration preserved
  });
  it('should handle zero duration (immediate responses)', async () => {
    const db = await freshDb();
    const eventId = db.insertQueryEvent({
      sessionId: 'zero-duration-session',
      agentType: 'claude-code',
      source: 'user',
      startTime: Date.now(),
      duration: 0, // Zero duration is valid (e.g., cached response)
    });
    expect(eventId).toBeDefined();
    expect(lastRunArgs()[5]).toBe(0);
  });
  it('should handle very long durations', async () => {
    const db = await freshDb();
    const tenMinutesMs = 10 * 60 * 1000;
    const eventId = db.insertQueryEvent({
      sessionId: 'long-duration-session',
      agentType: 'claude-code',
      source: 'user',
      startTime: Date.now(),
      duration: tenMinutesMs,
    });
    expect(eventId).toBeDefined();
    expect(lastRunArgs()[5]).toBe(tenMinutesMs);
  });
});
});
/**
* Comprehensive Auto Run session and task recording verification tests
*
* These tests verify the complete Auto Run tracking workflow:
* 1. Auto Run sessions are properly recorded when batch processing starts
* 2. Individual tasks within sessions are recorded with timing data
* 3. Sessions are updated correctly when batch processing completes
* 4. All data can be retrieved with proper field mapping
*/

View File

@@ -0,0 +1,682 @@
/**
* Tests for StatsDB core class, initialization, and singleton.
*
* Note: better-sqlite3 is a native module compiled for Electron's Node version.
* Direct testing with the native module in vitest is not possible without
* electron-rebuild for the vitest runtime. These tests use mocked database
* operations to verify the logic without requiring the actual native module.
*
* For full integration testing of the SQLite database, use the Electron test
* environment (e2e tests) where the native module is properly loaded.
*/
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import * as path from 'path';
import * as os from 'os';
// Track Database constructor calls to verify file path
let lastDbPath: string | null = null;
// Store mock references so they can be accessed in tests
const mockStatement = {
run: vi.fn(() => ({ changes: 1 })),
get: vi.fn(() => ({ count: 0, total_duration: 0 })),
all: vi.fn(() => []),
};
const mockDb = {
pragma: vi.fn(() => [{ user_version: 0 }]),
prepare: vi.fn(() => mockStatement),
close: vi.fn(),
// Transaction mock that immediately executes the function
transaction: vi.fn((fn: () => void) => {
return () => fn();
}),
};
// Mock better-sqlite3 as a class
// (vi.mock factories are hoisted; they capture mockDb/lastDbPath lazily, so
// the const declarations above are safe to reference here.)
vi.mock('better-sqlite3', () => {
return {
default: class MockDatabase {
constructor(dbPath: string) {
lastDbPath = dbPath;
}
pragma = mockDb.pragma;
prepare = mockDb.prepare;
close = mockDb.close;
transaction = mockDb.transaction;
},
};
});
// Mock electron's app module with trackable userData path
const mockUserDataPath = path.join(os.tmpdir(), 'maestro-test-stats-db');
vi.mock('electron', () => ({
app: {
getPath: vi.fn((name: string) => {
if (name === 'userData') return mockUserDataPath;
return os.tmpdir();
}),
},
}));
// Track fs calls
const mockFsExistsSync = vi.fn(() => true);
const mockFsMkdirSync = vi.fn();
const mockFsCopyFileSync = vi.fn();
const mockFsUnlinkSync = vi.fn();
const mockFsRenameSync = vi.fn();
const mockFsStatSync = vi.fn(() => ({ size: 1024 }));
const mockFsReadFileSync = vi.fn(() => '0'); // Default: old timestamp (triggers vacuum check)
const mockFsWriteFileSync = vi.fn();
// Mock fs
// Each entry delegates to a trackable vi.fn so tests can assert on fs usage.
vi.mock('fs', () => ({
existsSync: (...args: unknown[]) => mockFsExistsSync(...args),
mkdirSync: (...args: unknown[]) => mockFsMkdirSync(...args),
copyFileSync: (...args: unknown[]) => mockFsCopyFileSync(...args),
unlinkSync: (...args: unknown[]) => mockFsUnlinkSync(...args),
renameSync: (...args: unknown[]) => mockFsRenameSync(...args),
statSync: (...args: unknown[]) => mockFsStatSync(...args),
readFileSync: (...args: unknown[]) => mockFsReadFileSync(...args),
writeFileSync: (...args: unknown[]) => mockFsWriteFileSync(...args),
}));
// Mock logger
vi.mock('../../../main/utils/logger', () => ({
logger: {
info: vi.fn(),
warn: vi.fn(),
error: vi.fn(),
debug: vi.fn(),
},
}));
// Import types only - we'll test the type definitions
// NOTE(review): these type imports do not appear to be referenced by the
// tests in this file (which exercise the StatsDB class) — confirm and prune.
import type {
QueryEvent,
AutoRunSession,
AutoRunTask,
SessionLifecycleEvent,
StatsTimeRange,
StatsFilters,
StatsAggregation,
} from '../../../shared/stats-types';
describe('StatsDB class (mocked)', () => {
beforeEach(() => {
// Reset every shared mock to a pristine "fresh database at version 0" state
// so configuration cannot leak between tests.
vi.clearAllMocks();
lastDbPath = null;
mockDb.pragma.mockReturnValue([{ user_version: 0 }]);
mockDb.prepare.mockReturnValue(mockStatement);
mockStatement.run.mockReturnValue({ changes: 1 });
mockStatement.get.mockReturnValue({ count: 0, total_duration: 0 });
mockStatement.all.mockReturnValue([]);
mockFsExistsSync.mockReturnValue(true);
mockFsMkdirSync.mockClear();
});
afterEach(() => {
// Drop the module cache so each test's dynamic import() gets a fresh module.
vi.resetModules();
});
describe('module exports', () => {
  it('should export StatsDB class', async () => {
    const statsModule = await import('../../../main/stats');
    // The class itself must be exported and callable.
    expect(statsModule.StatsDB).toBeDefined();
    expect(typeof statsModule.StatsDB).toBe('function');
  });
  it('should export singleton functions', async () => {
    const statsModule = await import('../../../main/stats');
    // All three singleton lifecycle helpers must be present.
    expect(statsModule.getStatsDB).toBeDefined();
    expect(statsModule.initializeStatsDB).toBeDefined();
    expect(statsModule.closeStatsDB).toBeDefined();
  });
});
describe('StatsDB instantiation', () => {
  it('should create instance without initialization', async () => {
    const { StatsDB } = await import('../../../main/stats');
    const uninitialized = new StatsDB();
    expect(uninitialized).toBeDefined();
    // No connection is opened until initialize() is called explicitly.
    expect(uninitialized.isReady()).toBe(false);
  });
  it('should return database path', async () => {
    const { StatsDB } = await import('../../../main/stats');
    // The path is computable before initialization.
    expect(new StatsDB().getDbPath()).toContain('stats.db');
  });
});
describe('initialization', () => {
  it('should initialize database and set isReady to true', async () => {
    const { StatsDB } = await import('../../../main/stats');
    const db = new StatsDB();
    db.initialize();
    expect(db.isReady()).toBe(true);
  });
  it('should enable WAL mode', async () => {
    const { StatsDB } = await import('../../../main/stats');
    const db = new StatsDB();
    db.initialize();
    expect(mockDb.pragma).toHaveBeenCalledWith('journal_mode = WAL');
  });
  it('should run v1 migration for fresh database', async () => {
    mockDb.pragma.mockImplementation((sql: string) => {
      if (sql === 'user_version') return [{ user_version: 0 }];
      return undefined;
    });
    const { StatsDB } = await import('../../../main/stats');
    const db = new StatsDB();
    db.initialize();
    // Should set user_version to 1 (later migrations will bump it further).
    expect(mockDb.pragma).toHaveBeenCalledWith('user_version = 1');
  });
  it('should skip migration for already migrated database', async () => {
    // BUG FIX: the original test pinned user_version at 1, but the target
    // schema version is 3 (see the migration system API tests), so
    // migrations v2/v3 still ran and the old assertion
    // `not.toHaveBeenCalledWith('user_version = 1')` passed vacuously.
    // Pin the version at the target (3) and assert that no version write of
    // any kind happens — i.e. no migration ran at all.
    mockDb.pragma.mockImplementation((sql: string) => {
      if (sql === 'user_version') return [{ user_version: 3 }];
      return undefined;
    });
    const { StatsDB } = await import('../../../main/stats');
    const db = new StatsDB();
    db.initialize();
    const versionWrites = mockDb.pragma.mock.calls.filter((call) =>
      String(call[0]).startsWith('user_version = ')
    );
    expect(versionWrites).toHaveLength(0);
  });
  it('should create _migrations table on initialization', async () => {
    mockDb.pragma.mockImplementation((sql: string) => {
      if (sql === 'user_version') return [{ user_version: 0 }];
      return undefined;
    });
    const { StatsDB } = await import('../../../main/stats');
    const db = new StatsDB();
    db.initialize();
    // Should have prepared the CREATE TABLE IF NOT EXISTS _migrations statement
    expect(mockDb.prepare).toHaveBeenCalledWith(
      expect.stringContaining('CREATE TABLE IF NOT EXISTS _migrations')
    );
  });
  it('should record successful migration in _migrations table', async () => {
    mockDb.pragma.mockImplementation((sql: string) => {
      if (sql === 'user_version') return [{ user_version: 0 }];
      return undefined;
    });
    const { StatsDB } = await import('../../../main/stats');
    const db = new StatsDB();
    db.initialize();
    // Should have inserted a success record into _migrations
    expect(mockDb.prepare).toHaveBeenCalledWith(
      expect.stringContaining('INSERT OR REPLACE INTO _migrations')
    );
  });
  it('should use transaction for migration atomicity', async () => {
    mockDb.pragma.mockImplementation((sql: string) => {
      if (sql === 'user_version') return [{ user_version: 0 }];
      return undefined;
    });
    const { StatsDB } = await import('../../../main/stats');
    const db = new StatsDB();
    db.initialize();
    // Migrations must run inside a transaction so a failure rolls back cleanly.
    expect(mockDb.transaction).toHaveBeenCalled();
  });
});
describe('migration system API', () => {
beforeEach(() => {
// Default these tests to a database at schema version 1.
// NOTE(review): target version is 3, so with this default initialize()
// still runs migrations v2/v3 unless a test overrides the pragma mock.
vi.clearAllMocks();
mockDb.pragma.mockImplementation((sql: string) => {
if (sql === 'user_version') return [{ user_version: 1 }];
return undefined;
});
mockDb.prepare.mockReturnValue(mockStatement);
mockStatement.run.mockReturnValue({ changes: 1 });
mockStatement.get.mockReturnValue(null);
mockStatement.all.mockReturnValue([]);
mockFsExistsSync.mockReturnValue(true);
});
afterEach(() => {
vi.resetModules();
});
it('should return current version via getCurrentVersion()', async () => {
mockDb.pragma.mockImplementation((sql: string) => {
if (sql === 'user_version') return [{ user_version: 1 }];
return undefined;
});
const { StatsDB } = await import('../../../main/stats');
const db = new StatsDB();
db.initialize();
expect(db.getCurrentVersion()).toBe(1);
});
it('should return target version via getTargetVersion()', async () => {
const { StatsDB } = await import('../../../main/stats');
const db = new StatsDB();
db.initialize();
// Currently we have version 3 migration (v1: initial schema, v2: is_remote column, v3: session_lifecycle table)
expect(db.getTargetVersion()).toBe(3);
});
it('should return false from hasPendingMigrations() when up to date', async () => {
mockDb.pragma.mockImplementation((sql: string) => {
if (sql === 'user_version') return [{ user_version: 3 }];
return undefined;
});
const { StatsDB } = await import('../../../main/stats');
const db = new StatsDB();
db.initialize();
expect(db.hasPendingMigrations()).toBe(false);
});
it('should correctly identify pending migrations based on version difference', async () => {
// This test verifies the hasPendingMigrations() logic
// by checking current version < target version
// Simulate a database that's already at version 3 (target version)
let currentVersion = 3;
mockDb.pragma.mockImplementation((sql: string) => {
if (sql === 'user_version') return [{ user_version: currentVersion }];
// Handle version updates from migration
if (sql.startsWith('user_version = ')) {
currentVersion = parseInt(sql.replace('user_version = ', ''));
}
return undefined;
});
const { StatsDB } = await import('../../../main/stats');
const db = new StatsDB();
db.initialize();
// At version 3, target is 3, so no pending migrations
expect(db.getCurrentVersion()).toBe(3);
expect(db.getTargetVersion()).toBe(3);
expect(db.hasPendingMigrations()).toBe(false);
});
it('should return empty array from getMigrationHistory() when no _migrations table', async () => {
mockStatement.get.mockReturnValue(null); // No table exists
const { StatsDB } = await import('../../../main/stats');
const db = new StatsDB();
db.initialize();
const history = db.getMigrationHistory();
expect(history).toEqual([]);
});
it('should return migration records from getMigrationHistory()', async () => {
const mockMigrationRows = [
{
version: 1,
description: 'Initial schema',
applied_at: 1704067200000,
status: 'success' as const,
error_message: null,
},
];
mockStatement.get.mockReturnValue({ name: '_migrations' }); // Table exists
mockStatement.all.mockReturnValue(mockMigrationRows);
const { StatsDB } = await import('../../../main/stats');
const db = new StatsDB();
db.initialize();
const history = db.getMigrationHistory();
expect(history).toHaveLength(1);
// Row columns are mapped snake_case -> camelCase; null error becomes undefined.
expect(history[0]).toEqual({
version: 1,
description: 'Initial schema',
appliedAt: 1704067200000,
status: 'success',
errorMessage: undefined,
});
});
it('should include errorMessage in migration history for failed migrations', async () => {
const mockMigrationRows = [
{
version: 2,
description: 'Add new column',
applied_at: 1704067200000,
status: 'failed' as const,
error_message: 'SQLITE_ERROR: duplicate column name',
},
];
mockStatement.get.mockReturnValue({ name: '_migrations' });
mockStatement.all.mockReturnValue(mockMigrationRows);
const { StatsDB } = await import('../../../main/stats');
const db = new StatsDB();
db.initialize();
const history = db.getMigrationHistory();
expect(history[0].status).toBe('failed');
expect(history[0].errorMessage).toBe('SQLITE_ERROR: duplicate column name');
});
});
describe('error handling', () => {
  // Imports a fresh module and returns an instance that was never initialized.
  const loadUninitialized = async () => {
    const { StatsDB } = await import('../../../main/stats');
    return new StatsDB();
  };
  it('should throw when calling insertQueryEvent before initialization', async () => {
    const db = await loadUninitialized();
    const insert = () =>
      db.insertQueryEvent({
        sessionId: 'test',
        agentType: 'claude-code',
        source: 'user',
        startTime: Date.now(),
        duration: 1000,
      });
    expect(insert).toThrow('Database not initialized');
  });
  it('should throw when calling getQueryEvents before initialization', async () => {
    const db = await loadUninitialized();
    expect(() => db.getQueryEvents('day')).toThrow('Database not initialized');
  });
  it('should throw when calling getAggregatedStats before initialization', async () => {
    const db = await loadUninitialized();
    expect(() => db.getAggregatedStats('week')).toThrow('Database not initialized');
  });
});
describe('query events', () => {
  it('should insert a query event and return an id', async () => {
    const { StatsDB } = await import('../../../main/stats');
    const statsDb = new StatsDB();
    statsDb.initialize();
    const insertedId = statsDb.insertQueryEvent({
      sessionId: 'session-1',
      agentType: 'claude-code',
      source: 'user',
      startTime: Date.now(),
      duration: 5000,
      projectPath: '/test/project',
      tabId: 'tab-1',
    });
    // A generated string id comes back and the prepared statement actually ran.
    expect(insertedId).toBeDefined();
    expect(typeof insertedId).toBe('string');
    expect(mockStatement.run).toHaveBeenCalled();
  });
  it('should retrieve query events within time range', async () => {
    const storedRow = {
      id: 'event-1',
      session_id: 'session-1',
      agent_type: 'claude-code',
      source: 'user',
      start_time: Date.now(),
      duration: 5000,
      project_path: '/test',
      tab_id: 'tab-1',
    };
    mockStatement.all.mockReturnValue([storedRow]);
    const { StatsDB } = await import('../../../main/stats');
    const statsDb = new StatsDB();
    statsDb.initialize();
    const events = statsDb.getQueryEvents('day');
    expect(events).toHaveLength(1);
    expect(events[0].sessionId).toBe('session-1');
    expect(events[0].agentType).toBe('claude-code');
  });
});
describe('close', () => {
  it('should close the database connection', async () => {
    const { StatsDB } = await import('../../../main/stats');
    const statsDb = new StatsDB();
    statsDb.initialize();
    statsDb.close();
    // The underlying handle is released and the readiness flag is cleared.
    expect(statsDb.isReady()).toBe(false);
    expect(mockDb.close).toHaveBeenCalled();
  });
});
});
/**
* Database file creation verification tests
*
* These tests verify that the database file is created at the correct path
* in the user's application data directory on first launch.
*/
describe('Database file creation on first launch', () => {
beforeEach(() => {
// Restore all shared mocks to the fresh-database defaults before each test.
vi.clearAllMocks();
lastDbPath = null;
mockDb.pragma.mockReturnValue([{ user_version: 0 }]);
mockDb.prepare.mockReturnValue(mockStatement);
mockFsExistsSync.mockReturnValue(true);
mockFsMkdirSync.mockClear();
});
afterEach(() => {
// Clear the module registry so each dynamic import() re-evaluates the module.
vi.resetModules();
});
describe('database path computation', () => {
  it('should compute database path using electron app.getPath("userData")', async () => {
    const { StatsDB } = await import('../../../main/stats');
    const resolvedPath = new StatsDB().getDbPath();
    // The file must live under the mocked userData directory.
    expect(resolvedPath).toContain(mockUserDataPath);
    expect(resolvedPath).toContain('stats.db');
  });
  it('should create database file at userData/stats.db path', async () => {
    const { StatsDB } = await import('../../../main/stats');
    const db = new StatsDB();
    db.initialize();
    // better-sqlite3 (mocked) records the path it was constructed with.
    expect(lastDbPath).toBe(path.join(mockUserDataPath, 'stats.db'));
  });
  it('should use platform-appropriate userData path', async () => {
    const { StatsDB } = await import('../../../main/stats');
    const resolvedPath = new StatsDB().getDbPath();
    // The path should be absolute and end with the stats.db file name.
    expect(path.isAbsolute(resolvedPath)).toBe(true);
    expect(path.basename(resolvedPath)).toBe('stats.db');
  });
});
describe('directory creation', () => {
  it('should create userData directory if it does not exist', async () => {
    mockFsExistsSync.mockReturnValue(false); // pretend the directory is missing
    const { StatsDB } = await import('../../../main/stats');
    new StatsDB().initialize();
    // The directory must be created recursively before opening the DB.
    expect(mockFsMkdirSync).toHaveBeenCalledWith(mockUserDataPath, { recursive: true });
  });
  it('should not create directory if it already exists', async () => {
    mockFsExistsSync.mockReturnValue(true); // directory already present
    const { StatsDB } = await import('../../../main/stats');
    new StatsDB().initialize();
    expect(mockFsMkdirSync).not.toHaveBeenCalled();
  });
});
describe('database initialization', () => {
  // Collects every SQL string passed to the mocked prepare().
  const preparedSql = () => mockDb.prepare.mock.calls.map((call) => String(call[0]));
  it('should open database connection on initialize', async () => {
    const { StatsDB } = await import('../../../main/stats');
    const db = new StatsDB();
    expect(db.isReady()).toBe(false);
    db.initialize();
    expect(db.isReady()).toBe(true);
  });
  it('should only initialize once (idempotent)', async () => {
    mockDb.pragma.mockClear();
    const { StatsDB } = await import('../../../main/stats');
    const db = new StatsDB();
    db.initialize();
    const callsAfterFirst = mockDb.pragma.mock.calls.length;
    db.initialize(); // repeat call must be a no-op
    expect(mockDb.pragma.mock.calls.length).toBe(callsAfterFirst);
  });
  it('should create all three tables on fresh database', async () => {
    const { StatsDB } = await import('../../../main/stats');
    new StatsDB().initialize();
    const sql = preparedSql();
    // Each of the core tables must be created on a fresh database.
    const expectedTables = ['query_events', 'auto_run_sessions', 'auto_run_tasks'];
    for (const table of expectedTables) {
      expect(sql.some((s) => s.includes(`CREATE TABLE IF NOT EXISTS ${table}`))).toBe(true);
    }
  });
  it('should create all required indexes', async () => {
    const { StatsDB } = await import('../../../main/stats');
    new StatsDB().initialize();
    const sql = preparedSql();
    // Verify all 7 indexes are created
    const expectedIndexes = [
      'idx_query_start_time',
      'idx_query_agent_type',
      'idx_query_source',
      'idx_query_session',
      'idx_auto_session_start',
      'idx_task_auto_session',
      'idx_task_start',
    ];
    for (const indexName of expectedIndexes) {
      expect(sql.some((s) => s.includes(indexName))).toBe(true);
    }
  });
});
describe('singleton pattern', () => {
  it('should return same instance from getStatsDB', async () => {
    const { getStatsDB, closeStatsDB } = await import('../../../main/stats');
    const first = getStatsDB();
    // Repeated calls hand back the identical object.
    expect(getStatsDB()).toBe(first);
    closeStatsDB(); // cleanup so later tests get a fresh singleton
  });
  it('should initialize database via initializeStatsDB', async () => {
    const { initializeStatsDB, getStatsDB, closeStatsDB } = await import('../../../main/stats');
    initializeStatsDB();
    expect(getStatsDB().isReady()).toBe(true);
    closeStatsDB(); // cleanup
  });
  it('should close database and reset singleton via closeStatsDB', async () => {
    const { initializeStatsDB, getStatsDB, closeStatsDB } = await import('../../../main/stats');
    initializeStatsDB();
    const original = getStatsDB();
    expect(original.isReady()).toBe(true);
    closeStatsDB();
    // A brand-new, uninitialized instance replaces the closed one.
    const replacement = getStatsDB();
    expect(replacement).not.toBe(original);
    expect(replacement.isReady()).toBe(false);
  });
});
});
/**
* Auto Run session and task recording tests
*/

View File

@@ -0,0 +1,319 @@
/**
* Tests for shared stats type definitions.
*
* Note: better-sqlite3 is a native module compiled for Electron's Node version.
* Direct testing with the native module in vitest is not possible without
* electron-rebuild for the vitest runtime. These tests use mocked database
* operations to verify the logic without requiring the actual native module.
*
* For full integration testing of the SQLite database, use the Electron test
* environment (e2e tests) where the native module is properly loaded.
*/
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import * as path from 'path';
import * as os from 'os';
// Track Database constructor calls to verify file path
// NOTE(review): the mocks below (better-sqlite3, electron, fs, logger) and
// lastDbPath appear to be copy-pasted from the StatsDB test file and are not
// referenced by any test in this file, which only exercises the type
// definitions — confirm and remove.
let lastDbPath: string | null = null;
// Store mock references so they can be accessed in tests
const mockStatement = {
run: vi.fn(() => ({ changes: 1 })),
get: vi.fn(() => ({ count: 0, total_duration: 0 })),
all: vi.fn(() => []),
};
const mockDb = {
pragma: vi.fn(() => [{ user_version: 0 }]),
prepare: vi.fn(() => mockStatement),
close: vi.fn(),
// Transaction mock that immediately executes the function
transaction: vi.fn((fn: () => void) => {
return () => fn();
}),
};
// Mock better-sqlite3 as a class
vi.mock('better-sqlite3', () => {
return {
default: class MockDatabase {
constructor(dbPath: string) {
lastDbPath = dbPath;
}
pragma = mockDb.pragma;
prepare = mockDb.prepare;
close = mockDb.close;
transaction = mockDb.transaction;
},
};
});
// Mock electron's app module with trackable userData path
const mockUserDataPath = path.join(os.tmpdir(), 'maestro-test-stats-db');
vi.mock('electron', () => ({
app: {
getPath: vi.fn((name: string) => {
if (name === 'userData') return mockUserDataPath;
return os.tmpdir();
}),
},
}));
// Track fs calls
const mockFsExistsSync = vi.fn(() => true);
const mockFsMkdirSync = vi.fn();
const mockFsCopyFileSync = vi.fn();
const mockFsUnlinkSync = vi.fn();
const mockFsRenameSync = vi.fn();
const mockFsStatSync = vi.fn(() => ({ size: 1024 }));
const mockFsReadFileSync = vi.fn(() => '0'); // Default: old timestamp (triggers vacuum check)
const mockFsWriteFileSync = vi.fn();
// Mock fs
vi.mock('fs', () => ({
existsSync: (...args: unknown[]) => mockFsExistsSync(...args),
mkdirSync: (...args: unknown[]) => mockFsMkdirSync(...args),
copyFileSync: (...args: unknown[]) => mockFsCopyFileSync(...args),
unlinkSync: (...args: unknown[]) => mockFsUnlinkSync(...args),
renameSync: (...args: unknown[]) => mockFsRenameSync(...args),
statSync: (...args: unknown[]) => mockFsStatSync(...args),
readFileSync: (...args: unknown[]) => mockFsReadFileSync(...args),
writeFileSync: (...args: unknown[]) => mockFsWriteFileSync(...args),
}));
// Mock logger
vi.mock('../../../main/utils/logger', () => ({
logger: {
info: vi.fn(),
warn: vi.fn(),
error: vi.fn(),
debug: vi.fn(),
},
}));
// Import types only - we'll test the type definitions
import type {
QueryEvent,
AutoRunSession,
AutoRunTask,
SessionLifecycleEvent,
StatsTimeRange,
StatsFilters,
StatsAggregation,
} from '../../../shared/stats-types';
describe('stats-types.ts', () => {
  describe('QueryEvent interface', () => {
    it('should define proper QueryEvent structure', () => {
      // Fully-populated event: every optional field supplied.
      const fullEvent: QueryEvent = {
        id: 'test-id',
        sessionId: 'session-1',
        agentType: 'claude-code',
        source: 'user',
        startTime: Date.now(),
        duration: 5000,
        projectPath: '/test/project',
        tabId: 'tab-1',
      };
      expect(fullEvent.id).toBe('test-id');
      expect(fullEvent.sessionId).toBe('session-1');
      expect(fullEvent.source).toBe('user');
    });
    it('should allow optional fields to be undefined', () => {
      // Only required fields supplied; projectPath/tabId must stay absent.
      const minimalEvent: QueryEvent = {
        id: 'test-id',
        sessionId: 'session-1',
        agentType: 'claude-code',
        source: 'auto',
        startTime: Date.now(),
        duration: 3000,
      };
      expect(minimalEvent.projectPath).toBeUndefined();
      expect(minimalEvent.tabId).toBeUndefined();
    });
  });
  describe('AutoRunSession interface', () => {
    it('should define proper AutoRunSession structure', () => {
      const autoRun: AutoRunSession = {
        id: 'auto-run-1',
        sessionId: 'session-1',
        agentType: 'claude-code',
        documentPath: '/docs/task.md',
        startTime: Date.now(),
        duration: 60000,
        tasksTotal: 5,
        tasksCompleted: 3,
        projectPath: '/test/project',
      };
      expect(autoRun.id).toBe('auto-run-1');
      expect(autoRun.tasksTotal).toBe(5);
      expect(autoRun.tasksCompleted).toBe(3);
    });
  });
  describe('AutoRunTask interface', () => {
    it('should define proper AutoRunTask structure', () => {
      const completedTask: AutoRunTask = {
        id: 'task-1',
        autoRunSessionId: 'auto-run-1',
        sessionId: 'session-1',
        agentType: 'claude-code',
        taskIndex: 0,
        taskContent: 'First task content',
        startTime: Date.now(),
        duration: 10000,
        success: true,
      };
      expect(completedTask.id).toBe('task-1');
      expect(completedTask.taskIndex).toBe(0);
      expect(completedTask.success).toBe(true);
    });
    it('should handle failed tasks', () => {
      // Failure case: success is false and taskContent is omitted.
      const failedTask: AutoRunTask = {
        id: 'task-2',
        autoRunSessionId: 'auto-run-1',
        sessionId: 'session-1',
        agentType: 'claude-code',
        taskIndex: 1,
        startTime: Date.now(),
        duration: 5000,
        success: false,
      };
      expect(failedTask.success).toBe(false);
      expect(failedTask.taskContent).toBeUndefined();
    });
  });
  describe('SessionLifecycleEvent interface', () => {
    it('should define proper SessionLifecycleEvent structure for created session', () => {
      const openSession: SessionLifecycleEvent = {
        id: 'lifecycle-1',
        sessionId: 'session-1',
        agentType: 'claude-code',
        projectPath: '/test/project',
        createdAt: Date.now(),
        isRemote: false,
      };
      expect(openSession.id).toBe('lifecycle-1');
      expect(openSession.sessionId).toBe('session-1');
      expect(openSession.agentType).toBe('claude-code');
      // Still open: no close timestamp or duration yet.
      expect(openSession.closedAt).toBeUndefined();
      expect(openSession.duration).toBeUndefined();
    });
    it('should define proper SessionLifecycleEvent structure for closed session', () => {
      // Fixed timestamps avoid flakiness from repeated Date.now() calls.
      const openedAt = 1700000000000;
      const endedAt = 1700003600000; // exactly one hour later
      const closedSession: SessionLifecycleEvent = {
        id: 'lifecycle-2',
        sessionId: 'session-2',
        agentType: 'claude-code',
        projectPath: '/test/project',
        createdAt: openedAt,
        closedAt: endedAt,
        duration: endedAt - openedAt,
        isRemote: true,
      };
      expect(closedSession.closedAt).toBe(endedAt);
      expect(closedSession.duration).toBe(3600000);
      expect(closedSession.isRemote).toBe(true);
    });
    it('should allow optional fields to be undefined', () => {
      const bareSession: SessionLifecycleEvent = {
        id: 'lifecycle-3',
        sessionId: 'session-3',
        agentType: 'opencode',
        createdAt: Date.now(),
      };
      expect(bareSession.projectPath).toBeUndefined();
      expect(bareSession.closedAt).toBeUndefined();
      expect(bareSession.duration).toBeUndefined();
      expect(bareSession.isRemote).toBeUndefined();
    });
  });
  describe('StatsTimeRange type', () => {
    it('should accept valid time ranges', () => {
      // All five range literals must be assignable to StatsTimeRange.
      const allRanges: StatsTimeRange[] = ['day', 'week', 'month', 'year', 'all'];
      expect(allRanges).toHaveLength(5);
      expect(allRanges).toContain('day');
      expect(allRanges).toContain('all');
    });
  });
  describe('StatsFilters interface', () => {
    it('should allow partial filters', () => {
      // Any subset of filter keys is a valid StatsFilters value.
      const byAgent: StatsFilters = { agentType: 'claude-code' };
      const bySource: StatsFilters = { source: 'user' };
      const combined: StatsFilters = {
        agentType: 'opencode',
        source: 'auto',
        projectPath: '/test',
      };
      expect(byAgent.agentType).toBe('claude-code');
      expect(bySource.source).toBe('user');
      expect(combined.projectPath).toBe('/test');
    });
  });
  describe('StatsAggregation interface', () => {
    it('should define proper aggregation structure', () => {
      const sample: StatsAggregation = {
        totalQueries: 100,
        totalDuration: 500000,
        avgDuration: 5000,
        byAgent: {
          'claude-code': { count: 70, duration: 350000 },
          opencode: { count: 30, duration: 150000 },
        },
        bySource: { user: 60, auto: 40 },
        byLocation: { local: 80, remote: 20 },
        byDay: [
          { date: '2024-01-01', count: 10, duration: 50000 },
          { date: '2024-01-02', count: 15, duration: 75000 },
        ],
        byHour: [
          { hour: 9, count: 20, duration: 100000 },
          { hour: 10, count: 25, duration: 125000 },
        ],
        // Session lifecycle fields
        totalSessions: 15,
        sessionsByAgent: {
          'claude-code': 10,
          opencode: 5,
        },
        sessionsByDay: [
          { date: '2024-01-01', count: 3 },
          { date: '2024-01-02', count: 5 },
        ],
        avgSessionDuration: 1800000,
      };
      // Query-side aggregates
      expect(sample.totalQueries).toBe(100);
      expect(sample.byAgent['claude-code'].count).toBe(70);
      expect(sample.bySource.user).toBe(60);
      expect(sample.byDay).toHaveLength(2);
      // Session lifecycle aggregates
      expect(sample.totalSessions).toBe(15);
      expect(sample.sessionsByAgent['claude-code']).toBe(10);
      expect(sample.sessionsByDay).toHaveLength(2);
      expect(sample.avgSessionDuration).toBe(1800000);
    });
  });
});

View File

@@ -0,0 +1,416 @@
/**
* Tests for ClaudeSessionStorage
*
* Verifies:
* - Session origin registration and retrieval
* - Session naming and starring
* - Context usage tracking
* - Origin info attachment to sessions
*/
import { describe, it, expect, vi, beforeEach } from 'vitest';
import { ClaudeSessionStorage } from '../../../main/storage/claude-session-storage';
import type { SshRemoteConfig } from '../../../shared/types';
import type Store from 'electron-store';
import type { ClaudeSessionOriginsData } from '../../../main/storage/claude-session-storage';
// Mock electron-store
// All reads/writes are backed by this plain object so tests can seed and
// inspect persisted state directly.
const mockStoreData: Record<string, unknown> = {};
vi.mock('electron-store', () => {
return {
default: vi.fn().mockImplementation(() => ({
get: vi.fn((key: string, defaultValue?: unknown) => {
return mockStoreData[key] ?? defaultValue;
}),
set: vi.fn((key: string, value: unknown) => {
mockStoreData[key] = value;
}),
store: mockStoreData,
})),
};
});
// Mock logger
vi.mock('../../../main/utils/logger', () => ({
logger: {
info: vi.fn(),
debug: vi.fn(),
warn: vi.fn(),
error: vi.fn(),
},
}));
// Mock fs/promises
// Bare vi.fn() stubs: individual tests configure return values as needed.
vi.mock('fs/promises', () => ({
default: {
access: vi.fn(),
readdir: vi.fn(),
stat: vi.fn(),
readFile: vi.fn(),
writeFile: vi.fn(),
},
}));
// Mock remote-fs utilities
vi.mock('../../../main/utils/remote-fs', () => ({
readDirRemote: vi.fn(),
readFileRemote: vi.fn(),
statRemote: vi.fn(),
}));
// Mock statsCache
vi.mock('../../../main/utils/statsCache', () => ({
encodeClaudeProjectPath: vi.fn((projectPath: string) => {
// Simple encoding for tests - replace / with -
return projectPath.replace(/\//g, '-').replace(/^-/, '');
}),
}));
// Mock pricing
// Fixed cost so assertions on cost math are deterministic.
vi.mock('../../../main/utils/pricing', () => ({
calculateClaudeCost: vi.fn(() => 0.05),
}));
// Unit coverage for ClaudeSessionStorage against the mocked electron-store
// above. Groups: origin bookkeeping (register / name / star / context usage),
// session-file path resolution, agent identity, edge cases, and persistence.
describe('ClaudeSessionStorage', () => {
  let storage: ClaudeSessionStorage;
  // Minimal store double matching the surface ClaudeSessionStorage consumes.
  let mockStore: {
    get: ReturnType<typeof vi.fn>;
    set: ReturnType<typeof vi.fn>;
    store: Record<string, unknown>;
  };
  beforeEach(() => {
    vi.clearAllMocks();
    // Reset mock store data
    Object.keys(mockStoreData).forEach((key) => delete mockStoreData[key]);
    mockStoreData['origins'] = {};
    mockStore = {
      get: vi.fn((key: string, defaultValue?: unknown) => {
        return mockStoreData[key] ?? defaultValue;
      }),
      set: vi.fn((key: string, value: unknown) => {
        mockStoreData[key] = value;
      }),
      store: mockStoreData,
    };
    // Create storage with mock store
    storage = new ClaudeSessionStorage(mockStore as unknown as Store<ClaudeSessionOriginsData>);
  });
  describe('Origin Management', () => {
    describe('registerSessionOrigin', () => {
      it('should register a user session origin', () => {
        storage.registerSessionOrigin('/project/path', 'session-123', 'user');
        const origins = storage.getSessionOrigins('/project/path');
        expect(origins['session-123']).toEqual({ origin: 'user' });
      });
      it('should register an auto session origin', () => {
        storage.registerSessionOrigin('/project/path', 'session-456', 'auto');
        const origins = storage.getSessionOrigins('/project/path');
        expect(origins['session-456']).toEqual({ origin: 'auto' });
      });
      it('should register origin with session name', () => {
        storage.registerSessionOrigin('/project/path', 'session-789', 'user', 'My Session');
        const origins = storage.getSessionOrigins('/project/path');
        expect(origins['session-789']).toEqual({
          origin: 'user',
          sessionName: 'My Session',
        });
      });
      it('should handle multiple sessions for same project', () => {
        storage.registerSessionOrigin('/project/path', 'session-1', 'user');
        storage.registerSessionOrigin('/project/path', 'session-2', 'auto');
        storage.registerSessionOrigin('/project/path', 'session-3', 'user', 'Named');
        const origins = storage.getSessionOrigins('/project/path');
        expect(Object.keys(origins)).toHaveLength(3);
      });
      it('should handle multiple projects', () => {
        // Origins are keyed per project path; no cross-project bleed-through.
        storage.registerSessionOrigin('/project/a', 'session-a', 'user');
        storage.registerSessionOrigin('/project/b', 'session-b', 'auto');
        expect(storage.getSessionOrigins('/project/a')['session-a']).toBeDefined();
        expect(storage.getSessionOrigins('/project/b')['session-b']).toBeDefined();
        expect(storage.getSessionOrigins('/project/a')['session-b']).toBeUndefined();
      });
      it('should persist to store', () => {
        storage.registerSessionOrigin('/project/path', 'session-123', 'user');
        // NOTE(review): the raw persisted value here is the bare string 'user'
        // (legacy format); getSessionOrigins normalizes it to { origin: 'user' }
        // (see the normalization test below).
        expect(mockStore.set).toHaveBeenCalledWith(
          'origins',
          expect.objectContaining({
            '/project/path': expect.objectContaining({
              'session-123': 'user',
            }),
          })
        );
      });
    });
    describe('updateSessionName', () => {
      it('should update name for existing session with string origin', () => {
        storage.registerSessionOrigin('/project/path', 'session-123', 'user');
        storage.updateSessionName('/project/path', 'session-123', 'New Name');
        const origins = storage.getSessionOrigins('/project/path');
        expect(origins['session-123']).toEqual({
          origin: 'user',
          sessionName: 'New Name',
        });
      });
      it('should update name for existing session with object origin', () => {
        storage.registerSessionOrigin('/project/path', 'session-123', 'user', 'Old Name');
        storage.updateSessionName('/project/path', 'session-123', 'New Name');
        const origins = storage.getSessionOrigins('/project/path');
        expect(origins['session-123'].sessionName).toBe('New Name');
      });
      it('should create origin entry if session not registered', () => {
        // Unregistered sessions are created on the fly with origin 'user'.
        storage.updateSessionName('/project/path', 'new-session', 'Session Name');
        const origins = storage.getSessionOrigins('/project/path');
        expect(origins['new-session']).toEqual({
          origin: 'user',
          sessionName: 'Session Name',
        });
      });
      it('should preserve existing starred status', () => {
        storage.registerSessionOrigin('/project/path', 'session-123', 'user');
        storage.updateSessionStarred('/project/path', 'session-123', true);
        storage.updateSessionName('/project/path', 'session-123', 'Named');
        const origins = storage.getSessionOrigins('/project/path');
        expect(origins['session-123'].starred).toBe(true);
        expect(origins['session-123'].sessionName).toBe('Named');
      });
    });
    describe('updateSessionStarred', () => {
      it('should star a session', () => {
        storage.registerSessionOrigin('/project/path', 'session-123', 'user');
        storage.updateSessionStarred('/project/path', 'session-123', true);
        const origins = storage.getSessionOrigins('/project/path');
        expect(origins['session-123'].starred).toBe(true);
      });
      it('should unstar a session', () => {
        storage.registerSessionOrigin('/project/path', 'session-123', 'user');
        storage.updateSessionStarred('/project/path', 'session-123', true);
        storage.updateSessionStarred('/project/path', 'session-123', false);
        const origins = storage.getSessionOrigins('/project/path');
        expect(origins['session-123'].starred).toBe(false);
      });
      it('should create origin entry if session not registered', () => {
        storage.updateSessionStarred('/project/path', 'new-session', true);
        const origins = storage.getSessionOrigins('/project/path');
        expect(origins['new-session']).toEqual({
          origin: 'user',
          starred: true,
        });
      });
      it('should preserve existing session name', () => {
        storage.registerSessionOrigin('/project/path', 'session-123', 'user', 'My Session');
        storage.updateSessionStarred('/project/path', 'session-123', true);
        const origins = storage.getSessionOrigins('/project/path');
        expect(origins['session-123'].sessionName).toBe('My Session');
        expect(origins['session-123'].starred).toBe(true);
      });
    });
    describe('updateSessionContextUsage', () => {
      it('should store context usage percentage', () => {
        storage.registerSessionOrigin('/project/path', 'session-123', 'user');
        storage.updateSessionContextUsage('/project/path', 'session-123', 75);
        const origins = storage.getSessionOrigins('/project/path');
        expect(origins['session-123'].contextUsage).toBe(75);
      });
      it('should create origin entry if session not registered', () => {
        storage.updateSessionContextUsage('/project/path', 'new-session', 50);
        const origins = storage.getSessionOrigins('/project/path');
        expect(origins['new-session']).toEqual({
          origin: 'user',
          contextUsage: 50,
        });
      });
      it('should preserve existing origin data', () => {
        // Name, starred flag, and origin kind survive a context-usage update.
        storage.registerSessionOrigin('/project/path', 'session-123', 'auto', 'Named');
        storage.updateSessionStarred('/project/path', 'session-123', true);
        storage.updateSessionContextUsage('/project/path', 'session-123', 80);
        const origins = storage.getSessionOrigins('/project/path');
        expect(origins['session-123']).toEqual({
          origin: 'auto',
          sessionName: 'Named',
          starred: true,
          contextUsage: 80,
        });
      });
      it('should update context usage on subsequent calls', () => {
        // Last write wins — only the most recent percentage is kept.
        storage.registerSessionOrigin('/project/path', 'session-123', 'user');
        storage.updateSessionContextUsage('/project/path', 'session-123', 25);
        storage.updateSessionContextUsage('/project/path', 'session-123', 50);
        storage.updateSessionContextUsage('/project/path', 'session-123', 75);
        const origins = storage.getSessionOrigins('/project/path');
        expect(origins['session-123'].contextUsage).toBe(75);
      });
    });
    describe('getSessionOrigins', () => {
      it('should return empty object for project with no sessions', () => {
        const origins = storage.getSessionOrigins('/nonexistent/project');
        expect(origins).toEqual({});
      });
      it('should normalize string origins to SessionOriginInfo format', () => {
        // Simulate legacy string-only origin stored directly
        mockStoreData['origins'] = {
          '/project/path': {
            'session-123': 'user',
          },
        };
        const origins = storage.getSessionOrigins('/project/path');
        expect(origins['session-123']).toEqual({ origin: 'user' });
      });
      it('should return full SessionOriginInfo for object origins', () => {
        storage.registerSessionOrigin('/project/path', 'session-123', 'user', 'Named');
        storage.updateSessionStarred('/project/path', 'session-123', true);
        storage.updateSessionContextUsage('/project/path', 'session-123', 60);
        const origins = storage.getSessionOrigins('/project/path');
        expect(origins['session-123']).toEqual({
          origin: 'user',
          sessionName: 'Named',
          starred: true,
          contextUsage: 60,
        });
      });
    });
  });
  describe('Session Path', () => {
    describe('getSessionPath', () => {
      it('should return correct local path', () => {
        // Local sessions live under <home>/.claude/projects/<encoded>/<id>.jsonl
        const sessionPath = storage.getSessionPath('/project/path', 'session-123');
        expect(sessionPath).toBeDefined();
        expect(sessionPath).toContain('session-123.jsonl');
        expect(sessionPath).toContain('.claude');
        expect(sessionPath).toContain('projects');
      });
      it('should return remote path when sshConfig provided', () => {
        const sshConfig: SshRemoteConfig = {
          id: 'test-remote',
          name: 'Test Remote',
          host: 'remote.example.com',
          port: 22,
          username: 'testuser',
          privateKeyPath: '~/.ssh/id_rsa',
          enabled: true,
          useSshConfig: false,
        };
        // Remote paths are home-relative (~) so they resolve on the remote host.
        const sessionPath = storage.getSessionPath('/project/path', 'session-123', sshConfig);
        expect(sessionPath).toBeDefined();
        expect(sessionPath).toContain('session-123.jsonl');
        expect(sessionPath).toContain('~/.claude/projects');
      });
    });
  });
  describe('Agent ID', () => {
    it('should have correct agent ID', () => {
      expect(storage.agentId).toBe('claude-code');
    });
  });
  describe('Edge Cases', () => {
    it('should handle special characters in project path', () => {
      storage.registerSessionOrigin('/path/with spaces/and-dashes', 'session-1', 'user');
      const origins = storage.getSessionOrigins('/path/with spaces/and-dashes');
      expect(origins['session-1']).toBeDefined();
    });
    it('should handle special characters in session ID', () => {
      storage.registerSessionOrigin('/project', 'session-with-dashes-123', 'user');
      storage.registerSessionOrigin('/project', 'session_with_underscores', 'auto');
      const origins = storage.getSessionOrigins('/project');
      expect(origins['session-with-dashes-123']).toBeDefined();
      expect(origins['session_with_underscores']).toBeDefined();
    });
    it('should handle empty session name', () => {
      storage.registerSessionOrigin('/project', 'session-123', 'user', '');
      const origins = storage.getSessionOrigins('/project');
      // Empty string is falsy, so sessionName is not stored when empty
      expect(origins['session-123']).toEqual({ origin: 'user' });
    });
    it('should handle zero context usage', () => {
      // 0 is a valid percentage and must not be dropped as falsy.
      storage.updateSessionContextUsage('/project', 'session-123', 0);
      const origins = storage.getSessionOrigins('/project');
      expect(origins['session-123'].contextUsage).toBe(0);
    });
    it('should handle 100% context usage', () => {
      storage.updateSessionContextUsage('/project', 'session-123', 100);
      const origins = storage.getSessionOrigins('/project');
      expect(origins['session-123'].contextUsage).toBe(100);
    });
  });
  describe('Storage Persistence', () => {
    it('should call store.set on every origin update', () => {
      // Each mutator writes through immediately — no batching/debouncing.
      storage.registerSessionOrigin('/project', 'session-1', 'user');
      expect(mockStore.set).toHaveBeenCalledTimes(1);
      storage.updateSessionName('/project', 'session-1', 'Name');
      expect(mockStore.set).toHaveBeenCalledTimes(2);
      storage.updateSessionStarred('/project', 'session-1', true);
      expect(mockStore.set).toHaveBeenCalledTimes(3);
      storage.updateSessionContextUsage('/project', 'session-1', 50);
      expect(mockStore.set).toHaveBeenCalledTimes(4);
    });
    it('should always call store.set with origins key', () => {
      storage.registerSessionOrigin('/project', 'session-1', 'user');
      expect(mockStore.set).toHaveBeenCalledWith('origins', expect.any(Object));
    });
  });
});

View File

@@ -10,7 +10,7 @@ import {
applyAgentConfigOverrides,
getContextWindowValue,
} from '../../../main/utils/agent-args';
import type { AgentConfig } from '../../../main/agent-detector';
import type { AgentConfig } from '../../../main/agents';
/**
* Helper to create a minimal AgentConfig for testing.

View File

@@ -0,0 +1,476 @@
/**
* Tests for LiveSessionManager
*
* Verifies:
* - Live session tracking (setLive, setOffline, isLive)
* - AutoRun state management
* - Broadcast callback integration
* - Memory leak prevention (cleanup on offline)
*/
import { describe, it, expect, vi, beforeEach } from 'vitest';
import {
LiveSessionManager,
LiveSessionBroadcastCallbacks,
} from '../../../../main/web-server/managers/LiveSessionManager';
// Mock the logger
// Stubbed so LiveSessionManager's logging is silent during tests.
vi.mock('../../../../main/utils/logger', () => ({
  logger: {
    info: vi.fn(),
    debug: vi.fn(),
    warn: vi.fn(),
    error: vi.fn(),
  },
}));
// Exercises LiveSessionManager: live-session bookkeeping, AutoRun state
// lifecycle, broadcast-callback wiring, and the cleanup paths that prevent
// stale AutoRun state from leaking after a session goes offline.
describe('LiveSessionManager', () => {
  let manager: LiveSessionManager;
  let mockBroadcastCallbacks: LiveSessionBroadcastCallbacks;
  beforeEach(() => {
    vi.clearAllMocks();
    // Fresh manager per test; callbacks are only attached by tests that
    // explicitly call setBroadcastCallbacks.
    manager = new LiveSessionManager();
    mockBroadcastCallbacks = {
      broadcastSessionLive: vi.fn(),
      broadcastSessionOffline: vi.fn(),
      broadcastAutoRunState: vi.fn(),
    };
  });
  describe('Live Session Tracking', () => {
    describe('setSessionLive', () => {
      it('should mark a session as live', () => {
        manager.setSessionLive('session-123');
        expect(manager.isSessionLive('session-123')).toBe(true);
      });
      it('should store agent session ID when provided', () => {
        manager.setSessionLive('session-123', 'agent-session-abc');
        const info = manager.getLiveSessionInfo('session-123');
        expect(info?.agentSessionId).toBe('agent-session-abc');
      });
      it('should record enabledAt timestamp', () => {
        // Bracket the call with Date.now() to bound the recorded timestamp.
        const before = Date.now();
        manager.setSessionLive('session-123');
        const after = Date.now();
        const info = manager.getLiveSessionInfo('session-123');
        expect(info?.enabledAt).toBeGreaterThanOrEqual(before);
        expect(info?.enabledAt).toBeLessThanOrEqual(after);
      });
      it('should broadcast session live when callbacks set', () => {
        manager.setBroadcastCallbacks(mockBroadcastCallbacks);
        manager.setSessionLive('session-123', 'agent-session-abc');
        expect(mockBroadcastCallbacks.broadcastSessionLive).toHaveBeenCalledWith(
          'session-123',
          'agent-session-abc'
        );
      });
      it('should not broadcast when callbacks not set', () => {
        // No error should occur when broadcasting without callbacks
        manager.setSessionLive('session-123');
        expect(manager.isSessionLive('session-123')).toBe(true);
      });
      it('should update existing session info when called again', () => {
        // Re-registering a live session overwrites its info in place.
        manager.setSessionLive('session-123', 'agent-1');
        const firstInfo = manager.getLiveSessionInfo('session-123');
        manager.setSessionLive('session-123', 'agent-2');
        const secondInfo = manager.getLiveSessionInfo('session-123');
        expect(secondInfo?.agentSessionId).toBe('agent-2');
        expect(secondInfo?.enabledAt).toBeGreaterThanOrEqual(firstInfo!.enabledAt);
      });
    });
    describe('setSessionOffline', () => {
      it('should mark a session as offline', () => {
        manager.setSessionLive('session-123');
        expect(manager.isSessionLive('session-123')).toBe(true);
        manager.setSessionOffline('session-123');
        expect(manager.isSessionLive('session-123')).toBe(false);
      });
      it('should broadcast session offline when callbacks set', () => {
        manager.setBroadcastCallbacks(mockBroadcastCallbacks);
        manager.setSessionLive('session-123');
        manager.setSessionOffline('session-123');
        expect(mockBroadcastCallbacks.broadcastSessionOffline).toHaveBeenCalledWith('session-123');
      });
      it('should not broadcast if session was not live', () => {
        // Going offline is a no-op (and no broadcast) for unknown sessions.
        manager.setBroadcastCallbacks(mockBroadcastCallbacks);
        manager.setSessionOffline('never-existed');
        expect(mockBroadcastCallbacks.broadcastSessionOffline).not.toHaveBeenCalled();
      });
      it('should clean up associated AutoRun state (memory leak prevention)', () => {
        manager.setSessionLive('session-123');
        manager.setAutoRunState('session-123', {
          isRunning: true,
          totalTasks: 10,
          completedTasks: 5,
          currentTask: 'Task 5',
        });
        expect(manager.getAutoRunState('session-123')).toBeDefined();
        manager.setSessionOffline('session-123');
        expect(manager.getAutoRunState('session-123')).toBeUndefined();
      });
    });
    describe('isSessionLive', () => {
      it('should return false for non-existent session', () => {
        expect(manager.isSessionLive('non-existent')).toBe(false);
      });
      it('should return true for live session', () => {
        manager.setSessionLive('session-123');
        expect(manager.isSessionLive('session-123')).toBe(true);
      });
      it('should return false after session goes offline', () => {
        manager.setSessionLive('session-123');
        manager.setSessionOffline('session-123');
        expect(manager.isSessionLive('session-123')).toBe(false);
      });
    });
    describe('getLiveSessionInfo', () => {
      it('should return undefined for non-existent session', () => {
        expect(manager.getLiveSessionInfo('non-existent')).toBeUndefined();
      });
      it('should return complete session info', () => {
        manager.setSessionLive('session-123', 'agent-session-abc');
        const info = manager.getLiveSessionInfo('session-123');
        expect(info).toEqual({
          sessionId: 'session-123',
          agentSessionId: 'agent-session-abc',
          enabledAt: expect.any(Number),
        });
      });
    });
    describe('getLiveSessions', () => {
      it('should return empty array when no sessions', () => {
        expect(manager.getLiveSessions()).toEqual([]);
      });
      it('should return all live sessions', () => {
        manager.setSessionLive('session-1');
        manager.setSessionLive('session-2');
        manager.setSessionLive('session-3');
        const sessions = manager.getLiveSessions();
        expect(sessions).toHaveLength(3);
        expect(sessions.map((s) => s.sessionId)).toContain('session-1');
        expect(sessions.map((s) => s.sessionId)).toContain('session-2');
        expect(sessions.map((s) => s.sessionId)).toContain('session-3');
      });
      it('should not include offline sessions', () => {
        manager.setSessionLive('session-1');
        manager.setSessionLive('session-2');
        manager.setSessionOffline('session-1');
        const sessions = manager.getLiveSessions();
        expect(sessions).toHaveLength(1);
        expect(sessions[0].sessionId).toBe('session-2');
      });
    });
    describe('getLiveSessionIds', () => {
      it('should return iterable of session IDs', () => {
        // Returns an iterable, not necessarily an array — materialize to assert.
        manager.setSessionLive('session-1');
        manager.setSessionLive('session-2');
        const ids = Array.from(manager.getLiveSessionIds());
        expect(ids).toHaveLength(2);
        expect(ids).toContain('session-1');
        expect(ids).toContain('session-2');
      });
    });
    describe('getLiveSessionCount', () => {
      it('should return 0 when no sessions', () => {
        expect(manager.getLiveSessionCount()).toBe(0);
      });
      it('should return correct count', () => {
        manager.setSessionLive('session-1');
        manager.setSessionLive('session-2');
        manager.setSessionLive('session-3');
        expect(manager.getLiveSessionCount()).toBe(3);
        manager.setSessionOffline('session-2');
        expect(manager.getLiveSessionCount()).toBe(2);
      });
    });
  });
  describe('AutoRun State Management', () => {
    describe('setAutoRunState', () => {
      it('should store running AutoRun state', () => {
        const state = {
          isRunning: true,
          totalTasks: 10,
          completedTasks: 3,
          currentTask: 'Task 3',
        };
        manager.setAutoRunState('session-123', state);
        expect(manager.getAutoRunState('session-123')).toEqual(state);
      });
      it('should remove state when isRunning is false', () => {
        // Terminal (isRunning: false) updates clear the entry instead of
        // keeping a completed record around.
        manager.setAutoRunState('session-123', {
          isRunning: true,
          totalTasks: 10,
          completedTasks: 3,
          currentTask: 'Task 3',
        });
        manager.setAutoRunState('session-123', {
          isRunning: false,
          totalTasks: 10,
          completedTasks: 10,
          currentTask: 'Complete',
        });
        expect(manager.getAutoRunState('session-123')).toBeUndefined();
      });
      it('should remove state when null is passed', () => {
        manager.setAutoRunState('session-123', {
          isRunning: true,
          totalTasks: 10,
          completedTasks: 3,
          currentTask: 'Task 3',
        });
        manager.setAutoRunState('session-123', null);
        expect(manager.getAutoRunState('session-123')).toBeUndefined();
      });
      it('should broadcast AutoRun state when callbacks set', () => {
        manager.setBroadcastCallbacks(mockBroadcastCallbacks);
        const state = {
          isRunning: true,
          totalTasks: 10,
          completedTasks: 3,
          currentTask: 'Task 3',
        };
        manager.setAutoRunState('session-123', state);
        expect(mockBroadcastCallbacks.broadcastAutoRunState).toHaveBeenCalledWith(
          'session-123',
          state
        );
      });
      it('should broadcast null state when clearing', () => {
        // Clients receive an explicit null so they can drop their local state.
        manager.setBroadcastCallbacks(mockBroadcastCallbacks);
        manager.setAutoRunState('session-123', {
          isRunning: true,
          totalTasks: 10,
          completedTasks: 3,
          currentTask: 'Task 3',
        });
        manager.setAutoRunState('session-123', null);
        expect(mockBroadcastCallbacks.broadcastAutoRunState).toHaveBeenLastCalledWith(
          'session-123',
          null
        );
      });
    });
    describe('getAutoRunState', () => {
      it('should return undefined for non-existent state', () => {
        expect(manager.getAutoRunState('non-existent')).toBeUndefined();
      });
      it('should return stored state', () => {
        const state = {
          isRunning: true,
          totalTasks: 5,
          completedTasks: 2,
          currentTask: 'Task 2',
        };
        manager.setAutoRunState('session-123', state);
        expect(manager.getAutoRunState('session-123')).toEqual(state);
      });
    });
    describe('getAutoRunStates', () => {
      it('should return empty map when no states', () => {
        const states = manager.getAutoRunStates();
        expect(states.size).toBe(0);
      });
      it('should return all stored states', () => {
        manager.setAutoRunState('session-1', {
          isRunning: true,
          totalTasks: 5,
          completedTasks: 1,
          currentTask: 'Task 1',
        });
        manager.setAutoRunState('session-2', {
          isRunning: true,
          totalTasks: 10,
          completedTasks: 5,
          currentTask: 'Task 5',
        });
        const states = manager.getAutoRunStates();
        expect(states.size).toBe(2);
        expect(states.get('session-1')?.totalTasks).toBe(5);
        expect(states.get('session-2')?.totalTasks).toBe(10);
      });
    });
  });
  describe('clearAll', () => {
    it('should mark all live sessions as offline', () => {
      // clearAll broadcasts an offline event for every live session.
      manager.setBroadcastCallbacks(mockBroadcastCallbacks);
      manager.setSessionLive('session-1');
      manager.setSessionLive('session-2');
      manager.setSessionLive('session-3');
      manager.clearAll();
      expect(manager.getLiveSessionCount()).toBe(0);
      expect(mockBroadcastCallbacks.broadcastSessionOffline).toHaveBeenCalledTimes(3);
    });
    it('should clear all AutoRun states', () => {
      manager.setSessionLive('session-1');
      manager.setAutoRunState('session-1', {
        isRunning: true,
        totalTasks: 5,
        completedTasks: 1,
        currentTask: 'Task 1',
      });
      manager.setSessionLive('session-2');
      manager.setAutoRunState('session-2', {
        isRunning: true,
        totalTasks: 10,
        completedTasks: 5,
        currentTask: 'Task 5',
      });
      manager.clearAll();
      expect(manager.getAutoRunStates().size).toBe(0);
    });
    it('should handle being called when already empty', () => {
      // Should not throw
      manager.clearAll();
      expect(manager.getLiveSessionCount()).toBe(0);
    });
  });
  describe('Integration Scenarios', () => {
    it('should handle full session lifecycle', () => {
      manager.setBroadcastCallbacks(mockBroadcastCallbacks);
      // Session comes online
      manager.setSessionLive('session-123', 'agent-abc');
      expect(manager.isSessionLive('session-123')).toBe(true);
      expect(mockBroadcastCallbacks.broadcastSessionLive).toHaveBeenCalled();
      // AutoRun starts
      manager.setAutoRunState('session-123', {
        isRunning: true,
        totalTasks: 5,
        completedTasks: 0,
        currentTask: 'Task 1',
      });
      expect(mockBroadcastCallbacks.broadcastAutoRunState).toHaveBeenCalled();
      // AutoRun progresses
      manager.setAutoRunState('session-123', {
        isRunning: true,
        totalTasks: 5,
        completedTasks: 3,
        currentTask: 'Task 4',
      });
      // AutoRun completes
      manager.setAutoRunState('session-123', {
        isRunning: false,
        totalTasks: 5,
        completedTasks: 5,
        currentTask: 'Complete',
      });
      expect(manager.getAutoRunState('session-123')).toBeUndefined();
      // Session goes offline
      manager.setSessionOffline('session-123');
      expect(manager.isSessionLive('session-123')).toBe(false);
      expect(mockBroadcastCallbacks.broadcastSessionOffline).toHaveBeenCalled();
    });
    it('should handle multiple concurrent sessions', () => {
      manager.setSessionLive('session-1', 'agent-1');
      manager.setSessionLive('session-2', 'agent-2');
      manager.setSessionLive('session-3', 'agent-3');
      manager.setAutoRunState('session-1', {
        isRunning: true,
        totalTasks: 3,
        completedTasks: 1,
        currentTask: 'Task 1',
      });
      manager.setAutoRunState('session-3', {
        isRunning: true,
        totalTasks: 5,
        completedTasks: 2,
        currentTask: 'Task 2',
      });
      expect(manager.getLiveSessionCount()).toBe(3);
      expect(manager.getAutoRunStates().size).toBe(2);
      // Session 2 goes offline (no AutoRun state to clean)
      manager.setSessionOffline('session-2');
      expect(manager.getLiveSessionCount()).toBe(2);
      expect(manager.getAutoRunStates().size).toBe(2);
      // Session 1 goes offline (has AutoRun state)
      manager.setSessionOffline('session-1');
      expect(manager.getLiveSessionCount()).toBe(1);
      expect(manager.getAutoRunStates().size).toBe(1);
      expect(manager.getAutoRunState('session-1')).toBeUndefined();
      expect(manager.getAutoRunState('session-3')).toBeDefined();
    });
  });
});

View File

@@ -460,7 +460,7 @@ describe('HistoryDetailModal', () => {
usageStats: {
inputTokens: 5000,
outputTokens: 1000,
cacheReadInputTokens: 2000, // Included in calculation (occupies context)
cacheReadInputTokens: 2000, // Excluded from calculation (cumulative)
cacheCreationInputTokens: 5000,
contextWindow: 100000,
totalCostUsd: 0.1,
@@ -470,8 +470,8 @@ describe('HistoryDetailModal', () => {
/>
);
// Context = (inputTokens + cacheCreationInputTokens + cacheReadInputTokens) / contextWindow
// (5000 + 5000 + 2000) / 100000 = 12%
// Context = (inputTokens + cacheReadInputTokens + cacheCreationInputTokens) / contextWindow
// (5000 + 2000 + 5000) / 100000 = 12%
expect(screen.getByText('12%')).toBeInTheDocument();
});

View File

@@ -336,6 +336,7 @@ describe('MainPanel', () => {
slashCommandOpen: false,
slashCommands: [],
selectedSlashCommandIndex: 0,
previewFile: null,
markdownEditMode: false,
shortcuts: defaultShortcuts,
rightPanelOpen: true,
@@ -634,8 +635,100 @@ describe('MainPanel', () => {
});
});
// Note: Legacy previewFile tests removed - file preview is now handled via the tab system
// File tabs have their own content rendering and closing behavior
describe('File Preview mode', () => {
it('should render FilePreview when previewFile is set', () => {
const previewFile = { name: 'test.ts', content: 'test content', path: '/test/test.ts' };
render(<MainPanel {...defaultProps} previewFile={previewFile} />);
expect(screen.getByTestId('file-preview')).toBeInTheDocument();
expect(screen.getByText('File Preview: test.ts')).toBeInTheDocument();
});
it('should hide TabBar when file preview is open', () => {
const previewFile = { name: 'test.ts', content: 'test content', path: '/test/test.ts' };
render(<MainPanel {...defaultProps} previewFile={previewFile} />);
expect(screen.queryByTestId('tab-bar')).not.toBeInTheDocument();
});
it('should call setPreviewFile(null) and setActiveFocus when closing preview', () => {
const setPreviewFile = vi.fn();
const setActiveFocus = vi.fn();
const previewFile = { name: 'test.ts', content: 'test content', path: '/test/test.ts' };
render(
<MainPanel
{...defaultProps}
previewFile={previewFile}
setPreviewFile={setPreviewFile}
setActiveFocus={setActiveFocus}
/>
);
fireEvent.click(screen.getByTestId('file-preview-close'));
expect(setPreviewFile).toHaveBeenCalledWith(null);
expect(setActiveFocus).toHaveBeenCalledWith('right');
});
it('should focus file tree container when closing preview (setTimeout callback)', async () => {
vi.useFakeTimers();
const setPreviewFile = vi.fn();
const setActiveFocus = vi.fn();
const previewFile = { name: 'test.ts', content: 'test content', path: '/test/test.ts' };
const fileTreeContainerRef = { current: { focus: vi.fn() } };
render(
<MainPanel
{...defaultProps}
previewFile={previewFile}
setPreviewFile={setPreviewFile}
setActiveFocus={setActiveFocus}
fileTreeContainerRef={fileTreeContainerRef as any}
fileTreeFilterOpen={false}
/>
);
fireEvent.click(screen.getByTestId('file-preview-close'));
// Run the setTimeout callback
await act(async () => {
vi.advanceTimersByTime(1);
});
expect(fileTreeContainerRef.current.focus).toHaveBeenCalled();
vi.useRealTimers();
});
it('should focus file tree filter input when closing preview with filter open', async () => {
vi.useFakeTimers();
const setPreviewFile = vi.fn();
const setActiveFocus = vi.fn();
const previewFile = { name: 'test.ts', content: 'test content', path: '/test/test.ts' };
const fileTreeFilterInputRef = { current: { focus: vi.fn() } };
render(
<MainPanel
{...defaultProps}
previewFile={previewFile}
setPreviewFile={setPreviewFile}
setActiveFocus={setActiveFocus}
fileTreeFilterInputRef={fileTreeFilterInputRef as any}
fileTreeFilterOpen={true}
/>
);
fireEvent.click(screen.getByTestId('file-preview-close'));
// Run the setTimeout callback
await act(async () => {
vi.advanceTimersByTime(1);
});
expect(fileTreeFilterInputRef.current.focus).toHaveBeenCalled();
vi.useRealTimers();
});
});
describe('Tab Bar', () => {
it('should render TabBar in AI mode with tabs', () => {
@@ -1862,7 +1955,7 @@ describe('MainPanel', () => {
<MainPanel {...defaultProps} activeSession={session} getContextColor={getContextColor} />
);
// Context usage should be (50000 + 25000) / 200000 * 100 = 37.5% -> 38%
// Context usage: (50000 + 25000 + 0) / 200000 * 100 = 38% (input + cacheRead + cacheCreation)
expect(getContextColor).toHaveBeenCalledWith(38, theme);
});
});
@@ -2143,41 +2236,6 @@ describe('MainPanel', () => {
expect(writeText).toHaveBeenCalledWith('https://github.com/user/repo.git');
});
it('should open remote URL in system browser when clicked', async () => {
setMockGitStatus('session-1', {
fileCount: 0,
branch: 'main',
remote: 'https://github.com/user/repo.git',
ahead: 0,
behind: 0,
totalAdditions: 0,
totalDeletions: 0,
modifiedCount: 0,
fileChanges: [],
lastUpdated: Date.now(),
});
const session = createSession({ isGitRepo: true });
render(<MainPanel {...defaultProps} activeSession={session} />);
await waitFor(() => {
expect(screen.getByText(/main|GIT/)).toBeInTheDocument();
});
const gitBadge = screen.getByText(/main|GIT/);
fireEvent.mouseEnter(gitBadge.parentElement!);
await waitFor(() => {
expect(screen.getByText('github.com/user/repo')).toBeInTheDocument();
});
// Click the remote URL link
const remoteLink = screen.getByText('github.com/user/repo');
fireEvent.click(remoteLink);
expect(window.maestro.shell.openExternal).toHaveBeenCalledWith('https://github.com/user/repo');
});
});
describe('Edge cases', () => {
@@ -2315,9 +2373,10 @@ describe('MainPanel', () => {
expect(screen.queryByText('Context Window')).not.toBeInTheDocument();
});
it('should cap context usage at 100%', () => {
const getContextColor = vi.fn().mockReturnValue('#ef4444');
it('should use preserved session.contextUsage when accumulated values exceed window', () => {
const getContextColor = vi.fn().mockReturnValue('#22c55e');
const session = createSession({
contextUsage: 45, // Preserved valid percentage from last non-accumulated update
aiTabs: [
{
id: 'tab-1',
@@ -2328,8 +2387,8 @@ describe('MainPanel', () => {
usageStats: {
inputTokens: 150000,
outputTokens: 100000,
cacheReadInputTokens: 100000, // Excluded from calculation (cumulative)
cacheCreationInputTokens: 100000, // Included in calculation
cacheReadInputTokens: 100000, // Accumulated from multi-tool turn
cacheCreationInputTokens: 100000, // Accumulated from multi-tool turn
totalCostUsd: 0.05,
contextWindow: 200000,
},
@@ -2342,8 +2401,9 @@ describe('MainPanel', () => {
<MainPanel {...defaultProps} activeSession={session} getContextColor={getContextColor} />
);
// Context usage: (150000 + 100000) / 200000 = 125% -> capped at 100%
expect(getContextColor).toHaveBeenCalledWith(100, theme);
// raw = 150000 + 100000 + 100000 = 350000 > 200000 (accumulated)
// Falls back to session.contextUsage = 45%
expect(getContextColor).toHaveBeenCalledWith(45, theme);
});
});
@@ -2930,8 +2990,32 @@ describe('MainPanel', () => {
expect(screen.getByText(longMessage)).toBeInTheDocument();
});
// Note: Legacy test for previewFile removed - file preview is now handled via the tab system
// The error banner still displays above file tabs when activeFileTabId is set
it('should still display error banner when previewFile is open', () => {
// The error banner appears above file preview in the layout hierarchy
// This ensures users see critical errors even while previewing files
const previewFile = { name: 'test.ts', content: 'test content', path: '/test/test.ts' };
const session = createSession({
inputMode: 'ai',
aiTabs: [
{
id: 'tab-1',
name: 'Tab 1',
isUnread: false,
createdAt: Date.now(),
agentError: createAgentError(),
},
],
activeTabId: 'tab-1',
});
render(<MainPanel {...defaultProps} activeSession={session} previewFile={previewFile} />);
// Both error banner and file preview should be visible
expect(
screen.getByText('Authentication token has expired. Please re-authenticate.')
).toBeInTheDocument();
expect(screen.getByTestId('file-preview')).toBeInTheDocument();
});
it('should handle error with empty message gracefully', () => {
const session = createSession({
@@ -2993,7 +3077,7 @@ describe('MainPanel', () => {
previousUIState: {
readOnlyMode: false,
saveToHistory: true,
showThinking: 'off',
showThinking: false,
},
});
@@ -3022,7 +3106,7 @@ describe('MainPanel', () => {
previousUIState: {
readOnlyMode: false,
saveToHistory: true,
showThinking: 'off',
showThinking: false,
},
});
@@ -3044,7 +3128,7 @@ describe('MainPanel', () => {
previousUIState: {
readOnlyMode: false,
saveToHistory: true,
showThinking: 'off',
showThinking: false,
},
});
@@ -3063,7 +3147,7 @@ describe('MainPanel', () => {
previousUIState: {
readOnlyMode: false,
saveToHistory: true,
showThinking: 'off',
showThinking: false,
},
});
@@ -3088,7 +3172,7 @@ describe('MainPanel', () => {
previousUIState: {
readOnlyMode: false,
saveToHistory: true,
showThinking: 'off',
showThinking: false,
},
},
{
@@ -3102,151 +3186,4 @@ describe('MainPanel', () => {
expect(screen.getByTestId('wizard-conversation-view')).toBeInTheDocument();
});
});
describe('File Tab Loading State (SSH Remote Files)', () => {
// Helper to create a file preview tab
const createFileTab = (
overrides: Partial<import('../../../renderer/types').FilePreviewTab> = {}
): import('../../../renderer/types').FilePreviewTab => ({
id: 'file-tab-1',
path: '/remote/path/file.ts',
name: 'file',
extension: '.ts',
content: '',
scrollTop: 0,
searchQuery: '',
editMode: false,
editContent: undefined,
createdAt: Date.now(),
lastModified: 0,
...overrides,
});
it('should display loading spinner when file tab isLoading is true', () => {
const fileTab = createFileTab({
sshRemoteId: 'ssh-remote-1',
isLoading: true, // SSH remote file loading
});
const session = createSession({
inputMode: 'ai',
filePreviewTabs: [fileTab],
activeFileTabId: 'file-tab-1',
unifiedTabOrder: [
{ type: 'ai' as const, id: 'tab-1' },
{ type: 'file' as const, id: 'file-tab-1' },
],
});
render(
<MainPanel
{...defaultProps}
activeSession={session}
activeFileTabId="file-tab-1"
activeFileTab={fileTab}
/>
);
// Should display loading text with file name
expect(screen.getByText('Loading file.ts')).toBeInTheDocument();
// Should display "Fetching from remote server..." subtitle
expect(screen.getByText('Fetching from remote server...')).toBeInTheDocument();
});
it('should render FilePreview when file tab isLoading is false', () => {
const fileTab = createFileTab({
content: 'const x = 1;',
lastModified: Date.now(),
sshRemoteId: 'ssh-remote-1',
isLoading: false, // Loading complete
});
const session = createSession({
inputMode: 'ai',
filePreviewTabs: [fileTab],
activeFileTabId: 'file-tab-1',
unifiedTabOrder: [
{ type: 'ai' as const, id: 'tab-1' },
{ type: 'file' as const, id: 'file-tab-1' },
],
});
render(
<MainPanel
{...defaultProps}
activeSession={session}
activeFileTabId="file-tab-1"
activeFileTab={fileTab}
/>
);
// Should render file preview (mocked component)
expect(screen.getByTestId('file-preview')).toBeInTheDocument();
// Should NOT display loading state
expect(screen.queryByText('Fetching from remote server...')).not.toBeInTheDocument();
});
it('should display loading state for file tab without sshRemoteId (local file loading)', () => {
const fileTab = createFileTab({
path: '/local/path/config.json',
name: 'config',
extension: '.json',
isLoading: true, // Even local files can show loading briefly
});
const session = createSession({
inputMode: 'ai',
filePreviewTabs: [fileTab],
activeFileTabId: 'file-tab-1',
unifiedTabOrder: [
{ type: 'ai' as const, id: 'tab-1' },
{ type: 'file' as const, id: 'file-tab-1' },
],
});
render(
<MainPanel
{...defaultProps}
activeSession={session}
activeFileTabId="file-tab-1"
activeFileTab={fileTab}
/>
);
// Should display loading text with file name
expect(screen.getByText('Loading config.json')).toBeInTheDocument();
});
it('should not show loading state when AI tab is active', () => {
const fileTab = createFileTab({
sshRemoteId: 'ssh-remote-1',
isLoading: true, // Loading but not active
});
const session = createSession({
inputMode: 'ai',
filePreviewTabs: [fileTab],
activeFileTabId: null, // AI tab is active, not file tab
activeTabId: 'tab-1',
unifiedTabOrder: [
{ type: 'ai' as const, id: 'tab-1' },
{ type: 'file' as const, id: 'file-tab-1' },
],
});
render(
<MainPanel
{...defaultProps}
activeSession={session}
activeFileTabId={null}
activeFileTab={null}
/>
);
// Should NOT display loading state (file tab is not active)
expect(screen.queryByText('Fetching from remote server...')).not.toBeInTheDocument();
// Should display terminal output (default for AI tab)
expect(screen.getByTestId('terminal-output')).toBeInTheDocument();
});
});
});

View File

@@ -860,4 +860,73 @@ describe('useAtMentionCompletion', () => {
expect(matchingFiles.length).toBeGreaterThan(0);
});
});
// =============================================================================
// PERFORMANCE OPTIMIZATION TESTS
// =============================================================================
describe('performance optimizations', () => {
it('caps file tree traversal at MAX_FILE_TREE_ENTRIES', () => {
// Generate a tree with more than 50k files
const largeFolder: FileNode[] = [];
for (let i = 0; i < 200; i++) {
const children: FileNode[] = [];
for (let j = 0; j < 300; j++) {
children.push(createFile(`file_${i}_${j}.ts`));
}
largeFolder.push(createFolder(`dir_${i}`, children));
}
// This tree has 200 folders + 60,000 files = 60,200 nodes total
const session = createMockSession(largeFolder);
const { result } = renderHook(() => useAtMentionCompletion(session));
// With empty filter, should return at most 15 suggestions
const suggestions = result.current.getSuggestions('');
expect(suggestions.length).toBeLessThanOrEqual(15);
// With a filter that would match many files, should still return max 15
const filtered = result.current.getSuggestions('file');
expect(filtered.length).toBeLessThanOrEqual(15);
});
it('empty filter skips fuzzy matching and returns sorted results', () => {
const session = createMockSession([
createFolder('zebra'),
createFile('banana.ts'),
createFile('apple.ts'),
]);
const { result } = renderHook(() => useAtMentionCompletion(session));
const suggestions = result.current.getSuggestions('');
// Files should come before folders, then alphabetical
expect(suggestions[0].displayText).toBe('apple.ts');
expect(suggestions[1].displayText).toBe('banana.ts');
expect(suggestions[2].displayText).toBe('zebra');
// All scores should be 0 (no fuzzy matching performed)
expect(suggestions.every((s) => s.score === 0)).toBe(true);
});
it('early exits after enough exact substring matches', () => {
// Create 200 files that contain "match" in their name (exact substring matches)
// plus files that would only fuzzy-match
const files: FileNode[] = [];
for (let i = 0; i < 200; i++) {
files.push(createFile(`match_${i}.ts`));
}
// Add some files that would only fuzzy match (no "match" substring)
for (let i = 0; i < 100; i++) {
files.push(createFile(`m_a_t_c_h_${i}.ts`));
}
const session = createMockSession(files);
const { result } = renderHook(() => useAtMentionCompletion(session));
const suggestions = result.current.getSuggestions('match');
// Should still return valid results with max 15
expect(suggestions.length).toBe(15);
// Top results should be exact substring matches (higher score)
expect(suggestions[0].displayText).toContain('match');
});
});
});

View File

@@ -9,7 +9,7 @@
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import { renderHook, act, waitFor } from '@testing-library/react';
import { useGitStatusPolling } from '../../../renderer/hooks';
import { useGitStatusPolling, getScaledPollInterval } from '../../../renderer/hooks';
import type { Session } from '../../../renderer/types';
import { gitService } from '../../../renderer/services/git';
@@ -109,4 +109,37 @@ describe('useGitStatusPolling', () => {
expect(gitService.getStatus).toHaveBeenCalledTimes(1);
});
});
describe('getScaledPollInterval', () => {
it('returns default 30s for 1-3 git sessions', () => {
expect(getScaledPollInterval(30000, 1)).toBe(30000);
expect(getScaledPollInterval(30000, 2)).toBe(30000);
expect(getScaledPollInterval(30000, 3)).toBe(30000);
});
it('returns 45s for 4-7 git sessions', () => {
expect(getScaledPollInterval(30000, 4)).toBe(45000);
expect(getScaledPollInterval(30000, 7)).toBe(45000);
});
it('returns 60s for 8-12 git sessions', () => {
expect(getScaledPollInterval(30000, 8)).toBe(60000);
expect(getScaledPollInterval(30000, 12)).toBe(60000);
});
it('returns 90s for 13+ git sessions', () => {
expect(getScaledPollInterval(30000, 13)).toBe(90000);
expect(getScaledPollInterval(30000, 50)).toBe(90000);
});
it('does not scale custom (non-default) poll intervals', () => {
// A user-configured interval of 10s should not be scaled
expect(getScaledPollInterval(10000, 10)).toBe(10000);
expect(getScaledPollInterval(60000, 20)).toBe(60000);
});
it('returns 30s for zero git sessions', () => {
expect(getScaledPollInterval(30000, 0)).toBe(30000);
});
});
});

View File

@@ -1088,4 +1088,30 @@ describe('useTabCompletion', () => {
expect(filters.length).toBe(5);
});
});
describe('performance optimizations', () => {
it('caps file tree traversal at MAX_FILE_TREE_ENTRIES', () => {
// Generate a tree with more than 50k files
const largeTree: FileNode[] = [];
for (let i = 0; i < 200; i++) {
const children: FileNode[] = [];
for (let j = 0; j < 300; j++) {
children.push({ name: `file_${i}_${j}.ts`, type: 'file' });
}
largeTree.push({ name: `dir_${i}`, type: 'folder', children });
}
// 200 folders + 60,000 files = 60,200 nodes total
const session = createMockSession({
fileTree: largeTree,
shellCwd: '/project',
});
const { result } = renderHook(() => useTabCompletion(session));
// Even with 60k+ files, getSuggestions should work without hanging
// and return at most 15 results
const suggestions = result.current.getSuggestions('file', 'file');
expect(suggestions.length).toBeLessThanOrEqual(15);
});
});
});

View File

@@ -650,9 +650,8 @@ describe('calculateTotalTokens', () => {
const total = calculateTotalTokens(contexts);
// Per Anthropic docs: input + cacheRead + cacheCreation for each context
// (100+50+25) + (300+75+25) = 575
expect(total).toBe(575);
// input + cacheRead + cacheCreation for each context
expect(total).toBe(575); // (100+50+25) + (300+75+25)
});
});
@@ -695,7 +694,7 @@ describe('getContextSummary', () => {
expect(summary.totalSources).toBe(2);
expect(summary.totalLogs).toBe(5);
// Per Anthropic docs: (100+50+25) + (200+75+25) = 475
// (100+50+25) + (200+75+25) = 475 (input + cacheRead + cacheCreation)
expect(summary.estimatedTokens).toBe(475);
expect(summary.byAgent['claude-code']).toBe(1);
expect(summary.byAgent['opencode']).toBe(1);

View File

@@ -1,13 +1,5 @@
/**
* Tests for context usage estimation utilities
*
* Claude Code reports per-turn context window usage directly (no normalization needed).
* Codex reports cumulative session totals, which are normalized in StdoutHandler.
*
* Per Anthropic documentation:
* total_context = input_tokens + cache_read_input_tokens + cache_creation_input_tokens
*
* @see https://platform.claude.com/docs/en/build-with-claude/prompt-caching
*/
import {
@@ -36,13 +28,11 @@ describe('estimateContextUsage', () => {
expect(result).toBe(10);
});
it('should include cacheReadInputTokens in context calculation (per Anthropic docs)', () => {
// Per Anthropic docs: total_context = input + cacheRead + cacheCreation
// Claude Code reports per-turn values directly, Codex is normalized in StdoutHandler
it('should include cacheReadInputTokens in calculation (part of total input context)', () => {
const stats = createStats({
inputTokens: 1000,
outputTokens: 500,
cacheReadInputTokens: 50000, // INCLUDED - represents cached context for this turn
cacheReadInputTokens: 50000,
cacheCreationInputTokens: 5000,
contextWindow: 100000,
});
@@ -51,17 +41,17 @@ describe('estimateContextUsage', () => {
expect(result).toBe(56);
});
it('should cap at 100%', () => {
it('should return null when accumulated tokens exceed context window', () => {
const stats = createStats({
inputTokens: 50000,
outputTokens: 50000,
cacheReadInputTokens: 100000, // Large cached context
cacheCreationInputTokens: 100000, // Large new cache
cacheReadInputTokens: 150000,
cacheCreationInputTokens: 200000,
contextWindow: 200000,
});
const result = estimateContextUsage(stats, 'claude-code');
// (50000 + 100000 + 100000) / 200000 = 125% -> capped at 100%
expect(result).toBe(100);
// (50000 + 150000 + 200000) = 400000 > 200000 -> null (accumulated values)
expect(result).toBeNull();
});
it('should round to nearest integer', () => {
@@ -85,10 +75,16 @@ describe('estimateContextUsage', () => {
expect(result).toBe(5);
});
it('should use claude default context window (200k)', () => {
const stats = createStats({ contextWindow: 0 });
const result = estimateContextUsage(stats, 'claude');
expect(result).toBe(5);
});
it('should use codex default context window (200k) and include output tokens', () => {
const stats = createStats({ contextWindow: 0 });
const result = estimateContextUsage(stats, 'codex');
// Codex includes output tokens: (10000 + 5000 + 0 + 0) / 200000 = 7.5% -> 8%
// Codex includes output tokens: (10000 + 5000 + 0) / 200000 = 7.5% -> 8%
expect(result).toBe(8);
});
@@ -99,11 +95,10 @@ describe('estimateContextUsage', () => {
expect(result).toBe(8);
});
it('should use factory-droid default context window (200k)', () => {
it('should use aider default context window (128k)', () => {
const stats = createStats({ contextWindow: 0 });
const result = estimateContextUsage(stats, 'factory-droid');
// (10000 + 0 + 0) / 200000 = 5%
expect(result).toBe(5);
const result = estimateContextUsage(stats, 'aider');
expect(result).toBe(8);
});
it('should return null for terminal agent', () => {
@@ -140,23 +135,24 @@ describe('estimateContextUsage', () => {
// @ts-expect-error - testing undefined case
stats.cacheReadInputTokens = undefined;
const result = estimateContextUsage(stats, 'claude-code');
// (10000 + 0 + 0) / 100000 = 10%
// (10000 + 0) / 100000 = 10%
expect(result).toBe(10);
});
it('should include cache read tokens in context (represents context window usage)', () => {
// Per Anthropic docs, cacheRead represents tokens retrieved from cache
// and DOES occupy context window space for this turn.
it('should return null when accumulated cacheRead tokens cause total to exceed context window', () => {
// During multi-tool turns, Claude Code accumulates token values across
// internal API calls. When accumulated total exceeds context window,
// return null to signal callers should preserve previous valid percentage.
const stats = createStats({
inputTokens: 500,
outputTokens: 1000,
cacheReadInputTokens: 100000, // Large cached context for this turn
cacheReadInputTokens: 758000, // accumulated across multi-tool turn
cacheCreationInputTokens: 50000,
contextWindow: 200000,
});
const result = estimateContextUsage(stats, 'claude-code');
// (500 + 100000 + 50000) / 200000 = 75%
expect(result).toBe(75);
// (500 + 758000 + 50000) = 808500 > 200000 -> null (accumulated values)
expect(result).toBeNull();
});
});
@@ -177,17 +173,17 @@ describe('estimateContextUsage', () => {
expect(result).toBe(5);
});
it('should handle very large token counts', () => {
it('should return null for very large accumulated token counts', () => {
const stats = createStats({
inputTokens: 250000,
outputTokens: 500000,
cacheReadInputTokens: 50000,
cacheCreationInputTokens: 50000,
cacheReadInputTokens: 500000,
cacheCreationInputTokens: 250000,
contextWindow: 0,
});
const result = estimateContextUsage(stats, 'claude-code');
// (250000 + 50000 + 50000) / 200000 = 175% -> capped at 100%
expect(result).toBe(100);
// (250000 + 500000 + 250000) = 1000000 > 200000 -> null (accumulated values)
expect(result).toBeNull();
});
it('should handle very small percentages', () => {
@@ -198,7 +194,7 @@ describe('estimateContextUsage', () => {
contextWindow: 0,
});
const result = estimateContextUsage(stats, 'claude-code');
// (100 + 0 + 0) / 200000 = 0.05% -> 0% (output excluded for Claude)
// (100 + 0) / 200000 = 0.05% -> 0% (output excluded for Claude)
expect(result).toBe(0);
});
});
@@ -218,28 +214,34 @@ describe('calculateContextTokens', () => {
...overrides,
});
describe('Claude agents (per Anthropic formula: input + cacheRead + cacheCreation)', () => {
it('should include all input-related tokens for claude-code', () => {
describe('Claude agents (input + cacheRead + cacheCreation)', () => {
it('should include input, cacheRead, and cacheCreation tokens for claude-code', () => {
const stats = createStats();
const result = calculateContextTokens(stats, 'claude-code');
// Per Anthropic docs: 10000 + 2000 + 1000 = 13000
// 10000 + 2000 + 1000 = 13000 (excludes output only)
expect(result).toBe(13000);
});
it('should include all input-related tokens when agent is undefined (defaults to Claude)', () => {
it('should include input, cacheRead, and cacheCreation tokens for claude', () => {
const stats = createStats();
const result = calculateContextTokens(stats, 'claude');
expect(result).toBe(13000);
});
it('should include input, cacheRead, and cacheCreation tokens when agent is undefined', () => {
const stats = createStats();
const result = calculateContextTokens(stats);
// Defaults to Claude behavior: input + cacheRead + cacheCreation
// Defaults to Claude behavior
expect(result).toBe(13000);
});
});
describe('OpenAI agents (includes output tokens in combined limit)', () => {
it('should include output tokens for codex', () => {
describe('OpenAI agents (includes output tokens)', () => {
it('should include input, output, and cacheCreation tokens for codex', () => {
const stats = createStats();
const result = calculateContextTokens(stats, 'codex');
// 10000 + 2000 + 1000 + 5000 = 18000 (input + cacheRead + cacheCreation + output)
expect(result).toBe(18000);
// 10000 + 5000 + 1000 = 16000 (input + output + cacheCreation, excludes cacheRead)
expect(result).toBe(16000);
});
});
@@ -266,18 +268,19 @@ describe('calculateContextTokens', () => {
expect(result).toBe(10000);
});
it('should include cacheRead in context calculation (per Anthropic docs)', () => {
// Per Anthropic documentation, total_context = input + cacheRead + cacheCreation
// All three components occupy context window space.
it('should include cacheRead in raw calculation (callers detect accumulated values)', () => {
// calculateContextTokens returns the raw total including cacheRead.
// Callers (estimateContextUsage) detect when total > contextWindow
// and return null to signal accumulated values from multi-tool turns.
const stats = createStats({
inputTokens: 50000,
outputTokens: 9000,
cacheReadInputTokens: 100000, // INCLUDED - represents cached context
cacheCreationInputTokens: 25000,
cacheReadInputTokens: 758000,
cacheCreationInputTokens: 75000,
});
const result = calculateContextTokens(stats, 'claude-code');
// 50000 + 100000 + 25000 = 175000
expect(result).toBe(175000);
// 50000 + 758000 + 75000 = 883000 (raw total, callers check against window)
expect(result).toBe(883000);
});
});
});
@@ -285,9 +288,10 @@ describe('calculateContextTokens', () => {
describe('DEFAULT_CONTEXT_WINDOWS', () => {
it('should have context windows defined for all known agent types', () => {
expect(DEFAULT_CONTEXT_WINDOWS['claude-code']).toBe(200000);
expect(DEFAULT_CONTEXT_WINDOWS['claude']).toBe(200000);
expect(DEFAULT_CONTEXT_WINDOWS['codex']).toBe(200000);
expect(DEFAULT_CONTEXT_WINDOWS['opencode']).toBe(128000);
expect(DEFAULT_CONTEXT_WINDOWS['factory-droid']).toBe(200000);
expect(DEFAULT_CONTEXT_WINDOWS['aider']).toBe(128000);
expect(DEFAULT_CONTEXT_WINDOWS['terminal']).toBe(0);
});
});

View File

@@ -1,313 +0,0 @@
/**
* Tests for the Context Usage Estimation Utilities.
*
* These tests verify:
* - DEFAULT_CONTEXT_WINDOWS constant values
* - COMBINED_CONTEXT_AGENTS membership
* - calculateContextTokens() with various agent types and token fields
* - estimateContextUsage() percentage calculation, fallback logic, and capping
*/
import { describe, it, expect } from 'vitest';
import {
DEFAULT_CONTEXT_WINDOWS,
COMBINED_CONTEXT_AGENTS,
calculateContextTokens,
estimateContextUsage,
type ContextUsageStats,
} from '../../shared/contextUsage';
describe('DEFAULT_CONTEXT_WINDOWS', () => {
it('should have the correct context window for claude-code', () => {
expect(DEFAULT_CONTEXT_WINDOWS['claude-code']).toBe(200000);
});
it('should have the correct context window for codex', () => {
expect(DEFAULT_CONTEXT_WINDOWS['codex']).toBe(200000);
});
it('should have the correct context window for opencode', () => {
expect(DEFAULT_CONTEXT_WINDOWS['opencode']).toBe(128000);
});
it('should have the correct context window for factory-droid', () => {
expect(DEFAULT_CONTEXT_WINDOWS['factory-droid']).toBe(200000);
});
it('should have zero context window for terminal', () => {
expect(DEFAULT_CONTEXT_WINDOWS['terminal']).toBe(0);
});
it('should have entries for all expected agent types', () => {
const expectedKeys = ['claude-code', 'codex', 'opencode', 'factory-droid', 'terminal'];
expect(Object.keys(DEFAULT_CONTEXT_WINDOWS).sort()).toEqual(expectedKeys.sort());
});
});
describe('COMBINED_CONTEXT_AGENTS', () => {
it('should contain codex', () => {
expect(COMBINED_CONTEXT_AGENTS.has('codex')).toBe(true);
});
it('should not contain claude-code', () => {
expect(COMBINED_CONTEXT_AGENTS.has('claude-code')).toBe(false);
});
it('should not contain opencode', () => {
expect(COMBINED_CONTEXT_AGENTS.has('opencode')).toBe(false);
});
it('should not contain factory-droid', () => {
expect(COMBINED_CONTEXT_AGENTS.has('factory-droid')).toBe(false);
});
it('should not contain terminal', () => {
expect(COMBINED_CONTEXT_AGENTS.has('terminal')).toBe(false);
});
it('should have exactly one member', () => {
expect(COMBINED_CONTEXT_AGENTS.size).toBe(1);
});
});
describe('calculateContextTokens', () => {
it('should calculate Claude-style tokens: input + cacheRead + cacheCreation (no output)', () => {
const stats: ContextUsageStats = {
inputTokens: 1000,
cacheReadInputTokens: 5000,
cacheCreationInputTokens: 2000,
outputTokens: 3000,
};
const result = calculateContextTokens(stats, 'claude-code');
expect(result).toBe(8000); // 1000 + 5000 + 2000, output excluded
});
it('should calculate Codex tokens: input + cacheRead + cacheCreation + output (combined)', () => {
const stats: ContextUsageStats = {
inputTokens: 1000,
cacheReadInputTokens: 5000,
cacheCreationInputTokens: 2000,
outputTokens: 3000,
};
const result = calculateContextTokens(stats, 'codex');
expect(result).toBe(11000); // 1000 + 5000 + 2000 + 3000
});
it('should default missing token fields to 0', () => {
const stats: ContextUsageStats = {
inputTokens: 500,
};
const result = calculateContextTokens(stats, 'claude-code');
expect(result).toBe(500); // 500 + 0 + 0
});
it('should handle all undefined token fields', () => {
const stats: ContextUsageStats = {};
const result = calculateContextTokens(stats, 'claude-code');
expect(result).toBe(0);
});
it('should use base formula for terminal agent', () => {
const stats: ContextUsageStats = {
inputTokens: 100,
cacheReadInputTokens: 200,
cacheCreationInputTokens: 300,
outputTokens: 400,
};
const result = calculateContextTokens(stats, 'terminal');
expect(result).toBe(600); // 100 + 200 + 300, no output
});
it('should use base formula when no agentId is provided', () => {
const stats: ContextUsageStats = {
inputTokens: 100,
cacheReadInputTokens: 200,
cacheCreationInputTokens: 300,
outputTokens: 400,
};
const result = calculateContextTokens(stats);
expect(result).toBe(600); // 100 + 200 + 300, no output
});
it('should return 0 when all tokens are zero', () => {
const stats: ContextUsageStats = {
inputTokens: 0,
cacheReadInputTokens: 0,
cacheCreationInputTokens: 0,
outputTokens: 0,
};
const result = calculateContextTokens(stats, 'claude-code');
expect(result).toBe(0);
});
it('should use base formula for opencode agent', () => {
const stats: ContextUsageStats = {
inputTokens: 1000,
cacheReadInputTokens: 2000,
cacheCreationInputTokens: 500,
outputTokens: 1500,
};
const result = calculateContextTokens(stats, 'opencode');
expect(result).toBe(3500); // 1000 + 2000 + 500, output excluded
});
it('should use base formula for factory-droid agent', () => {
const stats: ContextUsageStats = {
inputTokens: 1000,
outputTokens: 2000,
};
const result = calculateContextTokens(stats, 'factory-droid');
expect(result).toBe(1000); // only input, no cacheRead or cacheCreation
});
it('should default outputTokens to 0 for codex when undefined', () => {
const stats: ContextUsageStats = {
inputTokens: 1000,
};
const result = calculateContextTokens(stats, 'codex');
expect(result).toBe(1000); // 1000 + 0 + 0 + 0
});
});
describe('estimateContextUsage', () => {
it('should use contextWindow from stats when provided', () => {
const stats: ContextUsageStats = {
inputTokens: 5000,
cacheReadInputTokens: 0,
cacheCreationInputTokens: 0,
contextWindow: 10000,
};
const result = estimateContextUsage(stats, 'claude-code');
expect(result).toBe(50); // 5000 / 10000 * 100 = 50%
});
it('should fall back to DEFAULT_CONTEXT_WINDOWS when no contextWindow in stats', () => {
const stats: ContextUsageStats = {
inputTokens: 100000,
cacheReadInputTokens: 0,
cacheCreationInputTokens: 0,
};
const result = estimateContextUsage(stats, 'claude-code');
// 100000 / 200000 * 100 = 50%
expect(result).toBe(50);
});
it('should return null for terminal agent', () => {
const stats: ContextUsageStats = {
inputTokens: 100,
};
const result = estimateContextUsage(stats, 'terminal');
expect(result).toBeNull();
});
it('should return null when no agentId and no contextWindow', () => {
const stats: ContextUsageStats = {
inputTokens: 100,
};
const result = estimateContextUsage(stats);
expect(result).toBeNull();
});
it('should return 0 when all tokens are 0', () => {
const stats: ContextUsageStats = {
inputTokens: 0,
cacheReadInputTokens: 0,
cacheCreationInputTokens: 0,
outputTokens: 0,
};
const result = estimateContextUsage(stats, 'claude-code');
expect(result).toBe(0);
});
it('should cap at 100% when tokens exceed context window', () => {
const stats: ContextUsageStats = {
inputTokens: 300000,
cacheReadInputTokens: 0,
cacheCreationInputTokens: 0,
};
const result = estimateContextUsage(stats, 'claude-code');
// 300000 / 200000 * 100 = 150%, capped at 100
expect(result).toBe(100);
});
it('should cap at 100% when using stats contextWindow', () => {
const stats: ContextUsageStats = {
inputTokens: 15000,
contextWindow: 10000,
};
const result = estimateContextUsage(stats, 'claude-code');
expect(result).toBe(100);
});
it('should calculate ~50% usage for claude-code agent', () => {
const stats: ContextUsageStats = {
inputTokens: 50000,
cacheReadInputTokens: 30000,
cacheCreationInputTokens: 20000,
};
const result = estimateContextUsage(stats, 'claude-code');
// (50000 + 30000 + 20000) / 200000 * 100 = 50%
expect(result).toBe(50);
});
it('should include output tokens in calculation for codex agent', () => {
const stats: ContextUsageStats = {
inputTokens: 50000,
cacheReadInputTokens: 0,
cacheCreationInputTokens: 0,
outputTokens: 50000,
};
const result = estimateContextUsage(stats, 'codex');
// (50000 + 0 + 0 + 50000) / 200000 * 100 = 50%
expect(result).toBe(50);
});
it('should use contextWindow from stats even without agentId', () => {
const stats: ContextUsageStats = {
inputTokens: 5000,
contextWindow: 10000,
};
const result = estimateContextUsage(stats);
// 5000 / 10000 * 100 = 50%
expect(result).toBe(50);
});
it('should round the percentage to nearest integer', () => {
const stats: ContextUsageStats = {
inputTokens: 33333,
contextWindow: 100000,
};
const result = estimateContextUsage(stats, 'claude-code');
// 33333 / 100000 * 100 = 33.333 => rounded to 33
expect(result).toBe(33);
});
it('should use opencode default context window of 128000', () => {
const stats: ContextUsageStats = {
inputTokens: 64000,
};
const result = estimateContextUsage(stats, 'opencode');
// 64000 / 128000 * 100 = 50%
expect(result).toBe(50);
});
it('should return null for unknown agent without contextWindow', () => {
const stats: ContextUsageStats = {
inputTokens: 100,
};
// Cast to bypass type checking for an unknown agent
const result = estimateContextUsage(stats, 'unknown-agent' as any);
expect(result).toBeNull();
});
it('should handle contextWindow of 0 by falling back to defaults', () => {
const stats: ContextUsageStats = {
inputTokens: 100000,
contextWindow: 0,
};
const result = estimateContextUsage(stats, 'claude-code');
// contextWindow is 0 (falsy), falls back to default 200000
// 100000 / 200000 * 100 = 50%
expect(result).toBe(50);
});
});

View File

@@ -1,859 +0,0 @@
import { execFileNoThrow } from './utils/execFile';
import { logger } from './utils/logger';
import * as os from 'os';
import * as fs from 'fs';
import * as path from 'path';
import { AgentCapabilities, getAgentCapabilities } from './agent-capabilities';
import { expandTilde, detectNodeVersionManagerBinPaths, buildExpandedEnv } from '../shared/pathUtils';
// Re-export AgentCapabilities for convenience
export { AgentCapabilities } from './agent-capabilities';
// Configuration option types for agent-specific settings
export interface AgentConfigOption {
key: string; // Storage key
type: 'checkbox' | 'text' | 'number' | 'select';
label: string; // UI label
description: string; // Help text
default: any; // Default value
options?: string[]; // For select type
argBuilder?: (value: any) => string[]; // Converts config value to CLI args
}
export interface AgentConfig {
id: string;
name: string;
binaryName: string;
command: string;
args: string[]; // Base args always included (excludes batch mode prefix)
available: boolean;
path?: string;
customPath?: string; // User-specified custom path (shown in UI even if not available)
requiresPty?: boolean; // Whether this agent needs a pseudo-terminal
configOptions?: AgentConfigOption[]; // Agent-specific configuration
hidden?: boolean; // If true, agent is hidden from UI (internal use only)
capabilities: AgentCapabilities; // Agent feature capabilities
// Argument builders for dynamic CLI construction
// These are optional - agents that don't have them use hardcoded behavior
batchModePrefix?: string[]; // Args added before base args for batch mode (e.g., ['run'] for OpenCode)
batchModeArgs?: string[]; // Args only applied in batch mode (e.g., ['--skip-git-repo-check'] for Codex exec)
jsonOutputArgs?: string[]; // Args for JSON output format (e.g., ['--format', 'json'])
resumeArgs?: (sessionId: string) => string[]; // Function to build resume args
readOnlyArgs?: string[]; // Args for read-only/plan mode (e.g., ['--agent', 'plan'])
modelArgs?: (modelId: string) => string[]; // Function to build model selection args (e.g., ['--model', modelId])
yoloModeArgs?: string[]; // Args for YOLO/full-access mode (e.g., ['--dangerously-bypass-approvals-and-sandbox'])
workingDirArgs?: (dir: string) => string[]; // Function to build working directory args (e.g., ['-C', dir])
imageArgs?: (imagePath: string) => string[]; // Function to build image attachment args (e.g., ['-i', imagePath] for Codex)
promptArgs?: (prompt: string) => string[]; // Function to build prompt args (e.g., ['-p', prompt] for OpenCode)
noPromptSeparator?: boolean; // If true, don't add '--' before the prompt in batch mode (OpenCode doesn't support it)
defaultEnvVars?: Record<string, string>; // Default environment variables for this agent (merged with user customEnvVars)
}
/**
 * Static definitions for all supported agents.
 * Runtime detection fields (`available`, `path`, `capabilities`) are omitted
 * here and filled in during agent detection.
 */
export const AGENT_DEFINITIONS: Omit<AgentConfig, 'available' | 'path' | 'capabilities'>[] = [
  {
    id: 'terminal',
    name: 'Terminal',
    // Use platform-appropriate default shell
    binaryName: process.platform === 'win32' ? 'powershell.exe' : 'bash',
    command: process.platform === 'win32' ? 'powershell.exe' : 'bash',
    args: [],
    requiresPty: true,
    hidden: true, // Internal agent, not shown in UI
  },
  {
    id: 'claude-code',
    name: 'Claude Code',
    binaryName: 'claude',
    command: 'claude',
    // YOLO mode (--dangerously-skip-permissions) is always enabled - Maestro requires it
    args: [
      '--print',
      '--verbose',
      '--output-format',
      'stream-json',
      '--dangerously-skip-permissions',
    ],
    resumeArgs: (sessionId: string) => ['--resume', sessionId], // Resume with session ID
    readOnlyArgs: ['--permission-mode', 'plan'], // Read-only/plan mode
  },
  {
    id: 'codex',
    name: 'Codex',
    binaryName: 'codex',
    command: 'codex',
    // Base args for interactive mode (no flags that are exec-only)
    args: [],
    // Codex CLI argument builders
    // Batch mode: codex exec --json --dangerously-bypass-approvals-and-sandbox --skip-git-repo-check [--sandbox read-only] [-C dir] [resume <id>] -- "prompt"
    // Sandbox modes:
    //   - Default (YOLO): --dangerously-bypass-approvals-and-sandbox (full system access, required by Maestro)
    //   - Read-only: --sandbox read-only (can only read files, overrides YOLO)
    batchModePrefix: ['exec'], // Codex uses 'exec' subcommand for batch mode
    batchModeArgs: ['--dangerously-bypass-approvals-and-sandbox', '--skip-git-repo-check'], // Args only valid on 'exec' subcommand
    jsonOutputArgs: ['--json'], // JSON output format (must come before resume subcommand)
    resumeArgs: (sessionId: string) => ['resume', sessionId], // Resume with session/thread ID
    readOnlyArgs: ['--sandbox', 'read-only'], // Read-only/plan mode
    yoloModeArgs: ['--dangerously-bypass-approvals-and-sandbox'], // Full access mode
    workingDirArgs: (dir: string) => ['-C', dir], // Set working directory
    imageArgs: (imagePath: string) => ['-i', imagePath], // Image attachment: codex exec -i /path/to/image.png
    // Agent-specific configuration options shown in UI
    configOptions: [
      {
        key: 'contextWindow',
        type: 'number',
        label: 'Context Window Size',
        description:
          'Maximum context window size in tokens. Required for context usage display. Common values: 400000 (GPT-5.2), 128000 (GPT-4o).',
        default: 400000, // Default for GPT-5.2 models
      },
    ],
  },
  {
    // No argument builders defined - uses hardcoded behavior
    id: 'gemini-cli',
    name: 'Gemini CLI',
    binaryName: 'gemini',
    command: 'gemini',
    args: [],
  },
  {
    // No argument builders defined - uses hardcoded behavior
    id: 'qwen3-coder',
    name: 'Qwen3 Coder',
    binaryName: 'qwen3-coder',
    command: 'qwen3-coder',
    args: [],
  },
  {
    id: 'opencode',
    name: 'OpenCode',
    binaryName: 'opencode',
    command: 'opencode',
    args: [], // Base args (none for OpenCode - batch mode uses 'run' subcommand)
    // OpenCode CLI argument builders
    // Batch mode: opencode run --format json [--model provider/model] [--session <id>] [--agent plan] "prompt"
    // YOLO mode (auto-approve all permissions) is enabled via OPENCODE_CONFIG_CONTENT env var.
    // This prevents OpenCode from prompting for permission on external_directory access, which would hang in batch mode.
    batchModePrefix: ['run'], // OpenCode uses 'run' subcommand for batch mode
    jsonOutputArgs: ['--format', 'json'], // JSON output format
    resumeArgs: (sessionId: string) => ['--session', sessionId], // Resume with session ID
    readOnlyArgs: ['--agent', 'plan'], // Read-only/plan mode
    modelArgs: (modelId: string) => ['--model', modelId], // Model selection (e.g., 'ollama/qwen3:8b')
    imageArgs: (imagePath: string) => ['-f', imagePath], // Image/file attachment: opencode run -f /path/to/image.png -- "prompt"
    noPromptSeparator: true, // OpenCode doesn't need '--' before prompt - yargs handles positional args
    // Default env vars: enable YOLO mode (allow all permissions including external_directory)
    // Users can override by setting customEnvVars in agent config
    defaultEnvVars: {
      OPENCODE_CONFIG_CONTENT: '{"permission":{"*":"allow","external_directory":"allow"}}',
    },
    // Agent-specific configuration options shown in UI
    configOptions: [
      {
        key: 'model',
        type: 'text',
        label: 'Model',
        description:
          'Model to use (e.g., "ollama/qwen3:8b", "anthropic/claude-sonnet-4-20250514"). Leave empty for default.',
        default: '', // Empty string means use OpenCode's default model
        argBuilder: (value: string) => {
          // Only add --model arg if a model is specified
          if (value && value.trim()) {
            return ['--model', value.trim()];
          }
          return [];
        },
      },
      {
        key: 'contextWindow',
        type: 'number',
        label: 'Context Window Size',
        description:
          'Maximum context window size in tokens. Required for context usage display. Varies by model (e.g., 400000 for Claude/GPT-5.2, 128000 for GPT-4o).',
        default: 128000, // Default for common models (GPT-4, etc.)
      },
    ],
  },
  {
    id: 'factory-droid',
    name: 'Factory Droid',
    binaryName: 'droid',
    command: 'droid',
    args: [], // Base args for interactive mode (none)
    requiresPty: false, // Batch mode uses child process
    // Batch mode: droid exec [options] "prompt"
    batchModePrefix: ['exec'],
    // Always skip permissions in batch mode (like Claude Code's --dangerously-skip-permissions)
    // Maestro requires full access to work properly
    batchModeArgs: ['--skip-permissions-unsafe'],
    // JSON output for parsing
    jsonOutputArgs: ['-o', 'stream-json'],
    // Session resume: -s <id> (requires a prompt)
    resumeArgs: (sessionId: string) => ['-s', sessionId],
    // Read-only mode is DEFAULT in droid exec (no flag needed)
    readOnlyArgs: [],
    // YOLO mode (same as batchModeArgs, kept for explicit yoloMode requests)
    yoloModeArgs: ['--skip-permissions-unsafe'],
    // Model selection is handled by configOptions.model.argBuilder below
    // Don't define modelArgs here to avoid duplicate -m flags
    // Working directory
    workingDirArgs: (dir: string) => ['--cwd', dir],
    // File/image input
    imageArgs: (imagePath: string) => ['-f', imagePath],
    // Prompt is positional argument (no separator needed)
    noPromptSeparator: true,
    // Default env vars - don't set NO_COLOR as it conflicts with FORCE_COLOR
    defaultEnvVars: {},
    // UI config options
    // Model IDs from droid CLI (exact IDs required)
    // NOTE: autonomyLevel is NOT configurable - Maestro always uses --skip-permissions-unsafe
    // which conflicts with --auto. This matches Claude Code's behavior.
    configOptions: [
      {
        key: 'model',
        type: 'select',
        label: 'Model',
        description: 'Model to use for Factory Droid',
        // Model IDs from `droid exec --help` (2026-01-22)
        options: [
          '', // Empty = use droid's default (claude-opus-4-5-20251101)
          // OpenAI models
          'gpt-5.1',
          'gpt-5.1-codex',
          'gpt-5.1-codex-max',
          'gpt-5.2',
          // Claude models
          'claude-sonnet-4-5-20250929',
          'claude-opus-4-5-20251101',
          'claude-haiku-4-5-20251001',
          // Google models
          'gemini-3-pro-preview',
        ],
        default: '', // Empty = use droid's default (claude-opus-4-5-20251101)
        argBuilder: (value: string) => (value && value.trim() ? ['-m', value.trim()] : []),
      },
      {
        key: 'reasoningEffort',
        type: 'select',
        label: 'Reasoning Effort',
        description: 'How much the model should reason before responding',
        options: ['', 'low', 'medium', 'high'],
        default: '', // Empty = use droid's default reasoning
        argBuilder: (value: string) => (value && value.trim() ? ['-r', value.trim()] : []),
      },
      {
        key: 'contextWindow',
        type: 'number',
        label: 'Context Window Size',
        description: 'Maximum context window in tokens (for UI display)',
        default: 200000,
      },
    ],
  },
];
/**
 * Detects which agent CLIs are installed on the system and caches the result.
 *
 * Detection strategy, in priority order:
 *  1. User-configured custom path (with Windows .exe/.cmd extension fallback)
 *  2. Direct probing of known installation locations (more reliable in
 *     packaged Electron apps, which do not inherit the user's shell env)
 *  3. `which`/`where` lookup using an expanded PATH
 *
 * Also provides model discovery for agents that support model selection,
 * with a short-lived per-agent cache.
 */
export class AgentDetector {
  // Result of the last full detection run; invalidated by setCustomPaths()/clearCache()
  private cachedAgents: AgentConfig[] | null = null;
  // In-flight detection promise, used to deduplicate concurrent detectAgents() calls
  private detectionInProgress: Promise<AgentConfig[]> | null = null;
  // User-configured agentId -> binary path overrides
  private customPaths: Record<string, string> = {};
  // Cache for model discovery results: agentId -> { models, timestamp }
  private modelCache: Map<string, { models: string[]; timestamp: number }> = new Map();
  // Cache TTL: 5 minutes (model lists don't change frequently)
  private readonly MODEL_CACHE_TTL_MS = 5 * 60 * 1000;
  /**
   * Set custom paths for agents (from user configuration)
   */
  setCustomPaths(paths: Record<string, string>): void {
    this.customPaths = paths;
    // Clear cache when custom paths change
    this.cachedAgents = null;
  }
  /**
   * Get the current custom paths (defensive copy)
   */
  getCustomPaths(): Record<string, string> {
    return { ...this.customPaths };
  }
  /**
   * Detect which agents are available on the system
   * Uses promise deduplication to prevent parallel detection when multiple calls arrive simultaneously
   */
  async detectAgents(): Promise<AgentConfig[]> {
    if (this.cachedAgents) {
      return this.cachedAgents;
    }
    // If detection is already in progress, return the same promise to avoid parallel runs
    if (this.detectionInProgress) {
      return this.detectionInProgress;
    }
    // Start detection and track the promise
    this.detectionInProgress = this.doDetectAgents();
    try {
      return await this.detectionInProgress;
    } finally {
      this.detectionInProgress = null;
    }
  }
  /**
   * Internal method that performs the actual agent detection.
   * Walks AGENT_DEFINITIONS, resolving each agent via custom path first,
   * then PATH/probe detection, and caches the merged results.
   */
  private async doDetectAgents(): Promise<AgentConfig[]> {
    const agents: AgentConfig[] = [];
    const expandedEnv = this.getExpandedEnv();
    logger.info(`Agent detection starting. PATH: ${expandedEnv.PATH}`, 'AgentDetector');
    for (const agentDef of AGENT_DEFINITIONS) {
      const customPath = this.customPaths[agentDef.id];
      let detection: { exists: boolean; path?: string };
      // If user has specified a custom path, check that first
      if (customPath) {
        detection = await this.checkCustomPath(customPath);
        if (detection.exists) {
          logger.info(
            `Agent "${agentDef.name}" found at custom path: ${detection.path}`,
            'AgentDetector'
          );
        } else {
          logger.warn(
            `Agent "${agentDef.name}" custom path not valid: ${customPath}`,
            'AgentDetector'
          );
          // Fall back to PATH detection
          detection = await this.checkBinaryExists(agentDef.binaryName);
          if (detection.exists) {
            logger.info(
              `Agent "${agentDef.name}" found in PATH at: ${detection.path}`,
              'AgentDetector'
            );
          }
        }
      } else {
        detection = await this.checkBinaryExists(agentDef.binaryName);
        if (detection.exists) {
          logger.info(`Agent "${agentDef.name}" found at: ${detection.path}`, 'AgentDetector');
        } else if (agentDef.binaryName !== 'bash') {
          // Don't log bash as missing since it's always present, log others as warnings
          logger.warn(
            `Agent "${agentDef.name}" (binary: ${agentDef.binaryName}) not found. ` +
              `Searched in PATH: ${expandedEnv.PATH}`,
            'AgentDetector'
          );
        }
      }
      agents.push({
        ...agentDef,
        available: detection.exists,
        path: detection.path,
        customPath: customPath || undefined,
        capabilities: getAgentCapabilities(agentDef.id),
      });
    }
    const availableAgents = agents.filter((a) => a.available);
    const isWindows = process.platform === 'win32';
    // On Windows, log detailed path info to help debug shell execution issues
    if (isWindows) {
      logger.info(`Agent detection complete (Windows)`, 'AgentDetector', {
        platform: process.platform,
        agents: availableAgents.map((a) => ({
          id: a.id,
          name: a.name,
          path: a.path,
          pathExtension: a.path ? path.extname(a.path) : 'none',
          // .exe = direct execution, .cmd = requires shell
          willUseShell: a.path
            ? a.path.toLowerCase().endsWith('.cmd') ||
              a.path.toLowerCase().endsWith('.bat') ||
              !path.extname(a.path)
            : true,
        })),
      });
    } else {
      logger.info(
        `Agent detection complete. Available: ${availableAgents.map((a) => a.name).join(', ') || 'none'}`,
        'AgentDetector'
      );
    }
    this.cachedAgents = agents;
    return agents;
  }
  /**
   * Check if a custom path points to a valid executable
   * On Windows, also tries .cmd and .exe extensions if the path doesn't exist as-is
   */
  private async checkCustomPath(customPath: string): Promise<{ exists: boolean; path?: string }> {
    const isWindows = process.platform === 'win32';
    // Expand tilde to home directory (Node.js fs doesn't understand ~)
    const expandedPath = expandTilde(customPath);
    // Helper to check if a specific path exists and is a file
    const checkPath = async (pathToCheck: string): Promise<boolean> => {
      try {
        const stats = await fs.promises.stat(pathToCheck);
        return stats.isFile();
      } catch {
        return false;
      }
    };
    try {
      // First, try the exact path provided (with tilde expanded)
      if (await checkPath(expandedPath)) {
        // Check if file is executable (on Unix systems)
        if (!isWindows) {
          try {
            await fs.promises.access(expandedPath, fs.constants.X_OK);
          } catch {
            logger.warn(`Custom path exists but is not executable: ${customPath}`, 'AgentDetector');
            return { exists: false };
          }
        }
        // Return the expanded path so it can be used directly
        return { exists: true, path: expandedPath };
      }
      // On Windows, if the exact path doesn't exist, try with .cmd and .exe extensions
      if (isWindows) {
        const lowerPath = expandedPath.toLowerCase();
        // Only try extensions if the path doesn't already have one
        if (!lowerPath.endsWith('.cmd') && !lowerPath.endsWith('.exe')) {
          // Try .exe first (preferred), then .cmd
          const exePath = expandedPath + '.exe';
          if (await checkPath(exePath)) {
            logger.debug(`Custom path resolved with .exe extension`, 'AgentDetector', {
              original: customPath,
              resolved: exePath,
            });
            return { exists: true, path: exePath };
          }
          const cmdPath = expandedPath + '.cmd';
          if (await checkPath(cmdPath)) {
            logger.debug(`Custom path resolved with .cmd extension`, 'AgentDetector', {
              original: customPath,
              resolved: cmdPath,
            });
            return { exists: true, path: cmdPath };
          }
        }
      }
      return { exists: false };
    } catch {
      return { exists: false };
    }
  }
  /**
   * Build an expanded PATH that includes common binary installation locations.
   * This is necessary because packaged Electron apps don't inherit shell environment.
   */
  private getExpandedEnv(): NodeJS.ProcessEnv {
    return buildExpandedEnv();
  }
  /**
   * On Windows, directly probe known installation paths for a binary.
   * This is more reliable than `where` command which may fail in packaged Electron apps.
   * Returns the first existing path found, preferring .exe over .cmd.
   */
  private async probeWindowsPaths(binaryName: string): Promise<string | null> {
    const home = os.homedir();
    const appData = process.env.APPDATA || path.join(home, 'AppData', 'Roaming');
    const localAppData = process.env.LOCALAPPDATA || path.join(home, 'AppData', 'Local');
    const programFiles = process.env.ProgramFiles || 'C:\\Program Files';
    // Define known installation paths for each binary, in priority order
    // Prefer .exe (standalone installers) over .cmd (npm wrappers)
    const knownPaths: Record<string, string[]> = {
      claude: [
        // PowerShell installer (primary method) - installs claude.exe
        path.join(home, '.local', 'bin', 'claude.exe'),
        // Winget installation
        path.join(localAppData, 'Microsoft', 'WinGet', 'Links', 'claude.exe'),
        path.join(programFiles, 'WinGet', 'Links', 'claude.exe'),
        // npm global installation - creates .cmd wrapper
        path.join(appData, 'npm', 'claude.cmd'),
        path.join(localAppData, 'npm', 'claude.cmd'),
        // WindowsApps (Microsoft Store style)
        path.join(localAppData, 'Microsoft', 'WindowsApps', 'claude.exe'),
      ],
      codex: [
        // npm global installation (primary method for Codex)
        path.join(appData, 'npm', 'codex.cmd'),
        path.join(localAppData, 'npm', 'codex.cmd'),
        // Possible standalone in future
        path.join(home, '.local', 'bin', 'codex.exe'),
      ],
      opencode: [
        // Scoop installation (recommended for OpenCode)
        path.join(home, 'scoop', 'shims', 'opencode.exe'),
        path.join(home, 'scoop', 'apps', 'opencode', 'current', 'opencode.exe'),
        // Chocolatey installation
        path.join(
          process.env.ChocolateyInstall || 'C:\\ProgramData\\chocolatey',
          'bin',
          'opencode.exe'
        ),
        // Go install
        path.join(home, 'go', 'bin', 'opencode.exe'),
        // npm (has known issues on Windows, but check anyway)
        path.join(appData, 'npm', 'opencode.cmd'),
      ],
      gemini: [
        // npm global installation
        path.join(appData, 'npm', 'gemini.cmd'),
        path.join(localAppData, 'npm', 'gemini.cmd'),
      ],
      droid: [
        // Factory Droid installation paths
        path.join(home, '.factory', 'bin', 'droid.exe'),
        path.join(localAppData, 'Factory', 'droid.exe'),
        path.join(appData, 'Factory', 'droid.exe'),
        path.join(home, '.local', 'bin', 'droid.exe'),
        // npm global installation
        path.join(appData, 'npm', 'droid.cmd'),
        path.join(localAppData, 'npm', 'droid.cmd'),
      ],
    };
    const pathsToCheck = knownPaths[binaryName] || [];
    for (const probePath of pathsToCheck) {
      try {
        await fs.promises.access(probePath, fs.constants.F_OK);
        logger.debug(`Direct probe found ${binaryName}`, 'AgentDetector', { path: probePath });
        return probePath;
      } catch {
        // Path doesn't exist, continue to next
      }
    }
    return null;
  }
  /**
   * On macOS/Linux, directly probe known installation paths for a binary.
   * This is necessary because packaged Electron apps don't inherit shell aliases,
   * and 'which' may fail to find binaries in non-standard locations.
   * Returns the first existing executable path found.
   */
  private async probeUnixPaths(binaryName: string): Promise<string | null> {
    const home = os.homedir();
    // Get dynamic paths from Node version managers (nvm, fnm, volta, etc.)
    const versionManagerPaths = detectNodeVersionManagerBinPaths();
    // Define known installation paths for each binary, in priority order
    const knownPaths: Record<string, string[]> = {
      claude: [
        // Claude Code default installation location (irm https://claude.ai/install.ps1 equivalent on macOS)
        path.join(home, '.claude', 'local', 'claude'),
        // User local bin (pip, manual installs)
        path.join(home, '.local', 'bin', 'claude'),
        // Homebrew on Apple Silicon
        '/opt/homebrew/bin/claude',
        // Homebrew on Intel Mac
        '/usr/local/bin/claude',
        // npm global with custom prefix
        path.join(home, '.npm-global', 'bin', 'claude'),
        // User bin directory
        path.join(home, 'bin', 'claude'),
        // Add paths from Node version managers (nvm, fnm, volta, etc.)
        ...versionManagerPaths.map((p) => path.join(p, 'claude')),
      ],
      codex: [
        // User local bin
        path.join(home, '.local', 'bin', 'codex'),
        // Homebrew paths
        '/opt/homebrew/bin/codex',
        '/usr/local/bin/codex',
        // npm global
        path.join(home, '.npm-global', 'bin', 'codex'),
        // Add paths from Node version managers (nvm, fnm, volta, etc.)
        ...versionManagerPaths.map((p) => path.join(p, 'codex')),
      ],
      opencode: [
        // OpenCode installer default location
        path.join(home, '.opencode', 'bin', 'opencode'),
        // Go install location
        path.join(home, 'go', 'bin', 'opencode'),
        // User local bin
        path.join(home, '.local', 'bin', 'opencode'),
        // Homebrew paths
        '/opt/homebrew/bin/opencode',
        '/usr/local/bin/opencode',
        // Add paths from Node version managers (nvm, fnm, volta, etc.)
        ...versionManagerPaths.map((p) => path.join(p, 'opencode')),
      ],
      gemini: [
        // npm global paths
        path.join(home, '.npm-global', 'bin', 'gemini'),
        '/opt/homebrew/bin/gemini',
        '/usr/local/bin/gemini',
        // Add paths from Node version managers (nvm, fnm, volta, etc.)
        ...versionManagerPaths.map((p) => path.join(p, 'gemini')),
      ],
      droid: [
        // Factory Droid installation paths
        path.join(home, '.factory', 'bin', 'droid'),
        path.join(home, '.local', 'bin', 'droid'),
        '/opt/homebrew/bin/droid',
        '/usr/local/bin/droid',
        // Add paths from Node version managers (in case installed via npm)
        ...versionManagerPaths.map((p) => path.join(p, 'droid')),
      ],
    };
    const pathsToCheck = knownPaths[binaryName] || [];
    for (const probePath of pathsToCheck) {
      try {
        // Check both existence and executability
        await fs.promises.access(probePath, fs.constants.F_OK | fs.constants.X_OK);
        logger.debug(`Direct probe found ${binaryName}`, 'AgentDetector', { path: probePath });
        return probePath;
      } catch {
        // Path doesn't exist or isn't executable, continue to next
      }
    }
    return null;
  }
  /**
   * Check if a binary exists in PATH
   * On Windows, this also handles .cmd and .exe extensions properly
   */
  private async checkBinaryExists(binaryName: string): Promise<{ exists: boolean; path?: string }> {
    const isWindows = process.platform === 'win32';
    // First try direct file probing of known installation paths
    // This is more reliable than which/where in packaged Electron apps
    if (isWindows) {
      const probedPath = await this.probeWindowsPaths(binaryName);
      if (probedPath) {
        return { exists: true, path: probedPath };
      }
      logger.debug(`Direct probe failed for ${binaryName}, falling back to where`, 'AgentDetector');
    } else {
      // macOS/Linux: probe known paths first
      const probedPath = await this.probeUnixPaths(binaryName);
      if (probedPath) {
        return { exists: true, path: probedPath };
      }
      logger.debug(`Direct probe failed for ${binaryName}, falling back to which`, 'AgentDetector');
    }
    try {
      // Use 'which' on Unix-like systems, 'where' on Windows
      const command = isWindows ? 'where' : 'which';
      // Use expanded PATH to find binaries in common installation locations
      // This is critical for packaged Electron apps which don't inherit shell env
      const env = this.getExpandedEnv();
      const result = await execFileNoThrow(command, [binaryName], undefined, env);
      if (result.exitCode === 0 && result.stdout.trim()) {
        // Get all matches (Windows 'where' can return multiple)
        // Handle both Unix (\n) and Windows (\r\n) line endings
        const matches = result.stdout
          .trim()
          .split(/\r?\n/)
          .map((p) => p.trim())
          .filter((p) => p);
        if (process.platform === 'win32' && matches.length > 0) {
          // On Windows, prefer .exe over .cmd over extensionless
          // This helps with proper execution handling
          const exeMatch = matches.find((p) => p.toLowerCase().endsWith('.exe'));
          const cmdMatch = matches.find((p) => p.toLowerCase().endsWith('.cmd'));
          // Return the best match: .exe > .cmd > first result
          let bestMatch = exeMatch || cmdMatch || matches[0];
          // If the first match doesn't have an extension, check if .cmd or .exe version exists
          // This handles cases where 'where' returns a path without extension
          if (
            !bestMatch.toLowerCase().endsWith('.exe') &&
            !bestMatch.toLowerCase().endsWith('.cmd')
          ) {
            const cmdPath = bestMatch + '.cmd';
            const exePath = bestMatch + '.exe';
            // Check if the .exe or .cmd version exists
            try {
              await fs.promises.access(exePath, fs.constants.F_OK);
              bestMatch = exePath;
              logger.debug(`Found .exe version of ${binaryName}`, 'AgentDetector', {
                path: exePath,
              });
            } catch {
              try {
                await fs.promises.access(cmdPath, fs.constants.F_OK);
                bestMatch = cmdPath;
                logger.debug(`Found .cmd version of ${binaryName}`, 'AgentDetector', {
                  path: cmdPath,
                });
              } catch {
                // Neither .exe nor .cmd exists, use the original path
              }
            }
          }
          logger.debug(`Windows binary detection for ${binaryName}`, 'AgentDetector', {
            allMatches: matches,
            selectedMatch: bestMatch,
            isCmd: bestMatch.toLowerCase().endsWith('.cmd'),
            isExe: bestMatch.toLowerCase().endsWith('.exe'),
          });
          return {
            exists: true,
            path: bestMatch,
          };
        }
        return {
          exists: true,
          path: matches[0], // First match for Unix
        };
      }
      return { exists: false };
    } catch {
      return { exists: false };
    }
  }
  /**
   * Get a specific agent by ID (runs detection if not already cached)
   */
  async getAgent(agentId: string): Promise<AgentConfig | null> {
    const agents = await this.detectAgents();
    return agents.find((a) => a.id === agentId) || null;
  }
  /**
   * Clear the cache (useful if PATH changes)
   */
  clearCache(): void {
    this.cachedAgents = null;
  }
  /**
   * Clear the model cache for a specific agent or all agents
   */
  clearModelCache(agentId?: string): void {
    if (agentId) {
      this.modelCache.delete(agentId);
    } else {
      this.modelCache.clear();
    }
  }
  /**
   * Discover available models for an agent that supports model selection.
   * Returns cached results if available and not expired.
   *
   * @param agentId - The agent identifier (e.g., 'opencode')
   * @param forceRefresh - If true, bypass cache and fetch fresh model list
   * @returns Array of model names, or empty array if agent doesn't support model discovery
   */
  async discoverModels(agentId: string, forceRefresh = false): Promise<string[]> {
    const agent = await this.getAgent(agentId);
    if (!agent || !agent.available) {
      logger.warn(`Cannot discover models: agent ${agentId} not available`, 'AgentDetector');
      return [];
    }
    // Check if agent supports model selection
    if (!agent.capabilities.supportsModelSelection) {
      logger.debug(`Agent ${agentId} does not support model selection`, 'AgentDetector');
      return [];
    }
    // Check cache unless force refresh
    if (!forceRefresh) {
      const cached = this.modelCache.get(agentId);
      if (cached && Date.now() - cached.timestamp < this.MODEL_CACHE_TTL_MS) {
        logger.debug(`Returning cached models for ${agentId}`, 'AgentDetector');
        return cached.models;
      }
    }
    // Run agent-specific model discovery command
    const models = await this.runModelDiscovery(agentId, agent);
    // Cache the results
    this.modelCache.set(agentId, { models, timestamp: Date.now() });
    return models;
  }
  /**
   * Run the agent-specific model discovery command.
   * Each agent may have a different way to list available models.
   */
  private async runModelDiscovery(agentId: string, agent: AgentConfig): Promise<string[]> {
    const env = this.getExpandedEnv();
    const command = agent.path || agent.command;
    // Agent-specific model discovery commands
    switch (agentId) {
      case 'opencode': {
        // OpenCode: `opencode models` returns one model per line
        const result = await execFileNoThrow(command, ['models'], undefined, env);
        if (result.exitCode !== 0) {
          logger.warn(
            `Model discovery failed for ${agentId}: exit code ${result.exitCode}`,
            'AgentDetector',
            { stderr: result.stderr }
          );
          return [];
        }
        // Parse output: one model per line (e.g., "opencode/gpt-5-nano", "ollama/gpt-oss:latest")
        const models = result.stdout
          .split('\n')
          .map((line) => line.trim())
          .filter((line) => line.length > 0);
        logger.info(`Discovered ${models.length} models for ${agentId}`, 'AgentDetector', {
          models,
        });
        return models;
      }
      default:
        // For agents without model discovery implemented, return empty array
        logger.debug(`No model discovery implemented for ${agentId}`, 'AgentDetector');
        return [];
    }
  }
}

View File

@@ -0,0 +1,346 @@
/**
* Agent Definitions
*
* Contains the configuration definitions for all supported AI agents.
* This includes CLI arguments, configuration options, and default settings.
*/
import type { AgentCapabilities } from './capabilities';
// ============ Configuration Types ============
/**
 * Base configuration option fields shared by all option types.
 * Concrete option types add a `type` discriminant, a typed `default`, and an
 * optional `argBuilder` for translating the stored value into CLI arguments.
 */
interface BaseConfigOption {
  key: string; // Storage key
  label: string; // UI label
  description: string; // Help text
}
/**
 * Checkbox configuration option (boolean value)
 */
interface CheckboxConfigOption extends BaseConfigOption {
  type: 'checkbox'; // Discriminant for the AgentConfigOption union
  default: boolean;
  argBuilder?: (value: boolean) => string[]; // Optional: converts the value to CLI args
}
/**
 * Text configuration option (string value)
 */
interface TextConfigOption extends BaseConfigOption {
  type: 'text'; // Discriminant for the AgentConfigOption union
  default: string;
  argBuilder?: (value: string) => string[]; // Optional: converts the value to CLI args
}
/**
 * Number configuration option (numeric value)
 */
interface NumberConfigOption extends BaseConfigOption {
  type: 'number'; // Discriminant for the AgentConfigOption union
  default: number;
  argBuilder?: (value: number) => string[]; // Optional: converts the value to CLI args
}
/**
 * Select configuration option (string value from predefined options)
 */
interface SelectConfigOption extends BaseConfigOption {
  type: 'select'; // Discriminant for the AgentConfigOption union
  default: string;
  options: string[]; // Allowed values presented to the user
  argBuilder?: (value: string) => string[]; // Optional: converts the value to CLI args
}
/**
 * Configuration option types for agent-specific settings.
 * Uses a discriminated union (on the `type` field) for full type safety:
 * narrowing on `type` yields the correctly-typed `default` and `argBuilder`.
 */
export type AgentConfigOption =
  | CheckboxConfigOption
  | TextConfigOption
  | NumberConfigOption
  | SelectConfigOption;
/**
 * Full agent configuration including runtime detection state.
 * Static fields are declared in AGENT_DEFINITIONS; `available`, `path`, and
 * `capabilities` are populated at runtime when agents are detected.
 */
export interface AgentConfig {
  id: string;
  name: string;
  binaryName: string;
  command: string;
  args: string[]; // Base args always included (excludes batch mode prefix)
  available: boolean;
  path?: string;
  customPath?: string; // User-specified custom path (shown in UI even if not available)
  requiresPty?: boolean; // Whether this agent needs a pseudo-terminal
  configOptions?: AgentConfigOption[]; // Agent-specific configuration
  hidden?: boolean; // If true, agent is hidden from UI (internal use only)
  capabilities: AgentCapabilities; // Agent feature capabilities
  // Argument builders for dynamic CLI construction
  // These are optional - agents that don't have them use hardcoded behavior
  batchModePrefix?: string[]; // Args added before base args for batch mode (e.g., ['run'] for OpenCode)
  batchModeArgs?: string[]; // Args only applied in batch mode (e.g., ['--skip-git-repo-check'] for Codex exec)
  jsonOutputArgs?: string[]; // Args for JSON output format (e.g., ['--format', 'json'])
  resumeArgs?: (sessionId: string) => string[]; // Function to build resume args
  readOnlyArgs?: string[]; // Args for read-only/plan mode (e.g., ['--agent', 'plan'])
  modelArgs?: (modelId: string) => string[]; // Function to build model selection args (e.g., ['--model', modelId])
  yoloModeArgs?: string[]; // Args for YOLO/full-access mode (e.g., ['--dangerously-bypass-approvals-and-sandbox'])
  workingDirArgs?: (dir: string) => string[]; // Function to build working directory args (e.g., ['-C', dir])
  imageArgs?: (imagePath: string) => string[]; // Function to build image attachment args (e.g., ['-i', imagePath] for Codex)
  promptArgs?: (prompt: string) => string[]; // Function to build prompt args (e.g., ['-p', prompt] for OpenCode)
  noPromptSeparator?: boolean; // If true, don't add '--' before the prompt in batch mode (OpenCode doesn't support it)
  defaultEnvVars?: Record<string, string>; // Default environment variables for this agent (merged with user customEnvVars)
}
/**
 * Agent definition without runtime detection state (used for static definitions).
 * Detection later fills in the omitted `available`, `path`, and `capabilities`
 * fields to produce a full AgentConfig.
 */
export type AgentDefinition = Omit<AgentConfig, 'available' | 'path' | 'capabilities'>;
// ============ Agent Definitions ============
/**
* Static definitions for all supported agents.
* These are the base configurations before runtime detection adds availability info.
*/
export const AGENT_DEFINITIONS: AgentDefinition[] = [
{
id: 'terminal',
name: 'Terminal',
// Use platform-appropriate default shell
binaryName: process.platform === 'win32' ? 'powershell.exe' : 'bash',
command: process.platform === 'win32' ? 'powershell.exe' : 'bash',
args: [],
requiresPty: true,
hidden: true, // Internal agent, not shown in UI
},
{
id: 'claude-code',
name: 'Claude Code',
binaryName: 'claude',
command: 'claude',
// YOLO mode (--dangerously-skip-permissions) is always enabled - Maestro requires it
args: [
'--print',
'--verbose',
'--output-format',
'stream-json',
'--dangerously-skip-permissions',
],
resumeArgs: (sessionId: string) => ['--resume', sessionId], // Resume with session ID
readOnlyArgs: ['--permission-mode', 'plan'], // Read-only/plan mode
},
{
id: 'codex',
name: 'Codex',
binaryName: 'codex',
command: 'codex',
// Base args for interactive mode (no flags that are exec-only)
args: [],
// Codex CLI argument builders
// Batch mode: codex exec --json --dangerously-bypass-approvals-and-sandbox --skip-git-repo-check [--sandbox read-only] [-C dir] [resume <id>] -- "prompt"
// Sandbox modes:
// - Default (YOLO): --dangerously-bypass-approvals-and-sandbox (full system access, required by Maestro)
// - Read-only: --sandbox read-only (can only read files, overrides YOLO)
batchModePrefix: ['exec'], // Codex uses 'exec' subcommand for batch mode
batchModeArgs: ['--dangerously-bypass-approvals-and-sandbox', '--skip-git-repo-check'], // Args only valid on 'exec' subcommand
jsonOutputArgs: ['--json'], // JSON output format (must come before resume subcommand)
resumeArgs: (sessionId: string) => ['resume', sessionId], // Resume with session/thread ID
readOnlyArgs: ['--sandbox', 'read-only'], // Read-only/plan mode
yoloModeArgs: ['--dangerously-bypass-approvals-and-sandbox'], // Full access mode
workingDirArgs: (dir: string) => ['-C', dir], // Set working directory
imageArgs: (imagePath: string) => ['-i', imagePath], // Image attachment: codex exec -i /path/to/image.png
// Agent-specific configuration options shown in UI
configOptions: [
{
key: 'contextWindow',
type: 'number',
label: 'Context Window Size',
description:
'Maximum context window size in tokens. Required for context usage display. Common values: 400000 (GPT-5.2), 128000 (GPT-4o).',
default: 400000, // Default for GPT-5.2 models
},
],
},
{
id: 'gemini-cli',
name: 'Gemini CLI',
binaryName: 'gemini',
command: 'gemini',
args: [],
},
{
id: 'qwen3-coder',
name: 'Qwen3 Coder',
binaryName: 'qwen3-coder',
command: 'qwen3-coder',
args: [],
},
{
id: 'opencode',
name: 'OpenCode',
binaryName: 'opencode',
command: 'opencode',
args: [], // Base args (none for OpenCode - batch mode uses 'run' subcommand)
// OpenCode CLI argument builders
// Batch mode: opencode run --format json [--model provider/model] [--session <id>] [--agent plan] "prompt"
// YOLO mode (auto-approve all permissions) is enabled via OPENCODE_CONFIG_CONTENT env var.
// This prevents OpenCode from prompting for permission on external_directory access, which would hang in batch mode.
batchModePrefix: ['run'], // OpenCode uses 'run' subcommand for batch mode
jsonOutputArgs: ['--format', 'json'], // JSON output format
resumeArgs: (sessionId: string) => ['--session', sessionId], // Resume with session ID
readOnlyArgs: ['--agent', 'plan'], // Read-only/plan mode
modelArgs: (modelId: string) => ['--model', modelId], // Model selection (e.g., 'ollama/qwen3:8b')
imageArgs: (imagePath: string) => ['-f', imagePath], // Image/file attachment: opencode run -f /path/to/image.png -- "prompt"
noPromptSeparator: true, // OpenCode doesn't need '--' before prompt - yargs handles positional args
// Default env vars: enable YOLO mode (allow all permissions including external_directory)
// Users can override by setting customEnvVars in agent config
defaultEnvVars: {
OPENCODE_CONFIG_CONTENT: '{"permission":{"*":"allow","external_directory":"allow"}}',
},
// Agent-specific configuration options shown in UI
configOptions: [
{
key: 'model',
type: 'text',
label: 'Model',
description:
'Model to use (e.g., "ollama/qwen3:8b", "anthropic/claude-sonnet-4-20250514"). Leave empty for default.',
default: '', // Empty string means use OpenCode's default model
argBuilder: (value: string) => {
// Only add --model arg if a model is specified
if (value && value.trim()) {
return ['--model', value.trim()];
}
return [];
},
},
{
key: 'contextWindow',
type: 'number',
label: 'Context Window Size',
description:
'Maximum context window size in tokens. Required for context usage display. Varies by model (e.g., 400000 for Claude/GPT-5.2, 128000 for GPT-4o).',
default: 128000, // Default for common models (GPT-4, etc.)
},
],
},
{
id: 'factory-droid',
name: 'Factory Droid',
binaryName: 'droid',
command: 'droid',
args: [], // Base args for interactive mode (none)
requiresPty: false, // Batch mode uses child process
// Batch mode: droid exec [options] "prompt"
batchModePrefix: ['exec'],
// Always skip permissions in batch mode (like Claude Code's --dangerously-skip-permissions)
// Maestro requires full access to work properly
batchModeArgs: ['--skip-permissions-unsafe'],
// JSON output for parsing
jsonOutputArgs: ['-o', 'stream-json'],
// Session resume: -s <id> (requires a prompt)
resumeArgs: (sessionId: string) => ['-s', sessionId],
// Read-only mode is DEFAULT in droid exec (no flag needed)
readOnlyArgs: [],
// YOLO mode (same as batchModeArgs, kept for explicit yoloMode requests)
yoloModeArgs: ['--skip-permissions-unsafe'],
// Working directory
workingDirArgs: (dir: string) => ['--cwd', dir],
// File/image input
imageArgs: (imagePath: string) => ['-f', imagePath],
// Prompt is positional argument (no separator needed)
noPromptSeparator: true,
// Default env vars - don't set NO_COLOR as it conflicts with FORCE_COLOR
defaultEnvVars: {},
// UI config options
// Model IDs from droid CLI (exact IDs required)
// NOTE: autonomyLevel is NOT configurable - Maestro always uses --skip-permissions-unsafe
// which conflicts with --auto. This matches Claude Code's behavior.
configOptions: [
{
key: 'model',
type: 'select',
label: 'Model',
description: 'Model to use for Factory Droid',
// Model IDs from `droid exec --help`
options: [
'', // Empty = use droid's default
// OpenAI models
'gpt-5.1',
'gpt-5.1-codex',
'gpt-5.1-codex-max',
'gpt-5.2',
// Claude models
'claude-sonnet-4-5-20250929',
'claude-opus-4-5-20251101',
'claude-haiku-4-5-20251001',
// Google models
'gemini-3-pro-preview',
],
default: '', // Empty = use droid's default
argBuilder: (value: string) => (value && value.trim() ? ['-m', value.trim()] : []),
},
{
key: 'reasoningEffort',
type: 'select',
label: 'Reasoning Effort',
description: 'How much the model should reason before responding',
options: ['', 'low', 'medium', 'high'],
default: '', // Empty = use droid's default reasoning
argBuilder: (value: string) => (value && value.trim() ? ['-r', value.trim()] : []),
},
{
key: 'contextWindow',
type: 'number',
label: 'Context Window Size',
description: 'Maximum context window in tokens (for UI display)',
default: 200000,
},
],
},
{
id: 'aider',
name: 'Aider',
binaryName: 'aider',
command: 'aider',
args: [], // Base args (placeholder - to be configured when implemented)
},
];
/**
 * Look up the static definition for an agent by its identifier.
 *
 * Returns the raw definition only — no runtime detection state
 * (availability, resolved path) is attached.
 *
 * @param agentId - The agent identifier (e.g., 'claude-code')
 * @returns The matching definition, or undefined if the id is unknown
 */
export function getAgentDefinition(agentId: string): AgentDefinition | undefined {
  for (const definition of AGENT_DEFINITIONS) {
    if (definition.id === agentId) {
      return definition;
    }
  }
  return undefined;
}
/**
 * List the identifiers of every registered agent definition.
 *
 * @returns Agent ids in the order they are declared in AGENT_DEFINITIONS
 */
export function getAgentIds(): string[] {
  return AGENT_DEFINITIONS.map(({ id }) => id);
}
/**
 * List agent definitions that should be shown in the UI.
 *
 * Definitions flagged with `hidden` are excluded; declaration order is preserved.
 *
 * @returns All definitions whose `hidden` flag is not set
 */
export function getVisibleAgentDefinitions(): AgentDefinition[] {
  const visible: AgentDefinition[] = [];
  for (const definition of AGENT_DEFINITIONS) {
    if (definition.hidden) {
      continue;
    }
    visible.push(definition);
  }
  return visible;
}

288
src/main/agents/detector.ts Normal file
View File

@@ -0,0 +1,288 @@
/**
* Agent Detection and Configuration Manager
*
* Responsibilities:
* - Detects installed agents via file system probing and PATH resolution
* - Manages agent configuration and capability metadata
* - Caches detection results for performance
* - Discovers available models for agents that support model selection
*
* Model Discovery:
* - Model lists are cached for 5 minutes (configurable) to balance freshness and performance
* - Each agent implements its own model discovery command
* - Cache can be manually cleared or bypassed with forceRefresh flag
*/
import * as path from 'path';
import { execFileNoThrow } from '../utils/execFile';
import { logger } from '../utils/logger';
import { getAgentCapabilities } from './capabilities';
import { checkBinaryExists, checkCustomPath, getExpandedEnv } from './path-prober';
import { AGENT_DEFINITIONS, type AgentConfig } from './definitions';
const LOG_CONTEXT = 'AgentDetector';
// ============ Agent Detector Class ============

/** Default cache TTL: 5 minutes (model lists don't change frequently) */
const DEFAULT_MODEL_CACHE_TTL_MS = 5 * 60 * 1000;

export class AgentDetector {
  // Detection results cached until explicitly invalidated (clearCache or a
  // customPaths change); detection walks the file system and is not free.
  private cachedAgents: AgentConfig[] | null = null;
  // In-flight detection promise; lets concurrent detectAgents() calls share
  // one run instead of probing in parallel.
  private detectionInProgress: Promise<AgentConfig[]> | null = null;
  // User-configured binary paths keyed by agent id; consulted before PATH lookup.
  private customPaths: Record<string, string> = {};
  // Cache for model discovery results: agentId -> { models, timestamp }
  private modelCache: Map<string, { models: string[]; timestamp: number }> = new Map();
  // Configurable cache TTL (useful for testing or different environments)
  private readonly modelCacheTtlMs: number;

  /**
   * Create an AgentDetector instance
   * @param modelCacheTtlMs - Model cache TTL in milliseconds (default: 5 minutes)
   */
  constructor(modelCacheTtlMs: number = DEFAULT_MODEL_CACHE_TTL_MS) {
    this.modelCacheTtlMs = modelCacheTtlMs;
  }

  /**
   * Set custom paths for agents (from user configuration)
   *
   * @param paths - Map of agent id to user-supplied binary path
   */
  setCustomPaths(paths: Record<string, string>): void {
    this.customPaths = paths;
    // Clear cache when custom paths change
    this.cachedAgents = null;
  }

  /**
   * Get the current custom paths
   *
   * @returns A shallow copy, so callers cannot mutate internal state
   */
  getCustomPaths(): Record<string, string> {
    return { ...this.customPaths };
  }

  /**
   * Detect which agents are available on the system
   * Uses promise deduplication to prevent parallel detection when multiple calls arrive simultaneously
   *
   * @returns One AgentConfig per definition, with availability/path resolved
   */
  async detectAgents(): Promise<AgentConfig[]> {
    if (this.cachedAgents) {
      return this.cachedAgents;
    }
    // If detection is already in progress, return the same promise to avoid parallel runs
    if (this.detectionInProgress) {
      return this.detectionInProgress;
    }
    // Start detection and track the promise
    this.detectionInProgress = this.doDetectAgents();
    try {
      return await this.detectionInProgress;
    } finally {
      // Always release the in-flight marker (even on failure) so a later
      // call can retry detection rather than await a rejected promise.
      this.detectionInProgress = null;
    }
  }

  /**
   * Internal method that performs the actual agent detection
   *
   * For each definition: check the user's custom path first (if any),
   * fall back to known-location/PATH probing, then attach capabilities.
   */
  private async doDetectAgents(): Promise<AgentConfig[]> {
    const agents: AgentConfig[] = [];
    const expandedEnv = getExpandedEnv();
    logger.info(`Agent detection starting. PATH: ${expandedEnv.PATH}`, LOG_CONTEXT);
    for (const agentDef of AGENT_DEFINITIONS) {
      const customPath = this.customPaths[agentDef.id];
      let detection: { exists: boolean; path?: string };
      // If user has specified a custom path, check that first
      if (customPath) {
        detection = await checkCustomPath(customPath);
        if (detection.exists) {
          logger.info(
            `Agent "${agentDef.name}" found at custom path: ${detection.path}`,
            LOG_CONTEXT
          );
        } else {
          logger.warn(`Agent "${agentDef.name}" custom path not valid: ${customPath}`, LOG_CONTEXT);
          // Fall back to PATH detection
          detection = await checkBinaryExists(agentDef.binaryName);
          if (detection.exists) {
            logger.info(
              `Agent "${agentDef.name}" found in PATH at: ${detection.path}`,
              LOG_CONTEXT
            );
          }
        }
      } else {
        detection = await checkBinaryExists(agentDef.binaryName);
        if (detection.exists) {
          logger.info(`Agent "${agentDef.name}" found at: ${detection.path}`, LOG_CONTEXT);
        } else if (agentDef.binaryName !== 'bash') {
          // Don't log bash as missing since it's always present, log others as warnings
          logger.warn(
            `Agent "${agentDef.name}" (binary: ${agentDef.binaryName}) not found. ` +
              `Searched in PATH: ${expandedEnv.PATH}`,
            LOG_CONTEXT
          );
        }
      }
      // Every definition is returned (available or not) so the UI can list
      // all known agents; capabilities are static per agent id.
      agents.push({
        ...agentDef,
        available: detection.exists,
        path: detection.path,
        customPath: customPath || undefined,
        capabilities: getAgentCapabilities(agentDef.id),
      });
    }
    const availableAgents = agents.filter((a) => a.available);
    const isWindows = process.platform === 'win32';
    // On Windows, log detailed path info to help debug shell execution issues
    if (isWindows) {
      logger.info(`Agent detection complete (Windows)`, LOG_CONTEXT, {
        platform: process.platform,
        agents: availableAgents.map((a) => ({
          id: a.id,
          name: a.name,
          path: a.path,
          pathExtension: a.path ? path.extname(a.path) : 'none',
          // .exe = direct execution, .cmd = requires shell
          willUseShell: a.path
            ? a.path.toLowerCase().endsWith('.cmd') ||
              a.path.toLowerCase().endsWith('.bat') ||
              !path.extname(a.path)
            : true,
        })),
      });
    } else {
      logger.info(
        `Agent detection complete. Available: ${availableAgents.map((a) => a.name).join(', ') || 'none'}`,
        LOG_CONTEXT
      );
    }
    this.cachedAgents = agents;
    return agents;
  }

  /**
   * Get a specific agent by ID
   *
   * Triggers detection (or uses the cache) before looking up the agent.
   *
   * @returns The matching AgentConfig, or null if the id is unknown
   */
  async getAgent(agentId: string): Promise<AgentConfig | null> {
    const agents = await this.detectAgents();
    return agents.find((a) => a.id === agentId) || null;
  }

  /**
   * Clear the cache (useful if PATH changes)
   */
  clearCache(): void {
    this.cachedAgents = null;
  }

  /**
   * Clear the model cache for a specific agent or all agents
   *
   * @param agentId - When provided, only that agent's cached models are dropped
   */
  clearModelCache(agentId?: string): void {
    if (agentId) {
      this.modelCache.delete(agentId);
    } else {
      this.modelCache.clear();
    }
  }

  /**
   * Discover available models for an agent that supports model selection.
   * Returns cached results if available and not expired.
   *
   * @param agentId - The agent identifier (e.g., 'opencode')
   * @param forceRefresh - If true, bypass cache and fetch fresh model list
   * @returns Array of model names, or empty array if agent doesn't support model discovery
   */
  async discoverModels(agentId: string, forceRefresh = false): Promise<string[]> {
    const agent = await this.getAgent(agentId);
    if (!agent || !agent.available) {
      logger.warn(`Cannot discover models: agent ${agentId} not available`, LOG_CONTEXT);
      return [];
    }
    // Check if agent supports model selection
    if (!agent.capabilities.supportsModelSelection) {
      logger.debug(`Agent ${agentId} does not support model selection`, LOG_CONTEXT);
      return [];
    }
    // Check cache unless force refresh
    if (!forceRefresh) {
      const cached = this.modelCache.get(agentId);
      if (cached && Date.now() - cached.timestamp < this.modelCacheTtlMs) {
        logger.debug(`Returning cached models for ${agentId}`, LOG_CONTEXT);
        return cached.models;
      }
    }
    // Run agent-specific model discovery command
    const models = await this.runModelDiscovery(agentId, agent);
    // Cache the results (empty results are cached too, avoiding repeated
    // failing invocations within the TTL window)
    this.modelCache.set(agentId, { models, timestamp: Date.now() });
    return models;
  }

  /**
   * Run the agent-specific model discovery command.
   * Each agent may have a different way to list available models.
   *
   * This method catches all exceptions to ensure graceful degradation
   * when model discovery fails for any reason.
   */
  private async runModelDiscovery(agentId: string, agent: AgentConfig): Promise<string[]> {
    const env = getExpandedEnv();
    // Prefer the resolved absolute path; fall back to the bare command name.
    const command = agent.path || agent.command;
    try {
      // Agent-specific model discovery commands
      switch (agentId) {
        case 'opencode': {
          // OpenCode: `opencode models` returns one model per line
          const result = await execFileNoThrow(command, ['models'], undefined, env);
          if (result.exitCode !== 0) {
            logger.warn(
              `Model discovery failed for ${agentId}: exit code ${result.exitCode}`,
              LOG_CONTEXT,
              { stderr: result.stderr }
            );
            return [];
          }
          // Parse output: one model per line (e.g., "opencode/gpt-5-nano", "ollama/gpt-oss:latest")
          const models = result.stdout
            .split('\n')
            .map((line) => line.trim())
            .filter((line) => line.length > 0);
          logger.info(`Discovered ${models.length} models for ${agentId}`, LOG_CONTEXT, {
            models,
          });
          return models;
        }
        default:
          // For agents without model discovery implemented, return empty array
          logger.debug(`No model discovery implemented for ${agentId}`, LOG_CONTEXT);
          return [];
      }
    } catch (error) {
      logger.error(`Model discovery threw exception for ${agentId}`, LOG_CONTEXT, { error });
      return [];
    }
  }
}

68
src/main/agents/index.ts Normal file
View File

@@ -0,0 +1,68 @@
/**
 * Agents Module
 *
 * This module consolidates all agent-related functionality:
 * - Agent detection and configuration
 * - Agent definitions and types
 * - Agent capabilities
 * - Session storage interface
 * - Binary path probing
 *
 * This barrel file is the public surface of `src/main/agents`; other main
 * modules should import from here rather than from the submodules directly.
 *
 * Usage:
 * ```typescript
 * import { AgentDetector, AGENT_DEFINITIONS, getAgentCapabilities } from './agents';
 * ```
 */

// ============ Capabilities ============
// Static per-agent feature flags (model selection, resume support, etc.)
export {
  type AgentCapabilities,
  DEFAULT_CAPABILITIES,
  AGENT_CAPABILITIES,
  getAgentCapabilities,
  hasCapability,
} from './capabilities';

// ============ Definitions ============
// Declarative agent metadata (binary names, CLI arg builders, config options)
export {
  type AgentConfigOption,
  type AgentConfig,
  type AgentDefinition,
  AGENT_DEFINITIONS,
  getAgentDefinition,
  getAgentIds,
  getVisibleAgentDefinitions,
} from './definitions';

// ============ Detector ============
// Runtime detection of which agents are installed on this machine
export { AgentDetector } from './detector';

// ============ Path Prober ============
// Low-level binary location helpers (used by the detector, exported for tests)
export {
  type BinaryDetectionResult,
  getExpandedEnv,
  checkCustomPath,
  probeWindowsPaths,
  probeUnixPaths,
  checkBinaryExists,
} from './path-prober';

// ============ Session Storage ============
// Pluggable per-agent session history storage registry
export {
  type AgentSessionOrigin,
  type SessionMessage,
  type AgentSessionInfo,
  type PaginatedSessionsResult,
  type SessionMessagesResult,
  type SessionSearchResult,
  type SessionSearchMode,
  type SessionListOptions,
  type SessionReadOptions,
  type SessionOriginInfo,
  type AgentSessionStorage,
  registerSessionStorage,
  getSessionStorage,
  hasSessionStorage,
  getAllSessionStorages,
  clearStorageRegistry,
} from './session-storage';

View File

@@ -0,0 +1,534 @@
/**
* Binary Path Detection Utilities
*
* Packaged Electron apps don't inherit shell environment, so we need to
* probe known installation paths directly.
*
* Detection Strategy:
* 1. Direct file system probing of known installation paths (fastest, most reliable)
* 2. Fall back to which/where command with expanded PATH
*
* This two-tier approach ensures we find binaries even when:
* - PATH is not inherited correctly
* - Binaries are in non-standard locations
* - Shell initialization files (.bashrc, .zshrc) aren't sourced
*/
import * as os from 'os';
import * as fs from 'fs';
import * as path from 'path';
import { execFileNoThrow } from '../utils/execFile';
import { logger } from '../utils/logger';
import { expandTilde, detectNodeVersionManagerBinPaths } from '../../shared/pathUtils';
const LOG_CONTEXT = 'PathProber';
// ============ Types ============
export interface BinaryDetectionResult {
exists: boolean;
path?: string;
}
// ============ Environment Expansion ============
/**
 * Build an expanded PATH that includes common binary installation locations.
 * This is necessary because packaged Electron apps don't inherit shell environment.
 *
 * The additional locations are prepended to the existing PATH in the priority
 * order they are listed below (fix: the previous per-item `unshift` loop
 * reversed that order, so e.g. `/sbin` ended up ahead of `/opt/homebrew/bin`).
 * Entries already present in PATH keep their original position.
 *
 * @returns A copy of process.env with an augmented PATH (process.env itself is untouched)
 */
export function getExpandedEnv(): NodeJS.ProcessEnv {
  const home = os.homedir();
  const env = { ...process.env };
  const isWindows = process.platform === 'win32';
  // Platform-specific paths
  let additionalPaths: string[];
  if (isWindows) {
    // Windows-specific paths
    const appData = process.env.APPDATA || path.join(home, 'AppData', 'Roaming');
    const localAppData = process.env.LOCALAPPDATA || path.join(home, 'AppData', 'Local');
    const programFiles = process.env.ProgramFiles || 'C:\\Program Files';
    const programFilesX86 = process.env['ProgramFiles(x86)'] || 'C:\\Program Files (x86)';
    additionalPaths = [
      // Claude Code PowerShell installer (irm https://claude.ai/install.ps1 | iex)
      // This is the primary installation method - installs claude.exe to ~/.local/bin
      path.join(home, '.local', 'bin'),
      // Claude Code winget install (winget install --id Anthropic.ClaudeCode)
      path.join(localAppData, 'Microsoft', 'WinGet', 'Links'),
      path.join(programFiles, 'WinGet', 'Links'),
      path.join(localAppData, 'Microsoft', 'WinGet', 'Packages'),
      path.join(programFiles, 'WinGet', 'Packages'),
      // npm global installs (Claude Code, Codex CLI, Gemini CLI)
      path.join(appData, 'npm'),
      path.join(localAppData, 'npm'),
      // Claude Code CLI install location (npm global)
      path.join(appData, 'npm', 'node_modules', '@anthropic-ai', 'claude-code', 'cli'),
      // Codex CLI install location (npm global)
      path.join(appData, 'npm', 'node_modules', '@openai', 'codex', 'bin'),
      // User local programs
      path.join(localAppData, 'Programs'),
      path.join(localAppData, 'Microsoft', 'WindowsApps'),
      // Python/pip user installs (for Aider)
      path.join(appData, 'Python', 'Scripts'),
      path.join(localAppData, 'Programs', 'Python', 'Python312', 'Scripts'),
      path.join(localAppData, 'Programs', 'Python', 'Python311', 'Scripts'),
      path.join(localAppData, 'Programs', 'Python', 'Python310', 'Scripts'),
      // Git for Windows (provides bash, common tools)
      path.join(programFiles, 'Git', 'cmd'),
      path.join(programFiles, 'Git', 'bin'),
      path.join(programFiles, 'Git', 'usr', 'bin'),
      path.join(programFilesX86, 'Git', 'cmd'),
      path.join(programFilesX86, 'Git', 'bin'),
      // Node.js
      path.join(programFiles, 'nodejs'),
      path.join(localAppData, 'Programs', 'node'),
      // Scoop package manager (OpenCode, other tools)
      path.join(home, 'scoop', 'shims'),
      path.join(home, 'scoop', 'apps', 'opencode', 'current'),
      // Chocolatey (OpenCode, other tools)
      path.join(process.env.ChocolateyInstall || 'C:\\ProgramData\\chocolatey', 'bin'),
      // Go binaries (some tools installed via 'go install')
      path.join(home, 'go', 'bin'),
      // Windows system paths
      path.join(process.env.SystemRoot || 'C:\\Windows', 'System32'),
      path.join(process.env.SystemRoot || 'C:\\Windows'),
    ];
  } else {
    // Unix-like paths (macOS/Linux)
    additionalPaths = [
      '/opt/homebrew/bin', // Homebrew on Apple Silicon
      '/opt/homebrew/sbin',
      '/usr/local/bin', // Homebrew on Intel, common install location
      '/usr/local/sbin',
      `${home}/.local/bin`, // User local installs (pip, etc.)
      `${home}/.npm-global/bin`, // npm global with custom prefix
      `${home}/bin`, // User bin directory
      `${home}/.claude/local`, // Claude local install location
      `${home}/.opencode/bin`, // OpenCode installer default location
      '/usr/bin',
      '/bin',
      '/usr/sbin',
      '/sbin',
    ];
  }
  const currentPath = env.PATH || '';
  // Use platform-appropriate path delimiter
  const pathParts = currentPath.split(path.delimiter);
  // Prepend additional paths that aren't already present, preserving the
  // priority order declared above. `seen` also dedupes within additionalPaths
  // (possible when fallback env vars collapse two entries to the same dir).
  const seen = new Set<string>(pathParts);
  const toPrepend: string[] = [];
  for (const p of additionalPaths) {
    if (!seen.has(p)) {
      seen.add(p);
      toPrepend.push(p);
    }
  }
  env.PATH = [...toPrepend, ...pathParts].join(path.delimiter);
  return env;
}
// ============ Custom Path Validation ============
/**
 * Check if a custom path points to a valid executable.
 *
 * Expands a leading `~` before probing (Node's fs APIs do not). On Unix the
 * file must also carry the execute bit. On Windows, when the given path has
 * no `.cmd`/`.exe` suffix and does not exist as-is, the `.exe` and `.cmd`
 * variants are tried in that order.
 *
 * @param customPath - User-supplied path to a binary (may start with `~`)
 * @returns Detection result; `path` is the resolved, usable path when found
 */
export async function checkCustomPath(customPath: string): Promise<BinaryDetectionResult> {
  const isWindows = process.platform === 'win32';
  const expandedPath = expandTilde(customPath);

  // True when the candidate exists and is a regular file (not a directory).
  const isRegularFile = async (candidate: string): Promise<boolean> => {
    try {
      return (await fs.promises.stat(candidate)).isFile();
    } catch {
      return false;
    }
  };

  try {
    if (await isRegularFile(expandedPath)) {
      if (!isWindows) {
        // Unix: reject files that exist but lack the execute permission.
        try {
          await fs.promises.access(expandedPath, fs.constants.X_OK);
        } catch {
          logger.warn(`Custom path exists but is not executable: ${customPath}`, LOG_CONTEXT);
          return { exists: false };
        }
      }
      // Hand back the tilde-expanded path so callers can use it directly.
      return { exists: true, path: expandedPath };
    }

    if (isWindows) {
      const lowerPath = expandedPath.toLowerCase();
      // Only append extensions when the path doesn't already carry one.
      if (!lowerPath.endsWith('.cmd') && !lowerPath.endsWith('.exe')) {
        // Preference order: .exe (native) before .cmd (npm wrapper).
        for (const ext of ['.exe', '.cmd'] as const) {
          const candidate = expandedPath + ext;
          if (await isRegularFile(candidate)) {
            logger.debug(`Custom path resolved with ${ext} extension`, LOG_CONTEXT, {
              original: customPath,
              resolved: candidate,
            });
            return { exists: true, path: candidate };
          }
        }
      }
    }

    return { exists: false };
  } catch (error) {
    logger.debug(`Error checking custom path: ${customPath}`, LOG_CONTEXT, { error });
    return { exists: false };
  }
}
// ============ Windows Path Probing ============
/**
 * Known installation paths for a binary on Windows, in priority order.
 * Prefers .exe (standalone installers) over .cmd (npm wrappers).
 *
 * @param binaryName - Bare binary name (e.g., 'claude')
 * @returns Candidate absolute paths, or an empty array for unknown binaries
 */
function getWindowsKnownPaths(binaryName: string): string[] {
  const home = os.homedir();
  const appData = process.env.APPDATA || path.join(home, 'AppData', 'Roaming');
  const localAppData = process.env.LOCALAPPDATA || path.join(home, 'AppData', 'Local');
  const programFiles = process.env.ProgramFiles || 'C:\\Program Files';

  // Shared location builders keep the per-binary lists terse.
  const fromNpmGlobal = (bin: string): string[] => [
    path.join(appData, 'npm', `${bin}.cmd`),
    path.join(localAppData, 'npm', `${bin}.cmd`),
  ];
  const fromLocalBin = (bin: string): string[] => [path.join(home, '.local', 'bin', `${bin}.exe`)];
  const fromWingetLinks = (bin: string): string[] => [
    path.join(localAppData, 'Microsoft', 'WinGet', 'Links', `${bin}.exe`),
    path.join(programFiles, 'WinGet', 'Links', `${bin}.exe`),
  ];
  const fromGoBin = (bin: string): string[] => [path.join(home, 'go', 'bin', `${bin}.exe`)];
  const fromPythonScripts = (bin: string): string[] => [
    path.join(appData, 'Python', 'Scripts', `${bin}.exe`),
    path.join(localAppData, 'Programs', 'Python', 'Python312', 'Scripts', `${bin}.exe`),
    path.join(localAppData, 'Programs', 'Python', 'Python311', 'Scripts', `${bin}.exe`),
    path.join(localAppData, 'Programs', 'Python', 'Python310', 'Scripts', `${bin}.exe`),
  ];

  switch (binaryName) {
    case 'claude':
      return [
        // PowerShell installer (primary method) - installs claude.exe
        ...fromLocalBin('claude'),
        // Winget installation
        ...fromWingetLinks('claude'),
        // npm global installation - creates .cmd wrapper
        ...fromNpmGlobal('claude'),
        // WindowsApps (Microsoft Store style)
        path.join(localAppData, 'Microsoft', 'WindowsApps', 'claude.exe'),
      ];
    case 'codex':
      return [
        // npm global installation (primary method for Codex)
        ...fromNpmGlobal('codex'),
        // Possible standalone in future
        ...fromLocalBin('codex'),
      ];
    case 'opencode':
      return [
        // Scoop installation (recommended for OpenCode)
        path.join(home, 'scoop', 'shims', 'opencode.exe'),
        path.join(home, 'scoop', 'apps', 'opencode', 'current', 'opencode.exe'),
        // Chocolatey installation
        path.join(
          process.env.ChocolateyInstall || 'C:\\ProgramData\\chocolatey',
          'bin',
          'opencode.exe'
        ),
        // Go install
        ...fromGoBin('opencode'),
        // npm (has known issues on Windows, but check anyway)
        ...fromNpmGlobal('opencode'),
      ];
    case 'gemini':
      // npm global installation
      return fromNpmGlobal('gemini');
    case 'aider':
      // pip installation
      return fromPythonScripts('aider');
    default:
      return [];
  }
}
/**
 * On Windows, directly probe known installation paths for a binary.
 * This is more reliable than `where`, which may fail in packaged Electron apps.
 *
 * All candidates are checked concurrently; the first existing one in priority
 * order wins (Promise.allSettled preserves input order).
 *
 * @param binaryName - Bare binary name (e.g., 'claude')
 * @returns The highest-priority existing path, or null if none exist
 */
export async function probeWindowsPaths(binaryName: string): Promise<string | null> {
  const candidates = getWindowsKnownPaths(binaryName);
  if (candidates.length === 0) {
    return null;
  }

  // Fire all existence checks in parallel for speed on slow file systems.
  const outcomes = await Promise.allSettled(
    candidates.map(async (candidate) => {
      await fs.promises.access(candidate, fs.constants.F_OK);
      return candidate;
    })
  );

  // Walk outcomes in candidate order so priority is preserved.
  for (const outcome of outcomes) {
    if (outcome.status === 'fulfilled') {
      logger.debug(`Direct probe found ${binaryName}`, LOG_CONTEXT, { path: outcome.value });
      return outcome.value;
    }
  }
  return null;
}
// ============ Unix Path Probing ============
/**
 * Known installation paths for a binary on Unix-like systems, in priority order.
 *
 * Includes bin directories from Node version managers (nvm, fnm, volta, ...),
 * resolved at call time via detectNodeVersionManagerBinPaths().
 *
 * @param binaryName - Bare binary name (e.g., 'claude')
 * @returns Candidate absolute paths, or an empty array for unknown binaries
 */
function getUnixKnownPaths(binaryName: string): string[] {
  const home = os.homedir();
  // Dynamic bin dirs contributed by Node version managers.
  const managerBins = detectNodeVersionManagerBinPaths();

  // Shared location builders keep the per-binary lists terse.
  const fromHomebrew = (bin: string): string[] => [
    `/opt/homebrew/bin/${bin}`,
    `/usr/local/bin/${bin}`,
  ];
  const fromLocalBin = (bin: string): string[] => [path.join(home, '.local', 'bin', bin)];
  const fromNpmGlobal = (bin: string): string[] => [path.join(home, '.npm-global', 'bin', bin)];
  const fromVersionManagers = (bin: string): string[] =>
    managerBins.map((dir) => path.join(dir, bin));

  switch (binaryName) {
    case 'claude':
      return [
        // Claude Code default installation location
        path.join(home, '.claude', 'local', 'claude'),
        // User local bin (pip, manual installs)
        ...fromLocalBin('claude'),
        // Homebrew (Apple Silicon + Intel)
        ...fromHomebrew('claude'),
        // npm global with custom prefix
        ...fromNpmGlobal('claude'),
        // User bin directory
        path.join(home, 'bin', 'claude'),
        // Node version managers (nvm, fnm, volta, etc.)
        ...fromVersionManagers('claude'),
      ];
    case 'codex':
      return [
        ...fromLocalBin('codex'),
        ...fromHomebrew('codex'),
        ...fromNpmGlobal('codex'),
        ...fromVersionManagers('codex'),
      ];
    case 'opencode':
      return [
        // OpenCode installer default location
        path.join(home, '.opencode', 'bin', 'opencode'),
        // Go install location
        path.join(home, 'go', 'bin', 'opencode'),
        ...fromLocalBin('opencode'),
        ...fromHomebrew('opencode'),
        ...fromVersionManagers('opencode'),
      ];
    case 'gemini':
      return [
        ...fromNpmGlobal('gemini'),
        ...fromHomebrew('gemini'),
        ...fromVersionManagers('gemini'),
      ];
    case 'aider':
      return [
        // pip installation
        ...fromLocalBin('aider'),
        ...fromHomebrew('aider'),
        // Node version managers (in case installed via npm)
        ...fromVersionManagers('aider'),
      ];
    default:
      return [];
  }
}
/**
 * On macOS/Linux, directly probe known installation paths for a binary.
 * Packaged Electron apps don't inherit shell aliases, and `which` can miss
 * binaries in non-standard locations, so we stat known paths directly.
 *
 * All candidates are checked concurrently; the first one that exists AND is
 * executable, in priority order, wins (Promise.allSettled preserves order).
 *
 * @param binaryName - Bare binary name (e.g., 'claude')
 * @returns The highest-priority existing executable path, or null if none
 */
export async function probeUnixPaths(binaryName: string): Promise<string | null> {
  const candidates = getUnixKnownPaths(binaryName);
  if (candidates.length === 0) {
    return null;
  }

  // Fire all checks in parallel; each requires existence plus the execute bit.
  const outcomes = await Promise.allSettled(
    candidates.map(async (candidate) => {
      await fs.promises.access(candidate, fs.constants.F_OK | fs.constants.X_OK);
      return candidate;
    })
  );

  // Walk outcomes in candidate order so priority is preserved.
  for (const outcome of outcomes) {
    if (outcome.status === 'fulfilled') {
      logger.debug(`Direct probe found ${binaryName}`, LOG_CONTEXT, { path: outcome.value });
      return outcome.value;
    }
  }
  return null;
}
// ============ Binary Detection ============
/**
 * Check if a binary exists in PATH or known installation locations.
 * On Windows, this also handles .cmd and .exe extensions properly.
 *
 * Detection order:
 * 1. Direct probe of known installation paths (most reliable)
 * 2. Fall back to which/where command with expanded PATH
 *
 * @param binaryName - Bare binary name without extension (e.g., 'claude')
 * @returns Detection result; `path` is the resolved location when found
 */
export async function checkBinaryExists(binaryName: string): Promise<BinaryDetectionResult> {
  const isWindows = process.platform === 'win32';
  // First try direct file probing of known installation paths
  // This is more reliable than which/where in packaged Electron apps
  if (isWindows) {
    const probedPath = await probeWindowsPaths(binaryName);
    if (probedPath) {
      return { exists: true, path: probedPath };
    }
    logger.debug(`Direct probe failed for ${binaryName}, falling back to where`, LOG_CONTEXT);
  } else {
    // macOS/Linux: probe known paths first
    const probedPath = await probeUnixPaths(binaryName);
    if (probedPath) {
      return { exists: true, path: probedPath };
    }
    logger.debug(`Direct probe failed for ${binaryName}, falling back to which`, LOG_CONTEXT);
  }
  try {
    // Use 'which' on Unix-like systems, 'where' on Windows
    const command = isWindows ? 'where' : 'which';
    // Use expanded PATH to find binaries in common installation locations
    // This is critical for packaged Electron apps which don't inherit shell env
    const env = getExpandedEnv();
    const result = await execFileNoThrow(command, [binaryName], undefined, env);
    if (result.exitCode === 0 && result.stdout.trim()) {
      // Get all matches (Windows 'where' can return multiple)
      // Handle both Unix (\n) and Windows (\r\n) line endings
      const matches = result.stdout
        .trim()
        .split(/\r?\n/)
        .map((p) => p.trim())
        .filter((p) => p);
      if (process.platform === 'win32' && matches.length > 0) {
        // On Windows, prefer .exe over .cmd over extensionless
        // This helps with proper execution handling
        const exeMatch = matches.find((p) => p.toLowerCase().endsWith('.exe'));
        const cmdMatch = matches.find((p) => p.toLowerCase().endsWith('.cmd'));
        // Return the best match: .exe > .cmd > first result
        let bestMatch = exeMatch || cmdMatch || matches[0];
        // If the first match doesn't have an extension, check if .cmd or .exe version exists
        // This handles cases where 'where' returns a path without extension
        if (
          !bestMatch.toLowerCase().endsWith('.exe') &&
          !bestMatch.toLowerCase().endsWith('.cmd')
        ) {
          const cmdPath = bestMatch + '.cmd';
          const exePath = bestMatch + '.exe';
          // Check if the .exe or .cmd version exists
          // (nested try/catch: .exe is preferred, .cmd is the fallback,
          // and if neither exists the extensionless path is kept)
          try {
            await fs.promises.access(exePath, fs.constants.F_OK);
            bestMatch = exePath;
            logger.debug(`Found .exe version of ${binaryName}`, LOG_CONTEXT, {
              path: exePath,
            });
          } catch {
            try {
              await fs.promises.access(cmdPath, fs.constants.F_OK);
              bestMatch = cmdPath;
              logger.debug(`Found .cmd version of ${binaryName}`, LOG_CONTEXT, {
                path: cmdPath,
              });
            } catch {
              // Neither .exe nor .cmd exists, use the original path
            }
          }
        }
        logger.debug(`Windows binary detection for ${binaryName}`, LOG_CONTEXT, {
          allMatches: matches,
          selectedMatch: bestMatch,
          isCmd: bestMatch.toLowerCase().endsWith('.cmd'),
          isExe: bestMatch.toLowerCase().endsWith('.exe'),
        });
        return {
          exists: true,
          path: bestMatch,
        };
      }
      return {
        exists: true,
        path: matches[0], // First match for Unix
      };
    }
    return { exists: false };
  } catch {
    // which/where itself failed to spawn; treat as not found rather than throwing
    return { exists: false };
  }
}

View File

@@ -14,8 +14,8 @@
* ```
*/
import type { ToolType, SshRemoteConfig } from '../shared/types';
import { logger } from './utils/logger';
import type { ToolType, SshRemoteConfig } from '../../shared/types';
import { logger } from '../utils/logger';
const LOG_CONTEXT = '[AgentSessionStorage]';

View File

@@ -6,7 +6,7 @@
* - Custom args/env vars show only whether they're set, not values
*/
import { AgentDetector, AgentCapabilities } from '../../agent-detector';
import { AgentDetector, type AgentCapabilities } from '../../agents';
import { sanitizePath } from './settings';
export interface AgentInfo {

View File

@@ -29,7 +29,7 @@ import {
} from './collectors/windows-diagnostics';
import { createZipPackage, PackageContents } from './packager';
import { logger } from '../utils/logger';
import { AgentDetector } from '../agent-detector';
import { AgentDetector } from '../agents';
import { ProcessManager } from '../process-manager';
import { WebServer } from '../web-server';
import Store from 'electron-store';

View File

@@ -19,7 +19,7 @@ import {
} from './group-chat-storage';
import { appendToLog } from './group-chat-log';
import { IProcessManager, isModeratorActive } from './group-chat-moderator';
import type { AgentDetector } from '../agent-detector';
import type { AgentDetector } from '../agents';
import {
buildAgentArgs,
applyAgentConfigOverrides,

View File

@@ -31,7 +31,7 @@ import {
getModeratorSynthesisPrompt,
} from './group-chat-moderator';
import { addParticipant } from './group-chat-agent';
import { AgentDetector } from '../agent-detector';
import { AgentDetector } from '../agents';
import { powerManager } from '../power-manager';
import {
buildAgentArgs,

View File

@@ -6,7 +6,7 @@ import crypto from 'crypto';
// which causes "Cannot read properties of undefined (reading 'getAppPath')" errors
import { ProcessManager } from './process-manager';
import { WebServer } from './web-server';
import { AgentDetector } from './agent-detector';
import { AgentDetector } from './agents';
import { logger } from './utils/logger';
import { tunnelManager } from './tunnel-manager';
import { powerManager } from './power-manager';
@@ -53,7 +53,7 @@ import {
cleanupAllGroomingSessions,
getActiveGroomingSessionCount,
} from './ipc/handlers';
import { initializeStatsDB, closeStatsDB, getStatsDB } from './stats-db';
import { initializeStatsDB, closeStatsDB, getStatsDB } from './stats';
import { groupChatEmitters } from './ipc/handlers/groupChat';
import {
routeModeratorResponse,

View File

@@ -26,7 +26,7 @@ import {
getSessionStorage,
hasSessionStorage,
getAllSessionStorages,
} from '../../agent-session-storage';
} from '../../agents';
import { calculateClaudeCost } from '../../utils/pricing';
import {
loadGlobalStatsCache,
@@ -43,7 +43,7 @@ import type {
SessionSearchMode,
SessionListOptions,
SessionReadOptions,
} from '../../agent-session-storage';
} from '../../agents';
import type { GlobalAgentStats, ProviderStats, SshRemoteConfig } from '../../../shared/types';
import type { MaestroSettings } from './persistence';

View File

@@ -1,8 +1,7 @@
import { ipcMain } from 'electron';
import Store from 'electron-store';
import * as fs from 'fs';
import { AgentDetector, AGENT_DEFINITIONS } from '../../agent-detector';
import { getAgentCapabilities } from '../../agent-capabilities';
import { AgentDetector, AGENT_DEFINITIONS, getAgentCapabilities } from '../../agents';
import { execFileNoThrow } from '../../utils/execFile';
import { logger } from '../../utils/logger';
import {

View File

@@ -20,10 +20,10 @@ import {
requireDependency,
CreateHandlerOptions,
} from '../../utils/ipcHandler';
import { getSessionStorage, type SessionMessagesResult } from '../../agent-session-storage';
import { getSessionStorage, type SessionMessagesResult } from '../../agents';
import { groomContext, cancelAllGroomingSessions } from '../../utils/context-groomer';
import type { ProcessManager } from '../../process-manager';
import type { AgentDetector } from '../../agent-detector';
import type { AgentDetector } from '../../agents';
const LOG_CONTEXT = '[ContextMerge]';

View File

@@ -16,7 +16,7 @@ import {
DebugPackageOptions,
DebugPackageDependencies,
} from '../../debug-package';
import { AgentDetector } from '../../agent-detector';
import { AgentDetector } from '../../agents';
import { ProcessManager } from '../../process-manager';
import { WebServer } from '../../web-server';

View File

@@ -64,7 +64,7 @@ import {
import { routeUserMessage } from '../../group-chat/group-chat-router';
// Agent detector import
import { AgentDetector } from '../../agent-detector';
import { AgentDetector } from '../../agents';
import { groomContext } from '../../utils/context-groomer';
import { v4 as uuidv4 } from 'uuid';

View File

@@ -51,7 +51,7 @@ import { registerNotificationsHandlers } from './notifications';
import { registerSymphonyHandlers, SymphonyHandlerDependencies } from './symphony';
import { registerAgentErrorHandlers } from './agent-error';
import { registerTabNamingHandlers, TabNamingHandlerDependencies } from './tabNaming';
import { AgentDetector } from '../../agent-detector';
import { AgentDetector } from '../../agents';
import { ProcessManager } from '../../process-manager';
import { WebServer } from '../../web-server';
import { tunnelManager as tunnelManagerInstance } from '../../tunnel-manager';

View File

@@ -2,7 +2,7 @@ import { ipcMain, BrowserWindow } from 'electron';
import Store from 'electron-store';
import * as os from 'os';
import { ProcessManager } from '../../process-manager';
import { AgentDetector } from '../../agent-detector';
import { AgentDetector } from '../../agents';
import { logger } from '../../utils/logger';
import { isWebContentsAvailable } from '../../utils/safe-send';
import {

View File

@@ -15,8 +15,7 @@
import { ipcMain, BrowserWindow } from 'electron';
import { logger } from '../../utils/logger';
import { withIpcErrorLogging, CreateHandlerOptions } from '../../utils/ipcHandler';
import { getStatsDB, getInitializationResult, clearInitializationResult } from '../../stats-db';
import { isWebContentsAvailable } from '../../utils/safe-send';
import { getStatsDB } from '../../stats';
import {
QueryEvent,
AutoRunSession,
@@ -59,7 +58,7 @@ function isStatsCollectionEnabled(settingsStore?: { get: (key: string) => unknow
*/
function broadcastStatsUpdate(getMainWindow: () => BrowserWindow | null): void {
const mainWindow = getMainWindow();
if (isWebContentsAvailable(mainWindow)) {
if (mainWindow && !mainWindow.isDestroyed()) {
mainWindow.webContents.send('stats:updated');
}
}
@@ -244,15 +243,6 @@ export function registerStatsHandlers(deps: StatsHandlerDependencies): void {
})
);
// Get earliest stat timestamp (for UI display)
ipcMain.handle(
'stats:get-earliest-timestamp',
withIpcErrorLogging(handlerOpts('getEarliestTimestamp'), async () => {
const db = getStatsDB();
return db.getEarliestStatTimestamp();
})
);
// Record session creation (launched)
ipcMain.handle(
'stats:record-session-created',
@@ -302,22 +292,4 @@ export function registerStatsHandlers(deps: StatsHandlerDependencies): void {
return db.getSessionLifecycleEvents(range);
})
);
// Get initialization result (for showing database reset notification)
// This returns info about whether the database was reset due to corruption
ipcMain.handle(
'stats:get-initialization-result',
withIpcErrorLogging(handlerOpts('getInitializationResult'), async () => {
return getInitializationResult();
})
);
// Clear initialization result (after user has acknowledged the notification)
ipcMain.handle(
'stats:clear-initialization-result',
withIpcErrorLogging(handlerOpts('clearInitializationResult'), async () => {
clearInitializationResult();
return true;
})
);
}

View File

@@ -19,7 +19,7 @@ import { getSshRemoteConfig, createSshRemoteStoreAdapter } from '../../utils/ssh
import { buildSshCommand } from '../../utils/ssh-command-builder';
import { tabNamingPrompt } from '../../../prompts';
import type { ProcessManager } from '../../process-manager';
import type { AgentDetector } from '../../agent-detector';
import type { AgentDetector } from '../../agents';
import type { MaestroSettings } from './persistence';
const LOG_CONTEXT = '[TabNaming]';

View File

@@ -4,20 +4,9 @@
* Utility functions for aggregating token usage statistics from AI agents.
* This module is separate from process-manager to avoid circular dependencies
* and allow parsers to use it without importing node-pty dependencies.
*
* SYNC: Context calculation utilities are re-exported from shared/contextUsage.ts.
* See that file for the canonical formula and all locations that must stay in sync.
* This module provides the re-exports for the main process.
*/
// Re-export context utilities from shared module
// SYNC: See shared/contextUsage.ts for the canonical calculation
export {
DEFAULT_CONTEXT_WINDOWS,
COMBINED_CONTEXT_AGENTS,
calculateContextTokens,
estimateContextUsage,
} from '../../shared/contextUsage';
import type { ToolType } from '../../shared/types';
/**
* Model statistics from Claude Code modelUsage response
@@ -48,6 +37,122 @@ export interface UsageStats {
reasoningTokens?: number;
}
/**
 * Default context window sizes (in tokens) per agent type.
 * Used as a fallback when the agent doesn't report its own window size.
 */
export const DEFAULT_CONTEXT_WINDOWS: Record<ToolType, number> = {
  'claude-code': 200000, // Claude 3.5 Sonnet/Claude 4 default context
  codex: 200000, // OpenAI o3/o4-mini context window
  opencode: 128000, // conservative default; actual size depends on the model
  'factory-droid': 200000, // varies by model, defaults to Claude Opus
  terminal: 0, // a plain terminal has no context window
};

/**
 * Agents whose context window is a single combined input+output budget.
 * OpenAI models (Codex, o3, o4-mini) share one window across input and
 * output tokens, unlike Claude which has a separate input limit.
 */
const COMBINED_CONTEXT_AGENTS: Set<ToolType> = new Set(['codex']);

/**
 * Calculate total context tokens using agent-specific semantics.
 *
 * For a single Anthropic API call the total input context is
 * inputTokens + cacheReadInputTokens + cacheCreationInputTokens — these three
 * fields partition the input into uncached, cache-hit, and newly-cached parts.
 *
 * CAVEAT: when Claude Code performs multi-tool turns (many internal API calls)
 * the reported values may be accumulated across the whole turn and can exceed
 * the context window. Callers should detect that and skip the update
 * (see estimateContextUsage).
 *
 * Claude models: context = input + cacheRead + cacheCreation
 * OpenAI models: context = input + output (one combined limit)
 *
 * @param stats - Usage statistics containing the token counts
 * @param agentId - Agent identifier selecting the calculation variant
 * @returns Total context tokens used
 */
export function calculateContextTokens(
  stats: Pick<
    UsageStats,
    'inputTokens' | 'outputTokens' | 'cacheReadInputTokens' | 'cacheCreationInputTokens'
  >,
  agentId?: ToolType
): number {
  const newlyCached = stats.cacheCreationInputTokens || 0;

  // OpenAI-style agents: input and output share a single window.
  if (agentId != null && COMBINED_CONTEXT_AGENTS.has(agentId)) {
    return stats.inputTokens + newlyCached + stats.outputTokens;
  }

  // Claude-style: total input = uncached + cache-hit + newly-cached.
  // Output tokens do not consume the input context window.
  const cacheHits = stats.cacheReadInputTokens || 0;
  return stats.inputTokens + cacheHits + newlyCached;
}

/**
 * Estimate context usage percentage when the agent doesn't report it directly.
 * Falls back to agent-specific default window sizes when no window is given.
 *
 * Context calculation varies by agent:
 * - Claude models: inputTokens + cacheReadInputTokens + cacheCreationInputTokens
 * - OpenAI models (Codex): inputTokens + outputTokens (combined limit)
 *
 * Returns null when the computed total exceeds the window — that indicates
 * accumulated values from a multi-tool turn (a single API call's input can
 * never exceed the window), and callers should keep the previous valid
 * percentage. Also returns null when no usable window size is available.
 *
 * @param stats - Usage statistics containing the token counts
 * @param agentId - Agent identifier used for the default window lookup
 * @returns Estimated usage percentage (0-100), or null if it cannot be estimated
 */
export function estimateContextUsage(
  stats: Pick<
    UsageStats,
    | 'inputTokens'
    | 'outputTokens'
    | 'cacheReadInputTokens'
    | 'cacheCreationInputTokens'
    | 'contextWindow'
  >,
  agentId?: ToolType
): number | null {
  const used = calculateContextTokens(stats, agentId);

  // Prefer the agent-reported window; otherwise fall back to the per-agent default.
  let window = 0;
  if (stats.contextWindow && stats.contextWindow > 0) {
    window = stats.contextWindow;
  } else if (agentId && agentId !== 'terminal') {
    window = DEFAULT_CONTEXT_WINDOWS[agentId] || 0;
  }
  if (window <= 0) {
    return null; // no usable window size — cannot estimate
  }

  // Totals above the window are accumulated multi-tool-turn values; signal
  // the caller to preserve the last valid percentage.
  if (used > window) {
    return null;
  }
  if (used <= 0) {
    return 0;
  }
  return Math.round((used / window) * 100);
}
/**
* Aggregate token counts from modelUsage for accurate context tracking.
* modelUsage contains per-model breakdown with actual context tokens (including cache hits).
@@ -89,7 +194,6 @@ export function aggregateModelUsage(
modelStats.cacheCreationInputTokens || 0
);
// Use the highest context window from any model
// This ensures we track the maximum context limit across multi-model turns
if (modelStats.contextWindow && modelStats.contextWindow > contextWindow) {
contextWindow = modelStats.contextWindow;
}

View File

@@ -8,7 +8,7 @@ import { setupStatsListener } from '../stats-listener';
import type { ProcessManager } from '../../process-manager';
import type { SafeSendFn } from '../../utils/safe-send';
import type { QueryCompleteData } from '../../process-manager/types';
import type { StatsDB } from '../../stats-db';
import type { StatsDB } from '../../stats';
import type { ProcessListenerDependencies } from '../types';
describe('Stats Listener', () => {

View File

@@ -5,9 +5,9 @@
import type { ProcessManager } from '../process-manager';
import type { WebServer } from '../web-server';
import type { AgentDetector } from '../agent-detector';
import type { AgentDetector } from '../agents';
import type { SafeSendFn } from '../utils/safe-send';
import type { StatsDB } from '../stats-db';
import type { StatsDB } from '../stats';
import type { GroupChat, GroupChatParticipant } from '../group-chat/group-chat-storage';
import type { GroupChatState } from '../../shared/group-chat-types';
import type { ParticipantState } from '../ipc/handlers/groupChat';

View File

@@ -1,10 +1,6 @@
/**
* Usage statistics listener.
* Handles usage stats from AI responses, including group chat participant/moderator updates.
*
* SYNC: Context calculations use usageAggregator.calculateContextTokens() which wraps
* the shared calculateContextTokens() function from shared/contextUsage.ts.
* See that file for the canonical formula and all locations that must stay in sync.
*/
import type { ProcessManager } from '../process-manager';
@@ -56,18 +52,29 @@ export function setupUsageListener(
// Calculate context usage percentage using agent-specific logic
// Note: For group chat, we don't have agent type here, defaults to Claude behavior
const totalContextTokens = usageAggregator.calculateContextTokens(usageStats);
const contextUsage =
usageStats.contextWindow > 0
? Math.round((totalContextTokens / usageStats.contextWindow) * 100)
: 0;
const effectiveWindow = usageStats.contextWindow > 0 ? usageStats.contextWindow : 200000;
// Skip update if values are accumulated (total > window) from multi-tool turns
const contextUsage =
totalContextTokens <= effectiveWindow
? Math.round((totalContextTokens / effectiveWindow) * 100)
: -1; // -1 signals "skip update"
// Update participant with usage stats (skip context update if accumulated)
const updateData: {
contextUsage?: number;
tokenCount?: number;
totalCost: number;
} = {
totalCost: usageStats.totalCostUsd,
};
if (contextUsage >= 0) {
updateData.contextUsage = contextUsage;
updateData.tokenCount = totalContextTokens;
}
// Update participant with usage stats
groupChatStorage
.updateParticipant(groupChatId, participantName, {
contextUsage,
tokenCount: totalContextTokens,
totalCost: usageStats.totalCostUsd,
})
.updateParticipant(groupChatId, participantName, updateData)
.then((updatedChat) => {
// Emit participants changed so UI updates
// Note: updateParticipant returns the updated chat, avoiding extra DB read
@@ -91,17 +98,25 @@ export function setupUsageListener(
// Calculate context usage percentage using agent-specific logic
// Note: Moderator is typically Claude, defaults to Claude behavior
const totalContextTokens = usageAggregator.calculateContextTokens(usageStats);
const contextUsage =
usageStats.contextWindow > 0
? Math.round((totalContextTokens / usageStats.contextWindow) * 100)
: 0;
const effectiveWindow = usageStats.contextWindow > 0 ? usageStats.contextWindow : 200000;
// Emit moderator usage for the moderator card
groupChatEmitters.emitModeratorUsage?.(groupChatId, {
contextUsage,
totalCost: usageStats.totalCostUsd,
tokenCount: totalContextTokens,
});
// Skip context update if values are accumulated (total > window) from multi-tool turns.
// When accumulated, emit with contextUsage/tokenCount as -1 so the handler
// knows to preserve the previous values. Cost is always updated.
if (totalContextTokens <= effectiveWindow) {
const contextUsage = Math.round((totalContextTokens / effectiveWindow) * 100);
groupChatEmitters.emitModeratorUsage?.(groupChatId, {
contextUsage,
totalCost: usageStats.totalCostUsd,
tokenCount: totalContextTokens,
});
} else {
groupChatEmitters.emitModeratorUsage?.(groupChatId, {
contextUsage: -1,
totalCost: usageStats.totalCostUsd,
tokenCount: -1,
});
}
}
safeSend('process:usage', sessionId, usageStats);

View File

@@ -5,7 +5,7 @@ import { EventEmitter } from 'events';
import * as path from 'path';
import { logger } from '../../utils/logger';
import { getOutputParser } from '../../parsers';
import { getAgentCapabilities } from '../../agent-capabilities';
import { getAgentCapabilities } from '../../agents';
import type { ProcessConfig, ManagedProcess, SpawnResult } from '../types';
import type { DataBufferManager } from '../handlers/DataBufferManager';
import { StdoutHandler } from '../handlers/StdoutHandler';

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,353 @@
/**
* Stats Aggregation Queries
*
* Decomposes the monolithic getAggregatedStats into focused sub-query functions,
* each independently testable and readable.
*/
import type Database from 'better-sqlite3';
import type { StatsTimeRange, StatsAggregation } from '../../shared/stats-types';
import { PERFORMANCE_THRESHOLDS } from '../../shared/performance-metrics';
import { getTimeRangeStart, perfMetrics, LOG_CONTEXT } from './utils';
import { logger } from '../utils/logger';
// ============================================================================
// Sub-query Functions
// ============================================================================
/**
 * Overall totals for the window: number of query events and the sum of their
 * durations (0 when there are no rows, via COALESCE).
 */
function queryTotals(
  db: Database.Database,
  startTime: number
): { count: number; total_duration: number } {
  const t0 = perfMetrics.start();
  const sql = `
    SELECT COUNT(*) as count, COALESCE(SUM(duration), 0) as total_duration
    FROM query_events
    WHERE start_time >= ?
  `;
  const totals = db.prepare(sql).get(startTime) as { count: number; total_duration: number };
  perfMetrics.end(t0, 'getAggregatedStats:totals');
  return totals;
}
/**
 * Query counts and total durations grouped by agent type, keyed by agent_type.
 */
function queryByAgent(
  db: Database.Database,
  startTime: number
): Record<string, { count: number; duration: number }> {
  const t0 = perfMetrics.start();
  const sql = `
    SELECT agent_type, COUNT(*) as count, SUM(duration) as duration
    FROM query_events
    WHERE start_time >= ?
    GROUP BY agent_type
  `;
  const rows = db.prepare(sql).all(startTime) as Array<{
    agent_type: string;
    count: number;
    duration: number;
  }>;
  const byAgent = Object.fromEntries(
    rows.map((row) => [row.agent_type, { count: row.count, duration: row.duration }])
  );
  perfMetrics.end(t0, 'getAggregatedStats:byAgent', { agentCount: rows.length });
  return byAgent;
}
/**
 * Query counts grouped by source ('user' vs 'auto'); missing groups stay 0.
 */
function queryBySource(db: Database.Database, startTime: number): { user: number; auto: number } {
  const t0 = perfMetrics.start();
  const sql = `
    SELECT source, COUNT(*) as count
    FROM query_events
    WHERE start_time >= ?
    GROUP BY source
  `;
  const rows = db.prepare(sql).all(startTime) as Array<{ source: 'user' | 'auto'; count: number }>;
  const tallies = rows.reduce(
    (acc, row) => {
      acc[row.source] = row.count;
      return acc;
    },
    { user: 0, auto: 0 }
  );
  perfMetrics.end(t0, 'getAggregatedStats:bySource');
  return tallies;
}
/**
 * Query counts split into local vs remote. Rows with is_remote = NULL
 * (legacy data written before the column existed) are counted as local,
 * so the local total accumulates across the NULL and 0 groups.
 */
function queryByLocation(
  db: Database.Database,
  startTime: number
): { local: number; remote: number } {
  const t0 = perfMetrics.start();
  const sql = `
    SELECT is_remote, COUNT(*) as count
    FROM query_events
    WHERE start_time >= ?
    GROUP BY is_remote
  `;
  const rows = db.prepare(sql).all(startTime) as Array<{ is_remote: number | null; count: number }>;
  const tally = { local: 0, remote: 0 };
  for (const { is_remote, count } of rows) {
    if (is_remote === 1) {
      tally.remote = count;
    } else {
      // NULL (legacy) and 0 both mean local
      tally.local += count;
    }
  }
  perfMetrics.end(t0, 'getAggregatedStats:byLocation');
  return tally;
}
/**
 * Per-calendar-day (local time) query counts and total durations,
 * ordered by date ascending.
 */
function queryByDay(
  db: Database.Database,
  startTime: number
): Array<{ date: string; count: number; duration: number }> {
  const t0 = perfMetrics.start();
  const sql = `
    SELECT date(start_time / 1000, 'unixepoch', 'localtime') as date,
           COUNT(*) as count,
           SUM(duration) as duration
    FROM query_events
    WHERE start_time >= ?
    GROUP BY date(start_time / 1000, 'unixepoch', 'localtime')
    ORDER BY date ASC
  `;
  const dailyRows = db.prepare(sql).all(startTime) as Array<{
    date: string;
    count: number;
    duration: number;
  }>;
  perfMetrics.end(t0, 'getAggregatedStats:byDay', { dayCount: dailyRows.length });
  return dailyRows;
}
/**
 * Per-agent daily breakdown: for each agent type, an ordered list of
 * { date, count, duration } entries (local-time calendar days).
 */
function queryByAgentByDay(
  db: Database.Database,
  startTime: number
): Record<string, Array<{ date: string; count: number; duration: number }>> {
  const t0 = perfMetrics.start();
  const sql = `
    SELECT agent_type,
           date(start_time / 1000, 'unixepoch', 'localtime') as date,
           COUNT(*) as count,
           SUM(duration) as duration
    FROM query_events
    WHERE start_time >= ?
    GROUP BY agent_type, date(start_time / 1000, 'unixepoch', 'localtime')
    ORDER BY agent_type, date ASC
  `;
  const rows = db.prepare(sql).all(startTime) as Array<{
    agent_type: string;
    date: string;
    count: number;
    duration: number;
  }>;
  const grouped: Record<string, Array<{ date: string; count: number; duration: number }>> = {};
  for (const { agent_type, date, count, duration } of rows) {
    (grouped[agent_type] ??= []).push({ date, count, duration });
  }
  perfMetrics.end(t0, 'getAggregatedStats:byAgentByDay');
  return grouped;
}
/**
 * Query counts and total durations bucketed by local-time hour of day (0-23),
 * ordered by hour ascending. Hours with no queries are omitted.
 */
function queryByHour(
  db: Database.Database,
  startTime: number
): Array<{ hour: number; count: number; duration: number }> {
  const t0 = perfMetrics.start();
  const sql = `
    SELECT CAST(strftime('%H', start_time / 1000, 'unixepoch', 'localtime') AS INTEGER) as hour,
           COUNT(*) as count,
           SUM(duration) as duration
    FROM query_events
    WHERE start_time >= ?
    GROUP BY hour
    ORDER BY hour ASC
  `;
  const hourlyRows = db.prepare(sql).all(startTime) as Array<{
    hour: number;
    count: number;
    duration: number;
  }>;
  perfMetrics.end(t0, 'getAggregatedStats:byHour');
  return hourlyRows;
}
/**
 * Session-level statistics for the window:
 * - totalSessions: distinct sessions that produced at least one query
 * - sessionsByAgent / sessionsByDay: counts from the lifecycle table
 * - avgSessionDuration: mean duration of completed sessions, rounded
 */
function querySessionStats(
  db: Database.Database,
  startTime: number
): {
  totalSessions: number;
  sessionsByAgent: Record<string, number>;
  sessionsByDay: Array<{ date: string; count: number }>;
  avgSessionDuration: number;
} {
  const t0 = perfMetrics.start();

  // Distinct sessions with at least one query event in the window.
  const distinctSessions = db
    .prepare(
      `
    SELECT COUNT(DISTINCT session_id) as count
    FROM query_events
    WHERE start_time >= ?
  `
    )
    .get(startTime) as { count: number };

  // Mean session duration from the lifecycle table; rows without a duration
  // (sessions still open) are excluded, and AVG of no rows becomes 0.
  const avgRow = db
    .prepare(
      `
    SELECT COALESCE(AVG(duration), 0) as avg_duration
    FROM session_lifecycle
    WHERE created_at >= ? AND duration IS NOT NULL
  `
    )
    .get(startTime) as { avg_duration: number };

  // Session counts per agent type.
  const agentRows = db
    .prepare(
      `
    SELECT agent_type, COUNT(*) as count
    FROM session_lifecycle
    WHERE created_at >= ?
    GROUP BY agent_type
  `
    )
    .all(startTime) as Array<{ agent_type: string; count: number }>;
  const sessionsByAgent: Record<string, number> = Object.fromEntries(
    agentRows.map((row) => [row.agent_type, row.count])
  );

  // Session counts per local-time calendar day.
  const dayRows = db
    .prepare(
      `
    SELECT date(created_at / 1000, 'unixepoch', 'localtime') as date,
           COUNT(*) as count
    FROM session_lifecycle
    WHERE created_at >= ?
    GROUP BY date(created_at / 1000, 'unixepoch', 'localtime')
    ORDER BY date ASC
  `
    )
    .all(startTime) as Array<{ date: string; count: number }>;

  perfMetrics.end(t0, 'getAggregatedStats:sessions', {
    sessionCount: distinctSessions.count,
  });
  return {
    totalSessions: distinctSessions.count,
    sessionsByAgent,
    sessionsByDay: dayRows,
    avgSessionDuration: Math.round(avgRow.avg_duration),
  };
}
/**
 * Per-session daily breakdown: for each session id, an ordered list of
 * { date, count, duration } entries (local-time calendar days).
 */
function queryBySessionByDay(
  db: Database.Database,
  startTime: number
): Record<string, Array<{ date: string; count: number; duration: number }>> {
  const t0 = perfMetrics.start();
  const sql = `
    SELECT session_id,
           date(start_time / 1000, 'unixepoch', 'localtime') as date,
           COUNT(*) as count,
           SUM(duration) as duration
    FROM query_events
    WHERE start_time >= ?
    GROUP BY session_id, date(start_time / 1000, 'unixepoch', 'localtime')
    ORDER BY session_id, date ASC
  `;
  const rows = db.prepare(sql).all(startTime) as Array<{
    session_id: string;
    date: string;
    count: number;
    duration: number;
  }>;
  const grouped: Record<string, Array<{ date: string; count: number; duration: number }>> = {};
  for (const { session_id, date, count, duration } of rows) {
    (grouped[session_id] ??= []).push({ date, count, duration });
  }
  perfMetrics.end(t0, 'getAggregatedStats:bySessionByDay');
  return grouped;
}
// ============================================================================
// Orchestrator
// ============================================================================
/**
 * Get aggregated statistics for a time range.
 *
 * Orchestrates the focused sub-query functions above and assembles their
 * results into a single StatsAggregation. Logs a warning when the whole
 * aggregation exceeds the dashboard-load performance threshold.
 *
 * @param db - Open stats database handle
 * @param range - Time range to aggregate over
 * @returns Aggregated statistics for the range
 */
export function getAggregatedStats(db: Database.Database, range: StatsTimeRange): StatsAggregation {
  const t0 = perfMetrics.start();
  const since = getTimeRangeStart(range);

  const totals = queryTotals(db, since);
  const byAgent = queryByAgent(db, since);
  const bySource = queryBySource(db, since);
  const byLocation = queryByLocation(db, since);
  const byDay = queryByDay(db, since);
  const byAgentByDay = queryByAgentByDay(db, since);
  const byHour = queryByHour(db, since);
  const sessionStats = querySessionStats(db, since);
  const bySessionByDay = queryBySessionByDay(db, since);

  const elapsed = perfMetrics.end(t0, 'getAggregatedStats:total', {
    range,
    totalQueries: totals.count,
  });
  // Surface slow aggregations so dashboard latency regressions are visible.
  if (elapsed > PERFORMANCE_THRESHOLDS.DASHBOARD_LOAD) {
    logger.warn(
      `getAggregatedStats took ${elapsed.toFixed(0)}ms (threshold: ${PERFORMANCE_THRESHOLDS.DASHBOARD_LOAD}ms)`,
      LOG_CONTEXT,
      { range, totalQueries: totals.count }
    );
  }

  const avgDuration = totals.count > 0 ? Math.round(totals.total_duration / totals.count) : 0;
  return {
    totalQueries: totals.count,
    totalDuration: totals.total_duration,
    avgDuration,
    byAgent,
    bySource,
    byDay,
    byLocation,
    byHour,
    ...sessionStats,
    byAgentByDay,
    bySessionByDay,
  };
}

169
src/main/stats/auto-run.ts Normal file
View File

@@ -0,0 +1,169 @@
/**
* Auto Run CRUD Operations
*
* Handles insertion, updating, and retrieval of Auto Run sessions and tasks.
*/
import type Database from 'better-sqlite3';
import type { AutoRunSession, AutoRunTask, StatsTimeRange } from '../../shared/stats-types';
import { generateId, getTimeRangeStart, normalizePath, LOG_CONTEXT } from './utils';
import {
mapAutoRunSessionRow,
mapAutoRunTaskRow,
type AutoRunSessionRow,
type AutoRunTaskRow,
} from './row-mappers';
import { StatementCache } from './utils';
import { logger } from '../utils/logger';
// Shared prepared-statement cache for this module (cleared via clearAutoRunCache).
const stmtCache = new StatementCache();

// ============================================================================
// Auto Run Sessions
// ============================================================================

const INSERT_SESSION_SQL = `
  INSERT INTO auto_run_sessions (id, session_id, agent_type, document_path, start_time, duration, tasks_total, tasks_completed, project_path)
  VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
`;

/**
 * Insert a new Auto Run session.
 *
 * Paths are normalized before storage; optional task counters are stored as
 * NULL when absent.
 *
 * @param db - Open stats database handle
 * @param session - Session fields (the id is generated here)
 * @returns The generated session id
 */
export function insertAutoRunSession(
  db: Database.Database,
  session: Omit<AutoRunSession, 'id'>
): string {
  const id = generateId();
  // Column order must match INSERT_SESSION_SQL's placeholder order.
  stmtCache
    .get(db, INSERT_SESSION_SQL)
    .run(
      id,
      session.sessionId,
      session.agentType,
      normalizePath(session.documentPath),
      session.startTime,
      session.duration,
      session.tasksTotal ?? null,
      session.tasksCompleted ?? null,
      normalizePath(session.projectPath)
    );
  logger.debug(`Inserted Auto Run session ${id}`, LOG_CONTEXT);
  return id;
}
/**
 * Update an existing Auto Run session (e.g., when it completes).
 *
 * Builds the UPDATE statement dynamically from the fields present in
 * `updates`; fields that are undefined are left untouched.
 *
 * @param db - Open stats database handle
 * @param id - Session id to update
 * @param updates - Subset of fields to change
 * @returns true when a row was actually updated
 */
export function updateAutoRunSession(
  db: Database.Database,
  id: string,
  updates: Partial<AutoRunSession>
): boolean {
  // Collect (clause, bound value) pairs for each provided field.
  const assignments: Array<[string, string | number | null]> = [];
  if (updates.duration !== undefined) {
    assignments.push(['duration = ?', updates.duration]);
  }
  if (updates.tasksTotal !== undefined) {
    assignments.push(['tasks_total = ?', updates.tasksTotal ?? null]);
  }
  if (updates.tasksCompleted !== undefined) {
    assignments.push(['tasks_completed = ?', updates.tasksCompleted ?? null]);
  }
  if (updates.documentPath !== undefined) {
    assignments.push(['document_path = ?', normalizePath(updates.documentPath)]);
  }
  if (assignments.length === 0) {
    return false;
  }
  // Dynamic SQL, so it isn't cached in stmtCache.
  const sql = `UPDATE auto_run_sessions SET ${assignments.map(([clause]) => clause).join(', ')} WHERE id = ?`;
  const outcome = db.prepare(sql).run(...assignments.map(([, value]) => value), id);
  logger.debug(`Updated Auto Run session ${id}`, LOG_CONTEXT);
  return outcome.changes > 0;
}
/**
 * Get Auto Run sessions whose start_time falls within the given time range,
 * newest first.
 *
 * @param db - Open stats database handle
 * @param range - Time range to filter by
 * @returns Mapped Auto Run session records
 */
export function getAutoRunSessions(db: Database.Database, range: StatsTimeRange): AutoRunSession[] {
  const since = getTimeRangeStart(range);
  const sql = `
    SELECT * FROM auto_run_sessions
    WHERE start_time >= ?
    ORDER BY start_time DESC
  `;
  const rows = stmtCache.get(db, sql).all(since) as AutoRunSessionRow[];
  return rows.map(mapAutoRunSessionRow);
}
// ============================================================================
// Auto Run Tasks
// ============================================================================

const INSERT_TASK_SQL = `
  INSERT INTO auto_run_tasks (id, auto_run_session_id, session_id, agent_type, task_index, task_content, start_time, duration, success)
  VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
`;

/**
 * Insert a new Auto Run task.
 *
 * The boolean `success` flag is stored as 1/0; a missing taskContent is
 * stored as NULL.
 *
 * @param db - Open stats database handle
 * @param task - Task fields (the id is generated here)
 * @returns The generated task id
 */
export function insertAutoRunTask(db: Database.Database, task: Omit<AutoRunTask, 'id'>): string {
  const id = generateId();
  // Column order must match INSERT_TASK_SQL's placeholder order.
  stmtCache
    .get(db, INSERT_TASK_SQL)
    .run(
      id,
      task.autoRunSessionId,
      task.sessionId,
      task.agentType,
      task.taskIndex,
      task.taskContent ?? null,
      task.startTime,
      task.duration,
      task.success ? 1 : 0
    );
  logger.debug(`Inserted Auto Run task ${id}`, LOG_CONTEXT);
  return id;
}
/**
 * Get all tasks belonging to a specific Auto Run session, in task order.
 *
 * @param db - Open stats database handle
 * @param autoRunSessionId - Parent Auto Run session id
 * @returns Mapped task records ordered by task_index ascending
 */
export function getAutoRunTasks(db: Database.Database, autoRunSessionId: string): AutoRunTask[] {
  const sql = `
    SELECT * FROM auto_run_tasks
    WHERE auto_run_session_id = ?
    ORDER BY task_index ASC
  `;
  const rows = stmtCache.get(db, sql).all(autoRunSessionId) as AutoRunTaskRow[];
  return rows.map(mapAutoRunTaskRow);
}
/**
 * Release every cached prepared statement in this module.
 * Must be called when the database connection closes, because prepared
 * statements do not survive their connection.
 */
export function clearAutoRunCache(): void {
  stmtCache.clear();
}

View File

@@ -0,0 +1,170 @@
/**
* Data Management Operations
*
* Handles data cleanup (with transactional safety) and CSV export
* (with proper escaping and complete field coverage).
*/
import type Database from 'better-sqlite3';
import type { StatsTimeRange } from '../../shared/stats-types';
import { getQueryEvents } from './query-events';
import { LOG_CONTEXT } from './utils';
import { logger } from '../utils/logger';
// ============================================================================
// Data Cleanup
// ============================================================================
/**
 * Delete stats records older than a cutoff.
 *
 * Removes query_events, auto_run_sessions (and their auto_run_tasks), and
 * session_lifecycle rows whose timestamps precede `olderThanDays` days ago.
 * All deletes execute inside a single transaction, so cleanup is
 * all-or-nothing.
 *
 * @param db - Open stats database handle
 * @param olderThanDays - Age threshold in days; must be positive
 * @returns Per-table delete counts plus a success flag / error message
 */
export function clearOldData(
  db: Database.Database,
  olderThanDays: number
): {
  success: boolean;
  deletedQueryEvents: number;
  deletedAutoRunSessions: number;
  deletedAutoRunTasks: number;
  deletedSessionLifecycle: number;
  error?: string;
} {
  // Zeroed counts reused for both the validation failure and the error path.
  const emptyCounts = {
    deletedQueryEvents: 0,
    deletedAutoRunSessions: 0,
    deletedAutoRunTasks: 0,
    deletedSessionLifecycle: 0,
  };
  if (olderThanDays <= 0) {
    return { success: false, ...emptyCounts, error: 'olderThanDays must be greater than 0' };
  }
  try {
    const MS_PER_DAY = 24 * 60 * 60 * 1000;
    const cutoffTime = Date.now() - olderThanDays * MS_PER_DAY;
    logger.info(
      `Clearing stats data older than ${olderThanDays} days (before ${new Date(cutoffTime).toISOString()})`,
      LOG_CONTEXT
    );
    const counts = { events: 0, sessions: 0, tasks: 0, lifecycle: 0 };
    // Single transaction: either every table is cleaned or none is.
    const runCleanup = db.transaction(() => {
      // Tasks first — cascade delete for sessions that are about to go away.
      counts.tasks = db
        .prepare(
          'DELETE FROM auto_run_tasks WHERE auto_run_session_id IN (SELECT id FROM auto_run_sessions WHERE start_time < ?)'
        )
        .run(cutoffTime).changes;
      counts.sessions = db
        .prepare('DELETE FROM auto_run_sessions WHERE start_time < ?')
        .run(cutoffTime).changes;
      counts.events = db
        .prepare('DELETE FROM query_events WHERE start_time < ?')
        .run(cutoffTime).changes;
      counts.lifecycle = db
        .prepare('DELETE FROM session_lifecycle WHERE created_at < ?')
        .run(cutoffTime).changes;
    });
    runCleanup();
    const total = counts.events + counts.sessions + counts.tasks + counts.lifecycle;
    logger.info(
      `Cleared ${total} old stats records (${counts.events} query events, ${counts.sessions} auto-run sessions, ${counts.tasks} auto-run tasks, ${counts.lifecycle} session lifecycle)`,
      LOG_CONTEXT
    );
    return {
      success: true,
      deletedQueryEvents: counts.events,
      deletedAutoRunSessions: counts.sessions,
      deletedAutoRunTasks: counts.tasks,
      deletedSessionLifecycle: counts.lifecycle,
    };
  } catch (error) {
    const errorMessage = error instanceof Error ? error.message : String(error);
    logger.error(`Failed to clear old stats data: ${errorMessage}`, LOG_CONTEXT);
    return { success: false, ...emptyCounts, error: errorMessage };
  }
}
// ============================================================================
// CSV Export
// ============================================================================
/**
 * Quote one CSV field per RFC 4180: wrap it in double quotes and double
 * any embedded double-quote characters.
 */
function csvEscape(value: string): string {
  const doubled = value.split('"').join('""');
  return `"${doubled}"`;
}
/**
 * Serialize query events in the given range to a CSV string.
 *
 * Emits one header row followed by one row per event. Every field goes
 * through csvEscape, so quotes, commas, and newlines are safe; optional
 * fields (projectPath, tabId, isRemote) render as empty strings when unset.
 */
export function exportToCsv(db: Database.Database, range: StatsTimeRange): string {
  const events = getQueryEvents(db, range);
  const headers = [
    'id',
    'sessionId',
    'agentType',
    'source',
    'startTime',
    'duration',
    'projectPath',
    'tabId',
    'isRemote',
  ];
  const lines: string[] = [headers.join(',')];
  for (const event of events) {
    const fields = [
      event.id,
      event.sessionId,
      event.agentType,
      event.source,
      new Date(event.startTime).toISOString(),
      event.duration.toString(),
      event.projectPath ?? '',
      event.tabId ?? '',
      event.isRemote !== undefined ? String(event.isRemote) : '',
    ];
    lines.push(fields.map(csvEscape).join(','));
  }
  return lines.join('\n');
}

44
src/main/stats/index.ts Normal file
View File

@@ -0,0 +1,44 @@
/**
* Stats Module
*
* Consolidated module for all stats database functionality:
* - SQLite database lifecycle and integrity management
* - Migration system for schema evolution
* - CRUD operations for query events, auto-run sessions/tasks, and session lifecycle
* - Aggregated statistics for the Usage Dashboard
* - Data management (cleanup, CSV export)
* - Singleton instance management
* - Performance metrics API
*
* Usage:
* ```typescript
* import { getStatsDB, initializeStatsDB, closeStatsDB } from './stats';
* import type { StatsDB } from './stats';
* ```
*/
// ============ Types ============
export type {
IntegrityCheckResult,
BackupResult,
CorruptionRecoveryResult,
Migration,
MigrationRecord,
} from './types';
// ============ Utilities ============
export { normalizePath } from './utils';
// ============ Core Database ============
export { StatsDB } from './stats-db';
// ============ Singleton & Lifecycle ============
export { getStatsDB, initializeStatsDB, closeStatsDB } from './singleton';
// ============ Performance Metrics API ============
export {
setPerformanceLoggingEnabled,
isPerformanceLoggingEnabled,
getPerformanceMetrics,
clearPerformanceMetrics,
} from './singleton';

View File

@@ -0,0 +1,234 @@
/**
* Stats Database Migration System
*
* Manages schema evolution through versioned, sequential migrations.
* Each migration runs exactly once and is recorded in the _migrations table.
*
* ### Adding New Migrations
*
* 1. Create a new `migrateVN()` function
* 2. Add it to the `getMigrations()` array with version number and description
* 3. Update `STATS_DB_VERSION` in `../../shared/stats-types.ts`
*/
import type Database from 'better-sqlite3';
import type { Migration, MigrationRecord } from './types';
import { mapMigrationRecordRow, type MigrationRecordRow } from './row-mappers';
import {
CREATE_MIGRATIONS_TABLE_SQL,
CREATE_QUERY_EVENTS_SQL,
CREATE_QUERY_EVENTS_INDEXES_SQL,
CREATE_AUTO_RUN_SESSIONS_SQL,
CREATE_AUTO_RUN_SESSIONS_INDEXES_SQL,
CREATE_AUTO_RUN_TASKS_SQL,
CREATE_AUTO_RUN_TASKS_INDEXES_SQL,
CREATE_SESSION_LIFECYCLE_SQL,
CREATE_SESSION_LIFECYCLE_INDEXES_SQL,
runStatements,
} from './schema';
import { LOG_CONTEXT } from './utils';
import { logger } from '../utils/logger';
// ============================================================================
// Migration Registry
// ============================================================================
/**
 * Registry of all database migrations, one entry per schema version.
 * Versions must be sequential starting from 1; each migration runs once.
 */
export function getMigrations(): Migration[] {
  return [
    {
      version: 1,
      description: 'Initial schema: query_events, auto_run_sessions, auto_run_tasks tables',
      up: migrateV1,
    },
    {
      version: 2,
      description: 'Add is_remote column to query_events for tracking SSH sessions',
      up: migrateV2,
    },
    {
      version: 3,
      description: 'Add session_lifecycle table for tracking session creation and closure',
      up: migrateV3,
    },
  ];
}
// ============================================================================
// Migration Execution
// ============================================================================
/**
* Run all pending database migrations.
*
* 1. Creates the _migrations table if it doesn't exist
* 2. Gets the current schema version from user_version pragma
* 3. Runs each pending migration in a transaction
* 4. Records each migration in the _migrations table
* 5. Updates the user_version pragma
*/
export function runMigrations(db: Database.Database): void {
// Create migrations table (the only table created outside the migration system)
db.prepare(CREATE_MIGRATIONS_TABLE_SQL).run();
// Get current version (0 if fresh database)
const versionResult = db.pragma('user_version') as Array<{ user_version: number }>;
const currentVersion = versionResult[0]?.user_version ?? 0;
const migrations = getMigrations();
const pendingMigrations = migrations.filter((m) => m.version > currentVersion);
if (pendingMigrations.length === 0) {
logger.debug(`Database is up to date (version ${currentVersion})`, LOG_CONTEXT);
return;
}
// Sort by version to ensure sequential execution
pendingMigrations.sort((a, b) => a.version - b.version);
logger.info(
`Running ${pendingMigrations.length} pending migration(s) (current version: ${currentVersion})`,
LOG_CONTEXT
);
for (const migration of pendingMigrations) {
applyMigration(db, migration);
}
}
/**
 * Apply a single migration within a transaction.
 *
 * On success the schema change, the _migrations record, and the
 * user_version bump all commit atomically. On failure the transaction
 * rolls back, a 'failed' record is written (best-effort), and the
 * original error is rethrown to the caller.
 *
 * @throws Rethrows whatever the migration's up() threw.
 */
function applyMigration(db: Database.Database, migration: Migration): void {
  const startTime = Date.now();
  logger.info(`Applying migration v${migration.version}: ${migration.description}`, LOG_CONTEXT);
  try {
    const runMigrationTxn = db.transaction(() => {
      migration.up(db);
      db.prepare(
        `
      INSERT OR REPLACE INTO _migrations (version, description, applied_at, status, error_message)
      VALUES (?, ?, ?, 'success', NULL)
    `
      ).run(migration.version, migration.description, Date.now());
      db.pragma(`user_version = ${migration.version}`);
    });
    runMigrationTxn();
    const duration = Date.now() - startTime;
    logger.info(`Migration v${migration.version} completed in ${duration}ms`, LOG_CONTEXT);
  } catch (error) {
    const errorMessage = error instanceof Error ? error.message : String(error);
    // Best-effort bookkeeping: if writing the failure record itself throws
    // (e.g. the database is locked or corrupted), don't let that secondary
    // error mask the original migration failure — log it and move on.
    try {
      db.prepare(
        `
      INSERT OR REPLACE INTO _migrations (version, description, applied_at, status, error_message)
      VALUES (?, ?, ?, 'failed', ?)
    `
      ).run(migration.version, migration.description, Date.now(), errorMessage);
    } catch (recordError) {
      logger.error(`Failed to record migration failure: ${recordError}`, LOG_CONTEXT);
    }
    logger.error(`Migration v${migration.version} failed: ${errorMessage}`, LOG_CONTEXT);
    throw error;
  }
}
// ============================================================================
// Migration Queries
// ============================================================================
/**
 * Read the applied-migration log from the _migrations table.
 * Returns an empty list when the table does not exist yet (i.e. migrations
 * have never run against this database file).
 */
export function getMigrationHistory(db: Database.Database): MigrationRecord[] {
  const migrationsTable = db
    .prepare("SELECT name FROM sqlite_master WHERE type='table' AND name='_migrations'")
    .get();
  if (migrationsTable === undefined) {
    return [];
  }
  const rawRecords = db
    .prepare(
      `
    SELECT version, description, applied_at, status, error_message
    FROM _migrations
    ORDER BY version ASC
  `
    )
    .all() as MigrationRecordRow[];
  return rawRecords.map((row) => mapMigrationRecordRow(row));
}
/**
 * Read the schema version stored in SQLite's user_version pragma
 * (0 for a fresh database).
 */
export function getCurrentVersion(db: Database.Database): number {
  const [firstRow] = db.pragma('user_version') as Array<{ user_version: number }>;
  return firstRow?.user_version ?? 0;
}
/**
* Get the target version (highest version in migrations registry).
*/
export function getTargetVersion(): number {
const migrations = getMigrations();
if (migrations.length === 0) return 0;
return Math.max(...migrations.map((m) => m.version));
}
/**
 * True when the database's schema version lags the registry's target version.
 */
export function hasPendingMigrations(db: Database.Database): boolean {
  const current = getCurrentVersion(db);
  const target = getTargetVersion();
  return current < target;
}
// ============================================================================
// Individual Migration Functions
// ============================================================================
/**
 * v1: create the initial tables (query_events, auto_run_sessions,
 * auto_run_tasks) together with their indexes.
 */
function migrateV1(db: Database.Database): void {
  const tableAndIndexPairs: Array<[string, string]> = [
    [CREATE_QUERY_EVENTS_SQL, CREATE_QUERY_EVENTS_INDEXES_SQL],
    [CREATE_AUTO_RUN_SESSIONS_SQL, CREATE_AUTO_RUN_SESSIONS_INDEXES_SQL],
    [CREATE_AUTO_RUN_TASKS_SQL, CREATE_AUTO_RUN_TASKS_INDEXES_SQL],
  ];
  for (const [createTableSql, createIndexesSql] of tableAndIndexPairs) {
    db.prepare(createTableSql).run();
    runStatements(db, createIndexesSql);
  }
  logger.debug('Created stats database tables and indexes', LOG_CONTEXT);
}
/**
 * v2: add the nullable is_remote column for SSH session tracking, plus an
 * index over it.
 */
function migrateV2(db: Database.Database): void {
  db.prepare('ALTER TABLE query_events ADD COLUMN is_remote INTEGER').run();
  db.prepare('CREATE INDEX IF NOT EXISTS idx_query_is_remote ON query_events(is_remote)').run();
  logger.debug('Added is_remote column to query_events table', LOG_CONTEXT);
}
/**
 * v3: create the session_lifecycle table and its indexes.
 */
function migrateV3(db: Database.Database): void {
  db.prepare(CREATE_SESSION_LIFECYCLE_SQL).run();
  runStatements(db, CREATE_SESSION_LIFECYCLE_INDEXES_SQL);
  logger.debug('Created session_lifecycle table', LOG_CONTEXT);
}

View File

@@ -0,0 +1,87 @@
/**
* Query Event CRUD Operations
*
* Handles insertion and retrieval of individual AI query/response cycle records.
*/
import type Database from 'better-sqlite3';
import type { QueryEvent, StatsTimeRange, StatsFilters } from '../../shared/stats-types';
import { generateId, getTimeRangeStart, normalizePath, LOG_CONTEXT } from './utils';
import { mapQueryEventRow, type QueryEventRow } from './row-mappers';
import { StatementCache } from './utils';
import { logger } from '../utils/logger';
const stmtCache = new StatementCache();
const INSERT_SQL = `
INSERT INTO query_events (id, session_id, agent_type, source, start_time, duration, project_path, tab_id, is_remote)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
`;
/**
 * Persist one query event (a single AI query/response cycle).
 *
 * @param db - Open stats database handle
 * @param event - Event payload; the row id is generated here
 * @returns The generated event id
 */
export function insertQueryEvent(db: Database.Database, event: Omit<QueryEvent, 'id'>): string {
  const eventId = generateId();
  // Tri-state boolean maps to 1 / 0 / NULL — SQLite has no boolean type.
  let isRemoteFlag: number | null = null;
  if (event.isRemote !== undefined) {
    isRemoteFlag = event.isRemote ? 1 : 0;
  }
  const insertStmt = stmtCache.get(db, INSERT_SQL);
  insertStmt.run(
    eventId,
    event.sessionId,
    event.agentType,
    event.source,
    event.startTime,
    event.duration,
    normalizePath(event.projectPath),
    event.tabId ?? null,
    isRemoteFlag
  );
  logger.debug(`Inserted query event ${eventId}`, LOG_CONTEXT);
  return eventId;
}
/**
* Get query events within a time range with optional filters
*/
export function getQueryEvents(
db: Database.Database,
range: StatsTimeRange,
filters?: StatsFilters
): QueryEvent[] {
const startTime = getTimeRangeStart(range);
let sql = 'SELECT * FROM query_events WHERE start_time >= ?';
const params: (string | number)[] = [startTime];
if (filters?.agentType) {
sql += ' AND agent_type = ?';
params.push(filters.agentType);
}
if (filters?.source) {
sql += ' AND source = ?';
params.push(filters.source);
}
if (filters?.projectPath) {
sql += ' AND project_path = ?';
// Normalize filter path to match stored format
params.push(normalizePath(filters.projectPath) ?? '');
}
if (filters?.sessionId) {
sql += ' AND session_id = ?';
params.push(filters.sessionId);
}
sql += ' ORDER BY start_time DESC';
const stmt = db.prepare(sql);
const rows = stmt.all(...params) as QueryEventRow[];
return rows.map(mapQueryEventRow);
}
/**
* Clear the statement cache (call when database is closed)
*/
export function clearQueryEventCache(): void {
stmtCache.clear();
}

View File

@@ -0,0 +1,142 @@
/**
* Row Mapper Functions
*
* Converts snake_case SQLite row objects to camelCase TypeScript interfaces.
* Centralizes the mapping logic that was previously duplicated across CRUD methods.
*/
import type {
QueryEvent,
AutoRunSession,
AutoRunTask,
SessionLifecycleEvent,
} from '../../shared/stats-types';
import type { MigrationRecord } from './types';
// ============================================================================
// Raw Row Types (snake_case from SQLite)
// ============================================================================
/** Raw query_events row exactly as returned by better-sqlite3. */
export interface QueryEventRow {
  id: string;
  session_id: string;
  agent_type: string;
  source: 'user' | 'auto';
  start_time: number; // epoch ms
  duration: number; // ms
  project_path: string | null;
  tab_id: string | null;
  is_remote: number | null; // 1 / 0 / NULL (unknown) — SQLite has no boolean type
}
/** Raw auto_run_sessions row. */
export interface AutoRunSessionRow {
  id: string;
  session_id: string;
  agent_type: string;
  document_path: string | null;
  start_time: number; // epoch ms
  duration: number; // ms
  tasks_total: number | null;
  tasks_completed: number | null;
  project_path: string | null;
}
/** Raw auto_run_tasks row. */
export interface AutoRunTaskRow {
  id: string;
  auto_run_session_id: string; // references auto_run_sessions.id
  session_id: string;
  agent_type: string;
  task_index: number; // ordering key for tasks within a session
  task_content: string | null;
  start_time: number; // epoch ms
  duration: number; // ms
  success: number; // stored as 0 or 1
}
/** Raw session_lifecycle row. */
export interface SessionLifecycleRow {
  id: string;
  session_id: string;
  agent_type: string;
  project_path: string | null;
  created_at: number; // epoch ms
  closed_at: number | null; // NULL while the session is still open
  duration: number | null; // closed_at - created_at; NULL while open
  is_remote: number | null; // 1 / 0 / NULL (unknown) — SQLite has no boolean type
}
/** Raw _migrations row. */
export interface MigrationRecordRow {
  version: number;
  description: string;
  applied_at: number; // epoch ms when the migration ran
  status: 'success' | 'failed';
  error_message: string | null; // populated only for failed migrations
}
// ============================================================================
// Mapper Functions
// ============================================================================
/** Convert a raw query_events row (snake_case) into a QueryEvent. */
export function mapQueryEventRow(row: QueryEventRow): QueryEvent {
  // SQL NULLs become undefined; is_remote is tri-state (1 / 0 / NULL).
  const isRemote = row.is_remote === null ? undefined : row.is_remote === 1;
  return {
    id: row.id,
    sessionId: row.session_id,
    agentType: row.agent_type,
    source: row.source,
    startTime: row.start_time,
    duration: row.duration,
    projectPath: row.project_path === null ? undefined : row.project_path,
    tabId: row.tab_id === null ? undefined : row.tab_id,
    isRemote,
  };
}
/** Convert a raw auto_run_sessions row (snake_case) into an AutoRunSession. */
export function mapAutoRunSessionRow(row: AutoRunSessionRow): AutoRunSession {
  return {
    id: row.id,
    sessionId: row.session_id,
    agentType: row.agent_type,
    // Optional columns: SQL NULL maps to undefined on the TS side.
    documentPath: row.document_path === null ? undefined : row.document_path,
    startTime: row.start_time,
    duration: row.duration,
    tasksTotal: row.tasks_total === null ? undefined : row.tasks_total,
    tasksCompleted: row.tasks_completed === null ? undefined : row.tasks_completed,
    projectPath: row.project_path === null ? undefined : row.project_path,
  };
}
/** Convert a raw auto_run_tasks row (snake_case) into an AutoRunTask. */
export function mapAutoRunTaskRow(row: AutoRunTaskRow): AutoRunTask {
  return {
    id: row.id,
    autoRunSessionId: row.auto_run_session_id,
    sessionId: row.session_id,
    agentType: row.agent_type,
    taskIndex: row.task_index,
    taskContent: row.task_content === null ? undefined : row.task_content,
    startTime: row.start_time,
    duration: row.duration,
    // success is persisted as 0/1 in SQLite.
    success: row.success === 1,
  };
}
/** Convert a raw session_lifecycle row (snake_case) into a SessionLifecycleEvent. */
export function mapSessionLifecycleRow(row: SessionLifecycleRow): SessionLifecycleEvent {
  // is_remote is tri-state in SQLite: 1 / 0 / NULL (unknown).
  const isRemote = row.is_remote === null ? undefined : row.is_remote === 1;
  return {
    id: row.id,
    sessionId: row.session_id,
    agentType: row.agent_type,
    projectPath: row.project_path === null ? undefined : row.project_path,
    createdAt: row.created_at,
    // closed_at / duration remain NULL until the session is recorded closed.
    closedAt: row.closed_at === null ? undefined : row.closed_at,
    duration: row.duration === null ? undefined : row.duration,
    isRemote,
  };
}
/** Convert a raw _migrations row (snake_case) into a MigrationRecord. */
export function mapMigrationRecordRow(row: MigrationRecordRow): MigrationRecord {
  return {
    version: row.version,
    description: row.description,
    appliedAt: row.applied_at,
    status: row.status,
    // error_message is NULL for successful migrations.
    errorMessage: row.error_message === null ? undefined : row.error_message,
  };
}

141
src/main/stats/schema.ts Normal file
View File

@@ -0,0 +1,141 @@
/**
* Stats Database Schema
*
* SQL definitions for all tables and indexes, plus helper utilities
* for executing multi-statement SQL strings.
*/
import type Database from 'better-sqlite3';
// ============================================================================
// Migrations Infrastructure
// ============================================================================
// Bookkeeping table recording every migration attempt (success or failure).
// This is the only table created directly, outside the migration system,
// so it is guaranteed to exist before migrations run.
export const CREATE_MIGRATIONS_TABLE_SQL = `
CREATE TABLE IF NOT EXISTS _migrations (
  version INTEGER PRIMARY KEY,
  description TEXT NOT NULL,
  applied_at INTEGER NOT NULL,
  status TEXT NOT NULL CHECK(status IN ('success', 'failed')),
  error_message TEXT
)
`;
// ============================================================================
// Metadata Table (for internal key-value storage like vacuum timestamps)
// ============================================================================
export const CREATE_META_TABLE_SQL = `
CREATE TABLE IF NOT EXISTS _meta (
  key TEXT PRIMARY KEY,
  value TEXT NOT NULL
)
`;
// ============================================================================
// Query Events (Migration v1)
// ============================================================================
// One row per AI query/response cycle. Note: is_remote is added later by
// migration v2, so it is absent from this v1 CREATE statement.
export const CREATE_QUERY_EVENTS_SQL = `
CREATE TABLE IF NOT EXISTS query_events (
  id TEXT PRIMARY KEY,
  session_id TEXT NOT NULL,
  agent_type TEXT NOT NULL,
  source TEXT NOT NULL CHECK(source IN ('user', 'auto')),
  start_time INTEGER NOT NULL,
  duration INTEGER NOT NULL,
  project_path TEXT,
  tab_id TEXT
)
`;
// Multiple statements in one string — execute with runStatements().
export const CREATE_QUERY_EVENTS_INDEXES_SQL = `
CREATE INDEX IF NOT EXISTS idx_query_start_time ON query_events(start_time);
CREATE INDEX IF NOT EXISTS idx_query_agent_type ON query_events(agent_type);
CREATE INDEX IF NOT EXISTS idx_query_source ON query_events(source);
CREATE INDEX IF NOT EXISTS idx_query_session ON query_events(session_id);
CREATE INDEX IF NOT EXISTS idx_query_project_path ON query_events(project_path);
CREATE INDEX IF NOT EXISTS idx_query_agent_time ON query_events(agent_type, start_time)
`;
// ============================================================================
// Auto Run Sessions (Migration v1)
// ============================================================================
export const CREATE_AUTO_RUN_SESSIONS_SQL = `
CREATE TABLE IF NOT EXISTS auto_run_sessions (
  id TEXT PRIMARY KEY,
  session_id TEXT NOT NULL,
  agent_type TEXT NOT NULL,
  document_path TEXT,
  start_time INTEGER NOT NULL,
  duration INTEGER NOT NULL,
  tasks_total INTEGER,
  tasks_completed INTEGER,
  project_path TEXT
)
`;
export const CREATE_AUTO_RUN_SESSIONS_INDEXES_SQL = `
CREATE INDEX IF NOT EXISTS idx_auto_session_start ON auto_run_sessions(start_time)
`;
// ============================================================================
// Auto Run Tasks (Migration v1)
// ============================================================================
// Child table of auto_run_sessions; success is stored as 0/1 since SQLite
// has no boolean type.
export const CREATE_AUTO_RUN_TASKS_SQL = `
CREATE TABLE IF NOT EXISTS auto_run_tasks (
  id TEXT PRIMARY KEY,
  auto_run_session_id TEXT NOT NULL REFERENCES auto_run_sessions(id),
  session_id TEXT NOT NULL,
  agent_type TEXT NOT NULL,
  task_index INTEGER NOT NULL,
  task_content TEXT,
  start_time INTEGER NOT NULL,
  duration INTEGER NOT NULL,
  success INTEGER NOT NULL CHECK(success IN (0, 1))
)
`;
export const CREATE_AUTO_RUN_TASKS_INDEXES_SQL = `
CREATE INDEX IF NOT EXISTS idx_task_auto_session ON auto_run_tasks(auto_run_session_id);
CREATE INDEX IF NOT EXISTS idx_task_start ON auto_run_tasks(start_time)
`;
// ============================================================================
// Session Lifecycle (Migration v3)
// ============================================================================
// closed_at/duration stay NULL until the session is recorded as closed.
export const CREATE_SESSION_LIFECYCLE_SQL = `
CREATE TABLE IF NOT EXISTS session_lifecycle (
  id TEXT PRIMARY KEY,
  session_id TEXT NOT NULL UNIQUE,
  agent_type TEXT NOT NULL,
  project_path TEXT,
  created_at INTEGER NOT NULL,
  closed_at INTEGER,
  duration INTEGER,
  is_remote INTEGER
)
`;
export const CREATE_SESSION_LIFECYCLE_INDEXES_SQL = `
CREATE INDEX IF NOT EXISTS idx_session_created_at ON session_lifecycle(created_at);
CREATE INDEX IF NOT EXISTS idx_session_agent_type ON session_lifecycle(agent_type)
`;
// ============================================================================
// Utilities
// ============================================================================
/**
 * Run every statement in a semicolon-separated SQL string, in order.
 *
 * Intended for the index-definition constants above, which bundle several
 * CREATE INDEX statements into one string. NOTE(review): this is a naive
 * `;` split — it would misbehave on SQL containing semicolons inside string
 * literals, so only use it with the trusted constants in this module.
 */
export function runStatements(db: Database.Database, multiStatementSql: string): void {
  const statements = multiStatementSql.split(';').filter((chunk) => chunk.trim().length > 0);
  for (const statement of statements) {
    db.prepare(statement).run();
  }
}

View File

@@ -0,0 +1,105 @@
/**
* Session Lifecycle CRUD Operations
*
* Tracks when sessions are created (launched) and closed,
* enabling session duration and lifecycle analytics.
*/
import type Database from 'better-sqlite3';
import type { SessionLifecycleEvent, StatsTimeRange } from '../../shared/stats-types';
import { generateId, getTimeRangeStart, normalizePath, LOG_CONTEXT } from './utils';
import { mapSessionLifecycleRow, type SessionLifecycleRow } from './row-mappers';
import { StatementCache } from './utils';
import { logger } from '../utils/logger';
const stmtCache = new StatementCache();
const INSERT_SQL = `
INSERT INTO session_lifecycle (id, session_id, agent_type, project_path, created_at, is_remote)
VALUES (?, ?, ?, ?, ?, ?)
`;
/**
 * Record that a session was created (launched).
 *
 * The closed_at and duration columns stay NULL until recordSessionClosed
 * is called for the same session id.
 *
 * @returns The generated lifecycle row id
 */
export function recordSessionCreated(
  db: Database.Database,
  event: Omit<SessionLifecycleEvent, 'id' | 'closedAt' | 'duration'>
): string {
  const rowId = generateId();
  // Tri-state boolean maps to 1 / 0 / NULL — SQLite has no boolean type.
  const isRemoteFlag = event.isRemote === undefined ? null : event.isRemote ? 1 : 0;
  const insertStmt = stmtCache.get(db, INSERT_SQL);
  insertStmt.run(
    rowId,
    event.sessionId,
    event.agentType,
    normalizePath(event.projectPath),
    event.createdAt,
    isRemoteFlag
  );
  logger.debug(`Recorded session created: ${event.sessionId}`, LOG_CONTEXT);
  return rowId;
}
/**
 * Record that a session was closed and persist its lifetime.
 *
 * Duration is computed as closedAt - created_at and clamped to zero so a
 * backwards wall-clock adjustment (NTP sync, manual change) between create
 * and close can never persist a negative duration.
 *
 * @param db - Open stats database handle
 * @param sessionId - Session to close
 * @param closedAt - Close timestamp (epoch ms)
 * @returns true when a matching session row was updated
 */
export function recordSessionClosed(
  db: Database.Database,
  sessionId: string,
  closedAt: number
): boolean {
  // Get the session's created_at time to calculate duration
  const session = db
    .prepare('SELECT created_at FROM session_lifecycle WHERE session_id = ?')
    .get(sessionId) as { created_at: number } | undefined;
  if (!session) {
    logger.debug(`Session not found for closure: ${sessionId}`, LOG_CONTEXT);
    return false;
  }
  // Clamp at 0 to guard against clock skew producing negative durations.
  const duration = Math.max(0, closedAt - session.created_at);
  const stmt = stmtCache.get(
    db,
    `
    UPDATE session_lifecycle
    SET closed_at = ?, duration = ?
    WHERE session_id = ?
  `
  );
  const result = stmt.run(closedAt, duration, sessionId);
  logger.debug(`Recorded session closed: ${sessionId}, duration: ${duration}ms`, LOG_CONTEXT);
  return result.changes > 0;
}
/**
 * Fetch session lifecycle events created within the given range, newest first.
 */
export function getSessionLifecycleEvents(
  db: Database.Database,
  range: StatsTimeRange
): SessionLifecycleEvent[] {
  const lowerBound = getTimeRangeStart(range);
  const selectStmt = stmtCache.get(
    db,
    `
    SELECT * FROM session_lifecycle
    WHERE created_at >= ?
    ORDER BY created_at DESC
  `
  );
  const rawRows = selectStmt.all(lowerBound) as SessionLifecycleRow[];
  return rawRows.map((row) => mapSessionLifecycleRow(row));
}
/**
 * Release every cached prepared statement in this module.
 * Must be called when the database connection closes, because prepared
 * statements do not survive their connection.
 */
export function clearSessionLifecycleCache(): void {
  stmtCache.clear();
}

View File

@@ -0,0 +1,87 @@
/**
* Stats Database Singleton Management & Performance Metrics API
*
* Provides the global StatsDB instance and performance monitoring utilities.
*/
import { StatsDB } from './stats-db';
import { perfMetrics, LOG_CONTEXT } from './utils';
import { logger } from '../utils/logger';
// ============================================================================
// Singleton Instance
// ============================================================================
// Lazily-created shared instance; null until the first getStatsDB() call.
let statsDbInstance: StatsDB | null = null;
/**
 * Return the process-wide StatsDB, creating it on first use.
 * Construction does not open the database — callers still need initialize().
 */
export function getStatsDB(): StatsDB {
  statsDbInstance ??= new StatsDB();
  return statsDbInstance;
}
/**
 * Create (if needed) and initialize the stats database.
 * Call once when the app is ready.
 */
export function initializeStatsDB(): void {
  getStatsDB().initialize();
}
/**
 * Close the stats database and drop the singleton so a later getStatsDB()
 * call starts fresh. Call on app quit.
 */
export function closeStatsDB(): void {
  if (statsDbInstance === null) {
    return;
  }
  statsDbInstance.close();
  statsDbInstance = null;
}
// ============================================================================
// Performance Metrics API
// ============================================================================
/**
 * Toggle performance metrics logging for StatsDB operations.
 *
 * When on, timing details are logged at debug level for database queries
 * (getAggregatedStats, getQueryEvents, etc.) and for individual SQL
 * operations (totals, byAgent, bySource, byDay queries). Threshold-based
 * performance warnings are emitted regardless of this setting.
 *
 * @param enabled - Whether to turn metrics logging on
 */
export function setPerformanceLoggingEnabled(enabled: boolean): void {
  perfMetrics.setEnabled(enabled);
  const state = enabled ? 'enabled' : 'disabled';
  logger.info(`Performance metrics logging ${state}`, LOG_CONTEXT);
}
/**
 * Whether performance metrics logging is currently on.
 */
export function isPerformanceLoggingEnabled(): boolean {
  return perfMetrics.isEnabled();
}
/**
 * Snapshot of collected performance metrics for analysis
 * (the last 100 recorded entries when logging is enabled).
 */
export function getPerformanceMetrics() {
  return perfMetrics.getMetrics();
}
/**
 * Discard all collected performance metrics.
 */
export function clearPerformanceMetrics(): void {
  perfMetrics.clearMetrics();
}

543
src/main/stats/stats-db.ts Normal file
View File

@@ -0,0 +1,543 @@
/**
* Stats Database Core Class
*
* Manages the SQLite database lifecycle: initialization, integrity checks,
* corruption recovery, VACUUM scheduling, and connection management.
*
* CRUD operations are delegated to focused modules (query-events, auto-run,
* session-lifecycle, aggregations, data-management).
*/
import Database from 'better-sqlite3';
import * as path from 'path';
import * as fs from 'fs';
import { app } from 'electron';
import { logger } from '../utils/logger';
import type {
QueryEvent,
AutoRunSession,
AutoRunTask,
SessionLifecycleEvent,
StatsTimeRange,
StatsFilters,
StatsAggregation,
} from '../../shared/stats-types';
import type {
IntegrityCheckResult,
BackupResult,
CorruptionRecoveryResult,
MigrationRecord,
} from './types';
import { LOG_CONTEXT } from './utils';
import { CREATE_META_TABLE_SQL } from './schema';
import {
runMigrations,
getMigrationHistory,
getCurrentVersion,
getTargetVersion,
hasPendingMigrations,
} from './migrations';
import { insertQueryEvent, getQueryEvents, clearQueryEventCache } from './query-events';
import {
insertAutoRunSession,
updateAutoRunSession,
getAutoRunSessions,
insertAutoRunTask,
getAutoRunTasks,
clearAutoRunCache,
} from './auto-run';
import {
recordSessionCreated,
recordSessionClosed,
getSessionLifecycleEvents,
clearSessionLifecycleCache,
} from './session-lifecycle';
import { getAggregatedStats } from './aggregations';
import { clearOldData, exportToCsv } from './data-management';
/**
* StatsDB manages the SQLite database for usage statistics.
*/
export class StatsDB {
  private db: Database.Database | null = null; // open connection; null until initialize()
  private dbPath: string; // absolute path to the stats.db file
  private initialized = false; // guards against double initialization
  constructor() {
    // Database file lives in Electron's per-user data directory.
    this.dbPath = path.join(app.getPath('userData'), 'stats.db');
  }
// ============================================================================
// Database Accessor
// ============================================================================
/**
* Get the underlying database handle, throwing if not initialized.
* Replaces the repeated `if (!this.db) throw` guard clauses.
*/
get database(): Database.Database {
if (!this.db) throw new Error('Database not initialized');
return this.db;
}
// ============================================================================
// Lifecycle
// ============================================================================
  /**
   * Initialize the database - create file, tables, and indexes.
   *
   * If the database is corrupted, this method will:
   * 1. Backup the corrupted database file
   * 2. Delete the corrupted file and any associated WAL/SHM files
   * 3. Create a fresh database
   *
   * Idempotent: a second call is a no-op. The order below matters — the
   * connection must be open and WAL enabled before the _meta table is
   * created and migrations run.
   *
   * @throws When the file cannot be opened and recovery fails, or when a
   *         migration fails (errors are logged, then rethrown).
   */
  initialize(): void {
    if (this.initialized) {
      return;
    }
    try {
      // Ensure the parent directory exists before touching the file.
      const dir = path.dirname(this.dbPath);
      if (!fs.existsSync(dir)) {
        fs.mkdirSync(dir, { recursive: true });
      }
      const dbExists = fs.existsSync(this.dbPath);
      if (dbExists) {
        // Existing file: open through the corruption-recovery path.
        const db = this.openWithCorruptionHandling();
        if (!db) {
          throw new Error('Failed to open or recover database');
        }
        this.db = db;
      } else {
        // Fresh install: a plain open creates the file.
        this.db = new Database(this.dbPath);
      }
      // Enable WAL mode for better concurrent access
      this.db.pragma('journal_mode = WAL');
      // Create the _meta table for internal key-value storage
      this.db.prepare(CREATE_META_TABLE_SQL).run();
      // Run migrations
      runMigrations(this.db);
      this.initialized = true;
      logger.info(`Stats database initialized at ${this.dbPath}`, LOG_CONTEXT);
      // Schedule VACUUM to run weekly instead of on every startup
      this.vacuumIfNeededWeekly();
    } catch (error) {
      logger.error(`Failed to initialize stats database: ${error}`, LOG_CONTEXT);
      throw error;
    }
  }
/**
* Close the database connection
*/
close(): void {
if (this.db) {
this.db.close();
this.db = null;
this.initialized = false;
// Clear all statement caches
clearQueryEventCache();
clearAutoRunCache();
clearSessionLifecycleCache();
logger.info('Stats database closed', LOG_CONTEXT);
}
}
/**
* Check if database is initialized and ready
*/
isReady(): boolean {
return this.initialized && this.db !== null;
}
/**
* Get the database file path
*/
getDbPath(): string {
return this.dbPath;
}
/**
* Get the database file size in bytes.
*/
getDatabaseSize(): number {
try {
const stats = fs.statSync(this.dbPath);
return stats.size;
} catch {
return 0;
}
}
// ============================================================================
// VACUUM
// ============================================================================
/**
* Run VACUUM on the database to reclaim unused space and optimize structure.
*/
vacuum(): { success: boolean; bytesFreed: number; error?: string } {
if (!this.db) {
return { success: false, bytesFreed: 0, error: 'Database not initialized' };
}
try {
const sizeBefore = this.getDatabaseSize();
logger.info(
`Starting VACUUM (current size: ${(sizeBefore / 1024 / 1024).toFixed(2)} MB)`,
LOG_CONTEXT
);
this.db.prepare('VACUUM').run();
const sizeAfter = this.getDatabaseSize();
const bytesFreed = sizeBefore - sizeAfter;
logger.info(
`VACUUM completed: ${(sizeBefore / 1024 / 1024).toFixed(2)} MB -> ${(sizeAfter / 1024 / 1024).toFixed(2)} MB (freed ${(bytesFreed / 1024 / 1024).toFixed(2)} MB)`,
LOG_CONTEXT
);
return { success: true, bytesFreed };
} catch (error) {
const errorMessage = error instanceof Error ? error.message : String(error);
logger.error(`VACUUM failed: ${errorMessage}`, LOG_CONTEXT);
return { success: false, bytesFreed: 0, error: errorMessage };
}
}
/**
* Conditionally vacuum the database if it exceeds a size threshold.
*
* @param thresholdBytes - Size threshold in bytes (default: 100MB)
*/
vacuumIfNeeded(thresholdBytes: number = 100 * 1024 * 1024): {
vacuumed: boolean;
databaseSize: number;
result?: { success: boolean; bytesFreed: number; error?: string };
} {
const databaseSize = this.getDatabaseSize();
if (databaseSize < thresholdBytes) {
logger.debug(
`Database size (${(databaseSize / 1024 / 1024).toFixed(2)} MB) below vacuum threshold (${(thresholdBytes / 1024 / 1024).toFixed(2)} MB), skipping VACUUM`,
LOG_CONTEXT
);
return { vacuumed: false, databaseSize };
}
logger.info(
`Database size (${(databaseSize / 1024 / 1024).toFixed(2)} MB) exceeds vacuum threshold (${(thresholdBytes / 1024 / 1024).toFixed(2)} MB), running VACUUM`,
LOG_CONTEXT
);
const result = this.vacuum();
return { vacuumed: true, databaseSize, result };
}
/**
* Run VACUUM only if it hasn't been run in the last 7 days.
*
* Stores the last vacuum timestamp in the database's _meta table
* instead of an external file.
*
* @param intervalMs - Minimum time between vacuums (default: 7 days)
*/
private vacuumIfNeededWeekly(intervalMs: number = 7 * 24 * 60 * 60 * 1000): void {
try {
// Read last vacuum timestamp from _meta table
const row = this.database
.prepare("SELECT value FROM _meta WHERE key = 'last_vacuum_at'")
.get() as { value: string } | undefined;
const lastVacuum = row ? parseInt(row.value, 10) || 0 : 0;
const now = Date.now();
const timeSinceLastVacuum = now - lastVacuum;
if (timeSinceLastVacuum < intervalMs) {
const daysRemaining = ((intervalMs - timeSinceLastVacuum) / (24 * 60 * 60 * 1000)).toFixed(
1
);
logger.debug(
`Skipping VACUUM (last run ${((now - lastVacuum) / (24 * 60 * 60 * 1000)).toFixed(1)} days ago, next in ${daysRemaining} days)`,
LOG_CONTEXT
);
return;
}
// Run VACUUM if database is large enough
const result = this.vacuumIfNeeded();
if (result.vacuumed) {
// Update timestamp in _meta table
this.database
.prepare("INSERT OR REPLACE INTO _meta (key, value) VALUES ('last_vacuum_at', ?)")
.run(String(now));
logger.info('Updated VACUUM timestamp in _meta table', LOG_CONTEXT);
}
} catch (error) {
// Non-fatal - log and continue
logger.warn(`Failed to check/update VACUUM schedule: ${error}`, LOG_CONTEXT);
}
}
// ============================================================================
// Integrity & Corruption Handling
// ============================================================================
/**
* Check the integrity of the database using SQLite's PRAGMA integrity_check.
*/
checkIntegrity(): IntegrityCheckResult {
if (!this.db) {
return { ok: false, errors: ['Database not initialized'] };
}
try {
const result = this.db.pragma('integrity_check') as Array<{ integrity_check: string }>;
if (result.length === 1 && result[0].integrity_check === 'ok') {
return { ok: true, errors: [] };
}
const errors = result.map((row) => row.integrity_check);
return { ok: false, errors };
} catch (error) {
const errorMessage = error instanceof Error ? error.message : String(error);
return { ok: false, errors: [errorMessage] };
}
}
/**
* Create a backup of the current database file.
*/
backupDatabase(): BackupResult {
try {
if (!fs.existsSync(this.dbPath)) {
return { success: false, error: 'Database file does not exist' };
}
const timestamp = Date.now();
const backupPath = `${this.dbPath}.backup.${timestamp}`;
fs.copyFileSync(this.dbPath, backupPath);
logger.info(`Created database backup at ${backupPath}`, LOG_CONTEXT);
return { success: true, backupPath };
} catch (error) {
const errorMessage = error instanceof Error ? error.message : String(error);
logger.error(`Failed to create database backup: ${errorMessage}`, LOG_CONTEXT);
return { success: false, error: errorMessage };
}
}
/**
* Handle a corrupted database by backing it up and recreating a fresh database.
*/
private recoverFromCorruption(): CorruptionRecoveryResult {
logger.warn('Attempting to recover from database corruption...', LOG_CONTEXT);
try {
if (this.db) {
try {
this.db.close();
} catch {
// Ignore errors closing corrupted database
}
this.db = null;
this.initialized = false;
}
const backupResult = this.backupDatabase();
if (!backupResult.success) {
if (fs.existsSync(this.dbPath)) {
const timestamp = Date.now();
const emergencyBackupPath = `${this.dbPath}.corrupted.${timestamp}`;
try {
fs.renameSync(this.dbPath, emergencyBackupPath);
logger.warn(`Emergency backup created at ${emergencyBackupPath}`, LOG_CONTEXT);
} catch {
logger.error('Failed to backup corrupted database, data will be lost', LOG_CONTEXT);
fs.unlinkSync(this.dbPath);
}
}
}
// Delete WAL and SHM files
const walPath = `${this.dbPath}-wal`;
const shmPath = `${this.dbPath}-shm`;
if (fs.existsSync(walPath)) {
fs.unlinkSync(walPath);
}
if (fs.existsSync(shmPath)) {
fs.unlinkSync(shmPath);
}
if (fs.existsSync(this.dbPath)) {
fs.unlinkSync(this.dbPath);
}
logger.info('Corrupted database removed, will create fresh database', LOG_CONTEXT);
return {
recovered: true,
backupPath: backupResult.backupPath,
};
} catch (error) {
const errorMessage = error instanceof Error ? error.message : String(error);
logger.error(`Failed to recover from database corruption: ${errorMessage}`, LOG_CONTEXT);
return {
recovered: false,
error: errorMessage,
};
}
}
/**
* Attempt to open and validate a database, handling corruption if detected.
*/
private openWithCorruptionHandling(): Database.Database | null {
try {
const db = new Database(this.dbPath);
const result = db.pragma('integrity_check') as Array<{ integrity_check: string }>;
if (result.length === 1 && result[0].integrity_check === 'ok') {
return db;
}
const errors = result.map((row) => row.integrity_check);
logger.error(`Database integrity check failed: ${errors.join(', ')}`, LOG_CONTEXT);
db.close();
} catch (error) {
logger.error(`Failed to open database: ${error}`, LOG_CONTEXT);
}
const recoveryResult = this.recoverFromCorruption();
if (!recoveryResult.recovered) {
logger.error('Database corruption recovery failed', LOG_CONTEXT);
return null;
}
try {
const db = new Database(this.dbPath);
logger.info('Fresh database created after corruption recovery', LOG_CONTEXT);
return db;
} catch (error) {
logger.error(`Failed to create fresh database after recovery: ${error}`, LOG_CONTEXT);
return null;
}
}
// ============================================================================
// Migration Queries (delegated)
// ============================================================================
getMigrationHistory(): MigrationRecord[] {
return getMigrationHistory(this.database);
}
getCurrentVersion(): number {
return getCurrentVersion(this.database);
}
getTargetVersion(): number {
return getTargetVersion();
}
hasPendingMigrations(): boolean {
return hasPendingMigrations(this.database);
}
// ============================================================================
// Query Events (delegated)
// ============================================================================
insertQueryEvent(event: Omit<QueryEvent, 'id'>): string {
return insertQueryEvent(this.database, event);
}
getQueryEvents(range: StatsTimeRange, filters?: StatsFilters): QueryEvent[] {
return getQueryEvents(this.database, range, filters);
}
// ============================================================================
// Auto Run (delegated)
// ============================================================================
insertAutoRunSession(session: Omit<AutoRunSession, 'id'>): string {
return insertAutoRunSession(this.database, session);
}
updateAutoRunSession(id: string, updates: Partial<AutoRunSession>): boolean {
return updateAutoRunSession(this.database, id, updates);
}
getAutoRunSessions(range: StatsTimeRange): AutoRunSession[] {
return getAutoRunSessions(this.database, range);
}
insertAutoRunTask(task: Omit<AutoRunTask, 'id'>): string {
return insertAutoRunTask(this.database, task);
}
getAutoRunTasks(autoRunSessionId: string): AutoRunTask[] {
return getAutoRunTasks(this.database, autoRunSessionId);
}
// ============================================================================
// Session Lifecycle (delegated)
// ============================================================================
recordSessionCreated(event: Omit<SessionLifecycleEvent, 'id' | 'closedAt' | 'duration'>): string {
return recordSessionCreated(this.database, event);
}
recordSessionClosed(sessionId: string, closedAt: number): boolean {
return recordSessionClosed(this.database, sessionId, closedAt);
}
getSessionLifecycleEvents(range: StatsTimeRange): SessionLifecycleEvent[] {
return getSessionLifecycleEvents(this.database, range);
}
// ============================================================================
// Aggregations (delegated)
// ============================================================================
getAggregatedStats(range: StatsTimeRange): StatsAggregation {
return getAggregatedStats(this.database, range);
}
// ============================================================================
// Data Management (delegated)
// ============================================================================
clearOldData(olderThanDays: number) {
if (!this.db) {
return {
success: false,
deletedQueryEvents: 0,
deletedAutoRunSessions: 0,
deletedAutoRunTasks: 0,
deletedSessionLifecycle: 0,
error: 'Database not initialized',
};
}
return clearOldData(this.database, olderThanDays);
}
exportToCsv(range: StatsTimeRange): string {
return exportToCsv(this.database, range);
}
}

65
src/main/stats/types.ts Normal file
View File

@@ -0,0 +1,65 @@
/**
* Stats Database Internal Types
*
* These types are specific to the stats database implementation.
* Shared types (QueryEvent, AutoRunSession, etc.) remain in src/shared/stats-types.ts.
*/
import type Database from 'better-sqlite3';
/**
 * Result of a database integrity check (PRAGMA integrity_check).
 */
export interface IntegrityCheckResult {
  /** Whether the database passed the integrity check */
  ok: boolean;
  /** Error messages from the integrity check (empty if ok is true) */
  errors: string[];
}
/**
 * Result of a database backup operation.
 */
export interface BackupResult {
  /** Whether the backup succeeded */
  success: boolean;
  /** Path to the backup file (present only when success is true) */
  backupPath?: string;
  /** Error message (present only when success is false) */
  error?: string;
}
/**
 * Result of corruption recovery (backup + delete + recreate of the db file).
 */
export interface CorruptionRecoveryResult {
  /** Whether recovery was performed successfully */
  recovered: boolean;
  /** Path to the backup of the corrupted database, when one could be made */
  backupPath?: string;
  /** Error during recovery (if any) */
  error?: string;
}
/**
 * Represents a single database migration.
 *
 * Migrations are applied in ascending version order by the migrations module.
 */
export interface Migration {
  /** Version number (must be sequential starting from 1) */
  version: number;
  /** Human-readable description of the migration */
  description: string;
  /** Function to apply the migration against an open database handle */
  up: (db: Database.Database) => void;
}
/**
 * Record of an applied migration stored in the migrations table.
 */
export interface MigrationRecord {
  /** Schema version this record corresponds to */
  version: number;
  /** Human-readable description copied from the Migration definition */
  description: string;
  /** Timestamp when the migration was applied — presumably epoch milliseconds; confirm against the migrations module */
  appliedAt: number;
  /** Outcome of the migration attempt */
  status: 'success' | 'failed';
  /** Error details, present when status is 'failed' */
  errorMessage?: string;
}

100
src/main/stats/utils.ts Normal file
View File

@@ -0,0 +1,100 @@
/**
* Stats Database Utilities
*
* Shared helper functions and constants used across the stats module.
*/
import type Database from 'better-sqlite3';
import { logger } from '../utils/logger';
import { PerformanceMetrics } from '../../shared/performance-metrics';
import type { StatsTimeRange } from '../../shared/stats-types';
// Shared log-context tag for all StatsDB log lines.
export const LOG_CONTEXT = '[StatsDB]';

/**
 * Performance metrics logger for StatsDB operations.
 *
 * Disabled by default - enable via setPerformanceLoggingEnabled(true).
 * Logs at debug level through the main process logger, falling back to
 * LOG_CONTEXT when the caller supplies no context.
 */
export const perfMetrics = new PerformanceMetrics(
  'StatsDB',
  (message, context) => logger.debug(message, context ?? LOG_CONTEXT),
  false // Disabled by default - enable for debugging
);
/**
 * Generate a unique ID for database entries.
 *
 * Combines the current epoch-millisecond timestamp with a short base-36
 * random suffix: `<timestamp>-<suffix>`.
 */
export function generateId(): string {
  const timestamp = Date.now();
  const randomSuffix = Math.random().toString(36).slice(2, 11);
  return `${timestamp}-${randomSuffix}`;
}
/**
 * Get timestamp for start of time range.
 *
 * Returns `Date.now()` minus the range's span in milliseconds, or 0 for
 * 'all' (and for any unexpected value, as a defensive fallback).
 */
export function getTimeRangeStart(range: StatsTimeRange): number {
  const DAY_MS = 24 * 60 * 60 * 1000;
  // Span of each bounded range, expressed in days.
  const daysByRange: Record<string, number> = {
    day: 1,
    week: 7,
    month: 30,
    year: 365,
  };
  if (range === 'all') {
    return 0;
  }
  const days = daysByRange[range];
  // Defensive fallback for values outside the known union.
  if (days === undefined) {
    return 0;
  }
  return Date.now() - days * DAY_MS;
}
/**
 * Normalize file paths to use forward slashes consistently across platforms.
 *
 * Paths stored in the database use a single separator style regardless of
 * operating system, which keeps project-path filtering and cross-platform
 * data portability working.
 *
 * - Windows-style backslashes become forward slashes
 * - UNC paths are preserved structurally (\\server\share -> //server/share)
 * - null/undefined inputs yield null
 *
 * @param filePath - The file path to normalize (Windows or Unix style)
 * @returns The forward-slash form, or null for null/undefined input
 */
export function normalizePath(filePath: string | null | undefined): string | null {
  // split/join swaps every backslash for a forward slash.
  return filePath == null ? null : filePath.split('\\').join('/');
}
/**
 * Cache for prepared SQL statements.
 *
 * Avoids re-running `db.prepare()` for queries that execute frequently.
 * Keyed by the SQL text itself; clear() must be called whenever the owning
 * database connection is closed, since statements are bound to it.
 */
export class StatementCache {
  private statements = new Map<string, Database.Statement>();

  /** Return the cached statement for `sql`, preparing and caching it on a miss. */
  get(db: Database.Database, sql: string): Database.Statement {
    const cached = this.statements.get(sql);
    if (cached) {
      return cached;
    }
    const prepared = db.prepare(sql);
    this.statements.set(sql, prepared);
    return prepared;
  }

  /** Drop every cached statement (required before closing the connection). */
  clear(): void {
    this.statements.clear();
  }
}

View File

@@ -32,7 +32,7 @@ import type {
AgentSessionOrigin,
SessionOriginInfo,
SessionMessage,
} from '../agent-session-storage';
} from '../agents';
import type { ToolType, SshRemoteConfig } from '../../shared/types';
const LOG_CONTEXT = '[ClaudeSessionStorage]';

View File

@@ -36,7 +36,7 @@ import type {
SessionListOptions,
SessionReadOptions,
SessionMessage,
} from '../agent-session-storage';
} from '../agents';
import type { ToolType, SshRemoteConfig } from '../../shared/types';
const LOG_CONTEXT = '[CodexSessionStorage]';

View File

@@ -40,7 +40,7 @@ import type {
SessionListOptions,
SessionReadOptions,
SessionMessage,
} from '../agent-session-storage';
} from '../agents';
import type { ToolType, SshRemoteConfig } from '../../shared/types';
const LOG_CONTEXT = '[FactoryDroidSessionStorage]';

View File

@@ -11,7 +11,7 @@ export { CodexSessionStorage } from './codex-session-storage';
export { FactoryDroidSessionStorage } from './factory-droid-session-storage';
import Store from 'electron-store';
import { registerSessionStorage } from '../agent-session-storage';
import { registerSessionStorage } from '../agents';
import { ClaudeSessionStorage, ClaudeSessionOriginsData } from './claude-session-storage';
import { OpenCodeSessionStorage } from './opencode-session-storage';
import { CodexSessionStorage } from './codex-session-storage';

View File

@@ -34,7 +34,7 @@ import type {
SessionListOptions,
SessionReadOptions,
SessionMessage,
} from '../agent-session-storage';
} from '../agents';
import type { ToolType, SshRemoteConfig } from '../../shared/types';
const LOG_CONTEXT = '[OpenCodeSessionStorage]';

View File

@@ -1,4 +1,4 @@
import type { AgentConfig } from '../agent-detector';
import type { AgentConfig } from '../agents';
type BuildAgentArgsOptions = {
baseArgs: string[];
@@ -118,7 +118,10 @@ export function applyAgentConfigOverrides(
: option.default;
}
finalArgs = [...finalArgs, ...option.argBuilder(value)];
// Type assertion needed because AgentConfigOption is a discriminated union
// and we're handling all types generically here
const argBuilderFn = option.argBuilder as (value: unknown) => string[];
finalArgs = [...finalArgs, ...argBuilderFn(value)];
}
}
@@ -179,9 +182,11 @@ export function getContextWindowValue(
}
// Fall back to agent-level config
const contextWindowOption = agent?.configOptions?.find(
(option) => option.key === 'contextWindow'
(option) => option.key === 'contextWindow' && option.type === 'number'
);
const contextWindowDefault = contextWindowOption?.default ?? 0;
// Extract default value, ensuring it's a number (contextWindow should always be a number config)
const defaultValue = contextWindowOption?.default;
const contextWindowDefault = typeof defaultValue === 'number' ? defaultValue : 0;
return typeof agentConfigValues.contextWindow === 'number'
? agentConfigValues.contextWindow
: contextWindowDefault;

View File

@@ -15,7 +15,7 @@
import { v4 as uuidv4 } from 'uuid';
import { logger } from './logger';
import { buildAgentArgs } from './agent-args';
import type { AgentDetector } from '../agent-detector';
import type { AgentDetector } from '../agents';
const LOG_CONTEXT = '[ContextGroomer]';

View File

@@ -186,7 +186,7 @@ import { shouldOpenExternally, flattenTree } from './utils/fileExplorer';
import type { FileNode } from './types/fileTree';
import { substituteTemplateVariables } from './utils/templateVariables';
import { validateNewSession, getProviderDisplayName } from './utils/sessionValidation';
import { estimateContextUsage, calculateContextTokens } from './utils/contextUsage';
import { estimateContextUsage } from './utils/contextUsage';
import { formatLogsForClipboard } from './utils/contextExtractor';
import {
parseSessionId,
@@ -2942,90 +2942,24 @@ function MaestroConsoleInner() {
const parsed = parseSessionId(sessionId);
const { actualSessionId, tabId, baseSessionId } = parsed;
// Calculate context window usage percentage from CURRENT (per-turn) tokens.
// Claude Code usage is normalized to per-turn values in StdoutHandler before reaching here.
//
// SYNC: Uses calculateContextTokens() from shared/contextUsage.ts
// This MUST match the calculation used in:
// - contextSummarizer.ts (compaction eligibility)
// - MainPanel.tsx (tab context display)
// - TabSwitcherModal.tsx (tab switcher)
// - HistoryDetailModal.tsx (history view)
// - usage-listener.ts (main process usage events)
//
// @see src/shared/contextUsage.ts for the canonical calculation
// Estimate context usage percentage using agent-specific calculation.
// estimateContextUsage returns null when values are accumulated across multiple
// internal API calls within a complex turn. In that case, the UI may update less
// during tool-heavy turns, but it's always accurate when it does update,
// keeping the compact warning reliable.
// Use baseSessionId for lookup to handle synopsis/batch sessions that inherit parent's agent type
const sessionForUsage = sessionsRef.current.find((s) => s.id === baseSessionId);
const agentToolType = sessionForUsage?.toolType;
const currentContextTokens = calculateContextTokens(
{
inputTokens: usageStats.inputTokens,
outputTokens: usageStats.outputTokens,
cacheReadInputTokens: usageStats.cacheReadInputTokens,
cacheCreationInputTokens: usageStats.cacheCreationInputTokens,
},
agentToolType
);
// Calculate context percentage, falling back to agent-specific defaults if contextWindow not provided
let contextPercentage: number;
const effectiveContextWindow = usageStats.contextWindow > 0 ? usageStats.contextWindow : 200000;
// Sanity check: if tokens exceed 150% of context window, the data is likely corrupt
// (e.g., accumulated session totals instead of per-turn values). In this case,
// preserve the previous context percentage rather than showing misleading 100%.
if (currentContextTokens > effectiveContextWindow * 1.5) {
console.warn('[onUsage] Ignoring anomalous context data - tokens exceed 150% of window', {
sessionId: actualSessionId,
currentContextTokens,
contextWindow: effectiveContextWindow,
inputTokens: usageStats.inputTokens,
cacheReadInputTokens: usageStats.cacheReadInputTokens,
cacheCreationInputTokens: usageStats.cacheCreationInputTokens,
});
// Keep existing context percentage (don't update)
contextPercentage = sessionForUsage?.contextUsage ?? 0;
// Skip usage updates to avoid polluting UI with cumulative totals
return;
} else if (usageStats.contextWindow > 0) {
contextPercentage = Math.min(
Math.round((currentContextTokens / usageStats.contextWindow) * 100),
100
);
} else {
// Use fallback estimation with agent-specific default context window
const estimated = estimateContextUsage(usageStats, agentToolType);
contextPercentage = estimated ?? 0;
}
// DEBUG: Log context calculation details
// Uses calculateContextTokens() from shared/contextUsage.ts for consistency
const isCombinedContext = agentToolType === 'codex';
console.log('[onUsage] Context calculation', {
sessionId: actualSessionId,
agentType: agentToolType,
raw: {
inputTokens: usageStats.inputTokens,
outputTokens: usageStats.outputTokens,
cacheReadInputTokens: usageStats.cacheReadInputTokens,
cacheCreationInputTokens: usageStats.cacheCreationInputTokens,
contextWindow: usageStats.contextWindow,
},
calculated: {
currentContextTokens,
effectiveContextWindow,
contextPercentage,
formula: isCombinedContext
? 'input + output (combined)'
: 'input + cacheRead + cacheCreation',
},
});
const contextPercentage = estimateContextUsage(usageStats, agentToolType);
// Batch the usage stats update, context percentage, and cycle tokens
// The batched updater handles the accumulation logic internally
batchedUpdater.updateUsage(actualSessionId, tabId, usageStats);
batchedUpdater.updateUsage(actualSessionId, null, usageStats); // Session-level accumulation
batchedUpdater.updateContextUsage(actualSessionId, contextPercentage);
// Only update context percentage if we got a valid value (not accumulated)
if (contextPercentage !== null) {
batchedUpdater.updateContextUsage(actualSessionId, contextPercentage);
}
batchedUpdater.updateCycleTokens(actualSessionId, usageStats.outputTokens);
// Update persistent global stats (not batched - this is a separate concern)

View File

@@ -574,12 +574,14 @@ export const MainPanel = React.memo(
return configured > 0 ? configured : reported;
}, [configuredContextWindow, activeTab?.usageStats?.contextWindow]);
// Compute context tokens using agent-specific calculation
// SYNC: Uses calculateContextTokens() from shared/contextUsage.ts
// See that file for the canonical formula and all locations that must stay in sync.
// Compute context tokens using agent-specific calculation.
// Claude: input + cacheRead + cacheCreation (total input for the request)
// Codex: input + output (combined limit)
// When values are accumulated from multi-tool turns, total may exceed contextWindow.
// In that case, derive tokens from session.contextUsage (preserved last valid percentage).
const activeTabContextTokens = useMemo(() => {
if (!activeTab?.usageStats) return 0;
return calculateContextTokens(
const raw = calculateContextTokens(
{
inputTokens: activeTab.usageStats.inputTokens,
outputTokens: activeTab.usageStats.outputTokens,
@@ -588,43 +590,25 @@ export const MainPanel = React.memo(
},
activeSession?.toolType
);
}, [activeTab?.usageStats, activeSession?.toolType]);
// Compute context usage percentage from context tokens and window size
// If raw exceeds window, values are accumulated from multi-tool turns.
// Fall back to deriving from the preserved contextUsage percentage.
const effectiveWindow = activeTabContextWindow || 200000;
if (raw > effectiveWindow && activeSession?.contextUsage != null) {
return Math.round((activeSession.contextUsage / 100) * effectiveWindow);
}
return raw;
}, [activeTab?.usageStats, activeSession?.toolType, activeTabContextWindow, activeSession?.contextUsage]);
// Compute context usage percentage from context tokens and window size.
// Since we already handle accumulated values in activeTabContextTokens,
// we just calculate the percentage directly.
const activeTabContextUsage = useMemo(() => {
if (!activeTabContextWindow || activeTabContextWindow === 0) return 0;
if (activeTabContextTokens === 0) return 0;
const percentage = Math.min(
Math.round((activeTabContextTokens / activeTabContextWindow) * 100),
100
);
// DEBUG: Log MainPanel context display calculation
console.log('[MainPanel] Context display calculation', {
sessionId: activeSession?.id,
tabId: activeTab?.id,
usageStats: activeTab?.usageStats
? {
inputTokens: activeTab.usageStats.inputTokens,
outputTokens: activeTab.usageStats.outputTokens,
cacheReadInputTokens: activeTab.usageStats.cacheReadInputTokens,
cacheCreationInputTokens: activeTab.usageStats.cacheCreationInputTokens,
contextWindow: activeTab.usageStats.contextWindow,
}
: null,
activeTabContextTokens,
activeTabContextWindow,
displayedPercentage: percentage,
});
return percentage;
}, [
activeTabContextTokens,
activeTabContextWindow,
activeSession?.id,
activeTab?.id,
activeTab?.usageStats,
]);
return Math.round((activeTabContextTokens / activeTabContextWindow) * 100);
}, [activeTabContextTokens, activeTabContextWindow]);
// PERF: Track panel width for responsive widget hiding with threshold-based updates
// Only update state when width crosses a meaningful threshold (20px) to prevent

View File

@@ -5,7 +5,7 @@
*/
// Git status polling
export { useGitStatusPolling } from './useGitStatusPolling';
export { useGitStatusPolling, getScaledPollInterval } from './useGitStatusPolling';
export type {
UseGitStatusPollingReturn,
UseGitStatusPollingOptions,

View File

@@ -1,4 +1,4 @@
import { useState, useEffect, useRef, useCallback } from 'react';
import { useState, useEffect, useRef, useCallback, useMemo } from 'react';
import type { Session } from '../../types';
import { gitService } from '../../services/git';
@@ -90,6 +90,31 @@ export interface UseGitStatusPollingOptions {
const DEFAULT_POLL_INTERVAL = 30000; // 30 seconds
const DEFAULT_INACTIVITY_TIMEOUT = 60000; // 60 seconds

/**
 * PERF: Scale polling interval based on the number of git sessions.
 * With many sessions, each poll spawns N parallel git processes which creates
 * sustained CPU/IO load (especially on large repos where `git status` takes seconds).
 * Only applies when using the default poll interval; custom intervals are respected.
 */
const POLL_INTERVAL_SCALE_THRESHOLDS: { maxSessions: number; interval: number }[] = [
  { maxSessions: 3, interval: 30000 }, // 1-3 sessions: 30s (unchanged)
  { maxSessions: 7, interval: 45000 }, // 4-7 sessions: 45s
  { maxSessions: 12, interval: 60000 }, // 8-12 sessions: 60s
  { maxSessions: Infinity, interval: 90000 }, // 13+: 90s
];

/**
 * Map a git-session count onto a polling interval using the thresholds above.
 * User-configured (non-default) intervals pass through untouched.
 */
export function getScaledPollInterval(basePollInterval: number, gitSessionCount: number): number {
  // Only scale if using the default interval (user-configured intervals are respected)
  if (basePollInterval !== DEFAULT_POLL_INTERVAL) {
    return basePollInterval;
  }
  // Thresholds are sorted ascending, so the first match is the tightest bucket.
  const bucket = POLL_INTERVAL_SCALE_THRESHOLDS.find(
    (threshold) => gitSessionCount <= threshold.maxSessions
  );
  // The Infinity sentinel makes a miss impossible, but keep a safe fallback.
  return bucket ? bucket.interval : 90000;
}
/**
* PERF: Compare two GitStatusData objects for meaningful changes.
* Ignores lastUpdated since that always changes and would cause unnecessary re-renders.
@@ -324,9 +349,16 @@ export function useGitStatusPolling(
}
}, [pauseWhenHidden]);
// PERF: Track git session count to dynamically scale the polling interval
const gitSessionCount = useMemo(() => sessions.filter((s) => s.isGitRepo).length, [sessions]);
const gitSessionCountRef = useRef(gitSessionCount);
gitSessionCountRef.current = gitSessionCount;
const startPolling = useCallback(() => {
if (!intervalRef.current && (!pauseWhenHidden || !document.hidden)) {
pollGitStatus();
// Scale interval based on how many git sessions are active
const scaledInterval = getScaledPollInterval(pollInterval, gitSessionCountRef.current);
intervalRef.current = setInterval(() => {
const now = Date.now();
const timeSinceLastActivity = now - lastActivityRef.current;
@@ -342,7 +374,7 @@ export function useGitStatusPolling(
intervalRef.current = null;
}
}
}, pollInterval);
}, scaledInterval);
}
}, [pollInterval, inactivityTimeout, pollGitStatus]);
@@ -431,6 +463,28 @@ export function useGitStatusPolling(
};
}, [pauseWhenHidden, startPolling, stopPolling]);
// PERF: Restart polling when git session count crosses a scaling threshold
// so the interval adapts to the current load level
const prevScaledIntervalRef = useRef(getScaledPollInterval(pollInterval, gitSessionCount));
useEffect(() => {
// Ensure ref reflects current count before startPolling reads it.
// (The render-phase assignment at line 330 already does this, but being
// explicit here makes the data-flow self-documenting.)
gitSessionCountRef.current = gitSessionCount;
const newScaledInterval = getScaledPollInterval(pollInterval, gitSessionCount);
if (newScaledInterval !== prevScaledIntervalRef.current) {
prevScaledIntervalRef.current = newScaledInterval;
// Restart with new interval if currently polling
if (intervalRef.current) {
stopPolling();
if (isActiveRef.current && (!pauseWhenHidden || !document.hidden)) {
startPolling();
}
}
}
}, [gitSessionCount, pollInterval, stopPolling, startPolling, pauseWhenHidden]);
// Refresh immediately when active session changes to get detailed data
useEffect(() => {
if (activeSessionId) {

View File

@@ -17,6 +17,30 @@ export interface UseAtMentionCompletionReturn {
getSuggestions: (filter: string) => AtMentionSuggestion[];
}
/**
 * PERF: Upper bound on how many file-tree nodes get flattened.
 * Unbounded traversal of a 100k+ file repo builds a huge array on the main
 * thread and blocks it; 50k entries is plenty for useful @mention
 * suggestions, and the roughly breadth-first traversal order naturally
 * surfaces shallower (more relevant) paths first.
 */
const MAX_FILE_TREE_ENTRIES = 50_000;

/**
 * PERF: Cap on the number of suggestions returned by the fuzzy search.
 */
const MAX_SUGGESTION_RESULTS = 15;

/**
 * PERF: Stop scanning once this many exact substring matches have been seen
 * (provided at least MAX_SUGGESTION_RESULTS total hits exist). Exact
 * substring matches score highest in fuzzyMatchWithScore (they receive a
 * +50 bonus in search.ts), so with 50 of them in hand the top-15 results are
 * effectively settled — anything further back could only contribute weaker
 * fuzzy-only matches that cannot outscore them. The margin over
 * MAX_SUGGESTION_RESULTS (15) absorbs score ties and type-based reordering.
 */
const EARLY_EXIT_EXACT_MATCH_THRESHOLD = 50;
/**
* Hook for providing @ mention file completion in AI mode.
* Uses fuzzy matching to find files in the project tree and Auto Run folder.
@@ -93,6 +117,7 @@ export function useAtMentionCompletion(session: Session | null): UseAtMentionCom
}, [autoRunFolderPath, sessionCwd]);
// Build a flat list of all files/folders from the file tree
// PERF: Capped at MAX_FILE_TREE_ENTRIES to avoid blocking the main thread on huge repos
const projectFiles = useMemo(() => {
if (!session?.fileTree) return [];
@@ -100,6 +125,8 @@ export function useAtMentionCompletion(session: Session | null): UseAtMentionCom
const traverse = (nodes: FileNode[], currentPath = '') => {
for (const node of nodes) {
if (files.length >= MAX_FILE_TREE_ENTRIES) return;
const fullPath = currentPath ? `${currentPath}/${node.name}` : node.name;
files.push({
name: node.name,
@@ -140,7 +167,32 @@ export function useAtMentionCompletion(session: Session | null): UseAtMentionCom
// Early return if no files available (allFiles is empty when session is null)
if (allFiles.length === 0) return [];
// PERF: When no filter (user just typed @), skip all fuzzy matching
// and return the first N files directly. Avoids 200k+ no-op fuzzyMatchWithScore calls.
if (!filter) {
const results: AtMentionSuggestion[] = [];
for (let i = 0; i < Math.min(allFiles.length, MAX_SUGGESTION_RESULTS); i++) {
const file = allFiles[i];
results.push({
value: file.path,
type: file.type,
displayText: file.name,
fullPath: file.path,
score: 0,
source: file.source,
});
}
// Sort the small result set (sorting 15 items is essentially free)
results.sort((a, b) => {
if (a.type !== b.type) return a.type === 'file' ? -1 : 1;
return a.displayText.localeCompare(b.displayText);
});
return results;
}
const suggestions: AtMentionSuggestion[] = [];
const filterLower = filter.toLowerCase();
let exactSubstringMatchCount = 0;
for (const file of allFiles) {
// Match against both file name and full path
@@ -150,7 +202,7 @@ export function useAtMentionCompletion(session: Session | null): UseAtMentionCom
// Use the better of the two scores
const bestMatch = nameMatch.score > pathMatch.score ? nameMatch : pathMatch;
if (bestMatch.matches || !filter) {
if (bestMatch.matches) {
suggestions.push({
value: file.path,
type: file.type,
@@ -159,6 +211,24 @@ export function useAtMentionCompletion(session: Session | null): UseAtMentionCom
score: bestMatch.score,
source: file.source,
});
// Track exact substring matches for early exit
if (
file.name.toLowerCase().includes(filterLower) ||
file.path.toLowerCase().includes(filterLower)
) {
exactSubstringMatchCount++;
}
// PERF: Early exit - once we have enough high-quality exact substring
// matches and enough total results, further searching through remaining
// files would only yield lower-scoring fuzzy matches.
if (
exactSubstringMatchCount >= EARLY_EXIT_EXACT_MATCH_THRESHOLD &&
suggestions.length >= MAX_SUGGESTION_RESULTS
) {
break;
}
}
}
@@ -175,7 +245,7 @@ export function useAtMentionCompletion(session: Session | null): UseAtMentionCom
});
// Limit to reasonable number
return suggestions.slice(0, 15);
return suggestions.slice(0, MAX_SUGGESTION_RESULTS);
},
[allFiles]
);

View File

@@ -10,6 +10,13 @@ export interface TabCompletionSuggestion {
export type TabCompletionFilter = 'all' | 'history' | 'branch' | 'tag' | 'file';
/**
 * PERF: Upper bound on flattened file-tree entries, matching the cap used by
 * useAtMentionCompletion so repos with 100k+ files never block the main
 * thread during traversal.
 */
const MAX_FILE_TREE_ENTRIES = 50_000;
export interface UseTabCompletionReturn {
getSuggestions: (input: string, filter?: TabCompletionFilter) => TabCompletionSuggestion[];
}
@@ -55,8 +62,11 @@ export function useTabCompletion(session: Session | null): UseTabCompletionRetur
const names: { name: string; type: 'file' | 'folder'; path: string }[] = [];
// PERF: Capped at MAX_FILE_TREE_ENTRIES to avoid blocking the main thread on huge repos
const traverse = (nodes: FileNode[], currentPath = '') => {
for (const node of nodes) {
if (names.length >= MAX_FILE_TREE_ENTRIES) return;
const fullPath = currentPath ? `${currentPath}/${node.name}` : node.name;
names.push({
name: node.name,

View File

@@ -163,10 +163,18 @@ export function useBatchedSessionUpdates(
hasPendingRef.current = false;
setSessions((prev) => {
return prev.map((session) => {
// PERF: Track whether any session was actually modified.
// If no session in prev matched an accumulator entry, return prev
// unchanged to preserve referential identity and skip a React re-render.
// This avoids ~7 unnecessary re-renders/sec when agents stream data for
// sessions that were removed between accumulation and flush.
let anyChanged = false;
const next = prev.map((session) => {
const acc = updates.get(session.id);
if (!acc) return session;
anyChanged = true;
let updatedSession = { ...session };
// Apply log accumulations
@@ -438,6 +446,8 @@ export function useBatchedSessionUpdates(
return updatedSession;
});
return anyChanged ? next : prev;
});
}, [setSessions]);

View File

@@ -1,17 +1,126 @@
/**
* Context Usage Estimation Utilities
*
* SYNC: Re-exports from shared/contextUsage.ts for backward compatibility.
* All context usage logic is centralized there. See that file for:
* - The canonical calculation formula
* - All locations that must stay in sync
* - Provider-specific semantics (Claude vs OpenAI)
* Provides fallback estimation for context window usage when agents
* don't report their context window size directly.
*/
export {
DEFAULT_CONTEXT_WINDOWS,
COMBINED_CONTEXT_AGENTS,
calculateContextTokens,
estimateContextUsage,
type ContextUsageStats,
} from '../../shared/contextUsage';
import type { ToolType } from '../types';
import type { UsageStats } from '../../shared/types';
/**
 * Fallback context-window sizes (in tokens), keyed by agent.
 * Consulted whenever an agent does not report its own context window size.
 */
export const DEFAULT_CONTEXT_WINDOWS: Record<ToolType, number> = {
  'claude-code': 200000, // Claude 3.5 Sonnet / Claude 4 default context
  codex: 200000, // OpenAI o3 / o4-mini context window
  opencode: 128000, // model-dependent; 128k is a conservative default
  'factory-droid': 200000, // model-dependent; defaults to Claude Opus
  terminal: 0, // plain terminal — no LLM context window
};
/**
 * Agents whose context window is a single combined input+output budget.
 * OpenAI models (Codex, o3, o4-mini) count both prompt and completion tokens
 * against one limit, unlike Claude, which limits input separately.
 */
const COMBINED_CONTEXT_AGENTS: Set<ToolType> = new Set(['codex']);

/**
 * Calculate total context tokens based on agent-specific semantics.
 *
 * For a single Anthropic API call, the total input context is the sum of:
 * inputTokens + cacheReadInputTokens + cacheCreationInputTokens.
 * These three fields partition the input into uncached, cache-hit, and
 * newly-cached segments.
 *
 * CAVEAT: When Claude Code performs multi-tool turns (many internal API
 * calls), the reported values may be accumulated across all internal calls
 * within the turn. In that case the total can exceed the context window.
 * Callers should check for this and skip the update (see estimateContextUsage).
 *
 * Claude models: context = input + cacheRead + cacheCreation
 * OpenAI models: context = input + cacheRead + cacheCreation + output
 *                (combined limit; the cache fields are normally 0 for OpenAI
 *                agents, whose reported input already includes cached tokens)
 *
 * @param stats - The usage statistics containing token counts
 * @param agentId - The agent identifier for agent-specific calculation
 * @returns Total context tokens used
 */
export function calculateContextTokens(
  stats: {
    inputTokens?: number;
    outputTokens?: number;
    cacheReadInputTokens?: number;
    cacheCreationInputTokens?: number;
  },
  agentId?: ToolType | string
): number {
  // Total input context = uncached + cache-hit + newly-cached. All three
  // components occupy context-window space.
  const inputContextTokens =
    (stats.inputTokens || 0) +
    (stats.cacheReadInputTokens || 0) +
    (stats.cacheCreationInputTokens || 0);

  // BUGFIX: the previous version dropped cacheReadInputTokens for combined
  // agents (input + cacheCreation + output), contradicting both its own doc
  // comment and the canonical formula used by the rest of the codebase.
  // Combined-limit agents additionally count output tokens against the window.
  if (agentId && COMBINED_CONTEXT_AGENTS.has(agentId as ToolType)) {
    return inputContextTokens + (stats.outputTokens || 0);
  }

  // Claude-style: output tokens do not consume the input context window.
  return inputContextTokens;
}
/**
 * Estimate context usage percentage when the agent doesn't provide it directly.
 * Uses agent-specific default context window sizes for accurate estimation.
 *
 * Context calculation varies by agent:
 * - Claude models: inputTokens + cacheReadInputTokens + cacheCreationInputTokens
 * - OpenAI models (Codex): combined input+output limit
 *
 * Returns null when the calculated total exceeds the context window, which
 * indicates accumulated values from multi-tool turns (many internal API calls
 * within one turn). A single API call's total input can never exceed the
 * context window, so values above it are definitely accumulated. Callers
 * should preserve the previous valid percentage when this returns null.
 *
 * @param stats - The usage statistics containing token counts
 * @param agentId - The agent identifier for agent-specific context window size
 * @returns Estimated context usage percentage (0-100), or null if cannot be estimated
 */
export function estimateContextUsage(
  stats: {
    inputTokens?: number;
    outputTokens?: number;
    cacheReadInputTokens?: number;
    cacheCreationInputTokens?: number;
    contextWindow?: number;
  },
  agentId?: ToolType | string
): number | null {
  const usedTokens = calculateContextTokens(stats, agentId);

  // Prefer the window the agent reported; otherwise fall back to the
  // per-agent default. Terminal sessions have no context window at all.
  let windowTokens = 0;
  if (stats.contextWindow && stats.contextWindow > 0) {
    windowTokens = stats.contextWindow;
  } else if (agentId && agentId !== 'terminal') {
    windowTokens = DEFAULT_CONTEXT_WINDOWS[agentId as ToolType] || 0;
  }
  if (windowTokens <= 0) {
    return null;
  }

  // A single API call's input can never exceed the window, so a larger total
  // means the stats were accumulated across a multi-tool turn's internal
  // calls — signal the caller to keep its previous valid percentage.
  if (usedTokens > windowTokens) {
    return null;
  }

  if (usedTokens <= 0) {
    return 0;
  }
  return Math.round((usedTokens / windowTokens) * 100);
}

View File

@@ -1,176 +0,0 @@
/**
* Context Usage Estimation Utilities
*
* ╔══════════════════════════════════════════════════════════════════════════════╗
* ║ CONTEXT CALCULATION SYNCHRONIZATION ║
* ╠══════════════════════════════════════════════════════════════════════════════╣
* ║ This is the SINGLE SOURCE OF TRUTH for context window calculations. ║
* ║ ║
* ║ ALL context calculations in the codebase MUST use these functions: ║
* ║ - calculateContextTokens() - Calculate total context tokens ║
* ║ - estimateContextUsage() - Estimate context usage percentage ║
* ║ ║
* ║ LOCATIONS THAT USE THESE (keep in sync when modifying): ║
* ║ 1. src/renderer/App.tsx (line ~2768) - UI context % display ║
* ║ 2. src/renderer/utils/contextUsage.ts - Re-exports for renderer ║
* ║ 3. src/renderer/utils/contextExtractor.ts - Token estimation ║
* ║ 4. src/renderer/components/MainPanel.tsx - Tab context display ║
* ║ 5. src/renderer/components/TabSwitcherModal.tsx - Tab switcher ║
* ║ 6. src/renderer/components/HistoryDetailModal.tsx - History view ║
* ║ 7. src/renderer/services/contextSummarizer.ts - Compaction eligibility ║
* ║ 8. src/main/parsers/usage-aggregator.ts - Re-exports for main process ║
* ║ 9. src/main/process-listeners/usage-listener.ts - Usage event handling ║
* ║ 10. src/web/mobile/App.tsx - Mobile UI ║
* ║ 11. src/web/mobile/SessionStatusBanner.tsx - Mobile status ║
* ║ ║
* ║ PROVIDER-SPECIFIC FORMULAS: ║
* ║ ║
* ║ Claude-style (separate input/output limits): ║
* ║ total = inputTokens + cacheReadInputTokens + cacheCreationInputTokens ║
* ║ Agents: claude-code, factory-droid, opencode ║
* ║ (OpenCode and Factory Droid can use various models, but they report ║
* ║ cache tokens in Claude-style format regardless of backend) ║
* ║ ║
* ║ OpenAI-style (combined input+output limit): ║
* ║ total = inputTokens + outputTokens ║
* ║ Agents: codex ║
* ║ (COMBINED_CONTEXT_AGENTS set determines which agents use this) ║
* ║ ║
* ║ KNOWN ISSUES (as of 2026-01-31): ║
* ║ - Claude Code usage can be cumulative; normalized to per-turn in StdoutHandler ║
* ║ - Values fluctuate based on which model (Haiku vs Sonnet) handles turn ║
* ║ - This causes UI to show inconsistent context % across turns ║
* ║ - Compaction check may fail when UI shows high but stored value is low ║
* ╚══════════════════════════════════════════════════════════════════════════════╝
*
* @see https://platform.claude.com/docs/en/build-with-claude/prompt-caching
* @see https://code.claude.com/docs/en/statusline#context-window-usage
*/
import type { ToolType } from './types';
/**
* Default context window sizes for different agents.
* Used as fallback when the agent doesn't report its context window size.
*
* SYNC: When adding a new agent, also update:
* - COMBINED_CONTEXT_AGENTS if it uses combined input+output limits
* - calculateContextTokens() if it has a unique formula
*/
export const DEFAULT_CONTEXT_WINDOWS: Record<ToolType, number> = {
'claude-code': 200000, // Claude 3.5 Sonnet/Claude 4 default context
codex: 200000, // OpenAI o3/o4-mini context window
opencode: 128000, // OpenCode (depends on model, 128k is conservative default)
'factory-droid': 200000, // Factory Droid (varies by model, defaults to Claude Opus)
terminal: 0, // Terminal has no context window
};
/**
* Agents that use combined input+output context windows.
* OpenAI models (Codex, o3, o4-mini) have a single context window that includes
* both input and output tokens, unlike Claude which has separate limits.
*
* SYNC: When adding a new agent with combined context limits, add it here
* and update calculateContextTokens() to handle it.
*/
export const COMBINED_CONTEXT_AGENTS: Set<ToolType | string> = new Set(['codex']);
/**
* Minimal usage stats interface for context calculation.
* All fields are optional to support different sources (web, renderer, main).
*/
export interface ContextUsageStats {
inputTokens?: number;
outputTokens?: number;
cacheReadInputTokens?: number;
cacheCreationInputTokens?: number;
contextWindow?: number;
}
/**
* Calculate total context tokens based on agent-specific semantics.
*
* ╔══════════════════════════════════════════════════════════════════════════════╗
* ║ THIS IS THE CANONICAL CONTEXT CALCULATION FUNCTION ║
* ║ All UI displays, compaction checks, and usage tracking MUST use this. ║
* ╚══════════════════════════════════════════════════════════════════════════════╝
*
* Per Anthropic documentation, the context calculation formula is:
* total_context = input_tokens + cache_read_input_tokens + cache_creation_input_tokens
*
* Where:
* - input_tokens: New uncached tokens AFTER the last cache breakpoint
* - cache_read_input_tokens: Tokens retrieved from cache (entire cached prefix)
* - cache_creation_input_tokens: Tokens being written to cache for the first time
*
* For OpenAI models (Codex), context = input + output (combined limit)
*
* @param stats - The usage statistics containing token counts
* @param agentId - The agent identifier for agent-specific calculation
* @returns Total context tokens used for this turn
*
* @see https://platform.claude.com/docs/en/build-with-claude/prompt-caching
*/
export function calculateContextTokens(
stats: ContextUsageStats,
agentId?: ToolType | string
): number {
// Per Anthropic docs: total_context = input + cacheRead + cacheCreation
// All three components occupy context window space.
const baseTokens =
(stats.inputTokens || 0) +
(stats.cacheReadInputTokens || 0) +
(stats.cacheCreationInputTokens || 0);
// OpenAI models have combined input+output context limits
if (agentId && COMBINED_CONTEXT_AGENTS.has(agentId)) {
return baseTokens + (stats.outputTokens || 0);
}
// Claude models: output tokens don't consume context window
return baseTokens;
}
/**
* Estimate context usage percentage when the agent doesn't provide it directly.
* Uses agent-specific default context window sizes for accurate estimation.
*
* Context calculation varies by agent:
* - Claude models: inputTokens + cacheReadInputTokens + cacheCreationInputTokens
* (per Anthropic docs, all three occupy context window space)
* - OpenAI models (Codex): inputTokens + outputTokens
* (combined context window includes both input and output)
*
* @param stats - The usage statistics containing token counts
* @param agentId - The agent identifier for agent-specific context window size
* @returns Estimated context usage percentage (0-100), or null if cannot be estimated
*/
export function estimateContextUsage(
stats: ContextUsageStats,
agentId?: ToolType | string
): number | null {
// Calculate total context using agent-specific semantics
const totalContextTokens = calculateContextTokens(stats, agentId);
// If context window is provided and valid, use it
if (stats.contextWindow && stats.contextWindow > 0) {
return Math.min(100, Math.round((totalContextTokens / stats.contextWindow) * 100));
}
// If no agent specified or terminal, cannot estimate
if (!agentId || agentId === 'terminal') {
return null;
}
// Use agent-specific default context window
const defaultContextWindow = DEFAULT_CONTEXT_WINDOWS[agentId as ToolType];
if (!defaultContextWindow || defaultContextWindow <= 0) {
return null;
}
if (totalContextTokens <= 0) {
return 0;
}
return Math.min(100, Math.round((totalContextTokens / defaultContextWindow) * 100));
}

Some files were not shown because too many files have changed in this diff Show More