Merge branch 'main' into code-refactor

This commit is contained in:
Raza Rauf
2026-01-24 00:02:13 +05:00
38 changed files with 972 additions and 2575 deletions

View File

@@ -112,6 +112,13 @@ jobs:
if: matrix.platform == 'win'
uses: ilammy/msvc-dev-cmd@v1
# Windows: Setup Python 3.11 for node-gyp (Python 3.12+ removed distutils)
- name: Setup Python for node-gyp (Windows)
if: matrix.platform == 'win'
uses: actions/setup-python@v5
with:
python-version: '3.11'
# macOS: Setup Python 3.11 for node-gyp (Python 3.12+ removed distutils)
- name: Setup Python for node-gyp
if: matrix.platform == 'mac'

8
.gitignore vendored
View File

@@ -33,11 +33,6 @@ scratch/
Thumbs.db
# IDE
# .vscode/ is tracked for shared settings (settings.json, extensions.json)
# But ignore personal/local files
.vscode/*
!.vscode/settings.json
!.vscode/extensions.json
.idea/
*.swp
*.swo
@@ -55,5 +50,6 @@ yarn-debug.log*
yarn-error.log*
#VS Code
.vscode/
.VSCodeCounter
.qodo
.qodo

View File

@@ -1,7 +0,0 @@
{
"recommendations": [
"esbenp.prettier-vscode",
"dbaeumer.vscode-eslint",
"editorconfig.editorconfig"
]
}

47
.vscode/settings.json vendored
View File

@@ -1,47 +0,0 @@
{
// Format on save with Prettier
"editor.formatOnSave": true,
"editor.defaultFormatter": "esbenp.prettier-vscode",
// Use tabs (matches .prettierrc and .editorconfig)
"editor.tabSize": 2,
"editor.insertSpaces": false,
"editor.detectIndentation": false,
// ESLint configuration
"eslint.enable": true,
"eslint.validate": ["javascript", "javascriptreact", "typescript", "typescriptreact"],
// Don't let ESLint format - let Prettier handle it
"eslint.format.enable": false,
// File-specific formatters
"[typescript]": {
"editor.defaultFormatter": "esbenp.prettier-vscode"
},
"[typescriptreact]": {
"editor.defaultFormatter": "esbenp.prettier-vscode"
},
"[javascript]": {
"editor.defaultFormatter": "esbenp.prettier-vscode"
},
"[json]": {
"editor.defaultFormatter": "esbenp.prettier-vscode"
},
// Recommended extensions
"editor.codeActionsOnSave": {
"source.fixAll.eslint": "explicit"
},
// Files to exclude from search/watch
"files.exclude": {
"dist": true,
"release": true,
"node_modules": true
},
// TypeScript settings
"typescript.tsdk": "node_modules/typescript/lib",
"typescript.enablePromptUseWorkspaceTsdk": true
}

View File

@@ -22,7 +22,7 @@ See [Performance Guidelines](#performance-guidelines) for specific practices.
- [Project Structure](#project-structure)
- [Development Scripts](#development-scripts)
- [Testing](#testing)
- [Linting](#linting)
- [Linting & Pre-commit Hooks](#linting--pre-commit-hooks)
- [Common Development Tasks](#common-development-tasks)
- [Adding a New AI Agent](#adding-a-new-ai-agent)
- [Code Style](#code-style)
@@ -150,6 +150,25 @@ You can also specify a custom demo directory via environment variable:
MAESTRO_DEMO_DIR=~/Desktop/my-demo npm run dev
```
### Running Multiple Instances (Git Worktrees)
When working with multiple git worktrees, you can run Maestro instances in parallel by specifying different ports using the `VITE_PORT` environment variable:
```bash
# In the main worktree (uses default port 5173)
npm run dev
# In worktree 2 (in another directory and terminal)
VITE_PORT=5174 npm run dev
# In worktree 3
VITE_PORT=5175 npm run dev
```
This allows you to develop and test different branches simultaneously without port conflicts.
**Note:** The web interface dev server (`npm run dev:web`) uses a separate port (default 5174) and can be configured with `VITE_WEB_PORT` if needed.
## Testing
Run the test suite with Jest:

View File

@@ -121,3 +121,11 @@ The confirmation dialog shows the full path to the worktree directory so you kno
- **Use a dedicated worktree folder** — Keep all worktrees in one place outside the main repo
- **Clean up when done** — Remove worktree agents after merging PRs to avoid clutter
- **Watch for Changes** — Enable file watching to keep the file tree in sync with worktree activity
- **Run multiple dev instances** — Use `VITE_PORT` environment variable to run Maestro in multiple worktrees simultaneously:
```bash
# In main worktree
npm run dev
# In worktree 2 (different terminal/directory)
VITE_PORT=5174 npm run dev
```

View File

@@ -15,11 +15,20 @@ Maestro can update itself automatically! This feature was introduced in **v0.8.7
## v0.14.x - Doc Graphs, SSH Agents, Inline Wizard
**Latest: v0.14.5** | Released January 1, 1
**Latest: v0.14.5** | Released January 23, 2026
Changes in this point release include:
-
- Desktop app performance improvements (more to come on this, we want Maestro blazing fast) 🐌
- Added local manifest feature for custom playbooks 📖
- Agents are now inherently aware of your activity history as seen in the history panel 📜 (this is built-in cross context memory!)
- Added markdown rendering support for AI responses in mobile view 📱
- Bugfix in tracking costs from JSONL files that were aged out 🏦
- Added BlueSky social media handle for leaderboard 🦋
- Added options to disable GPU rendering and confetti 🎊
- Better handling of large files in preview 🗄️
- Bug fix in Claude context calculation 🧮
- Addressed bug in OpenSpec version reporting 🐛
The major contributions to 0.14.x remain:
@@ -43,7 +52,7 @@ The major contributions to 0.14.x remain:
- Added "Toggle Bookmark" shortcut (CTRL/CMD+SHIFT+B) ⌨️
- Gist publishing now shows previous URLs with copy button 📋
Thanks for the contributions: @t1mmen @aejfager @Crumbgrabber @whglaser @b3nw @deandebeer @shadown @breki @charles-dyfis-net
Thanks for the contributions: @t1mmen @aejfager @Crumbgrabber @whglaser @b3nw @deandebeer @shadown @breki @charles-dyfis-net @ronaldeddings @jlengrand @ksylvan
### Previous Releases in this Series

View File

@@ -651,9 +651,14 @@ describe('marketplace IPC handlers', () => {
manifest: sampleManifest,
};
// Mock file reads:
// 1. First read: official cache
// 2. Second read: local manifest (ENOENT = no local manifest)
// 3. Third read: existing playbooks for this session
vi.mocked(fs.readFile)
.mockResolvedValueOnce(JSON.stringify(validCache))
.mockResolvedValueOnce(JSON.stringify({ playbooks: existingPlaybooks }));
.mockResolvedValueOnce(JSON.stringify(validCache)) // Official cache
.mockRejectedValueOnce({ code: 'ENOENT' }) // No local manifest
.mockResolvedValueOnce(JSON.stringify({ playbooks: existingPlaybooks })); // Existing playbooks
vi.mocked(fs.mkdir).mockResolvedValue(undefined);
vi.mocked(fs.writeFile).mockResolvedValue(undefined);
@@ -678,7 +683,12 @@ describe('marketplace IPC handlers', () => {
manifest: sampleManifest,
};
vi.mocked(fs.readFile).mockResolvedValueOnce(JSON.stringify(validCache));
// Mock file reads:
// 1. First read: official cache
// 2. Second read: local manifest (ENOENT = no local manifest)
vi.mocked(fs.readFile)
.mockResolvedValueOnce(JSON.stringify(validCache)) // Official cache
.mockRejectedValueOnce({ code: 'ENOENT' }); // No local manifest
const handler = handlers.get('marketplace:importPlaybook');
const result = await handler!(
@@ -693,6 +703,240 @@ describe('marketplace IPC handlers', () => {
expect(result.error).toContain('Playbook not found');
});
it('should import a local playbook that only exists in the local manifest', async () => {
// Create a local-only playbook that doesn't exist in the official manifest
const localOnlyPlaybook = {
id: 'local-playbook-1',
title: 'Local Playbook',
description: 'A playbook from the local manifest',
category: 'Custom',
author: 'Local Author',
lastUpdated: '2024-01-20',
path: 'local-playbooks/local-playbook-1',
documents: [{ filename: 'local-phase-1', resetOnCompletion: false }],
loopEnabled: false,
maxLoops: null,
prompt: 'Local custom instructions',
};
const localManifest: MarketplaceManifest = {
lastUpdated: '2024-01-20',
playbooks: [localOnlyPlaybook],
};
// Setup: cache with official manifest (no local-playbook-1)
const validCache: MarketplaceCache = {
fetchedAt: Date.now(),
manifest: sampleManifest, // Official manifest without local playbook
};
// Mock file reads:
// 1. First read: official cache
// 2. Second read: local manifest (with the local-only playbook)
// 3. Third read: existing playbooks (ENOENT = none)
vi.mocked(fs.readFile)
.mockResolvedValueOnce(JSON.stringify(validCache)) // Cache with official manifest
.mockResolvedValueOnce(JSON.stringify(localManifest)) // Local manifest
.mockRejectedValueOnce({ code: 'ENOENT' }); // No existing playbooks
vi.mocked(fs.mkdir).mockResolvedValue(undefined);
vi.mocked(fs.writeFile).mockResolvedValue(undefined);
// Mock document fetch for the local playbook's document
mockFetch.mockResolvedValueOnce({
ok: true,
text: () => Promise.resolve('# Local Phase 1 Content'),
});
const handler = handlers.get('marketplace:importPlaybook');
const result = await handler!(
{} as any,
'local-playbook-1', // This ID only exists in the LOCAL manifest
'My Local Playbook',
'/autorun/folder',
'session-123'
);
// Verify the import succeeded
expect(result.success).toBe(true);
expect(result.playbook).toBeDefined();
expect(result.playbook.name).toBe('Local Playbook');
expect(result.importedDocs).toEqual(['local-phase-1']);
// Verify target folder was created
expect(fs.mkdir).toHaveBeenCalledWith('/autorun/folder/My Local Playbook', {
recursive: true,
});
// Verify document was written
expect(fs.writeFile).toHaveBeenCalledWith(
'/autorun/folder/My Local Playbook/local-phase-1.md',
'# Local Phase 1 Content',
'utf-8'
);
// Verify the custom prompt was preserved
expect(result.playbook.prompt).toBe('Local custom instructions');
});
it('should import a local playbook with filesystem path (reads from disk, not GitHub)', async () => {
// Create a local playbook with a LOCAL FILESYSTEM path (absolute path)
// This tests the isLocalPath() detection and fs.readFile document reading
const localFilesystemPlaybook = {
id: 'filesystem-playbook-1',
title: 'Filesystem Playbook',
description: 'A playbook stored on the local filesystem',
category: 'Custom',
author: 'Local Author',
lastUpdated: '2024-01-20',
path: '/Users/test/custom-playbooks/my-playbook', // ABSOLUTE PATH - triggers local file reading
documents: [
{ filename: 'phase-1', resetOnCompletion: false },
{ filename: 'phase-2', resetOnCompletion: true },
],
loopEnabled: false,
maxLoops: null,
prompt: 'Filesystem playbook instructions',
};
const localManifest: MarketplaceManifest = {
lastUpdated: '2024-01-20',
playbooks: [localFilesystemPlaybook],
};
// Setup: cache with official manifest (no filesystem-playbook-1)
const validCache: MarketplaceCache = {
fetchedAt: Date.now(),
manifest: sampleManifest,
};
// Mock file reads in order:
// 1. Official cache
// 2. Local manifest (with the filesystem playbook)
// 3. Document read: /Users/test/custom-playbooks/my-playbook/phase-1.md
// 4. Document read: /Users/test/custom-playbooks/my-playbook/phase-2.md
// 5. Existing playbooks file (ENOENT = none)
vi.mocked(fs.readFile)
.mockResolvedValueOnce(JSON.stringify(validCache)) // 1. Official cache
.mockResolvedValueOnce(JSON.stringify(localManifest)) // 2. Local manifest
.mockResolvedValueOnce('# Phase 1 from filesystem\n\n- [ ] Task 1') // 3. phase-1.md
.mockResolvedValueOnce('# Phase 2 from filesystem\n\n- [ ] Task 2') // 4. phase-2.md
.mockRejectedValueOnce({ code: 'ENOENT' }); // 5. No existing playbooks
vi.mocked(fs.mkdir).mockResolvedValue(undefined);
vi.mocked(fs.writeFile).mockResolvedValue(undefined);
const handler = handlers.get('marketplace:importPlaybook');
const result = await handler!(
{} as any,
'filesystem-playbook-1',
'Imported Filesystem Playbook',
'/autorun/folder',
'session-123'
);
// Verify the import succeeded
expect(result.success).toBe(true);
expect(result.playbook).toBeDefined();
expect(result.playbook.name).toBe('Filesystem Playbook');
expect(result.importedDocs).toEqual(['phase-1', 'phase-2']);
// Verify documents were READ FROM LOCAL FILESYSTEM (not fetched from GitHub)
// The fs.readFile mock should have been called for the document paths
expect(fs.readFile).toHaveBeenCalledWith(
'/Users/test/custom-playbooks/my-playbook/phase-1.md',
'utf-8'
);
expect(fs.readFile).toHaveBeenCalledWith(
'/Users/test/custom-playbooks/my-playbook/phase-2.md',
'utf-8'
);
// Verify NO fetch calls were made for documents (since they're local)
// Note: mockFetch should NOT have been called for document retrieval
expect(mockFetch).not.toHaveBeenCalled();
// Verify documents were written to the target folder
expect(fs.writeFile).toHaveBeenCalledWith(
'/autorun/folder/Imported Filesystem Playbook/phase-1.md',
'# Phase 1 from filesystem\n\n- [ ] Task 1',
'utf-8'
);
expect(fs.writeFile).toHaveBeenCalledWith(
'/autorun/folder/Imported Filesystem Playbook/phase-2.md',
'# Phase 2 from filesystem\n\n- [ ] Task 2',
'utf-8'
);
// Verify the prompt was preserved
expect(result.playbook.prompt).toBe('Filesystem playbook instructions');
});
it('should import a local playbook with tilde path (reads from disk, not GitHub)', async () => {
// Create a local playbook with a TILDE-PREFIXED path (home directory)
const tildePathPlaybook = {
id: 'tilde-playbook-1',
title: 'Tilde Path Playbook',
description: 'A playbook stored in home directory',
category: 'Custom',
author: 'Local Author',
lastUpdated: '2024-01-20',
path: '~/playbooks/my-tilde-playbook', // TILDE PATH - triggers local file reading
documents: [{ filename: 'setup', resetOnCompletion: false }],
loopEnabled: false,
maxLoops: null,
prompt: null,
};
const localManifest: MarketplaceManifest = {
lastUpdated: '2024-01-20',
playbooks: [tildePathPlaybook],
};
const validCache: MarketplaceCache = {
fetchedAt: Date.now(),
manifest: sampleManifest,
};
// Mock os.homedir() to return a predictable path
vi.mock('os', () => ({
homedir: vi.fn().mockReturnValue('/Users/testuser'),
}));
// The tilde path ~/playbooks/my-tilde-playbook will be resolved to:
// /Users/testuser/playbooks/my-tilde-playbook (or similar based on os.homedir)
// For this test, we just verify that fs.readFile is called (not fetch)
vi.mocked(fs.readFile)
.mockResolvedValueOnce(JSON.stringify(validCache))
.mockResolvedValueOnce(JSON.stringify(localManifest))
.mockResolvedValueOnce('# Setup from tilde path')
.mockRejectedValueOnce({ code: 'ENOENT' });
vi.mocked(fs.mkdir).mockResolvedValue(undefined);
vi.mocked(fs.writeFile).mockResolvedValue(undefined);
const handler = handlers.get('marketplace:importPlaybook');
const result = await handler!(
{} as any,
'tilde-playbook-1',
'Tilde Playbook',
'/autorun/folder',
'session-123'
);
// Verify the import succeeded
expect(result.success).toBe(true);
expect(result.playbook).toBeDefined();
expect(result.playbook.name).toBe('Tilde Path Playbook');
expect(result.importedDocs).toEqual(['setup']);
// Verify NO fetch calls were made (documents read from filesystem)
expect(mockFetch).not.toHaveBeenCalled();
// Verify null prompt is converted to empty string (Maestro default fallback)
expect(result.playbook.prompt).toBe('');
});
it('should continue importing when individual document fetch fails', async () => {
const validCache: MarketplaceCache = {
fetchedAt: Date.now(),
@@ -1196,4 +1440,169 @@ describe('marketplace IPC handlers', () => {
}
});
});
describe('merged manifest lookup', () => {
it('should find playbook ID that exists only in local manifest', async () => {
// Create a playbook that only exists in the local manifest
const localOnlyPlaybook = {
id: 'local-only-playbook',
title: 'Local Only Playbook',
description: 'This playbook only exists locally',
category: 'Custom',
author: 'Local Author',
lastUpdated: '2024-01-20',
path: 'custom/local-only-playbook',
documents: [{ filename: 'doc1', resetOnCompletion: false }],
loopEnabled: false,
maxLoops: null,
prompt: 'Local only prompt',
};
const localManifest: MarketplaceManifest = {
lastUpdated: '2024-01-20',
playbooks: [localOnlyPlaybook],
};
// Official manifest does NOT contain local-only-playbook
const validCache: MarketplaceCache = {
fetchedAt: Date.now(),
manifest: sampleManifest, // Only has test-playbook-1, test-playbook-2, test-playbook-with-assets
};
// Mock file reads:
// 1. Cache (official manifest)
// 2. Local manifest (with local-only-playbook)
vi.mocked(fs.readFile)
.mockResolvedValueOnce(JSON.stringify(validCache))
.mockResolvedValueOnce(JSON.stringify(localManifest));
const handler = handlers.get('marketplace:getManifest');
const result = await handler!({} as any);
// Verify the merged manifest contains the local-only playbook
const foundPlaybook = result.manifest.playbooks.find(
(p: any) => p.id === 'local-only-playbook'
);
expect(foundPlaybook).toBeDefined();
expect(foundPlaybook.title).toBe('Local Only Playbook');
expect(foundPlaybook.source).toBe('local');
// Verify it also contains the official playbooks
const officialPlaybook = result.manifest.playbooks.find(
(p: any) => p.id === 'test-playbook-1'
);
expect(officialPlaybook).toBeDefined();
expect(officialPlaybook.source).toBe('official');
});
it('should prefer local version when playbook ID exists in both manifests', async () => {
// Create a local playbook that has the SAME ID as an official one
const localOverridePlaybook = {
id: 'test-playbook-1', // SAME ID as official playbook
title: 'Local Override Version',
description: 'This local version overrides the official one',
category: 'Custom',
author: 'Local Author',
lastUpdated: '2024-01-25',
path: '/Users/local/custom-playbooks/test-playbook-1', // Local filesystem path
documents: [
{ filename: 'custom-phase-1', resetOnCompletion: false },
{ filename: 'custom-phase-2', resetOnCompletion: false },
],
loopEnabled: true,
maxLoops: 5,
prompt: 'Local override custom prompt',
};
const localManifest: MarketplaceManifest = {
lastUpdated: '2024-01-25',
playbooks: [localOverridePlaybook],
};
// Official manifest has test-playbook-1 with different properties
const validCache: MarketplaceCache = {
fetchedAt: Date.now(),
manifest: sampleManifest, // Contains test-playbook-1 with title "Test Playbook"
};
vi.mocked(fs.readFile)
.mockResolvedValueOnce(JSON.stringify(validCache))
.mockResolvedValueOnce(JSON.stringify(localManifest));
const handler = handlers.get('marketplace:getManifest');
const result = await handler!({} as any);
// Find the playbook with ID 'test-playbook-1'
const mergedPlaybook = result.manifest.playbooks.find((p: any) => p.id === 'test-playbook-1');
// Verify the LOCAL version took precedence
expect(mergedPlaybook).toBeDefined();
expect(mergedPlaybook.title).toBe('Local Override Version'); // NOT "Test Playbook"
expect(mergedPlaybook.source).toBe('local'); // Tagged as local
expect(mergedPlaybook.author).toBe('Local Author');
expect(mergedPlaybook.documents).toEqual([
{ filename: 'custom-phase-1', resetOnCompletion: false },
{ filename: 'custom-phase-2', resetOnCompletion: false },
]);
expect(mergedPlaybook.loopEnabled).toBe(true);
expect(mergedPlaybook.maxLoops).toBe(5);
expect(mergedPlaybook.prompt).toBe('Local override custom prompt');
// Verify there's only ONE playbook with ID 'test-playbook-1' (no duplicates)
const matchingPlaybooks = result.manifest.playbooks.filter(
(p: any) => p.id === 'test-playbook-1'
);
expect(matchingPlaybooks.length).toBe(1);
// Verify other official playbooks are still present
const otherOfficialPlaybook = result.manifest.playbooks.find(
(p: any) => p.id === 'test-playbook-2'
);
expect(otherOfficialPlaybook).toBeDefined();
expect(otherOfficialPlaybook.source).toBe('official');
});
it('should tag playbooks with correct source (official vs local)', async () => {
const localPlaybook = {
id: 'brand-new-local',
title: 'Brand New Local Playbook',
description: 'A completely new local playbook',
category: 'Custom',
author: 'Local Author',
lastUpdated: '2024-01-20',
path: '/local/playbooks/brand-new',
documents: [{ filename: 'doc', resetOnCompletion: false }],
loopEnabled: false,
maxLoops: null,
prompt: null,
};
const localManifest: MarketplaceManifest = {
lastUpdated: '2024-01-20',
playbooks: [localPlaybook],
};
const validCache: MarketplaceCache = {
fetchedAt: Date.now(),
manifest: sampleManifest,
};
vi.mocked(fs.readFile)
.mockResolvedValueOnce(JSON.stringify(validCache))
.mockResolvedValueOnce(JSON.stringify(localManifest));
const handler = handlers.get('marketplace:getManifest');
const result = await handler!({} as any);
// Verify all playbooks have the correct source tag
for (const playbook of result.manifest.playbooks) {
if (playbook.id === 'brand-new-local') {
expect(playbook.source).toBe('local');
} else {
// All sample manifest playbooks should be tagged as official
expect(playbook.source).toBe('official');
}
}
});
});
});

View File

@@ -71,6 +71,7 @@ describe('stats IPC handlers', () => {
sessionsByDay: [],
avgSessionDuration: 0,
byAgentByDay: {},
bySessionByDay: {},
}),
exportToCsv: vi.fn().mockReturnValue('id,sessionId,...'),
clearOldData: vi.fn().mockReturnValue({ success: true, deletedCount: 0 }),

View File

@@ -166,24 +166,38 @@ describe('calculateContextTokens', () => {
...overrides,
});
it('should exclude output tokens for Claude agents', () => {
it('should exclude output tokens and cacheReadInputTokens for Claude agents', () => {
const stats = createStats();
const result = calculateContextTokens(stats, 'claude-code');
// 10000 + 1000 + 2000 = 13000 (no output tokens)
expect(result).toBe(13000);
// 10000 + 1000 = 11000 (no output tokens, no cacheRead - cumulative)
expect(result).toBe(11000);
});
it('should include output tokens for Codex agents', () => {
it('should include output tokens but exclude cacheReadInputTokens for Codex agents', () => {
const stats = createStats();
const result = calculateContextTokens(stats, 'codex');
// 10000 + 5000 + 1000 + 2000 = 18000 (includes output)
expect(result).toBe(18000);
// 10000 + 5000 + 1000 = 16000 (includes output, excludes cacheRead)
expect(result).toBe(16000);
});
it('should default to Claude behavior when agent is undefined', () => {
const stats = createStats();
const result = calculateContextTokens(stats);
expect(result).toBe(13000);
// 10000 + 1000 = 11000 (excludes cacheRead)
expect(result).toBe(11000);
});
it('should exclude cacheReadInputTokens because they are cumulative session totals', () => {
// cacheReadInputTokens accumulate across all turns in a session and can
// exceed the context window. Including them would cause context % > 100%.
const stats = createStats({
inputTokens: 5000,
cacheCreationInputTokens: 1000,
cacheReadInputTokens: 500000, // Very high cumulative value
});
const result = calculateContextTokens(stats, 'claude-code');
// Should only be 5000 + 1000 = 6000, NOT 506000
expect(result).toBe(6000);
});
});

File diff suppressed because it is too large Load Diff

View File

@@ -107,8 +107,9 @@ vi.mock('../../../renderer/components/AchievementCard', () => ({
),
}));
// Add __APP_VERSION__ global
// Add __APP_VERSION__ and __COMMIT_HASH__ globals
(globalThis as unknown as { __APP_VERSION__: string }).__APP_VERSION__ = '1.0.0';
(globalThis as unknown as { __COMMIT_HASH__: string }).__COMMIT_HASH__ = '';
// Create test theme
const createTheme = (): Theme => ({

View File

@@ -41,6 +41,7 @@ const mockData: StatsAggregation = {
sessionsByDay: [],
avgSessionDuration: 288000,
byAgentByDay: {},
bySessionByDay: {},
};
// Empty data for edge case testing
@@ -58,6 +59,7 @@ const emptyData: StatsAggregation = {
sessionsByDay: [],
avgSessionDuration: 0,
byAgentByDay: {},
bySessionByDay: {},
};
// Data with large numbers
@@ -78,6 +80,7 @@ const largeNumbersData: StatsAggregation = {
sessionsByDay: [],
avgSessionDuration: 7200000,
byAgentByDay: {},
bySessionByDay: {},
};
// Single agent data
@@ -97,6 +100,7 @@ const singleAgentData: StatsAggregation = {
sessionsByDay: [],
avgSessionDuration: 360000,
byAgentByDay: {},
bySessionByDay: {},
};
// Only auto queries
@@ -116,6 +120,7 @@ const onlyAutoData: StatsAggregation = {
sessionsByDay: [],
avgSessionDuration: 360000,
byAgentByDay: {},
bySessionByDay: {},
};
describe('SummaryCards', () => {

View File

@@ -69,6 +69,7 @@ const mockStatsData: StatsAggregation = {
],
avgSessionDuration: 288000,
byAgentByDay: {},
bySessionByDay: {},
};
describe('Chart Accessibility - AgentComparisonChart', () => {
@@ -409,6 +410,7 @@ describe('Chart Accessibility - General ARIA Patterns', () => {
sessionsByDay: [],
avgSessionDuration: 0,
byAgentByDay: {},
bySessionByDay: {},
};
render(<AgentComparisonChart data={emptyData} theme={mockTheme} />);

View File

@@ -211,6 +211,7 @@ const createSampleData = () => ({
],
avgSessionDuration: 144000,
byAgentByDay: {},
bySessionByDay: {},
});
describe('UsageDashboard Responsive Layout', () => {

View File

@@ -159,6 +159,7 @@ beforeEach(() => {
],
avgSessionDuration: 180000,
byAgentByDay: {},
bySessionByDay: {},
});
mockStats.getDatabaseSize.mockResolvedValue(1024 * 1024); // 1 MB
});
@@ -280,6 +281,7 @@ describe('Usage Dashboard State Transition Animations', () => {
sessionsByDay: [],
avgSessionDuration: 240000,
byAgentByDay: {},
bySessionByDay: {},
};
it('applies dashboard-card-enter class to metric cards', () => {

View File

@@ -146,6 +146,7 @@ const createSampleData = () => ({
],
avgSessionDuration: 144000,
byAgentByDay: {},
bySessionByDay: {},
});
describe('UsageDashboardModal', () => {

View File

@@ -128,6 +128,68 @@ describe('synopsis', () => {
});
});
describe('conversational filler filtering', () => {
it('should skip "Excellent!" and use next meaningful line', () => {
const response = 'Excellent!\n\nThe markdown generation is working perfectly.';
const result = parseSynopsis(response);
expect(result.shortSummary).toBe('The markdown generation is working perfectly.');
});
it('should skip "Perfect!" and use next meaningful line', () => {
const response = 'Perfect!\n\nAll tests are passing now.';
const result = parseSynopsis(response);
expect(result.shortSummary).toBe('All tests are passing now.');
});
it('should skip multiple filler words at start', () => {
const response = 'Great!\n\nExcellent!\n\nFixed the authentication bug in login handler.';
const result = parseSynopsis(response);
expect(result.shortSummary).toBe('Fixed the authentication bug in login handler.');
});
it('should skip filler with exclamation marks and variations', () => {
const fillers = [
'Excellent!',
'Perfect!',
'Great!',
'Awesome!',
'Done!',
'Wonderful!',
'Fantastic!',
];
for (const filler of fillers) {
const response = `${filler}\n\nActual content here.`;
const result = parseSynopsis(response);
expect(result.shortSummary).toBe('Actual content here.');
}
});
it('should skip phrase fillers like "Looks good!"', () => {
const response = 'Looks good!\n\nUpdated the config file with new settings.';
const result = parseSynopsis(response);
expect(result.shortSummary).toBe('Updated the config file with new settings.');
});
it('should skip "All done!" style fillers', () => {
const response = 'All done!\n\nRefactored the component to use hooks.';
const result = parseSynopsis(response);
expect(result.shortSummary).toBe('Refactored the component to use hooks.');
});
it('should fall back to "Task completed" if only filler exists', () => {
const response = 'Excellent!';
const result = parseSynopsis(response);
expect(result.shortSummary).toBe('Task completed');
});
});
describe('fallback behavior', () => {
it('should use first line as summary when no format detected', () => {
const response = 'Just a plain text response\nWith multiple lines.\nAnd more content.';

View File

@@ -735,18 +735,31 @@ export function registerMarketplaceHandlers(deps: MarketplaceHandlerDependencies
LOG_CONTEXT
);
// Get the manifest to find the playbook
// Get the manifest to find the playbook (including local playbooks)
// This mirrors the logic in marketplace:getManifest to ensure local playbooks are included
const cache = await readCache(app);
let manifest: MarketplaceManifest;
let officialManifest: MarketplaceManifest | null = null;
if (cache && isCacheValid(cache)) {
manifest = cache.manifest;
officialManifest = cache.manifest;
} else {
manifest = await fetchManifest();
await writeCache(app, manifest);
try {
officialManifest = await fetchManifest();
await writeCache(app, officialManifest);
} catch (error) {
logger.warn(
'Failed to fetch official manifest during import, continuing with local only',
LOG_CONTEXT,
{ error }
);
}
}
// Find the playbook
// Read local manifest and merge with official
const localManifest = await readLocalManifest(app);
const manifest = mergeManifests(officialManifest, localManifest);
// Find the playbook in the merged manifest
const marketplacePlaybook = manifest.playbooks.find((p) => p.id === playbookId);
if (!marketplacePlaybook) {
throw new MarketplaceImportError(`Playbook not found: ${playbookId}`);

View File

@@ -60,8 +60,13 @@ const COMBINED_CONTEXT_AGENTS: Set<ToolType> = new Set(['codex']);
/**
* Calculate total context tokens based on agent-specific semantics.
*
* Claude models: Context = input + cacheCreation + cacheRead (output excluded)
* OpenAI models: Context = input + output + cacheRead (combined limit)
* IMPORTANT: Claude Code reports CUMULATIVE session tokens, not per-request tokens.
* The cacheReadInputTokens can exceed the context window because they accumulate
* across all turns in the conversation. For context pressure display, we should
* only count tokens that represent NEW context being added:
*
* Claude models: Context = input + cacheCreation (excludes cacheRead - already cached)
* OpenAI models: Context = input + output (combined limit)
*
* @param stats - The usage statistics containing token counts
* @param agentId - The agent identifier for agent-specific calculation
@@ -74,8 +79,11 @@ export function calculateContextTokens(
>,
agentId?: ToolType
): number {
const baseTokens =
stats.inputTokens + (stats.cacheCreationInputTokens || 0) + (stats.cacheReadInputTokens || 0);
// For Claude: inputTokens = uncached new tokens, cacheCreationInputTokens = newly cached tokens
// cacheReadInputTokens are EXCLUDED because they represent already-cached context
// that Claude Code reports cumulatively across the session, not per-request.
// Including them would cause context % to exceed 100% impossibly.
const baseTokens = stats.inputTokens + (stats.cacheCreationInputTokens || 0);
// OpenAI models have combined input+output context limits
if (agentId && COMBINED_CONTEXT_AGENTS.has(agentId)) {
@@ -91,14 +99,14 @@ export function calculateContextTokens(
* Uses agent-specific default context window sizes for accurate estimation.
*
* IMPORTANT: Context calculation varies by agent:
* - Claude models: inputTokens + cacheCreationInputTokens + cacheReadInputTokens
* (output tokens are separate from context window)
* - OpenAI models (Codex): inputTokens + outputTokens + cacheReadInputTokens
* - Claude models: inputTokens + cacheCreationInputTokens
* (cacheRead excluded - cumulative, output excluded - separate limit)
* - OpenAI models (Codex): inputTokens + outputTokens
* (combined context window includes both input and output)
*
* The cacheReadInputTokens are critical because they represent the full
* conversation context being sent, even though they're served from cache
* for billing purposes.
* Note: cacheReadInputTokens are NOT included because Claude Code reports them
* as cumulative session totals, not per-request values. Including them would
* cause context percentage to exceed 100% impossibly.
*
* @param stats - The usage statistics containing token counts
* @param agentId - The agent identifier for agent-specific context window size

View File

@@ -1565,6 +1565,40 @@ export class StatsDB {
sessionCount: sessionTotals.count,
});
// By session by day (for agent usage chart - shows each Maestro session's usage over time)
const bySessionByDayStart = perfMetrics.start();
const bySessionByDayStmt = this.db.prepare(`
SELECT session_id,
date(start_time / 1000, 'unixepoch', 'localtime') as date,
COUNT(*) as count,
SUM(duration) as duration
FROM query_events
WHERE start_time >= ?
GROUP BY session_id, date(start_time / 1000, 'unixepoch', 'localtime')
ORDER BY session_id, date ASC
`);
const bySessionByDayRows = bySessionByDayStmt.all(startTime) as Array<{
session_id: string;
date: string;
count: number;
duration: number;
}>;
const bySessionByDay: Record<
string,
Array<{ date: string; count: number; duration: number }>
> = {};
for (const row of bySessionByDayRows) {
if (!bySessionByDay[row.session_id]) {
bySessionByDay[row.session_id] = [];
}
bySessionByDay[row.session_id].push({
date: row.date,
count: row.count,
duration: row.duration,
});
}
perfMetrics.end(bySessionByDayStart, 'getAggregatedStats:bySessionByDay', { range });
const totalDuration = perfMetrics.end(perfStart, 'getAggregatedStats:total', {
range,
totalQueries: totals.count,
@@ -1593,6 +1627,7 @@ export class StatsDB {
sessionsByDay: sessionsByDayRows,
avgSessionDuration: Math.round(avgSessionDurationResult.avg_duration),
byAgentByDay,
bySessionByDay,
};
}

View File

@@ -5,9 +5,12 @@ Provide a brief synopsis of what you just accomplished in this task using this e
**Details:** [A paragraph with more specifics about what was done, files changed, etc.]
Rules:
- Write in a scientific log style: factual, concise, and informative. Example: "Added user authentication endpoint with JWT validation" not "I helped you add authentication".
- Be specific about what was actually accomplished, not what was attempted.
- Focus only on meaningful work that was done. Omit filler phrases like "the task is complete", "no further action needed", "everything is working", etc.
- NEVER start with conversational words like "Excellent!", "Perfect!", "Great!", "Awesome!", "Done!", or any similar expressions. These add no information value.
- NEVER include preamble about session context, interaction history, or caveats like "This is our first interaction", "there's no prior work to summarize", "you asked me to", etc. Jump straight to the accomplishment.
- Start directly with the action taken (e.g., "Fixed button visibility..." not "You asked me to fix...").
- Start directly with the action taken using a verb (e.g., "Fixed button visibility..." not "You asked me to fix..." and not "Excellent! Fixed...").
- If nothing meaningful was accomplished (no code changes, no files modified, no research completed, just greetings or introductions), respond with ONLY the text: NOTHING_TO_REPORT
- Use NOTHING_TO_REPORT when the conversation was just a greeting, introduction, or there genuinely was no work to summarize.

View File

@@ -1017,11 +1017,13 @@ function MaestroConsoleInner() {
}
// Sessions must have aiTabs - if missing, this is a data corruption issue
// Create a default tab to prevent crashes when code calls .find() on aiTabs
if (!session.aiTabs || session.aiTabs.length === 0) {
console.error(
'[restoreSession] Session has no aiTabs - data corruption, skipping:',
'[restoreSession] Session has no aiTabs - data corruption, creating default tab:',
session.id
);
const defaultTabId = generateId();
return {
...session,
aiPid: -1,
@@ -1029,6 +1031,27 @@ function MaestroConsoleInner() {
state: 'error' as SessionState,
isLive: false,
liveUrl: undefined,
aiTabs: [
{
id: defaultTabId,
agentSessionId: null,
name: null,
state: 'idle' as const,
logs: [
{
id: generateId(),
timestamp: Date.now(),
source: 'system' as const,
text: '⚠️ Session data was corrupted and has been recovered with a new tab.',
},
],
starred: false,
inputValue: '',
stagedImages: [],
createdAt: Date.now(),
},
],
activeTabId: defaultTabId,
};
}
@@ -1983,11 +2006,19 @@ function MaestroConsoleInner() {
}
// Create a short summary from the last AI response
// Skip conversational fillers like "Excellent!", "Perfect!", etc.
let summary = '';
if (lastAiLog?.text) {
const text = lastAiLog.text.trim();
if (text.length > 10) {
const firstSentence = text.match(/^[^.!?\n]*[.!?]/)?.[0] || text.substring(0, 120);
// Match sentences (text ending with . ! or ?)
const sentences = text.match(/[^.!?\n]+[.!?]+/g) || [];
// Pattern to detect conversational filler sentences
const fillerPattern =
/^(excellent|perfect|great|awesome|wonderful|fantastic|good|nice|cool|done|ok|okay|alright|sure|yes|yeah|absolutely|certainly|definitely|looks?\s+good|all\s+(set|done|ready)|got\s+it|understood|will\s+do|on\s+it|no\s+problem|no\s+worries|happy\s+to\s+help)[!.\s]*$/i;
// Find the first non-filler sentence
const meaningfulSentence = sentences.find((s) => !fillerPattern.test(s.trim()));
const firstSentence = meaningfulSentence?.trim() || text.substring(0, 120);
summary =
firstSentence.length < text.length
? firstSentence
@@ -2314,11 +2345,17 @@ function MaestroConsoleInner() {
// Fire side effects AFTER state update (outside the updater function)
// Record stats for any completed query (even if we have queued items to process next)
if (toastData?.startTime && toastData?.agentType) {
// Determine if this query was part of an Auto Run session
const sessionIdForStats = toastData.sessionId || actualSessionId;
const isAutoRunQuery = getBatchStateRef.current
? getBatchStateRef.current(sessionIdForStats).isRunning
: false;
window.maestro.stats
.recordQuery({
sessionId: toastData.sessionId || actualSessionId,
sessionId: sessionIdForStats,
agentType: toastData.agentType,
source: 'user', // Interactive queries are always user-initiated
source: isAutoRunQuery ? 'auto' : 'user',
startTime: toastData.startTime,
duration: toastData.duration,
projectPath: toastData.projectPath,

View File

@@ -30,6 +30,7 @@ declare module '*.webp' {
// Vite-injected build-time constants
declare const __APP_VERSION__: string;
declare const __COMMIT_HASH__: string;
// Splash screen global functions (defined in index.html)
interface Window {

View File

@@ -188,6 +188,7 @@ export function AboutModal({
</h1>
<span className="text-xs font-mono" style={{ color: theme.colors.textDim }}>
v{__APP_VERSION__}
{__COMMIT_HASH__ && ` (${__COMMIT_HASH__})`}
</span>
</div>
<p className="text-xs opacity-70" style={{ color: theme.colors.textDim }}>

View File

@@ -10,6 +10,7 @@ import React, { useState, useMemo, useRef, useEffect } from 'react';
import { Check } from 'lucide-react';
import type { Theme } from '../types';
import type { GroupChatHistoryEntry } from '../../shared/group-chat-types';
import { stripMarkdown } from '../utils/textProcessing';
// Lookback period options for the activity graph
type LookbackPeriod = {
@@ -529,9 +530,9 @@ export function GroupChatHistoryPanel({
</span>
</div>
{/* Summary - full text */}
{/* Summary - strip markdown for clean display */}
<p className="text-xs leading-relaxed" style={{ color: theme.colors.textMain }}>
{entry.summary}
{stripMarkdown(entry.summary)}
</p>
{/* Footer with cost */}

View File

@@ -34,6 +34,7 @@ import { AgentSessionsBrowser } from './AgentSessionsBrowser';
import { TabBar } from './TabBar';
import { WizardConversationView, DocumentGenerationView } from './InlineWizard';
import { gitService } from '../services/git';
import { remoteUrlToBrowserUrl } from '../../shared/gitUtils';
import { useGitBranch, useGitDetail, useGitFileStatus } from '../contexts/GitStatusContext';
import { formatShortcutKeys } from '../utils/shortcutFormatter';
import { calculateContextTokens } from '../utils/contextUsage';
@@ -939,10 +940,8 @@ export const MainPanel = React.memo(
<button
onClick={(e) => {
e.stopPropagation();
const url = gitInfo.remote.startsWith('http')
? gitInfo.remote
: `https://${gitInfo.remote}`;
window.open(url.replace(/\.git$/, ''), '_blank');
const url = remoteUrlToBrowserUrl(gitInfo.remote);
if (url) window.open(url, '_blank');
}}
className="text-xs font-mono truncate hover:underline text-left"
style={{ color: theme.colors.textMain }}

View File

@@ -352,6 +352,18 @@ function PlaybookDetailView({
/ {playbook.subcategory}
</span>
)}
{playbook.source === 'local' && (
<span
className="px-2 py-0.5 rounded text-xs font-medium"
style={{
backgroundColor: '#3b82f620',
color: '#3b82f6',
}}
title="Custom local playbook"
>
Local
</span>
)}
</div>
<h2 className="text-lg font-semibold truncate" style={{ color: theme.colors.textMain }}>
{playbook.title}
@@ -508,6 +520,28 @@ function PlaybookDetailView({
{playbook.lastUpdated}
</p>
</div>
{/* Source badge for local playbooks */}
{playbook.source === 'local' && (
<div className="mb-4">
<h4
className="text-xs font-semibold mb-1 uppercase tracking-wide"
style={{ color: theme.colors.textDim }}
>
Source
</h4>
<span
className="px-2 py-0.5 rounded text-xs font-medium inline-block"
style={{
backgroundColor: '#3b82f620',
color: '#3b82f6',
}}
title="Custom local playbook"
>
Local
</span>
</div>
)}
</div>
{/* Main content area with document dropdown and markdown preview */}

View File

@@ -1,47 +1,52 @@
/**
* AgentUsageChart
*
* Line chart showing provider usage over time with one line per provider.
* Displays query counts and duration for each provider (claude-code, codex, opencode).
* Line chart showing Maestro agent (session) usage over time with one line per agent.
* Displays query counts and duration for each agent that was used during the time period.
*
* Features:
* - One line per provider
* - Dual Y-axes: queries (left) and time (right)
* - Provider-specific colors
* - One line per Maestro agent (named session from left panel)
* - Toggle between query count and time metrics
* - Session ID to name mapping when names are available
* - Hover tooltips with exact values
* - Responsive SVG rendering
* - Theme-aware styling
* - Limits display to top 10 agents by query count
*/
import React, { useState, useMemo, useCallback } from 'react';
import { format, parseISO } from 'date-fns';
import type { Theme } from '../../types';
import type { Theme, Session } from '../../types';
import type { StatsTimeRange, StatsAggregation } from '../../hooks/useStats';
import {
COLORBLIND_AGENT_PALETTE,
COLORBLIND_LINE_COLORS,
} from '../../constants/colorblindPalettes';
import { COLORBLIND_AGENT_PALETTE } from '../../constants/colorblindPalettes';
// Provider colors (matching AgentComparisonChart)
const PROVIDER_COLORS: Record<string, string> = {
'claude-code': '#a78bfa', // violet
codex: '#34d399', // emerald
opencode: '#60a5fa', // blue
};
// 10 distinct colors for agents
const AGENT_COLORS = [
'#a78bfa', // violet
'#34d399', // emerald
'#60a5fa', // blue
'#f472b6', // pink
'#fbbf24', // amber
'#fb923c', // orange
'#4ade80', // green
'#38bdf8', // sky
'#c084fc', // purple
'#f87171', // red
];
// Data point for a single provider on a single day
interface ProviderDayData {
// Data point for a single agent on a single day
interface AgentDayData {
date: string;
formattedDate: string;
count: number;
duration: number;
}
// All providers' data for a single day
// All agents' data for a single day
interface DayData {
date: string;
formattedDate: string;
providers: Record<string, { count: number; duration: number }>;
agents: Record<string, { count: number; duration: number }>;
}
interface AgentUsageChartProps {
@@ -53,6 +58,8 @@ interface AgentUsageChartProps {
theme: Theme;
/** Enable colorblind-friendly colors */
colorBlindMode?: boolean;
/** Current sessions for mapping IDs to names */
sessions?: Session[];
}
/**
@@ -117,13 +124,40 @@ function formatXAxisDate(dateStr: string, timeRange: StatsTimeRange): string {
}
/**
* Get provider color, with colorblind mode support
* Get agent color based on index, with colorblind mode support
*/
function getProviderColor(provider: string, index: number, colorBlindMode: boolean): string {
function getAgentColor(index: number, colorBlindMode: boolean): string {
if (colorBlindMode) {
return COLORBLIND_AGENT_PALETTE[index % COLORBLIND_AGENT_PALETTE.length];
}
return PROVIDER_COLORS[provider] || COLORBLIND_LINE_COLORS.primary;
return AGENT_COLORS[index % AGENT_COLORS.length];
}
/**
 * Extract a display name from a session ID
 * Session IDs are in format: "sessionId-ai-tabId" or similar
 * Returns the session's user-assigned name when one can be matched,
 * otherwise the first 8 chars of the session UUID.
 */
function getSessionDisplayName(sessionId: string, sessions?: Session[]): string {
	// Stats session IDs may carry tab suffixes like "-ai-tabId", so match
	// against the base session ID via prefix rather than strict equality.
	const namedSession = sessions?.find((s) => sessionId.startsWith(s.id));
	if (namedSession?.name) {
		return namedSession.name;
	}
	// Fallback: a full UUID splits into 5+ dash-separated segments
	// (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx); show the first segment
	// (8 hex chars). Otherwise abbreviate the raw ID itself.
	const segments = sessionId.split('-');
	const base = segments.length >= 5 ? segments[0] : sessionId;
	return base.substring(0, 8).toUpperCase();
}
export function AgentUsageChart({
@@ -131,10 +165,9 @@ export function AgentUsageChart({
timeRange,
theme,
colorBlindMode = false,
sessions,
}: AgentUsageChartProps) {
const [hoveredDay, setHoveredDay] = useState<{ dayIndex: number; provider?: string } | null>(
null
);
const [hoveredDay, setHoveredDay] = useState<{ dayIndex: number; agent?: string } | null>(null);
const [tooltipPos, setTooltipPos] = useState<{ x: number; y: number } | null>(null);
const [metricMode, setMetricMode] = useState<'count' | 'duration'>('count');
@@ -145,29 +178,46 @@ export function AgentUsageChart({
const innerWidth = chartWidth - padding.left - padding.right;
const innerHeight = chartHeight - padding.top - padding.bottom;
// Get list of providers and their data
const { providers, chartData, allDates } = useMemo(() => {
const byAgentByDay = data.byAgentByDay || {};
const providerList = Object.keys(byAgentByDay).sort();
// Get list of agents and their data (limited to top 10 by total queries)
const { agents, chartData, allDates, agentDisplayNames } = useMemo(() => {
const bySessionByDay = data.bySessionByDay || {};
// Collect all unique dates
// Calculate total queries per session to rank them
const sessionTotals: Array<{ sessionId: string; totalQueries: number }> = [];
for (const sessionId of Object.keys(bySessionByDay)) {
const totalQueries = bySessionByDay[sessionId].reduce((sum, day) => sum + day.count, 0);
sessionTotals.push({ sessionId, totalQueries });
}
// Sort by total queries descending and take top 10
sessionTotals.sort((a, b) => b.totalQueries - a.totalQueries);
const topSessions = sessionTotals.slice(0, 10);
const agentList = topSessions.map((s) => s.sessionId);
// Build display name map
const displayNames: Record<string, string> = {};
for (const sessionId of agentList) {
displayNames[sessionId] = getSessionDisplayName(sessionId, sessions);
}
// Collect all unique dates from selected agents
const dateSet = new Set<string>();
for (const provider of providerList) {
for (const day of byAgentByDay[provider]) {
for (const sessionId of agentList) {
for (const day of bySessionByDay[sessionId]) {
dateSet.add(day.date);
}
}
const sortedDates = Array.from(dateSet).sort();
// Build per-provider arrays aligned to all dates
const providerData: Record<string, ProviderDayData[]> = {};
for (const provider of providerList) {
// Build per-agent arrays aligned to all dates
const agentData: Record<string, AgentDayData[]> = {};
for (const sessionId of agentList) {
const dayMap = new Map<string, { count: number; duration: number }>();
for (const day of byAgentByDay[provider]) {
for (const day of bySessionByDay[sessionId]) {
dayMap.set(day.date, { count: day.count, duration: day.duration });
}
providerData[provider] = sortedDates.map((date) => ({
agentData[sessionId] = sortedDates.map((date) => ({
date,
formattedDate: format(parseISO(date), 'EEEE, MMM d, yyyy'),
count: dayMap.get(date)?.count || 0,
@@ -177,26 +227,27 @@ export function AgentUsageChart({
// Build combined day data for tooltips
const combinedData: DayData[] = sortedDates.map((date) => {
const providers: Record<string, { count: number; duration: number }> = {};
for (const provider of providerList) {
const dayData = providerData[provider].find((d) => d.date === date);
const agents: Record<string, { count: number; duration: number }> = {};
for (const sessionId of agentList) {
const dayData = agentData[sessionId].find((d) => d.date === date);
if (dayData) {
providers[provider] = { count: dayData.count, duration: dayData.duration };
agents[sessionId] = { count: dayData.count, duration: dayData.duration };
}
}
return {
date,
formattedDate: format(parseISO(date), 'EEEE, MMM d, yyyy'),
providers,
agents,
};
});
return {
providers: providerList,
chartData: providerData,
agents: agentList,
chartData: agentData,
allDates: combinedData,
agentDisplayNames: displayNames,
};
}, [data.byAgentByDay]);
}, [data.bySessionByDay, sessions]);
// Calculate scales
const { xScale, yScale, yTicks } = useMemo(() => {
@@ -208,13 +259,13 @@ export function AgentUsageChart({
};
}
// Find max value across all providers
// Find max value across all agents
let maxValue = 1;
for (const provider of providers) {
const providerMax = Math.max(
...chartData[provider].map((d) => (metricMode === 'count' ? d.count : d.duration))
for (const agent of agents) {
const agentMax = Math.max(
...chartData[agent].map((d) => (metricMode === 'count' ? d.count : d.duration))
);
maxValue = Math.max(maxValue, providerMax);
maxValue = Math.max(maxValue, agentMax);
}
// Add 10% padding
@@ -235,16 +286,16 @@ export function AgentUsageChart({
: Array.from({ length: tickCount }, (_, i) => (yMax / (tickCount - 1)) * i);
return { xScale: xScaleFn, yScale: yScaleFn, yTicks: yTicksArr };
}, [allDates, providers, chartData, metricMode, chartHeight, innerWidth, innerHeight, padding]);
}, [allDates, agents, chartData, metricMode, chartHeight, innerWidth, innerHeight, padding]);
// Generate line paths for each provider
// Generate line paths for each agent
const linePaths = useMemo(() => {
const paths: Record<string, string> = {};
for (const provider of providers) {
const providerDays = chartData[provider];
if (providerDays.length === 0) continue;
for (const agent of agents) {
const agentDays = chartData[agent];
if (agentDays.length === 0) continue;
paths[provider] = providerDays
paths[agent] = agentDays
.map((day, idx) => {
const x = xScale(idx);
const y = yScale(metricMode === 'count' ? day.count : day.duration);
@@ -253,12 +304,12 @@ export function AgentUsageChart({
.join(' ');
}
return paths;
}, [providers, chartData, xScale, yScale, metricMode]);
}, [agents, chartData, xScale, yScale, metricMode]);
// Handle mouse events
const handleMouseEnter = useCallback(
(dayIndex: number, provider: string, event: React.MouseEvent<SVGCircleElement>) => {
setHoveredDay({ dayIndex, provider });
(dayIndex: number, agent: string, event: React.MouseEvent<SVGCircleElement>) => {
setHoveredDay({ dayIndex, agent });
const rect = event.currentTarget.getBoundingClientRect();
setTooltipPos({
x: rect.left + rect.width / 2,
@@ -278,7 +329,7 @@ export function AgentUsageChart({
className="p-4 rounded-lg"
style={{ backgroundColor: theme.colors.bgMain }}
role="figure"
aria-label={`Provider usage chart showing ${metricMode === 'count' ? 'query counts' : 'duration'} over time. ${providers.length} providers displayed.`}
aria-label={`Agent usage chart showing ${metricMode === 'count' ? 'query counts' : 'duration'} over time. ${agents.length} agents displayed.`}
>
{/* Header with title and metric toggle */}
<div className="flex items-center justify-between mb-4">
@@ -319,7 +370,7 @@ export function AgentUsageChart({
{/* Chart container */}
<div className="relative">
{allDates.length === 0 || providers.length === 0 ? (
{allDates.length === 0 || agents.length === 0 ? (
<div
className="flex items-center justify-center"
style={{ height: chartHeight, color: theme.colors.textDim }}
@@ -332,7 +383,7 @@ export function AgentUsageChart({
viewBox={`0 0 ${chartWidth} ${chartHeight}`}
preserveAspectRatio="xMidYMid meet"
role="img"
aria-label={`Line chart showing ${metricMode === 'count' ? 'query counts' : 'duration'} per provider over time`}
aria-label={`Line chart showing ${metricMode === 'count' ? 'query counts' : 'duration'} per agent over time`}
>
{/* Grid lines */}
{yTicks.map((tick, idx) => (
@@ -386,13 +437,13 @@ export function AgentUsageChart({
);
})}
{/* Lines for each provider */}
{providers.map((provider, providerIdx) => {
const color = getProviderColor(provider, providerIdx, colorBlindMode);
{/* Lines for each agent */}
{agents.map((agent, agentIdx) => {
const color = getAgentColor(agentIdx, colorBlindMode);
return (
<path
key={`line-${provider}`}
d={linePaths[provider]}
key={`line-${agent}`}
d={linePaths[agent]}
fill="none"
stroke={color}
strokeWidth={2}
@@ -403,18 +454,17 @@ export function AgentUsageChart({
);
})}
{/* Data points for each provider */}
{providers.map((provider, providerIdx) => {
const color = getProviderColor(provider, providerIdx, colorBlindMode);
return chartData[provider].map((day, dayIdx) => {
{/* Data points for each agent */}
{agents.map((agent, agentIdx) => {
const color = getAgentColor(agentIdx, colorBlindMode);
return chartData[agent].map((day, dayIdx) => {
const x = xScale(dayIdx);
const y = yScale(metricMode === 'count' ? day.count : day.duration);
const isHovered =
hoveredDay?.dayIndex === dayIdx && hoveredDay?.provider === provider;
const isHovered = hoveredDay?.dayIndex === dayIdx && hoveredDay?.agent === agent;
return (
<circle
key={`point-${provider}-${dayIdx}`}
key={`point-${agent}-${dayIdx}`}
cx={x}
cy={y}
r={isHovered ? 6 : 4}
@@ -425,7 +475,7 @@ export function AgentUsageChart({
cursor: 'pointer',
transition: 'r 0.15s ease',
}}
onMouseEnter={(e) => handleMouseEnter(dayIdx, provider, e)}
onMouseEnter={(e) => handleMouseEnter(dayIdx, agent, e)}
onMouseLeave={handleMouseLeave}
/>
);
@@ -462,14 +512,14 @@ export function AgentUsageChart({
>
<div className="font-medium mb-1">{allDates[hoveredDay.dayIndex].formattedDate}</div>
<div style={{ color: theme.colors.textDim }}>
{providers.map((provider, idx) => {
const dayData = allDates[hoveredDay.dayIndex].providers[provider];
{agents.map((agent, idx) => {
const dayData = allDates[hoveredDay.dayIndex].agents[agent];
if (!dayData || (dayData.count === 0 && dayData.duration === 0)) return null;
const color = getProviderColor(provider, idx, colorBlindMode);
const color = getAgentColor(idx, colorBlindMode);
return (
<div key={provider} className="flex items-center gap-2">
<div key={agent} className="flex items-center gap-2">
<span className="w-2 h-2 rounded-full" style={{ backgroundColor: color }} />
<span>{provider}:</span>
<span>{agentDisplayNames[agent]}:</span>
<span style={{ color: theme.colors.textMain }}>
{metricMode === 'count'
? `${dayData.count} ${dayData.count === 1 ? 'query' : 'queries'}`
@@ -488,13 +538,13 @@ export function AgentUsageChart({
className="flex items-center justify-center gap-4 mt-3 pt-3 border-t flex-wrap"
style={{ borderColor: theme.colors.border }}
>
{providers.map((provider, idx) => {
const color = getProviderColor(provider, idx, colorBlindMode);
{agents.map((agent, idx) => {
const color = getAgentColor(idx, colorBlindMode);
return (
<div key={provider} className="flex items-center gap-1.5">
<div key={agent} className="flex items-center gap-1.5">
<div className="w-3 h-0.5 rounded" style={{ backgroundColor: color }} />
<span className="text-xs" style={{ color: theme.colors.textDim }}>
{provider}
{agentDisplayNames[agent]}
</span>
</div>
);

View File

@@ -75,8 +75,10 @@ interface StatsAggregation {
sessionsByAgent: Record<string, number>;
sessionsByDay: Array<{ date: string; count: number }>;
avgSessionDuration: number;
// Per-agent per-day breakdown for provider usage chart
// Per-provider per-day breakdown for provider comparison
byAgentByDay: Record<string, Array<{ date: string; count: number; duration: number }>>;
// Per-session per-day breakdown for agent usage chart
bySessionByDay: Record<string, Array<{ date: string; count: number; duration: number }>>;
}
// View mode options for the dashboard
@@ -971,6 +973,7 @@ export function UsageDashboardModal({
timeRange={timeRange}
theme={theme}
colorBlindMode={colorBlindMode}
sessions={sessions}
/>
</ChartErrorBoundary>
</div>

View File

@@ -2162,6 +2162,7 @@ interface MaestroAPI {
sessionsByDay: Array<{ date: string; count: number }>;
avgSessionDuration: number;
byAgentByDay: Record<string, Array<{ date: string; count: number; duration: number }>>;
bySessionByDay: Record<string, Array<{ date: string; count: number; duration: number }>>;
}>;
// Export query events to CSV
exportCsv: (range: 'day' | 'week' | 'month' | 'year' | 'all') => Promise<string>;

View File

@@ -190,6 +190,7 @@ export function useAgentExecution(deps: UseAgentExecutionDeps): UseAgentExecutio
let agentSessionId: string | undefined;
let responseText = '';
let taskUsageStats: UsageStats | undefined;
const queryStartTime = Date.now(); // Track start time for stats
// Array to collect cleanup functions as listeners are registered
const cleanupFns: (() => void)[] = [];
@@ -231,6 +232,25 @@ export function useAgentExecution(deps: UseAgentExecutionDeps): UseAgentExecutio
// Clean up listeners
cleanup();
// Record query stats for Auto Run queries
const queryDuration = Date.now() - queryStartTime;
const activeTab = getActiveTab(session);
window.maestro.stats
.recordQuery({
sessionId: sessionId, // Use the original session ID, not the batch ID
agentType: session.toolType,
source: 'auto', // Auto Run queries are always 'auto'
startTime: queryStartTime,
duration: queryDuration,
projectPath: effectiveCwd,
tabId: activeTab?.id,
isRemote: session.sessionSshRemoteConfig?.enabled ?? false,
})
.catch((err) => {
// Don't fail the batch flow if stats recording fails
console.warn('[spawnAgentForSession] Failed to record query stats:', err);
});
// Check for queued items BEFORE updating state (using sessionsRef for latest state)
const currentSession = sessionsRef.current.find((s) => s.id === sessionId);
let queuedItemToProcess: { sessionId: string; item: QueuedItem } | null = null;

View File

@@ -34,8 +34,10 @@ export interface StatsAggregation {
sessionsByAgent: Record<string, number>;
sessionsByDay: Array<{ date: string; count: number }>;
avgSessionDuration: number;
// Per-agent per-day breakdown for provider usage chart
// Per-provider per-day breakdown for provider comparison
byAgentByDay: Record<string, Array<{ date: string; count: number; duration: number }>>;
// Per-session per-day breakdown for agent usage chart
bySessionByDay: Record<string, Array<{ date: string; count: number; duration: number }>>;
}
// Return type for the useStats hook

View File

@@ -211,8 +211,8 @@
<!-- React DevTools: connects to standalone react-devtools app (npm install -g react-devtools) -->
<!-- Only attempts connection in dev mode (Vite serves on localhost:5173) -->
<script>
if (window.location.hostname === 'localhost') {
var script = document.createElement('script');
var script = document.createElement('script');
if (window.location.hostname === 'localhost') {
script.src = 'http://localhost:8097';
script.async = false;
document.head.appendChild(script);

View File

@@ -93,8 +93,10 @@ export interface StatsAggregation {
sessionsByDay: Array<{ date: string; count: number }>;
/** Average session duration in ms (for closed sessions) */
avgSessionDuration: number;
/** Queries and duration by agent per day (for provider usage chart) */
/** Queries and duration by provider per day (for provider comparison) */
byAgentByDay: Record<string, Array<{ date: string; count: number; duration: number }>>;
/** Queries and duration by Maestro session per day (for agent usage chart) */
bySessionByDay: Record<string, Array<{ date: string; count: number; duration: number }>>;
}
/**

View File

@@ -38,6 +38,23 @@ function isTemplatePlaceholder(text: string): boolean {
return placeholderPatterns.some((pattern) => pattern.test(text.trim()));
}
/**
 * Check if text is a conversational filler that should be stripped.
 * These are words/phrases that add no information value to a scientific log
 * (e.g. "Perfect!", "All set.", "Got it").
 */
function isConversationalFiller(text: string): boolean {
	const trimmed = text.trim();
	// Each pattern anchors the full (trimmed) text so only pure filler matches,
	// never a sentence that merely starts with one of these words.
	const fillerPatterns = [
		/^(excellent|perfect|great|awesome|wonderful|fantastic|good|nice|cool|done|ok|okay|alright|sure|yes|yeah|yep|absolutely|certainly|definitely|indeed|affirmative)[\s!.]*$/i,
		/^(that's|that is|this is|it's|it is)\s+(great|good|perfect|excellent|done|complete|finished)[\s!.]*$/i,
		/^(all\s+)?(set|done|ready|complete|finished|good\s+to\s+go)[\s!.]*$/i,
		/^(looks?\s+)?(good|great|perfect)[\s!.]*$/i,
		/^(here\s+you\s+go|there\s+you\s+go|there\s+we\s+go|here\s+it\s+is)[\s!.]*$/i,
		/^(got\s+it|understood|will\s+do|on\s+it|right\s+away)[\s!.]*$/i,
		/^(no\s+problem|no\s+worries|happy\s+to\s+help)[\s!.]*$/i,
	];
	for (const pattern of fillerPatterns) {
		if (pattern.test(trimmed)) {
			return true;
		}
	}
	return false;
}
/**
* Check if a response indicates nothing meaningful to report.
* Looks for the NOTHING_TO_REPORT sentinel token anywhere in the response.
@@ -94,15 +111,20 @@ export function parseSynopsis(response: string): ParsedSynopsis {
let shortSummary = summaryMatch?.[1]?.trim() || '';
let details = detailsMatch?.[1]?.trim() || '';
// Check if summary is a template placeholder (model output format instructions literally)
if (!shortSummary || isTemplatePlaceholder(shortSummary)) {
// Try to find actual content by looking for non-placeholder lines
// Check if summary is a template placeholder or conversational filler
if (
!shortSummary ||
isTemplatePlaceholder(shortSummary) ||
isConversationalFiller(shortSummary)
) {
// Try to find actual content by looking for non-placeholder, non-filler lines
const lines = clean.split('\n').filter((line) => {
const trimmed = line.trim();
return (
trimmed &&
!trimmed.startsWith('**') &&
!isTemplatePlaceholder(trimmed) &&
!isConversationalFiller(trimmed) &&
!trimmed.match(/^Rules:/i) &&
!trimmed.match(/^-\s+Be specific/i) &&
!trimmed.match(/^-\s+Focus only/i) &&

View File

@@ -2,44 +2,58 @@ import { defineConfig } from 'vite';
import react from '@vitejs/plugin-react';
import path from 'path';
import { readFileSync } from 'fs';
import { execSync } from 'child_process';
// Read version from package.json as fallback
const packageJson = JSON.parse(readFileSync(path.join(__dirname, 'package.json'), 'utf-8'));
// Use VITE_APP_VERSION env var if set (during CI builds), otherwise use package.json
const appVersion = process.env.VITE_APP_VERSION || packageJson.version;
// Get the first 8 chars of git commit hash for dev mode
function getCommitHash(): string {
	try {
		// Note: execSync is safe here - no user input, static git command
		const fullHash = execSync('git rev-parse HEAD', { encoding: 'utf-8' });
		return fullHash.trim().slice(0, 8);
	} catch {
		// Not a git checkout (or git unavailable) - omit the hash
		return '';
	}
}
const disableHmr = process.env.DISABLE_HMR === '1';
export default defineConfig(({ mode }) => ({
plugins: [react({ fastRefresh: !disableHmr })],
root: path.join(__dirname, 'src/renderer'),
base: './',
define: {
__APP_VERSION__: JSON.stringify(appVersion),
// Explicitly define NODE_ENV for React and related packages
'process.env.NODE_ENV': JSON.stringify(mode),
},
resolve: {
alias: {
// In development, use wdyr.dev.ts which loads why-did-you-render
// In production, use wdyr.ts which is empty (prevents bundling the library)
'./wdyr': mode === 'development'
? path.join(__dirname, 'src/renderer/wdyr.dev.ts')
: path.join(__dirname, 'src/renderer/wdyr.ts'),
},
},
esbuild: {
// Strip console.* and debugger in production builds
drop: mode === 'production' ? ['console', 'debugger'] : [],
},
build: {
outDir: path.join(__dirname, 'dist/renderer'),
emptyOutDir: true,
},
server: {
port: 5173,
hmr: !disableHmr,
// Disable file watching entirely when HMR is disabled to prevent any reloads
watch: disableHmr ? null : undefined,
},
plugins: [react({ fastRefresh: !disableHmr })],
root: path.join(__dirname, 'src/renderer'),
base: './',
define: {
__APP_VERSION__: JSON.stringify(appVersion),
// Show commit hash only in development mode
__COMMIT_HASH__: JSON.stringify(mode === 'development' ? getCommitHash() : ''),
// Explicitly define NODE_ENV for React and related packages
'process.env.NODE_ENV': JSON.stringify(mode),
},
resolve: {
alias: {
// In development, use wdyr.dev.ts which loads why-did-you-render
// In production, use wdyr.ts which is empty (prevents bundling the library)
'./wdyr':
mode === 'development'
? path.join(__dirname, 'src/renderer/wdyr.dev.ts')
: path.join(__dirname, 'src/renderer/wdyr.ts'),
},
},
esbuild: {
// Strip console.* and debugger in production builds
drop: mode === 'production' ? ['console', 'debugger'] : [],
},
build: {
outDir: path.join(__dirname, 'dist/renderer'),
emptyOutDir: true,
},
server: {
port: process.env.VITE_PORT ? parseInt(process.env.VITE_PORT) : 5173,
hmr: !disableHmr,
// Disable file watching entirely when HMR is disabled to prevent any reloads
watch: disableHmr ? null : undefined,
},
}));

View File

@@ -11,6 +11,7 @@ import { defineConfig } from 'vite';
import react from '@vitejs/plugin-react';
import path from 'path';
import { readFileSync } from 'fs';
import { execFileSync } from 'child_process';
// Read version from package.json
const packageJson = JSON.parse(
@@ -18,6 +19,19 @@ const packageJson = JSON.parse(
);
const appVersion = process.env.VITE_APP_VERSION || packageJson.version;
// Get git hash
function getGitHash() {
  // Resolve the short (8-char) commit hash for build stamping; degrade
  // gracefully when git is missing or this is not a git checkout.
  try {
    const shortHash = execFileSync('git', ['rev-parse', '--short=8', 'HEAD'], {
      encoding: 'utf8',
      stdio: ['pipe', 'pipe', 'pipe'],
    });
    return shortHash.trim();
  } catch {
    return 'unknown';
  }
}
const gitHash = getGitHash();
export default defineConfig(({ mode }) => ({
plugins: [react()],
@@ -33,6 +47,7 @@ export default defineConfig(({ mode }) => ({
define: {
__APP_VERSION__: JSON.stringify(appVersion),
__GIT_HASH__: JSON.stringify(gitHash),
},
esbuild: {
@@ -128,7 +143,7 @@ export default defineConfig(({ mode }) => ({
// Development server (for testing web interface standalone)
server: {
port: 5174, // Different from renderer dev server (5173)
port: process.env.VITE_WEB_PORT ? parseInt(process.env.VITE_WEB_PORT) : 5174, // Different from renderer dev server (5173)
strictPort: true,
// Proxy API calls to the running Maestro app during development
proxy: {