Merge pull request #108 from pedramamini/openspec-integration

feat(openspec): integrate OpenSpec spec-driven development commands
This commit is contained in:
Pedram Amini
2025-12-28 12:51:55 -06:00
committed by GitHub
33 changed files with 3275 additions and 123 deletions

View File

@@ -109,6 +109,8 @@ src/
| Add setting | `src/renderer/hooks/useSettings.ts`, `src/main/index.ts` |
| Add template variable | `src/shared/templateVariables.ts`, `src/renderer/utils/templateVariables.ts` |
| Modify system prompts | `src/prompts/*.md` (wizard, Auto Run, etc.) |
| Add Spec-Kit command | `src/prompts/speckit/`, `src/main/speckit-manager.ts` |
| Add OpenSpec command | `src/prompts/openspec/`, `src/main/openspec-manager.ts` |
| Add CLI command | `src/cli/commands/`, `src/cli/index.ts` |
| Configure agent | `src/main/agent-detector.ts`, `src/main/agent-capabilities.ts` |
| Add agent output parser | `src/main/parsers/`, `src/main/parsers/index.ts` |

View File

@@ -307,6 +307,23 @@ To add a built-in slash command that users see by default, add it to the Custom
For commands that need programmatic behavior (not just prompts), handle them in `App.tsx` where slash commands are processed before being sent to the agent.
### Adding Bundled AI Command Sets (Spec-Kit / OpenSpec Pattern)
Maestro bundles two spec-driven workflow systems. To add a similar bundled command set:
1. **Create prompts directory**: `src/prompts/my-workflow/`
2. **Add command markdown files**: `my-workflow.command1.md`, `my-workflow.command2.md`
3. **Create index.ts**: Export command definitions with IDs, slash commands, descriptions, and prompts
4. **Create metadata.json**: Track source version, commit SHA, and last refreshed date
5. **Create manager**: `src/main/my-workflow-manager.ts` (handles loading, saving, refreshing)
6. **Add IPC handlers**: In `src/main/index.ts` for get/set/refresh operations
7. **Add preload API**: In `src/main/preload.ts` to expose to renderer
8. **Create UI panel**: Similar to `OpenSpecCommandsPanel.tsx` or `SpecKitCommandsPanel.tsx`
9. **Add to extraResources**: In `package.json` build config for all platforms
10. **Create refresh script**: `scripts/refresh-my-workflow.mjs`
Reference the existing Spec-Kit (`src/prompts/speckit/`, `src/main/speckit-manager.ts`) and OpenSpec (`src/prompts/openspec/`, `src/main/openspec-manager.ts`) implementations.
### Adding a New Theme
Maestro has 16 themes across 3 modes: dark, light, and vibe.
@@ -698,15 +715,23 @@ All PRs must pass these checks before review:
## Building for Release
### 0. Refresh Spec Kit Prompts (Optional)
### 0. Refresh AI Command Prompts (Optional)
Before releasing, check if GitHub's spec-kit has updates:
Before releasing, check if the upstream spec-kit and OpenSpec repositories have updates:
```bash
# Refresh GitHub's spec-kit prompts
npm run refresh-speckit
# Refresh Fission-AI's OpenSpec prompts
npm run refresh-openspec
```
This fetches the latest prompts from [github/spec-kit](https://github.com/github/spec-kit) and updates the bundled files in `src/prompts/speckit/`. The custom `/speckit.implement` prompt is never overwritten.
These scripts fetch the latest prompts from their respective repositories:
- **Spec-Kit**: [github/spec-kit](https://github.com/github/spec-kit) → `src/prompts/speckit/`
- **OpenSpec**: [Fission-AI/OpenSpec](https://github.com/Fission-AI/OpenSpec) → `src/prompts/openspec/`
Custom Maestro-specific prompts (`/speckit.implement`, `/openspec.implement`, `/openspec.help`) are never overwritten by the refresh scripts.
Review any changes with `git diff` before committing.

View File

@@ -16,7 +16,7 @@ Settings are organized into tabs:
| **Shortcuts** | Customize keyboard shortcuts (see [Keyboard Shortcuts](./keyboard-shortcuts)) |
| **Appearance** | Font size, UI density |
| **Notifications** | Sound alerts, text-to-speech settings |
| **AI Commands** | View and edit slash commands and Spec-Kit prompts |
| **AI Commands** | View and edit slash commands, [Spec-Kit](./speckit-commands), and [OpenSpec](./openspec-commands) prompts |
## Checking for Updates

View File

@@ -55,14 +55,16 @@
"group": "Usage",
"pages": [
"general-usage",
"keyboard-shortcuts",
"slash-commands",
"speckit-commands",
"openspec-commands",
"history",
"context-management",
"autorun-playbooks",
"git-worktrees",
"group-chat",
"remote-access",
"slash-commands",
"speckit-commands",
"configuration"
]
},
@@ -77,7 +79,6 @@
"group": "Reference",
"pages": [
"achievements",
"keyboard-shortcuts",
"troubleshooting"
],
"icon": "life-ring"

View File

@@ -22,7 +22,7 @@ icon: sparkles
- 🔀 **Git Integration** - Automatic repo detection, branch display, diff viewer, commit logs, and git-aware file completion. Work with git without leaving the app.
- 📁 **[File Explorer](./general-usage)** - Browse project files with syntax highlighting, markdown preview, and image viewing. Reference files in prompts with `@` mentions.
- 🔍 **[Powerful Output Filtering](./general-usage)** - Search and filter AI output with include/exclude modes, regex support, and per-response local filters.
-**[Slash Commands](./slash-commands)** - Extensible command system with autocomplete. Create custom commands with template variables for your workflows.
-**[Slash Commands](./slash-commands)** - Extensible command system with autocomplete. Create custom commands with template variables for your workflows. Includes bundled [Spec-Kit](./speckit-commands) for feature specifications and [OpenSpec](./openspec-commands) for change proposals.
- 💾 **Draft Auto-Save** - Never lose work. Drafts are automatically saved and restored per session.
- 🔊 **Speakable Notifications** - Audio alerts with text-to-speech announcements when agents complete tasks.
- 🎨 **[Beautiful Themes](https://github.com/pedramamini/Maestro/blob/main/THEMES.md)** - 16 themes including Dracula, Monokai, Nord, Tokyo Night, GitHub Light, and more.

154
docs/openspec-commands.md Normal file
View File

@@ -0,0 +1,154 @@
---
title: OpenSpec Commands
description: Spec-driven development workflow for managing code changes with AI-assisted proposal, implementation, and archival.
icon: git-pull-request
---
OpenSpec is a spec-driven development tool from [Fission-AI/OpenSpec](https://github.com/Fission-AI/OpenSpec) that ensures alignment between humans and AI coding assistants before any code is written. Maestro bundles these workflow commands and keeps them updated automatically.
## OpenSpec vs. Spec-Kit
Maestro offers two complementary spec-driven development tools:
| Feature | OpenSpec | Spec-Kit |
|---------|----------|----------|
| **Focus** | Change management & proposals | Feature specifications |
| **Workflow** | Proposal → Apply → Archive | Constitution → Specify → Plan → Tasks |
| **Best For** | Iterative changes, brownfield projects | New features, greenfield development |
| **Output** | Change proposals with spec deltas | Feature specifications and task lists |
| **Directory** | `openspec/` | Project root or designated folder |
**Use OpenSpec when:**
- Making iterative changes to existing features
- You need explicit change proposals before implementation
- Working on brownfield projects with existing specifications
- You want a clear archive of completed changes
**Use Spec-Kit when:**
- Defining new features from scratch
- Establishing project constitutions and principles
- Creating detailed feature specifications
- Breaking down work into implementation tasks
Both tools integrate with Maestro's Auto Run for autonomous execution.
## Core Workflow
OpenSpec follows a three-stage cycle:
### Stage 1: Proposal (`/openspec.proposal`)
Create a change proposal before writing any code:
1. Reviews existing specs and active changes
2. Scaffolds `proposal.md`, `tasks.md`, and optional `design.md`
3. Creates spec deltas showing what will be ADDED, MODIFIED, or REMOVED
4. Validates the proposal structure
**Creates:** An `openspec/changes/<change-id>/` directory with:
- `proposal.md` - Why and what
- `tasks.md` - Implementation checklist
- `specs/<capability>/spec.md` - Spec deltas
### Stage 2: Apply (`/openspec.apply`)
Implement the approved proposal:
1. Reads proposal and tasks
2. Implements tasks sequentially
3. Updates task checkboxes as work completes
4. Ensures approval gate is passed before starting
**Tip:** Only start implementation after the proposal is reviewed and approved.
### Stage 3: Archive (`/openspec.archive`)
After deployment, archive the completed change:
1. Moves `changes/<name>/` to `changes/archive/YYYY-MM-DD-<name>/`
2. Updates source-of-truth specs if capabilities changed
3. Validates the archived change
## Maestro-Specific Commands
### `/openspec.implement` - Generate Auto Run Documents
Bridges OpenSpec with Maestro's Auto Run:
1. Reads the proposal and tasks from a change
2. Converts tasks into Auto Run document format
3. Saves to `Auto Run Docs/` with task checkboxes
4. Supports worktree mode for parallel execution
### `/openspec.help` - Workflow Overview
Get help with OpenSpec concepts and Maestro integration.
## Directory Structure
OpenSpec uses a clear separation between current truth and proposed changes:
```
openspec/
├── project.md # Project conventions
├── specs/ # Current truth - what IS built
│ └── <capability>/
│ ├── spec.md # Requirements and scenarios
│ └── design.md # Technical patterns
└── changes/ # Proposals - what SHOULD change
├── <change-name>/
│ ├── proposal.md # Why, what, impact
│ ├── tasks.md # Implementation checklist
│ └── specs/ # Spec deltas (ADDED/MODIFIED/REMOVED)
└── archive/ # Completed changes
```
## Spec Delta Format
Changes use explicit operation headers:
```markdown
## ADDED Requirements
### Requirement: New Feature
The system SHALL provide...
#### Scenario: Success case
- **WHEN** user performs action
- **THEN** expected result
## MODIFIED Requirements
### Requirement: Existing Feature
[Complete updated requirement text]
## REMOVED Requirements
### Requirement: Old Feature
**Reason**: [Why removing]
**Migration**: [How to handle]
```
## Viewing & Managing Commands
Access OpenSpec commands via **Settings → AI Commands** tab. Here you can:
- **View all commands** with descriptions
- **Check for Updates** to pull the latest workflow from GitHub
- **Expand commands** to see their full prompts
- **Customize prompts** (modifications are preserved across updates)
## Auto-Updates
OpenSpec prompts are synced from the [Fission-AI/OpenSpec repository](https://github.com/Fission-AI/OpenSpec):
1. Open **Settings → AI Commands**
2. Click **Check for Updates** in the OpenSpec section
3. New workflow improvements are downloaded
4. Your custom modifications are preserved
## Tips for Best Results
- **Proposal first** - Never start implementation without an approved proposal
- **Keep changes focused** - One logical change per proposal
- **Use meaningful IDs** - `add-user-auth` not `change-1`
- **Include scenarios** - Every requirement needs at least one `#### Scenario:`
- **Validate often** - Run `openspec validate --strict` before sharing
- **Archive promptly** - Archive changes after deployment to keep `changes/` clean

View File

@@ -67,3 +67,23 @@ Commands support **template variables** that are automatically substituted at ru
It's {{WEEKDAY}}, {{DATE}}. I'm on branch {{GIT_BRANCH}} at {{AGENT_PATH}}.
Summarize what I worked on yesterday and suggest priorities for today.
```
## Spec-Kit Commands
Maestro bundles [GitHub's spec-kit](https://github.com/github/spec-kit) methodology for structured feature development. Commands include `/speckit.constitution`, `/speckit.specify`, `/speckit.clarify`, `/speckit.plan`, `/speckit.tasks`, and `/speckit.implement`.
See [Spec-Kit Commands](/speckit-commands) for the complete workflow guide.
## OpenSpec Commands
Maestro bundles [OpenSpec](https://github.com/Fission-AI/OpenSpec) for spec-driven change management. These commands help you propose, implement, and archive changes systematically:
| Command | Description |
|---------|-------------|
| `/openspec.proposal` | Create a change proposal with spec deltas before writing code |
| `/openspec.apply` | Implement an approved proposal by following the tasks |
| `/openspec.archive` | Archive completed changes after deployment |
| `/openspec.implement` | Generate Auto Run documents from a proposal (Maestro-specific) |
| `/openspec.help` | Get help with OpenSpec workflow and concepts |
See [OpenSpec Commands](/openspec-commands) for the complete workflow guide and directory structure.

4
package-lock.json generated
View File

@@ -1,12 +1,12 @@
{
"name": "maestro",
"version": "0.12.2",
"version": "0.12.3",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "maestro",
"version": "0.12.2",
"version": "0.12.3",
"hasInstallScript": true,
"license": "AGPL 3.0",
"dependencies": {

View File

@@ -47,7 +47,8 @@
"test:integration": "vitest run --config vitest.integration.config.ts",
"test:integration:watch": "vitest --config vitest.integration.config.ts",
"test:performance": "vitest run --config vitest.performance.config.mts",
"refresh-speckit": "node scripts/refresh-speckit.mjs"
"refresh-speckit": "node scripts/refresh-speckit.mjs",
"refresh-openspec": "node scripts/refresh-openspec.mjs"
},
"build": {
"npmRebuild": false,
@@ -101,6 +102,10 @@
{
"from": "src/prompts/speckit",
"to": "prompts/speckit"
},
{
"from": "src/prompts/openspec",
"to": "prompts/openspec"
}
]
},
@@ -129,6 +134,10 @@
{
"from": "src/prompts/speckit",
"to": "prompts/speckit"
},
{
"from": "src/prompts/openspec",
"to": "prompts/openspec"
}
]
},
@@ -166,6 +175,10 @@
{
"from": "src/prompts/speckit",
"to": "prompts/speckit"
},
{
"from": "src/prompts/openspec",
"to": "prompts/openspec"
}
]
},

View File

@@ -0,0 +1,216 @@
#!/usr/bin/env node
/**
* Refresh OpenSpec Prompts
*
* Fetches the latest OpenSpec prompts from GitHub by parsing AGENTS.md
* and extracts the three workflow stages (proposal, apply, archive).
*
* Unlike spec-kit which uses ZIP releases, OpenSpec bundles all workflow
* instructions in a single AGENTS.md file that we parse into sections.
*
* Usage: npm run refresh-openspec
*/
import fs from 'fs';
import path from 'path';
import { fileURLToPath } from 'url';
import https from 'https';
// ESM has no built-in __dirname; derive it from this module's URL.
const __dirname = path.dirname(fileURLToPath(import.meta.url));
// Bundled OpenSpec prompt directory and the metadata file that tracks
// which upstream commit the prompts were last synced from.
const OPENSPEC_DIR = path.join(__dirname, '..', 'src', 'prompts', 'openspec');
const METADATA_PATH = path.join(OPENSPEC_DIR, 'metadata.json');
// GitHub OpenSpec repository info
const GITHUB_API = 'https://api.github.com';
const REPO_OWNER = 'Fission-AI';
const REPO_NAME = 'OpenSpec';
// Raw URL of the upstream AGENTS.md that bundles all workflow instructions.
const AGENTS_MD_URL = `https://raw.githubusercontent.com/${REPO_OWNER}/${REPO_NAME}/main/openspec/AGENTS.md`;
// Commands to extract from AGENTS.md (we skip custom commands like 'help' and 'implement')
const UPSTREAM_COMMANDS = ['proposal', 'apply', 'archive'];
// Section markers for parsing AGENTS.md
// Stage headers are formatted as: ### Stage N: Title
// Each entry pairs a start regex (the stage's own heading) with an end
// regex (the next stage heading, or the next level-2 heading for the
// final stage). If upstream renames a stage heading, extraction for that
// section silently yields nothing — the refresh script reports it.
const SECTION_MARKERS = {
  proposal: {
    start: /^###\s*Stage\s*1[:\s]+Creating\s+Changes/i,
    end: /^###\s*Stage\s*2[:\s]+/i,
  },
  apply: {
    start: /^###\s*Stage\s*2[:\s]+Implementing\s+Changes/i,
    end: /^###\s*Stage\s*3[:\s]+/i,
  },
  archive: {
    start: /^###\s*Stage\s*3[:\s]+Archiving\s+Changes/i,
    end: /^##[^#]/, // End at next level-2 heading or end of file
  },
};
/**
 * Make an HTTPS GET request and buffer the full response body as a string.
 *
 * Follows redirects (301/302/307/308). The `Location` header may be
 * relative per RFC 7231, so it is resolved against the current URL; the
 * redirect chain is capped to avoid loops between misconfigured endpoints.
 *
 * @param {string} url - Absolute https:// URL to fetch.
 * @param {{headers?: Record<string, string>}} [options] - Extra request headers.
 * @param {number} [redirectsLeft] - Remaining redirects allowed (default 5).
 * @returns {Promise<{data: string, headers: object}>} Body text and response headers.
 */
function httpsGet(url, options = {}, redirectsLeft = 5) {
  return new Promise((resolve, reject) => {
    const headers = {
      'User-Agent': 'Maestro-OpenSpec-Refresher',
      ...options.headers,
    };
    https.get(url, { headers }, (res) => {
      const { statusCode } = res;
      // Handle redirects (including 307/308, which preserve the method)
      if (statusCode === 301 || statusCode === 302 || statusCode === 307 || statusCode === 308) {
        res.resume(); // drain the redirect body so the socket is released
        if (!res.headers.location) {
          return reject(new Error(`HTTP ${statusCode} redirect with no Location header: ${url}`));
        }
        if (redirectsLeft <= 0) {
          return reject(new Error(`Too many redirects: ${url}`));
        }
        // Location may be relative; resolve against the current URL.
        const nextUrl = new URL(res.headers.location, url).toString();
        return resolve(httpsGet(nextUrl, options, redirectsLeft - 1));
      }
      if (statusCode !== 200) {
        reject(new Error(`HTTP ${statusCode}: ${url}`));
        return;
      }
      let data = '';
      res.on('data', chunk => data += chunk);
      res.on('end', () => resolve({ data, headers: res.headers }));
      res.on('error', reject);
    }).on('error', reject);
  });
}
/**
 * Parse AGENTS.md and extract workflow sections as prompts.
 *
 * For each configured section, collects lines starting at the section's
 * start-marker heading up to (but not including) its end marker, strips
 * trailing blank lines, and stores the result keyed by section id.
 * Sections whose start marker never matches are simply omitted.
 *
 * @param {string} content - Raw AGENTS.md markdown.
 * @param {Record<string, {start: RegExp, end: RegExp}>} [sectionMarkers]
 *   Marker table to extract with; defaults to the module-level
 *   SECTION_MARKERS so existing callers are unchanged. Passing a custom
 *   table lets other scripts (and tests) reuse this parser.
 * @returns {Record<string, string>} Map of section id -> extracted markdown.
 */
function parseAgentsMd(content, sectionMarkers = SECTION_MARKERS) {
  const result = {};
  const lines = content.split('\n');
  for (const [sectionId, markers] of Object.entries(sectionMarkers)) {
    let inSection = false;
    const sectionLines = [];
    for (const line of lines) {
      if (!inSection) {
        if (markers.start.test(line)) {
          inSection = true;
          sectionLines.push(line);
        }
        continue;
      }
      // The end-marker line belongs to the next section; don't include it.
      // (End regexes all require non-empty headings, so no blank-line guard
      // is needed here.)
      if (markers.end.test(line)) {
        break;
      }
      sectionLines.push(line);
    }
    // Clean up trailing empty lines before joining.
    while (sectionLines.length > 0 && sectionLines[sectionLines.length - 1].trim() === '') {
      sectionLines.pop();
    }
    if (sectionLines.length > 0) {
      result[sectionId] = sectionLines.join('\n').trim();
    }
  }
  return result;
}
/**
 * Resolve the short (7-character) SHA of the latest commit on the
 * upstream main branch, for recording in metadata.json.
 *
 * Falls back to the literal string 'main' when the GitHub API is
 * unreachable or returns an unparsable payload, so a refresh can still
 * complete without version info.
 */
async function getLatestCommitSha() {
  const commitsUrl = `${GITHUB_API}/repos/${REPO_OWNER}/${REPO_NAME}/commits/main`;
  try {
    const response = await httpsGet(commitsUrl);
    const payload = JSON.parse(response.data);
    return payload.sha.slice(0, 7);
  } catch (error) {
    console.warn(' Warning: Could not fetch commit SHA, using "main"');
    return 'main';
  }
}
/**
 * Main refresh function
 *
 * Pipeline: verify the bundled prompt directory exists → download
 * AGENTS.md → parse the workflow sections → resolve the upstream commit
 * SHA → rewrite any changed openspec.<command>.md files → record
 * metadata.json. Exits the process with code 1 on unrecoverable failure.
 * Custom Maestro prompts (help, implement) are intentionally untouched.
 */
async function refreshOpenSpec() {
  console.log('🔄 Refreshing OpenSpec prompts from GitHub...\n');
  // Ensure openspec directory exists
  if (!fs.existsSync(OPENSPEC_DIR)) {
    console.error('❌ OpenSpec directory not found:', OPENSPEC_DIR);
    process.exit(1);
  }
  try {
    // Fetch AGENTS.md
    console.log('📡 Fetching AGENTS.md from OpenSpec repository...');
    const { data: agentsMdContent } = await httpsGet(AGENTS_MD_URL);
    console.log(` Downloaded AGENTS.md (${agentsMdContent.length} bytes)`);
    // Parse sections
    console.log('\n📦 Parsing workflow sections...');
    const extractedPrompts = parseAgentsMd(agentsMdContent);
    const extractedCount = Object.keys(extractedPrompts).length;
    console.log(` Extracted ${extractedCount} sections from AGENTS.md`);
    // Zero sections means the upstream heading format changed; bail out
    // rather than overwriting the bundled prompts with nothing.
    if (extractedCount === 0) {
      console.error('❌ Failed to extract any sections from AGENTS.md');
      console.error(' Check that the section markers match the current format');
      process.exit(1);
    }
    // Get commit SHA for version tracking
    console.log('\n📋 Getting version info...');
    const commitSha = await getLatestCommitSha();
    console.log(` Commit: ${commitSha}`);
    // Update prompt files
    console.log('\n✏ Updating prompt files...');
    let updatedCount = 0;
    for (const commandName of UPSTREAM_COMMANDS) {
      const content = extractedPrompts[commandName];
      if (!content) {
        console.log(` ⚠ Missing: openspec.${commandName}.md (section not found)`);
        continue;
      }
      const promptFile = path.join(OPENSPEC_DIR, `openspec.${commandName}.md`);
      // Only rewrite files whose content actually changed, so git diffs
      // after a refresh show real upstream changes only.
      const existingContent = fs.existsSync(promptFile)
        ? fs.readFileSync(promptFile, 'utf8')
        : '';
      if (content !== existingContent) {
        fs.writeFileSync(promptFile, content);
        console.log(` ✓ Updated: openspec.${commandName}.md`);
        updatedCount++;
      } else {
        console.log(` - Unchanged: openspec.${commandName}.md`);
      }
    }
    // Update metadata
    // Records when and from which upstream commit the prompts were synced.
    const metadata = {
      lastRefreshed: new Date().toISOString(),
      commitSha,
      sourceVersion: '0.1.0',
      sourceUrl: `https://github.com/${REPO_OWNER}/${REPO_NAME}`,
    };
    fs.writeFileSync(METADATA_PATH, JSON.stringify(metadata, null, 2));
    console.log('\n📄 Updated metadata.json');
    // Summary
    console.log('\n✅ Refresh complete!');
    console.log(` Commit: ${commitSha}`);
    console.log(` Updated: ${updatedCount} files`);
    console.log(` Skipped: help, implement (custom Maestro prompts)`);
  } catch (error) {
    console.error('\n❌ Refresh failed:', error.message);
    process.exit(1);
  }
}
// Run
refreshOpenSpec();

View File

@@ -0,0 +1,259 @@
/**
* Tests for the OpenSpec IPC handlers
*
* These tests verify the IPC handlers for managing OpenSpec commands:
* - Getting metadata
* - Getting all prompts
* - Getting individual commands
* - Saving user customizations
* - Resetting to defaults
* - Refreshing from GitHub
*/
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import { ipcMain } from 'electron';
import { registerOpenSpecHandlers } from '../../../../main/ipc/handlers/openspec';
import * as openspecManager from '../../../../main/openspec-manager';
// Mock electron's ipcMain
// NOTE: vitest hoists vi.mock calls above the imports, so these factories
// must not reference variables declared later in this file.
vi.mock('electron', () => ({
  ipcMain: {
    handle: vi.fn(),
    removeHandler: vi.fn(),
  },
}));
// Mock the openspec-manager module
// Each export becomes a bare vi.fn(); individual tests program return
// values via vi.mocked(...).mockResolvedValue(...) / .mockRejectedValue(...).
vi.mock('../../../../main/openspec-manager', () => ({
  getOpenSpecMetadata: vi.fn(),
  getOpenSpecPrompts: vi.fn(),
  getOpenSpecCommandBySlash: vi.fn(),
  saveOpenSpecPrompt: vi.fn(),
  resetOpenSpecPrompt: vi.fn(),
  refreshOpenSpecPrompts: vi.fn(),
}));
// Mock the logger
// Keeps handler logging silent during test runs.
vi.mock('../../../../main/utils/logger', () => ({
  logger: {
    info: vi.fn(),
    warn: vi.fn(),
    error: vi.fn(),
    debug: vi.fn(),
  },
}));
// Exercises each registered IPC handler directly: ipcMain.handle is mocked,
// so registration stores every handler in a local Map and tests invoke them
// without going through Electron's IPC transport.
describe('openspec IPC handlers', () => {
  // Map of IPC channel name -> handler function captured at registration.
  let handlers: Map<string, Function>;
  beforeEach(() => {
    vi.clearAllMocks();
    // Capture all registered handlers
    handlers = new Map();
    vi.mocked(ipcMain.handle).mockImplementation((channel, handler) => {
      handlers.set(channel, handler);
    });
    // Register handlers
    registerOpenSpecHandlers();
  });
  afterEach(() => {
    handlers.clear();
  });
  describe('registration', () => {
    it('should register all openspec handlers', () => {
      // Every channel the renderer-side API depends on must be registered.
      const expectedChannels = [
        'openspec:getMetadata',
        'openspec:getPrompts',
        'openspec:getCommand',
        'openspec:savePrompt',
        'openspec:resetPrompt',
        'openspec:refresh',
      ];
      for (const channel of expectedChannels) {
        expect(handlers.has(channel)).toBe(true);
      }
    });
  });
  describe('openspec:getMetadata', () => {
    it('should return metadata from manager', async () => {
      const mockMetadata = {
        lastRefreshed: '2025-01-01T00:00:00Z',
        commitSha: 'abc1234',
        sourceVersion: '0.1.0',
        sourceUrl: 'https://github.com/Fission-AI/OpenSpec',
      };
      vi.mocked(openspecManager.getOpenSpecMetadata).mockResolvedValue(mockMetadata);
      const handler = handlers.get('openspec:getMetadata');
      // Handlers take an IpcMainInvokeEvent first; tests pass a stub object.
      const result = await handler!({} as any);
      expect(openspecManager.getOpenSpecMetadata).toHaveBeenCalled();
      expect(result).toEqual({ success: true, metadata: mockMetadata });
    });
    it('should handle errors gracefully', async () => {
      // Manager rejections are converted to { success: false, error }
      // instead of propagating across the IPC boundary.
      vi.mocked(openspecManager.getOpenSpecMetadata).mockRejectedValue(new Error('Failed to read'));
      const handler = handlers.get('openspec:getMetadata');
      const result = await handler!({} as any);
      expect(result.success).toBe(false);
      expect(result.error).toContain('Failed to read');
    });
  });
  describe('openspec:getPrompts', () => {
    it('should return all commands from manager', async () => {
      // One upstream command and one custom command, to cover both flavors.
      const mockCommands = [
        {
          id: 'proposal',
          command: '/openspec.proposal',
          description: 'Create a change proposal',
          prompt: '# Proposal',
          isCustom: false,
          isModified: false,
        },
        {
          id: 'help',
          command: '/openspec.help',
          description: 'Get help',
          prompt: '# Help',
          isCustom: true,
          isModified: false,
        },
      ];
      vi.mocked(openspecManager.getOpenSpecPrompts).mockResolvedValue(mockCommands);
      const handler = handlers.get('openspec:getPrompts');
      const result = await handler!({} as any);
      expect(openspecManager.getOpenSpecPrompts).toHaveBeenCalled();
      expect(result).toEqual({ success: true, commands: mockCommands });
    });
    it('should handle errors gracefully', async () => {
      vi.mocked(openspecManager.getOpenSpecPrompts).mockRejectedValue(new Error('Failed'));
      const handler = handlers.get('openspec:getPrompts');
      const result = await handler!({} as any);
      expect(result.success).toBe(false);
    });
  });
  describe('openspec:getCommand', () => {
    it('should return command by slash command string', async () => {
      const mockCommand = {
        id: 'proposal',
        command: '/openspec.proposal',
        description: 'Create a change proposal',
        prompt: '# Proposal',
        isCustom: false,
        isModified: false,
      };
      vi.mocked(openspecManager.getOpenSpecCommandBySlash).mockResolvedValue(mockCommand);
      const handler = handlers.get('openspec:getCommand');
      const result = await handler!({} as any, '/openspec.proposal');
      expect(openspecManager.getOpenSpecCommandBySlash).toHaveBeenCalledWith('/openspec.proposal');
      expect(result).toEqual({ success: true, command: mockCommand });
    });
    it('should return null for unknown command', async () => {
      // Unknown commands are not an error: success stays true, command is null.
      vi.mocked(openspecManager.getOpenSpecCommandBySlash).mockResolvedValue(null);
      const handler = handlers.get('openspec:getCommand');
      const result = await handler!({} as any, '/openspec.unknown');
      expect(result).toEqual({ success: true, command: null });
    });
  });
  describe('openspec:savePrompt', () => {
    it('should save prompt customization', async () => {
      vi.mocked(openspecManager.saveOpenSpecPrompt).mockResolvedValue(undefined);
      const handler = handlers.get('openspec:savePrompt');
      const result = await handler!({} as any, 'proposal', '# Custom Proposal');
      expect(openspecManager.saveOpenSpecPrompt).toHaveBeenCalledWith('proposal', '# Custom Proposal');
      expect(result).toEqual({ success: true });
    });
    it('should handle save errors', async () => {
      vi.mocked(openspecManager.saveOpenSpecPrompt).mockRejectedValue(new Error('Write failed'));
      const handler = handlers.get('openspec:savePrompt');
      const result = await handler!({} as any, 'proposal', '# Custom');
      expect(result.success).toBe(false);
      expect(result.error).toContain('Write failed');
    });
  });
  describe('openspec:resetPrompt', () => {
    it('should reset prompt to default', async () => {
      // Reset resolves with the bundled default so the UI can update in place.
      const defaultPrompt = '# Default Proposal';
      vi.mocked(openspecManager.resetOpenSpecPrompt).mockResolvedValue(defaultPrompt);
      const handler = handlers.get('openspec:resetPrompt');
      const result = await handler!({} as any, 'proposal');
      expect(openspecManager.resetOpenSpecPrompt).toHaveBeenCalledWith('proposal');
      expect(result).toEqual({ success: true, prompt: defaultPrompt });
    });
    it('should handle unknown command error', async () => {
      vi.mocked(openspecManager.resetOpenSpecPrompt).mockRejectedValue(
        new Error('Unknown openspec command: nonexistent')
      );
      const handler = handlers.get('openspec:resetPrompt');
      const result = await handler!({} as any, 'nonexistent');
      expect(result.success).toBe(false);
      expect(result.error).toContain('Unknown openspec command');
    });
  });
  describe('openspec:refresh', () => {
    it('should refresh prompts from GitHub', async () => {
      const newMetadata = {
        lastRefreshed: '2025-06-15T12:00:00Z',
        commitSha: 'def5678',
        sourceVersion: '0.1.0',
        sourceUrl: 'https://github.com/Fission-AI/OpenSpec',
      };
      vi.mocked(openspecManager.refreshOpenSpecPrompts).mockResolvedValue(newMetadata);
      const handler = handlers.get('openspec:refresh');
      const result = await handler!({} as any);
      expect(openspecManager.refreshOpenSpecPrompts).toHaveBeenCalled();
      expect(result).toEqual({ success: true, metadata: newMetadata });
    });
    it('should handle network errors', async () => {
      vi.mocked(openspecManager.refreshOpenSpecPrompts).mockRejectedValue(
        new Error('Failed to fetch AGENTS.md: Not Found')
      );
      const handler = handlers.get('openspec:refresh');
      const result = await handler!({} as any);
      expect(result.success).toBe(false);
      expect(result.error).toContain('Failed to fetch');
    });
  });
});

View File

@@ -0,0 +1,493 @@
/**
* Tests for the OpenSpec Manager
*
* Tests the core functionality for managing bundled OpenSpec prompts including:
* - Loading bundled prompts from disk
* - User customization persistence
* - Resetting to defaults
* - Parsing AGENTS.md for upstream command extraction
*/
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import fs from 'fs/promises';
import path from 'path';
// Mock electron app module
// getPath('userData') is pinned so user-customization paths are stable.
// NOTE(review): isPackaged=false presumably selects the dev-mode prompt
// path in openspec-manager — confirm against its path resolution.
vi.mock('electron', () => ({
  app: {
    getPath: vi.fn().mockReturnValue('/mock/userData'),
    isPackaged: false,
  },
}));
// Mock fs/promises
// The manager imports the default export, hence the `default` wrapper here.
vi.mock('fs/promises', () => ({
  default: {
    readFile: vi.fn(),
    writeFile: vi.fn(),
    mkdir: vi.fn(),
  },
}));
// Mock the logger
vi.mock('../../main/utils/logger', () => ({
  logger: {
    info: vi.fn(),
    warn: vi.fn(),
    error: vi.fn(),
    debug: vi.fn(),
  },
}));
// Import the module after mocks are set up
import {
getOpenSpecMetadata,
getOpenSpecPrompts,
saveOpenSpecPrompt,
resetOpenSpecPrompt,
getOpenSpecCommand,
getOpenSpecCommandBySlash,
OpenSpecCommand,
OpenSpecMetadata,
} from '../../main/openspec-manager';
describe('openspec-manager', () => {
const mockBundledPrompt = '# Test Prompt\n\nThis is a test prompt.';
const mockMetadata: OpenSpecMetadata = {
lastRefreshed: '2025-01-01T00:00:00Z',
commitSha: 'abc1234',
sourceVersion: '0.1.0',
sourceUrl: 'https://github.com/Fission-AI/OpenSpec',
};
beforeEach(() => {
vi.clearAllMocks();
});
afterEach(() => {
vi.clearAllMocks();
});
describe('getOpenSpecMetadata', () => {
it('should return bundled metadata when no customizations exist', async () => {
// No user customizations file
vi.mocked(fs.readFile).mockImplementation(async (filePath) => {
const pathStr = filePath.toString();
if (pathStr.includes('openspec-customizations.json')) {
throw new Error('ENOENT');
}
if (pathStr.includes('metadata.json')) {
return JSON.stringify(mockMetadata);
}
throw new Error('ENOENT');
});
const metadata = await getOpenSpecMetadata();
expect(metadata).toEqual(mockMetadata);
});
it('should return customized metadata when available', async () => {
const customMetadata: OpenSpecMetadata = {
lastRefreshed: '2025-06-15T12:00:00Z',
commitSha: 'def5678',
sourceVersion: '0.2.0',
sourceUrl: 'https://github.com/Fission-AI/OpenSpec',
};
vi.mocked(fs.readFile).mockImplementation(async (filePath) => {
const pathStr = filePath.toString();
if (pathStr.includes('openspec-customizations.json')) {
return JSON.stringify({
metadata: customMetadata,
prompts: {},
});
}
throw new Error('ENOENT');
});
const metadata = await getOpenSpecMetadata();
expect(metadata).toEqual(customMetadata);
});
it('should return default metadata when no files exist', async () => {
vi.mocked(fs.readFile).mockRejectedValue(new Error('ENOENT'));
const metadata = await getOpenSpecMetadata();
expect(metadata.sourceUrl).toBe('https://github.com/Fission-AI/OpenSpec');
expect(metadata.sourceVersion).toBe('0.1.0');
});
});
describe('getOpenSpecPrompts', () => {
it('should return all bundled commands', async () => {
vi.mocked(fs.readFile).mockImplementation(async (filePath) => {
const pathStr = filePath.toString();
if (pathStr.includes('openspec-customizations.json')) {
throw new Error('ENOENT');
}
if (pathStr.includes('openspec-prompts')) {
throw new Error('ENOENT');
}
if (pathStr.endsWith('.md')) {
return mockBundledPrompt;
}
throw new Error('ENOENT');
});
const commands = await getOpenSpecPrompts();
expect(commands.length).toBeGreaterThan(0);
expect(commands.some((cmd) => cmd.command === '/openspec.help')).toBe(true);
expect(commands.some((cmd) => cmd.command === '/openspec.proposal')).toBe(true);
expect(commands.some((cmd) => cmd.command === '/openspec.apply')).toBe(true);
expect(commands.some((cmd) => cmd.command === '/openspec.archive')).toBe(true);
expect(commands.some((cmd) => cmd.command === '/openspec.implement')).toBe(true);
});
it('should return commands with correct structure', async () => {
  // No customizations file and no downloaded prompts: every .md read
  // resolves to the bundled fixture, everything else is "missing".
  vi.mocked(fs.readFile).mockImplementation(async (filePath) => {
    const pathStr = filePath.toString();
    if (pathStr.includes('openspec-customizations.json')) {
      throw new Error('ENOENT');
    }
    if (pathStr.includes('openspec-prompts')) {
      throw new Error('ENOENT');
    }
    if (pathStr.endsWith('.md')) {
      return mockBundledPrompt;
    }
    throw new Error('ENOENT');
  });
  const commands = await getOpenSpecPrompts();
  // Every command must expose the full OpenSpecCommand shape and a
  // slash string under the /openspec. namespace.
  for (const cmd of commands) {
    expect(cmd).toHaveProperty('id');
    expect(cmd).toHaveProperty('command');
    expect(cmd).toHaveProperty('description');
    expect(cmd).toHaveProperty('prompt');
    expect(cmd).toHaveProperty('isCustom');
    expect(cmd).toHaveProperty('isModified');
    expect(cmd.command.startsWith('/openspec.')).toBe(true);
  }
});
it('should mark custom commands correctly', async () => {
  // Same "nothing on disk but bundled prompts" setup as above.
  vi.mocked(fs.readFile).mockImplementation(async (filePath) => {
    const pathStr = filePath.toString();
    if (pathStr.includes('openspec-customizations.json')) {
      throw new Error('ENOENT');
    }
    if (pathStr.includes('openspec-prompts')) {
      throw new Error('ENOENT');
    }
    if (pathStr.endsWith('.md')) {
      return mockBundledPrompt;
    }
    throw new Error('ENOENT');
  });
  const commands = await getOpenSpecPrompts();
  const helpCmd = commands.find((cmd) => cmd.id === 'help');
  const implementCmd = commands.find((cmd) => cmd.id === 'implement');
  const proposalCmd = commands.find((cmd) => cmd.id === 'proposal');
  // help/implement are Maestro-authored (isCustom), proposal is upstream.
  expect(helpCmd?.isCustom).toBe(true);
  expect(implementCmd?.isCustom).toBe(true);
  expect(proposalCmd?.isCustom).toBe(false);
});
it('should use customized prompt when available', async () => {
  const customContent = '# Custom Proposal\n\nThis is my custom prompt.';
  // Customizations file exists and carries a modified 'proposal' entry;
  // it must win over the bundled prompt.
  vi.mocked(fs.readFile).mockImplementation(async (filePath) => {
    const pathStr = filePath.toString();
    if (pathStr.includes('openspec-customizations.json')) {
      return JSON.stringify({
        metadata: mockMetadata,
        prompts: {
          proposal: {
            content: customContent,
            isModified: true,
            modifiedAt: '2025-06-15T12:00:00Z',
          },
        },
      });
    }
    if (pathStr.includes('openspec-prompts')) {
      throw new Error('ENOENT');
    }
    if (pathStr.endsWith('.md')) {
      return mockBundledPrompt;
    }
    throw new Error('ENOENT');
  });
  const commands = await getOpenSpecPrompts();
  const proposalCmd = commands.find((cmd) => cmd.id === 'proposal');
  expect(proposalCmd?.prompt).toBe(customContent);
  expect(proposalCmd?.isModified).toBe(true);
});
});
describe('saveOpenSpecPrompt', () => {
  it('should save customization to disk', async () => {
    const customContent = '# My Custom Prompt';
    // First save: no customizations file yet, only bundled metadata exists.
    vi.mocked(fs.readFile).mockImplementation(async (filePath) => {
      const pathStr = filePath.toString();
      if (pathStr.includes('openspec-customizations.json')) {
        throw new Error('ENOENT');
      }
      if (pathStr.includes('metadata.json')) {
        return JSON.stringify(mockMetadata);
      }
      throw new Error('ENOENT');
    });
    vi.mocked(fs.writeFile).mockResolvedValue(undefined);
    await saveOpenSpecPrompt('proposal', customContent);
    expect(fs.writeFile).toHaveBeenCalled();
    // Inspect the JSON that was written instead of mocking the store.
    const writeCall = vi.mocked(fs.writeFile).mock.calls[0];
    const writtenContent = JSON.parse(writeCall[1] as string);
    expect(writtenContent.prompts.proposal.content).toBe(customContent);
    expect(writtenContent.prompts.proposal.isModified).toBe(true);
    expect(writtenContent.prompts.proposal.modifiedAt).toBeDefined();
  });
  it('should preserve existing customizations', async () => {
    // A previously-saved 'apply' customization must survive a new save
    // for a different command.
    const existingCustomizations = {
      metadata: mockMetadata,
      prompts: {
        apply: {
          content: '# Existing Apply',
          isModified: true,
          modifiedAt: '2025-01-01T00:00:00Z',
        },
      },
    };
    vi.mocked(fs.readFile).mockImplementation(async (filePath) => {
      const pathStr = filePath.toString();
      if (pathStr.includes('openspec-customizations.json')) {
        return JSON.stringify(existingCustomizations);
      }
      throw new Error('ENOENT');
    });
    vi.mocked(fs.writeFile).mockResolvedValue(undefined);
    await saveOpenSpecPrompt('proposal', '# New Proposal');
    const writeCall = vi.mocked(fs.writeFile).mock.calls[0];
    const writtenContent = JSON.parse(writeCall[1] as string);
    expect(writtenContent.prompts.apply.content).toBe('# Existing Apply');
    expect(writtenContent.prompts.proposal.content).toBe('# New Proposal');
  });
});
describe('resetOpenSpecPrompt', () => {
  it('should reset prompt to bundled default', async () => {
    // Start from a store that has a modified 'proposal' entry.
    const customizations = {
      metadata: mockMetadata,
      prompts: {
        proposal: {
          content: '# Custom',
          isModified: true,
        },
      },
    };
    vi.mocked(fs.readFile).mockImplementation(async (filePath) => {
      const pathStr = filePath.toString();
      if (pathStr.includes('openspec-customizations.json')) {
        return JSON.stringify(customizations);
      }
      if (pathStr.includes('openspec-prompts')) {
        throw new Error('ENOENT');
      }
      if (pathStr.endsWith('.md')) {
        return mockBundledPrompt;
      }
      throw new Error('ENOENT');
    });
    vi.mocked(fs.writeFile).mockResolvedValue(undefined);
    const result = await resetOpenSpecPrompt('proposal');
    // Returns the bundled prompt and removes the entry from the store.
    expect(result).toBe(mockBundledPrompt);
    expect(fs.writeFile).toHaveBeenCalled();
    const writeCall = vi.mocked(fs.writeFile).mock.calls[0];
    const writtenContent = JSON.parse(writeCall[1] as string);
    expect(writtenContent.prompts.proposal).toBeUndefined();
  });
  it('should throw for unknown command', async () => {
    // Nothing on disk at all; an unknown id must reject, not return a
    // placeholder.
    vi.mocked(fs.readFile).mockImplementation(async (filePath) => {
      const pathStr = filePath.toString();
      if (pathStr.includes('openspec-customizations.json')) {
        throw new Error('ENOENT');
      }
      if (pathStr.includes('openspec-prompts')) {
        throw new Error('ENOENT');
      }
      if (pathStr.endsWith('.md')) {
        throw new Error('ENOENT');
      }
      throw new Error('ENOENT');
    });
    await expect(resetOpenSpecPrompt('nonexistent')).rejects.toThrow('Unknown openspec command');
  });
});
describe('getOpenSpecCommand', () => {
  it('should return command by ID', async () => {
    // Bundled prompts only; lookup is by command id.
    vi.mocked(fs.readFile).mockImplementation(async (filePath) => {
      const pathStr = filePath.toString();
      if (pathStr.includes('openspec-customizations.json')) {
        throw new Error('ENOENT');
      }
      if (pathStr.includes('openspec-prompts')) {
        throw new Error('ENOENT');
      }
      if (pathStr.endsWith('.md')) {
        return mockBundledPrompt;
      }
      throw new Error('ENOENT');
    });
    const command = await getOpenSpecCommand('proposal');
    expect(command).not.toBeNull();
    expect(command?.id).toBe('proposal');
    expect(command?.command).toBe('/openspec.proposal');
  });
  it('should return null for unknown ID', async () => {
    vi.mocked(fs.readFile).mockImplementation(async (filePath) => {
      const pathStr = filePath.toString();
      if (pathStr.includes('openspec-customizations.json')) {
        throw new Error('ENOENT');
      }
      if (pathStr.includes('openspec-prompts')) {
        throw new Error('ENOENT');
      }
      if (pathStr.endsWith('.md')) {
        return mockBundledPrompt;
      }
      throw new Error('ENOENT');
    });
    const command = await getOpenSpecCommand('nonexistent');
    expect(command).toBeNull();
  });
});
describe('getOpenSpecCommandBySlash', () => {
  it('should return command by slash command string', async () => {
    // Bundled prompts only; lookup is by the full '/openspec.*' string.
    vi.mocked(fs.readFile).mockImplementation(async (filePath) => {
      const pathStr = filePath.toString();
      if (pathStr.includes('openspec-customizations.json')) {
        throw new Error('ENOENT');
      }
      if (pathStr.includes('openspec-prompts')) {
        throw new Error('ENOENT');
      }
      if (pathStr.endsWith('.md')) {
        return mockBundledPrompt;
      }
      throw new Error('ENOENT');
    });
    const command = await getOpenSpecCommandBySlash('/openspec.proposal');
    expect(command).not.toBeNull();
    expect(command?.id).toBe('proposal');
    expect(command?.command).toBe('/openspec.proposal');
  });
  it('should return null for unknown slash command', async () => {
    vi.mocked(fs.readFile).mockImplementation(async (filePath) => {
      const pathStr = filePath.toString();
      if (pathStr.includes('openspec-customizations.json')) {
        throw new Error('ENOENT');
      }
      if (pathStr.includes('openspec-prompts')) {
        throw new Error('ENOENT');
      }
      if (pathStr.endsWith('.md')) {
        return mockBundledPrompt;
      }
      throw new Error('ENOENT');
    });
    const command = await getOpenSpecCommandBySlash('/openspec.nonexistent');
    expect(command).toBeNull();
  });
});
describe('user prompts directory priority', () => {
  it('should prefer user prompts directory over bundled for upstream commands', async () => {
    const userPromptContent = '# User Updated Proposal';
    vi.mocked(fs.readFile).mockImplementation(async (filePath) => {
      const pathStr = filePath.toString();
      if (pathStr.includes('openspec-customizations.json')) {
        throw new Error('ENOENT');
      }
      // User prompts directory (downloaded updates): only proposal exists.
      if (pathStr.includes('openspec-prompts') && pathStr.includes('openspec.proposal.md')) {
        return userPromptContent;
      }
      if (pathStr.includes('openspec-prompts')) {
        throw new Error('ENOENT');
      }
      // Bundled prompts (fallback)
      if (pathStr.endsWith('.md')) {
        return mockBundledPrompt;
      }
      throw new Error('ENOENT');
    });
    const commands = await getOpenSpecPrompts();
    const proposalCmd = commands.find((cmd) => cmd.id === 'proposal');
    // Downloaded copy wins over the bundled one for upstream commands.
    expect(proposalCmd?.prompt).toBe(userPromptContent);
  });
  it('should always use bundled for custom commands (help, implement)', async () => {
    vi.mocked(fs.readFile).mockImplementation(async (filePath) => {
      const pathStr = filePath.toString();
      if (pathStr.includes('openspec-customizations.json')) {
        throw new Error('ENOENT');
      }
      // Every user-prompts-directory read "succeeds" with a sentinel so a
      // wrong lookup would be visible in the assertions below.
      if (pathStr.includes('openspec-prompts')) {
        return '# Should not be used for custom commands';
      }
      if (pathStr.endsWith('.md')) {
        return mockBundledPrompt;
      }
      throw new Error('ENOENT');
    });
    const commands = await getOpenSpecPrompts();
    const helpCmd = commands.find((cmd) => cmd.id === 'help');
    const implementCmd = commands.find((cmd) => cmd.id === 'implement');
    // Custom commands should use bundled, not user prompts directory
    expect(helpCmd?.prompt).toBe(mockBundledPrompt);
    expect(implementCmd?.prompt).toBe(mockBundledPrompt);
  });
});
});

View File

@@ -0,0 +1,224 @@
/**
 * Tests for src/renderer/services/openspec.ts
 * OpenSpec service that wraps IPC calls to main process
 *
 * Each service function is exercised against the four outcomes of the
 * underlying window.maestro.openspec call: success, success:false,
 * rejection, and a success payload missing its data field.
 */
import { describe, test, expect, vi, beforeEach } from 'vitest';
import {
  getOpenSpecCommands,
  getOpenSpecMetadata,
  getOpenSpecCommand,
} from '../../../renderer/services/openspec';
// Mock the window.maestro.openspec object
const mockOpenspec = {
  getPrompts: vi.fn(),
  getMetadata: vi.fn(),
  getCommand: vi.fn(),
};
// Setup mock before each test
beforeEach(() => {
  vi.clearAllMocks();
  // Ensure window.maestro.openspec is mocked
  (window as any).maestro = {
    ...(window as any).maestro,
    openspec: mockOpenspec,
  };
  // Mock console.error to prevent noise in test output
  vi.spyOn(console, 'error').mockImplementation(() => {});
});
describe('openspec service', () => {
  describe('getOpenSpecCommands', () => {
    test('returns commands when API succeeds', async () => {
      const mockCommands = [
        {
          id: 'proposal',
          command: '/openspec.proposal',
          description: 'Create a change proposal',
          prompt: '# Proposal',
          isCustom: false,
          isModified: false,
        },
        {
          id: 'help',
          command: '/openspec.help',
          description: 'Get help',
          prompt: '# Help',
          isCustom: true,
          isModified: false,
        },
      ];
      mockOpenspec.getPrompts.mockResolvedValue({
        success: true,
        commands: mockCommands,
      });
      const result = await getOpenSpecCommands();
      expect(result).toEqual(mockCommands);
      expect(mockOpenspec.getPrompts).toHaveBeenCalled();
    });
    test('returns empty array when API returns success false', async () => {
      mockOpenspec.getPrompts.mockResolvedValue({
        success: false,
        error: 'Something went wrong',
      });
      const result = await getOpenSpecCommands();
      expect(result).toEqual([]);
    });
    test('returns empty array when API throws', async () => {
      // IPC rejection must be swallowed and logged, never propagated.
      mockOpenspec.getPrompts.mockRejectedValue(new Error('IPC error'));
      const result = await getOpenSpecCommands();
      expect(result).toEqual([]);
      expect(console.error).toHaveBeenCalledWith(
        '[OpenSpec] Failed to get commands:',
        expect.any(Error)
      );
    });
    test('returns empty array when commands is undefined', async () => {
      mockOpenspec.getPrompts.mockResolvedValue({
        success: true,
        commands: undefined,
      });
      const result = await getOpenSpecCommands();
      expect(result).toEqual([]);
    });
  });
  describe('getOpenSpecMetadata', () => {
    test('returns metadata when API succeeds', async () => {
      const mockMetadata = {
        lastRefreshed: '2025-01-01T00:00:00Z',
        commitSha: 'abc1234',
        sourceVersion: '0.1.0',
        sourceUrl: 'https://github.com/Fission-AI/OpenSpec',
      };
      mockOpenspec.getMetadata.mockResolvedValue({
        success: true,
        metadata: mockMetadata,
      });
      const result = await getOpenSpecMetadata();
      expect(result).toEqual(mockMetadata);
      expect(mockOpenspec.getMetadata).toHaveBeenCalled();
    });
    test('returns null when API returns success false', async () => {
      mockOpenspec.getMetadata.mockResolvedValue({
        success: false,
        error: 'Something went wrong',
      });
      const result = await getOpenSpecMetadata();
      expect(result).toBeNull();
    });
    test('returns null when API throws', async () => {
      mockOpenspec.getMetadata.mockRejectedValue(new Error('IPC error'));
      const result = await getOpenSpecMetadata();
      expect(result).toBeNull();
      expect(console.error).toHaveBeenCalledWith(
        '[OpenSpec] Failed to get metadata:',
        expect.any(Error)
      );
    });
    test('returns null when metadata is undefined', async () => {
      mockOpenspec.getMetadata.mockResolvedValue({
        success: true,
        metadata: undefined,
      });
      const result = await getOpenSpecMetadata();
      expect(result).toBeNull();
    });
  });
  describe('getOpenSpecCommand', () => {
    test('returns command when API succeeds', async () => {
      const mockCommand = {
        id: 'proposal',
        command: '/openspec.proposal',
        description: 'Create a change proposal',
        prompt: '# Proposal',
        isCustom: false,
        isModified: false,
      };
      mockOpenspec.getCommand.mockResolvedValue({
        success: true,
        command: mockCommand,
      });
      const result = await getOpenSpecCommand('/openspec.proposal');
      expect(result).toEqual(mockCommand);
      // The slash string is forwarded verbatim to the IPC layer.
      expect(mockOpenspec.getCommand).toHaveBeenCalledWith('/openspec.proposal');
    });
    test('returns null when command not found', async () => {
      mockOpenspec.getCommand.mockResolvedValue({
        success: true,
        command: null,
      });
      const result = await getOpenSpecCommand('/openspec.nonexistent');
      expect(result).toBeNull();
    });
    test('returns null when API returns success false', async () => {
      mockOpenspec.getCommand.mockResolvedValue({
        success: false,
        error: 'Something went wrong',
      });
      const result = await getOpenSpecCommand('/openspec.proposal');
      expect(result).toBeNull();
    });
    test('returns null when API throws', async () => {
      mockOpenspec.getCommand.mockRejectedValue(new Error('IPC error'));
      const result = await getOpenSpecCommand('/openspec.proposal');
      expect(result).toBeNull();
      expect(console.error).toHaveBeenCalledWith(
        '[OpenSpec] Failed to get command:',
        expect.any(Error)
      );
    });
    test('returns null when command is undefined', async () => {
      mockOpenspec.getCommand.mockResolvedValue({
        success: true,
        command: undefined,
      });
      const result = await getOpenSpecCommand('/openspec.proposal');
      expect(result).toBeNull();
    });
  });
});

View File

@@ -22,6 +22,7 @@ import { registerAgentSessionsHandlers, AgentSessionsHandlerDependencies } from
import { registerGroupChatHandlers, GroupChatHandlerDependencies } from './groupChat';
import { registerDebugHandlers, DebugHandlerDependencies } from './debug';
import { registerSpeckitHandlers } from './speckit';
import { registerOpenSpecHandlers } from './openspec';
import { registerContextHandlers, ContextHandlerDependencies, cleanupAllGroomingSessions, getActiveGroomingSessionCount } from './context';
import { AgentDetector } from '../../agent-detector';
import { ProcessManager } from '../../process-manager';
@@ -45,6 +46,7 @@ export { registerAgentSessionsHandlers };
export { registerGroupChatHandlers };
export { registerDebugHandlers };
export { registerSpeckitHandlers };
export { registerOpenSpecHandlers };
export { registerContextHandlers, cleanupAllGroomingSessions, getActiveGroomingSessionCount };
export type { AgentsHandlerDependencies };
export type { ProcessHandlerDependencies };
@@ -154,6 +156,8 @@ export function registerAllHandlers(deps: HandlerDependencies): void {
});
// Register spec-kit handlers (no dependencies needed)
registerSpeckitHandlers();
// Register OpenSpec handlers (no dependencies needed)
registerOpenSpecHandlers();
registerContextHandlers({
getMainWindow: deps.getMainWindow,
getProcessManager: deps.getProcessManager,

View File

@@ -0,0 +1,100 @@
/**
* OpenSpec IPC Handlers
*
* Provides IPC handlers for managing OpenSpec commands:
* - Get metadata (version, last refresh date)
* - Get all commands with prompts
* - Save user edits to prompts
* - Reset prompts to bundled defaults
* - Refresh prompts from GitHub
*/
import { ipcMain } from 'electron';
import { logger } from '../../utils/logger';
import { createIpcHandler, CreateHandlerOptions } from '../../utils/ipcHandler';
import {
getOpenSpecMetadata,
getOpenSpecPrompts,
saveOpenSpecPrompt,
resetOpenSpecPrompt,
refreshOpenSpecPrompts,
getOpenSpecCommandBySlash,
OpenSpecCommand,
OpenSpecMetadata,
} from '../../openspec-manager';
const LOG_CONTEXT = '[OpenSpec]';

/**
 * Build the options object handed to createIpcHandler, sharing this
 * module's log context across every OpenSpec channel.
 */
function handlerOpts(operation: string, logSuccess = true): CreateHandlerOptions {
  return {
    context: LOG_CONTEXT,
    operation,
    logSuccess,
  };
}
/**
 * Register all OpenSpec IPC handlers.
 *
 * Channel names ('openspec:*') mirror the `window.maestro.openspec` API
 * exposed from the preload script. Read-only channels are registered with
 * logSuccess=false; mutating channels log on success.
 * NOTE(review): createIpcHandler presumably wraps results/errors into the
 * { success, ... } envelope the preload types declare — confirm in
 * utils/ipcHandler.
 */
export function registerOpenSpecHandlers(): void {
  // Get metadata (version info, last refresh date)
  ipcMain.handle(
    'openspec:getMetadata',
    createIpcHandler(handlerOpts('getMetadata', false), async () => {
      const metadata = await getOpenSpecMetadata();
      return { metadata };
    })
  );
  // Get all openspec prompts
  ipcMain.handle(
    'openspec:getPrompts',
    createIpcHandler(handlerOpts('getPrompts', false), async () => {
      const commands = await getOpenSpecPrompts();
      return { commands };
    })
  );
  // Get a single command by slash command string
  ipcMain.handle(
    'openspec:getCommand',
    createIpcHandler(handlerOpts('getCommand', false), async (slashCommand: string) => {
      const command = await getOpenSpecCommandBySlash(slashCommand);
      return { command };
    })
  );
  // Save user's edit to a prompt
  ipcMain.handle(
    'openspec:savePrompt',
    createIpcHandler(handlerOpts('savePrompt'), async (id: string, content: string) => {
      await saveOpenSpecPrompt(id, content);
      logger.info(`Saved custom prompt for openspec.${id}`, LOG_CONTEXT);
      return {};
    })
  );
  // Reset a prompt to bundled default
  ipcMain.handle(
    'openspec:resetPrompt',
    createIpcHandler(handlerOpts('resetPrompt'), async (id: string) => {
      const prompt = await resetOpenSpecPrompt(id);
      logger.info(`Reset openspec.${id} to bundled default`, LOG_CONTEXT);
      return { prompt };
    })
  );
  // Refresh prompts from GitHub (network access required)
  ipcMain.handle(
    'openspec:refresh',
    createIpcHandler(handlerOpts('refresh'), async () => {
      const metadata = await refreshOpenSpecPrompts();
      logger.info(`Refreshed OpenSpec prompts to commit ${metadata.commitSha}`, LOG_CONTEXT);
      return { metadata };
    })
  );
  logger.debug(`${LOG_CONTEXT} OpenSpec IPC handlers registered`);
}
// Export types for preload
export type { OpenSpecCommand, OpenSpecMetadata };

View File

@@ -0,0 +1,435 @@
/**
* OpenSpec Manager
*
* Manages bundled OpenSpec prompts with support for:
* - Loading bundled prompts from src/prompts/openspec/
* - Fetching updates from GitHub's OpenSpec repository
* - User customization with ability to reset to defaults
*
* OpenSpec provides a structured change management workflow:
* - Proposal → Draft change specifications before coding
* - Apply → Implement tasks referencing agreed specs
* - Archive → Move completed work to archive after deployment
*
* Source: https://github.com/Fission-AI/OpenSpec
*/
import fs from 'fs/promises';
import path from 'path';
import { app } from 'electron';
import { logger } from './utils/logger';
// Logging prefix shared by every log call in this module.
const LOG_CONTEXT = '[OpenSpec]';
// All bundled OpenSpec commands with their metadata.
// isCustom: true marks Maestro-authored commands; these are never replaced
// by downloaded upstream content (see getBundledPrompts/refreshOpenSpecPrompts).
// NOTE(review): the `command` field here appears unused — getOpenSpecPrompts
// rebuilds the slash string as `/openspec.${id}`; keep the two in sync.
const OPENSPEC_COMMANDS = [
  { id: 'help', command: '/openspec.help', description: 'Learn how to use OpenSpec with Maestro', isCustom: true },
  { id: 'proposal', command: '/openspec.proposal', description: 'Create a change proposal with specs, tasks, and optional design docs', isCustom: false },
  { id: 'apply', command: '/openspec.apply', description: 'Implement an approved change proposal by executing tasks', isCustom: false },
  { id: 'archive', command: '/openspec.archive', description: 'Archive a completed change after deployment', isCustom: false },
  { id: 'implement', command: '/openspec.implement', description: 'Convert OpenSpec tasks to Maestro Auto Run documents', isCustom: true },
] as const;
/** One OpenSpec slash command with its fully-resolved prompt text. */
export interface OpenSpecCommand {
  id: string; // stable identifier, e.g. 'proposal'
  command: string; // full slash string, e.g. '/openspec.proposal'
  description: string;
  prompt: string; // effective prompt after applying user customizations
  isCustom: boolean; // true for Maestro-authored (non-upstream) commands
  isModified: boolean; // true when the user has edited this prompt
}
/** Version/provenance info for the prompt set currently in use. */
export interface OpenSpecMetadata {
  lastRefreshed: string; // ISO timestamp of the last successful refresh
  commitSha: string; // short upstream SHA, or 'main' when unknown
  sourceVersion: string;
  sourceUrl: string;
}
// A single user-edited prompt as persisted on disk.
interface StoredPrompt {
  content: string;
  isModified: boolean;
  modifiedAt?: string; // ISO timestamp; absent for legacy entries
}
// Shape of the openspec-customizations.json file.
interface StoredData {
  metadata: OpenSpecMetadata;
  prompts: Record<string, StoredPrompt>;
}
/**
 * Absolute path of the JSON file holding the user's OpenSpec prompt
 * customizations, inside Electron's per-user data directory.
 */
function getUserDataPath(): string {
  const userDataDir = app.getPath('userData');
  return path.join(userDataDir, 'openspec-customizations.json');
}
/**
 * Read and parse the user's customizations file.
 * Resolves to null when the file is missing or unreadable (e.g. first run).
 */
async function loadUserCustomizations(): Promise<StoredData | null> {
  try {
    const raw = await fs.readFile(getUserDataPath(), 'utf-8');
    return JSON.parse(raw);
  } catch {
    // Treat any read/parse failure as "no customizations yet".
    return null;
  }
}
/**
 * Persist the full customizations structure to disk as pretty-printed JSON.
 */
async function saveUserCustomizations(data: StoredData): Promise<void> {
  const serialized = JSON.stringify(data, null, 2);
  await fs.writeFile(getUserDataPath(), serialized, 'utf-8');
}
/**
 * Directory containing the bundled OpenSpec prompt markdown files.
 * Packaged builds ship them under the app resources; development reads
 * straight from the source tree.
 */
function getBundledPromptsPath(): string {
  if (!app.isPackaged) {
    // Development: resolve from the compiled output back into src/.
    return path.join(__dirname, '..', '..', 'src', 'prompts', 'openspec');
  }
  return path.join(process.resourcesPath, 'prompts', 'openspec');
}
/**
 * Directory where prompts downloaded via refresh are stored. For upstream
 * commands its contents take precedence over the bundled copies.
 */
function getUserPromptsPath(): string {
  const userDataDir = app.getPath('userData');
  return path.join(userDataDir, 'openspec-prompts');
}
/**
 * Load the effective default prompt for every OpenSpec command.
 *
 * Resolution order per command:
 *  - custom (Maestro-authored) commands: bundled copy only;
 *  - upstream commands: user prompts directory (downloaded updates) first,
 *    then the bundled copy.
 * When no file can be read, a placeholder prompt is substituted and a
 * warning is logged, so callers always get an entry for every command.
 */
async function getBundledPrompts(): Promise<Record<string, { prompt: string; description: string; isCustom: boolean }>> {
  const bundledPromptsDir = getBundledPromptsPath();
  const userPromptsDir = getUserPromptsPath();
  const result: Record<string, { prompt: string; description: string; isCustom: boolean }> = {};

  for (const cmd of OPENSPEC_COMMANDS) {
    const fileName = `openspec.${cmd.id}.md`;
    // Custom commands never consult the user prompts directory: refresh
    // only downloads upstream content, which must not shadow Maestro's
    // own prompts.
    const candidates = cmd.isCustom
      ? [path.join(bundledPromptsDir, fileName)]
      : [path.join(userPromptsDir, fileName), path.join(bundledPromptsDir, fileName)];

    let prompt: string | null = null;
    let lastError: unknown = null;
    for (const candidate of candidates) {
      try {
        prompt = await fs.readFile(candidate, 'utf-8');
        break;
      } catch (error) {
        // Missing file is expected for the user-prompts candidate; keep the
        // last error so the warning below reports why all candidates failed.
        lastError = error;
      }
    }

    if (prompt === null) {
      logger.warn(`Failed to load bundled prompt for ${cmd.id}: ${lastError}`, LOG_CONTEXT);
      prompt = `# ${cmd.id}\n\nPrompt not available.`;
    }

    result[cmd.id] = {
      prompt,
      description: cmd.description,
      isCustom: cmd.isCustom,
    };
  }
  return result;
}
/**
 * Load OpenSpec version metadata.
 *
 * Checks the user prompts directory first (written by a refresh), then the
 * bundled copy, and finally falls back to static defaults when neither
 * metadata.json exists or parses.
 */
async function getBundledMetadata(): Promise<OpenSpecMetadata> {
  const candidates = [
    path.join(getUserPromptsPath(), 'metadata.json'),
    path.join(getBundledPromptsPath(), 'metadata.json'),
  ];
  for (const candidate of candidates) {
    try {
      const content = await fs.readFile(candidate, 'utf-8');
      return JSON.parse(content);
    } catch {
      // Missing/unreadable — try the next location.
    }
  }
  // Return default metadata if no file exists.
  return {
    lastRefreshed: '2025-01-01T00:00:00Z',
    commitSha: 'main',
    sourceVersion: '0.1.0',
    sourceUrl: 'https://github.com/Fission-AI/OpenSpec',
  };
}
/**
 * Current OpenSpec metadata: the copy stored with the user's
 * customizations when present, otherwise the bundled/downloaded metadata.
 */
export async function getOpenSpecMetadata(): Promise<OpenSpecMetadata> {
  const stored = await loadUserCustomizations();
  return stored?.metadata ?? (await getBundledMetadata());
}
/**
 * All OpenSpec commands with their effective prompts: bundled/downloaded
 * defaults, overridden by any user edits from the customizations file.
 */
export async function getOpenSpecPrompts(): Promise<OpenSpecCommand[]> {
  const defaults = await getBundledPrompts();
  const stored = await loadUserCustomizations();
  return Object.entries(defaults).map(([id, entry]) => {
    const override = stored?.prompts?.[id];
    const isModified = override?.isModified ?? false;
    return {
      id,
      command: `/openspec.${id}`,
      description: entry.description,
      // A user edit replaces the default only when flagged as modified.
      prompt: isModified && override ? override.content : entry.prompt,
      isCustom: entry.isCustom,
      isModified,
    };
  });
}
/**
 * Store the user's edited prompt for one command. Creates the
 * customizations file on first save; entries for other commands are kept.
 */
export async function saveOpenSpecPrompt(id: string, content: string): Promise<void> {
  let customizations = await loadUserCustomizations();
  if (!customizations) {
    // First customization ever: seed the store with current metadata.
    customizations = { metadata: await getBundledMetadata(), prompts: {} };
  }
  customizations.prompts[id] = {
    content,
    isModified: true,
    modifiedAt: new Date().toISOString(),
  };
  await saveUserCustomizations(customizations);
  logger.info(`Saved customization for openspec.${id}`, LOG_CONTEXT);
}
/**
 * Discard the user's customization for a command and return the default
 * prompt it reverts to. Throws when the command id is unknown.
 */
export async function resetOpenSpecPrompt(id: string): Promise<string> {
  const defaults = await getBundledPrompts();
  const entry = defaults[id];
  if (!entry) {
    throw new Error(`Unknown openspec command: ${id}`);
  }
  const stored = await loadUserCustomizations();
  if (stored?.prompts?.[id]) {
    // Only rewrite the store when there actually is an override to drop.
    delete stored.prompts[id];
    await saveUserCustomizations(stored);
    logger.info(`Reset openspec.${id} to bundled default`, LOG_CONTEXT);
  }
  return entry.prompt;
}
/**
 * Upstream commands to fetch (we skip custom commands like 'help' and 'implement')
 */
// Must stay in sync with the OPENSPEC_COMMANDS entries where isCustom === false.
const UPSTREAM_COMMANDS = ['proposal', 'apply', 'archive'];
/**
 * Section markers in AGENTS.md for extracting workflow prompts.
 *
 * For each workflow stage, `start` matches the stage heading (kept as the
 * first line of the extracted prompt) and `end` matches the first line of
 * the following stage.
 */
const SECTION_MARKERS: Record<string, { start: RegExp; end: RegExp }> = {
  proposal: {
    start: /^#+\s*Stage\s*1[:\s]+Creating\s+Changes/i,
    end: /^#+\s*Stage\s*2[:\s]+/i,
  },
  apply: {
    start: /^#+\s*Stage\s*2[:\s]+Implementing\s+Changes/i,
    end: /^#+\s*Stage\s*3[:\s]+/i,
  },
  archive: {
    start: /^$/, // placeholder replaced below — see note
    end: /^$/, // End of file or next major section
  },
};
// Restore the real archive start marker (kept separate only for the comment
// above; value is identical to upstream).
SECTION_MARKERS.archive.start = /^#+\s*Stage\s*3[:\s]+Archiving\s+Changes/i;

/**
 * Split AGENTS.md into per-command prompt texts using SECTION_MARKERS.
 *
 * A section opens at the first line matching `start` (inclusive) and closes
 * at the first subsequent non-blank line matching `end` (exclusive) or at
 * end of input. Sections whose start marker never matches are omitted.
 */
function parseAgentsMd(content: string): Record<string, string> {
  const lines = content.split('\n');
  const sections: Record<string, string> = {};

  for (const [sectionId, { start, end }] of Object.entries(SECTION_MARKERS)) {
    const startIdx = lines.findIndex((line) => start.test(line));
    if (startIdx === -1) {
      continue;
    }
    const collected: string[] = [lines[startIdx]];
    for (let i = startIdx + 1; i < lines.length; i++) {
      const line = lines[i];
      // A non-blank end-marker line belongs to the next section; stop here.
      if (end.test(line) && line.trim() !== '') {
        break;
      }
      collected.push(line);
    }
    const text = collected.join('\n').trim();
    if (text.length > 0) {
      sections[sectionId] = text;
    }
  }
  return sections;
}
/**
 * Fetch latest prompts from GitHub OpenSpec repository.
 * Updates all upstream commands by parsing AGENTS.md.
 *
 * Side-effect order: download AGENTS.md → resolve commit SHA (best-effort)
 * → write per-command .md files and metadata.json into the user prompts
 * directory → mirror the new metadata into the customizations file.
 * @returns the newly written metadata
 * @throws when AGENTS.md cannot be downloaded (no partial writes happen
 *         before that point)
 */
export async function refreshOpenSpecPrompts(): Promise<OpenSpecMetadata> {
  logger.info('Refreshing OpenSpec prompts from GitHub...', LOG_CONTEXT);
  // Fetch AGENTS.md from the repository
  const agentsMdUrl = 'https://raw.githubusercontent.com/Fission-AI/OpenSpec/main/openspec/AGENTS.md';
  const agentsResponse = await fetch(agentsMdUrl);
  if (!agentsResponse.ok) {
    throw new Error(`Failed to fetch AGENTS.md: ${agentsResponse.statusText}`);
  }
  const agentsMdContent = await agentsResponse.text();
  logger.info('Downloaded AGENTS.md', LOG_CONTEXT);
  // Parse the AGENTS.md content to extract sections
  const extractedPrompts = parseAgentsMd(agentsMdContent);
  logger.info(`Extracted ${Object.keys(extractedPrompts).length} sections from AGENTS.md`, LOG_CONTEXT);
  // Get the latest commit SHA for version tracking; failures are
  // non-fatal and fall back to the symbolic ref "main".
  let commitSha = 'main';
  try {
    const commitsResponse = await fetch('https://api.github.com/repos/Fission-AI/OpenSpec/commits/main', {
      headers: { 'User-Agent': 'Maestro-OpenSpec-Refresher' },
    });
    if (commitsResponse.ok) {
      const commitInfo = await commitsResponse.json() as { sha: string };
      commitSha = commitInfo.sha.substring(0, 7);
    }
  } catch {
    logger.warn('Could not fetch commit SHA, using "main"', LOG_CONTEXT);
  }
  // Create user prompts directory
  const userPromptsDir = getUserPromptsPath();
  await fs.mkdir(userPromptsDir, { recursive: true });
  // Save extracted prompts; a section missing from AGENTS.md only logs a
  // warning, so the previously downloaded (or bundled) copy stays in use.
  for (const cmdId of UPSTREAM_COMMANDS) {
    const promptContent = extractedPrompts[cmdId];
    if (promptContent) {
      const destPath = path.join(userPromptsDir, `openspec.${cmdId}.md`);
      await fs.writeFile(destPath, promptContent, 'utf8');
      logger.info(`Updated: openspec.${cmdId}.md`, LOG_CONTEXT);
    } else {
      logger.warn(`Could not extract ${cmdId} section from AGENTS.md`, LOG_CONTEXT);
    }
  }
  // Update metadata with new version info.
  // NOTE(review): sourceVersion is hard-coded to '0.1.0' — presumably a
  // placeholder until upstream publishes a version to read; confirm.
  const newMetadata: OpenSpecMetadata = {
    lastRefreshed: new Date().toISOString(),
    commitSha,
    sourceVersion: '0.1.0',
    sourceUrl: 'https://github.com/Fission-AI/OpenSpec',
  };
  // Save metadata to user prompts directory
  await fs.writeFile(
    path.join(userPromptsDir, 'metadata.json'),
    JSON.stringify(newMetadata, null, 2),
    'utf8'
  );
  // Also save to customizations file for compatibility
  const customizations = await loadUserCustomizations() ?? {
    metadata: newMetadata,
    prompts: {},
  };
  customizations.metadata = newMetadata;
  await saveUserCustomizations(customizations);
  logger.info(`Refreshed OpenSpec prompts (commit: ${commitSha})`, LOG_CONTEXT);
  return newMetadata;
}
/**
 * Look up a single OpenSpec command by its id (e.g. "proposal").
 * Resolves to null when no command has that id.
 */
export async function getOpenSpecCommand(id: string): Promise<OpenSpecCommand | null> {
  for (const cmd of await getOpenSpecPrompts()) {
    if (cmd.id === id) {
      return cmd;
    }
  }
  return null;
}
/**
 * Look up an OpenSpec command by its full slash string
 * (e.g. "/openspec.proposal"). Resolves to null when nothing matches.
 */
export async function getOpenSpecCommandBySlash(slashCommand: string): Promise<OpenSpecCommand | null> {
  const all = await getOpenSpecPrompts();
  const match = all.find((cmd) => cmd.command === slashCommand);
  return match ?? null;
}

View File

@@ -992,6 +992,75 @@ contextBridge.exposeInMainWorld('maestro', {
}>,
},
// OpenSpec API (bundled OpenSpec slash commands)
openspec: {
// Get metadata (version, last refresh date)
getMetadata: () =>
ipcRenderer.invoke('openspec:getMetadata') as Promise<{
success: boolean;
metadata?: {
lastRefreshed: string;
commitSha: string;
sourceVersion: string;
sourceUrl: string;
};
error?: string;
}>,
// Get all openspec prompts
getPrompts: () =>
ipcRenderer.invoke('openspec:getPrompts') as Promise<{
success: boolean;
commands?: Array<{
id: string;
command: string;
description: string;
prompt: string;
isCustom: boolean;
isModified: boolean;
}>;
error?: string;
}>,
// Get a single command by slash command string
getCommand: (slashCommand: string) =>
ipcRenderer.invoke('openspec:getCommand', slashCommand) as Promise<{
success: boolean;
command?: {
id: string;
command: string;
description: string;
prompt: string;
isCustom: boolean;
isModified: boolean;
} | null;
error?: string;
}>,
// Save user's edit to a prompt
savePrompt: (id: string, content: string) =>
ipcRenderer.invoke('openspec:savePrompt', id, content) as Promise<{
success: boolean;
error?: string;
}>,
// Reset a prompt to bundled default
resetPrompt: (id: string) =>
ipcRenderer.invoke('openspec:resetPrompt', id) as Promise<{
success: boolean;
prompt?: string;
error?: string;
}>,
// Refresh prompts from GitHub
refresh: () =>
ipcRenderer.invoke('openspec:refresh') as Promise<{
success: boolean;
metadata?: {
lastRefreshed: string;
commitSha: string;
sourceVersion: string;
sourceUrl: string;
};
error?: string;
}>,
},
// Notification API
notification: {
show: (title: string, body: string) =>
@@ -2373,6 +2442,61 @@ export interface MaestroAPI {
error?: string;
}>;
};
// OpenSpec API (bundled OpenSpec slash commands).
// NOTE(review): this interface mirrors the object exposed via contextBridge
// in this preload module — keep the signatures in sync with that object.
openspec: {
  // Metadata about the bundled prompts (version, last refresh date)
  getMetadata: () => Promise<{
    success: boolean;
    metadata?: {
      lastRefreshed: string; // ISO-8601 timestamp of the last refresh
      commitSha: string; // upstream commit the prompts came from
      sourceVersion: string; // upstream OpenSpec release version
      sourceUrl: string; // upstream repository URL
    };
    error?: string;
  }>;
  // All bundled openspec prompts
  getPrompts: () => Promise<{
    success: boolean;
    commands?: Array<{
      id: string;
      command: string; // slash command string, e.g. "/openspec.proposal"
      description: string;
      prompt: string;
      isCustom: boolean; // Maestro-specific addition vs upstream prompt
      isModified: boolean; // presumably: prompt differs from bundled default — confirm in manager
    }>;
    error?: string;
  }>;
  // Look up a single command by slash command string
  getCommand: (slashCommand: string) => Promise<{
    success: boolean;
    command?: {
      id: string;
      command: string;
      description: string;
      prompt: string;
      isCustom: boolean;
      isModified: boolean;
    } | null; // null when no command matches slashCommand
    error?: string;
  }>;
  // Persist a user's edit to a prompt
  savePrompt: (id: string, content: string) => Promise<{
    success: boolean;
    error?: string;
  }>;
  // Restore a prompt to its bundled default
  resetPrompt: (id: string) => Promise<{
    success: boolean;
    prompt?: string; // the restored bundled prompt text
    error?: string;
  }>;
  // Refresh prompts from the upstream GitHub repository
  refresh: () => Promise<{
    success: boolean;
    metadata?: {
      lastRefreshed: string;
      commitSha: string;
      sourceVersion: string;
      sourceUrl: string;
    };
    error?: string;
  }>;
};
}
declare global {

View File

@@ -0,0 +1,117 @@
/**
* OpenSpec prompts module
*
* Bundled prompts for the OpenSpec workflow from Fission-AI with our custom Maestro integration.
* These prompts are imported at build time using Vite's ?raw suffix.
*
* OpenSpec provides a structured change management workflow:
* - Proposal → Draft change specifications before coding
* - Apply → Implement tasks referencing agreed specs
* - Archive → Move completed work to archive after deployment
*
* Source: https://github.com/Fission-AI/OpenSpec
* Version: 0.1.0
*/
// Bundled OpenSpec prompts (extracted from upstream AGENTS.md)
import proposalPrompt from './openspec.proposal.md?raw';
import applyPrompt from './openspec.apply.md?raw';
import archivePrompt from './openspec.archive.md?raw';
// Custom Maestro prompts
import helpPrompt from './openspec.help.md?raw';
import implementPrompt from './openspec.implement.md?raw';
// Metadata
import metadataJson from './metadata.json';
/** A single bundled OpenSpec slash command. */
export interface OpenSpecCommandDefinition {
  /** Stable identifier used for lookups (e.g. "proposal"). */
  id: string;
  /** Slash command string as typed by the user (e.g. "/openspec.proposal"). */
  command: string;
  /** Short human-readable summary of what the command does. */
  description: string;
  /** Full prompt text (imported from the bundled markdown at build time). */
  prompt: string;
  /** True for Maestro-specific additions; false for prompts from upstream OpenSpec. */
  isCustom: boolean;
}
/** Provenance metadata for the bundled OpenSpec prompts (sourced from metadata.json). */
export interface OpenSpecMetadata {
  /** ISO-8601 timestamp of the last refresh from upstream. */
  lastRefreshed: string;
  /** Short commit SHA of the upstream revision the prompts were extracted from. */
  commitSha: string;
  /** Upstream OpenSpec release version (e.g. "0.1.0"). */
  sourceVersion: string;
  /** URL of the upstream repository the prompts are sourced from. */
  sourceUrl: string;
}
/**
 * All bundled OpenSpec commands
 *
 * Order here is the order the commands are surfaced. Entries with
 * `isCustom: true` are Maestro-specific additions; the rest are extracted
 * from upstream OpenSpec (see the module header above).
 */
export const openspecCommands: OpenSpecCommandDefinition[] = [
  // Maestro-specific: explains the OpenSpec workflow inside Maestro
  {
    id: 'help',
    command: '/openspec.help',
    description: 'Learn how to use OpenSpec with Maestro',
    prompt: helpPrompt,
    isCustom: true,
  },
  // Upstream stage 1: draft change specifications before coding
  {
    id: 'proposal',
    command: '/openspec.proposal',
    description: 'Create a change proposal with specs, tasks, and optional design docs',
    prompt: proposalPrompt,
    isCustom: false,
  },
  // Upstream stage 2: implement tasks referencing agreed specs
  {
    id: 'apply',
    command: '/openspec.apply',
    description: 'Implement an approved change proposal by executing tasks',
    prompt: applyPrompt,
    isCustom: false,
  },
  // Upstream stage 3: move completed work to archive after deployment
  {
    id: 'archive',
    command: '/openspec.archive',
    description: 'Archive a completed change after deployment',
    prompt: archivePrompt,
    isCustom: false,
  },
  // Maestro-specific: bridges OpenSpec tasks into Maestro's Auto Run feature
  {
    id: 'implement',
    command: '/openspec.implement',
    description: 'Convert OpenSpec tasks to Maestro Auto Run documents',
    prompt: implementPrompt,
    isCustom: true,
  },
];
/**
 * Get an OpenSpec command by ID.
 *
 * @param id - Stable command identifier (e.g. "proposal").
 * @returns The matching definition, or undefined when no command has that ID.
 */
export function getOpenSpecCommand(id: string): OpenSpecCommandDefinition | undefined {
  for (const definition of openspecCommands) {
    if (definition.id === id) {
      return definition;
    }
  }
  return undefined;
}
/**
 * Get an OpenSpec command by slash command string.
 *
 * @param command - Full slash command string (e.g. "/openspec.apply").
 * @returns The matching definition, or undefined when nothing matches.
 */
export function getOpenSpecCommandBySlash(command: string): OpenSpecCommandDefinition | undefined {
  for (const definition of openspecCommands) {
    if (definition.command === command) {
      return definition;
    }
  }
  return undefined;
}
/**
 * Get the metadata for bundled OpenSpec prompts.
 *
 * Copies the relevant fields out of the imported metadata.json so callers
 * receive a plain OpenSpecMetadata object rather than the raw JSON module.
 */
export function getOpenSpecMetadata(): OpenSpecMetadata {
  const { lastRefreshed, commitSha, sourceVersion, sourceUrl } = metadataJson;
  return { lastRefreshed, commitSha, sourceVersion, sourceUrl };
}
// Export individual prompts for direct access
export {
helpPrompt,
proposalPrompt,
applyPrompt,
archivePrompt,
implementPrompt,
};

View File

@@ -0,0 +1,6 @@
{
"lastRefreshed": "2025-12-27T17:46:57.133Z",
"commitSha": "8dcd170",
"sourceVersion": "0.1.0",
"sourceUrl": "https://github.com/Fission-AI/OpenSpec"
}

View File

@@ -0,0 +1,9 @@
### Stage 2: Implementing Changes
Track these steps as TODOs and complete them one by one.
1. **Read proposal.md** - Understand what's being built
2. **Read design.md** (if exists) - Review technical decisions
3. **Read tasks.md** - Get implementation checklist
4. **Implement tasks sequentially** - Complete in order
5. **Confirm completion** - Ensure every item in `tasks.md` is finished before updating statuses
6. **Update checklist** - After all work is done, set every task to `- [x]` so the list reflects reality
7. **Approval gate** - Do not start implementation until the proposal is reviewed and approved

View File

@@ -0,0 +1,6 @@
### Stage 3: Archiving Changes
After deployment, create separate PR to:
- Move `changes/[name]/` → `changes/archive/YYYY-MM-DD-[name]/`
- Update `specs/` if capabilities changed
- Use `openspec archive <change-id> --skip-specs --yes` for tooling-only changes (always pass the change ID explicitly)
- Run `openspec validate --strict` to confirm the archived change passes checks

View File

@@ -0,0 +1,147 @@
# OpenSpec Help
You are explaining how to use **OpenSpec** within Maestro. OpenSpec is a spec-driven development tool from [Fission-AI/OpenSpec](https://github.com/Fission-AI/OpenSpec) that provides a structured workflow for managing code changes through specifications.
## What is OpenSpec?
OpenSpec implements a three-stage workflow for managing code changes:
1. **Proposal** - Draft change specifications before coding begins
2. **Apply** - Implement tasks referencing agreed specs
3. **Archive** - Move completed work to archive after deployment
Unlike spec-kit (which focuses on creating feature specifications), OpenSpec specializes in **change management** - tracking modifications to an existing system through detailed spec deltas.
## Key Differences from Spec-Kit
| Aspect | Spec-Kit | OpenSpec |
|--------|----------|----------|
| Focus | Feature specification | Change management |
| Output | Feature specs, plans, tasks | Proposals, spec deltas |
| Workflow | Constitution → Specify → Plan | Proposal → Apply → Archive |
| Artifact | `specs/[feature]/` | `openspec/changes/[id]/` |
| When to use | New features | Modifying existing features |
**Use spec-kit for**: New features, greenfield development, establishing project foundations
**Use OpenSpec for**: Feature modifications, breaking changes, refactoring, migrations
## Directory Structure
OpenSpec uses this directory layout:
```
openspec/
├── project.md # Project conventions and context
├── specs/ # Deployed specifications (truth)
│ └── [capability]/
│ └── spec.md
└── changes/ # Proposed modifications (in progress)
├── [change-id]/
│ ├── proposal.md # What and why
│ ├── tasks.md # Implementation checklist
│ ├── design.md # Optional technical decisions
│ └── specs/ # Spec deltas
│ └── [capability]/
│ └── spec.md
└── archive/ # Completed changes
└── YYYY-MM-DD-[change-id]/
```
## Core Commands
### `/openspec.proposal` - Create Change Proposal
Start here when modifying existing functionality. This command helps you:
- Review existing specs and active changes
- Choose a unique change-id (kebab-case, verb-led like `add-`, `update-`, `remove-`)
- Scaffold `proposal.md`, `tasks.md`, and spec deltas
- Validate your proposal before sharing
### `/openspec.apply` - Implement Changes
Use after your proposal is approved. This command guides you through:
- Reading the proposal and design documents
- Following the tasks checklist sequentially
- Marking tasks complete as you work
- Ensuring all items are finished before deployment
### `/openspec.archive` - Archive Completed Changes
Use after deployment to finalize the change:
- Move change directory to archive with date prefix
- Update main specs if capabilities changed
- Validate the archived change passes all checks
### `/openspec.implement` - Execute with Maestro Auto Run
**Maestro-specific command.** Converts your OpenSpec tasks into Auto Run documents:
- Read proposal and tasks from a specified change
- Convert to Auto Run document format with checkboxes
- Support worktree mode for parallel execution
- Group related tasks into phases
## Spec Delta Format
When modifying existing specs, OpenSpec uses operation headers:
```markdown
## ADDED Requirements
New standalone capabilities
## MODIFIED Requirements
Changed behavior of existing requirements
## REMOVED Requirements
Deprecated features (include Reason and Migration)
## RENAMED Requirements
Name-only changes (no behavior change)
```
Each requirement needs at least one scenario:
```markdown
#### Scenario: User login success
- **WHEN** valid credentials provided
- **THEN** return JWT token
```
## Validation Commands
Always validate before sharing your proposal:
```bash
openspec validate <change-id> --strict # Comprehensive validation
openspec list # View active changes
openspec list --specs # List existing specs
openspec show <change-id> # Display change details
```
## Integration with Maestro Auto Run
OpenSpec works seamlessly with Maestro's Auto Run feature:
1. **Create proposal** with `/openspec.proposal`
2. **Get approval** from stakeholders
3. **Use `/openspec.implement`** to generate Auto Run documents
4. Documents are saved to `Auto Run Docs/` in your project
5. Each task becomes a checkbox item that Auto Run executes
6. Complete tasks are marked with implementation notes
7. **Archive** with `/openspec.archive` after deployment
## Tips for Best Results
- **Always review `project.md`** - Understand project conventions first
- **Check existing changes** - Run `openspec list` to avoid conflicts
- **Use verb-led IDs** - `add-auth`, `update-api`, `remove-legacy`
- **Include scenarios** - Every requirement needs at least one test scenario
- **Validate early** - Run validation before sharing proposals
- **Respect the approval gate** - Don't implement until proposal is approved
- **Archive promptly** - Clean up after deployment to keep changes directory focused
## Learn More
- [OpenSpec Repository](https://github.com/Fission-AI/OpenSpec) - Official documentation
- OpenSpec prompts update automatically when you click "Check for Updates" in Maestro settings
- Custom modifications to prompts are preserved across updates
---
*This help command is a Maestro-specific addition to the OpenSpec workflow.*

View File

@@ -0,0 +1,122 @@
---
description: Convert OpenSpec tasks to Maestro Auto Run documents for automated implementation.
---
You are an expert at converting OpenSpec change proposals into actionable Maestro Auto Run documents.
## User Input
```text
$ARGUMENTS
```
The user input may contain:
- A change ID (e.g., `001-add-auth`, `feature-search`)
- A path to an OpenSpec change directory
- Empty (you should scan for changes in `openspec/changes/`)
## Your Task
1. **Locate the OpenSpec change** in `openspec/changes/<change-id>/`
2. **Read the `tasks.md`** file (and optionally `proposal.md` for context)
3. **Generate Auto Run documents** using the format below
4. **Save to `Auto Run Docs/`** folder
## Critical Requirements
Each Auto Run document MUST:
1. **Be Completely Self-Contained**: Each phase must be executable without ANY user input during execution. The AI should be able to start and complete each phase entirely on its own.
2. **Deliver Working Progress**: By the end of each phase, there should be something tangible that works - testable code, runnable features, or verifiable changes.
3. **Reference OpenSpec Context**: Include links to the proposal and relevant spec files so the executing AI understands the full context.
4. **Preserve Task IDs**: Keep the original task identifiers (T001, T002, etc.) from OpenSpec for traceability.
## Document Format
Each Auto Run document MUST follow this exact format:
```markdown
# Phase XX: [Brief Title]
[One paragraph describing what this phase accomplishes and why it matters]
## OpenSpec Context
- **Change ID:** <change-id>
- **Proposal:** openspec/changes/<change-id>/proposal.md
- **Design:** openspec/changes/<change-id>/design.md (if exists)
## Tasks
- [ ] T001 First specific task to complete
- [ ] T002 Second specific task to complete
- [ ] Continue with more tasks...
## Completion
- [ ] Verify all changes work as expected
- [ ] Run `openspec validate <change-id>` (if available)
```
## Task Writing Guidelines
Each task should be:
- **Specific**: Not "set up the feature" but "Create UserAuthService class with login/logout methods"
- **Actionable**: Clear what needs to be done
- **Verifiable**: You can tell when it's complete
- **Autonomous**: Can be done without asking the user questions
Preserve any markers from the original tasks.md:
- `[P]` = Parallelizable (can run with other `[P]` tasks)
- Task IDs (T001, T002, etc.) for traceability
## Phase Guidelines
- **Phase 1**: Foundation + Setup (dependencies, configuration, scaffolding)
- **Phase 2-N**: Feature implementation by logical grouping
- Each phase should build on the previous
- Keep phases focused (5-15 tasks typically)
- Group related tasks that share context
## Output Format
Create each document as a file in the `Auto Run Docs/` folder with this naming pattern:
```
Auto Run Docs/OpenSpec-<change-id>-Phase-01-[Description].md
Auto Run Docs/OpenSpec-<change-id>-Phase-02-[Description].md
```
## Execution Steps
1. **Find the OpenSpec change**:
- If change ID provided, look in `openspec/changes/<change-id>/`
- If no ID, list available changes in `openspec/changes/` and ask user to select
2. **Read the source files**:
- `tasks.md` - The implementation checklist (REQUIRED)
- `proposal.md` - Context about what and why (recommended)
- `design.md` - Technical decisions if exists (optional)
3. **Analyze and group tasks**:
- Identify logical phases (setup, core features, testing, etc.)
- Preserve task dependencies (non-`[P]` tasks run sequentially)
- Keep related tasks together in the same phase
4. **Generate Auto Run documents**:
- One document per phase
- Use the exact format shown in the Document Format section above
- Include OpenSpec context in each document
5. **Save the documents**:
- Files go to `Auto Run Docs/` folder
- Filename pattern: `OpenSpec-<change-id>-Phase-XX-[Description].md`
## Now Execute
Read the OpenSpec change (from user input or by scanning `openspec/changes/`) and generate the Auto Run documents. Start with Phase 1 (setup/foundation), then create additional phases as needed.
If no change ID is provided and multiple changes exist, list them and ask which one to implement.

View File

@@ -0,0 +1,31 @@
### Stage 1: Creating Changes
Create proposal when you need to:
- Add features or functionality
- Make breaking changes (API, schema)
- Change architecture or patterns
- Optimize performance (changes behavior)
- Update security patterns
Triggers (examples):
- "Help me create a change proposal"
- "Help me plan a change"
- "Help me create a proposal"
- "I want to create a spec proposal"
- "I want to create a spec"
Loose matching guidance:
- Contains one of: `proposal`, `change`, `spec`
- With one of: `create`, `plan`, `make`, `start`, `help`
Skip proposal for:
- Bug fixes (restore intended behavior)
- Typos, formatting, comments
- Dependency updates (non-breaking)
- Configuration changes
- Tests for existing behavior
**Workflow**
1. Review `openspec/project.md`, `openspec list`, and `openspec list --specs` to understand current context.
2. Choose a unique verb-led `change-id` and scaffold `proposal.md`, `tasks.md`, optional `design.md`, and spec deltas under `openspec/changes/<id>/`.
3. Draft spec deltas using `## ADDED|MODIFIED|REMOVED Requirements` with at least one `#### Scenario:` per requirement.
4. Run `openspec validate <id> --strict` and resolve any issues before sharing the proposal.

View File

@@ -1,126 +1,122 @@
---
description: Execute spec-kit tasks using Maestro's Auto Run feature with optional git worktree support for parallel implementation.
description: Convert Spec Kit tasks to Maestro Auto Run documents for automated implementation.
---
You are an expert at converting Spec Kit feature specifications into actionable Maestro Auto Run documents.
## User Input
```text
$ARGUMENTS
```
You **MUST** consider the user input before proceeding (if not empty).
The user input may contain:
- A feature name or spec directory path (e.g., `user-auth`, `specs/1-my-feature`)
- Empty (you should scan for specs in `specs/` directory)
## Overview
## Your Task
This command guides you through implementing your spec-kit feature using Maestro's powerful automation capabilities. The `tasks.md` file generated by `/speckit.tasks` is designed to work seamlessly with Maestro's Auto Run feature.
1. **Locate the Spec Kit feature** in `specs/<feature-name>/`
2. **Read the `tasks.md`** file (and optionally `specification.md` for context)
3. **Generate Auto Run documents** using the format below
4. **Save to `Auto Run Docs/`** folder
## Implementation Workflow
## Critical Requirements
### Step 1: Locate Your Tasks File
Each Auto Run document MUST:
Your `tasks.md` file is located in your feature's spec directory (e.g., `specs/1-my-feature/tasks.md`). This file contains all implementation tasks in a checkbox format that Auto Run can process automatically.
1. **Be Completely Self-Contained**: Each phase must be executable without ANY user input during execution. The AI should be able to start and complete each phase entirely on its own.
### Step 2: Configure Auto Run
2. **Deliver Working Progress**: By the end of each phase, there should be something tangible that works - testable code, runnable features, or verifiable changes.
1. **Open the Right Bar** in Maestro (press `Cmd/Ctrl + B` or click the sidebar toggle)
2. **Select the "Auto Run" tab**
3. **Set the Auto Run folder** to your spec-kit documents directory:
- Click the folder icon or use the folder selector
- Navigate to your feature's spec directory (e.g., `specs/1-my-feature/`)
4. **Select your `tasks.md` file** from the document list
3. **Reference Spec Kit Context**: Include links to the specification and relevant planning docs so the executing AI understands the full context.
### Step 3: Start Automated Implementation
4. **Preserve Task IDs**: Keep the original task identifiers (T001, T002, etc.) and user story markers ([US1], [US2]) from Spec Kit for traceability.
Once configured, Auto Run will:
- Read each task from `tasks.md`
- Execute tasks sequentially (respecting dependencies)
- Mark tasks as completed (`[X]`) with implementation notes
- Handle parallel tasks (`[P]` marker) when possible
## Document Format
**To start**: Click the "Run" button or press `Cmd/Ctrl + Enter` in the Auto Run panel.
## Advanced: Parallel Implementation with Git Worktrees
For larger features with independent components, you can use git worktrees to implement multiple parts in parallel across different Maestro sessions.
### What are Worktrees?
Git worktrees allow you to have multiple working directories for the same repository, each on a different branch. This enables:
- Multiple AI agents working on different feature branches simultaneously
- Isolated changes that won't conflict during development
- Easy merging when components are complete
### Setting Up Parallel Implementation
1. **Identify Independent Tasks**: Look for tasks marked with `[P]` in your `tasks.md` that can run in parallel.
2. **Create Worktrees in Maestro**:
- In each session, Maestro can automatically create a worktree for the feature branch
- Use the worktree toggle in Auto Run to enable worktree mode
- Each session gets its own isolated working directory
3. **Assign Tasks to Sessions**:
- Session 1: Phase 1 (Setup) + User Story 1 tasks
- Session 2: User Story 2 tasks (if independent)
- Session 3: User Story 3 tasks (if independent)
4. **Merge When Complete**:
- Each session commits to its feature branch
- Use Maestro's git integration to merge branches
- Or merge manually: `git merge session-branch`
### Worktree Commands
Maestro handles worktrees automatically, but for manual setup:
```bash
# Create a worktree for a feature branch
git worktree add ../my-feature-worktree feature-branch
# List existing worktrees
git worktree list
# Remove a worktree when done
git worktree remove ../my-feature-worktree
```
## Best Practices
1. **Complete Setup Phase First**: Always complete Phase 1 (Setup) before parallelizing user stories.
2. **Respect Dependencies**: Tasks without the `[P]` marker should run sequentially.
3. **Review Before Merging**: Use `/speckit.analyze` after implementation to verify consistency.
4. **Incremental Testing**: Each user story phase should be independently testable. Verify before moving to the next.
5. **Use Checklists**: If you created checklists with `/speckit.checklist`, verify them before marking the feature complete.
## Task Format Reference
Your `tasks.md` uses this format that Auto Run understands:
Each Auto Run document MUST follow this exact format:
```markdown
- [ ] T001 Setup project structure
- [ ] T002 [P] Configure database connection
- [ ] T003 [P] [US1] Create User model in src/models/user.py
- [ ] T004 [US1] Implement UserService
# Phase XX: [Brief Title]
[One paragraph describing what this phase accomplishes and why it matters]
## Spec Kit Context
- **Feature:** <feature-name>
- **Specification:** specs/<feature-name>/specification.md
- **Plan:** specs/<feature-name>/plan.md (if exists)
## Tasks
- [ ] T001 First specific task to complete
- [ ] T002 Second specific task to complete
- [ ] Continue with more tasks...
## Completion
- [ ] Verify all changes work as expected
- [ ] Run `/speckit.analyze` to verify consistency
```
- `- [ ]` = Incomplete task (Auto Run will process)
- `- [X]` = Completed task (Auto Run will skip)
- `[P]` = Parallelizable (can run with other [P] tasks)
- `[US1]` = User Story 1 (groups related tasks)
## Task Writing Guidelines
## Getting Help
Each task should be:
- **Specific**: Not "set up the feature" but "Create UserAuthService class with login/logout methods"
- **Actionable**: Clear what needs to be done
- **Verifiable**: You can tell when it's complete
- **Autonomous**: Can be done without asking the user questions
- **View task progress**: Check the Auto Run panel in the Right Bar
- **See implementation details**: Each completed task includes notes about what was done
- **Troubleshoot issues**: Check the session logs in the main terminal view
Preserve any markers from the original tasks.md:
- `[P]` = Parallelizable (can run with other `[P]` tasks)
- `[US1]`, `[US2]` = User Story groupings
- Task IDs (T001, T002, etc.) for traceability
## Next Steps After Implementation
## Phase Guidelines
1. Run `/speckit.analyze` to verify implementation consistency
2. Complete any remaining checklists
3. Run tests to ensure everything works
4. Create a pull request with your changes
- **Phase 1**: Foundation + Setup (dependencies, configuration, scaffolding)
- **Phase 2-N**: Feature implementation by user story or logical grouping
- Each phase should build on the previous
- Keep phases focused (5-15 tasks typically)
- Group related tasks (same user story) together
## Output Format
Create each document as a file in the `Auto Run Docs/` folder with this naming pattern:
```
Auto Run Docs/SpecKit-<feature-name>-Phase-01-[Description].md
Auto Run Docs/SpecKit-<feature-name>-Phase-02-[Description].md
```
## Execution Steps
1. **Find the Spec Kit feature**:
- If feature name provided, look in `specs/<feature-name>/`
- If no name, list available specs in `specs/` and ask user to select
2. **Read the source files**:
- `tasks.md` - The implementation checklist (REQUIRED)
- `specification.md` - Feature specification (recommended)
- `plan.md` - Implementation plan if exists (optional)
3. **Analyze and group tasks**:
- Identify logical phases (setup, user stories, testing, etc.)
- Preserve task dependencies (non-`[P]` tasks run sequentially)
- Keep related tasks (same `[US]` marker) together in the same phase
4. **Generate Auto Run documents**:
- One document per phase
- Use the exact format shown in the Document Format section above
- Include Spec Kit context in each document
5. **Save the documents**:
- Files go to `Auto Run Docs/` folder
- Filename pattern: `SpecKit-<feature-name>-Phase-XX-[Description].md`
## Now Execute
Read the Spec Kit feature (from user input or by scanning `specs/`) and generate the Auto Run documents. Start with Phase 1 (setup/foundation), then create additional phases as needed.
If no feature name is provided and multiple specs exist, list them and ask which one to implement.

View File

@@ -90,6 +90,7 @@ import { ToastContainer } from './components/Toast';
// Import services
import { gitService } from './services/git';
import { getSpeckitCommands } from './services/speckit';
import { getOpenSpecCommands } from './services/openspec';
// Import prompts and synopsis parsing
import { autorunSynopsisPrompt, maestroSystemPrompt } from '../prompts';
@@ -101,7 +102,7 @@ import type {
ToolType, SessionState, RightPanelTab,
FocusArea, LogEntry, Session, AITab, UsageStats, QueuedItem, BatchRunConfig,
AgentError, BatchRunState, GroupChatMessage,
SpecKitCommand, LeaderboardRegistration, CustomAICommand
SpecKitCommand, OpenSpecCommand, LeaderboardRegistration, CustomAICommand
} from './types';
import { THEMES } from './constants/themes';
import { generateId } from './utils/ids';
@@ -315,6 +316,9 @@ function MaestroConsoleInner() {
// Spec Kit commands (loaded from bundled prompts)
const [speckitCommands, setSpeckitCommands] = useState<SpecKitCommand[]>([]);
// OpenSpec commands (loaded from bundled prompts)
const [openspecCommands, setOpenspecCommands] = useState<OpenSpecCommand[]>([]);
// --- GROUP CHAT STATE (Phase 4: extracted to GroupChatContext) ---
// Note: groupChatsExpanded remains here as it's a UI layout concern (already in UILayoutContext)
const [groupChatsExpanded, setGroupChatsExpanded] = useState(true);
@@ -1141,6 +1145,19 @@ function MaestroConsoleInner() {
loadSpeckitCommands();
}, []);
// Load OpenSpec commands on startup
useEffect(() => {
const loadOpenspecCommands = async () => {
try {
const commands = await getOpenSpecCommands();
setOpenspecCommands(commands);
} catch (error) {
console.error('[OpenSpec] Failed to load commands:', error);
}
};
loadOpenspecCommands();
}, []);
// Set up process event listeners for real-time output
useEffect(() => {
// Copy ref value to local variable for cleanup (React ESLint rule)
@@ -2372,10 +2389,12 @@ function MaestroConsoleInner() {
const updateGlobalStatsRef = useRef(updateGlobalStats);
const customAICommandsRef = useRef(customAICommands);
const speckitCommandsRef = useRef(speckitCommands);
const openspecCommandsRef = useRef(openspecCommands);
addToastRef.current = addToast;
updateGlobalStatsRef.current = updateGlobalStats;
customAICommandsRef.current = customAICommands;
speckitCommandsRef.current = speckitCommands;
openspecCommandsRef.current = openspecCommands;
// Note: spawnBackgroundSynopsisRef and spawnAgentWithPromptRef are now provided by useAgentExecution hook
// Note: addHistoryEntryRef is now provided by useAgentSessionManagement hook
@@ -3131,8 +3150,8 @@ function MaestroConsoleInner() {
});
}, [activeSession, canSummarize, minContextUsagePercent, startSummarize, setSessions, addToast, clearTabState]);
// Combine custom AI commands with spec-kit commands for input processing (slash command execution)
// This ensures speckit commands are processed the same way as custom commands
// Combine custom AI commands with spec-kit and openspec commands for input processing (slash command execution)
// This ensures speckit and openspec commands are processed the same way as custom commands
const allCustomCommands = useMemo((): CustomAICommand[] => {
// Convert speckit commands to CustomAICommand format
const speckitAsCustom: CustomAICommand[] = speckitCommands.map(cmd => ({
@@ -3142,10 +3161,18 @@ function MaestroConsoleInner() {
prompt: cmd.prompt,
isBuiltIn: true, // Speckit commands are built-in (bundled)
}));
return [...customAICommands, ...speckitAsCustom];
}, [customAICommands, speckitCommands]);
// Convert openspec commands to CustomAICommand format
const openspecAsCustom: CustomAICommand[] = openspecCommands.map(cmd => ({
id: `openspec-${cmd.id}`,
command: cmd.command,
description: cmd.description,
prompt: cmd.prompt,
isBuiltIn: true, // OpenSpec commands are built-in (bundled)
}));
return [...customAICommands, ...speckitAsCustom, ...openspecAsCustom];
}, [customAICommands, speckitCommands, openspecCommands]);
// Combine built-in slash commands with custom AI commands, spec-kit commands, AND agent-specific commands for autocomplete
// Combine built-in slash commands with custom AI commands, spec-kit commands, openspec commands, AND agent-specific commands for autocomplete
const allSlashCommands = useMemo(() => {
const customCommandsAsSlash = customAICommands
.map(cmd => ({
@@ -3163,6 +3190,15 @@ function MaestroConsoleInner() {
prompt: cmd.prompt, // Include prompt for execution
isSpeckit: true, // Mark as spec-kit command for special handling
}));
// OpenSpec commands (bundled from Fission-AI/OpenSpec)
const openspecCommandsAsSlash = openspecCommands
.map(cmd => ({
command: cmd.command,
description: cmd.description,
aiOnly: true, // OpenSpec commands are only available in AI mode
prompt: cmd.prompt, // Include prompt for execution
isOpenspec: true, // Mark as openspec command for special handling
}));
// Only include agent-specific commands if the agent supports slash commands
// This allows built-in and custom commands to be shown for all agents (Codex, OpenCode, etc.)
const agentCommands = hasActiveSessionCapability('supportsSlashCommands')
@@ -3172,8 +3208,8 @@ function MaestroConsoleInner() {
aiOnly: true, // Agent commands are only available in AI mode
}))
: [];
return [...slashCommands, ...customCommandsAsSlash, ...speckitCommandsAsSlash, ...agentCommands];
}, [customAICommands, speckitCommands, activeSession?.agentCommands, hasActiveSessionCapability]);
return [...slashCommands, ...customCommandsAsSlash, ...speckitCommandsAsSlash, ...openspecCommandsAsSlash, ...agentCommands];
}, [customAICommands, speckitCommands, openspecCommands, activeSession?.agentCommands, hasActiveSessionCapability]);
// Derive current input value and setter based on active session mode
// For AI mode: use active tab's inputValue (stored per-tab)
@@ -5945,10 +5981,15 @@ function MaestroConsoleInner() {
cmd => cmd.command === commandText
);
const matchingCommand = matchingCustomCommand || matchingSpeckitCommand;
// Look up in openspec commands
const matchingOpenspecCommand = openspecCommandsRef.current.find(
cmd => cmd.command === commandText
);
const matchingCommand = matchingCustomCommand || matchingSpeckitCommand || matchingOpenspecCommand;
if (matchingCommand) {
console.log('[Remote] Found matching command:', matchingCommand.command, matchingSpeckitCommand ? '(spec-kit)' : '(custom)');
console.log('[Remote] Found matching command:', matchingCommand.command, matchingSpeckitCommand ? '(spec-kit)' : matchingOpenspecCommand ? '(openspec)' : '(custom)');
// Get git branch for template substitution
let gitBranch: string | undefined;
@@ -6269,10 +6310,11 @@ function MaestroConsoleInner() {
sessionCustomContextWindow: session.customContextWindow,
});
} else if (item.type === 'command' && item.command) {
// Process a slash command - find the matching custom AI command or speckit command
// Process a slash command - find the matching custom AI command, speckit command, or openspec command
// Use refs to get latest values and avoid stale closure
const matchingCommand = customAICommandsRef.current.find(cmd => cmd.command === item.command)
|| speckitCommandsRef.current.find(cmd => cmd.command === item.command);
|| speckitCommandsRef.current.find(cmd => cmd.command === item.command)
|| openspecCommandsRef.current.find(cmd => cmd.command === item.command);
if (matchingCommand) {
// Substitute template variables
let gitBranch: string | undefined;
@@ -7702,6 +7744,19 @@ function MaestroConsoleInner() {
}));
}, [activeSession, getActiveTab]);
const handlePromptToggleEnterToSend = useCallback(() => setEnterToSendAI(!enterToSendAI), [enterToSendAI]);
// OpenSpec command injection - sets prompt content into input field
const handleInjectOpenSpecPrompt = useCallback((prompt: string) => {
if (activeGroupChatId) {
// Update group chat draft
setGroupChats(prev => prev.map(c =>
c.id === activeGroupChatId ? { ...c, draftMessage: prompt } : c
));
} else {
setInputValue(prompt);
}
// Focus the input so user can edit/send the injected prompt
setTimeout(() => inputRef.current?.focus(), 0);
}, [activeGroupChatId, setInputValue]);
// QuickActionsModal stable callbacks
const handleQuickActionsRenameTab = useCallback(() => {
@@ -8291,6 +8346,7 @@ function MaestroConsoleInner() {
isFilePreviewOpen={previewFile !== null}
ghCliAvailable={ghCliAvailable}
onPublishGist={() => setGistPublishModalOpen(true)}
onInjectOpenSpecPrompt={handleInjectOpenSpecPrompt}
lightboxImage={lightboxImage}
lightboxImages={lightboxImages}
stagedImages={stagedImages}

View File

@@ -749,6 +749,8 @@ export interface AppUtilityModalsProps {
autoRunSelectedDocument: string | null;
autoRunCompletedTaskCount: number;
onAutoRunResetTasks: () => void;
// OpenSpec commands
onInjectOpenSpecPrompt?: (prompt: string) => void;
// Gist publishing (for QuickActionsModal)
isFilePreviewOpen: boolean;
@@ -920,6 +922,8 @@ export function AppUtilityModals({
isFilePreviewOpen,
ghCliAvailable,
onPublishGist,
// OpenSpec commands
onInjectOpenSpecPrompt,
// LightboxModal
lightboxImage,
lightboxImages,
@@ -1061,6 +1065,7 @@ export function AppUtilityModals({
isFilePreviewOpen={isFilePreviewOpen}
ghCliAvailable={ghCliAvailable}
onPublishGist={onPublishGist}
onInjectOpenSpecPrompt={onInjectOpenSpecPrompt}
/>
)}
@@ -1740,6 +1745,8 @@ export interface AppModalsProps {
isFilePreviewOpen: boolean;
ghCliAvailable: boolean;
onPublishGist?: () => void;
// OpenSpec commands
onInjectOpenSpecPrompt?: (prompt: string) => void;
lightboxImage: string | null;
lightboxImages: string[];
stagedImages: string[];
@@ -2007,6 +2014,8 @@ export function AppModals(props: AppModalsProps) {
isFilePreviewOpen,
ghCliAvailable,
onPublishGist,
// OpenSpec commands
onInjectOpenSpecPrompt,
lightboxImage,
lightboxImages,
stagedImages,
@@ -2291,6 +2300,7 @@ export function AppModals(props: AppModalsProps) {
isFilePreviewOpen={isFilePreviewOpen}
ghCliAvailable={ghCliAvailable}
onPublishGist={onPublishGist}
onInjectOpenSpecPrompt={onInjectOpenSpecPrompt}
lightboxImage={lightboxImage}
lightboxImages={lightboxImages}
stagedImages={stagedImages}

View File

@@ -0,0 +1,376 @@
import React, { useState, useRef, useEffect } from 'react';
import { Edit2, Save, X, RotateCcw, RefreshCw, ExternalLink, ChevronDown, ChevronRight, GitBranch } from 'lucide-react';
import type { Theme, OpenSpecCommand, OpenSpecMetadata } from '../types';
import { useTemplateAutocomplete } from '../hooks';
import { TemplateAutocompleteDropdown } from './TemplateAutocompleteDropdown';
interface OpenSpecCommandsPanelProps {
theme: Theme;
}
interface EditingCommand {
id: string;
prompt: string;
}
export function OpenSpecCommandsPanel({ theme }: OpenSpecCommandsPanelProps) {
const [commands, setCommands] = useState<OpenSpecCommand[]>([]);
const [metadata, setMetadata] = useState<OpenSpecMetadata | null>(null);
const [editingCommand, setEditingCommand] = useState<EditingCommand | null>(null);
const [isRefreshing, setIsRefreshing] = useState(false);
const [expandedCommands, setExpandedCommands] = useState<Set<string>>(new Set());
const [isLoading, setIsLoading] = useState(true);
const editCommandTextareaRef = useRef<HTMLTextAreaElement>(null);
// Template autocomplete for edit command prompt
const {
autocompleteState: editAutocompleteState,
handleKeyDown: handleEditAutocompleteKeyDown,
handleChange: handleEditAutocompleteChange,
selectVariable: selectEditVariable,
autocompleteRef: editAutocompleteRef,
} = useTemplateAutocomplete({
textareaRef: editCommandTextareaRef,
value: editingCommand?.prompt || '',
onChange: (value) => editingCommand && setEditingCommand({ ...editingCommand, prompt: value }),
});
// Load commands and metadata on mount
useEffect(() => {
const loadData = async () => {
setIsLoading(true);
try {
const [promptsResult, metadataResult] = await Promise.all([
window.maestro.openspec.getPrompts(),
window.maestro.openspec.getMetadata(),
]);
if (promptsResult.success && promptsResult.commands) {
setCommands(promptsResult.commands);
}
if (metadataResult.success && metadataResult.metadata) {
setMetadata(metadataResult.metadata);
}
} catch (error) {
console.error('Failed to load openspec commands:', error);
} finally {
setIsLoading(false);
}
};
loadData();
}, []);
const handleSaveEdit = async () => {
if (!editingCommand) return;
try {
const result = await window.maestro.openspec.savePrompt(editingCommand.id, editingCommand.prompt);
if (result.success) {
setCommands(commands.map(cmd =>
cmd.id === editingCommand.id
? { ...cmd, prompt: editingCommand.prompt, isModified: true }
: cmd
));
setEditingCommand(null);
}
} catch (error) {
console.error('Failed to save prompt:', error);
}
};
const handleReset = async (id: string) => {
try {
const result = await window.maestro.openspec.resetPrompt(id);
if (result.success && result.prompt) {
setCommands(commands.map(cmd =>
cmd.id === id
? { ...cmd, prompt: result.prompt!, isModified: false }
: cmd
));
}
} catch (error) {
console.error('Failed to reset prompt:', error);
}
};
const handleRefresh = async () => {
setIsRefreshing(true);
try {
const result = await window.maestro.openspec.refresh();
if (result.success && result.metadata) {
setMetadata(result.metadata);
// Reload prompts after refresh
const promptsResult = await window.maestro.openspec.getPrompts();
if (promptsResult.success && promptsResult.commands) {
setCommands(promptsResult.commands);
}
}
} catch (error) {
console.error('Failed to refresh openspec prompts:', error);
} finally {
setIsRefreshing(false);
}
};
const handleCancelEdit = () => {
setEditingCommand(null);
};
const toggleExpanded = (id: string) => {
const newExpanded = new Set(expandedCommands);
if (newExpanded.has(id)) {
newExpanded.delete(id);
} else {
newExpanded.add(id);
}
setExpandedCommands(newExpanded);
};
const formatDate = (isoDate: string) => {
try {
return new Date(isoDate).toLocaleDateString(undefined, {
year: 'numeric',
month: 'short',
day: 'numeric',
});
} catch {
return isoDate;
}
};
if (isLoading) {
return (
<div className="space-y-4">
<div>
<label className="block text-xs font-bold opacity-70 uppercase mb-1 flex items-center gap-2">
<GitBranch className="w-3 h-3" />
OpenSpec Commands
</label>
<p className="text-xs opacity-50" style={{ color: theme.colors.textDim }}>
Loading OpenSpec commands...
</p>
</div>
</div>
);
}
return (
<div className="space-y-4">
<div>
<label className="block text-xs font-bold opacity-70 uppercase mb-1 flex items-center gap-2">
<GitBranch className="w-3 h-3" />
OpenSpec Commands
</label>
<p className="text-xs opacity-50" style={{ color: theme.colors.textDim }}>
Change management commands from{' '}
<button
onClick={() => window.maestro.shell.openExternal('https://github.com/Fission-AI/OpenSpec')}
className="underline hover:opacity-80 inline-flex items-center gap-1"
style={{ color: theme.colors.accent, background: 'none', border: 'none', cursor: 'pointer', padding: 0 }}
>
Fission-AI/OpenSpec
<ExternalLink className="w-2.5 h-2.5" />
</button>
{' '}for structured change proposals.
</p>
</div>
{/* Metadata and refresh */}
{metadata && (
<div
className="flex items-center justify-between p-3 rounded-lg border"
style={{ backgroundColor: theme.colors.bgMain, borderColor: theme.colors.border }}
>
<div className="text-xs" style={{ color: theme.colors.textDim }}>
<span>Version: </span>
<span className="font-mono" style={{ color: theme.colors.textMain }}>
{metadata.sourceVersion}
</span>
<span className="mx-2"></span>
<span>Updated: </span>
<span style={{ color: theme.colors.textMain }}>
{formatDate(metadata.lastRefreshed)}
</span>
</div>
<button
onClick={handleRefresh}
disabled={isRefreshing}
className="flex items-center gap-1.5 px-3 py-1.5 rounded text-xs font-medium transition-all disabled:opacity-50"
style={{
backgroundColor: theme.colors.bgActivity,
color: theme.colors.textMain,
border: `1px solid ${theme.colors.border}`,
}}
>
<RefreshCw className={`w-3 h-3 ${isRefreshing ? 'animate-spin' : ''}`} />
{isRefreshing ? 'Checking...' : 'Check for Updates'}
</button>
</div>
)}
{/* Commands list */}
<div className="space-y-2 max-h-[500px] overflow-y-auto pr-1 scrollbar-thin">
{commands.map((cmd) => (
<div
key={cmd.id}
className="rounded-lg border overflow-hidden"
style={{ backgroundColor: theme.colors.bgMain, borderColor: theme.colors.border }}
>
{editingCommand?.id === cmd.id ? (
// Editing mode
<div className="p-3 space-y-3">
<div className="flex items-center justify-between">
<span className="font-mono font-bold text-sm" style={{ color: theme.colors.accent }}>
{cmd.command}
</span>
<div className="flex items-center gap-1">
<button
onClick={handleCancelEdit}
className="flex items-center gap-1 px-2 py-1 rounded text-xs font-medium transition-all"
style={{
backgroundColor: theme.colors.bgActivity,
color: theme.colors.textMain,
border: `1px solid ${theme.colors.border}`,
}}
>
<X className="w-3 h-3" />
Cancel
</button>
<button
onClick={handleSaveEdit}
className="flex items-center gap-1 px-2 py-1 rounded text-xs font-medium transition-all"
style={{
backgroundColor: theme.colors.success,
color: '#000000',
}}
>
<Save className="w-3 h-3" />
Save
</button>
</div>
</div>
<div className="relative">
<textarea
ref={editCommandTextareaRef}
value={editingCommand.prompt}
onChange={handleEditAutocompleteChange}
onKeyDown={(e) => {
if (handleEditAutocompleteKeyDown(e)) {
return;
}
if (e.key === 'Tab') {
e.preventDefault();
const textarea = e.currentTarget;
const start = textarea.selectionStart;
const end = textarea.selectionEnd;
const value = textarea.value;
const newValue = value.substring(0, start) + '\t' + value.substring(end);
setEditingCommand({ ...editingCommand, prompt: newValue });
setTimeout(() => {
textarea.selectionStart = textarea.selectionEnd = start + 1;
}, 0);
}
}}
rows={15}
className="w-full p-2 rounded border bg-transparent outline-none text-sm resize-y scrollbar-thin min-h-[300px] font-mono"
style={{ borderColor: theme.colors.border, color: theme.colors.textMain }}
/>
<TemplateAutocompleteDropdown
ref={editAutocompleteRef}
theme={theme}
state={editAutocompleteState}
onSelect={selectEditVariable}
/>
</div>
</div>
) : (
// Display mode
<>
<button
onClick={() => toggleExpanded(cmd.id)}
className="w-full px-3 py-2.5 flex items-center justify-between hover:bg-white/5 transition-colors"
>
<div className="flex items-center gap-2">
{expandedCommands.has(cmd.id) ? (
<ChevronDown className="w-3.5 h-3.5" style={{ color: theme.colors.textDim }} />
) : (
<ChevronRight className="w-3.5 h-3.5" style={{ color: theme.colors.textDim }} />
)}
<span className="font-mono font-bold text-sm" style={{ color: theme.colors.accent }}>
{cmd.command}
</span>
{cmd.isCustom && (
<span
className="px-1.5 py-0.5 rounded text-[10px] font-medium"
style={{ backgroundColor: theme.colors.accent + '20', color: theme.colors.accent }}
>
Maestro
</span>
)}
{cmd.isModified && (
<span
className="px-1.5 py-0.5 rounded text-[10px] font-medium"
style={{ backgroundColor: theme.colors.warning + '20', color: theme.colors.warning }}
>
Modified
</span>
)}
</div>
<span className="text-xs truncate max-w-[300px]" style={{ color: theme.colors.textDim }}>
{cmd.description}
</span>
</button>
{expandedCommands.has(cmd.id) && (
<div className="px-3 pb-3 pt-1 border-t" style={{ borderColor: theme.colors.border }}>
<div className="flex items-center justify-end gap-1 mb-2">
{cmd.isModified && (
<button
onClick={() => handleReset(cmd.id)}
className="flex items-center gap-1 px-2 py-1 rounded text-xs font-medium transition-all hover:bg-white/10"
style={{ color: theme.colors.textDim }}
title="Reset to bundled default"
>
<RotateCcw className="w-3 h-3" />
Reset
</button>
)}
<button
onClick={() => setEditingCommand({ id: cmd.id, prompt: cmd.prompt })}
className="flex items-center gap-1 px-2 py-1 rounded text-xs font-medium transition-all hover:bg-white/10"
style={{ color: theme.colors.textDim }}
title="Edit prompt"
>
<Edit2 className="w-3 h-3" />
Edit
</button>
</div>
<div
className="text-xs p-2 rounded font-mono overflow-y-auto max-h-48 scrollbar-thin whitespace-pre-wrap"
style={{ backgroundColor: theme.colors.bgActivity, color: theme.colors.textMain }}
>
{cmd.prompt.length > 500 ? cmd.prompt.substring(0, 500) + '...' : cmd.prompt}
</div>
</div>
)}
</>
)}
</div>
))}
</div>
{commands.length === 0 && (
<div
className="p-6 rounded-lg border border-dashed text-center"
style={{ borderColor: theme.colors.border }}
>
<GitBranch className="w-8 h-8 mx-auto mb-2 opacity-30" />
<p className="text-sm opacity-50" style={{ color: theme.colors.textDim }}>
No OpenSpec commands loaded
</p>
</div>
)}
</div>
);
}

View File

@@ -6,6 +6,7 @@ import { useLayerStack } from '../contexts/LayerStackContext';
import { useToast } from '../contexts/ToastContext';
import { MODAL_PRIORITIES } from '../constants/modalPriorities';
import { gitService } from '../services/git';
import { getOpenSpecCommand } from '../services/openspec';
import { formatShortcutKeys } from '../utils/shortcutFormatter';
import type { WizardStep } from './Wizard/WizardContext';
import { useListNavigation } from '../hooks';
@@ -103,6 +104,8 @@ interface QuickActionsModalProps {
isFilePreviewOpen?: boolean;
ghCliAvailable?: boolean;
onPublishGist?: () => void;
// OpenSpec commands
onInjectOpenSpecPrompt?: (prompt: string) => void;
}
export function QuickActionsModal(props: QuickActionsModalProps) {
@@ -123,7 +126,8 @@ export function QuickActionsModal(props: QuickActionsModalProps) {
onSummarizeAndContinue, canSummarizeActiveTab,
autoRunSelectedDocument, autoRunCompletedTaskCount, onAutoRunResetTasks,
onCloseAllTabs, onCloseOtherTabs, onCloseTabsLeft, onCloseTabsRight,
isFilePreviewOpen, ghCliAvailable, onPublishGist
isFilePreviewOpen, ghCliAvailable, onPublishGist,
onInjectOpenSpecPrompt
} = props;
const [search, setSearch] = useState('');
@@ -415,6 +419,69 @@ export function QuickActionsModal(props: QuickActionsModalProps) {
...(onNewGroupChat && sessions.filter(s => s.toolType !== 'terminal').length >= 2 ? [{ id: 'newGroupChat', label: 'New Group Chat', action: () => { onNewGroupChat(); setQuickActionOpen(false); } }] : []),
...(activeGroupChatId && onCloseGroupChat ? [{ id: 'closeGroupChat', label: 'Close Group Chat', action: () => { onCloseGroupChat(); setQuickActionOpen(false); } }] : []),
...(activeGroupChatId && onDeleteGroupChat && groupChats ? [{ id: 'deleteGroupChat', label: `Remove Group Chat: ${groupChats.find(c => c.id === activeGroupChatId)?.name || 'Group Chat'}`, shortcut: shortcuts.killInstance, action: () => { onDeleteGroupChat(activeGroupChatId); setQuickActionOpen(false); } }] : []),
// OpenSpec commands - inject prompt when selected
...(onInjectOpenSpecPrompt ? [
{
id: 'openspec-proposal',
label: 'OpenSpec: Create Proposal',
subtext: 'Create a new change proposal',
action: async () => {
const cmd = await getOpenSpecCommand('/openspec.proposal');
if (cmd) {
onInjectOpenSpecPrompt(cmd.prompt);
}
setQuickActionOpen(false);
}
},
{
id: 'openspec-apply',
label: 'OpenSpec: Apply Changes',
subtext: 'Apply an approved change proposal',
action: async () => {
const cmd = await getOpenSpecCommand('/openspec.apply');
if (cmd) {
onInjectOpenSpecPrompt(cmd.prompt);
}
setQuickActionOpen(false);
}
},
{
id: 'openspec-archive',
label: 'OpenSpec: Archive Change',
subtext: 'Archive a completed change',
action: async () => {
const cmd = await getOpenSpecCommand('/openspec.archive');
if (cmd) {
onInjectOpenSpecPrompt(cmd.prompt);
}
setQuickActionOpen(false);
}
},
{
id: 'openspec-implement',
label: 'OpenSpec: Generate Auto Run',
subtext: 'Generate Auto Run document from proposal',
action: async () => {
const cmd = await getOpenSpecCommand('/openspec.implement');
if (cmd) {
onInjectOpenSpecPrompt(cmd.prompt);
}
setQuickActionOpen(false);
}
},
{
id: 'openspec-help',
label: 'OpenSpec: Help',
subtext: 'Show OpenSpec workflow help',
action: async () => {
const cmd = await getOpenSpecCommand('/openspec.help');
if (cmd) {
onInjectOpenSpecPrompt(cmd.prompt);
}
setQuickActionOpen(false);
}
}
] : []),
// Debug commands - only visible when user types "debug"
{ id: 'debugResetBusy', label: 'Debug: Reset Busy State', subtext: 'Clear stuck thinking/busy state for all sessions', action: () => {
// Reset all sessions and tabs to idle state

View File

@@ -7,6 +7,7 @@ import { useLayerStack } from '../contexts/LayerStackContext';
import { MODAL_PRIORITIES } from '../constants/modalPriorities';
import { AICommandsPanel } from './AICommandsPanel';
import { SpecKitCommandsPanel } from './SpecKitCommandsPanel';
import { OpenSpecCommandsPanel } from './OpenSpecCommandsPanel';
import { formatShortcutKeys } from '../utils/shortcutFormatter';
import { ToggleButtonGroup } from './ToggleButtonGroup';
import { SettingCheckbox } from './SettingCheckbox';
@@ -1665,6 +1666,15 @@ export const SettingsModal = memo(function SettingsModal(props: SettingsModalPro
{/* Spec Kit Commands Section */}
<SpecKitCommandsPanel theme={theme} />
{/* Divider */}
<div
className="border-t"
style={{ borderColor: theme.colors.border }}
/>
{/* OpenSpec Commands Section */}
<OpenSpecCommandsPanel theme={theme} />
</div>
)}
</div>

View File

@@ -1252,6 +1252,61 @@ interface MaestroAPI {
error?: string;
}>;
};
openspec: {
  /** Fetch version/refresh metadata for the bundled OpenSpec prompts. */
  getMetadata: () => Promise<{
    success: boolean;
    metadata?: {
      lastRefreshed: string; // ISO-8601 date of last refresh
      commitSha: string; // upstream commit SHA or version tag
      sourceVersion: string; // semantic version of the upstream release
      sourceUrl: string; // upstream GitHub repository URL
    };
    error?: string;
  }>;
  /** Fetch all OpenSpec commands (bundled plus any user-modified prompts). */
  getPrompts: () => Promise<{
    success: boolean;
    commands?: Array<{
      id: string; // stable identifier, e.g. 'proposal'
      command: string; // full slash command, e.g. '/openspec.proposal'
      description: string;
      prompt: string;
      isCustom: boolean; // Maestro-specific command (not from upstream)
      isModified: boolean; // user has edited the prompt
    }>;
    error?: string;
  }>;
  /** Look up a single command by its slash-command string. */
  getCommand: (slashCommand: string) => Promise<{
    success: boolean;
    command?: {
      id: string;
      command: string;
      description: string;
      prompt: string;
      isCustom: boolean;
      isModified: boolean;
    };
    error?: string;
  }>;
  /** Persist a user-edited prompt body for the command with the given id. */
  savePrompt: (id: string, content: string) => Promise<{
    success: boolean;
    error?: string;
  }>;
  /** Restore a command's prompt to the bundled default; returns the restored text. */
  resetPrompt: (id: string) => Promise<{
    success: boolean;
    prompt?: string;
    error?: string;
  }>;
  /** Check upstream for updated prompts; returns new metadata on success. */
  refresh: () => Promise<{
    success: boolean;
    metadata?: {
      lastRefreshed: string;
      commitSha: string;
      sourceVersion: string;
      sourceUrl: string;
    };
    error?: string;
  }>;
};
}
declare global {

View File

@@ -0,0 +1,56 @@
/**
* OpenSpec Service
*
* Provides access to bundled OpenSpec commands for the renderer.
* These commands integrate with the slash command system.
*/
import type { OpenSpecCommand, OpenSpecMetadata } from '../types';
/**
 * Fetch every bundled OpenSpec command from the main process.
 *
 * Returns an empty list when the IPC call reports failure, returns no
 * commands, or throws — callers never need to handle an error case.
 */
export async function getOpenSpecCommands(): Promise<OpenSpecCommand[]> {
  try {
    const { success, commands } = await window.maestro.openspec.getPrompts();
    return success && commands ? commands : [];
  } catch (error) {
    console.error('[OpenSpec] Failed to get commands:', error);
    return [];
  }
}
/**
 * Fetch OpenSpec metadata (source version, last refresh date) from the
 * main process.
 *
 * Returns null when the IPC call reports failure, carries no metadata,
 * or throws.
 */
export async function getOpenSpecMetadata(): Promise<OpenSpecMetadata | null> {
  try {
    const { success, metadata } = await window.maestro.openspec.getMetadata();
    return success && metadata ? metadata : null;
  } catch (error) {
    console.error('[OpenSpec] Failed to get metadata:', error);
    return null;
  }
}
/**
 * Look up a single OpenSpec command by its slash-command string
 * (e.g. '/openspec.proposal').
 *
 * Returns null when the command is unknown, the IPC call reports failure,
 * or the call throws.
 */
export async function getOpenSpecCommand(slashCommand: string): Promise<OpenSpecCommand | null> {
  try {
    const { success, command } = await window.maestro.openspec.getCommand(slashCommand);
    return success && command ? command : null;
  } catch (error) {
    console.error('[OpenSpec] Failed to get command:', error);
    return null;
  }
}

View File

@@ -585,6 +585,24 @@ export interface SpecKitMetadata {
sourceUrl: string; // GitHub repo URL
}
// OpenSpec command definition (bundled from Fission-AI/OpenSpec).
// One entry per slash command surfaced in the command palette / input.
export interface OpenSpecCommand {
  id: string; // stable identifier without prefix, e.g. 'proposal'
  command: string; // full slash-command string, e.g. '/openspec.proposal'
  description: string; // short human-readable summary shown in command lists
  prompt: string; // the full prompt text injected when the command runs
  isCustom: boolean; // true for 'help' and 'implement' (Maestro-specific additions, not upstream)
  isModified: boolean; // true if the user has edited the prompt away from the bundled default
}
// OpenSpec metadata for tracking which upstream version the bundled
// prompts came from and when they were last refreshed.
export interface OpenSpecMetadata {
  lastRefreshed: string; // ISO-8601 date of the last refresh from upstream
  commitSha: string; // upstream Git commit SHA or version tag the prompts were taken from
  sourceVersion: string; // semantic version of the upstream release
  sourceUrl: string; // GitHub repository URL of the upstream source
}
// Leaderboard registration data for runmaestro.ai integration
export interface LeaderboardRegistration {
// Required fields