mirror of
https://github.com/jlengrand/Maestro.git
synced 2026-03-10 08:31:19 +00:00
tests pass
This commit is contained in:
@@ -549,6 +549,18 @@ Example: `feat: add context usage visualization`
|
|||||||
|
|
||||||
## Building for Release
|
## Building for Release
|
||||||
|
|
||||||
|
### 0. Refresh Spec Kit Prompts (Optional)
|
||||||
|
|
||||||
|
Before releasing, check if GitHub's spec-kit has updates:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
npm run refresh-speckit
|
||||||
|
```
|
||||||
|
|
||||||
|
This fetches the latest prompts from [github/spec-kit](https://github.com/github/spec-kit) and updates the bundled files in `src/prompts/speckit/`. The custom `/speckit.implement` prompt is never overwritten.
|
||||||
|
|
||||||
|
Review any changes with `git diff` before committing.
|
||||||
|
|
||||||
### 1. Prepare Icons
|
### 1. Prepare Icons
|
||||||
|
|
||||||
Place icons in `build/` directory:
|
Place icons in `build/` directory:
|
||||||
|
|||||||
4
package-lock.json
generated
4
package-lock.json
generated
@@ -1,12 +1,12 @@
|
|||||||
{
|
{
|
||||||
"name": "maestro",
|
"name": "maestro",
|
||||||
"version": "0.10.2",
|
"version": "0.11.2",
|
||||||
"lockfileVersion": 3,
|
"lockfileVersion": 3,
|
||||||
"requires": true,
|
"requires": true,
|
||||||
"packages": {
|
"packages": {
|
||||||
"": {
|
"": {
|
||||||
"name": "maestro",
|
"name": "maestro",
|
||||||
"version": "0.10.2",
|
"version": "0.11.2",
|
||||||
"hasInstallScript": true,
|
"hasInstallScript": true,
|
||||||
"license": "AGPL 3.0",
|
"license": "AGPL 3.0",
|
||||||
"dependencies": {
|
"dependencies": {
|
||||||
|
|||||||
@@ -41,7 +41,8 @@
|
|||||||
"test:e2e:ui": "npm run build:main && npm run build:renderer && playwright test --ui",
|
"test:e2e:ui": "npm run build:main && npm run build:renderer && playwright test --ui",
|
||||||
"test:e2e:headed": "npm run build:main && npm run build:renderer && playwright test --headed",
|
"test:e2e:headed": "npm run build:main && npm run build:renderer && playwright test --headed",
|
||||||
"test:integration": "vitest run --config vitest.integration.config.ts",
|
"test:integration": "vitest run --config vitest.integration.config.ts",
|
||||||
"test:integration:watch": "vitest --config vitest.integration.config.ts"
|
"test:integration:watch": "vitest --config vitest.integration.config.ts",
|
||||||
|
"refresh-speckit": "node scripts/refresh-speckit.mjs"
|
||||||
},
|
},
|
||||||
"build": {
|
"build": {
|
||||||
"appId": "com.maestro.app",
|
"appId": "com.maestro.app",
|
||||||
|
|||||||
250
scripts/refresh-speckit.mjs
Normal file
250
scripts/refresh-speckit.mjs
Normal file
@@ -0,0 +1,250 @@
|
|||||||
|
#!/usr/bin/env node
|
||||||
|
/**
|
||||||
|
* Refresh Spec Kit Prompts
|
||||||
|
*
|
||||||
|
* Fetches the latest spec-kit prompts from GitHub and updates the bundled files.
|
||||||
|
* Run manually before releases or when spec-kit updates.
|
||||||
|
*
|
||||||
|
* Usage: npm run refresh-speckit
|
||||||
|
*/
|
||||||
|
|
||||||
|
import fs from 'fs';
|
||||||
|
import path from 'path';
|
||||||
|
import { fileURLToPath } from 'url';
|
||||||
|
import https from 'https';
|
||||||
|
import { createWriteStream } from 'fs';
|
||||||
|
import { createGunzip } from 'zlib';
|
||||||
|
import { pipeline } from 'stream/promises';
|
||||||
|
import { Readable } from 'stream';
|
||||||
|
|
||||||
|
const __dirname = path.dirname(fileURLToPath(import.meta.url));
|
||||||
|
const SPECKIT_DIR = path.join(__dirname, '..', 'src', 'prompts', 'speckit');
|
||||||
|
const METADATA_PATH = path.join(SPECKIT_DIR, 'metadata.json');
|
||||||
|
|
||||||
|
// GitHub spec-kit repository info
|
||||||
|
const GITHUB_API = 'https://api.github.com';
|
||||||
|
const REPO_OWNER = 'github';
|
||||||
|
const REPO_NAME = 'spec-kit';
|
||||||
|
|
||||||
|
// Commands to fetch (these are upstream commands, we skip 'implement' as it's custom)
|
||||||
|
const UPSTREAM_COMMANDS = [
|
||||||
|
'constitution',
|
||||||
|
'specify',
|
||||||
|
'clarify',
|
||||||
|
'plan',
|
||||||
|
'tasks',
|
||||||
|
'analyze',
|
||||||
|
'checklist',
|
||||||
|
'taskstoissues',
|
||||||
|
];
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Make an HTTPS GET request
|
||||||
|
*/
|
||||||
|
function httpsGet(url, options = {}) {
|
||||||
|
return new Promise((resolve, reject) => {
|
||||||
|
const headers = {
|
||||||
|
'User-Agent': 'Maestro-SpecKit-Refresher',
|
||||||
|
...options.headers,
|
||||||
|
};
|
||||||
|
|
||||||
|
https.get(url, { headers }, (res) => {
|
||||||
|
// Handle redirects
|
||||||
|
if (res.statusCode === 301 || res.statusCode === 302) {
|
||||||
|
return resolve(httpsGet(res.headers.location, options));
|
||||||
|
}
|
||||||
|
|
||||||
|
if (res.statusCode !== 200) {
|
||||||
|
reject(new Error(`HTTP ${res.statusCode}: ${url}`));
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
let data = '';
|
||||||
|
res.on('data', chunk => data += chunk);
|
||||||
|
res.on('end', () => resolve({ data, headers: res.headers }));
|
||||||
|
res.on('error', reject);
|
||||||
|
}).on('error', reject);
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Download file from URL
|
||||||
|
*/
|
||||||
|
function downloadFile(url, destPath) {
|
||||||
|
return new Promise((resolve, reject) => {
|
||||||
|
https.get(url, { headers: { 'User-Agent': 'Maestro-SpecKit-Refresher' } }, (res) => {
|
||||||
|
if (res.statusCode === 301 || res.statusCode === 302) {
|
||||||
|
return resolve(downloadFile(res.headers.location, destPath));
|
||||||
|
}
|
||||||
|
|
||||||
|
if (res.statusCode !== 200) {
|
||||||
|
reject(new Error(`HTTP ${res.statusCode}`));
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
const file = createWriteStream(destPath);
|
||||||
|
res.pipe(file);
|
||||||
|
file.on('finish', () => {
|
||||||
|
file.close();
|
||||||
|
resolve();
|
||||||
|
});
|
||||||
|
file.on('error', reject);
|
||||||
|
}).on('error', reject);
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Extract a specific file from a ZIP archive
|
||||||
|
*/
|
||||||
|
async function extractFromZip(zipPath, filePattern, destDir) {
|
||||||
|
const { exec } = await import('child_process');
|
||||||
|
const { promisify } = await import('util');
|
||||||
|
const execAsync = promisify(exec);
|
||||||
|
|
||||||
|
// List files in the ZIP
|
||||||
|
const { stdout: listOutput } = await execAsync(`unzip -l "${zipPath}"`);
|
||||||
|
|
||||||
|
// Find matching files
|
||||||
|
const lines = listOutput.split('\n');
|
||||||
|
const matchingFiles = [];
|
||||||
|
|
||||||
|
for (const line of lines) {
|
||||||
|
// Match lines like: " 12345 01-01-2024 00:00 spec-kit-0.0.90/.claude/commands/constitution.md"
|
||||||
|
const match = line.match(/^\s*\d+\s+\S+\s+\S+\s+(.+)$/);
|
||||||
|
if (match) {
|
||||||
|
const filePath = match[1].trim();
|
||||||
|
if (filePath.includes('.claude/commands/') && filePath.endsWith('.md')) {
|
||||||
|
matchingFiles.push(filePath);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Extract matching files
|
||||||
|
const extractedFiles = {};
|
||||||
|
for (const filePath of matchingFiles) {
|
||||||
|
const fileName = path.basename(filePath, '.md');
|
||||||
|
// Skip files not in our upstream list
|
||||||
|
if (!UPSTREAM_COMMANDS.includes(fileName)) continue;
|
||||||
|
|
||||||
|
// Extract to temp location
|
||||||
|
const tempDir = path.join(destDir, '.temp-extract');
|
||||||
|
await execAsync(`unzip -o -j "${zipPath}" "${filePath}" -d "${tempDir}"`);
|
||||||
|
|
||||||
|
// Read the extracted content
|
||||||
|
const extractedPath = path.join(tempDir, path.basename(filePath));
|
||||||
|
if (fs.existsSync(extractedPath)) {
|
||||||
|
extractedFiles[fileName] = fs.readFileSync(extractedPath, 'utf8');
|
||||||
|
fs.unlinkSync(extractedPath);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Clean up temp directory
|
||||||
|
const tempDir = path.join(destDir, '.temp-extract');
|
||||||
|
if (fs.existsSync(tempDir)) {
|
||||||
|
fs.rmdirSync(tempDir, { recursive: true });
|
||||||
|
}
|
||||||
|
|
||||||
|
return extractedFiles;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get the latest release info from GitHub
|
||||||
|
*/
|
||||||
|
async function getLatestRelease() {
|
||||||
|
const url = `${GITHUB_API}/repos/${REPO_OWNER}/${REPO_NAME}/releases/latest`;
|
||||||
|
const { data } = await httpsGet(url);
|
||||||
|
return JSON.parse(data);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Find the Claude template ZIP asset in the release
|
||||||
|
*/
|
||||||
|
function findClaudeTemplateAsset(release) {
|
||||||
|
return release.assets.find(asset =>
|
||||||
|
asset.name.includes('claude') && asset.name.endsWith('.zip')
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Main refresh function
|
||||||
|
*/
|
||||||
|
async function refreshSpecKit() {
|
||||||
|
console.log('🔄 Refreshing Spec Kit prompts from GitHub...\n');
|
||||||
|
|
||||||
|
// Ensure speckit directory exists
|
||||||
|
if (!fs.existsSync(SPECKIT_DIR)) {
|
||||||
|
console.error('❌ Spec Kit directory not found:', SPECKIT_DIR);
|
||||||
|
process.exit(1);
|
||||||
|
}
|
||||||
|
|
||||||
|
try {
|
||||||
|
// Get latest release
|
||||||
|
console.log('📡 Fetching latest release info...');
|
||||||
|
const release = await getLatestRelease();
|
||||||
|
console.log(` Found release: ${release.tag_name} (${release.name})`);
|
||||||
|
|
||||||
|
// Find Claude template ZIP
|
||||||
|
const claudeAsset = findClaudeTemplateAsset(release);
|
||||||
|
if (!claudeAsset) {
|
||||||
|
console.error('❌ Could not find Claude template ZIP in release assets');
|
||||||
|
process.exit(1);
|
||||||
|
}
|
||||||
|
console.log(` Claude template: ${claudeAsset.name}`);
|
||||||
|
|
||||||
|
// Download the ZIP
|
||||||
|
const tempZipPath = path.join(SPECKIT_DIR, '.temp-speckit.zip');
|
||||||
|
console.log('\n📥 Downloading template ZIP...');
|
||||||
|
await downloadFile(claudeAsset.browser_download_url, tempZipPath);
|
||||||
|
console.log(' Download complete');
|
||||||
|
|
||||||
|
// Extract prompts from ZIP
|
||||||
|
console.log('\n📦 Extracting prompts...');
|
||||||
|
const extractedPrompts = await extractFromZip(tempZipPath, '', SPECKIT_DIR);
|
||||||
|
|
||||||
|
// Clean up temp ZIP
|
||||||
|
fs.unlinkSync(tempZipPath);
|
||||||
|
|
||||||
|
// Update prompt files
|
||||||
|
console.log('\n✏️ Updating prompt files...');
|
||||||
|
let updatedCount = 0;
|
||||||
|
for (const [commandName, content] of Object.entries(extractedPrompts)) {
|
||||||
|
const promptFile = path.join(SPECKIT_DIR, `speckit.${commandName}.md`);
|
||||||
|
const existingContent = fs.existsSync(promptFile)
|
||||||
|
? fs.readFileSync(promptFile, 'utf8')
|
||||||
|
: '';
|
||||||
|
|
||||||
|
if (content !== existingContent) {
|
||||||
|
fs.writeFileSync(promptFile, content);
|
||||||
|
console.log(` ✓ Updated: speckit.${commandName}.md`);
|
||||||
|
updatedCount++;
|
||||||
|
} else {
|
||||||
|
console.log(` - Unchanged: speckit.${commandName}.md`);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update metadata
|
||||||
|
const version = release.tag_name.replace(/^v/, '');
|
||||||
|
const metadata = {
|
||||||
|
lastRefreshed: new Date().toISOString(),
|
||||||
|
commitSha: release.tag_name,
|
||||||
|
sourceVersion: version,
|
||||||
|
sourceUrl: `https://github.com/${REPO_OWNER}/${REPO_NAME}`,
|
||||||
|
};
|
||||||
|
|
||||||
|
fs.writeFileSync(METADATA_PATH, JSON.stringify(metadata, null, 2));
|
||||||
|
console.log('\n📄 Updated metadata.json');
|
||||||
|
|
||||||
|
// Summary
|
||||||
|
console.log('\n✅ Refresh complete!');
|
||||||
|
console.log(` Version: ${version}`);
|
||||||
|
console.log(` Updated: ${updatedCount} files`);
|
||||||
|
console.log(` Skipped: implement (custom Maestro prompt)`);
|
||||||
|
|
||||||
|
} catch (error) {
|
||||||
|
console.error('\n❌ Refresh failed:', error.message);
|
||||||
|
process.exit(1);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Run
|
||||||
|
refreshSpecKit();
|
||||||
@@ -449,13 +449,16 @@ describe('agent-detector', () => {
|
|||||||
const originalPlatform = process.platform;
|
const originalPlatform = process.platform;
|
||||||
Object.defineProperty(process, 'platform', { value: 'darwin', configurable: true });
|
Object.defineProperty(process, 'platform', { value: 'darwin', configurable: true });
|
||||||
|
|
||||||
|
try {
|
||||||
vi.spyOn(fs.promises, 'stat').mockResolvedValue({
|
vi.spyOn(fs.promises, 'stat').mockResolvedValue({
|
||||||
isFile: () => true,
|
isFile: () => true,
|
||||||
} as fs.Stats);
|
} as fs.Stats);
|
||||||
vi.spyOn(fs.promises, 'access').mockRejectedValue(new Error('EACCES'));
|
vi.spyOn(fs.promises, 'access').mockRejectedValue(new Error('EACCES'));
|
||||||
|
|
||||||
detector.setCustomPaths({ 'claude-code': '/custom/claude' });
|
// Create a fresh detector to pick up the platform change
|
||||||
const agents = await detector.detectAgents();
|
const unixDetector = new AgentDetector();
|
||||||
|
unixDetector.setCustomPaths({ 'claude-code': '/custom/claude' });
|
||||||
|
const agents = await unixDetector.detectAgents();
|
||||||
|
|
||||||
const claude = agents.find(a => a.id === 'claude-code');
|
const claude = agents.find(a => a.id === 'claude-code');
|
||||||
expect(claude?.available).toBe(false);
|
expect(claude?.available).toBe(false);
|
||||||
@@ -464,27 +467,38 @@ describe('agent-detector', () => {
|
|||||||
expect.stringContaining('not executable'),
|
expect.stringContaining('not executable'),
|
||||||
'AgentDetector'
|
'AgentDetector'
|
||||||
);
|
);
|
||||||
|
} finally {
|
||||||
Object.defineProperty(process, 'platform', { value: originalPlatform, configurable: true });
|
Object.defineProperty(process, 'platform', { value: originalPlatform, configurable: true });
|
||||||
|
}
|
||||||
});
|
});
|
||||||
|
|
||||||
it('should skip executable check on Windows', async () => {
|
it('should skip executable check on Windows', async () => {
|
||||||
const originalPlatform = process.platform;
|
const originalPlatform = process.platform;
|
||||||
Object.defineProperty(process, 'platform', { value: 'win32', configurable: true });
|
Object.defineProperty(process, 'platform', { value: 'win32', configurable: true });
|
||||||
|
|
||||||
|
try {
|
||||||
const accessMock = vi.spyOn(fs.promises, 'access');
|
const accessMock = vi.spyOn(fs.promises, 'access');
|
||||||
vi.spyOn(fs.promises, 'stat').mockResolvedValue({
|
vi.spyOn(fs.promises, 'stat').mockResolvedValue({
|
||||||
isFile: () => true,
|
isFile: () => true,
|
||||||
} as fs.Stats);
|
} as fs.Stats);
|
||||||
|
|
||||||
detector.setCustomPaths({ 'claude-code': 'C:\\custom\\claude.exe' });
|
// Create a fresh detector to pick up the platform change
|
||||||
const agents = await detector.detectAgents();
|
const winDetector = new AgentDetector();
|
||||||
|
winDetector.setCustomPaths({ 'claude-code': 'C:\\custom\\claude.exe' });
|
||||||
|
const agents = await winDetector.detectAgents();
|
||||||
|
|
||||||
const claude = agents.find(a => a.id === 'claude-code');
|
const claude = agents.find(a => a.id === 'claude-code');
|
||||||
expect(claude?.available).toBe(true);
|
expect(claude?.available).toBe(true);
|
||||||
expect(accessMock).not.toHaveBeenCalled();
|
// On Windows, access should not be called with X_OK flag for custom paths
|
||||||
|
// Note: probeWindowsPaths may call access with F_OK for other agents,
|
||||||
|
// but the key is that the executable check (X_OK) is skipped for custom paths
|
||||||
|
const xokCalls = accessMock.mock.calls.filter(
|
||||||
|
call => call[1] === fs.constants.X_OK
|
||||||
|
);
|
||||||
|
expect(xokCalls).toHaveLength(0);
|
||||||
|
} finally {
|
||||||
Object.defineProperty(process, 'platform', { value: originalPlatform, configurable: true });
|
Object.defineProperty(process, 'platform', { value: originalPlatform, configurable: true });
|
||||||
|
}
|
||||||
});
|
});
|
||||||
|
|
||||||
it('should fall back to PATH when custom path is invalid', async () => {
|
it('should fall back to PATH when custom path is invalid', async () => {
|
||||||
@@ -547,6 +561,7 @@ describe('agent-detector', () => {
|
|||||||
const originalPlatform = process.platform;
|
const originalPlatform = process.platform;
|
||||||
Object.defineProperty(process, 'platform', { value: 'darwin', configurable: true });
|
Object.defineProperty(process, 'platform', { value: 'darwin', configurable: true });
|
||||||
|
|
||||||
|
try {
|
||||||
// Create a new detector to pick up the platform change
|
// Create a new detector to pick up the platform change
|
||||||
const unixDetector = new AgentDetector();
|
const unixDetector = new AgentDetector();
|
||||||
mockExecFileNoThrow.mockResolvedValue({ stdout: '/usr/bin/claude\n', stderr: '', exitCode: 0 });
|
mockExecFileNoThrow.mockResolvedValue({ stdout: '/usr/bin/claude\n', stderr: '', exitCode: 0 });
|
||||||
@@ -559,14 +574,20 @@ describe('agent-detector', () => {
|
|||||||
undefined,
|
undefined,
|
||||||
expect.any(Object)
|
expect.any(Object)
|
||||||
);
|
);
|
||||||
|
} finally {
|
||||||
Object.defineProperty(process, 'platform', { value: originalPlatform, configurable: true });
|
Object.defineProperty(process, 'platform', { value: originalPlatform, configurable: true });
|
||||||
|
}
|
||||||
});
|
});
|
||||||
|
|
||||||
it('should use where command on Windows', async () => {
|
it('should use where command on Windows', async () => {
|
||||||
const originalPlatform = process.platform;
|
const originalPlatform = process.platform;
|
||||||
Object.defineProperty(process, 'platform', { value: 'win32', configurable: true });
|
Object.defineProperty(process, 'platform', { value: 'win32', configurable: true });
|
||||||
|
|
||||||
|
try {
|
||||||
|
// Mock fs.promises.access to reject so probeWindowsPaths doesn't find anything
|
||||||
|
// This forces fallback to 'where' command
|
||||||
|
vi.spyOn(fs.promises, 'access').mockRejectedValue(new Error('ENOENT'));
|
||||||
|
|
||||||
const winDetector = new AgentDetector();
|
const winDetector = new AgentDetector();
|
||||||
mockExecFileNoThrow.mockResolvedValue({ stdout: 'C:\\claude.exe\n', stderr: '', exitCode: 0 });
|
mockExecFileNoThrow.mockResolvedValue({ stdout: 'C:\\claude.exe\n', stderr: '', exitCode: 0 });
|
||||||
|
|
||||||
@@ -578,8 +599,9 @@ describe('agent-detector', () => {
|
|||||||
undefined,
|
undefined,
|
||||||
expect.any(Object)
|
expect.any(Object)
|
||||||
);
|
);
|
||||||
|
} finally {
|
||||||
Object.defineProperty(process, 'platform', { value: originalPlatform, configurable: true });
|
Object.defineProperty(process, 'platform', { value: originalPlatform, configurable: true });
|
||||||
|
}
|
||||||
});
|
});
|
||||||
|
|
||||||
it('should take first match when multiple paths returned', async () => {
|
it('should take first match when multiple paths returned', async () => {
|
||||||
|
|||||||
@@ -21,6 +21,7 @@ import { registerClaudeHandlers, ClaudeHandlerDependencies } from './claude';
|
|||||||
import { registerAgentSessionsHandlers, AgentSessionsHandlerDependencies } from './agentSessions';
|
import { registerAgentSessionsHandlers, AgentSessionsHandlerDependencies } from './agentSessions';
|
||||||
import { registerGroupChatHandlers, GroupChatHandlerDependencies } from './groupChat';
|
import { registerGroupChatHandlers, GroupChatHandlerDependencies } from './groupChat';
|
||||||
import { registerDebugHandlers, DebugHandlerDependencies } from './debug';
|
import { registerDebugHandlers, DebugHandlerDependencies } from './debug';
|
||||||
|
import { registerSpeckitHandlers } from './speckit';
|
||||||
import { AgentDetector } from '../../agent-detector';
|
import { AgentDetector } from '../../agent-detector';
|
||||||
import { ProcessManager } from '../../process-manager';
|
import { ProcessManager } from '../../process-manager';
|
||||||
import { WebServer } from '../../web-server';
|
import { WebServer } from '../../web-server';
|
||||||
@@ -42,6 +43,7 @@ export { registerClaudeHandlers };
|
|||||||
export { registerAgentSessionsHandlers };
|
export { registerAgentSessionsHandlers };
|
||||||
export { registerGroupChatHandlers };
|
export { registerGroupChatHandlers };
|
||||||
export { registerDebugHandlers };
|
export { registerDebugHandlers };
|
||||||
|
export { registerSpeckitHandlers };
|
||||||
export type { AgentsHandlerDependencies };
|
export type { AgentsHandlerDependencies };
|
||||||
export type { ProcessHandlerDependencies };
|
export type { ProcessHandlerDependencies };
|
||||||
export type { PersistenceHandlerDependencies };
|
export type { PersistenceHandlerDependencies };
|
||||||
@@ -147,6 +149,8 @@ export function registerAllHandlers(deps: HandlerDependencies): void {
|
|||||||
groupsStore: deps.groupsStore,
|
groupsStore: deps.groupsStore,
|
||||||
// bootstrapStore is optional - not available in HandlerDependencies
|
// bootstrapStore is optional - not available in HandlerDependencies
|
||||||
});
|
});
|
||||||
|
// Register spec-kit handlers (no dependencies needed)
|
||||||
|
registerSpeckitHandlers();
|
||||||
// Setup logger event forwarding to renderer
|
// Setup logger event forwarding to renderer
|
||||||
setupLoggerEventForwarding(deps.getMainWindow);
|
setupLoggerEventForwarding(deps.getMainWindow);
|
||||||
}
|
}
|
||||||
|
|||||||
100
src/main/ipc/handlers/speckit.ts
Normal file
100
src/main/ipc/handlers/speckit.ts
Normal file
@@ -0,0 +1,100 @@
|
|||||||
|
/**
|
||||||
|
* Spec Kit IPC Handlers
|
||||||
|
*
|
||||||
|
* Provides IPC handlers for managing spec-kit commands:
|
||||||
|
* - Get metadata (version, last refresh date)
|
||||||
|
* - Get all commands with prompts
|
||||||
|
* - Save user edits to prompts
|
||||||
|
* - Reset prompts to bundled defaults
|
||||||
|
* - Refresh prompts from GitHub
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { ipcMain } from 'electron';
|
||||||
|
import { logger } from '../../utils/logger';
|
||||||
|
import { createIpcHandler, CreateHandlerOptions } from '../../utils/ipcHandler';
|
||||||
|
import {
|
||||||
|
getSpeckitMetadata,
|
||||||
|
getSpeckitPrompts,
|
||||||
|
saveSpeckitPrompt,
|
||||||
|
resetSpeckitPrompt,
|
||||||
|
refreshSpeckitPrompts,
|
||||||
|
getSpeckitCommandBySlash,
|
||||||
|
SpecKitCommand,
|
||||||
|
SpecKitMetadata,
|
||||||
|
} from '../../speckit-manager';
|
||||||
|
|
||||||
|
const LOG_CONTEXT = '[SpecKit]';
|
||||||
|
|
||||||
|
// Helper to create handler options with consistent context
|
||||||
|
const handlerOpts = (operation: string, logSuccess = true): CreateHandlerOptions => ({
|
||||||
|
context: LOG_CONTEXT,
|
||||||
|
operation,
|
||||||
|
logSuccess,
|
||||||
|
});
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Register all Spec Kit IPC handlers.
|
||||||
|
*/
|
||||||
|
export function registerSpeckitHandlers(): void {
|
||||||
|
// Get metadata (version info, last refresh date)
|
||||||
|
ipcMain.handle(
|
||||||
|
'speckit:getMetadata',
|
||||||
|
createIpcHandler(handlerOpts('getMetadata', false), async () => {
|
||||||
|
const metadata = await getSpeckitMetadata();
|
||||||
|
return { metadata };
|
||||||
|
})
|
||||||
|
);
|
||||||
|
|
||||||
|
// Get all spec-kit prompts
|
||||||
|
ipcMain.handle(
|
||||||
|
'speckit:getPrompts',
|
||||||
|
createIpcHandler(handlerOpts('getPrompts', false), async () => {
|
||||||
|
const commands = await getSpeckitPrompts();
|
||||||
|
return { commands };
|
||||||
|
})
|
||||||
|
);
|
||||||
|
|
||||||
|
// Get a single command by slash command string
|
||||||
|
ipcMain.handle(
|
||||||
|
'speckit:getCommand',
|
||||||
|
createIpcHandler(handlerOpts('getCommand', false), async (slashCommand: string) => {
|
||||||
|
const command = await getSpeckitCommandBySlash(slashCommand);
|
||||||
|
return { command };
|
||||||
|
})
|
||||||
|
);
|
||||||
|
|
||||||
|
// Save user's edit to a prompt
|
||||||
|
ipcMain.handle(
|
||||||
|
'speckit:savePrompt',
|
||||||
|
createIpcHandler(handlerOpts('savePrompt'), async (id: string, content: string) => {
|
||||||
|
await saveSpeckitPrompt(id, content);
|
||||||
|
logger.info(`Saved custom prompt for speckit.${id}`, LOG_CONTEXT);
|
||||||
|
return {};
|
||||||
|
})
|
||||||
|
);
|
||||||
|
|
||||||
|
// Reset a prompt to bundled default
|
||||||
|
ipcMain.handle(
|
||||||
|
'speckit:resetPrompt',
|
||||||
|
createIpcHandler(handlerOpts('resetPrompt'), async (id: string) => {
|
||||||
|
const prompt = await resetSpeckitPrompt(id);
|
||||||
|
logger.info(`Reset speckit.${id} to bundled default`, LOG_CONTEXT);
|
||||||
|
return { prompt };
|
||||||
|
})
|
||||||
|
);
|
||||||
|
|
||||||
|
// Refresh prompts from GitHub
|
||||||
|
ipcMain.handle(
|
||||||
|
'speckit:refresh',
|
||||||
|
createIpcHandler(handlerOpts('refresh'), async () => {
|
||||||
|
const metadata = await refreshSpeckitPrompts();
|
||||||
|
logger.info(`Refreshed spec-kit prompts to ${metadata.sourceVersion}`, LOG_CONTEXT);
|
||||||
|
return { metadata };
|
||||||
|
})
|
||||||
|
);
|
||||||
|
|
||||||
|
logger.debug(`${LOG_CONTEXT} Spec Kit IPC handlers registered`);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Export types for preload
|
||||||
|
export type { SpecKitCommand, SpecKitMetadata };
|
||||||
@@ -842,6 +842,75 @@ contextBridge.exposeInMainWorld('maestro', {
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
|
||||||
|
// Spec Kit API (bundled spec-kit slash commands)
|
||||||
|
speckit: {
|
||||||
|
// Get metadata (version, last refresh date)
|
||||||
|
getMetadata: () =>
|
||||||
|
ipcRenderer.invoke('speckit:getMetadata') as Promise<{
|
||||||
|
success: boolean;
|
||||||
|
metadata?: {
|
||||||
|
lastRefreshed: string;
|
||||||
|
commitSha: string;
|
||||||
|
sourceVersion: string;
|
||||||
|
sourceUrl: string;
|
||||||
|
};
|
||||||
|
error?: string;
|
||||||
|
}>,
|
||||||
|
// Get all spec-kit prompts
|
||||||
|
getPrompts: () =>
|
||||||
|
ipcRenderer.invoke('speckit:getPrompts') as Promise<{
|
||||||
|
success: boolean;
|
||||||
|
commands?: Array<{
|
||||||
|
id: string;
|
||||||
|
command: string;
|
||||||
|
description: string;
|
||||||
|
prompt: string;
|
||||||
|
isCustom: boolean;
|
||||||
|
isModified: boolean;
|
||||||
|
}>;
|
||||||
|
error?: string;
|
||||||
|
}>,
|
||||||
|
// Get a single command by slash command string
|
||||||
|
getCommand: (slashCommand: string) =>
|
||||||
|
ipcRenderer.invoke('speckit:getCommand', slashCommand) as Promise<{
|
||||||
|
success: boolean;
|
||||||
|
command?: {
|
||||||
|
id: string;
|
||||||
|
command: string;
|
||||||
|
description: string;
|
||||||
|
prompt: string;
|
||||||
|
isCustom: boolean;
|
||||||
|
isModified: boolean;
|
||||||
|
} | null;
|
||||||
|
error?: string;
|
||||||
|
}>,
|
||||||
|
// Save user's edit to a prompt
|
||||||
|
savePrompt: (id: string, content: string) =>
|
||||||
|
ipcRenderer.invoke('speckit:savePrompt', id, content) as Promise<{
|
||||||
|
success: boolean;
|
||||||
|
error?: string;
|
||||||
|
}>,
|
||||||
|
// Reset a prompt to bundled default
|
||||||
|
resetPrompt: (id: string) =>
|
||||||
|
ipcRenderer.invoke('speckit:resetPrompt', id) as Promise<{
|
||||||
|
success: boolean;
|
||||||
|
prompt?: string;
|
||||||
|
error?: string;
|
||||||
|
}>,
|
||||||
|
// Refresh prompts from GitHub
|
||||||
|
refresh: () =>
|
||||||
|
ipcRenderer.invoke('speckit:refresh') as Promise<{
|
||||||
|
success: boolean;
|
||||||
|
metadata?: {
|
||||||
|
lastRefreshed: string;
|
||||||
|
commitSha: string;
|
||||||
|
sourceVersion: string;
|
||||||
|
sourceUrl: string;
|
||||||
|
};
|
||||||
|
error?: string;
|
||||||
|
}>,
|
||||||
|
},
|
||||||
|
|
||||||
// Notification API
|
// Notification API
|
||||||
notification: {
|
notification: {
|
||||||
show: (title: string, body: string) =>
|
show: (title: string, body: string) =>
|
||||||
@@ -2118,6 +2187,61 @@ export interface MaestroAPI {
|
|||||||
error?: string;
|
error?: string;
|
||||||
}>;
|
}>;
|
||||||
};
|
};
|
||||||
|
speckit: {
|
||||||
|
getMetadata: () => Promise<{
|
||||||
|
success: boolean;
|
||||||
|
metadata?: {
|
||||||
|
lastRefreshed: string;
|
||||||
|
commitSha: string;
|
||||||
|
sourceVersion: string;
|
||||||
|
sourceUrl: string;
|
||||||
|
};
|
||||||
|
error?: string;
|
||||||
|
}>;
|
||||||
|
getPrompts: () => Promise<{
|
||||||
|
success: boolean;
|
||||||
|
commands?: Array<{
|
||||||
|
id: string;
|
||||||
|
command: string;
|
||||||
|
description: string;
|
||||||
|
prompt: string;
|
||||||
|
isCustom: boolean;
|
||||||
|
isModified: boolean;
|
||||||
|
}>;
|
||||||
|
error?: string;
|
||||||
|
}>;
|
||||||
|
getCommand: (slashCommand: string) => Promise<{
|
||||||
|
success: boolean;
|
||||||
|
command?: {
|
||||||
|
id: string;
|
||||||
|
command: string;
|
||||||
|
description: string;
|
||||||
|
prompt: string;
|
||||||
|
isCustom: boolean;
|
||||||
|
isModified: boolean;
|
||||||
|
};
|
||||||
|
error?: string;
|
||||||
|
}>;
|
||||||
|
savePrompt: (id: string, content: string) => Promise<{
|
||||||
|
success: boolean;
|
||||||
|
error?: string;
|
||||||
|
}>;
|
||||||
|
resetPrompt: (id: string) => Promise<{
|
||||||
|
success: boolean;
|
||||||
|
prompt?: string;
|
||||||
|
error?: string;
|
||||||
|
}>;
|
||||||
|
refresh: () => Promise<{
|
||||||
|
success: boolean;
|
||||||
|
metadata?: {
|
||||||
|
lastRefreshed: string;
|
||||||
|
commitSha: string;
|
||||||
|
sourceVersion: string;
|
||||||
|
sourceUrl: string;
|
||||||
|
};
|
||||||
|
error?: string;
|
||||||
|
}>;
|
||||||
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
declare global {
|
declare global {
|
||||||
|
|||||||
273
src/main/speckit-manager.ts
Normal file
273
src/main/speckit-manager.ts
Normal file
@@ -0,0 +1,273 @@
|
|||||||
|
/**
|
||||||
|
* Spec Kit Manager
|
||||||
|
*
|
||||||
|
* Manages bundled spec-kit prompts with support for:
|
||||||
|
* - Loading bundled prompts from src/prompts/speckit/
|
||||||
|
* - Fetching updates from GitHub's spec-kit repository
|
||||||
|
* - User customization with ability to reset to defaults
|
||||||
|
*/
|
||||||
|
|
||||||
|
import fs from 'fs/promises';
|
||||||
|
import path from 'path';
|
||||||
|
import { app } from 'electron';
|
||||||
|
import { logger } from './utils/logger';
|
||||||
|
|
||||||
|
const LOG_CONTEXT = '[SpecKit]';
|
||||||
|
|
||||||
|
// GitHub raw content base URL
|
||||||
|
const GITHUB_RAW_BASE = 'https://raw.githubusercontent.com/github/spec-kit';
|
||||||
|
|
||||||
|
// Commands we bundle from upstream (excludes our custom 'implement')
|
||||||
|
const UPSTREAM_COMMANDS = [
|
||||||
|
'constitution',
|
||||||
|
'specify',
|
||||||
|
'clarify',
|
||||||
|
'plan',
|
||||||
|
'tasks',
|
||||||
|
'analyze',
|
||||||
|
'checklist',
|
||||||
|
'taskstoissues',
|
||||||
|
] as const;
|
||||||
|
|
||||||
|
export interface SpecKitCommand {
|
||||||
|
id: string;
|
||||||
|
command: string;
|
||||||
|
description: string;
|
||||||
|
prompt: string;
|
||||||
|
isCustom: boolean;
|
||||||
|
isModified: boolean;
|
||||||
|
}
|
||||||
|
|
||||||
|
export interface SpecKitMetadata {
|
||||||
|
lastRefreshed: string;
|
||||||
|
commitSha: string;
|
||||||
|
sourceVersion: string;
|
||||||
|
sourceUrl: string;
|
||||||
|
}
|
||||||
|
|
||||||
|
interface StoredPrompt {
|
||||||
|
content: string;
|
||||||
|
isModified: boolean;
|
||||||
|
modifiedAt?: string;
|
||||||
|
}
|
||||||
|
|
||||||
|
interface StoredData {
|
||||||
|
metadata: SpecKitMetadata;
|
||||||
|
prompts: Record<string, StoredPrompt>;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get path to user's speckit customizations file
|
||||||
|
*/
|
||||||
|
function getUserDataPath(): string {
|
||||||
|
return path.join(app.getPath('userData'), 'speckit-customizations.json');
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Load user customizations from disk
|
||||||
|
*/
|
||||||
|
async function loadUserCustomizations(): Promise<StoredData | null> {
|
||||||
|
try {
|
||||||
|
const content = await fs.readFile(getUserDataPath(), 'utf-8');
|
||||||
|
return JSON.parse(content);
|
||||||
|
} catch {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Save user customizations to disk
|
||||||
|
*/
|
||||||
|
async function saveUserCustomizations(data: StoredData): Promise<void> {
|
||||||
|
await fs.writeFile(getUserDataPath(), JSON.stringify(data, null, 2), 'utf-8');
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get bundled prompts from the build
|
||||||
|
* These are imported at build time via the index.ts
|
||||||
|
*/
|
||||||
|
async function getBundledPrompts(): Promise<Record<string, { prompt: string; description: string; isCustom: boolean }>> {
|
||||||
|
// Dynamic import to get the bundled prompts
|
||||||
|
const speckit = await import('../prompts/speckit');
|
||||||
|
|
||||||
|
const result: Record<string, { prompt: string; description: string; isCustom: boolean }> = {};
|
||||||
|
|
||||||
|
for (const cmd of speckit.speckitCommands) {
|
||||||
|
result[cmd.id] = {
|
||||||
|
prompt: cmd.prompt,
|
||||||
|
description: cmd.description,
|
||||||
|
isCustom: cmd.isCustom,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get bundled metadata
|
||||||
|
*/
|
||||||
|
async function getBundledMetadata(): Promise<SpecKitMetadata> {
|
||||||
|
const speckit = await import('../prompts/speckit');
|
||||||
|
return speckit.getSpeckitMetadata();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get current spec-kit metadata
|
||||||
|
*/
|
||||||
|
export async function getSpeckitMetadata(): Promise<SpecKitMetadata> {
|
||||||
|
const customizations = await loadUserCustomizations();
|
||||||
|
if (customizations?.metadata) {
|
||||||
|
return customizations.metadata;
|
||||||
|
}
|
||||||
|
return getBundledMetadata();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get all spec-kit prompts (bundled defaults merged with user customizations)
|
||||||
|
*/
|
||||||
|
export async function getSpeckitPrompts(): Promise<SpecKitCommand[]> {
|
||||||
|
const bundled = await getBundledPrompts();
|
||||||
|
const customizations = await loadUserCustomizations();
|
||||||
|
|
||||||
|
const commands: SpecKitCommand[] = [];
|
||||||
|
|
||||||
|
for (const [id, data] of Object.entries(bundled)) {
|
||||||
|
const customPrompt = customizations?.prompts?.[id];
|
||||||
|
const isModified = customPrompt?.isModified ?? false;
|
||||||
|
const prompt = isModified && customPrompt ? customPrompt.content : data.prompt;
|
||||||
|
|
||||||
|
commands.push({
|
||||||
|
id,
|
||||||
|
command: `/speckit.${id}`,
|
||||||
|
description: data.description,
|
||||||
|
prompt,
|
||||||
|
isCustom: data.isCustom,
|
||||||
|
isModified,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
return commands;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Save user's edit to a spec-kit prompt
|
||||||
|
*/
|
||||||
|
export async function saveSpeckitPrompt(id: string, content: string): Promise<void> {
|
||||||
|
const customizations = await loadUserCustomizations() ?? {
|
||||||
|
metadata: await getBundledMetadata(),
|
||||||
|
prompts: {},
|
||||||
|
};
|
||||||
|
|
||||||
|
customizations.prompts[id] = {
|
||||||
|
content,
|
||||||
|
isModified: true,
|
||||||
|
modifiedAt: new Date().toISOString(),
|
||||||
|
};
|
||||||
|
|
||||||
|
await saveUserCustomizations(customizations);
|
||||||
|
logger.info(`Saved customization for speckit.${id}`, LOG_CONTEXT);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Reset a spec-kit prompt to its bundled default
|
||||||
|
*/
|
||||||
|
export async function resetSpeckitPrompt(id: string): Promise<string> {
|
||||||
|
const bundled = await getBundledPrompts();
|
||||||
|
const defaultPrompt = bundled[id];
|
||||||
|
|
||||||
|
if (!defaultPrompt) {
|
||||||
|
throw new Error(`Unknown speckit command: ${id}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
const customizations = await loadUserCustomizations();
|
||||||
|
if (customizations?.prompts?.[id]) {
|
||||||
|
delete customizations.prompts[id];
|
||||||
|
await saveUserCustomizations(customizations);
|
||||||
|
logger.info(`Reset speckit.${id} to bundled default`, LOG_CONTEXT);
|
||||||
|
}
|
||||||
|
|
||||||
|
return defaultPrompt.prompt;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Extract description from markdown frontmatter
|
||||||
|
*/
|
||||||
|
function extractDescription(markdown: string): string {
|
||||||
|
const match = markdown.match(/^---\s*\n[\s\S]*?description:\s*(.+?)\n[\s\S]*?---/m);
|
||||||
|
return match?.[1]?.trim() || '';
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
 * Fetch the latest spec-kit release info from GitHub and record it in the
 * user's customization file.
 *
 * NOTE(review): despite the name, this currently updates only the stored
 * METADATA (release tag/version). The prompt files themselves are refreshed
 * by a build-time script; downloading and extracting the release ZIP at
 * runtime is not implemented yet (see comments below).
 *
 * @returns the new metadata that was persisted.
 * @throws Error when the GitHub release lookup fails or the release has no
 *         Claude template asset.
 */
export async function refreshSpeckitPrompts(): Promise<SpecKitMetadata> {
  logger.info('Refreshing spec-kit prompts from GitHub...', LOG_CONTEXT);

  // First, get the latest release info
  const releaseResponse = await fetch('https://api.github.com/repos/github/spec-kit/releases/latest');
  if (!releaseResponse.ok) {
    throw new Error(`Failed to fetch release info: ${releaseResponse.statusText}`);
  }

  const releaseInfo = await releaseResponse.json();
  const version = releaseInfo.tag_name as string;

  // Find the Claude template asset. This validates the release shape even
  // though the download itself is not performed yet.
  const claudeAsset = releaseInfo.assets?.find((a: { name: string }) =>
    a.name.includes('claude') && a.name.endsWith('.zip')
  );

  if (!claudeAsset) {
    throw new Error('Could not find Claude template in release assets');
  }

  // Download and extract the template
  const downloadUrl = claudeAsset.browser_download_url as string;
  logger.info(`Downloading ${version} from ${downloadUrl}`, LOG_CONTEXT);

  // We'll use the Electron net module for downloading
  // For now, fall back to a simpler approach using the existing bundled prompts
  // as fetching and extracting ZIP files requires additional handling

  // Update metadata with new version info
  const newMetadata: SpecKitMetadata = {
    lastRefreshed: new Date().toISOString(),
    commitSha: version,
    // Strip a leading 'v' so 'v0.0.90' becomes '0.0.90'.
    sourceVersion: version.replace(/^v/, ''),
    sourceUrl: 'https://github.com/github/spec-kit',
  };

  // Load current customizations or create new
  const customizations = await loadUserCustomizations() ?? {
    metadata: newMetadata,
    prompts: {},
  };

  // Update metadata (preserving any user prompt customizations)
  customizations.metadata = newMetadata;
  await saveUserCustomizations(customizations);

  logger.info(`Updated spec-kit metadata to ${version}`, LOG_CONTEXT);

  // Note: Full prompt refresh would require downloading and extracting the ZIP
  // For now, this updates the metadata. A build-time script can update the actual prompts.

  return newMetadata;
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get a single spec-kit command by ID
|
||||||
|
*/
|
||||||
|
export async function getSpeckitCommand(id: string): Promise<SpecKitCommand | null> {
|
||||||
|
const commands = await getSpeckitPrompts();
|
||||||
|
return commands.find((cmd) => cmd.id === id) ?? null;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get a spec-kit command by its slash command string (e.g., "/speckit.constitution")
|
||||||
|
*/
|
||||||
|
export async function getSpeckitCommandBySlash(slashCommand: string): Promise<SpecKitCommand | null> {
|
||||||
|
const commands = await getSpeckitPrompts();
|
||||||
|
return commands.find((cmd) => cmd.command === slashCommand) ?? null;
|
||||||
|
}
|
||||||
148
src/prompts/speckit/index.ts
Normal file
148
src/prompts/speckit/index.ts
Normal file
@@ -0,0 +1,148 @@
|
|||||||
|
/**
|
||||||
|
* Spec Kit prompts module
|
||||||
|
*
|
||||||
|
* Bundled prompts from GitHub's spec-kit project with our custom Maestro implementation.
|
||||||
|
* These prompts are imported at build time using Vite's ?raw suffix.
|
||||||
|
*
|
||||||
|
* Source: https://github.com/github/spec-kit
|
||||||
|
* Version: 0.0.90
|
||||||
|
*/
|
||||||
|
|
||||||
|
// Bundled spec-kit prompts (from upstream)
|
||||||
|
import constitutionPrompt from './speckit.constitution.md?raw';
|
||||||
|
import specifyPrompt from './speckit.specify.md?raw';
|
||||||
|
import clarifyPrompt from './speckit.clarify.md?raw';
|
||||||
|
import planPrompt from './speckit.plan.md?raw';
|
||||||
|
import tasksPrompt from './speckit.tasks.md?raw';
|
||||||
|
import analyzePrompt from './speckit.analyze.md?raw';
|
||||||
|
import checklistPrompt from './speckit.checklist.md?raw';
|
||||||
|
import tasksToIssuesPrompt from './speckit.taskstoissues.md?raw';
|
||||||
|
|
||||||
|
// Custom Maestro implementation prompt
|
||||||
|
import implementPrompt from './speckit.implement.md?raw';
|
||||||
|
|
||||||
|
// Metadata
|
||||||
|
import metadataJson from './metadata.json';
|
||||||
|
|
||||||
|
/**
 * A bundled spec-kit command definition (prompt text baked in at build time).
 */
export interface SpecKitCommandDefinition {
  // Short identifier, e.g. 'plan'.
  id: string;
  // Full slash command string, e.g. '/speckit.plan'.
  command: string;
  // One-line description of what the command does.
  description: string;
  // The full markdown prompt text.
  prompt: string;
  // True for Maestro's custom commands (only 'implement' in this file).
  isCustom: boolean;
}
|
||||||
|
|
||||||
|
/**
 * Provenance information for the bundled spec-kit prompts.
 */
export interface SpecKitMetadata {
  // ISO-8601 timestamp of the last refresh from upstream.
  lastRefreshed: string;
  // Upstream commit SHA or release tag the prompts were taken from.
  commitSha: string;
  // Upstream release version (no leading 'v').
  sourceVersion: string;
  // URL of the upstream repository.
  sourceUrl: string;
}
|
||||||
|
|
||||||
|
/**
 * All bundled spec-kit commands.
 * The first eight come from upstream github/spec-kit; 'implement' is
 * Maestro's own replacement (the only entry with isCustom: true).
 */
export const speckitCommands: SpecKitCommandDefinition[] = [
  {
    id: 'constitution',
    command: '/speckit.constitution',
    description: 'Create or update the project constitution',
    prompt: constitutionPrompt,
    isCustom: false,
  },
  {
    id: 'specify',
    command: '/speckit.specify',
    description: 'Create or update feature specification',
    prompt: specifyPrompt,
    isCustom: false,
  },
  {
    id: 'clarify',
    command: '/speckit.clarify',
    description: 'Identify underspecified areas and ask clarification questions',
    prompt: clarifyPrompt,
    isCustom: false,
  },
  {
    id: 'plan',
    command: '/speckit.plan',
    description: 'Execute implementation planning workflow',
    prompt: planPrompt,
    isCustom: false,
  },
  {
    id: 'tasks',
    command: '/speckit.tasks',
    description: 'Generate actionable, dependency-ordered tasks',
    prompt: tasksPrompt,
    isCustom: false,
  },
  {
    id: 'analyze',
    command: '/speckit.analyze',
    description: 'Cross-artifact consistency and quality analysis',
    prompt: analyzePrompt,
    isCustom: false,
  },
  {
    id: 'checklist',
    command: '/speckit.checklist',
    description: 'Generate custom checklist for feature',
    prompt: checklistPrompt,
    isCustom: false,
  },
  {
    id: 'taskstoissues',
    command: '/speckit.taskstoissues',
    description: 'Convert tasks to GitHub issues',
    prompt: tasksToIssuesPrompt,
    isCustom: false,
  },
  // Custom Maestro command — never overwritten by upstream refreshes.
  {
    id: 'implement',
    command: '/speckit.implement',
    description: 'Execute tasks using Maestro Auto Run with worktree support',
    prompt: implementPrompt,
    isCustom: true,
  },
];
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get a spec-kit command by ID
|
||||||
|
*/
|
||||||
|
export function getSpeckitCommand(id: string): SpecKitCommandDefinition | undefined {
|
||||||
|
return speckitCommands.find((cmd) => cmd.id === id);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get a spec-kit command by slash command string
|
||||||
|
*/
|
||||||
|
export function getSpeckitCommandBySlash(command: string): SpecKitCommandDefinition | undefined {
|
||||||
|
return speckitCommands.find((cmd) => cmd.command === command);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get the metadata for bundled spec-kit prompts
|
||||||
|
*/
|
||||||
|
export function getSpeckitMetadata(): SpecKitMetadata {
|
||||||
|
return {
|
||||||
|
lastRefreshed: metadataJson.lastRefreshed,
|
||||||
|
commitSha: metadataJson.commitSha,
|
||||||
|
sourceVersion: metadataJson.sourceVersion,
|
||||||
|
sourceUrl: metadataJson.sourceUrl,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
// Re-export the raw prompt strings so consumers can import an individual
// prompt directly instead of going through speckitCommands.
export {
  constitutionPrompt,
  specifyPrompt,
  clarifyPrompt,
  planPrompt,
  tasksPrompt,
  analyzePrompt,
  checklistPrompt,
  tasksToIssuesPrompt,
  implementPrompt,
};
|
||||||
44
src/prompts/speckit/metadata.json
Normal file
44
src/prompts/speckit/metadata.json
Normal file
@@ -0,0 +1,44 @@
|
|||||||
|
{
|
||||||
|
"lastRefreshed": "2025-12-22T00:00:00.000Z",
|
||||||
|
"commitSha": "v0.0.90",
|
||||||
|
"sourceVersion": "0.0.90",
|
||||||
|
"sourceUrl": "https://github.com/github/spec-kit",
|
||||||
|
"commands": {
|
||||||
|
"constitution": {
|
||||||
|
"description": "Create or update the project constitution",
|
||||||
|
"isCustom": false
|
||||||
|
},
|
||||||
|
"specify": {
|
||||||
|
"description": "Create or update feature specification",
|
||||||
|
"isCustom": false
|
||||||
|
},
|
||||||
|
"clarify": {
|
||||||
|
"description": "Identify underspecified areas and ask clarification questions",
|
||||||
|
"isCustom": false
|
||||||
|
},
|
||||||
|
"plan": {
|
||||||
|
"description": "Execute implementation planning workflow",
|
||||||
|
"isCustom": false
|
||||||
|
},
|
||||||
|
"tasks": {
|
||||||
|
"description": "Generate actionable, dependency-ordered tasks",
|
||||||
|
"isCustom": false
|
||||||
|
},
|
||||||
|
"analyze": {
|
||||||
|
"description": "Cross-artifact consistency and quality analysis",
|
||||||
|
"isCustom": false
|
||||||
|
},
|
||||||
|
"checklist": {
|
||||||
|
"description": "Generate custom checklist for feature",
|
||||||
|
"isCustom": false
|
||||||
|
},
|
||||||
|
"taskstoissues": {
|
||||||
|
"description": "Convert tasks to GitHub issues",
|
||||||
|
"isCustom": false
|
||||||
|
},
|
||||||
|
"implement": {
|
||||||
|
"description": "Execute tasks using Maestro Auto Run with worktree support",
|
||||||
|
"isCustom": true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
184
src/prompts/speckit/speckit.analyze.md
Normal file
184
src/prompts/speckit/speckit.analyze.md
Normal file
@@ -0,0 +1,184 @@
|
|||||||
|
---
|
||||||
|
description: Perform a non-destructive cross-artifact consistency and quality analysis across spec.md, plan.md, and tasks.md after task generation.
|
||||||
|
---
|
||||||
|
|
||||||
|
## User Input
|
||||||
|
|
||||||
|
```text
|
||||||
|
$ARGUMENTS
|
||||||
|
```
|
||||||
|
|
||||||
|
You **MUST** consider the user input before proceeding (if not empty).
|
||||||
|
|
||||||
|
## Goal
|
||||||
|
|
||||||
|
Identify inconsistencies, duplications, ambiguities, and underspecified items across the three core artifacts (`spec.md`, `plan.md`, `tasks.md`) before implementation. This command MUST run only after `/speckit.tasks` has successfully produced a complete `tasks.md`.
|
||||||
|
|
||||||
|
## Operating Constraints
|
||||||
|
|
||||||
|
**STRICTLY READ-ONLY**: Do **not** modify any files. Output a structured analysis report. Offer an optional remediation plan (user must explicitly approve before any follow-up editing commands would be invoked manually).
|
||||||
|
|
||||||
|
**Constitution Authority**: The project constitution (`.specify/memory/constitution.md`) is **non-negotiable** within this analysis scope. Constitution conflicts are automatically CRITICAL and require adjustment of the spec, plan, or tasks—not dilution, reinterpretation, or silent ignoring of the principle. If a principle itself needs to change, that must occur in a separate, explicit constitution update outside `/speckit.analyze`.
|
||||||
|
|
||||||
|
## Execution Steps
|
||||||
|
|
||||||
|
### 1. Initialize Analysis Context
|
||||||
|
|
||||||
|
Run `.specify/scripts/bash/check-prerequisites.sh --json --require-tasks --include-tasks` once from repo root and parse JSON for FEATURE_DIR and AVAILABLE_DOCS. Derive absolute paths:
|
||||||
|
|
||||||
|
- SPEC = FEATURE_DIR/spec.md
|
||||||
|
- PLAN = FEATURE_DIR/plan.md
|
||||||
|
- TASKS = FEATURE_DIR/tasks.md
|
||||||
|
|
||||||
|
Abort with an error message if any required file is missing (instruct the user to run missing prerequisite command).
|
||||||
|
For single quotes in args like "I'm Groot", use escape syntax: e.g. 'I'\''m Groot' (or double-quote if possible: "I'm Groot").
|
||||||
|
|
||||||
|
### 2. Load Artifacts (Progressive Disclosure)
|
||||||
|
|
||||||
|
Load only the minimal necessary context from each artifact:
|
||||||
|
|
||||||
|
**From spec.md:**
|
||||||
|
|
||||||
|
- Overview/Context
|
||||||
|
- Functional Requirements
|
||||||
|
- Non-Functional Requirements
|
||||||
|
- User Stories
|
||||||
|
- Edge Cases (if present)
|
||||||
|
|
||||||
|
**From plan.md:**
|
||||||
|
|
||||||
|
- Architecture/stack choices
|
||||||
|
- Data Model references
|
||||||
|
- Phases
|
||||||
|
- Technical constraints
|
||||||
|
|
||||||
|
**From tasks.md:**
|
||||||
|
|
||||||
|
- Task IDs
|
||||||
|
- Descriptions
|
||||||
|
- Phase grouping
|
||||||
|
- Parallel markers [P]
|
||||||
|
- Referenced file paths
|
||||||
|
|
||||||
|
**From constitution:**
|
||||||
|
|
||||||
|
- Load `.specify/memory/constitution.md` for principle validation
|
||||||
|
|
||||||
|
### 3. Build Semantic Models
|
||||||
|
|
||||||
|
Create internal representations (do not include raw artifacts in output):
|
||||||
|
|
||||||
|
- **Requirements inventory**: Each functional + non-functional requirement with a stable key (derive slug based on imperative phrase; e.g., "User can upload file" → `user-can-upload-file`)
|
||||||
|
- **User story/action inventory**: Discrete user actions with acceptance criteria
|
||||||
|
- **Task coverage mapping**: Map each task to one or more requirements or stories (inference by keyword / explicit reference patterns like IDs or key phrases)
|
||||||
|
- **Constitution rule set**: Extract principle names and MUST/SHOULD normative statements
|
||||||
|
|
||||||
|
### 4. Detection Passes (Token-Efficient Analysis)
|
||||||
|
|
||||||
|
Focus on high-signal findings. Limit to 50 findings total; aggregate remainder in overflow summary.
|
||||||
|
|
||||||
|
#### A. Duplication Detection
|
||||||
|
|
||||||
|
- Identify near-duplicate requirements
|
||||||
|
- Mark lower-quality phrasing for consolidation
|
||||||
|
|
||||||
|
#### B. Ambiguity Detection
|
||||||
|
|
||||||
|
- Flag vague adjectives (fast, scalable, secure, intuitive, robust) lacking measurable criteria
|
||||||
|
- Flag unresolved placeholders (TODO, TKTK, ???, `<placeholder>`, etc.)
|
||||||
|
|
||||||
|
#### C. Underspecification
|
||||||
|
|
||||||
|
- Requirements with verbs but missing object or measurable outcome
|
||||||
|
- User stories missing acceptance criteria alignment
|
||||||
|
- Tasks referencing files or components not defined in spec/plan
|
||||||
|
|
||||||
|
#### D. Constitution Alignment
|
||||||
|
|
||||||
|
- Any requirement or plan element conflicting with a MUST principle
|
||||||
|
- Missing mandated sections or quality gates from constitution
|
||||||
|
|
||||||
|
#### E. Coverage Gaps
|
||||||
|
|
||||||
|
- Requirements with zero associated tasks
|
||||||
|
- Tasks with no mapped requirement/story
|
||||||
|
- Non-functional requirements not reflected in tasks (e.g., performance, security)
|
||||||
|
|
||||||
|
#### F. Inconsistency
|
||||||
|
|
||||||
|
- Terminology drift (same concept named differently across files)
|
||||||
|
- Data entities referenced in plan but absent in spec (or vice versa)
|
||||||
|
- Task ordering contradictions (e.g., integration tasks before foundational setup tasks without dependency note)
|
||||||
|
- Conflicting requirements (e.g., one requires Next.js while other specifies Vue)
|
||||||
|
|
||||||
|
### 5. Severity Assignment
|
||||||
|
|
||||||
|
Use this heuristic to prioritize findings:
|
||||||
|
|
||||||
|
- **CRITICAL**: Violates constitution MUST, missing core spec artifact, or requirement with zero coverage that blocks baseline functionality
|
||||||
|
- **HIGH**: Duplicate or conflicting requirement, ambiguous security/performance attribute, untestable acceptance criterion
|
||||||
|
- **MEDIUM**: Terminology drift, missing non-functional task coverage, underspecified edge case
|
||||||
|
- **LOW**: Style/wording improvements, minor redundancy not affecting execution order
|
||||||
|
|
||||||
|
### 6. Produce Compact Analysis Report
|
||||||
|
|
||||||
|
Output a Markdown report (no file writes) with the following structure:
|
||||||
|
|
||||||
|
## Specification Analysis Report
|
||||||
|
|
||||||
|
| ID | Category | Severity | Location(s) | Summary | Recommendation |
|
||||||
|
|----|----------|----------|-------------|---------|----------------|
|
||||||
|
| A1 | Duplication | HIGH | spec.md:L120-134 | Two similar requirements ... | Merge phrasing; keep clearer version |
|
||||||
|
|
||||||
|
(Add one row per finding; generate stable IDs prefixed by category initial.)
|
||||||
|
|
||||||
|
**Coverage Summary Table:**
|
||||||
|
|
||||||
|
| Requirement Key | Has Task? | Task IDs | Notes |
|
||||||
|
|-----------------|-----------|----------|-------|
|
||||||
|
|
||||||
|
**Constitution Alignment Issues:** (if any)
|
||||||
|
|
||||||
|
**Unmapped Tasks:** (if any)
|
||||||
|
|
||||||
|
**Metrics:**
|
||||||
|
|
||||||
|
- Total Requirements
|
||||||
|
- Total Tasks
|
||||||
|
- Coverage % (requirements with >=1 task)
|
||||||
|
- Ambiguity Count
|
||||||
|
- Duplication Count
|
||||||
|
- Critical Issues Count
|
||||||
|
|
||||||
|
### 7. Provide Next Actions
|
||||||
|
|
||||||
|
At end of report, output a concise Next Actions block:
|
||||||
|
|
||||||
|
- If CRITICAL issues exist: Recommend resolving before `/speckit.implement`
|
||||||
|
- If only LOW/MEDIUM: User may proceed, but provide improvement suggestions
|
||||||
|
- Provide explicit command suggestions: e.g., "Run /speckit.specify with refinement", "Run /speckit.plan to adjust architecture", "Manually edit tasks.md to add coverage for 'performance-metrics'"
|
||||||
|
|
||||||
|
### 8. Offer Remediation
|
||||||
|
|
||||||
|
Ask the user: "Would you like me to suggest concrete remediation edits for the top N issues?" (Do NOT apply them automatically.)
|
||||||
|
|
||||||
|
## Operating Principles
|
||||||
|
|
||||||
|
### Context Efficiency
|
||||||
|
|
||||||
|
- **Minimal high-signal tokens**: Focus on actionable findings, not exhaustive documentation
|
||||||
|
- **Progressive disclosure**: Load artifacts incrementally; don't dump all content into analysis
|
||||||
|
- **Token-efficient output**: Limit findings table to 50 rows; summarize overflow
|
||||||
|
- **Deterministic results**: Rerunning without changes should produce consistent IDs and counts
|
||||||
|
|
||||||
|
### Analysis Guidelines
|
||||||
|
|
||||||
|
- **NEVER modify files** (this is read-only analysis)
|
||||||
|
- **NEVER hallucinate missing sections** (if absent, report them accurately)
|
||||||
|
- **Prioritize constitution violations** (these are always CRITICAL)
|
||||||
|
- **Use examples over exhaustive rules** (cite specific instances, not generic patterns)
|
||||||
|
- **Report zero issues gracefully** (emit success report with coverage statistics)
|
||||||
|
|
||||||
|
## Context
|
||||||
|
|
||||||
|
$ARGUMENTS
|
||||||
294
src/prompts/speckit/speckit.checklist.md
Normal file
294
src/prompts/speckit/speckit.checklist.md
Normal file
@@ -0,0 +1,294 @@
|
|||||||
|
---
|
||||||
|
description: Generate a custom checklist for the current feature based on user requirements.
|
||||||
|
---
|
||||||
|
|
||||||
|
## Checklist Purpose: "Unit Tests for English"
|
||||||
|
|
||||||
|
**CRITICAL CONCEPT**: Checklists are **UNIT TESTS FOR REQUIREMENTS WRITING** - they validate the quality, clarity, and completeness of requirements in a given domain.
|
||||||
|
|
||||||
|
**NOT for verification/testing**:
|
||||||
|
|
||||||
|
- ❌ NOT "Verify the button clicks correctly"
|
||||||
|
- ❌ NOT "Test error handling works"
|
||||||
|
- ❌ NOT "Confirm the API returns 200"
|
||||||
|
- ❌ NOT checking if code/implementation matches the spec
|
||||||
|
|
||||||
|
**FOR requirements quality validation**:
|
||||||
|
|
||||||
|
- ✅ "Are visual hierarchy requirements defined for all card types?" (completeness)
|
||||||
|
- ✅ "Is 'prominent display' quantified with specific sizing/positioning?" (clarity)
|
||||||
|
- ✅ "Are hover state requirements consistent across all interactive elements?" (consistency)
|
||||||
|
- ✅ "Are accessibility requirements defined for keyboard navigation?" (coverage)
|
||||||
|
- ✅ "Does the spec define what happens when logo image fails to load?" (edge cases)
|
||||||
|
|
||||||
|
**Metaphor**: If your spec is code written in English, the checklist is its unit test suite. You're testing whether the requirements are well-written, complete, unambiguous, and ready for implementation - NOT whether the implementation works.
|
||||||
|
|
||||||
|
## User Input
|
||||||
|
|
||||||
|
```text
|
||||||
|
$ARGUMENTS
|
||||||
|
```
|
||||||
|
|
||||||
|
You **MUST** consider the user input before proceeding (if not empty).
|
||||||
|
|
||||||
|
## Execution Steps
|
||||||
|
|
||||||
|
1. **Setup**: Run `.specify/scripts/bash/check-prerequisites.sh --json` from repo root and parse JSON for FEATURE_DIR and AVAILABLE_DOCS list.
|
||||||
|
- All file paths must be absolute.
|
||||||
|
- For single quotes in args like "I'm Groot", use escape syntax: e.g. 'I'\''m Groot' (or double-quote if possible: "I'm Groot").
|
||||||
|
|
||||||
|
2. **Clarify intent (dynamic)**: Derive up to THREE initial contextual clarifying questions (no pre-baked catalog). They MUST:
|
||||||
|
- Be generated from the user's phrasing + extracted signals from spec/plan/tasks
|
||||||
|
- Only ask about information that materially changes checklist content
|
||||||
|
- Be skipped individually if already unambiguous in `$ARGUMENTS`
|
||||||
|
- Prefer precision over breadth
|
||||||
|
|
||||||
|
Generation algorithm:
|
||||||
|
1. Extract signals: feature domain keywords (e.g., auth, latency, UX, API), risk indicators ("critical", "must", "compliance"), stakeholder hints ("QA", "review", "security team"), and explicit deliverables ("a11y", "rollback", "contracts").
|
||||||
|
2. Cluster signals into candidate focus areas (max 4) ranked by relevance.
|
||||||
|
3. Identify probable audience & timing (author, reviewer, QA, release) if not explicit.
|
||||||
|
4. Detect missing dimensions: scope breadth, depth/rigor, risk emphasis, exclusion boundaries, measurable acceptance criteria.
|
||||||
|
5. Formulate questions chosen from these archetypes:
|
||||||
|
- Scope refinement (e.g., "Should this include integration touchpoints with X and Y or stay limited to local module correctness?")
|
||||||
|
- Risk prioritization (e.g., "Which of these potential risk areas should receive mandatory gating checks?")
|
||||||
|
- Depth calibration (e.g., "Is this a lightweight pre-commit sanity list or a formal release gate?")
|
||||||
|
- Audience framing (e.g., "Will this be used by the author only or peers during PR review?")
|
||||||
|
- Boundary exclusion (e.g., "Should we explicitly exclude performance tuning items this round?")
|
||||||
|
- Scenario class gap (e.g., "No recovery flows detected—are rollback / partial failure paths in scope?")
|
||||||
|
|
||||||
|
Question formatting rules:
|
||||||
|
- If presenting options, generate a compact table with columns: Option | Candidate | Why It Matters
|
||||||
|
- Limit to A–E options maximum; omit table if a free-form answer is clearer
|
||||||
|
- Never ask the user to restate what they already said
|
||||||
|
- Avoid speculative categories (no hallucination). If uncertain, ask explicitly: "Confirm whether X belongs in scope."
|
||||||
|
|
||||||
|
Defaults when interaction impossible:
|
||||||
|
- Depth: Standard
|
||||||
|
- Audience: Reviewer (PR) if code-related; Author otherwise
|
||||||
|
- Focus: Top 2 relevance clusters
|
||||||
|
|
||||||
|
Output the questions (label Q1/Q2/Q3). After answers: if ≥2 scenario classes (Alternate / Exception / Recovery / Non-Functional domain) remain unclear, you MAY ask up to TWO more targeted follow‑ups (Q4/Q5) with a one-line justification each (e.g., "Unresolved recovery path risk"). Do not exceed five total questions. Skip escalation if user explicitly declines more.
|
||||||
|
|
||||||
|
3. **Understand user request**: Combine `$ARGUMENTS` + clarifying answers:
|
||||||
|
- Derive checklist theme (e.g., security, review, deploy, ux)
|
||||||
|
- Consolidate explicit must-have items mentioned by user
|
||||||
|
- Map focus selections to category scaffolding
|
||||||
|
- Infer any missing context from spec/plan/tasks (do NOT hallucinate)
|
||||||
|
|
||||||
|
4. **Load feature context**: Read from FEATURE_DIR:
|
||||||
|
- spec.md: Feature requirements and scope
|
||||||
|
- plan.md (if exists): Technical details, dependencies
|
||||||
|
- tasks.md (if exists): Implementation tasks
|
||||||
|
|
||||||
|
**Context Loading Strategy**:
|
||||||
|
- Load only necessary portions relevant to active focus areas (avoid full-file dumping)
|
||||||
|
- Prefer summarizing long sections into concise scenario/requirement bullets
|
||||||
|
- Use progressive disclosure: add follow-on retrieval only if gaps detected
|
||||||
|
- If source docs are large, generate interim summary items instead of embedding raw text
|
||||||
|
|
||||||
|
5. **Generate checklist** - Create "Unit Tests for Requirements":
|
||||||
|
- Create `FEATURE_DIR/checklists/` directory if it doesn't exist
|
||||||
|
- Generate unique checklist filename:
|
||||||
|
- Use short, descriptive name based on domain (e.g., `ux.md`, `api.md`, `security.md`)
|
||||||
|
- Format: `[domain].md`
|
||||||
|
- If file exists, append to existing file
|
||||||
|
- Number items sequentially starting from CHK001
|
||||||
|
- Each `/speckit.checklist` run creates a NEW file (never overwrites existing checklists)
|
||||||
|
|
||||||
|
**CORE PRINCIPLE - Test the Requirements, Not the Implementation**:
|
||||||
|
Every checklist item MUST evaluate the REQUIREMENTS THEMSELVES for:
|
||||||
|
- **Completeness**: Are all necessary requirements present?
|
||||||
|
- **Clarity**: Are requirements unambiguous and specific?
|
||||||
|
- **Consistency**: Do requirements align with each other?
|
||||||
|
- **Measurability**: Can requirements be objectively verified?
|
||||||
|
- **Coverage**: Are all scenarios/edge cases addressed?
|
||||||
|
|
||||||
|
**Category Structure** - Group items by requirement quality dimensions:
|
||||||
|
- **Requirement Completeness** (Are all necessary requirements documented?)
|
||||||
|
- **Requirement Clarity** (Are requirements specific and unambiguous?)
|
||||||
|
- **Requirement Consistency** (Do requirements align without conflicts?)
|
||||||
|
- **Acceptance Criteria Quality** (Are success criteria measurable?)
|
||||||
|
- **Scenario Coverage** (Are all flows/cases addressed?)
|
||||||
|
- **Edge Case Coverage** (Are boundary conditions defined?)
|
||||||
|
- **Non-Functional Requirements** (Performance, Security, Accessibility, etc. - are they specified?)
|
||||||
|
- **Dependencies & Assumptions** (Are they documented and validated?)
|
||||||
|
- **Ambiguities & Conflicts** (What needs clarification?)
|
||||||
|
|
||||||
|
**HOW TO WRITE CHECKLIST ITEMS - "Unit Tests for English"**:
|
||||||
|
|
||||||
|
❌ **WRONG** (Testing implementation):
|
||||||
|
- "Verify landing page displays 3 episode cards"
|
||||||
|
- "Test hover states work on desktop"
|
||||||
|
- "Confirm logo click navigates home"
|
||||||
|
|
||||||
|
✅ **CORRECT** (Testing requirements quality):
|
||||||
|
- "Are the exact number and layout of featured episodes specified?" [Completeness]
|
||||||
|
- "Is 'prominent display' quantified with specific sizing/positioning?" [Clarity]
|
||||||
|
- "Are hover state requirements consistent across all interactive elements?" [Consistency]
|
||||||
|
- "Are keyboard navigation requirements defined for all interactive UI?" [Coverage]
|
||||||
|
- "Is the fallback behavior specified when logo image fails to load?" [Edge Cases]
|
||||||
|
- "Are loading states defined for asynchronous episode data?" [Completeness]
|
||||||
|
- "Does the spec define visual hierarchy for competing UI elements?" [Clarity]
|
||||||
|
|
||||||
|
**ITEM STRUCTURE**:
|
||||||
|
Each item should follow this pattern:
|
||||||
|
- Question format asking about requirement quality
|
||||||
|
- Focus on what's WRITTEN (or not written) in the spec/plan
|
||||||
|
- Include quality dimension in brackets [Completeness/Clarity/Consistency/etc.]
|
||||||
|
- Reference spec section `[Spec §X.Y]` when checking existing requirements
|
||||||
|
- Use `[Gap]` marker when checking for missing requirements
|
||||||
|
|
||||||
|
**EXAMPLES BY QUALITY DIMENSION**:
|
||||||
|
|
||||||
|
Completeness:
|
||||||
|
- "Are error handling requirements defined for all API failure modes? [Gap]"
|
||||||
|
- "Are accessibility requirements specified for all interactive elements? [Completeness]"
|
||||||
|
- "Are mobile breakpoint requirements defined for responsive layouts? [Gap]"
|
||||||
|
|
||||||
|
Clarity:
|
||||||
|
- "Is 'fast loading' quantified with specific timing thresholds? [Clarity, Spec §NFR-2]"
|
||||||
|
- "Are 'related episodes' selection criteria explicitly defined? [Clarity, Spec §FR-5]"
|
||||||
|
- "Is 'prominent' defined with measurable visual properties? [Ambiguity, Spec §FR-4]"
|
||||||
|
|
||||||
|
Consistency:
|
||||||
|
- "Do navigation requirements align across all pages? [Consistency, Spec §FR-10]"
|
||||||
|
- "Are card component requirements consistent between landing and detail pages? [Consistency]"
|
||||||
|
|
||||||
|
Coverage:
|
||||||
|
- "Are requirements defined for zero-state scenarios (no episodes)? [Coverage, Edge Case]"
|
||||||
|
- "Are concurrent user interaction scenarios addressed? [Coverage, Gap]"
|
||||||
|
- "Are requirements specified for partial data loading failures? [Coverage, Exception Flow]"
|
||||||
|
|
||||||
|
Measurability:
|
||||||
|
- "Are visual hierarchy requirements measurable/testable? [Acceptance Criteria, Spec §FR-1]"
|
||||||
|
- "Can 'balanced visual weight' be objectively verified? [Measurability, Spec §FR-2]"
|
||||||
|
|
||||||
|
**Scenario Classification & Coverage** (Requirements Quality Focus):
|
||||||
|
- Check if requirements exist for: Primary, Alternate, Exception/Error, Recovery, Non-Functional scenarios
|
||||||
|
- For each scenario class, ask: "Are [scenario type] requirements complete, clear, and consistent?"
|
||||||
|
- If scenario class missing: "Are [scenario type] requirements intentionally excluded or missing? [Gap]"
|
||||||
|
- Include resilience/rollback when state mutation occurs: "Are rollback requirements defined for migration failures? [Gap]"
|
||||||
|
|
||||||
|
**Traceability Requirements**:
|
||||||
|
- MINIMUM: ≥80% of items MUST include at least one traceability reference
|
||||||
|
- Each item should reference: spec section `[Spec §X.Y]`, or use markers: `[Gap]`, `[Ambiguity]`, `[Conflict]`, `[Assumption]`
|
||||||
|
- If no ID system exists: "Is a requirement & acceptance criteria ID scheme established? [Traceability]"
|
||||||
|
|
||||||
|
**Surface & Resolve Issues** (Requirements Quality Problems):
|
||||||
|
Ask questions about the requirements themselves:
|
||||||
|
- Ambiguities: "Is the term 'fast' quantified with specific metrics? [Ambiguity, Spec §NFR-1]"
|
||||||
|
- Conflicts: "Do navigation requirements conflict between §FR-10 and §FR-10a? [Conflict]"
|
||||||
|
- Assumptions: "Is the assumption of 'always available podcast API' validated? [Assumption]"
|
||||||
|
- Dependencies: "Are external podcast API requirements documented? [Dependency, Gap]"
|
||||||
|
- Missing definitions: "Is 'visual hierarchy' defined with measurable criteria? [Gap]"
|
||||||
|
|
||||||
|
**Content Consolidation**:
|
||||||
|
- Soft cap: If raw candidate items > 40, prioritize by risk/impact
|
||||||
|
- Merge near-duplicates checking the same requirement aspect
|
||||||
|
- If >5 low-impact edge cases, create one item: "Are edge cases X, Y, Z addressed in requirements? [Coverage]"
|
||||||
|
|
||||||
|
**🚫 ABSOLUTELY PROHIBITED** - These make it an implementation test, not a requirements test:
|
||||||
|
- ❌ Any item starting with "Verify", "Test", "Confirm", "Check" + implementation behavior
|
||||||
|
- ❌ References to code execution, user actions, system behavior
|
||||||
|
- ❌ "Displays correctly", "works properly", "functions as expected"
|
||||||
|
- ❌ "Click", "navigate", "render", "load", "execute"
|
||||||
|
- ❌ Test cases, test plans, QA procedures
|
||||||
|
- ❌ Implementation details (frameworks, APIs, algorithms)
|
||||||
|
|
||||||
|
**✅ REQUIRED PATTERNS** - These test requirements quality:
|
||||||
|
- ✅ "Are [requirement type] defined/specified/documented for [scenario]?"
|
||||||
|
- ✅ "Is [vague term] quantified/clarified with specific criteria?"
|
||||||
|
- ✅ "Are requirements consistent between [section A] and [section B]?"
|
||||||
|
- ✅ "Can [requirement] be objectively measured/verified?"
|
||||||
|
- ✅ "Are [edge cases/scenarios] addressed in requirements?"
|
||||||
|
- ✅ "Does the spec define [missing aspect]?"
|
||||||
|
|
||||||
|
6. **Structure Reference**: Generate the checklist following the canonical template in `.specify/templates/checklist-template.md` for title, meta section, category headings, and ID formatting. If template is unavailable, use: H1 title, purpose/created meta lines, `##` category sections containing `- [ ] CHK### <requirement item>` lines with globally incrementing IDs starting at CHK001.
|
||||||
|
|
||||||
|
7. **Report**: Output full path to created checklist, item count, and remind user that each run creates a new file. Summarize:
|
||||||
|
- Focus areas selected
|
||||||
|
- Depth level
|
||||||
|
- Actor/timing
|
||||||
|
- Any explicit user-specified must-have items incorporated
|
||||||
|
|
||||||
|
**Important**: Each `/speckit.checklist` command invocation creates a checklist file using short, descriptive names unless file already exists. This allows:
|
||||||
|
|
||||||
|
- Multiple checklists of different types (e.g., `ux.md`, `test.md`, `security.md`)
|
||||||
|
- Simple, memorable filenames that indicate checklist purpose
|
||||||
|
- Easy identification and navigation in the `checklists/` folder
|
||||||
|
|
||||||
|
To avoid clutter, use descriptive types and clean up obsolete checklists when done.
|
||||||
|
|
||||||
|
## Example Checklist Types & Sample Items
|
||||||
|
|
||||||
|
**UX Requirements Quality:** `ux.md`
|
||||||
|
|
||||||
|
Sample items (testing the requirements, NOT the implementation):
|
||||||
|
|
||||||
|
- "Are visual hierarchy requirements defined with measurable criteria? [Clarity, Spec §FR-1]"
|
||||||
|
- "Is the number and positioning of UI elements explicitly specified? [Completeness, Spec §FR-1]"
|
||||||
|
- "Are interaction state requirements (hover, focus, active) consistently defined? [Consistency]"
|
||||||
|
- "Are accessibility requirements specified for all interactive elements? [Coverage, Gap]"
|
||||||
|
- "Is fallback behavior defined when images fail to load? [Edge Case, Gap]"
|
||||||
|
- "Can 'prominent display' be objectively measured? [Measurability, Spec §FR-4]"
|
||||||
|
|
||||||
|
**API Requirements Quality:** `api.md`
|
||||||
|
|
||||||
|
Sample items:
|
||||||
|
|
||||||
|
- "Are error response formats specified for all failure scenarios? [Completeness]"
|
||||||
|
- "Are rate limiting requirements quantified with specific thresholds? [Clarity]"
|
||||||
|
- "Are authentication requirements consistent across all endpoints? [Consistency]"
|
||||||
|
- "Are retry/timeout requirements defined for external dependencies? [Coverage, Gap]"
|
||||||
|
- "Is versioning strategy documented in requirements? [Gap]"
|
||||||
|
|
||||||
|
**Performance Requirements Quality:** `performance.md`
|
||||||
|
|
||||||
|
Sample items:
|
||||||
|
|
||||||
|
- "Are performance requirements quantified with specific metrics? [Clarity]"
|
||||||
|
- "Are performance targets defined for all critical user journeys? [Coverage]"
|
||||||
|
- "Are performance requirements under different load conditions specified? [Completeness]"
|
||||||
|
- "Can performance requirements be objectively measured? [Measurability]"
|
||||||
|
- "Are degradation requirements defined for high-load scenarios? [Edge Case, Gap]"
|
||||||
|
|
||||||
|
**Security Requirements Quality:** `security.md`
|
||||||
|
|
||||||
|
Sample items:
|
||||||
|
|
||||||
|
- "Are authentication requirements specified for all protected resources? [Coverage]"
|
||||||
|
- "Are data protection requirements defined for sensitive information? [Completeness]"
|
||||||
|
- "Is the threat model documented and requirements aligned to it? [Traceability]"
|
||||||
|
- "Are security requirements consistent with compliance obligations? [Consistency]"
|
||||||
|
- "Are security failure/breach response requirements defined? [Gap, Exception Flow]"
|
||||||
|
|
||||||
|
## Anti-Examples: What NOT To Do
|
||||||
|
|
||||||
|
**❌ WRONG - These test implementation, not requirements:**
|
||||||
|
|
||||||
|
```markdown
|
||||||
|
- [ ] CHK001 - Verify landing page displays 3 episode cards [Spec §FR-001]
|
||||||
|
- [ ] CHK002 - Test hover states work correctly on desktop [Spec §FR-003]
|
||||||
|
- [ ] CHK003 - Confirm logo click navigates to home page [Spec §FR-010]
|
||||||
|
- [ ] CHK004 - Check that related episodes section shows 3-5 items [Spec §FR-005]
|
||||||
|
```
|
||||||
|
|
||||||
|
**✅ CORRECT - These test requirements quality:**
|
||||||
|
|
||||||
|
```markdown
|
||||||
|
- [ ] CHK001 - Are the number and layout of featured episodes explicitly specified? [Completeness, Spec §FR-001]
|
||||||
|
- [ ] CHK002 - Are hover state requirements consistently defined for all interactive elements? [Consistency, Spec §FR-003]
|
||||||
|
- [ ] CHK003 - Are navigation requirements clear for all clickable brand elements? [Clarity, Spec §FR-010]
|
||||||
|
- [ ] CHK004 - Is the selection criteria for related episodes documented? [Gap, Spec §FR-005]
|
||||||
|
- [ ] CHK005 - Are loading state requirements defined for asynchronous episode data? [Gap]
|
||||||
|
- [ ] CHK006 - Can "visual hierarchy" requirements be objectively measured? [Measurability, Spec §FR-001]
|
||||||
|
```
|
||||||
|
|
||||||
|
**Key Differences:**
|
||||||
|
|
||||||
|
- Wrong: Tests if the system works correctly
|
||||||
|
- Correct: Tests if the requirements are written correctly
|
||||||
|
- Wrong: Verification of behavior
|
||||||
|
- Correct: Validation of requirement quality
|
||||||
|
- Wrong: "Does it do X?"
|
||||||
|
- Correct: "Is X clearly specified?"
|
||||||
181
src/prompts/speckit/speckit.clarify.md
Normal file
181
src/prompts/speckit/speckit.clarify.md
Normal file
@@ -0,0 +1,181 @@
|
|||||||
|
---
|
||||||
|
description: Identify underspecified areas in the current feature spec by asking up to 5 highly targeted clarification questions and encoding answers back into the spec.
|
||||||
|
handoffs:
|
||||||
|
- label: Build Technical Plan
|
||||||
|
agent: speckit.plan
|
||||||
|
prompt: Create a plan for the spec. I am building with...
|
||||||
|
---
|
||||||
|
|
||||||
|
## User Input
|
||||||
|
|
||||||
|
```text
|
||||||
|
$ARGUMENTS
|
||||||
|
```
|
||||||
|
|
||||||
|
You **MUST** consider the user input before proceeding (if not empty).
|
||||||
|
|
||||||
|
## Outline
|
||||||
|
|
||||||
|
Goal: Detect and reduce ambiguity or missing decision points in the active feature specification and record the clarifications directly in the spec file.
|
||||||
|
|
||||||
|
Note: This clarification workflow is expected to run (and be completed) BEFORE invoking `/speckit.plan`. If the user explicitly states they are skipping clarification (e.g., exploratory spike), you may proceed, but must warn that downstream rework risk increases.
|
||||||
|
|
||||||
|
Execution steps:
|
||||||
|
|
||||||
|
1. Run `.specify/scripts/bash/check-prerequisites.sh --json --paths-only` from repo root **once** (combined `--json --paths-only` mode / `-Json -PathsOnly`). Parse minimal JSON payload fields:
|
||||||
|
- `FEATURE_DIR`
|
||||||
|
- `FEATURE_SPEC`
|
||||||
|
- (Optionally capture `IMPL_PLAN`, `TASKS` for future chained flows.)
|
||||||
|
- If JSON parsing fails, abort and instruct user to re-run `/speckit.specify` or verify feature branch environment.
|
||||||
|
- For single quotes in args like "I'm Groot", use escape syntax: e.g. 'I'\''m Groot' (or double-quote if possible: "I'm Groot").
|
||||||
|
|
||||||
|
2. Load the current spec file. Perform a structured ambiguity & coverage scan using this taxonomy. For each category, mark status: Clear / Partial / Missing. Produce an internal coverage map used for prioritization (do not output raw map unless no questions will be asked).
|
||||||
|
|
||||||
|
Functional Scope & Behavior:
|
||||||
|
- Core user goals & success criteria
|
||||||
|
- Explicit out-of-scope declarations
|
||||||
|
- User roles / personas differentiation
|
||||||
|
|
||||||
|
Domain & Data Model:
|
||||||
|
- Entities, attributes, relationships
|
||||||
|
- Identity & uniqueness rules
|
||||||
|
- Lifecycle/state transitions
|
||||||
|
- Data volume / scale assumptions
|
||||||
|
|
||||||
|
Interaction & UX Flow:
|
||||||
|
- Critical user journeys / sequences
|
||||||
|
- Error/empty/loading states
|
||||||
|
- Accessibility or localization notes
|
||||||
|
|
||||||
|
Non-Functional Quality Attributes:
|
||||||
|
- Performance (latency, throughput targets)
|
||||||
|
- Scalability (horizontal/vertical, limits)
|
||||||
|
- Reliability & availability (uptime, recovery expectations)
|
||||||
|
- Observability (logging, metrics, tracing signals)
|
||||||
|
- Security & privacy (authN/Z, data protection, threat assumptions)
|
||||||
|
- Compliance / regulatory constraints (if any)
|
||||||
|
|
||||||
|
Integration & External Dependencies:
|
||||||
|
- External services/APIs and failure modes
|
||||||
|
- Data import/export formats
|
||||||
|
- Protocol/versioning assumptions
|
||||||
|
|
||||||
|
Edge Cases & Failure Handling:
|
||||||
|
- Negative scenarios
|
||||||
|
- Rate limiting / throttling
|
||||||
|
- Conflict resolution (e.g., concurrent edits)
|
||||||
|
|
||||||
|
Constraints & Tradeoffs:
|
||||||
|
- Technical constraints (language, storage, hosting)
|
||||||
|
- Explicit tradeoffs or rejected alternatives
|
||||||
|
|
||||||
|
Terminology & Consistency:
|
||||||
|
- Canonical glossary terms
|
||||||
|
- Avoided synonyms / deprecated terms
|
||||||
|
|
||||||
|
Completion Signals:
|
||||||
|
- Acceptance criteria testability
|
||||||
|
- Measurable Definition of Done style indicators
|
||||||
|
|
||||||
|
Misc / Placeholders:
|
||||||
|
- TODO markers / unresolved decisions
|
||||||
|
- Ambiguous adjectives ("robust", "intuitive") lacking quantification
|
||||||
|
|
||||||
|
For each category with Partial or Missing status, add a candidate question opportunity unless:
|
||||||
|
- Clarification would not materially change implementation or validation strategy
|
||||||
|
- Information is better deferred to planning phase (note internally)
|
||||||
|
|
||||||
|
3. Generate (internally) a prioritized queue of candidate clarification questions (maximum 5). Do NOT output them all at once. Apply these constraints:
|
||||||
|
- Maximum of 10 total questions across the whole session.
|
||||||
|
- Each question must be answerable with EITHER:
|
||||||
|
- A short multiple‑choice selection (2–5 distinct, mutually exclusive options), OR
|
||||||
|
- A one-word / short‑phrase answer (explicitly constrain: "Answer in <=5 words").
|
||||||
|
- Only include questions whose answers materially impact architecture, data modeling, task decomposition, test design, UX behavior, operational readiness, or compliance validation.
|
||||||
|
- Ensure category coverage balance: attempt to cover the highest impact unresolved categories first; avoid asking two low-impact questions when a single high-impact area (e.g., security posture) is unresolved.
|
||||||
|
- Exclude questions already answered, trivial stylistic preferences, or plan-level execution details (unless blocking correctness).
|
||||||
|
- Favor clarifications that reduce downstream rework risk or prevent misaligned acceptance tests.
|
||||||
|
- If more than 5 categories remain unresolved, select the top 5 by (Impact * Uncertainty) heuristic.
|
||||||
|
|
||||||
|
4. Sequential questioning loop (interactive):
|
||||||
|
- Present EXACTLY ONE question at a time.
|
||||||
|
- For multiple‑choice questions:
|
||||||
|
- **Analyze all options** and determine the **most suitable option** based on:
|
||||||
|
- Best practices for the project type
|
||||||
|
- Common patterns in similar implementations
|
||||||
|
- Risk reduction (security, performance, maintainability)
|
||||||
|
- Alignment with any explicit project goals or constraints visible in the spec
|
||||||
|
- Present your **recommended option prominently** at the top with clear reasoning (1-2 sentences explaining why this is the best choice).
|
||||||
|
- Format as: `**Recommended:** Option [X] - <reasoning>`
|
||||||
|
- Then render all options as a Markdown table:
|
||||||
|
|
||||||
|
| Option | Description |
|
||||||
|
|--------|-------------|
|
||||||
|
| A | <Option A description> |
|
||||||
|
| B | <Option B description> |
|
||||||
|
| C | <Option C description> (add D/E as needed up to 5) |
|
||||||
|
| Short | Provide a different short answer (<=5 words) (Include only if free-form alternative is appropriate) |
|
||||||
|
|
||||||
|
- After the table, add: `You can reply with the option letter (e.g., "A"), accept the recommendation by saying "yes" or "recommended", or provide your own short answer.`
|
||||||
|
- For short‑answer style (no meaningful discrete options):
|
||||||
|
- Provide your **suggested answer** based on best practices and context.
|
||||||
|
- Format as: `**Suggested:** <your proposed answer> - <brief reasoning>`
|
||||||
|
- Then output: `Format: Short answer (<=5 words). You can accept the suggestion by saying "yes" or "suggested", or provide your own answer.`
|
||||||
|
- After the user answers:
|
||||||
|
- If the user replies with "yes", "recommended", or "suggested", use your previously stated recommendation/suggestion as the answer.
|
||||||
|
- Otherwise, validate the answer maps to one option or fits the <=5 word constraint.
|
||||||
|
- If ambiguous, ask for a quick disambiguation (count still belongs to same question; do not advance).
|
||||||
|
- Once satisfactory, record it in working memory (do not yet write to disk) and move to the next queued question.
|
||||||
|
- Stop asking further questions when:
|
||||||
|
- All critical ambiguities resolved early (remaining queued items become unnecessary), OR
|
||||||
|
- User signals completion ("done", "good", "no more"), OR
|
||||||
|
- You reach 5 asked questions.
|
||||||
|
- Never reveal future queued questions in advance.
|
||||||
|
- If no valid questions exist at start, immediately report no critical ambiguities.
|
||||||
|
|
||||||
|
5. Integration after EACH accepted answer (incremental update approach):
|
||||||
|
- Maintain in-memory representation of the spec (loaded once at start) plus the raw file contents.
|
||||||
|
- For the first integrated answer in this session:
|
||||||
|
- Ensure a `## Clarifications` section exists (create it just after the highest-level contextual/overview section per the spec template if missing).
|
||||||
|
- Under it, create (if not present) a `### Session YYYY-MM-DD` subheading for today.
|
||||||
|
- Append a bullet line immediately after acceptance: `- Q: <question> → A: <final answer>`.
|
||||||
|
- Then immediately apply the clarification to the most appropriate section(s):
|
||||||
|
- Functional ambiguity → Update or add a bullet in Functional Requirements.
|
||||||
|
- User interaction / actor distinction → Update User Stories or Actors subsection (if present) with clarified role, constraint, or scenario.
|
||||||
|
- Data shape / entities → Update Data Model (add fields, types, relationships) preserving ordering; note added constraints succinctly.
|
||||||
|
- Non-functional constraint → Add/modify measurable criteria in Non-Functional / Quality Attributes section (convert vague adjective to metric or explicit target).
|
||||||
|
- Edge case / negative flow → Add a new bullet under Edge Cases / Error Handling (or create such subsection if template provides placeholder for it).
|
||||||
|
- Terminology conflict → Normalize term across spec; retain original only if necessary by adding `(formerly referred to as "X")` once.
|
||||||
|
- If the clarification invalidates an earlier ambiguous statement, replace that statement instead of duplicating; leave no obsolete contradictory text.
|
||||||
|
- Save the spec file AFTER each integration to minimize risk of context loss (atomic overwrite).
|
||||||
|
- Preserve formatting: do not reorder unrelated sections; keep heading hierarchy intact.
|
||||||
|
- Keep each inserted clarification minimal and testable (avoid narrative drift).
|
||||||
|
|
||||||
|
6. Validation (performed after EACH write plus final pass):
|
||||||
|
- Clarifications session contains exactly one bullet per accepted answer (no duplicates).
|
||||||
|
- Total asked (accepted) questions ≤ 5.
|
||||||
|
- Updated sections contain no lingering vague placeholders the new answer was meant to resolve.
|
||||||
|
- No contradictory earlier statement remains (scan for now-invalid alternative choices removed).
|
||||||
|
- Markdown structure valid; only allowed new headings: `## Clarifications`, `### Session YYYY-MM-DD`.
|
||||||
|
- Terminology consistency: same canonical term used across all updated sections.
|
||||||
|
|
||||||
|
7. Write the updated spec back to `FEATURE_SPEC`.
|
||||||
|
|
||||||
|
8. Report completion (after questioning loop ends or early termination):
|
||||||
|
- Number of questions asked & answered.
|
||||||
|
- Path to updated spec.
|
||||||
|
- Sections touched (list names).
|
||||||
|
- Coverage summary table listing each taxonomy category with Status: Resolved (was Partial/Missing and addressed), Deferred (exceeds question quota or better suited for planning), Clear (already sufficient), Outstanding (still Partial/Missing but low impact).
|
||||||
|
- If any Outstanding or Deferred remain, recommend whether to proceed to `/speckit.plan` or run `/speckit.clarify` again later post-plan.
|
||||||
|
- Suggested next command.
|
||||||
|
|
||||||
|
Behavior rules:
|
||||||
|
|
||||||
|
- If no meaningful ambiguities found (or all potential questions would be low-impact), respond: "No critical ambiguities detected worth formal clarification." and suggest proceeding.
|
||||||
|
- If spec file missing, instruct user to run `/speckit.specify` first (do not create a new spec here).
|
||||||
|
- Never exceed 5 total asked questions (clarification retries for a single question do not count as new questions).
|
||||||
|
- Avoid speculative tech stack questions unless the absence blocks functional clarity.
|
||||||
|
- Respect user early termination signals ("stop", "done", "proceed").
|
||||||
|
- If no questions asked due to full coverage, output a compact coverage summary (all categories Clear) then suggest advancing.
|
||||||
|
- If quota reached with unresolved high-impact categories remaining, explicitly flag them under Deferred with rationale.
|
||||||
|
|
||||||
|
Context for prioritization: $ARGUMENTS
|
||||||
82
src/prompts/speckit/speckit.constitution.md
Normal file
82
src/prompts/speckit/speckit.constitution.md
Normal file
@@ -0,0 +1,82 @@
|
|||||||
|
---
|
||||||
|
description: Create or update the project constitution from interactive or provided principle inputs, ensuring all dependent templates stay in sync.
|
||||||
|
handoffs:
|
||||||
|
- label: Build Specification
|
||||||
|
agent: speckit.specify
|
||||||
|
prompt: Implement the feature specification based on the updated constitution. I want to build...
|
||||||
|
---
|
||||||
|
|
||||||
|
## User Input
|
||||||
|
|
||||||
|
```text
|
||||||
|
$ARGUMENTS
|
||||||
|
```
|
||||||
|
|
||||||
|
You **MUST** consider the user input before proceeding (if not empty).
|
||||||
|
|
||||||
|
## Outline
|
||||||
|
|
||||||
|
You are updating the project constitution at `.specify/memory/constitution.md`. This file is a TEMPLATE containing placeholder tokens in square brackets (e.g. `[PROJECT_NAME]`, `[PRINCIPLE_1_NAME]`). Your job is to (a) collect/derive concrete values, (b) fill the template precisely, and (c) propagate any amendments across dependent artifacts.
|
||||||
|
|
||||||
|
Follow this execution flow:
|
||||||
|
|
||||||
|
1. Load the existing constitution template at `.specify/memory/constitution.md`.
|
||||||
|
- Identify every placeholder token of the form `[ALL_CAPS_IDENTIFIER]`.
|
||||||
|
**IMPORTANT**: The user might require less or more principles than the ones used in the template. If a number is specified, respect that - follow the general template. You will update the doc accordingly.
|
||||||
|
|
||||||
|
2. Collect/derive values for placeholders:
|
||||||
|
- If user input (conversation) supplies a value, use it.
|
||||||
|
- Otherwise infer from existing repo context (README, docs, prior constitution versions if embedded).
|
||||||
|
- For governance dates: `RATIFICATION_DATE` is the original adoption date (if unknown ask or mark TODO), `LAST_AMENDED_DATE` is today if changes are made, otherwise keep previous.
|
||||||
|
- `CONSTITUTION_VERSION` must increment according to semantic versioning rules:
|
||||||
|
- MAJOR: Backward incompatible governance/principle removals or redefinitions.
|
||||||
|
- MINOR: New principle/section added or materially expanded guidance.
|
||||||
|
- PATCH: Clarifications, wording, typo fixes, non-semantic refinements.
|
||||||
|
- If version bump type ambiguous, propose reasoning before finalizing.
|
||||||
|
|
||||||
|
3. Draft the updated constitution content:
|
||||||
|
- Replace every placeholder with concrete text (no bracketed tokens left except intentionally retained template slots that the project has chosen not to define yet—explicitly justify any left).
|
||||||
|
- Preserve heading hierarchy; comments can be removed once replaced unless they still add clarifying guidance.
|
||||||
|
- Ensure each Principle section: succinct name line, paragraph (or bullet list) capturing non‑negotiable rules, explicit rationale if not obvious.
|
||||||
|
- Ensure Governance section lists amendment procedure, versioning policy, and compliance review expectations.
|
||||||
|
|
||||||
|
4. Consistency propagation checklist (convert prior checklist into active validations):
|
||||||
|
- Read `.specify/templates/plan-template.md` and ensure any "Constitution Check" or rules align with updated principles.
|
||||||
|
- Read `.specify/templates/spec-template.md` for scope/requirements alignment—update if constitution adds/removes mandatory sections or constraints.
|
||||||
|
- Read `.specify/templates/tasks-template.md` and ensure task categorization reflects new or removed principle-driven task types (e.g., observability, versioning, testing discipline).
|
||||||
|
- Read each command file in `.specify/templates/commands/*.md` (including this one) to verify no outdated references (agent-specific names like CLAUDE only) remain when generic guidance is required.
|
||||||
|
- Read any runtime guidance docs (e.g., `README.md`, `docs/quickstart.md`, or agent-specific guidance files if present). Update references to principles changed.
|
||||||
|
|
||||||
|
5. Produce a Sync Impact Report (prepend as an HTML comment at top of the constitution file after update):
|
||||||
|
- Version change: old → new
|
||||||
|
- List of modified principles (old title → new title if renamed)
|
||||||
|
- Added sections
|
||||||
|
- Removed sections
|
||||||
|
- Templates requiring updates (✅ updated / ⚠ pending) with file paths
|
||||||
|
- Follow-up TODOs if any placeholders intentionally deferred.
|
||||||
|
|
||||||
|
6. Validation before final output:
|
||||||
|
- No remaining unexplained bracket tokens.
|
||||||
|
- Version line matches report.
|
||||||
|
- Dates ISO format YYYY-MM-DD.
|
||||||
|
- Principles are declarative, testable, and free of vague language ("should" → replace with MUST/SHOULD rationale where appropriate).
|
||||||
|
|
||||||
|
7. Write the completed constitution back to `.specify/memory/constitution.md` (overwrite).
|
||||||
|
|
||||||
|
8. Output a final summary to the user with:
|
||||||
|
- New version and bump rationale.
|
||||||
|
- Any files flagged for manual follow-up.
|
||||||
|
- Suggested commit message (e.g., `docs: amend constitution to vX.Y.Z (principle additions + governance update)`).
|
||||||
|
|
||||||
|
Formatting & Style Requirements:
|
||||||
|
|
||||||
|
- Use Markdown headings exactly as in the template (do not demote/promote levels).
|
||||||
|
- Wrap long rationale lines to keep readability (<100 chars ideally) but do not hard enforce with awkward breaks.
|
||||||
|
- Keep a single blank line between sections.
|
||||||
|
- Avoid trailing whitespace.
|
||||||
|
|
||||||
|
If the user supplies partial updates (e.g., only one principle revision), still perform validation and version decision steps.
|
||||||
|
|
||||||
|
If critical info missing (e.g., ratification date truly unknown), insert `TODO(<FIELD_NAME>): explanation` and include in the Sync Impact Report under deferred items.
|
||||||
|
|
||||||
|
Do not create a new template; always operate on the existing `.specify/memory/constitution.md` file.
|
||||||
126
src/prompts/speckit/speckit.implement.md
Normal file
126
src/prompts/speckit/speckit.implement.md
Normal file
@@ -0,0 +1,126 @@
|
|||||||
|
---
|
||||||
|
description: Execute spec-kit tasks using Maestro's Auto Run feature with optional git worktree support for parallel implementation.
|
||||||
|
---
|
||||||
|
|
||||||
|
## User Input
|
||||||
|
|
||||||
|
```text
|
||||||
|
$ARGUMENTS
|
||||||
|
```
|
||||||
|
|
||||||
|
You **MUST** consider the user input before proceeding (if not empty).
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
This command guides you through implementing your spec-kit feature using Maestro's powerful automation capabilities. The `tasks.md` file generated by `/speckit.tasks` is designed to work seamlessly with Maestro's Auto Run feature.
|
||||||
|
|
||||||
|
## Implementation Workflow
|
||||||
|
|
||||||
|
### Step 1: Locate Your Tasks File
|
||||||
|
|
||||||
|
Your `tasks.md` file is located in your feature's spec directory (e.g., `specs/1-my-feature/tasks.md`). This file contains all implementation tasks in a checkbox format that Auto Run can process automatically.
|
||||||
|
|
||||||
|
### Step 2: Configure Auto Run
|
||||||
|
|
||||||
|
1. **Open the Right Bar** in Maestro (press `Cmd/Ctrl + B` or click the sidebar toggle)
|
||||||
|
2. **Select the "Auto Run" tab**
|
||||||
|
3. **Set the Auto Run folder** to your spec-kit documents directory:
|
||||||
|
- Click the folder icon or use the folder selector
|
||||||
|
- Navigate to your feature's spec directory (e.g., `specs/1-my-feature/`)
|
||||||
|
4. **Select your `tasks.md` file** from the document list
|
||||||
|
|
||||||
|
### Step 3: Start Automated Implementation
|
||||||
|
|
||||||
|
Once configured, Auto Run will:
|
||||||
|
- Read each task from `tasks.md`
|
||||||
|
- Execute tasks sequentially (respecting dependencies)
|
||||||
|
- Mark tasks as completed (`[X]`) with implementation notes
|
||||||
|
- Handle parallel tasks (`[P]` marker) when possible
|
||||||
|
|
||||||
|
**To start**: Click the "Run" button or press `Cmd/Ctrl + Enter` in the Auto Run panel.
|
||||||
|
|
||||||
|
## Advanced: Parallel Implementation with Git Worktrees
|
||||||
|
|
||||||
|
For larger features with independent components, you can use git worktrees to implement multiple parts in parallel across different Maestro sessions.
|
||||||
|
|
||||||
|
### What are Worktrees?
|
||||||
|
|
||||||
|
Git worktrees allow you to have multiple working directories for the same repository, each on a different branch. This enables:
|
||||||
|
- Multiple AI agents working on different feature branches simultaneously
|
||||||
|
- Isolated changes that won't conflict during development
|
||||||
|
- Easy merging when components are complete
|
||||||
|
|
||||||
|
### Setting Up Parallel Implementation
|
||||||
|
|
||||||
|
1. **Identify Independent Tasks**: Look for tasks marked with `[P]` in your `tasks.md` that can run in parallel.
|
||||||
|
|
||||||
|
2. **Create Worktrees in Maestro**:
|
||||||
|
- In each session, Maestro can automatically create a worktree for the feature branch
|
||||||
|
- Use the worktree toggle in Auto Run to enable worktree mode
|
||||||
|
- Each session gets its own isolated working directory
|
||||||
|
|
||||||
|
3. **Assign Tasks to Sessions**:
|
||||||
|
- Session 1: Phase 1 (Setup) + User Story 1 tasks
|
||||||
|
- Session 2: User Story 2 tasks (if independent)
|
||||||
|
- Session 3: User Story 3 tasks (if independent)
|
||||||
|
|
||||||
|
4. **Merge When Complete**:
|
||||||
|
- Each session commits to its feature branch
|
||||||
|
- Use Maestro's git integration to merge branches
|
||||||
|
- Or merge manually: `git merge session-branch`
|
||||||
|
|
||||||
|
### Worktree Commands
|
||||||
|
|
||||||
|
Maestro handles worktrees automatically, but for manual setup:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Create a worktree for a feature branch
|
||||||
|
git worktree add ../my-feature-worktree feature-branch
|
||||||
|
|
||||||
|
# List existing worktrees
|
||||||
|
git worktree list
|
||||||
|
|
||||||
|
# Remove a worktree when done
|
||||||
|
git worktree remove ../my-feature-worktree
|
||||||
|
```
|
||||||
|
|
||||||
|
## Best Practices
|
||||||
|
|
||||||
|
1. **Complete Setup Phase First**: Always complete Phase 1 (Setup) before parallelizing user stories.
|
||||||
|
|
||||||
|
2. **Respect Dependencies**: Tasks without the `[P]` marker should run sequentially.
|
||||||
|
|
||||||
|
3. **Review Before Merging**: Use `/speckit.analyze` after implementation to verify consistency.
|
||||||
|
|
||||||
|
4. **Incremental Testing**: Each user story phase should be independently testable. Verify before moving to the next.
|
||||||
|
|
||||||
|
5. **Use Checklists**: If you created checklists with `/speckit.checklist`, verify them before marking the feature complete.
|
||||||
|
|
||||||
|
## Task Format Reference
|
||||||
|
|
||||||
|
Your `tasks.md` uses this format that Auto Run understands:
|
||||||
|
|
||||||
|
```markdown
|
||||||
|
- [ ] T001 Setup project structure
|
||||||
|
- [ ] T002 [P] Configure database connection
|
||||||
|
- [ ] T003 [P] [US1] Create User model in src/models/user.py
|
||||||
|
- [ ] T004 [US1] Implement UserService
|
||||||
|
```
|
||||||
|
|
||||||
|
- `- [ ]` = Incomplete task (Auto Run will process)
|
||||||
|
- `- [X]` = Completed task (Auto Run will skip)
|
||||||
|
- `[P]` = Parallelizable (can run with other [P] tasks)
|
||||||
|
- `[US1]` = User Story 1 (groups related tasks)
|
||||||
|
|
||||||
|
## Getting Help
|
||||||
|
|
||||||
|
- **View task progress**: Check the Auto Run panel in the Right Bar
|
||||||
|
- **See implementation details**: Each completed task includes notes about what was done
|
||||||
|
- **Troubleshoot issues**: Check the session logs in the main terminal view
|
||||||
|
|
||||||
|
## Next Steps After Implementation
|
||||||
|
|
||||||
|
1. Run `/speckit.analyze` to verify implementation consistency
|
||||||
|
2. Complete any remaining checklists
|
||||||
|
3. Run tests to ensure everything works
|
||||||
|
4. Create a pull request with your changes
|
||||||
89
src/prompts/speckit/speckit.plan.md
Normal file
89
src/prompts/speckit/speckit.plan.md
Normal file
@@ -0,0 +1,89 @@
|
|||||||
|
---
|
||||||
|
description: Execute the implementation planning workflow using the plan template to generate design artifacts.
|
||||||
|
handoffs:
|
||||||
|
- label: Create Tasks
|
||||||
|
agent: speckit.tasks
|
||||||
|
prompt: Break the plan into tasks
|
||||||
|
send: true
|
||||||
|
- label: Create Checklist
|
||||||
|
agent: speckit.checklist
|
||||||
|
prompt: Create a checklist for the following domain...
|
||||||
|
---
|
||||||
|
|
||||||
|
## User Input
|
||||||
|
|
||||||
|
```text
|
||||||
|
$ARGUMENTS
|
||||||
|
```
|
||||||
|
|
||||||
|
You **MUST** consider the user input before proceeding (if not empty).
|
||||||
|
|
||||||
|
## Outline
|
||||||
|
|
||||||
|
1. **Setup**: Run `.specify/scripts/bash/setup-plan.sh --json` from repo root and parse JSON for FEATURE_SPEC, IMPL_PLAN, SPECS_DIR, BRANCH. For single quotes in args like "I'm Groot", use escape syntax: e.g. 'I'\''m Groot' (or double-quote if possible: "I'm Groot").
|
||||||
|
|
||||||
|
2. **Load context**: Read FEATURE_SPEC and `.specify/memory/constitution.md`. Load IMPL_PLAN template (already copied).
|
||||||
|
|
||||||
|
3. **Execute plan workflow**: Follow the structure in IMPL_PLAN template to:
|
||||||
|
- Fill Technical Context (mark unknowns as "NEEDS CLARIFICATION")
|
||||||
|
- Fill Constitution Check section from constitution
|
||||||
|
- Evaluate gates (ERROR if violations unjustified)
|
||||||
|
- Phase 0: Generate research.md (resolve all NEEDS CLARIFICATION)
|
||||||
|
- Phase 1: Generate data-model.md, contracts/, quickstart.md
|
||||||
|
- Phase 1: Update agent context by running the agent script
|
||||||
|
- Re-evaluate Constitution Check post-design
|
||||||
|
|
||||||
|
4. **Stop and report**: Command ends after Phase 2 planning. Report branch, IMPL_PLAN path, and generated artifacts.
|
||||||
|
|
||||||
|
## Phases
|
||||||
|
|
||||||
|
### Phase 0: Outline & Research
|
||||||
|
|
||||||
|
1. **Extract unknowns from Technical Context** above:
|
||||||
|
- For each NEEDS CLARIFICATION → research task
|
||||||
|
- For each dependency → best practices task
|
||||||
|
- For each integration → patterns task
|
||||||
|
|
||||||
|
2. **Generate and dispatch research agents**:
|
||||||
|
|
||||||
|
```text
|
||||||
|
For each unknown in Technical Context:
|
||||||
|
Task: "Research {unknown} for {feature context}"
|
||||||
|
For each technology choice:
|
||||||
|
Task: "Find best practices for {tech} in {domain}"
|
||||||
|
```
|
||||||
|
|
||||||
|
3. **Consolidate findings** in `research.md` using format:
|
||||||
|
- Decision: [what was chosen]
|
||||||
|
- Rationale: [why chosen]
|
||||||
|
- Alternatives considered: [what else evaluated]
|
||||||
|
|
||||||
|
**Output**: research.md with all NEEDS CLARIFICATION resolved
|
||||||
|
|
||||||
|
### Phase 1: Design & Contracts
|
||||||
|
|
||||||
|
**Prerequisites:** `research.md` complete
|
||||||
|
|
||||||
|
1. **Extract entities from feature spec** → `data-model.md`:
|
||||||
|
- Entity name, fields, relationships
|
||||||
|
- Validation rules from requirements
|
||||||
|
- State transitions if applicable
|
||||||
|
|
||||||
|
2. **Generate API contracts** from functional requirements:
|
||||||
|
- For each user action → endpoint
|
||||||
|
- Use standard REST/GraphQL patterns
|
||||||
|
- Output OpenAPI/GraphQL schema to `/contracts/`
|
||||||
|
|
||||||
|
3. **Agent context update**:
|
||||||
|
- Run `.specify/scripts/bash/update-agent-context.sh claude`
|
||||||
|
- These scripts detect which AI agent is in use
|
||||||
|
- Update the appropriate agent-specific context file
|
||||||
|
- Add only new technology from current plan
|
||||||
|
- Preserve manual additions between markers
|
||||||
|
|
||||||
|
**Output**: data-model.md, /contracts/*, quickstart.md, agent-specific file
|
||||||
|
|
||||||
|
## Key rules
|
||||||
|
|
||||||
|
- Use absolute paths
|
||||||
|
- ERROR on gate failures or unresolved clarifications
|
||||||
258
src/prompts/speckit/speckit.specify.md
Normal file
258
src/prompts/speckit/speckit.specify.md
Normal file
@@ -0,0 +1,258 @@
|
|||||||
|
---
|
||||||
|
description: Create or update the feature specification from a natural language feature description.
|
||||||
|
handoffs:
|
||||||
|
- label: Build Technical Plan
|
||||||
|
agent: speckit.plan
|
||||||
|
prompt: Create a plan for the spec. I am building with...
|
||||||
|
- label: Clarify Spec Requirements
|
||||||
|
agent: speckit.clarify
|
||||||
|
prompt: Clarify specification requirements
|
||||||
|
send: true
|
||||||
|
---
|
||||||
|
|
||||||
|
## User Input
|
||||||
|
|
||||||
|
```text
|
||||||
|
$ARGUMENTS
|
||||||
|
```
|
||||||
|
|
||||||
|
You **MUST** consider the user input before proceeding (if not empty).
|
||||||
|
|
||||||
|
## Outline
|
||||||
|
|
||||||
|
The text the user typed after `/speckit.specify` in the triggering message **is** the feature description. Assume you always have it available in this conversation even if `$ARGUMENTS` appears literally below. Do not ask the user to repeat it unless they provided an empty command.
|
||||||
|
|
||||||
|
Given that feature description, do this:
|
||||||
|
|
||||||
|
1. **Generate a concise short name** (2-4 words) for the branch:
|
||||||
|
- Analyze the feature description and extract the most meaningful keywords
|
||||||
|
- Create a 2-4 word short name that captures the essence of the feature
|
||||||
|
- Use action-noun format when possible (e.g., "add-user-auth", "fix-payment-bug")
|
||||||
|
- Preserve technical terms and acronyms (OAuth2, API, JWT, etc.)
|
||||||
|
- Keep it concise but descriptive enough to understand the feature at a glance
|
||||||
|
- Examples:
|
||||||
|
- "I want to add user authentication" → "user-auth"
|
||||||
|
- "Implement OAuth2 integration for the API" → "oauth2-api-integration"
|
||||||
|
- "Create a dashboard for analytics" → "analytics-dashboard"
|
||||||
|
- "Fix payment processing timeout bug" → "fix-payment-timeout"
|
||||||
|
|
||||||
|
2. **Check for existing branches before creating new one**:
|
||||||
|
|
||||||
|
a. First, fetch all remote branches to ensure we have the latest information:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
git fetch --all --prune
|
||||||
|
```
|
||||||
|
|
||||||
|
b. Find the highest feature number across all sources for the short-name:
|
||||||
|
- Remote branches: `git ls-remote --heads origin | grep -E 'refs/heads/[0-9]+-<short-name>$'`
|
||||||
|
- Local branches: `git branch | grep -E '^[* ]*[0-9]+-<short-name>$'`
|
||||||
|
- Specs directories: Check for directories matching `specs/[0-9]+-<short-name>`
|
||||||
|
|
||||||
|
c. Determine the next available number:
|
||||||
|
- Extract all numbers from all three sources
|
||||||
|
- Find the highest number N
|
||||||
|
- Use N+1 for the new branch number
|
||||||
|
|
||||||
|
d. Run the script `.specify/scripts/bash/create-new-feature.sh --json "$ARGUMENTS"` with the calculated number and short-name:
|
||||||
|
- Pass `--number N+1` and `--short-name "your-short-name"` along with the feature description
|
||||||
|
- Bash example: `.specify/scripts/bash/create-new-feature.sh --json --number 5 --short-name "user-auth" "Add user authentication"`
|
||||||
|
- PowerShell example: `.specify/scripts/powershell/create-new-feature.ps1 -Json -Number 5 -ShortName "user-auth" "Add user authentication"`
|
||||||
|
|
||||||
|
**IMPORTANT**:
|
||||||
|
- Check all three sources (remote branches, local branches, specs directories) to find the highest number
|
||||||
|
- Only match branches/directories with the exact short-name pattern
|
||||||
|
- If no existing branches/directories found with this short-name, start with number 1
|
||||||
|
- You must only ever run this script once per feature
|
||||||
|
- The JSON is provided in the terminal as output - always refer to it to get the actual content you're looking for
|
||||||
|
- The JSON output will contain BRANCH_NAME and SPEC_FILE paths
|
||||||
|
- For single quotes in args like "I'm Groot", use escape syntax: e.g. 'I'\''m Groot' (or double-quote if possible: "I'm Groot")
|
||||||
|
|
||||||
|
3. Load `.specify/templates/spec-template.md` to understand required sections.
|
||||||
|
|
||||||
|
4. Follow this execution flow:
|
||||||
|
|
||||||
|
1. Parse user description from Input
|
||||||
|
If empty: ERROR "No feature description provided"
|
||||||
|
2. Extract key concepts from description
|
||||||
|
Identify: actors, actions, data, constraints
|
||||||
|
3. For unclear aspects:
|
||||||
|
- Make informed guesses based on context and industry standards
|
||||||
|
- Only mark with [NEEDS CLARIFICATION: specific question] if:
|
||||||
|
- The choice significantly impacts feature scope or user experience
|
||||||
|
- Multiple reasonable interpretations exist with different implications
|
||||||
|
- No reasonable default exists
|
||||||
|
- **LIMIT: Maximum 3 [NEEDS CLARIFICATION] markers total**
|
||||||
|
- Prioritize clarifications by impact: scope > security/privacy > user experience > technical details
|
||||||
|
4. Fill User Scenarios & Testing section
|
||||||
|
If no clear user flow: ERROR "Cannot determine user scenarios"
|
||||||
|
5. Generate Functional Requirements
|
||||||
|
Each requirement must be testable
|
||||||
|
Use reasonable defaults for unspecified details (document assumptions in Assumptions section)
|
||||||
|
6. Define Success Criteria
|
||||||
|
Create measurable, technology-agnostic outcomes
|
||||||
|
Include both quantitative metrics (time, performance, volume) and qualitative measures (user satisfaction, task completion)
|
||||||
|
Each criterion must be verifiable without implementation details
|
||||||
|
7. Identify Key Entities (if data involved)
|
||||||
|
8. Return: SUCCESS (spec ready for planning)
|
||||||
|
|
||||||
|
5. Write the specification to SPEC_FILE using the template structure, replacing placeholders with concrete details derived from the feature description (arguments) while preserving section order and headings.
|
||||||
|
|
||||||
|
6. **Specification Quality Validation**: After writing the initial spec, validate it against quality criteria:
|
||||||
|
|
||||||
|
a. **Create Spec Quality Checklist**: Generate a checklist file at `FEATURE_DIR/checklists/requirements.md` using the checklist template structure with these validation items:
|
||||||
|
|
||||||
|
```markdown
|
||||||
|
# Specification Quality Checklist: [FEATURE NAME]
|
||||||
|
|
||||||
|
**Purpose**: Validate specification completeness and quality before proceeding to planning
|
||||||
|
**Created**: [DATE]
|
||||||
|
**Feature**: [Link to spec.md]
|
||||||
|
|
||||||
|
## Content Quality
|
||||||
|
|
||||||
|
- [ ] No implementation details (languages, frameworks, APIs)
|
||||||
|
- [ ] Focused on user value and business needs
|
||||||
|
- [ ] Written for non-technical stakeholders
|
||||||
|
- [ ] All mandatory sections completed
|
||||||
|
|
||||||
|
## Requirement Completeness
|
||||||
|
|
||||||
|
- [ ] No [NEEDS CLARIFICATION] markers remain
|
||||||
|
- [ ] Requirements are testable and unambiguous
|
||||||
|
- [ ] Success criteria are measurable
|
||||||
|
- [ ] Success criteria are technology-agnostic (no implementation details)
|
||||||
|
- [ ] All acceptance scenarios are defined
|
||||||
|
- [ ] Edge cases are identified
|
||||||
|
- [ ] Scope is clearly bounded
|
||||||
|
- [ ] Dependencies and assumptions identified
|
||||||
|
|
||||||
|
## Feature Readiness
|
||||||
|
|
||||||
|
- [ ] All functional requirements have clear acceptance criteria
|
||||||
|
- [ ] User scenarios cover primary flows
|
||||||
|
- [ ] Feature meets measurable outcomes defined in Success Criteria
|
||||||
|
- [ ] No implementation details leak into specification
|
||||||
|
|
||||||
|
## Notes
|
||||||
|
|
||||||
|
- Items marked incomplete require spec updates before `/speckit.clarify` or `/speckit.plan`
|
||||||
|
```
|
||||||
|
|
||||||
|
b. **Run Validation Check**: Review the spec against each checklist item:
|
||||||
|
- For each item, determine if it passes or fails
|
||||||
|
- Document specific issues found (quote relevant spec sections)
|
||||||
|
|
||||||
|
c. **Handle Validation Results**:
|
||||||
|
|
||||||
|
- **If all items pass**: Mark checklist complete and proceed to step 7
|
||||||
|
|
||||||
|
- **If items fail (excluding [NEEDS CLARIFICATION])**:
|
||||||
|
1. List the failing items and specific issues
|
||||||
|
2. Update the spec to address each issue
|
||||||
|
3. Re-run validation until all items pass (max 3 iterations)
|
||||||
|
4. If still failing after 3 iterations, document remaining issues in checklist notes and warn user
|
||||||
|
|
||||||
|
- **If [NEEDS CLARIFICATION] markers remain**:
|
||||||
|
1. Extract all [NEEDS CLARIFICATION: ...] markers from the spec
|
||||||
|
2. **LIMIT CHECK**: If more than 3 markers exist, keep only the 3 most critical (by scope/security/UX impact) and make informed guesses for the rest
|
||||||
|
3. For each clarification needed (max 3), present options to user in this format:
|
||||||
|
|
||||||
|
```markdown
|
||||||
|
## Question [N]: [Topic]
|
||||||
|
|
||||||
|
**Context**: [Quote relevant spec section]
|
||||||
|
|
||||||
|
**What we need to know**: [Specific question from NEEDS CLARIFICATION marker]
|
||||||
|
|
||||||
|
**Suggested Answers**:
|
||||||
|
|
||||||
|
| Option | Answer | Implications |
|
||||||
|
|--------|--------|--------------|
|
||||||
|
| A | [First suggested answer] | [What this means for the feature] |
|
||||||
|
| B | [Second suggested answer] | [What this means for the feature] |
|
||||||
|
| C | [Third suggested answer] | [What this means for the feature] |
|
||||||
|
| Custom | Provide your own answer | [Explain how to provide custom input] |
|
||||||
|
|
||||||
|
**Your choice**: _[Wait for user response]_
|
||||||
|
```
|
||||||
|
|
||||||
|
4. **CRITICAL - Table Formatting**: Ensure markdown tables are properly formatted:
|
||||||
|
- Use consistent spacing with pipes aligned
|
||||||
|
- Each cell should have spaces around content: `| Content |` not `|Content|`
|
||||||
|
- Header separator must have at least 3 dashes: `|--------|`
|
||||||
|
- Test that the table renders correctly in markdown preview
|
||||||
|
5. Number questions sequentially (Q1, Q2, Q3 - max 3 total)
|
||||||
|
6. Present all questions together before waiting for responses
|
||||||
|
7. Wait for user to respond with their choices for all questions (e.g., "Q1: A, Q2: Custom - [details], Q3: B")
|
||||||
|
8. Update the spec by replacing each [NEEDS CLARIFICATION] marker with the user's selected or provided answer
|
||||||
|
9. Re-run validation after all clarifications are resolved
|
||||||
|
|
||||||
|
d. **Update Checklist**: After each validation iteration, update the checklist file with current pass/fail status
|
||||||
|
|
||||||
|
7. Report completion with branch name, spec file path, checklist results, and readiness for the next phase (`/speckit.clarify` or `/speckit.plan`).
|
||||||
|
|
||||||
|
**NOTE:** The script creates and checks out the new branch and initializes the spec file before writing.
|
||||||
|
|
||||||
|
## General Guidelines
|
||||||
|
|
||||||
|
## Quick Guidelines
|
||||||
|
|
||||||
|
- Focus on **WHAT** users need and **WHY**.
|
||||||
|
- Avoid HOW to implement (no tech stack, APIs, code structure).
|
||||||
|
- Written for business stakeholders, not developers.
|
||||||
|
- DO NOT create any checklists that are embedded in the spec. That will be a separate command.
|
||||||
|
|
||||||
|
### Section Requirements
|
||||||
|
|
||||||
|
- **Mandatory sections**: Must be completed for every feature
|
||||||
|
- **Optional sections**: Include only when relevant to the feature
|
||||||
|
- When a section doesn't apply, remove it entirely (don't leave as "N/A")
|
||||||
|
|
||||||
|
### For AI Generation
|
||||||
|
|
||||||
|
When creating this spec from a user prompt:
|
||||||
|
|
||||||
|
1. **Make informed guesses**: Use context, industry standards, and common patterns to fill gaps
|
||||||
|
2. **Document assumptions**: Record reasonable defaults in the Assumptions section
|
||||||
|
3. **Limit clarifications**: Maximum 3 [NEEDS CLARIFICATION] markers - use only for critical decisions that:
|
||||||
|
- Significantly impact feature scope or user experience
|
||||||
|
- Have multiple reasonable interpretations with different implications
|
||||||
|
- Lack any reasonable default
|
||||||
|
4. **Prioritize clarifications**: scope > security/privacy > user experience > technical details
|
||||||
|
5. **Think like a tester**: Every vague requirement should fail the "testable and unambiguous" checklist item
|
||||||
|
6. **Common areas needing clarification** (only if no reasonable default exists):
|
||||||
|
- Feature scope and boundaries (include/exclude specific use cases)
|
||||||
|
- User types and permissions (if multiple conflicting interpretations possible)
|
||||||
|
- Security/compliance requirements (when legally/financially significant)
|
||||||
|
|
||||||
|
**Examples of reasonable defaults** (don't ask about these):
|
||||||
|
|
||||||
|
- Data retention: Industry-standard practices for the domain
|
||||||
|
- Performance targets: Standard web/mobile app expectations unless specified
|
||||||
|
- Error handling: User-friendly messages with appropriate fallbacks
|
||||||
|
- Authentication method: Standard session-based or OAuth2 for web apps
|
||||||
|
- Integration patterns: RESTful APIs unless specified otherwise
|
||||||
|
|
||||||
|
### Success Criteria Guidelines
|
||||||
|
|
||||||
|
Success criteria must be:
|
||||||
|
|
||||||
|
1. **Measurable**: Include specific metrics (time, percentage, count, rate)
|
||||||
|
2. **Technology-agnostic**: No mention of frameworks, languages, databases, or tools
|
||||||
|
3. **User-focused**: Describe outcomes from user/business perspective, not system internals
|
||||||
|
4. **Verifiable**: Can be tested/validated without knowing implementation details
|
||||||
|
|
||||||
|
**Good examples**:
|
||||||
|
|
||||||
|
- "Users can complete checkout in under 3 minutes"
|
||||||
|
- "System supports 10,000 concurrent users"
|
||||||
|
- "95% of searches return results in under 1 second"
|
||||||
|
- "Task completion rate improves by 40%"
|
||||||
|
|
||||||
|
**Bad examples** (implementation-focused):
|
||||||
|
|
||||||
|
- "API response time is under 200ms" (too technical, use "Users see results instantly")
|
||||||
|
- "Database can handle 1000 TPS" (implementation detail, use user-facing metric)
|
||||||
|
- "React components render efficiently" (framework-specific)
|
||||||
|
- "Redis cache hit rate above 80%" (technology-specific)
|
||||||
137
src/prompts/speckit/speckit.tasks.md
Normal file
137
src/prompts/speckit/speckit.tasks.md
Normal file
@@ -0,0 +1,137 @@
|
|||||||
|
---
|
||||||
|
description: Generate an actionable, dependency-ordered tasks.md for the feature based on available design artifacts.
|
||||||
|
handoffs:
|
||||||
|
- label: Analyze For Consistency
|
||||||
|
agent: speckit.analyze
|
||||||
|
prompt: Run a project analysis for consistency
|
||||||
|
send: true
|
||||||
|
- label: Implement Project
|
||||||
|
agent: speckit.implement
|
||||||
|
prompt: Start the implementation in phases
|
||||||
|
send: true
|
||||||
|
---
|
||||||
|
|
||||||
|
## User Input
|
||||||
|
|
||||||
|
```text
|
||||||
|
$ARGUMENTS
|
||||||
|
```
|
||||||
|
|
||||||
|
You **MUST** consider the user input before proceeding (if not empty).
|
||||||
|
|
||||||
|
## Outline
|
||||||
|
|
||||||
|
1. **Setup**: Run `.specify/scripts/bash/check-prerequisites.sh --json` from repo root and parse FEATURE_DIR and AVAILABLE_DOCS list. All paths must be absolute. For single quotes in args like "I'm Groot", use escape syntax: e.g 'I'\''m Groot' (or double-quote if possible: "I'm Groot").
|
||||||
|
|
||||||
|
2. **Load design documents**: Read from FEATURE_DIR:
|
||||||
|
- **Required**: plan.md (tech stack, libraries, structure), spec.md (user stories with priorities)
|
||||||
|
- **Optional**: data-model.md (entities), contracts/ (API endpoints), research.md (decisions), quickstart.md (test scenarios)
|
||||||
|
- Note: Not all projects have all documents. Generate tasks based on what's available.
|
||||||
|
|
||||||
|
3. **Execute task generation workflow**:
|
||||||
|
- Load plan.md and extract tech stack, libraries, project structure
|
||||||
|
- Load spec.md and extract user stories with their priorities (P1, P2, P3, etc.)
|
||||||
|
- If data-model.md exists: Extract entities and map to user stories
|
||||||
|
- If contracts/ exists: Map endpoints to user stories
|
||||||
|
- If research.md exists: Extract decisions for setup tasks
|
||||||
|
- Generate tasks organized by user story (see Task Generation Rules below)
|
||||||
|
- Generate dependency graph showing user story completion order
|
||||||
|
- Create parallel execution examples per user story
|
||||||
|
- Validate task completeness (each user story has all needed tasks, independently testable)
|
||||||
|
|
||||||
|
4. **Generate tasks.md**: Use `.specify/templates/tasks-template.md` as structure, fill with:
|
||||||
|
- Correct feature name from plan.md
|
||||||
|
- Phase 1: Setup tasks (project initialization)
|
||||||
|
- Phase 2: Foundational tasks (blocking prerequisites for all user stories)
|
||||||
|
- Phase 3+: One phase per user story (in priority order from spec.md)
|
||||||
|
- Each phase includes: story goal, independent test criteria, tests (if requested), implementation tasks
|
||||||
|
- Final Phase: Polish & cross-cutting concerns
|
||||||
|
- All tasks must follow the strict checklist format (see Task Generation Rules below)
|
||||||
|
- Clear file paths for each task
|
||||||
|
- Dependencies section showing story completion order
|
||||||
|
- Parallel execution examples per story
|
||||||
|
- Implementation strategy section (MVP first, incremental delivery)
|
||||||
|
|
||||||
|
5. **Report**: Output path to generated tasks.md and summary:
|
||||||
|
- Total task count
|
||||||
|
- Task count per user story
|
||||||
|
- Parallel opportunities identified
|
||||||
|
- Independent test criteria for each story
|
||||||
|
- Suggested MVP scope (typically just User Story 1)
|
||||||
|
- Format validation: Confirm ALL tasks follow the checklist format (checkbox, ID, labels, file paths)
|
||||||
|
|
||||||
|
Context for task generation: $ARGUMENTS
|
||||||
|
|
||||||
|
The tasks.md should be immediately executable - each task must be specific enough that an LLM can complete it without additional context.
|
||||||
|
|
||||||
|
## Task Generation Rules
|
||||||
|
|
||||||
|
**CRITICAL**: Tasks MUST be organized by user story to enable independent implementation and testing.
|
||||||
|
|
||||||
|
**Tests are OPTIONAL**: Only generate test tasks if explicitly requested in the feature specification or if user requests TDD approach.
|
||||||
|
|
||||||
|
### Checklist Format (REQUIRED)
|
||||||
|
|
||||||
|
Every task MUST strictly follow this format:
|
||||||
|
|
||||||
|
```text
|
||||||
|
- [ ] [TaskID] [P?] [Story?] Description with file path
|
||||||
|
```
|
||||||
|
|
||||||
|
**Format Components**:
|
||||||
|
|
||||||
|
1. **Checkbox**: ALWAYS start with `- [ ]` (markdown checkbox)
|
||||||
|
2. **Task ID**: Sequential number (T001, T002, T003...) in execution order
|
||||||
|
3. **[P] marker**: Include ONLY if task is parallelizable (different files, no dependencies on incomplete tasks)
|
||||||
|
4. **[Story] label**: REQUIRED for user story phase tasks only
|
||||||
|
- Format: [US1], [US2], [US3], etc. (maps to user stories from spec.md)
|
||||||
|
- Setup phase: NO story label
|
||||||
|
- Foundational phase: NO story label
|
||||||
|
- User Story phases: MUST have story label
|
||||||
|
- Polish phase: NO story label
|
||||||
|
5. **Description**: Clear action with exact file path
|
||||||
|
|
||||||
|
**Examples**:
|
||||||
|
|
||||||
|
- ✅ CORRECT: `- [ ] T001 Create project structure per implementation plan`
|
||||||
|
- ✅ CORRECT: `- [ ] T005 [P] Implement authentication middleware in src/middleware/auth.py`
|
||||||
|
- ✅ CORRECT: `- [ ] T012 [P] [US1] Create User model in src/models/user.py`
|
||||||
|
- ✅ CORRECT: `- [ ] T014 [US1] Implement UserService in src/services/user_service.py`
|
||||||
|
- ❌ WRONG: `- [ ] Create User model` (missing ID and Story label)
|
||||||
|
- ❌ WRONG: `T001 [US1] Create model` (missing checkbox)
|
||||||
|
- ❌ WRONG: `- [ ] [US1] Create User model` (missing Task ID)
|
||||||
|
- ❌ WRONG: `- [ ] T001 [US1] Create model` (missing file path)
|
||||||
|
|
||||||
|
### Task Organization
|
||||||
|
|
||||||
|
1. **From User Stories (spec.md)** - PRIMARY ORGANIZATION:
|
||||||
|
- Each user story (P1, P2, P3...) gets its own phase
|
||||||
|
- Map all related components to their story:
|
||||||
|
- Models needed for that story
|
||||||
|
- Services needed for that story
|
||||||
|
- Endpoints/UI needed for that story
|
||||||
|
- If tests requested: Tests specific to that story
|
||||||
|
- Mark story dependencies (most stories should be independent)
|
||||||
|
|
||||||
|
2. **From Contracts**:
|
||||||
|
- Map each contract/endpoint → to the user story it serves
|
||||||
|
- If tests requested: Each contract → contract test task [P] before implementation in that story's phase
|
||||||
|
|
||||||
|
3. **From Data Model**:
|
||||||
|
- Map each entity to the user story(ies) that need it
|
||||||
|
- If entity serves multiple stories: Put in earliest story or Setup phase
|
||||||
|
- Relationships → service layer tasks in appropriate story phase
|
||||||
|
|
||||||
|
4. **From Setup/Infrastructure**:
|
||||||
|
- Shared infrastructure → Setup phase (Phase 1)
|
||||||
|
- Foundational/blocking tasks → Foundational phase (Phase 2)
|
||||||
|
- Story-specific setup → within that story's phase
|
||||||
|
|
||||||
|
### Phase Structure
|
||||||
|
|
||||||
|
- **Phase 1**: Setup (project initialization)
|
||||||
|
- **Phase 2**: Foundational (blocking prerequisites - MUST complete before user stories)
|
||||||
|
- **Phase 3+**: User Stories in priority order (P1, P2, P3...)
|
||||||
|
- Within each story: Tests (if requested) → Models → Services → Endpoints → Integration
|
||||||
|
- Each phase should be a complete, independently testable increment
|
||||||
|
- **Final Phase**: Polish & Cross-Cutting Concerns
|
||||||
30
src/prompts/speckit/speckit.taskstoissues.md
Normal file
30
src/prompts/speckit/speckit.taskstoissues.md
Normal file
@@ -0,0 +1,30 @@
|
|||||||
|
---
|
||||||
|
description: Convert existing tasks into actionable, dependency-ordered GitHub issues for the feature based on available design artifacts.
|
||||||
|
tools: ['github/github-mcp-server/issue_write']
|
||||||
|
---
|
||||||
|
|
||||||
|
## User Input
|
||||||
|
|
||||||
|
```text
|
||||||
|
$ARGUMENTS
|
||||||
|
```
|
||||||
|
|
||||||
|
You **MUST** consider the user input before proceeding (if not empty).
|
||||||
|
|
||||||
|
## Outline
|
||||||
|
|
||||||
|
1. Run `.specify/scripts/bash/check-prerequisites.sh --json --require-tasks --include-tasks` from repo root and parse FEATURE_DIR and AVAILABLE_DOCS list. All paths must be absolute. For single quotes in args like "I'm Groot", use escape syntax: e.g 'I'\''m Groot' (or double-quote if possible: "I'm Groot").
|
||||||
|
1. From the executed script, extract the path to **tasks**.
|
||||||
|
1. Get the Git remote by running:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
git config --get remote.origin.url
|
||||||
|
```
|
||||||
|
|
||||||
|
> [!CAUTION]
|
||||||
|
> ONLY PROCEED TO NEXT STEPS IF THE REMOTE IS A GITHUB URL
|
||||||
|
|
||||||
|
1. For each task in the list, use the GitHub MCP server to create a new issue in the repository that is representative of the Git remote.
|
||||||
|
|
||||||
|
> [!CAUTION]
|
||||||
|
> UNDER NO CIRCUMSTANCES EVER CREATE ISSUES IN REPOSITORIES THAT DO NOT MATCH THE REMOTE URL
|
||||||
@@ -81,6 +81,7 @@ import { ToastContainer } from './components/Toast';
|
|||||||
|
|
||||||
// Import services
|
// Import services
|
||||||
import { gitService } from './services/git';
|
import { gitService } from './services/git';
|
||||||
|
import { getSpeckitCommands } from './services/speckit';
|
||||||
|
|
||||||
// Import prompts and synopsis parsing
|
// Import prompts and synopsis parsing
|
||||||
import { autorunSynopsisPrompt, maestroSystemPrompt } from '../prompts';
|
import { autorunSynopsisPrompt, maestroSystemPrompt } from '../prompts';
|
||||||
@@ -90,7 +91,8 @@ import { parseSynopsis } from '../shared/synopsis';
|
|||||||
import type {
|
import type {
|
||||||
ToolType, SessionState, RightPanelTab, SettingsTab,
|
ToolType, SessionState, RightPanelTab, SettingsTab,
|
||||||
FocusArea, LogEntry, Session, Group, AITab, UsageStats, QueuedItem, BatchRunConfig,
|
FocusArea, LogEntry, Session, Group, AITab, UsageStats, QueuedItem, BatchRunConfig,
|
||||||
AgentError, BatchRunState, GroupChat, GroupChatMessage, GroupChatState
|
AgentError, BatchRunState, GroupChat, GroupChatMessage, GroupChatState,
|
||||||
|
SpecKitCommand
|
||||||
} from './types';
|
} from './types';
|
||||||
import { THEMES } from './constants/themes';
|
import { THEMES } from './constants/themes';
|
||||||
import { generateId } from './utils/ids';
|
import { generateId } from './utils/ids';
|
||||||
@@ -219,6 +221,8 @@ export default function MaestroConsole() {
|
|||||||
// --- STATE ---
|
// --- STATE ---
|
||||||
const [sessions, setSessions] = useState<Session[]>([]);
|
const [sessions, setSessions] = useState<Session[]>([]);
|
||||||
const [groups, setGroups] = useState<Group[]>([]);
|
const [groups, setGroups] = useState<Group[]>([]);
|
||||||
|
// Spec Kit commands (loaded from bundled prompts)
|
||||||
|
const [speckitCommands, setSpeckitCommands] = useState<SpecKitCommand[]>([]);
|
||||||
// Track worktree paths that were manually removed - prevents re-discovery during this session
|
// Track worktree paths that were manually removed - prevents re-discovery during this session
|
||||||
const [removedWorktreePaths, setRemovedWorktreePaths] = useState<Set<string>>(new Set());
|
const [removedWorktreePaths, setRemovedWorktreePaths] = useState<Set<string>>(new Set());
|
||||||
// Ref to always access current removed paths (avoids stale closure in async scanner)
|
// Ref to always access current removed paths (avoids stale closure in async scanner)
|
||||||
@@ -924,6 +928,19 @@ export default function MaestroConsole() {
|
|||||||
}
|
}
|
||||||
}, [settingsLoaded, checkForUpdatesOnStartup]);
|
}, [settingsLoaded, checkForUpdatesOnStartup]);
|
||||||
|
|
||||||
|
// Load spec-kit commands on startup
|
||||||
|
useEffect(() => {
|
||||||
|
const loadSpeckitCommands = async () => {
|
||||||
|
try {
|
||||||
|
const commands = await getSpeckitCommands();
|
||||||
|
setSpeckitCommands(commands);
|
||||||
|
} catch (error) {
|
||||||
|
console.error('[SpecKit] Failed to load commands:', error);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
loadSpeckitCommands();
|
||||||
|
}, []);
|
||||||
|
|
||||||
// Set up process event listeners for real-time output
|
// Set up process event listeners for real-time output
|
||||||
useEffect(() => {
|
useEffect(() => {
|
||||||
// Handle process output data (BATCHED for performance)
|
// Handle process output data (BATCHED for performance)
|
||||||
@@ -1954,12 +1971,14 @@ export default function MaestroConsole() {
|
|||||||
const sessionsRef = useRef(sessions);
|
const sessionsRef = useRef(sessions);
|
||||||
const updateGlobalStatsRef = useRef(updateGlobalStats);
|
const updateGlobalStatsRef = useRef(updateGlobalStats);
|
||||||
const customAICommandsRef = useRef(customAICommands);
|
const customAICommandsRef = useRef(customAICommands);
|
||||||
|
const speckitCommandsRef = useRef(speckitCommands);
|
||||||
const activeSessionIdRef = useRef(activeSessionId);
|
const activeSessionIdRef = useRef(activeSessionId);
|
||||||
groupsRef.current = groups;
|
groupsRef.current = groups;
|
||||||
addToastRef.current = addToast;
|
addToastRef.current = addToast;
|
||||||
sessionsRef.current = sessions;
|
sessionsRef.current = sessions;
|
||||||
updateGlobalStatsRef.current = updateGlobalStats;
|
updateGlobalStatsRef.current = updateGlobalStats;
|
||||||
customAICommandsRef.current = customAICommands;
|
customAICommandsRef.current = customAICommands;
|
||||||
|
speckitCommandsRef.current = speckitCommands;
|
||||||
activeSessionIdRef.current = activeSessionId;
|
activeSessionIdRef.current = activeSessionId;
|
||||||
|
|
||||||
// Note: spawnBackgroundSynopsisRef and spawnAgentWithPromptRef are now provided by useAgentExecution hook
|
// Note: spawnBackgroundSynopsisRef and spawnAgentWithPromptRef are now provided by useAgentExecution hook
|
||||||
@@ -2322,7 +2341,7 @@ export default function MaestroConsole() {
|
|||||||
// Get capabilities for the active session's agent type
|
// Get capabilities for the active session's agent type
|
||||||
const { hasCapability: hasActiveSessionCapability } = useAgentCapabilities(activeSession?.toolType);
|
const { hasCapability: hasActiveSessionCapability } = useAgentCapabilities(activeSession?.toolType);
|
||||||
|
|
||||||
// Combine built-in slash commands with custom AI commands AND agent-specific commands for autocomplete
|
// Combine built-in slash commands with custom AI commands, spec-kit commands, AND agent-specific commands for autocomplete
|
||||||
const allSlashCommands = useMemo(() => {
|
const allSlashCommands = useMemo(() => {
|
||||||
const customCommandsAsSlash = customAICommands
|
const customCommandsAsSlash = customAICommands
|
||||||
.map(cmd => ({
|
.map(cmd => ({
|
||||||
@@ -2331,6 +2350,15 @@ export default function MaestroConsole() {
|
|||||||
aiOnly: true, // Custom AI commands are only available in AI mode
|
aiOnly: true, // Custom AI commands are only available in AI mode
|
||||||
prompt: cmd.prompt, // Include prompt for execution
|
prompt: cmd.prompt, // Include prompt for execution
|
||||||
}));
|
}));
|
||||||
|
// Spec Kit commands (bundled from github/spec-kit)
|
||||||
|
const speckitCommandsAsSlash = speckitCommands
|
||||||
|
.map(cmd => ({
|
||||||
|
command: cmd.command,
|
||||||
|
description: cmd.description,
|
||||||
|
aiOnly: true, // Spec-kit commands are only available in AI mode
|
||||||
|
prompt: cmd.prompt, // Include prompt for execution
|
||||||
|
isSpeckit: true, // Mark as spec-kit command for special handling
|
||||||
|
}));
|
||||||
// Only include agent-specific commands if the agent supports slash commands
|
// Only include agent-specific commands if the agent supports slash commands
|
||||||
// This allows built-in and custom commands to be shown for all agents (Codex, OpenCode, etc.)
|
// This allows built-in and custom commands to be shown for all agents (Codex, OpenCode, etc.)
|
||||||
const agentCommands = hasActiveSessionCapability('supportsSlashCommands')
|
const agentCommands = hasActiveSessionCapability('supportsSlashCommands')
|
||||||
@@ -2340,8 +2368,8 @@ export default function MaestroConsole() {
|
|||||||
aiOnly: true, // Agent commands are only available in AI mode
|
aiOnly: true, // Agent commands are only available in AI mode
|
||||||
}))
|
}))
|
||||||
: [];
|
: [];
|
||||||
return [...slashCommands, ...customCommandsAsSlash, ...agentCommands];
|
return [...slashCommands, ...customCommandsAsSlash, ...speckitCommandsAsSlash, ...agentCommands];
|
||||||
}, [customAICommands, activeSession?.agentCommands, hasActiveSessionCapability]);
|
}, [customAICommands, speckitCommands, activeSession?.agentCommands, hasActiveSessionCapability]);
|
||||||
|
|
||||||
// Derive current input value and setter based on active session mode
|
// Derive current input value and setter based on active session mode
|
||||||
// For AI mode: use active tab's inputValue (stored per-tab)
|
// For AI mode: use active tab's inputValue (stored per-tab)
|
||||||
@@ -4878,8 +4906,15 @@ export default function MaestroConsole() {
|
|||||||
cmd => cmd.command === commandText
|
cmd => cmd.command === commandText
|
||||||
);
|
);
|
||||||
|
|
||||||
if (matchingCustomCommand) {
|
// Look up in spec-kit commands
|
||||||
console.log('[Remote] Found matching custom AI command:', matchingCustomCommand.command);
|
const matchingSpeckitCommand = speckitCommandsRef.current.find(
|
||||||
|
cmd => cmd.command === commandText
|
||||||
|
);
|
||||||
|
|
||||||
|
const matchingCommand = matchingCustomCommand || matchingSpeckitCommand;
|
||||||
|
|
||||||
|
if (matchingCommand) {
|
||||||
|
console.log('[Remote] Found matching command:', matchingCommand.command, matchingSpeckitCommand ? '(spec-kit)' : '(custom)');
|
||||||
|
|
||||||
// Get git branch for template substitution
|
// Get git branch for template substitution
|
||||||
let gitBranch: string | undefined;
|
let gitBranch: string | undefined;
|
||||||
@@ -4894,12 +4929,12 @@ export default function MaestroConsole() {
|
|||||||
|
|
||||||
// Substitute template variables
|
// Substitute template variables
|
||||||
promptToSend = substituteTemplateVariables(
|
promptToSend = substituteTemplateVariables(
|
||||||
matchingCustomCommand.prompt,
|
matchingCommand.prompt,
|
||||||
{ session, gitBranch }
|
{ session, gitBranch }
|
||||||
);
|
);
|
||||||
commandMetadata = {
|
commandMetadata = {
|
||||||
command: matchingCustomCommand.command,
|
command: matchingCommand.command,
|
||||||
description: matchingCustomCommand.description
|
description: matchingCommand.description
|
||||||
};
|
};
|
||||||
|
|
||||||
console.log('[Remote] Substituted prompt (first 100 chars):', promptToSend.substring(0, 100));
|
console.log('[Remote] Substituted prompt (first 100 chars):', promptToSend.substring(0, 100));
|
||||||
@@ -5200,8 +5235,9 @@ export default function MaestroConsole() {
|
|||||||
sessionCustomContextWindow: session.customContextWindow,
|
sessionCustomContextWindow: session.customContextWindow,
|
||||||
});
|
});
|
||||||
} else if (item.type === 'command' && item.command) {
|
} else if (item.type === 'command' && item.command) {
|
||||||
// Process a slash command - find the matching custom AI command
|
// Process a slash command - find the matching custom AI command or speckit command
|
||||||
const matchingCommand = customAICommands.find(cmd => cmd.command === item.command);
|
const matchingCommand = customAICommands.find(cmd => cmd.command === item.command)
|
||||||
|
|| speckitCommands.find(cmd => cmd.command === item.command);
|
||||||
if (matchingCommand) {
|
if (matchingCommand) {
|
||||||
// Substitute template variables
|
// Substitute template variables
|
||||||
let gitBranch: string | undefined;
|
let gitBranch: string | undefined;
|
||||||
|
|||||||
@@ -5,6 +5,7 @@ import { CustomThemeBuilder } from './CustomThemeBuilder';
|
|||||||
import { useLayerStack } from '../contexts/LayerStackContext';
|
import { useLayerStack } from '../contexts/LayerStackContext';
|
||||||
import { MODAL_PRIORITIES } from '../constants/modalPriorities';
|
import { MODAL_PRIORITIES } from '../constants/modalPriorities';
|
||||||
import { AICommandsPanel } from './AICommandsPanel';
|
import { AICommandsPanel } from './AICommandsPanel';
|
||||||
|
import { SpecKitCommandsPanel } from './SpecKitCommandsPanel';
|
||||||
import { formatShortcutKeys } from '../utils/shortcutFormatter';
|
import { formatShortcutKeys } from '../utils/shortcutFormatter';
|
||||||
import { ToggleButtonGroup } from './ToggleButtonGroup';
|
import { ToggleButtonGroup } from './ToggleButtonGroup';
|
||||||
import { SettingCheckbox } from './SettingCheckbox';
|
import { SettingCheckbox } from './SettingCheckbox';
|
||||||
@@ -1458,11 +1459,22 @@ export const SettingsModal = memo(function SettingsModal(props: SettingsModalPro
|
|||||||
)}
|
)}
|
||||||
|
|
||||||
{activeTab === 'aicommands' && (
|
{activeTab === 'aicommands' && (
|
||||||
|
<div className="space-y-8">
|
||||||
<AICommandsPanel
|
<AICommandsPanel
|
||||||
theme={theme}
|
theme={theme}
|
||||||
customAICommands={props.customAICommands}
|
customAICommands={props.customAICommands}
|
||||||
setCustomAICommands={props.setCustomAICommands}
|
setCustomAICommands={props.setCustomAICommands}
|
||||||
/>
|
/>
|
||||||
|
|
||||||
|
{/* Divider */}
|
||||||
|
<div
|
||||||
|
className="border-t"
|
||||||
|
style={{ borderColor: theme.colors.border }}
|
||||||
|
/>
|
||||||
|
|
||||||
|
{/* Spec Kit Commands Section */}
|
||||||
|
<SpecKitCommandsPanel theme={theme} />
|
||||||
|
</div>
|
||||||
)}
|
)}
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
|
|||||||
378
src/renderer/components/SpecKitCommandsPanel.tsx
Normal file
378
src/renderer/components/SpecKitCommandsPanel.tsx
Normal file
@@ -0,0 +1,378 @@
|
|||||||
|
import React, { useState, useRef, useEffect } from 'react';
|
||||||
|
import { Edit2, Save, X, RotateCcw, RefreshCw, ExternalLink, ChevronDown, ChevronRight, Wand2 } from 'lucide-react';
|
||||||
|
import type { Theme, SpecKitCommand, SpecKitMetadata } from '../types';
|
||||||
|
import { useTemplateAutocomplete } from '../hooks/useTemplateAutocomplete';
|
||||||
|
import { TemplateAutocompleteDropdown } from './TemplateAutocompleteDropdown';
|
||||||
|
|
||||||
|
interface SpecKitCommandsPanelProps {
|
||||||
|
theme: Theme;
|
||||||
|
}
|
||||||
|
|
||||||
|
interface EditingCommand {
|
||||||
|
id: string;
|
||||||
|
prompt: string;
|
||||||
|
}
|
||||||
|
|
||||||
|
export function SpecKitCommandsPanel({ theme }: SpecKitCommandsPanelProps) {
|
||||||
|
const [commands, setCommands] = useState<SpecKitCommand[]>([]);
|
||||||
|
const [metadata, setMetadata] = useState<SpecKitMetadata | null>(null);
|
||||||
|
const [editingCommand, setEditingCommand] = useState<EditingCommand | null>(null);
|
||||||
|
const [isRefreshing, setIsRefreshing] = useState(false);
|
||||||
|
const [expandedCommands, setExpandedCommands] = useState<Set<string>>(new Set());
|
||||||
|
const [isLoading, setIsLoading] = useState(true);
|
||||||
|
|
||||||
|
const editCommandTextareaRef = useRef<HTMLTextAreaElement>(null);
|
||||||
|
|
||||||
|
// Template autocomplete for edit command prompt
|
||||||
|
const {
|
||||||
|
autocompleteState: editAutocompleteState,
|
||||||
|
handleKeyDown: handleEditAutocompleteKeyDown,
|
||||||
|
handleChange: handleEditAutocompleteChange,
|
||||||
|
selectVariable: selectEditVariable,
|
||||||
|
autocompleteRef: editAutocompleteRef,
|
||||||
|
} = useTemplateAutocomplete({
|
||||||
|
textareaRef: editCommandTextareaRef,
|
||||||
|
value: editingCommand?.prompt || '',
|
||||||
|
onChange: (value) => editingCommand && setEditingCommand({ ...editingCommand, prompt: value }),
|
||||||
|
});
|
||||||
|
|
||||||
|
// Load commands and metadata on mount
|
||||||
|
useEffect(() => {
|
||||||
|
const loadData = async () => {
|
||||||
|
setIsLoading(true);
|
||||||
|
try {
|
||||||
|
const [promptsResult, metadataResult] = await Promise.all([
|
||||||
|
window.maestro.speckit.getPrompts(),
|
||||||
|
window.maestro.speckit.getMetadata(),
|
||||||
|
]);
|
||||||
|
|
||||||
|
if (promptsResult.success && promptsResult.commands) {
|
||||||
|
setCommands(promptsResult.commands);
|
||||||
|
}
|
||||||
|
if (metadataResult.success && metadataResult.metadata) {
|
||||||
|
setMetadata(metadataResult.metadata);
|
||||||
|
}
|
||||||
|
} catch (error) {
|
||||||
|
console.error('Failed to load spec-kit commands:', error);
|
||||||
|
} finally {
|
||||||
|
setIsLoading(false);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
loadData();
|
||||||
|
}, []);
|
||||||
|
|
||||||
|
const handleSaveEdit = async () => {
|
||||||
|
if (!editingCommand) return;
|
||||||
|
|
||||||
|
try {
|
||||||
|
const result = await window.maestro.speckit.savePrompt(editingCommand.id, editingCommand.prompt);
|
||||||
|
if (result.success) {
|
||||||
|
setCommands(commands.map(cmd =>
|
||||||
|
cmd.id === editingCommand.id
|
||||||
|
? { ...cmd, prompt: editingCommand.prompt, isModified: true }
|
||||||
|
: cmd
|
||||||
|
));
|
||||||
|
setEditingCommand(null);
|
||||||
|
}
|
||||||
|
} catch (error) {
|
||||||
|
console.error('Failed to save prompt:', error);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
const handleReset = async (id: string) => {
|
||||||
|
try {
|
||||||
|
const result = await window.maestro.speckit.resetPrompt(id);
|
||||||
|
if (result.success && result.prompt) {
|
||||||
|
setCommands(commands.map(cmd =>
|
||||||
|
cmd.id === id
|
||||||
|
? { ...cmd, prompt: result.prompt!, isModified: false }
|
||||||
|
: cmd
|
||||||
|
));
|
||||||
|
}
|
||||||
|
} catch (error) {
|
||||||
|
console.error('Failed to reset prompt:', error);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
const handleRefresh = async () => {
|
||||||
|
setIsRefreshing(true);
|
||||||
|
try {
|
||||||
|
const result = await window.maestro.speckit.refresh();
|
||||||
|
if (result.success && result.metadata) {
|
||||||
|
setMetadata(result.metadata);
|
||||||
|
// Reload prompts after refresh
|
||||||
|
const promptsResult = await window.maestro.speckit.getPrompts();
|
||||||
|
if (promptsResult.success && promptsResult.commands) {
|
||||||
|
setCommands(promptsResult.commands);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} catch (error) {
|
||||||
|
console.error('Failed to refresh spec-kit prompts:', error);
|
||||||
|
} finally {
|
||||||
|
setIsRefreshing(false);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
const handleCancelEdit = () => {
|
||||||
|
setEditingCommand(null);
|
||||||
|
};
|
||||||
|
|
||||||
|
const toggleExpanded = (id: string) => {
|
||||||
|
const newExpanded = new Set(expandedCommands);
|
||||||
|
if (newExpanded.has(id)) {
|
||||||
|
newExpanded.delete(id);
|
||||||
|
} else {
|
||||||
|
newExpanded.add(id);
|
||||||
|
}
|
||||||
|
setExpandedCommands(newExpanded);
|
||||||
|
};
|
||||||
|
|
||||||
|
const formatDate = (isoDate: string) => {
|
||||||
|
try {
|
||||||
|
return new Date(isoDate).toLocaleDateString(undefined, {
|
||||||
|
year: 'numeric',
|
||||||
|
month: 'short',
|
||||||
|
day: 'numeric',
|
||||||
|
});
|
||||||
|
} catch {
|
||||||
|
return isoDate;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
if (isLoading) {
|
||||||
|
return (
|
||||||
|
<div className="space-y-4">
|
||||||
|
<div>
|
||||||
|
<label className="block text-xs font-bold opacity-70 uppercase mb-1 flex items-center gap-2">
|
||||||
|
<Wand2 className="w-3 h-3" />
|
||||||
|
Spec Kit Commands
|
||||||
|
</label>
|
||||||
|
<p className="text-xs opacity-50" style={{ color: theme.colors.textDim }}>
|
||||||
|
Loading spec-kit commands...
|
||||||
|
</p>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
return (
|
||||||
|
<div className="space-y-4">
|
||||||
|
<div>
|
||||||
|
<label className="block text-xs font-bold opacity-70 uppercase mb-1 flex items-center gap-2">
|
||||||
|
<Wand2 className="w-3 h-3" />
|
||||||
|
Spec Kit Commands
|
||||||
|
</label>
|
||||||
|
<p className="text-xs opacity-50" style={{ color: theme.colors.textDim }}>
|
||||||
|
Bundled commands from{' '}
|
||||||
|
<a
|
||||||
|
href="https://github.com/github/spec-kit"
|
||||||
|
target="_blank"
|
||||||
|
rel="noopener noreferrer"
|
||||||
|
className="underline hover:opacity-80 inline-flex items-center gap-1"
|
||||||
|
style={{ color: theme.colors.accent }}
|
||||||
|
>
|
||||||
|
github/spec-kit
|
||||||
|
<ExternalLink className="w-2.5 h-2.5" />
|
||||||
|
</a>
|
||||||
|
{' '}for structured specification workflows.
|
||||||
|
</p>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
{/* Metadata and refresh */}
|
||||||
|
{metadata && (
|
||||||
|
<div
|
||||||
|
className="flex items-center justify-between p-3 rounded-lg border"
|
||||||
|
style={{ backgroundColor: theme.colors.bgMain, borderColor: theme.colors.border }}
|
||||||
|
>
|
||||||
|
<div className="text-xs" style={{ color: theme.colors.textDim }}>
|
||||||
|
<span>Version: </span>
|
||||||
|
<span className="font-mono" style={{ color: theme.colors.textMain }}>
|
||||||
|
{metadata.sourceVersion}
|
||||||
|
</span>
|
||||||
|
<span className="mx-2">•</span>
|
||||||
|
<span>Updated: </span>
|
||||||
|
<span style={{ color: theme.colors.textMain }}>
|
||||||
|
{formatDate(metadata.lastRefreshed)}
|
||||||
|
</span>
|
||||||
|
</div>
|
||||||
|
<button
|
||||||
|
onClick={handleRefresh}
|
||||||
|
disabled={isRefreshing}
|
||||||
|
className="flex items-center gap-1.5 px-3 py-1.5 rounded text-xs font-medium transition-all disabled:opacity-50"
|
||||||
|
style={{
|
||||||
|
backgroundColor: theme.colors.bgActivity,
|
||||||
|
color: theme.colors.textMain,
|
||||||
|
border: `1px solid ${theme.colors.border}`,
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
<RefreshCw className={`w-3 h-3 ${isRefreshing ? 'animate-spin' : ''}`} />
|
||||||
|
{isRefreshing ? 'Checking...' : 'Check for Updates'}
|
||||||
|
</button>
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
|
||||||
|
{/* Commands list */}
|
||||||
|
<div className="space-y-2 max-h-[500px] overflow-y-auto pr-1 scrollbar-thin">
|
||||||
|
{commands.map((cmd) => (
|
||||||
|
<div
|
||||||
|
key={cmd.id}
|
||||||
|
className="rounded-lg border overflow-hidden"
|
||||||
|
style={{ backgroundColor: theme.colors.bgMain, borderColor: cmd.isCustom ? theme.colors.accent : theme.colors.border }}
|
||||||
|
>
|
||||||
|
{editingCommand?.id === cmd.id ? (
|
||||||
|
// Editing mode
|
||||||
|
<div className="p-3 space-y-3">
|
||||||
|
<div className="flex items-center justify-between">
|
||||||
|
<span className="font-mono font-bold text-sm" style={{ color: theme.colors.accent }}>
|
||||||
|
{cmd.command}
|
||||||
|
</span>
|
||||||
|
<div className="flex items-center gap-1">
|
||||||
|
<button
|
||||||
|
onClick={handleCancelEdit}
|
||||||
|
className="flex items-center gap-1 px-2 py-1 rounded text-xs font-medium transition-all"
|
||||||
|
style={{
|
||||||
|
backgroundColor: theme.colors.bgActivity,
|
||||||
|
color: theme.colors.textMain,
|
||||||
|
border: `1px solid ${theme.colors.border}`,
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
<X className="w-3 h-3" />
|
||||||
|
Cancel
|
||||||
|
</button>
|
||||||
|
<button
|
||||||
|
onClick={handleSaveEdit}
|
||||||
|
className="flex items-center gap-1 px-2 py-1 rounded text-xs font-medium transition-all"
|
||||||
|
style={{
|
||||||
|
backgroundColor: theme.colors.success,
|
||||||
|
color: '#000000',
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
<Save className="w-3 h-3" />
|
||||||
|
Save
|
||||||
|
</button>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
<div className="relative">
|
||||||
|
<textarea
|
||||||
|
ref={editCommandTextareaRef}
|
||||||
|
value={editingCommand.prompt}
|
||||||
|
onChange={handleEditAutocompleteChange}
|
||||||
|
onKeyDown={(e) => {
|
||||||
|
if (handleEditAutocompleteKeyDown(e)) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
if (e.key === 'Tab') {
|
||||||
|
e.preventDefault();
|
||||||
|
const textarea = e.currentTarget;
|
||||||
|
const start = textarea.selectionStart;
|
||||||
|
const end = textarea.selectionEnd;
|
||||||
|
const value = textarea.value;
|
||||||
|
const newValue = value.substring(0, start) + '\t' + value.substring(end);
|
||||||
|
setEditingCommand({ ...editingCommand, prompt: newValue });
|
||||||
|
setTimeout(() => {
|
||||||
|
textarea.selectionStart = textarea.selectionEnd = start + 1;
|
||||||
|
}, 0);
|
||||||
|
}
|
||||||
|
}}
|
||||||
|
rows={15}
|
||||||
|
className="w-full p-2 rounded border bg-transparent outline-none text-sm resize-y scrollbar-thin min-h-[300px] font-mono"
|
||||||
|
style={{ borderColor: theme.colors.border, color: theme.colors.textMain }}
|
||||||
|
/>
|
||||||
|
<TemplateAutocompleteDropdown
|
||||||
|
ref={editAutocompleteRef}
|
||||||
|
theme={theme}
|
||||||
|
state={editAutocompleteState}
|
||||||
|
onSelect={selectEditVariable}
|
||||||
|
/>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
) : (
|
||||||
|
// Display mode
|
||||||
|
<>
|
||||||
|
<button
|
||||||
|
onClick={() => toggleExpanded(cmd.id)}
|
||||||
|
className="w-full px-3 py-2.5 flex items-center justify-between hover:bg-white/5 transition-colors"
|
||||||
|
>
|
||||||
|
<div className="flex items-center gap-2">
|
||||||
|
{expandedCommands.has(cmd.id) ? (
|
||||||
|
<ChevronDown className="w-3.5 h-3.5" style={{ color: theme.colors.textDim }} />
|
||||||
|
) : (
|
||||||
|
<ChevronRight className="w-3.5 h-3.5" style={{ color: theme.colors.textDim }} />
|
||||||
|
)}
|
||||||
|
<span className="font-mono font-bold text-sm" style={{ color: theme.colors.accent }}>
|
||||||
|
{cmd.command}
|
||||||
|
</span>
|
||||||
|
{cmd.isCustom && (
|
||||||
|
<span
|
||||||
|
className="px-1.5 py-0.5 rounded text-[10px] font-medium"
|
||||||
|
style={{ backgroundColor: theme.colors.accent + '20', color: theme.colors.accent }}
|
||||||
|
>
|
||||||
|
Maestro
|
||||||
|
</span>
|
||||||
|
)}
|
||||||
|
{cmd.isModified && (
|
||||||
|
<span
|
||||||
|
className="px-1.5 py-0.5 rounded text-[10px] font-medium"
|
||||||
|
style={{ backgroundColor: theme.colors.warning + '20', color: theme.colors.warning }}
|
||||||
|
>
|
||||||
|
Modified
|
||||||
|
</span>
|
||||||
|
)}
|
||||||
|
</div>
|
||||||
|
<span className="text-xs truncate max-w-[300px]" style={{ color: theme.colors.textDim }}>
|
||||||
|
{cmd.description}
|
||||||
|
</span>
|
||||||
|
</button>
|
||||||
|
{expandedCommands.has(cmd.id) && (
|
||||||
|
<div className="px-3 pb-3 pt-1 border-t" style={{ borderColor: theme.colors.border }}>
|
||||||
|
<div className="flex items-center justify-end gap-1 mb-2">
|
||||||
|
{cmd.isModified && (
|
||||||
|
<button
|
||||||
|
onClick={() => handleReset(cmd.id)}
|
||||||
|
className="flex items-center gap-1 px-2 py-1 rounded text-xs font-medium transition-all hover:bg-white/10"
|
||||||
|
style={{ color: theme.colors.textDim }}
|
||||||
|
title="Reset to bundled default"
|
||||||
|
>
|
||||||
|
<RotateCcw className="w-3 h-3" />
|
||||||
|
Reset
|
||||||
|
</button>
|
||||||
|
)}
|
||||||
|
<button
|
||||||
|
onClick={() => setEditingCommand({ id: cmd.id, prompt: cmd.prompt })}
|
||||||
|
className="flex items-center gap-1 px-2 py-1 rounded text-xs font-medium transition-all hover:bg-white/10"
|
||||||
|
style={{ color: theme.colors.textDim }}
|
||||||
|
title="Edit prompt"
|
||||||
|
>
|
||||||
|
<Edit2 className="w-3 h-3" />
|
||||||
|
Edit
|
||||||
|
</button>
|
||||||
|
</div>
|
||||||
|
<div
|
||||||
|
className="text-xs p-2 rounded font-mono overflow-y-auto max-h-48 scrollbar-thin whitespace-pre-wrap"
|
||||||
|
style={{ backgroundColor: theme.colors.bgActivity, color: theme.colors.textMain }}
|
||||||
|
>
|
||||||
|
{cmd.prompt.length > 500 ? cmd.prompt.substring(0, 500) + '...' : cmd.prompt}
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
</>
|
||||||
|
)}
|
||||||
|
</div>
|
||||||
|
))}
|
||||||
|
</div>
|
||||||
|
|
||||||
|
{commands.length === 0 && (
|
||||||
|
<div
|
||||||
|
className="p-6 rounded-lg border border-dashed text-center"
|
||||||
|
style={{ borderColor: theme.colors.border }}
|
||||||
|
>
|
||||||
|
<Wand2 className="w-8 h-8 mx-auto mb-2 opacity-30" />
|
||||||
|
<p className="text-sm opacity-50" style={{ color: theme.colors.textDim }}>
|
||||||
|
No spec-kit commands loaded
|
||||||
|
</p>
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
</div>
|
||||||
|
);
|
||||||
|
}
|
||||||
55
src/renderer/global.d.ts
vendored
55
src/renderer/global.d.ts
vendored
@@ -1128,6 +1128,61 @@ interface MaestroAPI {
|
|||||||
error?: string;
|
error?: string;
|
||||||
}>;
|
}>;
|
||||||
};
|
};
|
||||||
|
speckit: {
|
||||||
|
getMetadata: () => Promise<{
|
||||||
|
success: boolean;
|
||||||
|
metadata?: {
|
||||||
|
lastRefreshed: string;
|
||||||
|
commitSha: string;
|
||||||
|
sourceVersion: string;
|
||||||
|
sourceUrl: string;
|
||||||
|
};
|
||||||
|
error?: string;
|
||||||
|
}>;
|
||||||
|
getPrompts: () => Promise<{
|
||||||
|
success: boolean;
|
||||||
|
commands?: Array<{
|
||||||
|
id: string;
|
||||||
|
command: string;
|
||||||
|
description: string;
|
||||||
|
prompt: string;
|
||||||
|
isCustom: boolean;
|
||||||
|
isModified: boolean;
|
||||||
|
}>;
|
||||||
|
error?: string;
|
||||||
|
}>;
|
||||||
|
getCommand: (slashCommand: string) => Promise<{
|
||||||
|
success: boolean;
|
||||||
|
command?: {
|
||||||
|
id: string;
|
||||||
|
command: string;
|
||||||
|
description: string;
|
||||||
|
prompt: string;
|
||||||
|
isCustom: boolean;
|
||||||
|
isModified: boolean;
|
||||||
|
};
|
||||||
|
error?: string;
|
||||||
|
}>;
|
||||||
|
savePrompt: (id: string, content: string) => Promise<{
|
||||||
|
success: boolean;
|
||||||
|
error?: string;
|
||||||
|
}>;
|
||||||
|
resetPrompt: (id: string) => Promise<{
|
||||||
|
success: boolean;
|
||||||
|
prompt?: string;
|
||||||
|
error?: string;
|
||||||
|
}>;
|
||||||
|
refresh: () => Promise<{
|
||||||
|
success: boolean;
|
||||||
|
metadata?: {
|
||||||
|
lastRefreshed: string;
|
||||||
|
commitSha: string;
|
||||||
|
sourceVersion: string;
|
||||||
|
sourceUrl: string;
|
||||||
|
};
|
||||||
|
error?: string;
|
||||||
|
}>;
|
||||||
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
declare global {
|
declare global {
|
||||||
|
|||||||
56
src/renderer/services/speckit.ts
Normal file
56
src/renderer/services/speckit.ts
Normal file
@@ -0,0 +1,56 @@
|
|||||||
|
/**
|
||||||
|
* Spec Kit Service
|
||||||
|
*
|
||||||
|
* Provides access to bundled spec-kit commands for the renderer.
|
||||||
|
* These commands integrate with the slash command system.
|
||||||
|
*/
|
||||||
|
|
||||||
|
import type { SpecKitCommand, SpecKitMetadata } from '../types';
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get all spec-kit commands from the main process
|
||||||
|
*/
|
||||||
|
export async function getSpeckitCommands(): Promise<SpecKitCommand[]> {
|
||||||
|
try {
|
||||||
|
const result = await window.maestro.speckit.getPrompts();
|
||||||
|
if (result.success && result.commands) {
|
||||||
|
return result.commands;
|
||||||
|
}
|
||||||
|
return [];
|
||||||
|
} catch (error) {
|
||||||
|
console.error('[SpecKit] Failed to get commands:', error);
|
||||||
|
return [];
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get spec-kit metadata (version, refresh date)
|
||||||
|
*/
|
||||||
|
export async function getSpeckitMetadata(): Promise<SpecKitMetadata | null> {
|
||||||
|
try {
|
||||||
|
const result = await window.maestro.speckit.getMetadata();
|
||||||
|
if (result.success && result.metadata) {
|
||||||
|
return result.metadata;
|
||||||
|
}
|
||||||
|
return null;
|
||||||
|
} catch (error) {
|
||||||
|
console.error('[SpecKit] Failed to get metadata:', error);
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get a single spec-kit command by its slash command string
|
||||||
|
*/
|
||||||
|
export async function getSpeckitCommand(slashCommand: string): Promise<SpecKitCommand | null> {
|
||||||
|
try {
|
||||||
|
const result = await window.maestro.speckit.getCommand(slashCommand);
|
||||||
|
if (result.success && result.command) {
|
||||||
|
return result.command;
|
||||||
|
}
|
||||||
|
return null;
|
||||||
|
} catch (error) {
|
||||||
|
console.error('[SpecKit] Failed to get command:', error);
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -532,6 +532,24 @@ export interface CustomAICommand {
|
|||||||
isBuiltIn?: boolean; // If true, cannot be deleted (only edited)
|
isBuiltIn?: boolean; // If true, cannot be deleted (only edited)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Spec Kit command definition (bundled from github/spec-kit)
|
||||||
|
export interface SpecKitCommand {
|
||||||
|
id: string; // e.g., 'constitution'
|
||||||
|
command: string; // e.g., '/speckit.constitution'
|
||||||
|
description: string;
|
||||||
|
prompt: string;
|
||||||
|
isCustom: boolean; // true only for 'implement' (our Maestro-specific version)
|
||||||
|
isModified: boolean; // true if user has edited
|
||||||
|
}
|
||||||
|
|
||||||
|
// Spec Kit metadata for tracking version and refresh status
|
||||||
|
export interface SpecKitMetadata {
|
||||||
|
lastRefreshed: string; // ISO date
|
||||||
|
commitSha: string; // Git commit SHA or version tag
|
||||||
|
sourceVersion: string; // Semantic version (e.g., '0.0.90')
|
||||||
|
sourceUrl: string; // GitHub repo URL
|
||||||
|
}
|
||||||
|
|
||||||
// Leaderboard registration data for runmaestro.ai integration
|
// Leaderboard registration data for runmaestro.ai integration
|
||||||
export interface LeaderboardRegistration {
|
export interface LeaderboardRegistration {
|
||||||
// Required fields
|
// Required fields
|
||||||
|
|||||||
Reference in New Issue
Block a user