diff --git a/src/cli/commands/analyze.ts b/src/cli/commands/analyze.ts
new file mode 100644
index 00000000..13379d1e
--- /dev/null
+++ b/src/cli/commands/analyze.ts
@@ -0,0 +1,217 @@
+import { existsSync, mkdirSync } from "node:fs";
+import { writeFile } from "node:fs/promises";
+import { resolve, dirname } from "node:path";
+import type { CAC } from "cac";
+
+import type { RuleConfig, RuleId } from "../../core/contracts/rule.js";
+import { analyzeFile } from "../../core/engine/rule-engine.js";
+import { loadFile, isJsonFile, isFixtureDir } from "../../core/engine/loader.js";
+import {
+ getFigmaToken, getReportsDir, ensureReportsDir,
+} from "../../core/engine/config-store.js";
+import { calculateScores, formatScoreSummary, buildResultJson } from "../../core/engine/scoring.js";
+import { getConfigsWithPreset, RULE_CONFIGS, type Preset } from "../../core/rules/rule-config.js";
+import { ruleRegistry } from "../../core/rules/rule-registry.js";
+import { loadCustomRules } from "../../core/rules/custom/custom-rule-loader.js";
+import { loadConfigFile, mergeConfigs } from "../../core/rules/custom/config-loader.js";
+import { generateHtmlReport } from "../../core/report-html/index.js";
+import { trackEvent, trackError, EVENTS } from "../../core/monitoring/index.js";
+import { pickRandomScope, countNodes, MAX_NODES_WITHOUT_SCOPE } from "../helpers.js";
+
+interface AnalyzeOptions {
+ preset?: Preset;
+ output?: string;
+ token?: string;
+ api?: boolean;
+ screenshot?: boolean;
+ customRules?: string;
+ config?: string;
+ noOpen?: boolean;
+ json?: boolean;
+}
+
+export function registerAnalyze(cli: CAC): void {
+ cli
+ .command("analyze ", "Analyze a Figma file or JSON fixture")
+ .option("--preset ", "Analysis preset (relaxed | dev-friendly | ai-ready | strict)")
+ .option("--output ", "HTML report output path")
+ .option("--token ", "Figma API token (or use FIGMA_TOKEN env var)")
+ .option("--api", "Load via Figma REST API (requires FIGMA_TOKEN)")
+ .option("--screenshot", "Include screenshot comparison in report (requires ANTHROPIC_API_KEY)")
+ .option("--custom-rules ", "Path to custom rules JSON file")
+ .option("--config ", "Path to config JSON file (override rule scores/settings)")
+ .option("--no-open", "Don't open report in browser after analysis")
+ .option("--json", "Output JSON results to stdout (same format as MCP)")
+ .example(" canicode analyze https://www.figma.com/design/ABC123/MyDesign")
+ .example(" canicode analyze https://www.figma.com/design/ABC123/MyDesign --api --token YOUR_TOKEN")
+ .example(" canicode analyze ./fixtures/my-design --output report.html")
+ .example(" canicode analyze ./fixtures/my-design --custom-rules ./my-rules.json")
+ .example(" canicode analyze ./fixtures/my-design --config ./my-config.json")
+ .action(async (input: string, options: AnalyzeOptions) => {
+ const analysisStart = Date.now();
+ trackEvent(EVENTS.ANALYSIS_STARTED, { source: isJsonFile(input) || isFixtureDir(input) ? "fixture" : "figma" });
+ try {
+ // Check init
+ if (!options.token && !getFigmaToken() && !isJsonFile(input) && !isFixtureDir(input)) {
+ throw new Error(
+ "canicode is not configured. Run 'canicode init --token YOUR_TOKEN' first."
+ );
+ }
+
+ // Validate --screenshot requirements
+ if (options.screenshot) {
+ const anthropicKey = process.env["ANTHROPIC_API_KEY"];
+ if (!anthropicKey) {
+ throw new Error(
+ "ANTHROPIC_API_KEY required for --screenshot mode. Set it in .env or environment."
+ );
+ }
+ console.log("Screenshot comparison mode enabled (coming soon).\n");
+ }
+
+ // Load file
+ const { file, nodeId } = await loadFile(input, options.token);
+
+ // Scope enforcement for large files
+ const totalNodes = countNodes(file.document);
+ let effectiveNodeId = nodeId;
+
+ if (!effectiveNodeId && totalNodes > MAX_NODES_WITHOUT_SCOPE) {
+ if (isJsonFile(input) || isFixtureDir(input)) {
+ // Fixture: auto-pick a random suitable FRAME
+ const picked = pickRandomScope(file.document);
+ if (picked) {
+ effectiveNodeId = picked.id;
+ console.log(`\nAuto-scoped to "${picked.name}" (${picked.id}, ${countNodes(picked)} nodes) — file too large (${totalNodes} nodes) for unscoped analysis.`);
+ } else {
+ console.warn(`\nWarning: Could not find a suitable scope in fixture. Analyzing all ${totalNodes} nodes.`);
+ }
+ } else {
+ // Figma URL: require explicit node-id
+ throw new Error(
+ `Too many nodes (${totalNodes}) for unscoped analysis. ` +
+ `Max ${MAX_NODES_WITHOUT_SCOPE} nodes without a node-id scope.\n\n` +
+ `Add ?node-id=XXX to the Figma URL to target a specific section.\n` +
+ `Example: canicode analyze "https://www.figma.com/design/.../MyDesign?node-id=1-234"`
+ );
+ }
+ }
+ if (!effectiveNodeId && totalNodes > 100) {
+ console.warn(`\nWarning: Analyzing ${totalNodes} nodes without scope. Results may be noisy.`);
+ console.warn("Tip: Add ?node-id=XXX to analyze a specific section.\n");
+ }
+
+ console.log(`\nAnalyzing: ${file.name}`);
+ console.log(`Nodes: ${totalNodes}`);
+
+ // Build rule configs: start from preset or defaults
+ let configs: Record = options.preset
+ ? { ...getConfigsWithPreset(options.preset) }
+ : { ...RULE_CONFIGS };
+
+ // Load and merge config file overrides
+ let excludeNodeNames: string[] | undefined;
+ let excludeNodeTypes: string[] | undefined;
+
+ if (options.config) {
+ const configFile = await loadConfigFile(options.config);
+ configs = mergeConfigs(configs, configFile);
+ excludeNodeNames = configFile.excludeNodeNames;
+ excludeNodeTypes = configFile.excludeNodeTypes;
+ console.log(`Config loaded: ${options.config}`);
+ }
+
+ // Load and register custom rules
+ if (options.customRules) {
+ const { rules, configs: customConfigs } = await loadCustomRules(options.customRules);
+ for (const rule of rules) {
+ ruleRegistry.register(rule);
+ }
+ configs = { ...configs, ...customConfigs };
+ console.log(`Custom rules loaded: ${rules.length} rules from ${options.customRules}`);
+ }
+
+ // Build analysis options
+ const analyzeOptions = {
+ configs: configs as Record,
+ ...(effectiveNodeId && { targetNodeId: effectiveNodeId }),
+ ...(excludeNodeNames && { excludeNodeNames }),
+ ...(excludeNodeTypes && { excludeNodeTypes }),
+ };
+
+ // Run analysis
+ const result = analyzeFile(file, analyzeOptions);
+ console.log(`Nodes: ${result.nodeCount} (max depth: ${result.maxDepth})`);
+
+ // Calculate scores
+ const scores = calculateScores(result);
+
+ // JSON output mode
+ if (options.json) {
+ console.log(JSON.stringify(buildResultJson(file.name, result, scores), null, 2));
+ return;
+ }
+
+ // Print summary to terminal
+ console.log("\n" + "=".repeat(50));
+ console.log(formatScoreSummary(scores));
+ console.log("=".repeat(50));
+
+ // Generate HTML report
+ const now = new Date();
+ const ts = `${now.getFullYear()}-${String(now.getMonth() + 1).padStart(2, "0")}-${String(now.getDate()).padStart(2, "0")}-${String(now.getHours()).padStart(2, "0")}-${String(now.getMinutes()).padStart(2, "0")}`;
+ let outputPath: string;
+
+ if (options.output) {
+ outputPath = resolve(options.output);
+ const outputDir = dirname(outputPath);
+ if (!existsSync(outputDir)) {
+ mkdirSync(outputDir, { recursive: true });
+ }
+ } else {
+ ensureReportsDir();
+ outputPath = resolve(getReportsDir(), `report-${ts}-${file.fileKey}.html`);
+ }
+
+ const figmaToken = options.token ?? getFigmaToken();
+ const html = generateHtmlReport(file, result, scores, { figmaToken });
+ await writeFile(outputPath, html, "utf-8");
+ console.log(`\nReport saved: ${outputPath}`);
+
+ trackEvent(EVENTS.ANALYSIS_COMPLETED, {
+ nodeCount: result.nodeCount,
+ issueCount: result.issues.length,
+ grade: scores.overall.grade,
+ percentage: scores.overall.percentage,
+ duration: Date.now() - analysisStart,
+ });
+ trackEvent(EVENTS.REPORT_GENERATED, { format: "html" });
+
+ // Open in browser unless --no-open
+ if (!options.noOpen) {
+ const { exec } = await import("node:child_process");
+ const cmd = process.platform === "darwin" ? "open" : process.platform === "win32" ? "start" : "xdg-open";
+ exec(`${cmd} "${outputPath}"`);
+ }
+
+ // Exit with error code if grade is F
+ if (scores.overall.grade === "F") {
+ process.exit(1);
+ }
+ } catch (error) {
+ trackError(
+ error instanceof Error ? error : new Error(String(error)),
+ { command: "analyze", input },
+ );
+ trackEvent(EVENTS.ANALYSIS_FAILED, {
+ error: error instanceof Error ? error.message : String(error),
+ duration: Date.now() - analysisStart,
+ });
+ console.error(
+ "\nError:",
+ error instanceof Error ? error.message : String(error)
+ );
+ process.exit(1);
+ }
+ });
+}
diff --git a/src/cli/commands/config.ts b/src/cli/commands/config.ts
new file mode 100644
index 00000000..84ebc375
--- /dev/null
+++ b/src/cli/commands/config.ts
@@ -0,0 +1,48 @@
+import type { CAC } from "cac";
+
+import {
+ getConfigPath, readConfig, setTelemetryEnabled,
+} from "../../core/engine/config-store.js";
+
/** CLI flags accepted by the `config` command (parsed by cac). */
interface ConfigOptions {
  // cac maps `--telemetry` → true and `--no-telemetry` → false;
  // undefined means neither flag was passed on the command line.
  telemetry?: boolean;
}
+
+export function registerConfig(cli: CAC): void {
+ cli
+ .command("config", "Manage canicode configuration")
+ .option("--telemetry", "Enable anonymous telemetry")
+ .option("--no-telemetry", "Disable anonymous telemetry")
+ .action((options: ConfigOptions) => {
+ try {
+ // CAC maps --no-telemetry to options.telemetry === false
+ if (options.telemetry === false) {
+ setTelemetryEnabled(false);
+ console.log("Telemetry disabled. No analytics data will be sent.");
+ return;
+ }
+
+ if (options.telemetry === true) {
+ setTelemetryEnabled(true);
+ console.log("Telemetry enabled. Only anonymous usage events are tracked — no design data.");
+ return;
+ }
+
+ // No flags: show current config
+ const cfg = readConfig();
+ console.log("CANICODE CONFIG\n");
+ console.log(` Config path: ${getConfigPath()}`);
+ console.log(` Figma token: ${cfg.figmaToken ? "set" : "not set"}`);
+ console.log(` Telemetry: ${cfg.telemetry !== false ? "enabled" : "disabled"}`);
+ console.log(`\nOptions:`);
+ console.log(` canicode config --no-telemetry Opt out of anonymous telemetry`);
+ console.log(` canicode config --telemetry Opt back in`);
+ } catch (error) {
+ console.error(
+ "\nError:",
+ error instanceof Error ? error.message : String(error)
+ );
+ process.exit(1);
+ }
+ });
+}
diff --git a/src/cli/commands/design-tree.ts b/src/cli/commands/design-tree.ts
new file mode 100644
index 00000000..35ba5af5
--- /dev/null
+++ b/src/cli/commands/design-tree.ts
@@ -0,0 +1,60 @@
+import { existsSync, mkdirSync } from "node:fs";
+import { resolve, dirname } from "node:path";
+import type { CAC } from "cac";
+
+import { loadFile, isJsonFile } from "../../core/engine/loader.js";
+
+export function registerDesignTree(cli: CAC): void {
+ cli
+ .command(
+ "design-tree ",
+ "Generate a DOM-like design tree from a Figma file or fixture"
+ )
+ .option("--token ", "Figma API token (or use FIGMA_TOKEN env var)")
+ .option("--output ", "Output file path (default: stdout)")
+ .option("--vector-dir ", "Directory with SVG files for VECTOR nodes (auto-detected from fixture path)")
+ .option("--image-dir ", "Directory with image PNGs for IMAGE fill nodes (auto-detected from fixture path)")
+ .example(" canicode design-tree ./fixtures/my-design")
+ .example(" canicode design-tree https://www.figma.com/design/ABC/File?node-id=1-234 --output tree.txt")
+ .action(async (input: string, options: { token?: string; output?: string; vectorDir?: string; imageDir?: string }) => {
+ try {
+ const { file } = await loadFile(input, options.token);
+
+ const fixtureBase = isJsonFile(input) ? dirname(resolve(input)) : resolve(input);
+
+ // Auto-detect vector dir from fixture path
+ let vectorDir = options.vectorDir;
+ if (!vectorDir) {
+ const autoDir = resolve(fixtureBase, "vectors");
+ if (existsSync(autoDir)) vectorDir = autoDir;
+ }
+
+ // Auto-detect image dir from fixture path
+ let imageDir = options.imageDir;
+ if (!imageDir) {
+ const autoDir = resolve(fixtureBase, "images");
+ if (existsSync(autoDir)) imageDir = autoDir;
+ }
+
+ const { generateDesignTreeWithStats } = await import("../../core/engine/design-tree.js");
+ const treeOptions = {
+ ...(vectorDir ? { vectorDir } : {}),
+ ...(imageDir ? { imageDir } : {}),
+ };
+ const stats = generateDesignTreeWithStats(file, treeOptions);
+
+ if (options.output) {
+ const outputDir = dirname(resolve(options.output));
+ if (!existsSync(outputDir)) mkdirSync(outputDir, { recursive: true });
+ const { writeFile: writeFileAsync } = await import("node:fs/promises");
+ await writeFileAsync(resolve(options.output), stats.tree, "utf-8");
+ console.log(`Design tree saved: ${resolve(options.output)} (${Math.round(stats.bytes / 1024)}KB, ~${stats.estimatedTokens} tokens)`);
+ } else {
+ console.log(stats.tree);
+ }
+ } catch (error) {
+ console.error("\nError:", error instanceof Error ? error.message : String(error));
+ process.exit(1);
+ }
+ });
+}
diff --git a/src/cli/commands/implement.ts b/src/cli/commands/implement.ts
new file mode 100644
index 00000000..15b652a9
--- /dev/null
+++ b/src/cli/commands/implement.ts
@@ -0,0 +1,253 @@
+import { existsSync, mkdirSync } from "node:fs";
+import { writeFile } from "node:fs/promises";
+import { resolve, dirname } from "node:path";
+import type { CAC } from "cac";
+
+import { parseFigmaUrl } from "../../core/adapters/figma-url-parser.js";
+import { analyzeFile } from "../../core/engine/rule-engine.js";
+import { loadFile, isFigmaUrl, isJsonFile, isFixtureDir } from "../../core/engine/loader.js";
+import { getFigmaToken } from "../../core/engine/config-store.js";
+import { calculateScores, buildResultJson } from "../../core/engine/scoring.js";
+import { collectVectorNodeIds, collectImageNodes, sanitizeFilename } from "../helpers.js";
+
/** CLI flags accepted by the `implement` command (parsed by cac). */
interface ImplementOptions {
  // Figma API token; falls back to stored config / FIGMA_TOKEN env var.
  token?: string;
  // Output directory for the generated package (default: ./canicode-implement/).
  output?: string;
  // Path to a custom prompt file, copied into the package as PROMPT.md.
  prompt?: string;
  // Image export scale as a string ("1".."4"); parsed with Number() downstream.
  imageScale?: string;
}
+
+export function registerImplement(cli: CAC): void {
+ cli
+ .command(
+ "implement ",
+ "Prepare design-to-code package: analysis + design tree + assets + prompt"
+ )
+ .option("--token ", "Figma API token (or use FIGMA_TOKEN env var)")
+ .option("--output ", "Output directory (default: ./canicode-implement/)")
+ .option("--prompt ", "Custom prompt file (default: built-in HTML+CSS prompt)")
+ .option("--image-scale ", "Image export scale: 2 for PC (default), 3 for mobile")
+ .example(" canicode implement ./fixtures/my-design")
+ .example(" canicode implement ./fixtures/my-design --prompt ./my-react-prompt.md --image-scale 3")
+ .action(async (input: string, options: ImplementOptions) => {
+ try {
+
+ const outputDir = resolve(options.output ?? "canicode-implement");
+ mkdirSync(outputDir, { recursive: true });
+
+ console.log("\nPreparing implementation package...\n");
+
+ // 1. Load file
+ const { file } = await loadFile(input, options.token);
+ console.log(`Design: ${file.name}`);
+
+ // 2. Analysis
+ const result = analyzeFile(file);
+ const scores = calculateScores(result);
+ const resultJson = buildResultJson(file.name, result, scores);
+ await writeFile(resolve(outputDir, "analysis.json"), JSON.stringify(resultJson, null, 2), "utf-8");
+ console.log(` analysis.json: ${result.issues.length} issues, grade ${scores.overall.grade}`);
+
+ // 3. Prepare assets (before design tree, so tree can reference image paths)
+ const fixtureBase = (isJsonFile(input) || isFixtureDir(input))
+ ? (isJsonFile(input) ? dirname(resolve(input)) : resolve(input))
+ : undefined;
+
+ let vectorDir = fixtureBase ? resolve(fixtureBase, "vectors") : undefined;
+ let imageDir = fixtureBase ? resolve(fixtureBase, "images") : undefined;
+
+ // Copy fixture assets to output
+ if (vectorDir && existsSync(vectorDir)) {
+ const vecOutputDir = resolve(outputDir, "vectors");
+ mkdirSync(vecOutputDir, { recursive: true });
+ const { readdirSync, copyFileSync } = await import("node:fs");
+ const vecFiles = readdirSync(vectorDir).filter(f => f.endsWith(".svg"));
+ for (const f of vecFiles) {
+ copyFileSync(resolve(vectorDir, f), resolve(vecOutputDir, f));
+ }
+ vectorDir = vecOutputDir;
+ console.log(` vectors/: ${vecFiles.length} SVGs copied`);
+ }
+
+ if (imageDir && existsSync(imageDir)) {
+ const imgOutputDir = resolve(outputDir, "images");
+ mkdirSync(imgOutputDir, { recursive: true });
+ const { readdirSync, copyFileSync } = await import("node:fs");
+ const imgFiles = readdirSync(imageDir).filter(f => f.endsWith(".png") || f.endsWith(".jpg") || f.endsWith(".json"));
+ for (const f of imgFiles) {
+ copyFileSync(resolve(imageDir, f), resolve(imgOutputDir, f));
+ }
+ imageDir = imgOutputDir;
+ const pngCount = imgFiles.filter(f => f.endsWith(".png")).length;
+ console.log(` images/: ${pngCount} assets copied`);
+ }
+
+ // Download assets from Figma API for live URLs
+ if (isFigmaUrl(input) && !fixtureBase) {
+ const figmaToken = options.token ?? getFigmaToken();
+ if (figmaToken) {
+ const imgScale = options.imageScale !== undefined ? Number(options.imageScale) : 2;
+ if (!Number.isFinite(imgScale) || imgScale < 1 || imgScale > 4) {
+ console.error("Error: --image-scale must be 1-4 (2 for PC, 3 for mobile)");
+ process.exit(1);
+ }
+
+ const { FigmaClient } = await import("../../core/adapters/figma-client.js");
+ const client = new FigmaClient({ token: figmaToken });
+
+ // Download screenshot
+ const { nodeId } = parseFigmaUrl(input);
+ const rootNodeId = nodeId?.replace(/-/g, ":") ?? file.document.id;
+ try {
+ const screenshotUrls = await client.getNodeImages(file.fileKey, [rootNodeId], { format: "png", scale: 2 });
+ const screenshotUrl = screenshotUrls[rootNodeId];
+ if (screenshotUrl) {
+ const resp = await fetch(screenshotUrl);
+ if (resp.ok) {
+ const buf = Buffer.from(await resp.arrayBuffer());
+ await writeFile(resolve(outputDir, "screenshot.png"), buf);
+ console.log(` screenshot.png: saved`);
+ }
+ }
+ } catch {
+ console.warn(" screenshot.png: failed to download (continuing)");
+ }
+
+ // Download vector SVGs
+ const vectorNodeIds = collectVectorNodeIds(file.document);
+ if (vectorNodeIds.length > 0) {
+ const vecOutDir = resolve(outputDir, "vectors");
+ mkdirSync(vecOutDir, { recursive: true });
+ try {
+ const svgUrls = await client.getNodeImages(file.fileKey, vectorNodeIds, { format: "svg" });
+ let downloaded = 0;
+ for (const [id, svgUrl] of Object.entries(svgUrls)) {
+ if (!svgUrl) continue;
+ try {
+ const resp = await fetch(svgUrl);
+ if (resp.ok) {
+ const svg = await resp.text();
+ const safeId = id.replace(/:/g, "-");
+ await writeFile(resolve(vecOutDir, `${safeId}.svg`), svg, "utf-8");
+ downloaded++;
+ }
+ } catch { /* skip */ }
+ }
+ console.log(` vectors/: ${downloaded}/${vectorNodeIds.length} SVGs`);
+ } catch {
+ console.warn(" vectors/: failed to download (continuing)");
+ }
+ }
+
+ // Download image PNGs
+ const imgNodes = collectImageNodes(file.document);
+ if (imgNodes.length > 0) {
+ const imgOutDir = resolve(outputDir, "images");
+ mkdirSync(imgOutDir, { recursive: true });
+ try {
+ const imgUrls = await client.getNodeImages(
+ file.fileKey,
+ imgNodes.map(n => n.id),
+ { format: "png", scale: imgScale },
+ );
+ const usedNames = new Map();
+ let downloaded = 0;
+ for (const { id, name } of imgNodes) {
+ const imgUrl = imgUrls[id];
+ if (!imgUrl) continue;
+ let base = sanitizeFilename(name);
+ const count = usedNames.get(base) ?? 0;
+ usedNames.set(base, count + 1);
+ if (count > 0) base = `${base}-${count + 1}`;
+ const filename = `${base}@${imgScale}x.png`;
+ try {
+ const resp = await fetch(imgUrl);
+ if (resp.ok) {
+ const buf = Buffer.from(await resp.arrayBuffer());
+ await writeFile(resolve(imgOutDir, filename), buf);
+ downloaded++;
+ }
+ } catch { /* skip */ }
+ }
+ // Write mapping.json for design-tree
+ const mapping: Record = {};
+ const usedNamesForMapping = new Map();
+ for (const { id, name } of imgNodes) {
+ let base = sanitizeFilename(name);
+ const cnt = usedNamesForMapping.get(base) ?? 0;
+ usedNamesForMapping.set(base, cnt + 1);
+ if (cnt > 0) base = `${base}-${cnt + 1}`;
+ mapping[id] = `${base}@${imgScale}x.png`;
+ }
+ await writeFile(resolve(imgOutDir, "mapping.json"), JSON.stringify(mapping, null, 2), "utf-8");
+
+ imageDir = imgOutDir;
+ console.log(` images/: ${downloaded}/${imgNodes.length} PNGs (@${imgScale}x)`);
+ } catch {
+ console.warn(" images/: failed to download (continuing)");
+ }
+ }
+
+ // Update vectorDir to point to downloaded assets
+ const vecOutCheck = resolve(outputDir, "vectors");
+ if (existsSync(vecOutCheck)) vectorDir = vecOutCheck;
+ }
+ }
+
+ // 4. Design tree (after assets so image paths are available)
+ const { generateDesignTreeWithStats } = await import("../../core/engine/design-tree.js");
+ const treeOptions = {
+ ...(vectorDir && existsSync(vectorDir) ? { vectorDir } : {}),
+ ...(imageDir && existsSync(imageDir) ? { imageDir } : {}),
+ };
+ const stats = generateDesignTreeWithStats(file, treeOptions);
+ await writeFile(resolve(outputDir, "design-tree.txt"), stats.tree, "utf-8");
+ console.log(` design-tree.txt: ~${stats.estimatedTokens} tokens`);
+
+ // 5. Assemble prompt
+ if (options.prompt) {
+ // Custom prompt: copy user's file
+ const { readFile: rf } = await import("node:fs/promises");
+ const customPrompt = await rf(resolve(options.prompt), "utf-8");
+ await writeFile(resolve(outputDir, "PROMPT.md"), customPrompt, "utf-8");
+ console.log(` PROMPT.md: custom (${options.prompt})`);
+ } else {
+ // Default: built-in HTML+CSS prompt
+ const { readFile: rf } = await import("node:fs/promises");
+ const { dirname: dirnameFn, resolve: resolveFn } = await import("node:path");
+ const { fileURLToPath } = await import("node:url");
+ const cliDir = dirnameFn(fileURLToPath(import.meta.url));
+ const projectRoot = resolveFn(cliDir, "../..");
+ const altRoot = resolveFn(cliDir, "..");
+
+ let prompt = "";
+ for (const root of [projectRoot, altRoot]) {
+ const p = resolveFn(root, ".claude/skills/design-to-code/PROMPT.md");
+ try {
+ prompt = await rf(p, "utf-8");
+ break;
+ } catch { /* try next */ }
+ }
+
+ if (prompt) {
+ await writeFile(resolve(outputDir, "PROMPT.md"), prompt, "utf-8");
+ console.log(` PROMPT.md: default (html-css)`);
+ } else {
+ console.warn(" PROMPT.md: built-in prompt not found (skipped)");
+ }
+ }
+
+ // Summary
+ console.log(`\n${"=".repeat(50)}`);
+ console.log(`Implementation package ready: ${outputDir}/`);
+ console.log(` Grade: ${scores.overall.grade} (${scores.overall.percentage}%)`);
+ console.log(` Issues: ${result.issues.length}`);
+ console.log(` Design tree: ~${stats.estimatedTokens} tokens`);
+ console.log(`${"=".repeat(50)}`);
+ console.log(`\nNext: Feed design-tree.txt + PROMPT.md to your AI assistant.`);
+ } catch (error) {
+ console.error("\nError:", error instanceof Error ? error.message : String(error));
+ process.exit(1);
+ }
+ });
+}
diff --git a/src/cli/commands/init.ts b/src/cli/commands/init.ts
new file mode 100644
index 00000000..5d473563
--- /dev/null
+++ b/src/cli/commands/init.ts
@@ -0,0 +1,58 @@
+import type { CAC } from "cac";
+
+import {
+ initAiready, getConfigPath, getReportsDir,
+} from "../../core/engine/config-store.js";
+
/** CLI flags accepted by the `init` command (parsed by cac). */
interface InitOptions {
  // Figma personal access token to persist via the config store.
  token?: string;
  // When true, print Figma MCP setup instructions instead of saving a token.
  mcp?: boolean;
}
+
+export function registerInit(cli: CAC): void {
+ cli
+ .command("init", "Set up canicode (Figma token or MCP)")
+ .option("--token ", "Save Figma API token to ~/.canicode/")
+ .option("--mcp", "Show Figma MCP setup instructions")
+ .action((options: InitOptions) => {
+ try {
+ if (options.token) {
+ initAiready(options.token);
+
+ console.log(` Config saved: ${getConfigPath()}`);
+ console.log(` Reports will be saved to: ${getReportsDir()}/`);
+ console.log(`\n Next: canicode analyze "https://www.figma.com/design/..."`);
+ return;
+ }
+
+ if (options.mcp) {
+ console.log(`FIGMA MCP SETUP (for Claude Code)\n`);
+ console.log(`1. Register the official Figma MCP server at project level:`);
+ console.log(` claude mcp add -s project -t http figma https://mcp.figma.com/mcp\n`);
+ console.log(` This creates .mcp.json in your project root.\n`);
+ console.log(`2. Use the /canicode skill in Claude Code:`);
+ console.log(` /canicode https://www.figma.com/design/.../MyDesign?node-id=1-234\n`);
+ console.log(` The skill calls Figma MCP directly — no FIGMA_TOKEN needed.`);
+ return;
+ }
+
+ // No flags: show setup guide
+ console.log(`CANICODE SETUP\n`);
+ console.log(`Choose your Figma data source:\n`);
+ console.log(`Option 1: REST API (recommended for CI/automation)`);
+ console.log(` canicode init --token YOUR_FIGMA_TOKEN`);
+ console.log(` Get token: figma.com > Settings > Personal access tokens\n`);
+ console.log(`Option 2: Figma MCP (recommended for Claude Code)`);
+ console.log(` canicode init --mcp`);
+ console.log(` Uses the /canicode skill in Claude Code with official Figma MCP\n`);
+ console.log(`After setup:`);
+ console.log(` canicode analyze "https://www.figma.com/design/..."`);
+ } catch (error) {
+ console.error(
+ "\nError:",
+ error instanceof Error ? error.message : String(error)
+ );
+ process.exit(1);
+ }
+ });
+}
diff --git a/src/cli/commands/internal/calibrate-analyze.ts b/src/cli/commands/internal/calibrate-analyze.ts
new file mode 100644
index 00000000..aba290e3
--- /dev/null
+++ b/src/cli/commands/internal/calibrate-analyze.ts
@@ -0,0 +1,84 @@
+import { existsSync, mkdirSync } from "node:fs";
+import { writeFile } from "node:fs/promises";
+import { resolve, dirname } from "node:path";
+import type { CAC } from "cac";
+
+import {
+ runCalibrationAnalyze,
+ filterConversionCandidates,
+} from "../../../agents/orchestrator.js";
+
/** CLI flags accepted by the internal `calibrate-analyze` command. */
interface CalibrateAnalyzeOptions {
  // Output JSON path (ignored when runDir is set).
  output?: string;
  // Run directory; when set, output goes to <runDir>/analysis.json.
  runDir?: string;
  // Figma API token; falls back to FIGMA_TOKEN env var downstream.
  token?: string;
  // Scope analysis to a specific node id.
  targetNodeId?: string;
}
+
+export function registerCalibrateAnalyze(cli: CAC): void {
+ cli
+ .command(
+ "calibrate-analyze ",
+ "Run calibration analysis and output JSON for conversion step"
+ )
+ .option("--output ", "Output JSON path", { default: "logs/calibration/calibration-analysis.json" })
+ .option("--run-dir ", "Run directory (overrides --output, writes to /analysis.json)")
+ .option("--token ", "Figma API token (or use FIGMA_TOKEN env var)")
+ .option("--target-node-id ", "Scope analysis to a specific node")
+ .action(async (input: string, options: CalibrateAnalyzeOptions) => {
+ try {
+ console.log("Running calibration analysis...");
+
+ const calibConfig = {
+ input,
+ maxConversionNodes: 20,
+ samplingStrategy: "top-issues" as const,
+ outputPath: "logs/calibration/calibration-report.md",
+ ...(options.token && { token: options.token }),
+ ...(options.targetNodeId && { targetNodeId: options.targetNodeId }),
+ };
+
+ const { analysisOutput, ruleScores, fileKey } =
+ await runCalibrationAnalyze(calibConfig);
+
+ // Filter out icon/graphic nodes that are not useful for code conversion
+ const filteredSummaries = filterConversionCandidates(
+ analysisOutput.nodeIssueSummaries,
+ analysisOutput.analysisResult.file.document
+ );
+
+ const outputData = {
+ fileKey,
+ fileName: analysisOutput.analysisResult.file.name,
+ analyzedAt: analysisOutput.analysisResult.analyzedAt,
+ nodeCount: analysisOutput.analysisResult.nodeCount,
+ issueCount: analysisOutput.analysisResult.issues.length,
+ scoreReport: analysisOutput.scoreReport,
+ nodeIssueSummaries: filteredSummaries,
+ ruleScores,
+ };
+
+ const outputPath = options.runDir
+ ? resolve(options.runDir, "analysis.json")
+ : resolve(options.output ?? "logs/calibration/calibration-analysis.json");
+ const outputDir = dirname(outputPath);
+ if (!existsSync(outputDir)) {
+ mkdirSync(outputDir, { recursive: true });
+ }
+ await writeFile(outputPath, JSON.stringify(outputData, null, 2), "utf-8");
+
+ console.log(`\nAnalysis complete.`);
+ console.log(` Nodes: ${outputData.nodeCount}`);
+ console.log(` Issues: ${outputData.issueCount}`);
+ console.log(` Nodes with issues: ${outputData.nodeIssueSummaries.length}`);
+ console.log(` Grade: ${outputData.scoreReport.overall.grade} (${outputData.scoreReport.overall.percentage}%)`);
+ console.log(`\nOutput saved: ${outputPath}`);
+ } catch (error) {
+ console.error(
+ "\nError:",
+ error instanceof Error ? error.message : String(error)
+ );
+ process.exit(1);
+ }
+ });
+}
diff --git a/src/cli/commands/internal/calibrate-evaluate.ts b/src/cli/commands/internal/calibrate-evaluate.ts
new file mode 100644
index 00000000..4ce3733d
--- /dev/null
+++ b/src/cli/commands/internal/calibrate-evaluate.ts
@@ -0,0 +1,101 @@
+import { existsSync, mkdirSync } from "node:fs";
+import { writeFile } from "node:fs/promises";
+import { resolve, dirname } from "node:path";
+import type { CAC } from "cac";
+
+import { runCalibrationEvaluate } from "../../../agents/orchestrator.js";
+
/** CLI flags accepted by the internal `calibrate-evaluate` command. */
interface CalibrateEvaluateOptions {
  // Report output path (ignored when runDir is set).
  output?: string;
  // Run directory; reads analysis.json + conversion.json, writes summary.md.
  runDir?: string;
}
+
+export function registerCalibrateEvaluate(cli: CAC): void {
+ cli
+ .command(
+ "calibrate-evaluate ",
+ "Evaluate conversion results and generate calibration report"
+ )
+ .option("--output ", "Report output path")
+ .option("--run-dir ", "Run directory (reads analysis.json + conversion.json, writes summary.md)")
+ .action(async (analysisJsonPath: string, conversionJsonPath: string, options: CalibrateEvaluateOptions) => {
+ try {
+ console.log("Running calibration evaluation...");
+
+ const analysisPath = options.runDir
+ ? resolve(options.runDir, "analysis.json")
+ : resolve(analysisJsonPath);
+ const conversionPath = options.runDir
+ ? resolve(options.runDir, "conversion.json")
+ : resolve(conversionJsonPath);
+
+ if (!existsSync(analysisPath)) {
+ throw new Error(`Analysis file not found: ${analysisPath}`);
+ }
+ if (!existsSync(conversionPath)) {
+ throw new Error(`Conversion file not found: ${conversionPath}`);
+ }
+
+ const { readFile } = await import("node:fs/promises");
+ const analysisData = JSON.parse(await readFile(analysisPath, "utf-8"));
+ const conversionData = JSON.parse(await readFile(conversionPath, "utf-8"));
+
+ // Derive fixture name from run-dir: --
+ let fixtureName: string | undefined;
+ if (options.runDir) {
+ const dirName = resolve(options.runDir).split(/[/\\]/).pop() ?? "";
+ const idx = dirName.lastIndexOf("--");
+ fixtureName = idx === -1 ? dirName : dirName.slice(0, idx);
+ }
+
+ const { evaluationOutput, tuningOutput, report } = runCalibrationEvaluate(
+ analysisData,
+ conversionData,
+ analysisData.ruleScores,
+ { collectEvidence: !!options.runDir, ...(fixtureName ? { fixtureName } : {}) }
+ );
+
+ let outputPath: string;
+ if (options.runDir) {
+ outputPath = resolve(options.runDir, "summary.md");
+ } else if (options.output) {
+ outputPath = resolve(options.output);
+ } else {
+ const calNow = new Date();
+ const calTs = `${calNow.getFullYear()}-${String(calNow.getMonth() + 1).padStart(2, "0")}-${String(calNow.getDate()).padStart(2, "0")}-${String(calNow.getHours()).padStart(2, "0")}-${String(calNow.getMinutes()).padStart(2, "0")}`;
+ outputPath = resolve(`logs/calibration/calibration-${calTs}.md`);
+ }
+ const calOutputDir = dirname(outputPath);
+ if (!existsSync(calOutputDir)) {
+ mkdirSync(calOutputDir, { recursive: true });
+ }
+ await writeFile(outputPath, report, "utf-8");
+
+ const mismatchCounts = {
+ overscored: 0,
+ underscored: 0,
+ "missing-rule": 0,
+ validated: 0,
+ };
+ for (const m of evaluationOutput.mismatches) {
+ const key = m.type as keyof typeof mismatchCounts;
+ mismatchCounts[key]++;
+ }
+
+ console.log(`\nEvaluation complete.`);
+ console.log(` Validated: ${mismatchCounts.validated}`);
+ console.log(` Overscored: ${mismatchCounts.overscored}`);
+ console.log(` Underscored: ${mismatchCounts.underscored}`);
+ console.log(` Missing rules: ${mismatchCounts["missing-rule"]}`);
+ console.log(` Score adjustments proposed: ${tuningOutput.adjustments.length}`);
+ console.log(` New rule proposals: ${tuningOutput.newRuleProposals.length}`);
+ console.log(`\nReport saved: ${outputPath}`);
+ } catch (error) {
+ console.error(
+ "\nError:",
+ error instanceof Error ? error.message : String(error)
+ );
+ process.exit(1);
+ }
+ });
+}
diff --git a/src/cli/commands/internal/calibrate-gap-report.ts b/src/cli/commands/internal/calibrate-gap-report.ts
new file mode 100644
index 00000000..7d50524f
--- /dev/null
+++ b/src/cli/commands/internal/calibrate-gap-report.ts
@@ -0,0 +1,87 @@
import { existsSync, mkdirSync } from "node:fs";
import { readFile, writeFile } from "node:fs/promises";
import { resolve, dirname } from "node:path";
import type { CAC } from "cac";

import { generateGapRuleReport } from "../../../agents/gap-rule-report.js";
+
/** CLI options for the `calibrate-gap-report` command; defaults are applied in the action. */
interface CalibrateGapReportOptions {
  calibrationDir?: string; // --calibration-dir: directory holding calibration runs
  output?: string; // --output: markdown report destination
  minRepeat?: string; // --min-repeat: raw CLI string, parsed/clamped to an integer in the action
  json?: boolean; // --json: additionally print a JSON summary to stdout
}
+
+export function registerCalibrateGapReport(cli: CAC): void {
+ cli
+ .command(
+ "calibrate-gap-report",
+ "Aggregate gap data and calibration runs into a rule review report"
+ )
+ .option("--calibration-dir ", "Calibration runs directory", {
+ default: "logs/calibration",
+ })
+ .option("--output ", "Markdown report path", {
+ default: "logs/calibration/REPORT.md",
+ })
+ .option("--min-repeat ", "Minimum distinct fixtures to treat as a repeating pattern", {
+ default: "2",
+ })
+ .option("--json", "Print JSON summary to stdout")
+ .action(async (options: CalibrateGapReportOptions) => {
+ try {
+ const minRepeat = Math.max(1, parseInt(options.minRepeat ?? "2", 10) || 2);
+ const result = generateGapRuleReport({
+ calibrationDir: resolve(options.calibrationDir ?? "logs/calibration"),
+ minPatternRepeat: minRepeat,
+ });
+
+ const outPath = resolve(options.output ?? "logs/calibration/REPORT.md");
+ const outDir = dirname(outPath);
+ if (!existsSync(outDir)) {
+ mkdirSync(outDir, { recursive: true });
+ }
+
+ // Backup existing report with timestamp before overwriting
+ if (existsSync(outPath)) {
+ const { readFile: readFileAsync } = await import("node:fs/promises");
+ const existing = await readFileAsync(outPath, "utf-8");
+ // Extract timestamp from the "Generated:" line
+ const match = existing.match(/Generated:\s*(\d{4}-\d{2}-\d{2}T[\d:.]+Z)/);
+ if (match?.[1]) {
+ const ts = match[1].replace(/[:.]/g, "-").replace("T", "-").replace("Z", "");
+ const backupPath = outPath.replace(/\.md$/, `--${ts}.md`);
+ await writeFile(backupPath, existing, "utf-8");
+ console.log(` Previous report backed up: ${backupPath}`);
+ }
+ }
+
+ await writeFile(outPath, result.markdown, "utf-8");
+
+ console.log("Gap rule review report written.");
+ console.log(` Runs with gaps: ${result.gapRunCount}`);
+ console.log(` Runs with snapshots: ${result.runCount}`);
+ console.log(` Output: ${outPath}`);
+
+ if (options.json) {
+ console.log(
+ JSON.stringify(
+ {
+ gapRunCount: result.gapRunCount,
+ runCount: result.runCount,
+ outputPath: outPath,
+ },
+ null,
+ 2
+ )
+ );
+ }
+ } catch (error) {
+ console.error(
+ "\nError:",
+ error instanceof Error ? error.message : String(error)
+ );
+ process.exit(1);
+ }
+ });
+}
diff --git a/src/cli/commands/internal/calibrate-run.ts b/src/cli/commands/internal/calibrate-run.ts
new file mode 100644
index 00000000..8d0a19e1
--- /dev/null
+++ b/src/cli/commands/internal/calibrate-run.ts
@@ -0,0 +1,58 @@
+import type { CAC } from "cac";
+
+import { parseFigmaUrl } from "../../../core/adapters/figma-url-parser.js";
+import { isFigmaUrl } from "../../../core/engine/loader.js";
+import { getFigmaToken } from "../../../core/engine/config-store.js";
+import { runCalibrationAnalyze } from "../../../agents/orchestrator.js";
+
/** CLI options for the internal `calibrate-run` command. */
interface CalibrateRunOptions {
  output?: string; // --output: markdown report output path
  token?: string; // --token: Figma API token (falls back to FIGMA_TOKEN / stored config)
  maxNodes?: number; // --max-nodes: max nodes to convert (default 5)
  sampling?: string; // --sampling: all | top-issues | random (default top-issues)
}
+
+export function registerCalibrateRun(cli: CAC): void {
+ cli
+ .command(
+ "calibrate-run ",
+ "Run full calibration pipeline (analysis-only, conversion via /calibrate-loop)"
+ )
+ .option("--output ", "Markdown report output path")
+ .option("--token ", "Figma API token (or use FIGMA_TOKEN env var)")
+ .option("--max-nodes ", "Max nodes to convert", { default: 5 })
+ .option("--sampling ", "Sampling strategy (all | top-issues | random)", { default: "top-issues" })
+ .action(async (input: string, options: CalibrateRunOptions) => {
+ try {
+ const figmaToken = options.token ?? getFigmaToken();
+
+ if (isFigmaUrl(input) && !parseFigmaUrl(input).nodeId) {
+ console.warn("\nWarning: No node-id specified. Calibrating entire file may produce noisy results.");
+ console.warn("Tip: Add ?node-id=XXX to target a specific section.\n");
+ }
+
+ console.log("Running calibration pipeline (analysis-only)...");
+ console.log(` Input: ${input}`);
+ console.log("");
+
+ const { analysisOutput } = await runCalibrationAnalyze({
+ input,
+ maxConversionNodes: options.maxNodes ?? 5,
+ samplingStrategy: (options.sampling as "all" | "top-issues" | "random") ?? "top-issues",
+ outputPath: options.output ?? "unused",
+ ...(figmaToken && { token: figmaToken }),
+ });
+
+ console.log("\nCalibration complete (analysis-only).");
+ console.log(` Grade: ${analysisOutput.scoreReport.overall.grade} (${analysisOutput.scoreReport.overall.percentage}%)`);
+ console.log(` Nodes with issues: ${analysisOutput.nodeIssueSummaries.length}`);
+ console.log(" Note: Use /calibrate-loop in Claude Code for full pipeline with visual comparison.");
+ } catch (error) {
+ console.error(
+ "\nError:",
+ error instanceof Error ? error.message : String(error)
+ );
+ process.exit(1);
+ }
+ });
+}
diff --git a/src/cli/commands/internal/fixture-management.ts b/src/cli/commands/internal/fixture-management.ts
new file mode 100644
index 00000000..9f617f3a
--- /dev/null
+++ b/src/cli/commands/internal/fixture-management.ts
@@ -0,0 +1,158 @@
+import { existsSync } from "node:fs";
+import { resolve } from "node:path";
+import type { CAC } from "cac";
+
+import {
+ listActiveFixtures,
+ listDoneFixtures,
+ moveFixtureToDone,
+ parseDebateResult,
+ extractAppliedRuleIds,
+ extractFixtureName,
+ resolveLatestRunDir,
+ checkConvergence,
+} from "../../../agents/run-directory.js";
+import {
+ pruneCalibrationEvidence,
+ pruneDiscoveryEvidence,
+} from "../../../agents/evidence-collector.js";
+
+export function registerFixtureManagement(cli: CAC): void {
+ cli
+ .command(
+ "fixture-list [fixturesDir]",
+ "List active and done fixtures"
+ )
+ .option("--json", "Output as JSON")
+ .action((fixturesDir: string | undefined, options: { json?: boolean }) => {
+ const dir = fixturesDir ?? "fixtures";
+ const active = listActiveFixtures(dir);
+ const done = listDoneFixtures(dir);
+
+ if (options.json) {
+ console.log(JSON.stringify({ active, done }, null, 2));
+ } else {
+ console.log(`Active fixtures (${active.length}):`);
+ for (const p of active) {
+ console.log(` ${p}`);
+ }
+ console.log(`\nDone fixtures (${done.length}):`);
+ for (const p of done) {
+ console.log(` ${p}`);
+ }
+ }
+ });
+
+ cli
+ .command(
+ "fixture-done ",
+ "Move a converged fixture to done/"
+ )
+ .option("--fixtures-dir ", "Fixtures root directory", { default: "fixtures" })
+ .option("--force", "Skip convergence check")
+ .option("--run-dir ", "Run directory to check for convergence (auto-resolves latest if omitted)")
+ .option("--dry-run", "Show convergence judgment without moving files")
+ .option(
+ "--lenient-convergence",
+ "Converged when no applied/revised decisions (ignore rejected; see calibration issue #14)"
+ )
+ .action(
+ (fixturePath: string, options: {
+ fixturesDir?: string;
+ force?: boolean;
+ runDir?: string;
+ dryRun?: boolean;
+ lenientConvergence?: boolean;
+ }) => {
+ const fixtureName = extractFixtureName(fixturePath);
+
+ // Resolve run directory: explicit --run-dir or auto-resolve latest
+ let runDir = options.runDir ? resolve(options.runDir) : null;
+ if (!runDir && !options.force) {
+ const latest = resolveLatestRunDir(fixtureName);
+ if (latest) {
+ runDir = latest;
+ console.log(`Auto-resolved latest run: ${runDir}`);
+ }
+ }
+
+ if (!options.force) {
+ if (!runDir) {
+ console.error(`Error: no run directory found for fixture "${fixtureName}". Specify --run-dir, or use --force to skip check.`);
+ process.exit(1);
+ }
+ const summary = checkConvergence(runDir, { lenient: options.lenientConvergence });
+ console.log(`\nConvergence check (${summary.mode}):`);
+ console.log(` ${summary.reason}`);
+ if (summary.total > 0) {
+ console.log(` applied=${summary.applied} revised=${summary.revised} rejected=${summary.rejected} kept=${summary.kept}`);
+ }
+
+ if (options.dryRun) {
+ console.log(`\n[dry-run] Would ${summary.converged ? "move" : "NOT move"} fixture: ${fixturePath}`);
+ return;
+ }
+
+ if (!summary.converged) {
+ console.error(`\nError: fixture has not converged. Use --force to override or --lenient-convergence.`);
+ process.exit(1);
+ }
+ } else if (options.dryRun) {
+ console.log(`[dry-run] --force: would move fixture without convergence check: ${fixturePath}`);
+ return;
+ }
+
+ const dest = moveFixtureToDone(fixturePath, options.fixturesDir ?? "fixtures");
+ if (dest) {
+ console.log(`\nMoved to: ${dest}`);
+ } else {
+ console.error(`Error: fixture not found: ${fixturePath}`);
+ process.exit(1);
+ }
+ });
+}
+
+export function registerEvidencePrune(cli: CAC): void {
+ cli
+ .command(
+ "calibrate-prune-evidence ",
+ "Prune evidence for rules applied by the Arbitrator in the given run"
+ )
+ .action((runDir: string) => {
+ if (!existsSync(resolve(runDir))) {
+ console.log(`Run directory not found: ${runDir}`);
+ return;
+ }
+ const debate = parseDebateResult(resolve(runDir));
+ if (!debate) {
+ console.log("No debate.json found — nothing to prune.");
+ return;
+ }
+
+ const appliedIds = extractAppliedRuleIds(debate);
+ if (appliedIds.length === 0) {
+ console.log("No applied/revised rules — nothing to prune.");
+ return;
+ }
+
+ pruneCalibrationEvidence(appliedIds);
+ console.log(`Pruned calibration evidence for ${appliedIds.length} rule(s): ${appliedIds.join(", ")}`);
+ });
+
+ cli
+ .command(
+ "discovery-prune-evidence ",
+ "Prune discovery evidence for a category addressed by /add-rule"
+ )
+ .action((category: string | string[]) => {
+ const categories = Array.isArray(category) ? category : [category];
+ try {
+ pruneDiscoveryEvidence(categories);
+ console.log(`Pruned discovery evidence for categories: ${categories.join(", ")}`);
+ } catch (err) {
+ const msg = err instanceof Error ? err.message : String(err);
+ console.error(`[evidence] Failed to prune discovery evidence: ${msg}`);
+ process.exitCode = 1;
+ }
+ });
+}
diff --git a/src/cli/commands/list-rules.ts b/src/cli/commands/list-rules.ts
new file mode 100644
index 00000000..05dad61b
--- /dev/null
+++ b/src/cli/commands/list-rules.ts
@@ -0,0 +1,80 @@
+import type { CAC } from "cac";
+
+import type { RuleConfig } from "../../core/contracts/rule.js";
+import { RULE_CONFIGS } from "../../core/rules/rule-config.js";
+import { ruleRegistry } from "../../core/rules/rule-registry.js";
+import { loadCustomRules } from "../../core/rules/custom/custom-rule-loader.js";
+import { loadConfigFile, mergeConfigs } from "../../core/rules/custom/config-loader.js";
+
/** CLI options for the `list-rules` command. */
interface ListRulesOptions {
  customRules?: string; // --custom-rules: path to a custom rules JSON file
  config?: string; // --config: path to config overrides (shows effective scores)
  json?: boolean; // --json: machine-readable output instead of the table
}
+
+export function registerListRules(cli: CAC): void {
+ cli
+ .command("list-rules", "List all analysis rules with scores and severity")
+ .option("--custom-rules ", "Include custom rules from JSON file")
+ .option("--config ", "Apply config overrides to show effective scores")
+ .option("--json", "Output as JSON")
+ .action(async (options: ListRulesOptions) => {
+ try {
+ let configs: Record = { ...RULE_CONFIGS };
+
+ if (options.config) {
+ const configFile = await loadConfigFile(options.config);
+ configs = mergeConfigs(configs, configFile);
+ }
+
+ if (options.customRules) {
+ const { rules, configs: customConfigs } = await loadCustomRules(options.customRules);
+ for (const rule of rules) {
+ ruleRegistry.register(rule);
+ }
+ configs = { ...configs, ...customConfigs };
+ }
+
+ const rules = ruleRegistry.getAll().map((rule) => {
+ const config = configs[rule.definition.id as string];
+ return {
+ id: rule.definition.id,
+ name: rule.definition.name,
+ category: rule.definition.category,
+ severity: config?.severity ?? "risk",
+ score: config?.score ?? 0,
+ enabled: config?.enabled ?? true,
+ };
+ });
+
+ if (options.json) {
+ console.log(JSON.stringify(rules, null, 2));
+ return;
+ }
+
+ // Group by category
+ const byCategory = new Map();
+ for (const rule of rules) {
+ const list = byCategory.get(rule.category) ?? [];
+ list.push(rule);
+ byCategory.set(rule.category, list);
+ }
+
+ for (const [category, catRules] of byCategory) {
+ console.log(`\n ${category.toUpperCase()}`);
+ for (const r of catRules) {
+ const status = r.enabled ? "" : " (disabled)";
+ const pad = " ".repeat(Math.max(0, 40 - r.id.length));
+ console.log(` ${r.id}${pad} ${String(r.score).padStart(4)} ${r.severity}${status}`);
+ }
+ }
+ console.log(`\n Total: ${rules.length} rules\n`);
+ } catch (error) {
+ console.error(
+ "\nError:",
+ error instanceof Error ? error.message : String(error)
+ );
+ process.exit(1);
+ }
+ });
+}
diff --git a/src/cli/commands/prompt.ts b/src/cli/commands/prompt.ts
new file mode 100644
index 00000000..0e090663
--- /dev/null
+++ b/src/cli/commands/prompt.ts
@@ -0,0 +1,31 @@
+import type { CAC } from "cac";
+
+export function registerPrompt(cli: CAC): void {
+ cli
+ .command("prompt", "Output the standard design-to-code prompt for AI code generation")
+ .action(async () => {
+ try {
+ const { readFile } = await import("node:fs/promises");
+ const { dirname: dirnameFn, resolve: resolveFn } = await import("node:path");
+ const { fileURLToPath } = await import("node:url");
+ const __dirname = dirnameFn(fileURLToPath(import.meta.url));
+ // Try from source location first, then npm-installed location
+ const paths = [
+ resolveFn(__dirname, "../../.claude/skills/design-to-code/PROMPT.md"),
+ resolveFn(__dirname, "../.claude/skills/design-to-code/PROMPT.md"),
+ ];
+ for (const p of paths) {
+ try {
+ const content = await readFile(p, "utf-8");
+ console.log(content);
+ return;
+ } catch { /* try next */ }
+ }
+ console.error("Prompt file not found");
+ process.exit(1);
+ } catch (error) {
+ console.error("Error:", error instanceof Error ? error.message : String(error));
+ process.exit(1);
+ }
+ });
+}
diff --git a/src/cli/commands/save-fixture.ts b/src/cli/commands/save-fixture.ts
new file mode 100644
index 00000000..2c05aa87
--- /dev/null
+++ b/src/cli/commands/save-fixture.ts
@@ -0,0 +1,188 @@
+import { mkdirSync } from "node:fs";
+import { writeFile } from "node:fs/promises";
+import { resolve } from "node:path";
+import type { CAC } from "cac";
+
+import { parseFigmaUrl } from "../../core/adapters/figma-url-parser.js";
+import { loadFile, isFigmaUrl } from "../../core/engine/loader.js";
+import { getFigmaToken } from "../../core/engine/config-store.js";
+import { collectVectorNodeIds, collectImageNodes, sanitizeFilename, countNodes } from "../helpers.js";
+
/** CLI options for the `save-fixture` command. */
interface SaveFixtureOptions {
  output?: string; // --output: fixture directory (defaults to fixtures/<name>/)
  api?: boolean; // NOTE(review): declared but never read in this command's action — confirm still needed
  token?: string; // --token: Figma API token (falls back to FIGMA_TOKEN / stored config)
  imageScale?: string; // --image-scale: raw CLI value, validated to the 1-4 range in the action
  name?: string; // --name: fixture name (defaults to the Figma file key)
}
+
+export function registerSaveFixture(cli: CAC): void {
+ cli
+ .command(
+ "save-fixture ",
+ "Save Figma design as a fixture directory for offline analysis"
+ )
+ .option("--output ", "Output directory (default: fixtures//)")
+ .option("--name ", "Fixture name (default: extracted from URL)")
+ .option("--token ", "Figma API token (or use FIGMA_TOKEN env var)")
+ .option("--image-scale ", "Image export scale: 2 for PC (default), 3 for mobile")
+ .example(" canicode save-fixture https://www.figma.com/design/ABC123/MyDesign?node-id=1-234")
+ .example(" canicode save-fixture https://www.figma.com/design/ABC123/MyDesign?node-id=1-234 --image-scale 3")
+ .action(async (input: string, options: SaveFixtureOptions) => {
+ try {
+ if (!isFigmaUrl(input)) {
+ throw new Error("save-fixture requires a Figma URL as input.");
+ }
+
+ if (!parseFigmaUrl(input).nodeId) {
+ console.warn("\nWarning: No node-id specified. Saving entire file as fixture.");
+ console.warn("Tip: Add ?node-id=XXX to save a specific section.\n");
+ }
+
+ const { file } = await loadFile(input, options.token);
+ file.sourceUrl = input;
+
+ const fixtureName = options.name ?? file.fileKey;
+ const fixtureDir = resolve(options.output ?? `fixtures/${fixtureName}`);
+ mkdirSync(fixtureDir, { recursive: true });
+
+ // 0. Resolve component master node trees
+ const figmaTokenForComponents = options.token ?? getFigmaToken();
+ if (figmaTokenForComponents) {
+ const { FigmaClient: FC } = await import("../../core/adapters/figma-client.js");
+ const { resolveComponentDefinitions } = await import("../../core/adapters/component-resolver.js");
+ const componentClient = new FC({ token: figmaTokenForComponents });
+ try {
+ const definitions = await resolveComponentDefinitions(componentClient, file.fileKey, file.document);
+ const count = Object.keys(definitions).length;
+ if (count > 0) {
+ file.componentDefinitions = definitions;
+ console.log(`Resolved ${count} component master node tree(s)`);
+ }
+ } catch {
+ console.warn("Warning: failed to resolve component definitions (continuing)");
+ }
+ }
+
+ // 1. Save data.json
+ const dataPath = resolve(fixtureDir, "data.json");
+ await writeFile(dataPath, JSON.stringify(file, null, 2), "utf-8");
+ console.log(`Fixture saved: ${fixtureDir}/`);
+ console.log(` data.json: ${file.name} (${countNodes(file.document)} nodes)`);
+
+ // 2. Download screenshot
+ const figmaToken = options.token ?? getFigmaToken();
+ if (figmaToken) {
+ const { FigmaClient } = await import("../../core/adapters/figma-client.js");
+ const client = new FigmaClient({ token: figmaToken });
+ const { nodeId } = parseFigmaUrl(input);
+ const rootNodeId = nodeId?.replace(/-/g, ":") ?? file.document.id;
+
+ try {
+ const imageUrls = await client.getNodeImages(file.fileKey, [rootNodeId], { format: "png", scale: 2 });
+ const url = imageUrls[rootNodeId];
+ if (url) {
+ const resp = await fetch(url);
+ if (resp.ok) {
+ const buffer = Buffer.from(await resp.arrayBuffer());
+ const { writeFile: writeFileSync } = await import("node:fs/promises");
+ await writeFileSync(resolve(fixtureDir, "screenshot.png"), buffer);
+ console.log(` screenshot.png: saved`);
+ }
+ }
+ } catch {
+ console.warn(" screenshot.png: failed to download (continuing)");
+ }
+
+ // 3. Download SVGs for VECTOR nodes
+ const vectorNodeIds = collectVectorNodeIds(file.document);
+ if (vectorNodeIds.length > 0) {
+ const vectorDir = resolve(fixtureDir, "vectors");
+ mkdirSync(vectorDir, { recursive: true });
+
+ const svgUrls = await client.getNodeImages(file.fileKey, vectorNodeIds, { format: "svg" });
+ let downloaded = 0;
+ for (const [id, svgUrl] of Object.entries(svgUrls)) {
+ if (!svgUrl) continue;
+ try {
+ const resp = await fetch(svgUrl);
+ if (resp.ok) {
+ const svg = await resp.text();
+ const safeId = id.replace(/:/g, "-");
+ await writeFile(resolve(vectorDir, `${safeId}.svg`), svg, "utf-8");
+ downloaded++;
+ }
+ } catch {
+ // Skip failed downloads
+ }
+ }
+ console.log(` vectors/: ${downloaded}/${vectorNodeIds.length} SVGs`);
+ }
+
+ // 4. Download PNGs for IMAGE fill nodes
+ const imageNodes = collectImageNodes(file.document);
+ if (imageNodes.length > 0) {
+ const imgScale = options.imageScale !== undefined ? Number(options.imageScale) : 2;
+ if (!Number.isFinite(imgScale) || imgScale < 1 || imgScale > 4) {
+ console.error("Error: --image-scale must be 1-4 (2 for PC, 3 for mobile)");
+ process.exit(1);
+ }
+
+ const imageDir = resolve(fixtureDir, "images");
+ mkdirSync(imageDir, { recursive: true });
+
+ const imageUrls = await client.getNodeImages(
+ file.fileKey,
+ imageNodes.map((n) => n.id),
+ { format: "png", scale: imgScale }
+ );
+
+ const usedNames = new Map();
+ const nodeIdToFilename = new Map();
+ for (const { id, name } of imageNodes) {
+ let base = sanitizeFilename(name);
+ const count = usedNames.get(base) ?? 0;
+ usedNames.set(base, count + 1);
+ if (count > 0) base = `${base}-${count + 1}`;
+ nodeIdToFilename.set(id, `${base}@${imgScale}x.png`);
+ }
+
+ let imgDownloaded = 0;
+ for (const [id, imgUrl] of Object.entries(imageUrls)) {
+ if (!imgUrl) continue;
+ const filename = nodeIdToFilename.get(id);
+ if (!filename) continue;
+ try {
+ const resp = await fetch(imgUrl);
+ if (resp.ok) {
+ const buf = Buffer.from(await resp.arrayBuffer());
+ await writeFile(resolve(fixtureDir, "images", filename), buf);
+ imgDownloaded++;
+ }
+ } catch {
+ // Skip failed downloads
+ }
+ }
+
+ const mapping: Record = {};
+ for (const [id, filename] of nodeIdToFilename) {
+ mapping[id] = filename;
+ }
+ await writeFile(
+ resolve(imageDir, "mapping.json"),
+ JSON.stringify(mapping, null, 2),
+ "utf-8"
+ );
+
+ console.log(` images/: ${imgDownloaded}/${imageNodes.length} PNGs (@${imgScale}x)`);
+ }
+ }
+ } catch (error) {
+ console.error(
+ "\nError:",
+ error instanceof Error ? error.message : String(error)
+ );
+ process.exit(1);
+ }
+ });
+}
diff --git a/src/cli/commands/visual-compare.ts b/src/cli/commands/visual-compare.ts
new file mode 100644
index 00000000..a8bd4475
--- /dev/null
+++ b/src/cli/commands/visual-compare.ts
@@ -0,0 +1,102 @@
+import { resolve } from "node:path";
+import type { CAC } from "cac";
+
+import { getFigmaToken } from "../../core/engine/config-store.js";
+
/** CLI options for the `visual-compare` command. */
interface VisualCompareOptions {
  figmaUrl: string; // --figma-url: required; the action exits if missing
  token?: string; // --token: Figma API token (falls back to stored/env token)
  output?: string; // --output: directory for screenshots and diff
  width?: number; // NOTE(review): CAC may deliver strings despite this type; action coerces via Number()
  height?: number; // same coercion caveat as width
  figmaScale?: string; // --figma-scale: export scale, validated >= 1 in the action
}
+
+export function registerVisualCompare(cli: CAC): void {
+ cli
+ .command(
+ "visual-compare ",
+ "Compare rendered code against Figma screenshot (pixel-level similarity)"
+ )
+ .option("--figma-url ", "Figma URL with node-id (required)")
+ .option("--token ", "Figma API token (or use FIGMA_TOKEN env var)")
+ .option("--output ", "Output directory for screenshots and diff (default: /tmp/canicode-visual-compare)")
+ .option("--width ", "Logical viewport width in CSS px (default: infer from Figma PNG ÷ export scale)")
+ .option("--height ", "Logical viewport height in CSS px (default: infer from Figma PNG ÷ export scale)")
+ .option("--figma-scale ", "Figma export scale (default: 2, matches save-fixture / @2x PNGs)")
+ .example(" canicode visual-compare ./generated/index.html --figma-url 'https://www.figma.com/design/ABC/File?node-id=1-234'")
+ .action(async (codePath: string, options: VisualCompareOptions) => {
+ try {
+ if (!options.figmaUrl) {
+ console.error("Error: --figma-url is required");
+ process.exit(1);
+ }
+
+ const token = options.token ?? getFigmaToken();
+ if (!token) {
+ console.error("Error: Figma token required. Use --token or set FIGMA_TOKEN env var.");
+ process.exit(1);
+ }
+
+ const { visualCompare } = await import("../../core/engine/visual-compare.js");
+
+ const exportScale =
+ options.figmaScale !== undefined ? Number(options.figmaScale) : undefined;
+ if (exportScale !== undefined && (!Number.isFinite(exportScale) || exportScale < 1)) {
+ console.error("Error: --figma-scale must be a number >= 1");
+ process.exit(1);
+ }
+
+ // CAC passes option values as strings — coerce to numbers before validation
+ const width = options.width !== undefined ? Number(options.width) : undefined;
+ const height = options.height !== undefined ? Number(options.height) : undefined;
+
+ if (width !== undefined && (!Number.isFinite(width) || width <= 0)) {
+ console.error("Error: --width must be a positive number");
+ process.exit(1);
+ }
+ if (height !== undefined && (!Number.isFinite(height) || height <= 0)) {
+ console.error("Error: --height must be a positive number");
+ process.exit(1);
+ }
+
+ const hasViewportOverride = width !== undefined || height !== undefined;
+
+ console.log("Comparing...");
+ const result = await visualCompare({
+ figmaUrl: options.figmaUrl,
+ figmaToken: token,
+ codePath: resolve(codePath),
+ outputDir: options.output,
+ ...(exportScale !== undefined ? { figmaExportScale: exportScale } : {}),
+ ...(hasViewportOverride
+ ? {
+ viewport: {
+ ...(width !== undefined ? { width } : {}),
+ ...(height !== undefined ? { height } : {}),
+ },
+ }
+ : {}),
+ });
+
+ // JSON output for programmatic use
+ console.log(JSON.stringify({
+ similarity: result.similarity,
+ diffPixels: result.diffPixels,
+ totalPixels: result.totalPixels,
+ width: result.width,
+ height: result.height,
+ figmaScreenshot: result.figmaScreenshotPath,
+ codeScreenshot: result.codeScreenshotPath,
+ diff: result.diffPath,
+ }, null, 2));
+
+ } catch (error) {
+ console.error(
+ "\nError:",
+ error instanceof Error ? error.message : String(error)
+ );
+ process.exit(1);
+ }
+ });
+}
diff --git a/src/cli/helpers.ts b/src/cli/helpers.ts
new file mode 100644
index 00000000..82d01276
--- /dev/null
+++ b/src/cli/helpers.ts
@@ -0,0 +1,79 @@
+import type { AnalysisFile, AnalysisNode } from "../core/contracts/figma-node.js";
+
// Node-count threshold above which callers should scope analysis to a
// sub-frame (e.g. via pickRandomScope) instead of analyzing the whole document.
export const MAX_NODES_WITHOUT_SCOPE = 500;
+
+/**
+ * Find all FRAME/COMPONENT nodes with 50-500 nodes in their subtree,
+ * then pick one at random. Used to auto-scope fixture analysis.
+ */
+export function pickRandomScope(root: AnalysisFile["document"]): AnalysisFile["document"] | null {
+ const candidates: AnalysisFile["document"][] = [];
+
+ function collect(node: AnalysisFile["document"]): void {
+ const isContainer = node.type === "FRAME" || node.type === "COMPONENT" || node.type === "SECTION";
+ if (isContainer) {
+ const size = countNodes(node);
+ if (size >= 50 && size <= 500) {
+ candidates.push(node);
+ }
+ }
+ if ("children" in node && node.children) {
+ for (const child of node.children) {
+ collect(child);
+ }
+ }
+ }
+
+ collect(root);
+ if (candidates.length === 0) return null;
+ const idx = Math.floor(Math.random() * candidates.length);
+ return candidates[idx] ?? null;
+}
+
+export function collectVectorNodeIds(node: { id: string; type: string; children?: readonly unknown[] | undefined }): string[] {
+ const ids: string[] = [];
+ if (node.type === "VECTOR") ids.push(node.id);
+ if (node.children) {
+ for (const child of node.children) {
+ ids.push(...collectVectorNodeIds(child as typeof node));
+ }
+ }
+ return ids;
+}
+
+export function collectImageNodes(node: AnalysisNode): Array<{ id: string; name: string }> {
+ const nodes: Array<{ id: string; name: string }> = [];
+ function walk(n: AnalysisNode): void {
+ if (n.fills && Array.isArray(n.fills)) {
+ for (const fill of n.fills) {
+ if ((fill as { type?: string }).type === "IMAGE") {
+ nodes.push({ id: n.id, name: n.name });
+ break;
+ }
+ }
+ }
+ if (n.children) {
+ for (const child of n.children) walk(child);
+ }
+ }
+ walk(node);
+ return nodes;
+}
+
+export function sanitizeFilename(name: string): string {
+ return name
+ .toLowerCase()
+ .replace(/[^a-z0-9]+/g, "-")
+ .replace(/^-+|-+$/g, "")
+ || "image";
+}
+
+export function countNodes(node: { children?: readonly unknown[] | undefined }): number {
+ let count = 1;
+ if (node.children) {
+ for (const child of node.children) {
+ count += countNodes(child as { children?: readonly unknown[] | undefined });
+ }
+ }
+ return count;
+}
diff --git a/src/cli/index.ts b/src/cli/index.ts
index f4caa3f6..34f7f0ca 100644
--- a/src/cli/index.ts
+++ b/src/cli/index.ts
@@ -1,7 +1,4 @@
#!/usr/bin/env node
-import { existsSync, mkdirSync } from "node:fs";
-import { writeFile } from "node:fs/promises";
-import { resolve, dirname } from "node:path";
import { createRequire } from "node:module";
import { config } from "dotenv";
import cac from "cac";
@@ -9,34 +6,34 @@ import cac from "cac";
// Load .env file
config();
-import { parseFigmaUrl } from "../core/adapters/figma-url-parser.js";
-import type { AnalysisFile, AnalysisNode } from "../core/contracts/figma-node.js";
-import type { RuleConfig, RuleId } from "../core/contracts/rule.js";
-import { analyzeFile } from "../core/engine/rule-engine.js";
-import { loadFile, isFigmaUrl, isJsonFile, isFixtureDir } from "../core/engine/loader.js";
import {
- getFigmaToken, initAiready, getConfigPath, getReportsDir, ensureReportsDir,
- readConfig, getTelemetryEnabled, setTelemetryEnabled, getPosthogApiKey, getSentryDsn, getDeviceId,
+ getTelemetryEnabled, getPosthogApiKey, getSentryDsn, getDeviceId,
} from "../core/engine/config-store.js";
-import { calculateScores, formatScoreSummary, buildResultJson } from "../core/engine/scoring.js";
-import { getConfigsWithPreset, RULE_CONFIGS, type Preset } from "../core/rules/rule-config.js";
-import { ruleRegistry } from "../core/rules/rule-registry.js";
-import { loadCustomRules } from "../core/rules/custom/custom-rule-loader.js";
-import { loadConfigFile, mergeConfigs } from "../core/rules/custom/config-loader.js";
-import { generateHtmlReport } from "../core/report-html/index.js";
-import {
- runCalibrationAnalyze,
- runCalibrationEvaluate,
- filterConversionCandidates,
-} from "../agents/orchestrator.js";
-import { generateGapRuleReport } from "../agents/gap-rule-report.js";
-import { handleDocs } from "./docs.js";
-import { initMonitoring, trackEvent, trackError, shutdownMonitoring, EVENTS } from "../core/monitoring/index.js";
+import { initMonitoring, shutdownMonitoring } from "../core/monitoring/index.js";
import { POSTHOG_API_KEY as BUILTIN_PH_KEY, SENTRY_DSN as BUILTIN_SENTRY_DSN } from "../core/monitoring/keys.js";
+import { handleDocs } from "./docs.js";
// Import rules to register them
import "../core/rules/index.js";
+// User-facing commands
+import { registerAnalyze } from "./commands/analyze.js";
+import { registerSaveFixture } from "./commands/save-fixture.js";
+import { registerDesignTree } from "./commands/design-tree.js";
+import { registerImplement } from "./commands/implement.js";
+import { registerVisualCompare } from "./commands/visual-compare.js";
+import { registerInit } from "./commands/init.js";
+import { registerConfig } from "./commands/config.js";
+import { registerListRules } from "./commands/list-rules.js";
+import { registerPrompt } from "./commands/prompt.js";
+
+// Internal commands (used by subagents, hidden from user help)
+import { registerCalibrateAnalyze } from "./commands/internal/calibrate-analyze.js";
+import { registerCalibrateEvaluate } from "./commands/internal/calibrate-evaluate.js";
+import { registerCalibrateGapReport } from "./commands/internal/calibrate-gap-report.js";
+import { registerCalibrateRun } from "./commands/internal/calibrate-run.js";
+import { registerFixtureManagement, registerEvidencePrune } from "./commands/internal/fixture-management.js";
+
const require = createRequire(import.meta.url);
const pkg = require("../../package.json") as { version: string };
@@ -61,1523 +58,32 @@ process.on("beforeExit", () => {
shutdownMonitoring();
});
-const MAX_NODES_WITHOUT_SCOPE = 500;
-
-/**
- * Find all FRAME/COMPONENT nodes with 50-500 nodes in their subtree,
- * then pick one at random. Used to auto-scope fixture analysis.
- */
-function pickRandomScope(root: AnalysisFile["document"]): AnalysisFile["document"] | null {
- const candidates: AnalysisFile["document"][] = [];
-
- function collect(node: AnalysisFile["document"]): void {
- const isContainer = node.type === "FRAME" || node.type === "COMPONENT" || node.type === "SECTION";
- if (isContainer) {
- const size = countNodes(node);
- if (size >= 50 && size <= 500) {
- candidates.push(node);
- }
- }
- if ("children" in node && node.children) {
- for (const child of node.children) {
- collect(child);
- }
- }
- }
-
- collect(root);
- if (candidates.length === 0) return null;
- const idx = Math.floor(Math.random() * candidates.length);
- return candidates[idx] ?? null;
-}
-
-function collectVectorNodeIds(node: { id: string; type: string; children?: readonly unknown[] | undefined }): string[] {
- const ids: string[] = [];
- if (node.type === "VECTOR") ids.push(node.id);
- if (node.children) {
- for (const child of node.children) {
- ids.push(...collectVectorNodeIds(child as typeof node));
- }
- }
- return ids;
-}
-
-function collectImageNodes(node: AnalysisNode): Array<{ id: string; name: string }> {
- const nodes: Array<{ id: string; name: string }> = [];
- function walk(n: AnalysisNode): void {
- if (n.fills && Array.isArray(n.fills)) {
- for (const fill of n.fills) {
- if ((fill as { type?: string }).type === "IMAGE") {
- nodes.push({ id: n.id, name: n.name });
- break;
- }
- }
- }
- if (n.children) {
- for (const child of n.children) walk(child);
- }
- }
- walk(node);
- return nodes;
-}
-
-function sanitizeFilename(name: string): string {
- return name
- .toLowerCase()
- .replace(/[^a-z0-9]+/g, "-")
- .replace(/^-+|-+$/g, "")
- || "image";
-}
-
-function countNodes(node: { children?: readonly unknown[] | undefined }): number {
- let count = 1;
- if (node.children) {
- for (const child of node.children) {
- count += countNodes(child as { children?: readonly unknown[] | undefined });
- }
- }
- return count;
-}
-
-interface AnalyzeOptions {
- preset?: Preset;
- output?: string;
- token?: string;
- api?: boolean;
- screenshot?: boolean;
- customRules?: string;
- config?: string;
- noOpen?: boolean;
- json?: boolean;
-}
-
-cli
- .command("analyze <input>", "Analyze a Figma file or JSON fixture")
- .option("--preset <preset>", "Analysis preset (relaxed | dev-friendly | ai-ready | strict)")
- .option("--output <path>", "HTML report output path")
- .option("--token <token>", "Figma API token (or use FIGMA_TOKEN env var)")
- .option("--api", "Load via Figma REST API (requires FIGMA_TOKEN)")
- .option("--screenshot", "Include screenshot comparison in report (requires ANTHROPIC_API_KEY)")
- .option("--custom-rules <path>", "Path to custom rules JSON file")
- .option("--config <path>", "Path to config JSON file (override rule scores/settings)")
- .option("--no-open", "Don't open report in browser after analysis")
- .option("--json", "Output JSON results to stdout (same format as MCP)")
- .example(" canicode analyze https://www.figma.com/design/ABC123/MyDesign")
- .example(" canicode analyze https://www.figma.com/design/ABC123/MyDesign --api --token YOUR_TOKEN")
- .example(" canicode analyze ./fixtures/my-design --output report.html")
- .example(" canicode analyze ./fixtures/my-design --custom-rules ./my-rules.json")
- .example(" canicode analyze ./fixtures/my-design --config ./my-config.json")
- .action(async (input: string, options: AnalyzeOptions) => {
- const analysisStart = Date.now();
- trackEvent(EVENTS.ANALYSIS_STARTED, { source: isJsonFile(input) || isFixtureDir(input) ? "fixture" : "figma" });
- try {
- // Check init
- if (!options.token && !getFigmaToken() && !isJsonFile(input) && !isFixtureDir(input)) {
- throw new Error(
- "canicode is not configured. Run 'canicode init --token YOUR_TOKEN' first."
- );
- }
-
- // Validate --screenshot requirements
- if (options.screenshot) {
- const anthropicKey = process.env["ANTHROPIC_API_KEY"];
- if (!anthropicKey) {
- throw new Error(
- "ANTHROPIC_API_KEY required for --screenshot mode. Set it in .env or environment."
- );
- }
- console.log("Screenshot comparison mode enabled (coming soon).\n");
- }
-
- // Load file
- const { file, nodeId } = await loadFile(input, options.token);
-
- // Scope enforcement for large files
- const totalNodes = countNodes(file.document);
- let effectiveNodeId = nodeId;
-
- if (!effectiveNodeId && totalNodes > MAX_NODES_WITHOUT_SCOPE) {
- if (isJsonFile(input) || isFixtureDir(input)) {
- // Fixture: auto-pick a random suitable FRAME
- const picked = pickRandomScope(file.document);
- if (picked) {
- effectiveNodeId = picked.id;
- console.log(`\nAuto-scoped to "${picked.name}" (${picked.id}, ${countNodes(picked)} nodes) — file too large (${totalNodes} nodes) for unscoped analysis.`);
- } else {
- console.warn(`\nWarning: Could not find a suitable scope in fixture. Analyzing all ${totalNodes} nodes.`);
- }
- } else {
- // Figma URL: require explicit node-id
- throw new Error(
- `Too many nodes (${totalNodes}) for unscoped analysis. ` +
- `Max ${MAX_NODES_WITHOUT_SCOPE} nodes without a node-id scope.\n\n` +
- `Add ?node-id=XXX to the Figma URL to target a specific section.\n` +
- `Example: canicode analyze "https://www.figma.com/design/.../MyDesign?node-id=1-234"`
- );
- }
- }
- if (!effectiveNodeId && totalNodes > 100) {
- console.warn(`\nWarning: Analyzing ${totalNodes} nodes without scope. Results may be noisy.`);
- console.warn("Tip: Add ?node-id=XXX to analyze a specific section.\n");
- }
-
- console.log(`\nAnalyzing: ${file.name}`);
- console.log(`Nodes: ${totalNodes}`);
-
- // Build rule configs: start from preset or defaults
- let configs: Record<string, RuleConfig> = options.preset
- ? { ...getConfigsWithPreset(options.preset) }
- : { ...RULE_CONFIGS };
-
- // Load and merge config file overrides
- let excludeNodeNames: string[] | undefined;
- let excludeNodeTypes: string[] | undefined;
-
- if (options.config) {
- const configFile = await loadConfigFile(options.config);
- configs = mergeConfigs(configs, configFile);
- excludeNodeNames = configFile.excludeNodeNames;
- excludeNodeTypes = configFile.excludeNodeTypes;
- console.log(`Config loaded: ${options.config}`);
- }
-
- // Load and register custom rules
- if (options.customRules) {
- const { rules, configs: customConfigs } = await loadCustomRules(options.customRules);
- for (const rule of rules) {
- ruleRegistry.register(rule);
- }
- configs = { ...configs, ...customConfigs };
- console.log(`Custom rules loaded: ${rules.length} rules from ${options.customRules}`);
- }
-
- // Build analysis options
- const analyzeOptions = {
- configs: configs as Record<RuleId, RuleConfig>,
- ...(effectiveNodeId && { targetNodeId: effectiveNodeId }),
- ...(excludeNodeNames && { excludeNodeNames }),
- ...(excludeNodeTypes && { excludeNodeTypes }),
- };
-
- // Run analysis
- const result = analyzeFile(file, analyzeOptions);
- console.log(`Nodes: ${result.nodeCount} (max depth: ${result.maxDepth})`);
-
- // Calculate scores
- const scores = calculateScores(result);
-
- // JSON output mode
- if (options.json) {
- console.log(JSON.stringify(buildResultJson(file.name, result, scores), null, 2));
- return;
- }
-
- // Print summary to terminal
- console.log("\n" + "=".repeat(50));
- console.log(formatScoreSummary(scores));
- console.log("=".repeat(50));
-
- // Generate HTML report
- const now = new Date();
- const ts = `${now.getFullYear()}-${String(now.getMonth() + 1).padStart(2, "0")}-${String(now.getDate()).padStart(2, "0")}-${String(now.getHours()).padStart(2, "0")}-${String(now.getMinutes()).padStart(2, "0")}`;
- let outputPath: string;
-
- if (options.output) {
- outputPath = resolve(options.output);
- const outputDir = dirname(outputPath);
- if (!existsSync(outputDir)) {
- mkdirSync(outputDir, { recursive: true });
- }
- } else {
- ensureReportsDir();
- outputPath = resolve(getReportsDir(), `report-${ts}-${file.fileKey}.html`);
- }
-
- const figmaToken = options.token ?? getFigmaToken();
- const html = generateHtmlReport(file, result, scores, { figmaToken });
- await writeFile(outputPath, html, "utf-8");
- console.log(`\nReport saved: ${outputPath}`);
-
- trackEvent(EVENTS.ANALYSIS_COMPLETED, {
- nodeCount: result.nodeCount,
- issueCount: result.issues.length,
- grade: scores.overall.grade,
- percentage: scores.overall.percentage,
- duration: Date.now() - analysisStart,
- });
- trackEvent(EVENTS.REPORT_GENERATED, { format: "html" });
-
- // Open in browser unless --no-open
- if (!options.noOpen) {
- const { exec } = await import("node:child_process");
- const cmd = process.platform === "darwin" ? "open" : process.platform === "win32" ? "start" : "xdg-open";
- exec(`${cmd} "${outputPath}"`);
- }
-
- // Exit with error code if grade is F
- if (scores.overall.grade === "F") {
- process.exit(1);
- }
- } catch (error) {
- trackError(
- error instanceof Error ? error : new Error(String(error)),
- { command: "analyze", input },
- );
- trackEvent(EVENTS.ANALYSIS_FAILED, {
- error: error instanceof Error ? error.message : String(error),
- duration: Date.now() - analysisStart,
- });
- console.error(
- "\nError:",
- error instanceof Error ? error.message : String(error)
- );
- process.exit(1);
- }
- });
-
-// ============================================
-// Internal calibration commands (used by subagents, hidden from user help)
-// ============================================
-
-interface CalibrateAnalyzeOptions {
- output?: string;
- runDir?: string;
- token?: string;
- targetNodeId?: string;
-}
-
-cli
- .command(
- "calibrate-analyze <input>",
- "Run calibration analysis and output JSON for conversion step"
- )
- .option("--output <path>", "Output JSON path", { default: "logs/calibration/calibration-analysis.json" })
- .option("--run-dir <dir>", "Run directory (overrides --output, writes to <dir>/analysis.json)")
- .option("--token <token>", "Figma API token (or use FIGMA_TOKEN env var)")
- .option("--target-node-id <nodeId>", "Scope analysis to a specific node")
- .action(async (input: string, options: CalibrateAnalyzeOptions) => {
- try {
- console.log("Running calibration analysis...");
-
- const calibConfig = {
- input,
- maxConversionNodes: 20,
- samplingStrategy: "top-issues" as const,
- outputPath: "logs/calibration/calibration-report.md",
- ...(options.token && { token: options.token }),
- ...(options.targetNodeId && { targetNodeId: options.targetNodeId }),
- };
-
- const { analysisOutput, ruleScores, fileKey } =
- await runCalibrationAnalyze(calibConfig);
-
- // Filter out icon/graphic nodes that are not useful for code conversion
- const filteredSummaries = filterConversionCandidates(
- analysisOutput.nodeIssueSummaries,
- analysisOutput.analysisResult.file.document
- );
-
- const outputData = {
- fileKey,
- fileName: analysisOutput.analysisResult.file.name,
- analyzedAt: analysisOutput.analysisResult.analyzedAt,
- nodeCount: analysisOutput.analysisResult.nodeCount,
- issueCount: analysisOutput.analysisResult.issues.length,
- scoreReport: analysisOutput.scoreReport,
- nodeIssueSummaries: filteredSummaries,
- ruleScores,
- };
-
- const outputPath = options.runDir
- ? resolve(options.runDir, "analysis.json")
- : resolve(options.output ?? "logs/calibration/calibration-analysis.json");
- const outputDir = dirname(outputPath);
- if (!existsSync(outputDir)) {
- mkdirSync(outputDir, { recursive: true });
- }
- await writeFile(outputPath, JSON.stringify(outputData, null, 2), "utf-8");
-
- console.log(`\nAnalysis complete.`);
- console.log(` Nodes: ${outputData.nodeCount}`);
- console.log(` Issues: ${outputData.issueCount}`);
- console.log(` Nodes with issues: ${outputData.nodeIssueSummaries.length}`);
- console.log(` Grade: ${outputData.scoreReport.overall.grade} (${outputData.scoreReport.overall.percentage}%)`);
- console.log(`\nOutput saved: ${outputPath}`);
- } catch (error) {
- console.error(
- "\nError:",
- error instanceof Error ? error.message : String(error)
- );
- process.exit(1);
- }
- });
-
-interface CalibrateEvaluateOptions {
- output?: string;
- runDir?: string;
-}
-
-cli
- .command(
- "calibrate-evaluate <analysisJson> <conversionJson>",
- "Evaluate conversion results and generate calibration report"
- )
- .option("--output <path>", "Report output path")
- .option("--run-dir <dir>", "Run directory (reads analysis.json + conversion.json, writes summary.md)")
- .action(async (analysisJsonPath: string, conversionJsonPath: string, options: CalibrateEvaluateOptions) => {
- try {
- console.log("Running calibration evaluation...");
-
- const analysisPath = options.runDir
- ? resolve(options.runDir, "analysis.json")
- : resolve(analysisJsonPath);
- const conversionPath = options.runDir
- ? resolve(options.runDir, "conversion.json")
- : resolve(conversionJsonPath);
-
- if (!existsSync(analysisPath)) {
- throw new Error(`Analysis file not found: ${analysisPath}`);
- }
- if (!existsSync(conversionPath)) {
- throw new Error(`Conversion file not found: ${conversionPath}`);
- }
-
- const { readFile } = await import("node:fs/promises");
- const analysisData = JSON.parse(await readFile(analysisPath, "utf-8"));
- const conversionData = JSON.parse(await readFile(conversionPath, "utf-8"));
-
- // Derive fixture name from run-dir: <fixtureName>--<timestamp>
- let fixtureName: string | undefined;
- if (options.runDir) {
- const dirName = resolve(options.runDir).split(/[/\\]/).pop() ?? "";
- const idx = dirName.lastIndexOf("--");
- fixtureName = idx === -1 ? dirName : dirName.slice(0, idx);
- }
-
- const { evaluationOutput, tuningOutput, report } = runCalibrationEvaluate(
- analysisData,
- conversionData,
- analysisData.ruleScores,
- { collectEvidence: !!options.runDir, ...(fixtureName ? { fixtureName } : {}) }
- );
-
- let outputPath: string;
- if (options.runDir) {
- outputPath = resolve(options.runDir, "summary.md");
- } else if (options.output) {
- outputPath = resolve(options.output);
- } else {
- const calNow = new Date();
- const calTs = `${calNow.getFullYear()}-${String(calNow.getMonth() + 1).padStart(2, "0")}-${String(calNow.getDate()).padStart(2, "0")}-${String(calNow.getHours()).padStart(2, "0")}-${String(calNow.getMinutes()).padStart(2, "0")}`;
- outputPath = resolve(`logs/calibration/calibration-${calTs}.md`);
- }
- const calOutputDir = dirname(outputPath);
- if (!existsSync(calOutputDir)) {
- mkdirSync(calOutputDir, { recursive: true });
- }
- await writeFile(outputPath, report, "utf-8");
-
- const mismatchCounts = {
- overscored: 0,
- underscored: 0,
- "missing-rule": 0,
- validated: 0,
- };
- for (const m of evaluationOutput.mismatches) {
- const key = m.type as keyof typeof mismatchCounts;
- mismatchCounts[key]++;
- }
-
- console.log(`\nEvaluation complete.`);
- console.log(` Validated: ${mismatchCounts.validated}`);
- console.log(` Overscored: ${mismatchCounts.overscored}`);
- console.log(` Underscored: ${mismatchCounts.underscored}`);
- console.log(` Missing rules: ${mismatchCounts["missing-rule"]}`);
- console.log(` Score adjustments proposed: ${tuningOutput.adjustments.length}`);
- console.log(` New rule proposals: ${tuningOutput.newRuleProposals.length}`);
- console.log(`\nReport saved: ${outputPath}`);
- } catch (error) {
- console.error(
- "\nError:",
- error instanceof Error ? error.message : String(error)
- );
- process.exit(1);
- }
- });
-
-interface CalibrateGapReportOptions {
- calibrationDir?: string;
- output?: string;
- minRepeat?: string;
- json?: boolean;
-}
-
-cli
- .command(
- "calibrate-gap-report",
- "Aggregate gap data and calibration runs into a rule review report"
- )
- .option("--calibration-dir <dir>", "Calibration runs directory", {
- default: "logs/calibration",
- })
- .option("--output <path>", "Markdown report path", {
- default: "logs/calibration/REPORT.md",
- })
- .option("--min-repeat <n>", "Minimum distinct fixtures to treat as a repeating pattern", {
- default: "2",
- })
- .option("--json", "Print JSON summary to stdout")
- .action(async (options: CalibrateGapReportOptions) => {
- try {
- const minRepeat = Math.max(1, parseInt(options.minRepeat ?? "2", 10) || 2);
- const result = generateGapRuleReport({
- calibrationDir: resolve(options.calibrationDir ?? "logs/calibration"),
- minPatternRepeat: minRepeat,
- });
-
- const outPath = resolve(options.output ?? "logs/calibration/REPORT.md");
- const outDir = dirname(outPath);
- if (!existsSync(outDir)) {
- mkdirSync(outDir, { recursive: true });
- }
-
- // Backup existing report with timestamp before overwriting
- if (existsSync(outPath)) {
- const { readFile: readFileAsync } = await import("node:fs/promises");
- const existing = await readFileAsync(outPath, "utf-8");
- // Extract timestamp from the "Generated:" line
- const match = existing.match(/Generated:\s*(\d{4}-\d{2}-\d{2}T[\d:.]+Z)/);
- if (match?.[1]) {
- const ts = match[1].replace(/[:.]/g, "-").replace("T", "-").replace("Z", "");
- const backupPath = outPath.replace(/\.md$/, `--${ts}.md`);
- await writeFile(backupPath, existing, "utf-8");
- console.log(` Previous report backed up: ${backupPath}`);
- }
- }
-
- await writeFile(outPath, result.markdown, "utf-8");
-
- console.log("Gap rule review report written.");
- console.log(` Runs with gaps: ${result.gapRunCount}`);
- console.log(` Runs with snapshots: ${result.runCount}`);
- console.log(` Output: ${outPath}`);
-
- if (options.json) {
- console.log(
- JSON.stringify(
- {
- gapRunCount: result.gapRunCount,
- runCount: result.runCount,
- outputPath: outPath,
- },
- null,
- 2
- )
- );
- }
- } catch (error) {
- console.error(
- "\nError:",
- error instanceof Error ? error.message : String(error)
- );
- process.exit(1);
- }
- });
-
-// ============================================
-// Fixture management commands
-// ============================================
-
-import {
- listActiveFixtures,
- listDoneFixtures,
- moveFixtureToDone,
- parseDebateResult,
- extractAppliedRuleIds,
- extractFixtureName,
- resolveLatestRunDir,
- checkConvergence,
-} from "../agents/run-directory.js";
-import {
- pruneCalibrationEvidence,
- pruneDiscoveryEvidence,
-} from "../agents/evidence-collector.js";
-
-cli
- .command(
- "fixture-list [fixturesDir]",
- "List active and done fixtures"
- )
- .option("--json", "Output as JSON")
- .action((fixturesDir: string | undefined, options: { json?: boolean }) => {
- const dir = fixturesDir ?? "fixtures";
- const active = listActiveFixtures(dir);
- const done = listDoneFixtures(dir);
-
- if (options.json) {
- console.log(JSON.stringify({ active, done }, null, 2));
- } else {
- console.log(`Active fixtures (${active.length}):`);
- for (const p of active) {
- console.log(` ${p}`);
- }
- console.log(`\nDone fixtures (${done.length}):`);
- for (const p of done) {
- console.log(` ${p}`);
- }
- }
- });
-
-cli
- .command(
- "fixture-done <fixturePath>",
- "Move a converged fixture to done/"
- )
- .option("--fixtures-dir <dir>", "Fixtures root directory", { default: "fixtures" })
- .option("--force", "Skip convergence check")
- .option("--run-dir <dir>", "Run directory to check for convergence (auto-resolves latest if omitted)")
- .option("--dry-run", "Show convergence judgment without moving files")
- .option(
- "--lenient-convergence",
- "Converged when no applied/revised decisions (ignore rejected; see calibration issue #14)"
- )
- .action(
- (fixturePath: string, options: {
- fixturesDir?: string;
- force?: boolean;
- runDir?: string;
- dryRun?: boolean;
- lenientConvergence?: boolean;
- }) => {
- const fixtureName = extractFixtureName(fixturePath);
-
- // Resolve run directory: explicit --run-dir or auto-resolve latest
- let runDir = options.runDir ? resolve(options.runDir) : null;
- if (!runDir && !options.force) {
- const latest = resolveLatestRunDir(fixtureName);
- if (latest) {
- runDir = latest;
- console.log(`Auto-resolved latest run: ${runDir}`);
- }
- }
-
- if (!options.force) {
- if (!runDir) {
- console.error(`Error: no run directory found for fixture "${fixtureName}". Specify --run-dir, or use --force to skip check.`);
- process.exit(1);
- }
- const summary = checkConvergence(runDir, { lenient: options.lenientConvergence });
- console.log(`\nConvergence check (${summary.mode}):`);
- console.log(` ${summary.reason}`);
- if (summary.total > 0) {
- console.log(` applied=${summary.applied} revised=${summary.revised} rejected=${summary.rejected} kept=${summary.kept}`);
- }
-
- if (options.dryRun) {
- console.log(`\n[dry-run] Would ${summary.converged ? "move" : "NOT move"} fixture: ${fixturePath}`);
- return;
- }
-
- if (!summary.converged) {
- console.error(`\nError: fixture has not converged. Use --force to override or --lenient-convergence.`);
- process.exit(1);
- }
- } else if (options.dryRun) {
- console.log(`[dry-run] --force: would move fixture without convergence check: ${fixturePath}`);
- return;
- }
-
- const dest = moveFixtureToDone(fixturePath, options.fixturesDir ?? "fixtures");
- if (dest) {
- console.log(`\nMoved to: ${dest}`);
- } else {
- console.error(`Error: fixture not found: ${fixturePath}`);
- process.exit(1);
- }
- });
-
-cli
- .command(
- "calibrate-prune-evidence <runDir>",
- "Prune evidence for rules applied by the Arbitrator in the given run"
- )
- .action((runDir: string) => {
- if (!existsSync(resolve(runDir))) {
- console.log(`Run directory not found: ${runDir}`);
- return;
- }
- const debate = parseDebateResult(resolve(runDir));
- if (!debate) {
- console.log("No debate.json found — nothing to prune.");
- return;
- }
-
- const appliedIds = extractAppliedRuleIds(debate);
- if (appliedIds.length === 0) {
- console.log("No applied/revised rules — nothing to prune.");
- return;
- }
-
- pruneCalibrationEvidence(appliedIds);
- console.log(`Pruned calibration evidence for ${appliedIds.length} rule(s): ${appliedIds.join(", ")}`);
- });
-
-cli
- .command(
- "discovery-prune-evidence ",
- "Prune discovery evidence for a category addressed by /add-rule"
- )
- .action((category: string | string[]) => {
- const categories = Array.isArray(category) ? category : [category];
- try {
- pruneDiscoveryEvidence(categories);
- console.log(`Pruned discovery evidence for categories: ${categories.join(", ")}`);
- } catch (err) {
- const msg = err instanceof Error ? err.message : String(err);
- console.error(`[evidence] Failed to prune discovery evidence: ${msg}`);
- process.exitCode = 1;
- }
- });
-
-interface CalibrateRunOptions {
- output?: string;
- token?: string;
- maxNodes?: number;
- sampling?: string;
-}
-
-cli
- .command(
- "calibrate-run <input>",
- "Run full calibration pipeline (analysis-only, conversion via /calibrate-loop)"
- )
- .option("--output <path>", "Markdown report output path")
- .option("--token <token>", "Figma API token (or use FIGMA_TOKEN env var)")
- .option("--max-nodes <n>", "Max nodes to convert", { default: 5 })
- .option("--sampling <strategy>", "Sampling strategy (all | top-issues | random)", { default: "top-issues" })
- .action(async (input: string, options: CalibrateRunOptions) => {
- try {
- const figmaToken = options.token ?? getFigmaToken();
-
- if (isFigmaUrl(input) && !parseFigmaUrl(input).nodeId) {
- console.warn("\nWarning: No node-id specified. Calibrating entire file may produce noisy results.");
- console.warn("Tip: Add ?node-id=XXX to target a specific section.\n");
- }
-
- console.log("Running calibration pipeline (analysis-only)...");
- console.log(` Input: ${input}`);
- console.log("");
-
- const { analysisOutput } = await runCalibrationAnalyze({
- input,
- maxConversionNodes: options.maxNodes ?? 5,
- samplingStrategy: (options.sampling as "all" | "top-issues" | "random") ?? "top-issues",
- outputPath: options.output ?? "unused",
- ...(figmaToken && { token: figmaToken }),
- });
-
- console.log("\nCalibration complete (analysis-only).");
- console.log(` Grade: ${analysisOutput.scoreReport.overall.grade} (${analysisOutput.scoreReport.overall.percentage}%)`);
- console.log(` Nodes with issues: ${analysisOutput.nodeIssueSummaries.length}`);
- console.log(" Note: Use /calibrate-loop in Claude Code for full pipeline with visual comparison.");
- } catch (error) {
- console.error(
- "\nError:",
- error instanceof Error ? error.message : String(error)
- );
- process.exit(1);
- }
- });
-
-// ============================================
-// Utility commands
-// ============================================
-
-interface SaveFixtureOptions {
- output?: string;
- api?: boolean;
- token?: string;
- imageScale?: string;
-}
-
-cli
- .command(
- "save-fixture <input>",
- "Save Figma design as a fixture directory for offline analysis"
- )
- .option("--output <dir>", "Output directory (default: fixtures/<name>/)")
- .option("--name <name>", "Fixture name (default: extracted from URL)")
- .option("--token <token>", "Figma API token (or use FIGMA_TOKEN env var)")
- .option("--image-scale <scale>", "Image export scale: 2 for PC (default), 3 for mobile")
- .example(" canicode save-fixture https://www.figma.com/design/ABC123/MyDesign?node-id=1-234")
- .example(" canicode save-fixture https://www.figma.com/design/ABC123/MyDesign?node-id=1-234 --image-scale 3")
- .action(async (input: string, options: SaveFixtureOptions & { name?: string }) => {
- try {
- if (!isFigmaUrl(input)) {
- throw new Error("save-fixture requires a Figma URL as input.");
- }
-
- if (!parseFigmaUrl(input).nodeId) {
- console.warn("\nWarning: No node-id specified. Saving entire file as fixture.");
- console.warn("Tip: Add ?node-id=XXX to save a specific section.\n");
- }
-
- const { file } = await loadFile(input, options.token);
- file.sourceUrl = input;
-
- const fixtureName = options.name ?? file.fileKey;
- const fixtureDir = resolve(options.output ?? `fixtures/${fixtureName}`);
- mkdirSync(fixtureDir, { recursive: true });
-
- // 0. Resolve component master node trees
- const figmaTokenForComponents = options.token ?? getFigmaToken();
- if (figmaTokenForComponents) {
- const { FigmaClient: FC } = await import("../core/adapters/figma-client.js");
- const { resolveComponentDefinitions } = await import("../core/adapters/component-resolver.js");
- const componentClient = new FC({ token: figmaTokenForComponents });
- try {
- const definitions = await resolveComponentDefinitions(componentClient, file.fileKey, file.document);
- const count = Object.keys(definitions).length;
- if (count > 0) {
- file.componentDefinitions = definitions;
- console.log(`Resolved ${count} component master node tree(s)`);
- }
- } catch {
- console.warn("Warning: failed to resolve component definitions (continuing)");
- }
- }
-
- // 1. Save data.json
- const dataPath = resolve(fixtureDir, "data.json");
- await writeFile(dataPath, JSON.stringify(file, null, 2), "utf-8");
- console.log(`Fixture saved: ${fixtureDir}/`);
- console.log(` data.json: ${file.name} (${countNodes(file.document)} nodes)`);
-
- // 2. Download screenshot
- const figmaToken = options.token ?? getFigmaToken();
- if (figmaToken) {
- const { FigmaClient } = await import("../core/adapters/figma-client.js");
- const client = new FigmaClient({ token: figmaToken });
- const { nodeId } = parseFigmaUrl(input);
- const rootNodeId = nodeId?.replace(/-/g, ":") ?? file.document.id;
-
- try {
- const imageUrls = await client.getNodeImages(file.fileKey, [rootNodeId], { format: "png", scale: 2 });
- const url = imageUrls[rootNodeId];
- if (url) {
- const resp = await fetch(url);
- if (resp.ok) {
- const buffer = Buffer.from(await resp.arrayBuffer());
- const { writeFile: writeFileSync } = await import("node:fs/promises");
- await writeFileSync(resolve(fixtureDir, "screenshot.png"), buffer);
- console.log(` screenshot.png: saved`);
- }
- }
- } catch {
- console.warn(" screenshot.png: failed to download (continuing)");
- }
-
- // 3. Download SVGs for VECTOR nodes
- const vectorNodeIds = collectVectorNodeIds(file.document);
- if (vectorNodeIds.length > 0) {
- const vectorDir = resolve(fixtureDir, "vectors");
- mkdirSync(vectorDir, { recursive: true });
-
- const svgUrls = await client.getNodeImages(file.fileKey, vectorNodeIds, { format: "svg" });
- let downloaded = 0;
- for (const [id, svgUrl] of Object.entries(svgUrls)) {
- if (!svgUrl) continue;
- try {
- const resp = await fetch(svgUrl);
- if (resp.ok) {
- const svg = await resp.text();
- const safeId = id.replace(/:/g, "-");
- await writeFile(resolve(vectorDir, `${safeId}.svg`), svg, "utf-8");
- downloaded++;
- }
- } catch {
- // Skip failed downloads
- }
- }
- console.log(` vectors/: ${downloaded}/${vectorNodeIds.length} SVGs`);
- }
-
- // 4. Download PNGs for IMAGE fill nodes
- const imageNodes = collectImageNodes(file.document);
- if (imageNodes.length > 0) {
- const imgScale = options.imageScale !== undefined ? Number(options.imageScale) : 2;
- if (!Number.isFinite(imgScale) || imgScale < 1 || imgScale > 4) {
- console.error("Error: --image-scale must be 1-4 (2 for PC, 3 for mobile)");
- process.exit(1);
- }
-
- const imageDir = resolve(fixtureDir, "images");
- mkdirSync(imageDir, { recursive: true });
-
- const imageUrls = await client.getNodeImages(
- file.fileKey,
- imageNodes.map((n) => n.id),
- { format: "png", scale: imgScale }
- );
-
- const usedNames = new Map<string, number>();
- const nodeIdToFilename = new Map<string, string>();
- for (const { id, name } of imageNodes) {
- let base = sanitizeFilename(name);
- const count = usedNames.get(base) ?? 0;
- usedNames.set(base, count + 1);
- if (count > 0) base = `${base}-${count + 1}`;
- nodeIdToFilename.set(id, `${base}@${imgScale}x.png`);
- }
-
- let imgDownloaded = 0;
- for (const [id, imgUrl] of Object.entries(imageUrls)) {
- if (!imgUrl) continue;
- const filename = nodeIdToFilename.get(id);
- if (!filename) continue;
- try {
- const resp = await fetch(imgUrl);
- if (resp.ok) {
- const buf = Buffer.from(await resp.arrayBuffer());
- await writeFile(resolve(imageDir, filename), buf);
- imgDownloaded++;
- }
- } catch {
- // Skip failed downloads
- }
- }
-
- const mapping: Record<string, string> = {};
- for (const [id, filename] of nodeIdToFilename) {
- mapping[id] = filename;
- }
- await writeFile(
- resolve(imageDir, "mapping.json"),
- JSON.stringify(mapping, null, 2),
- "utf-8"
- );
-
- console.log(` images/: ${imgDownloaded}/${imageNodes.length} PNGs (@${imgScale}x)`);
- }
- }
- } catch (error) {
- console.error(
- "\nError:",
- error instanceof Error ? error.message : String(error)
- );
- process.exit(1);
- }
- });
-
// ============================================
-// Design tree command
+// User-facing commands
// ============================================
-
-cli
- .command(
- "design-tree <input>",
- "Generate a DOM-like design tree from a Figma file or fixture"
- )
- .option("--token <token>", "Figma API token (or use FIGMA_TOKEN env var)")
- .option("--output <path>", "Output file path (default: stdout)")
- .option("--vector-dir <dir>", "Directory with SVG files for VECTOR nodes (auto-detected from fixture path)")
- .option("--image-dir <dir>", "Directory with image PNGs for IMAGE fill nodes (auto-detected from fixture path)")
- .example(" canicode design-tree ./fixtures/my-design")
- .example(" canicode design-tree https://www.figma.com/design/ABC/File?node-id=1-234 --output tree.txt")
- .action(async (input: string, options: { token?: string; output?: string; vectorDir?: string; imageDir?: string }) => {
- try {
- const { file } = await loadFile(input, options.token);
-
- const fixtureBase = isJsonFile(input) ? dirname(resolve(input)) : resolve(input);
-
- // Auto-detect vector dir from fixture path
- let vectorDir = options.vectorDir;
- if (!vectorDir) {
- const autoDir = resolve(fixtureBase, "vectors");
- if (existsSync(autoDir)) vectorDir = autoDir;
- }
-
- // Auto-detect image dir from fixture path
- let imageDir = options.imageDir;
- if (!imageDir) {
- const autoDir = resolve(fixtureBase, "images");
- if (existsSync(autoDir)) imageDir = autoDir;
- }
-
- const { generateDesignTreeWithStats } = await import("../core/engine/design-tree.js");
- const treeOptions = {
- ...(vectorDir ? { vectorDir } : {}),
- ...(imageDir ? { imageDir } : {}),
- };
- const stats = generateDesignTreeWithStats(file, treeOptions);
-
- if (options.output) {
- const outputDir = dirname(resolve(options.output));
- if (!existsSync(outputDir)) mkdirSync(outputDir, { recursive: true });
- const { writeFile: writeFileAsync } = await import("node:fs/promises");
- await writeFileAsync(resolve(options.output), stats.tree, "utf-8");
- console.log(`Design tree saved: ${resolve(options.output)} (${Math.round(stats.bytes / 1024)}KB, ~${stats.estimatedTokens} tokens)`);
- } else {
- console.log(stats.tree);
- }
- } catch (error) {
- console.error("\nError:", error instanceof Error ? error.message : String(error));
- process.exit(1);
- }
- });
+registerAnalyze(cli);
+registerSaveFixture(cli);
+registerDesignTree(cli);
+registerImplement(cli);
+registerVisualCompare(cli);
+registerInit(cli);
+registerConfig(cli);
+registerListRules(cli);
+registerPrompt(cli);
// ============================================
-// Implement command — design-to-code package
+// Internal commands (calibration & fixtures)
// ============================================
-
-interface ImplementOptions {
- token?: string;
- output?: string;
- prompt?: string;
- imageScale?: string;
-}
-
-cli
- .command(
- "implement ",
- "Prepare design-to-code package: analysis + design tree + assets + prompt"
- )
- .option("--token ", "Figma API token (or use FIGMA_TOKEN env var)")
- .option("--output ", "Output directory (default: ./canicode-implement/)")
- .option("--prompt ", "Custom prompt file (default: built-in HTML+CSS prompt)")
- .option("--image-scale ", "Image export scale: 2 for PC (default), 3 for mobile")
- .example(" canicode implement ./fixtures/my-design")
- .example(" canicode implement ./fixtures/my-design --prompt ./my-react-prompt.md --image-scale 3")
- .action(async (input: string, options: ImplementOptions) => {
- try {
-
- const outputDir = resolve(options.output ?? "canicode-implement");
- mkdirSync(outputDir, { recursive: true });
-
- console.log("\nPreparing implementation package...\n");
-
- // 1. Load file
- const { file } = await loadFile(input, options.token);
- console.log(`Design: ${file.name}`);
-
- // 2. Analysis
- const result = analyzeFile(file);
- const scores = calculateScores(result);
- const resultJson = buildResultJson(file.name, result, scores);
- await writeFile(resolve(outputDir, "analysis.json"), JSON.stringify(resultJson, null, 2), "utf-8");
- console.log(` analysis.json: ${result.issues.length} issues, grade ${scores.overall.grade}`);
-
- // 3. Prepare assets (before design tree, so tree can reference image paths)
- const fixtureBase = (isJsonFile(input) || isFixtureDir(input))
- ? (isJsonFile(input) ? dirname(resolve(input)) : resolve(input))
- : undefined;
-
- let vectorDir = fixtureBase ? resolve(fixtureBase, "vectors") : undefined;
- let imageDir = fixtureBase ? resolve(fixtureBase, "images") : undefined;
-
- // Copy fixture assets to output
- if (vectorDir && existsSync(vectorDir)) {
- const vecOutputDir = resolve(outputDir, "vectors");
- mkdirSync(vecOutputDir, { recursive: true });
- const { readdirSync, copyFileSync } = await import("node:fs");
- const vecFiles = readdirSync(vectorDir).filter(f => f.endsWith(".svg"));
- for (const f of vecFiles) {
- copyFileSync(resolve(vectorDir, f), resolve(vecOutputDir, f));
- }
- console.log(` vectors/: ${vecFiles.length} SVGs copied`);
- }
-
- if (imageDir && existsSync(imageDir)) {
- const imgOutputDir = resolve(outputDir, "images");
- mkdirSync(imgOutputDir, { recursive: true });
- const { readdirSync, copyFileSync } = await import("node:fs");
- const imgFiles = readdirSync(imageDir).filter(f => f.endsWith(".png") || f.endsWith(".jpg") || f.endsWith(".json"));
- for (const f of imgFiles) {
- copyFileSync(resolve(imageDir, f), resolve(imgOutputDir, f));
- }
- const pngCount = imgFiles.filter(f => f.endsWith(".png")).length;
- console.log(` images/: ${pngCount} assets copied`);
- }
-
- // Download assets from Figma API for live URLs
- if (isFigmaUrl(input) && !fixtureBase) {
- const figmaToken = options.token ?? getFigmaToken();
- if (figmaToken) {
- const imgScale = options.imageScale !== undefined ? Number(options.imageScale) : 2;
- if (!Number.isFinite(imgScale) || imgScale < 1 || imgScale > 4) {
- console.error("Error: --image-scale must be 1-4 (2 for PC, 3 for mobile)");
- process.exit(1);
- }
-
- const { FigmaClient } = await import("../core/adapters/figma-client.js");
- const client = new FigmaClient({ token: figmaToken });
-
- // Download screenshot
- const { nodeId } = parseFigmaUrl(input);
- const rootNodeId = nodeId?.replace(/-/g, ":") ?? file.document.id;
- try {
- const screenshotUrls = await client.getNodeImages(file.fileKey, [rootNodeId], { format: "png", scale: 2 });
- const screenshotUrl = screenshotUrls[rootNodeId];
- if (screenshotUrl) {
- const resp = await fetch(screenshotUrl);
- if (resp.ok) {
- const buf = Buffer.from(await resp.arrayBuffer());
- await writeFile(resolve(outputDir, "screenshot.png"), buf);
- console.log(` screenshot.png: saved`);
- }
- }
- } catch {
- console.warn(" screenshot.png: failed to download (continuing)");
- }
-
- // Download vector SVGs
- const vectorNodeIds = collectVectorNodeIds(file.document);
- if (vectorNodeIds.length > 0) {
- const vecOutDir = resolve(outputDir, "vectors");
- mkdirSync(vecOutDir, { recursive: true });
- try {
- const svgUrls = await client.getNodeImages(file.fileKey, vectorNodeIds, { format: "svg" });
- let downloaded = 0;
- for (const [id, svgUrl] of Object.entries(svgUrls)) {
- if (!svgUrl) continue;
- try {
- const resp = await fetch(svgUrl);
- if (resp.ok) {
- const svg = await resp.text();
- const safeId = id.replace(/:/g, "-");
- await writeFile(resolve(vecOutDir, `${safeId}.svg`), svg, "utf-8");
- downloaded++;
- }
- } catch { /* skip */ }
- }
- console.log(` vectors/: ${downloaded}/${vectorNodeIds.length} SVGs`);
- } catch {
- console.warn(" vectors/: failed to download (continuing)");
- }
- }
-
- // Download image PNGs
- const imgNodes = collectImageNodes(file.document);
- if (imgNodes.length > 0) {
- const imgOutDir = resolve(outputDir, "images");
- mkdirSync(imgOutDir, { recursive: true });
- try {
- const imgUrls = await client.getNodeImages(
- file.fileKey,
- imgNodes.map(n => n.id),
- { format: "png", scale: imgScale },
- );
- const usedNames = new Map<string, number>();
- let downloaded = 0;
- for (const { id, name } of imgNodes) {
- const imgUrl = imgUrls[id];
- if (!imgUrl) continue;
- let base = sanitizeFilename(name);
- const count = usedNames.get(base) ?? 0;
- usedNames.set(base, count + 1);
- if (count > 0) base = `${base}-${count + 1}`;
- const filename = `${base}@${imgScale}x.png`;
- try {
- const resp = await fetch(imgUrl);
- if (resp.ok) {
- const buf = Buffer.from(await resp.arrayBuffer());
- await writeFile(resolve(imgOutDir, filename), buf);
- downloaded++;
- }
- } catch { /* skip */ }
- }
- // Write mapping.json for design-tree
- const mapping: Record<string, string> = {};
- const usedNamesForMapping = new Map<string, number>();
- for (const { id, name } of imgNodes) {
- let base = sanitizeFilename(name);
- const cnt = usedNamesForMapping.get(base) ?? 0;
- usedNamesForMapping.set(base, cnt + 1);
- if (cnt > 0) base = `${base}-${cnt + 1}`;
- mapping[id] = `${base}@${imgScale}x.png`;
- }
- await writeFile(resolve(imgOutDir, "mapping.json"), JSON.stringify(mapping, null, 2), "utf-8");
-
- imageDir = imgOutDir;
- console.log(` images/: ${downloaded}/${imgNodes.length} PNGs (@${imgScale}x)`);
- } catch {
- console.warn(" images/: failed to download (continuing)");
- }
- }
-
- // Update vectorDir to point to downloaded assets
- const vecOutCheck = resolve(outputDir, "vectors");
- if (existsSync(vecOutCheck)) vectorDir = vecOutCheck;
- }
- }
-
- // 4. Design tree (after assets so image paths are available)
- const { generateDesignTreeWithStats } = await import("../core/engine/design-tree.js");
- const treeOptions = {
- ...(vectorDir && existsSync(vectorDir) ? { vectorDir } : {}),
- ...(imageDir && existsSync(imageDir) ? { imageDir } : {}),
- };
- const stats = generateDesignTreeWithStats(file, treeOptions);
- await writeFile(resolve(outputDir, "design-tree.txt"), stats.tree, "utf-8");
- console.log(` design-tree.txt: ~${stats.estimatedTokens} tokens`);
-
- // 5. Assemble prompt
- if (options.prompt) {
- // Custom prompt: copy user's file
- const { readFile: rf } = await import("node:fs/promises");
- const customPrompt = await rf(resolve(options.prompt), "utf-8");
- await writeFile(resolve(outputDir, "PROMPT.md"), customPrompt, "utf-8");
- console.log(` PROMPT.md: custom (${options.prompt})`);
- } else {
- // Default: built-in HTML+CSS prompt
- const { readFile: rf } = await import("node:fs/promises");
- const { dirname: dirnameFn, resolve: resolveFn } = await import("node:path");
- const { fileURLToPath } = await import("node:url");
- const cliDir = dirnameFn(fileURLToPath(import.meta.url));
- const projectRoot = resolveFn(cliDir, "../..");
- const altRoot = resolveFn(cliDir, "..");
-
- let prompt = "";
- for (const root of [projectRoot, altRoot]) {
- const p = resolveFn(root, ".claude/skills/design-to-code/PROMPT.md");
- try {
- prompt = await rf(p, "utf-8");
- break;
- } catch { /* try next */ }
- }
-
- if (prompt) {
- await writeFile(resolve(outputDir, "PROMPT.md"), prompt, "utf-8");
- console.log(` PROMPT.md: default (html-css)`);
- } else {
- console.warn(" PROMPT.md: built-in prompt not found (skipped)");
- }
- }
-
- // Summary
- console.log(`\n${"=".repeat(50)}`);
- console.log(`Implementation package ready: ${outputDir}/`);
- console.log(` Grade: ${scores.overall.grade} (${scores.overall.percentage}%)`);
- console.log(` Issues: ${result.issues.length}`);
- console.log(` Design tree: ~${stats.estimatedTokens} tokens`);
- console.log(`${"=".repeat(50)}`);
- console.log(`\nNext: Feed design-tree.txt + PROMPT.md to your AI assistant.`);
- } catch (error) {
- console.error("\nError:", error instanceof Error ? error.message : String(error));
- process.exit(1);
- }
- });
-
-// ============================================
-// Visual compare command
-// ============================================
-
-interface VisualCompareOptions {
- figmaUrl: string;
- token?: string;
- output?: string;
- width?: number;
- height?: number;
-}
-
-cli
- .command(
- "visual-compare ",
- "Compare rendered code against Figma screenshot (pixel-level similarity)"
- )
- .option("--figma-url ", "Figma URL with node-id (required)")
- .option("--token ", "Figma API token (or use FIGMA_TOKEN env var)")
- .option("--output ", "Output directory for screenshots and diff (default: /tmp/canicode-visual-compare)")
- .option("--width ", "Logical viewport width in CSS px (default: infer from Figma PNG ÷ export scale)")
- .option("--height ", "Logical viewport height in CSS px (default: infer from Figma PNG ÷ export scale)")
- .option("--figma-scale ", "Figma export scale (default: 2, matches save-fixture / @2x PNGs)")
- .example(" canicode visual-compare ./generated/index.html --figma-url 'https://www.figma.com/design/ABC/File?node-id=1-234'")
- .action(async (codePath: string, options: VisualCompareOptions & { figmaScale?: string }) => {
- try {
- if (!options.figmaUrl) {
- console.error("Error: --figma-url is required");
- process.exit(1);
- }
-
- const token = options.token ?? getFigmaToken();
- if (!token) {
- console.error("Error: Figma token required. Use --token or set FIGMA_TOKEN env var.");
- process.exit(1);
- }
-
- const { visualCompare } = await import("../core/engine/visual-compare.js");
-
- const exportScale =
- options.figmaScale !== undefined ? Number(options.figmaScale) : undefined;
- if (exportScale !== undefined && (!Number.isFinite(exportScale) || exportScale < 1)) {
- console.error("Error: --figma-scale must be a number >= 1");
- process.exit(1);
- }
-
- // CAC passes option values as strings — coerce to numbers before validation
- const width = options.width !== undefined ? Number(options.width) : undefined;
- const height = options.height !== undefined ? Number(options.height) : undefined;
-
- if (width !== undefined && (!Number.isFinite(width) || width <= 0)) {
- console.error("Error: --width must be a positive number");
- process.exit(1);
- }
- if (height !== undefined && (!Number.isFinite(height) || height <= 0)) {
- console.error("Error: --height must be a positive number");
- process.exit(1);
- }
-
- const hasViewportOverride = width !== undefined || height !== undefined;
-
- console.log("Comparing...");
- const result = await visualCompare({
- figmaUrl: options.figmaUrl,
- figmaToken: token,
- codePath: resolve(codePath),
- outputDir: options.output,
- ...(exportScale !== undefined ? { figmaExportScale: exportScale } : {}),
- ...(hasViewportOverride
- ? {
- viewport: {
- ...(width !== undefined ? { width } : {}),
- ...(height !== undefined ? { height } : {}),
- },
- }
- : {}),
- });
-
- // JSON output for programmatic use
- console.log(JSON.stringify({
- similarity: result.similarity,
- diffPixels: result.diffPixels,
- totalPixels: result.totalPixels,
- width: result.width,
- height: result.height,
- figmaScreenshot: result.figmaScreenshotPath,
- codeScreenshot: result.codeScreenshotPath,
- diff: result.diffPath,
- }, null, 2));
-
- } catch (error) {
- console.error(
- "\nError:",
- error instanceof Error ? error.message : String(error)
- );
- process.exit(1);
- }
- });
-
-// ============================================
-// Setup command
-// ============================================
-
-interface InitOptions {
- token?: string;
- mcp?: boolean;
-}
-
-cli
- .command("init", "Set up canicode (Figma token or MCP)")
- .option("--token ", "Save Figma API token to ~/.canicode/")
- .option("--mcp", "Show Figma MCP setup instructions")
- .action((options: InitOptions) => {
- try {
- if (options.token) {
- initAiready(options.token);
-
- console.log(` Config saved: ${getConfigPath()}`);
- console.log(` Reports will be saved to: ${getReportsDir()}/`);
- console.log(`\n Next: canicode analyze "https://www.figma.com/design/..."`);
- return;
- }
-
- if (options.mcp) {
- console.log(`FIGMA MCP SETUP (for Claude Code)\n`);
- console.log(`1. Register the official Figma MCP server at project level:`);
- console.log(` claude mcp add -s project -t http figma https://mcp.figma.com/mcp\n`);
- console.log(` This creates .mcp.json in your project root.\n`);
- console.log(`2. Use the /canicode skill in Claude Code:`);
- console.log(` /canicode https://www.figma.com/design/.../MyDesign?node-id=1-234\n`);
- console.log(` The skill calls Figma MCP directly — no FIGMA_TOKEN needed.`);
- return;
- }
-
- // No flags: show setup guide
- console.log(`CANICODE SETUP\n`);
- console.log(`Choose your Figma data source:\n`);
- console.log(`Option 1: REST API (recommended for CI/automation)`);
- console.log(` canicode init --token YOUR_FIGMA_TOKEN`);
- console.log(` Get token: figma.com > Settings > Personal access tokens\n`);
- console.log(`Option 2: Figma MCP (recommended for Claude Code)`);
- console.log(` canicode init --mcp`);
- console.log(` Uses the /canicode skill in Claude Code with official Figma MCP\n`);
- console.log(`After setup:`);
- console.log(` canicode analyze "https://www.figma.com/design/..."`);
- } catch (error) {
- console.error(
- "\nError:",
- error instanceof Error ? error.message : String(error)
- );
- process.exit(1);
- }
- });
-
-// ============================================
-// Config command (telemetry opt-out)
-// ============================================
-
-interface ConfigOptions {
- telemetry?: boolean;
- noTelemetry?: boolean;
-}
-
-cli
- .command("config", "Manage canicode configuration")
- .option("--telemetry", "Enable anonymous telemetry")
- .option("--no-telemetry", "Disable anonymous telemetry")
- .action((options: ConfigOptions) => {
- try {
- if (options.noTelemetry === true) {
- setTelemetryEnabled(false);
- console.log("Telemetry disabled. No analytics data will be sent.");
- return;
- }
-
- if (options.telemetry === true) {
- setTelemetryEnabled(true);
- console.log("Telemetry enabled. Only anonymous usage events are tracked — no design data.");
- return;
- }
-
- // No flags: show current config
- const cfg = readConfig();
- console.log("CANICODE CONFIG\n");
- console.log(` Config path: ${getConfigPath()}`);
- console.log(` Figma token: ${cfg.figmaToken ? "set" : "not set"}`);
- console.log(` Telemetry: ${cfg.telemetry !== false ? "enabled" : "disabled"}`);
- console.log(`\nOptions:`);
- console.log(` canicode config --no-telemetry Opt out of anonymous telemetry`);
- console.log(` canicode config --telemetry Opt back in`);
- } catch (error) {
- console.error(
- "\nError:",
- error instanceof Error ? error.message : String(error)
- );
- process.exit(1);
- }
- });
-
-// ============================================
-// List rules command
-// ============================================
-
-interface ListRulesOptions {
- customRules?: string;
- config?: string;
- json?: boolean;
-}
-
-cli
- .command("list-rules", "List all analysis rules with scores and severity")
- .option("--custom-rules ", "Include custom rules from JSON file")
- .option("--config ", "Apply config overrides to show effective scores")
- .option("--json", "Output as JSON")
- .action(async (options: ListRulesOptions) => {
- try {
- let configs: Record<string, RuleConfig> = { ...RULE_CONFIGS };
-
- if (options.config) {
- const configFile = await loadConfigFile(options.config);
- configs = mergeConfigs(configs, configFile);
- }
-
- if (options.customRules) {
- const { rules, configs: customConfigs } = await loadCustomRules(options.customRules);
- for (const rule of rules) {
- ruleRegistry.register(rule);
- }
- configs = { ...configs, ...customConfigs };
- }
-
- const rules = ruleRegistry.getAll().map((rule) => {
- const config = configs[rule.definition.id as string];
- return {
- id: rule.definition.id,
- name: rule.definition.name,
- category: rule.definition.category,
- severity: config?.severity ?? "risk",
- score: config?.score ?? 0,
- enabled: config?.enabled ?? true,
- };
- });
-
- if (options.json) {
- console.log(JSON.stringify(rules, null, 2));
- return;
- }
-
- // Group by category
- const byCategory = new Map<string, typeof rules>();
- for (const rule of rules) {
- const list = byCategory.get(rule.category) ?? [];
- list.push(rule);
- byCategory.set(rule.category, list);
- }
-
- for (const [category, catRules] of byCategory) {
- console.log(`\n ${category.toUpperCase()}`);
- for (const r of catRules) {
- const status = r.enabled ? "" : " (disabled)";
- const pad = " ".repeat(Math.max(0, 40 - r.id.length));
- console.log(` ${r.id}${pad} ${String(r.score).padStart(4)} ${r.severity}${status}`);
- }
- }
- console.log(`\n Total: ${rules.length} rules\n`);
- } catch (error) {
- console.error(
- "\nError:",
- error instanceof Error ? error.message : String(error)
- );
- process.exit(1);
- }
- });
-
-// ============================================
-// Prompt command
-// ============================================
-
-cli
- .command("prompt", "Output the standard design-to-code prompt for AI code generation")
- .action(async () => {
- try {
- const { readFile } = await import("node:fs/promises");
- const { dirname: dirnameFn, resolve: resolveFn } = await import("node:path");
- const { fileURLToPath } = await import("node:url");
- const __dirname = dirnameFn(fileURLToPath(import.meta.url));
- // Try from source location first, then npm-installed location
- const paths = [
- resolveFn(__dirname, "../../.claude/skills/design-to-code/PROMPT.md"),
- resolveFn(__dirname, "../.claude/skills/design-to-code/PROMPT.md"),
- ];
- for (const p of paths) {
- try {
- const content = await readFile(p, "utf-8");
- console.log(content);
- return;
- } catch { /* try next */ }
- }
- console.error("Prompt file not found");
- process.exit(1);
- } catch (error) {
- console.error("Error:", error instanceof Error ? error.message : String(error));
- process.exit(1);
- }
- });
+registerCalibrateAnalyze(cli);
+registerCalibrateEvaluate(cli);
+registerCalibrateGapReport(cli);
+registerCalibrateRun(cli);
+registerFixtureManagement(cli);
+registerEvidencePrune(cli);
// ============================================
// Documentation command
// ============================================
-
cli
.command("docs [topic]", "Show documentation (topics: setup, rules, config, visual-compare, design-tree)")
.action((topic?: string) => {