diff --git a/.claude-plugin/marketplace.json b/.claude-plugin/marketplace.json index c8f5818..08f9c33 100644 --- a/.claude-plugin/marketplace.json +++ b/.claude-plugin/marketplace.json @@ -6,7 +6,7 @@ }, "metadata": { "description": "Project management plugins with Gitea and NetBox integrations", - "version": "8.1.0" + "version": "9.0.0" }, "plugins": [ { @@ -277,6 +277,186 @@ ], "license": "MIT", "domain": "core" + }, + { + "name": "saas-api-platform", + "version": "0.1.0", + "description": "REST and GraphQL API scaffolding for FastAPI and Express projects", + "source": "./plugins/saas-api-platform", + "author": { + "name": "Leo Miranda", + "email": "leobmiranda@gmail.com" + }, + "homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/saas-api-platform/README.md", + "repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git", + "category": "development", + "tags": [ + "api", + "rest", + "graphql", + "fastapi", + "express", + "openapi" + ], + "license": "MIT", + "domain": "saas" + }, + { + "name": "saas-db-migrate", + "version": "0.1.0", + "description": "Database migration management for Alembic, Prisma, and raw SQL", + "source": "./plugins/saas-db-migrate", + "author": { + "name": "Leo Miranda", + "email": "leobmiranda@gmail.com" + }, + "homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/saas-db-migrate/README.md", + "repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git", + "category": "development", + "tags": [ + "database", + "migrations", + "alembic", + "prisma", + "sql", + "schema" + ], + "license": "MIT", + "domain": "saas" + }, + { + "name": "saas-react-platform", + "version": "0.1.0", + "description": "React frontend development toolkit for Next.js and Vite projects", + "source": "./plugins/saas-react-platform", + "author": { + "name": "Leo Miranda", + "email": "leobmiranda@gmail.com" + }, + "homepage": 
"https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/saas-react-platform/README.md", + "repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git", + "category": "development", + "tags": [ + "react", + "nextjs", + "vite", + "typescript", + "frontend", + "components" + ], + "license": "MIT", + "domain": "saas" + }, + { + "name": "saas-test-pilot", + "version": "0.1.0", + "description": "Test automation toolkit for pytest, Jest, Vitest, and Playwright", + "source": "./plugins/saas-test-pilot", + "author": { + "name": "Leo Miranda", + "email": "leobmiranda@gmail.com" + }, + "homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/saas-test-pilot/README.md", + "repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git", + "category": "development", + "tags": [ + "testing", + "pytest", + "jest", + "vitest", + "playwright", + "coverage" + ], + "license": "MIT", + "domain": "saas" + }, + { + "name": "data-seed", + "version": "0.1.0", + "description": "Test data generation and database seeding with relationship-aware profiles", + "source": "./plugins/data-seed", + "author": { + "name": "Leo Miranda", + "email": "leobmiranda@gmail.com" + }, + "homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/data-seed/README.md", + "repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git", + "category": "data", + "tags": [ + "seed-data", + "test-data", + "faker", + "fixtures", + "database" + ], + "license": "MIT", + "domain": "data" + }, + { + "name": "ops-release-manager", + "version": "0.1.0", + "description": "Release management with semantic versioning, changelogs, and tag automation", + "source": "./plugins/ops-release-manager", + "author": { + "name": "Leo Miranda", + "email": "leobmiranda@gmail.com" + }, + "homepage": 
"https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/ops-release-manager/README.md", + "repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git", + "category": "development", + "tags": [ + "release", + "semver", + "changelog", + "versioning", + "tags" + ], + "license": "MIT", + "domain": "ops" + }, + { + "name": "ops-deploy-pipeline", + "version": "0.1.0", + "description": "CI/CD deployment pipeline management for Docker Compose and systemd services", + "source": "./plugins/ops-deploy-pipeline", + "author": { + "name": "Leo Miranda", + "email": "leobmiranda@gmail.com" + }, + "homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/ops-deploy-pipeline/README.md", + "repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git", + "category": "infrastructure", + "tags": [ + "deploy", + "docker-compose", + "systemd", + "caddy", + "cicd" + ], + "license": "MIT", + "domain": "ops" + }, + { + "name": "debug-mcp", + "version": "0.1.0", + "description": "MCP server debugging, inspection, and development toolkit", + "source": "./plugins/debug-mcp", + "author": { + "name": "Leo Miranda", + "email": "leobmiranda@gmail.com" + }, + "homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/debug-mcp/README.md", + "repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git", + "category": "development", + "tags": [ + "mcp", + "debugging", + "diagnostics", + "server", + "development" + ], + "license": "MIT", + "domain": "debug" } ] } diff --git a/CHANGELOG.md b/CHANGELOG.md index 8befd4c..52ee265 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,55 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). 
## [Unreleased] +### Added + +- **Phase 3: 8 new plugin scaffolds** + - `saas-api-platform` (domain: saas) — REST/GraphQL API scaffolding for FastAPI and Express. 6 commands, 2 agents, 5 skills + - `saas-db-migrate` (domain: saas) — Database migration management for Alembic, Prisma, and raw SQL. 6 commands, 2 agents, 5 skills + - `saas-react-platform` (domain: saas) — React frontend toolkit for Next.js and Vite projects. 6 commands, 2 agents, 6 skills + - `saas-test-pilot` (domain: saas) — Test automation for pytest, Jest, Vitest, and Playwright. 6 commands, 2 agents, 6 skills + - `data-seed` (domain: data) — Test data generation and database seeding. 5 commands, 2 agents, 5 skills + - `ops-release-manager` (domain: ops) — Release management with SemVer, changelogs, and tag automation. 6 commands, 2 agents, 5 skills + - `ops-deploy-pipeline` (domain: ops) — CI/CD deployment pipeline for Docker Compose and systemd. 6 commands, 2 agents, 6 skills + - `debug-mcp` (domain: debug) — MCP server debugging, inspection, and development toolkit. 5 commands, 1 agent, 5 skills +- 8 design documents in `docs/designs/` for all new plugins + +--- + +## [9.0.0] - 2026-02-06 + +### BREAKING CHANGES + +#### Command Consolidation (v9.0.0) + +All commands renamed to the `/<namespace> <sub-command>` pattern. Every command across all 12 plugins now follows this convention. See [MIGRATION-v9.md](./docs/MIGRATION-v9.md) for the complete old-to-new mapping. + +**Key changes:** +- **projman:** `/sprint-plan` → `/sprint plan`, `/pm-setup` → `/projman setup`, `/pm-review` → `/sprint review`, `/pm-test` → `/sprint test`, `/labels-sync` → `/labels sync` +- **git-flow:** 8→5 commands. `/git-commit` → `/gitflow commit`. Three commit variants (`-push`, `-merge`, `-sync`) consolidated into `--push`/`--merge`/`--sync` flags. 
`/branch-start` → `/gitflow branch-start`, `/git-status` → `/gitflow status`, `/git-config` → `/gitflow config` +- **pr-review:** `/pr-review` → `/pr review`, `/project-init` → `/pr init`, `/project-sync` → `/pr sync` +- **clarity-assist:** `/clarify` → `/clarity clarify`, `/quick-clarify` → `/clarity quick-clarify` +- **doc-guardian:** `/doc-audit` → `/doc audit`, `/changelog-gen` → `/doc changelog-gen`, `/stale-docs` → `/doc stale-docs` +- **code-sentinel:** `/security-scan` → `/sentinel scan`, `/refactor` → `/sentinel refactor` +- **claude-config-maintainer:** `/config-analyze` → `/claude-config analyze` (all 8 commands prefixed) +- **contract-validator:** `/validate-contracts` → `/cv validate`, `/check-agent` → `/cv check-agent` +- **cmdb-assistant:** `/cmdb-search` → `/cmdb search`, `/change-audit` → `/cmdb change-audit`, `/ip-conflicts` → `/cmdb ip-conflicts` +- **data-platform:** `/data-ingest` → `/data ingest`, `/dbt-test` → `/data dbt-test`, `/lineage-viz` → `/data lineage-viz` +- **viz-platform:** `/accessibility-check` → `/viz accessibility-check`, `/design-gate` → `/viz design-gate`, `/design-review` → `/viz design-review` + +### Added + +- Dispatch files for all 12 plugins — each plugin now has a `.md` routing table listing all sub-commands +- `name:` frontmatter field added to all command files for sub-command resolution +- `docs/MIGRATION-v9.md` — Complete old-to-new command mapping for consumer migration +- `docs/COMMANDS-CHEATSHEET.md` — Full rewrite with v9.0.0 command names + +### Changed + +- All documentation updated with new command names: CLAUDE.md, README.md, CONFIGURATION.md, UPDATING.md, agent-workflow.spec.md, netbox/README.md +- All cross-plugin references updated (skills, agents, integration files) +- `marketplace.json` version bumped to 9.0.0 + --- ## [8.1.0] - 2026-02-06 diff --git a/CLAUDE.md b/CLAUDE.md index c635503..229ce16 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -128,25 +128,33 @@ These plugins exist in source but are **NOT 
relevant** to this project's workflo | **data-platform** | For data engineering projects (pandas, PostgreSQL, dbt) | | **viz-platform** | For dashboard projects (Dash, Plotly) | | **cmdb-assistant** | For infrastructure projects (NetBox) | +| **saas-api-platform** | For REST/GraphQL API projects (FastAPI, Express) | +| **saas-db-migrate** | For database migration projects (Alembic, Prisma) | +| **saas-react-platform** | For React frontend projects (Next.js, Vite) | +| **saas-test-pilot** | For test automation projects (pytest, Jest, Playwright) | +| **data-seed** | For test data generation and seeding | +| **ops-release-manager** | For release management workflows | +| **ops-deploy-pipeline** | For deployment pipeline management | +| **debug-mcp** | For MCP server debugging and development | -**Do NOT suggest** `/data-ingest`, `/data-profile`, `/viz-chart`, `/cmdb-*` commands - they don't apply here. +**Do NOT suggest** `/data ingest`, `/data profile`, `/viz chart`, `/cmdb *`, `/api *`, `/db-migrate *`, `/react *`, `/test *`, `/seed *`, `/release *`, `/deploy *`, `/debug-mcp *` commands - they don't apply here. ### Key Distinction | Context | Path | What To Do | |---------|------|------------| | **Editing plugin source** | `~/claude-plugins-work/plugins/` | Modify code, add features | -| **Using installed plugins** | `~/.claude/plugins/marketplaces/` | Run commands like `/sprint-plan` | +| **Using installed plugins** | `~/.claude/plugins/marketplaces/` | Run commands like `/sprint plan` | -When user says "run /sprint-plan", use the INSTALLED plugin. -When user says "fix the sprint-plan command", edit the SOURCE code. +When user says "run /sprint plan", use the INSTALLED plugin. +When user says "fix the sprint plan command", edit the SOURCE code. 
--- ## Project Overview **Repository:** leo-claude-mktplace -**Version:** 8.1.0 +**Version:** 9.0.0 **Status:** Production Ready A plugin marketplace for Claude Code containing: @@ -165,6 +173,14 @@ A plugin marketplace for Claude Code containing: | `viz-platform` | DMC validation, Plotly charts, and theming for dashboards | 1.1.0 | | `contract-validator` | Cross-plugin compatibility validation and agent verification | 1.1.0 | | `project-hygiene` | Project file organization and cleanup checks | 0.1.0 | +| `saas-api-platform` | REST/GraphQL API scaffolding for FastAPI and Express | 0.1.0 | +| `saas-db-migrate` | Database migration management for Alembic, Prisma, raw SQL | 0.1.0 | +| `saas-react-platform` | React frontend toolkit for Next.js and Vite | 0.1.0 | +| `saas-test-pilot` | Test automation for pytest, Jest, Vitest, Playwright | 0.1.0 | +| `data-seed` | Test data generation and database seeding | 0.1.0 | +| `ops-release-manager` | Release management with SemVer and changelog automation | 0.1.0 | +| `ops-deploy-pipeline` | Deployment pipeline for Docker Compose and systemd | 0.1.0 | +| `debug-mcp` | MCP server debugging and development toolkit | 0.1.0 | ## Quick Start @@ -180,16 +196,16 @@ A plugin marketplace for Claude Code containing: | Category | Commands | |----------|----------| -| **Setup** | `/pm-setup` (modes: `--full`, `--quick`, `--sync`) | -| **Sprint** | `/sprint-plan`, `/sprint-start`, `/sprint-status` (with `--diagram`), `/sprint-close` | -| **Quality** | `/pm-review`, `/pm-test` (modes: `run`, `gen`) | +| **Setup** | `/projman setup` (modes: `--full`, `--quick`, `--sync`) | +| **Sprint** | `/sprint plan`, `/sprint start`, `/sprint status` (with `--diagram`), `/sprint close` | +| **Quality** | `/sprint review`, `/sprint test` (modes: `run`, `gen`) | | **Project** | `/project initiation\|plan\|status\|close` | | **ADR** | `/adr create\|list\|update\|supersede` | -| **PR Review** | `/pr-review`, `/pr-summary`, `/pr-findings`, `/pr-diff` | -| 
**Docs** | `/doc-audit`, `/doc-sync`, `/changelog-gen`, `/doc-coverage`, `/stale-docs` | -| **Security** | `/security-scan`, `/refactor`, `/refactor-dry` | -| **Config** | `/config-analyze`, `/config-optimize`, `/config-diff`, `/config-lint` | -| **Validation** | `/validate-contracts`, `/check-agent`, `/list-interfaces`, `/dependency-graph`, `/cv status` | +| **PR Review** | `/pr review`, `/pr summary`, `/pr findings`, `/pr diff` | +| **Docs** | `/doc audit`, `/doc sync`, `/doc changelog-gen`, `/doc coverage`, `/doc stale-docs` | +| **Security** | `/sentinel scan`, `/sentinel refactor`, `/sentinel refactor-dry` | +| **Config** | `/claude-config analyze`, `/claude-config optimize`, `/claude-config diff`, `/claude-config lint` | +| **Validation** | `/cv validate`, `/cv check-agent`, `/cv list-interfaces`, `/cv dependency-graph`, `/cv status` | | **Maintenance** | `/hygiene check` | ### Plugin Commands - NOT RELEVANT to This Project @@ -198,9 +214,17 @@ These commands are being developed but don't apply to this project's workflow: | Category | Commands | For Projects Using | |----------|----------|-------------------| -| **Data** | `/data-ingest`, `/data-profile`, `/data-schema`, `/data-lineage`, `/dbt-test` | pandas, PostgreSQL, dbt | -| **Visualization** | `/viz-component`, `/viz-chart`, `/viz-dashboard`, `/viz-theme` | Dash, Plotly dashboards | -| **CMDB** | `/cmdb-search`, `/cmdb-device`, `/cmdb-sync` | NetBox infrastructure | +| **Data** | `/data ingest`, `/data profile`, `/data schema`, `/data lineage`, `/data dbt-test` | pandas, PostgreSQL, dbt | +| **Visualization** | `/viz component`, `/viz chart`, `/viz dashboard`, `/viz theme` | Dash, Plotly dashboards | +| **CMDB** | `/cmdb search`, `/cmdb device`, `/cmdb sync` | NetBox infrastructure | +| **API** | `/api scaffold`, `/api validate`, `/api docs`, `/api middleware` | FastAPI, Express | +| **DB Migrate** | `/db-migrate generate`, `/db-migrate validate`, `/db-migrate plan` | Alembic, Prisma | +| **React** | 
`/react component`, `/react route`, `/react state`, `/react hook` | Next.js, Vite | +| **Testing** | `/test generate`, `/test coverage`, `/test fixtures`, `/test e2e` | pytest, Jest, Playwright | +| **Seeding** | `/seed generate`, `/seed profile`, `/seed apply` | Faker, test data | +| **Release** | `/release prepare`, `/release validate`, `/release tag` | SemVer releases | +| **Deploy** | `/deploy generate`, `/deploy validate`, `/deploy check` | Docker Compose, systemd | +| **Debug MCP** | `/debug-mcp status`, `/debug-mcp test`, `/debug-mcp logs` | MCP server development | ## Repository Structure @@ -223,11 +247,11 @@ leo-claude-mktplace/ │ │ └── skills/ # 23 reusable skill files │ ├── git-flow/ # Git workflow automation │ │ ├── .claude-plugin/plugin.json -│ │ ├── commands/ # 8 commands +│ │ ├── commands/ # 5 commands │ │ └── agents/ │ ├── pr-review/ # Multi-agent PR review │ │ ├── .claude-plugin/plugin.json -│ │ ├── commands/ # 6 commands +│ │ ├── commands/ # 8 commands │ │ └── agents/ # 5 agents │ ├── clarity-assist/ # Prompt optimization │ │ ├── .claude-plugin/plugin.json @@ -363,8 +387,8 @@ Wiki-based Request for Comments system for tracking feature ideas from proposal **Lifecycle:** Draft → Review → Approved → Implementing → Implemented **Integration with Sprint Planning:** -- `/sprint-plan` detects approved RFCs and offers selection -- `/sprint-close` updates RFC status on completion +- `/sprint plan` detects approved RFCs and offers selection +- `/sprint close` updates RFC status on completion ## Label Taxonomy @@ -373,7 +397,7 @@ Wiki-based Request for Comments system for tracking feature ideas from proposal **Organization:** Agent/2, Complexity/3, Efforts/5, Priority/4, Risk/3, Source/4, Status/4, Type/6 **Repository:** Component/9, Tech/7, Domain/2, Epic/5, RnD/4 -Sync with `/labels-sync` command. +Sync with `/labels sync` command. ## Lessons Learned System @@ -411,8 +435,10 @@ Stored in Gitea Wiki under `lessons-learned/sprints/`. 
| Domain | Plugins | |--------|---------| | `core` | projman, git-flow, pr-review, code-sentinel, doc-guardian, clarity-assist, contract-validator, claude-config-maintainer, project-hygiene | -| `data` | data-platform, viz-platform | -| `ops` | cmdb-assistant | +| `data` | data-platform, viz-platform, data-seed | +| `saas` | saas-api-platform, saas-db-migrate, saas-react-platform, saas-test-pilot | +| `ops` | cmdb-assistant, ops-release-manager, ops-deploy-pipeline | +| `debug` | debug-mcp | ### Adding a Command to projman diff --git a/README.md b/README.md index d1c4ed7..7367845 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -# Leo Claude Marketplace - v8.1.0 +# Leo Claude Marketplace - v9.0.0 A collection of Claude Code plugins for project management, infrastructure automation, and development workflows. @@ -16,7 +16,7 @@ Use the launcher script to load only the plugins you need, reducing token overhe | `review` | pr-review, code-sentinel | Lightweight code review | | `data` | data-platform, viz-platform | Data engineering and visualization | | `infra` | cmdb-assistant | Infrastructure/CMDB management | -| `full` | All 12 plugins via marketplace.json | When you need everything | +| `full` | All 20 plugins via marketplace.json | When you need everything | **Examples:** ```bash @@ -45,7 +45,7 @@ AI-guided sprint planning with full Gitea integration. 
Transforms a proven 15-sp - Branch-aware security (development/staging/production) - Pre-sprint-close code quality review and test verification -**Commands:** `/sprint-plan`, `/sprint-start`, `/sprint-status`, `/sprint-close`, `/labels-sync`, `/pm-setup`, `/pm-review`, `/pm-test`, `/pm-debug`, `/suggest-version`, `/proposal-status`, `/rfc` +**Commands:** `/sprint plan`, `/sprint start`, `/sprint status`, `/sprint close`, `/labels sync`, `/projman setup`, `/sprint review`, `/sprint test`, `/projman debug`, `/projman suggest-version`, `/projman proposal-status`, `/rfc` #### [git-flow](./plugins/git-flow) **Git Workflow Automation** @@ -58,7 +58,7 @@ Smart git operations with intelligent commit messages and branch management. - Merge and cleanup automation - Protected branch awareness -**Commands:** `/git-commit`, `/git-commit-push`, `/git-commit-merge`, `/git-commit-sync`, `/branch-start`, `/branch-cleanup`, `/git-status`, `/git-config` +**Commands:** `/gitflow commit` (with `--push`, `--merge`, `--sync` flags), `/gitflow branch-start`, `/gitflow branch-cleanup`, `/gitflow status`, `/gitflow config` #### [pr-review](./plugins/pr-review) **Multi-Agent PR Review** @@ -70,14 +70,14 @@ Comprehensive pull request review using specialized agents. - Actionable feedback with suggested fixes - Gitea integration for automated review submission -**Commands:** `/pr-review`, `/pr-summary`, `/pr-findings`, `/pr-diff`, `/pr-setup`, `/project-init`, `/project-sync` +**Commands:** `/pr review`, `/pr summary`, `/pr findings`, `/pr diff`, `/pr setup`, `/pr init`, `/pr sync` #### [claude-config-maintainer](./plugins/claude-config-maintainer) **CLAUDE.md and Settings Optimization** Analyze, optimize, and create CLAUDE.md configuration files. Audit and optimize settings.local.json permissions. 
-**Commands:** `/analyze`, `/optimize`, `/init`, `/config-diff`, `/config-lint`, `/config-audit-settings`, `/config-optimize-settings`, `/config-permissions-map` +**Commands:** `/claude-config analyze`, `/claude-config optimize`, `/claude-config init`, `/claude-config diff`, `/claude-config lint`, `/claude-config audit-settings`, `/claude-config optimize-settings`, `/claude-config permissions-map` #### [contract-validator](./plugins/contract-validator) **Cross-Plugin Compatibility Validation** @@ -90,7 +90,7 @@ Validate plugin marketplaces for command conflicts, tool overlaps, and broken ag - Data flow validation for agent sequences - Markdown or JSON reports with actionable suggestions -**Commands:** `/validate-contracts`, `/check-agent`, `/list-interfaces`, `/dependency-graph`, `/cv-setup` +**Commands:** `/cv validate`, `/cv check-agent`, `/cv list-interfaces`, `/cv dependency-graph`, `/cv setup` ### Productivity @@ -103,14 +103,14 @@ Transform vague requests into clear specifications using structured methodology. - ND-friendly question patterns (option-based, chunked) - Conflict detection and escalation protocols -**Commands:** `/clarify`, `/quick-clarify` +**Commands:** `/clarity clarify`, `/clarity quick-clarify` #### [doc-guardian](./plugins/doc-guardian) **Documentation Lifecycle Management** Automatic documentation drift detection and synchronization. -**Commands:** `/doc-audit`, `/doc-sync`, `/changelog-gen`, `/doc-coverage`, `/stale-docs` +**Commands:** `/doc audit`, `/doc sync`, `/doc changelog-gen`, `/doc coverage`, `/doc stale-docs` #### [project-hygiene](./plugins/project-hygiene) **Post-Task Cleanup Automation** @@ -124,7 +124,7 @@ Hook-based cleanup that runs after Claude completes work. Security vulnerability detection and code refactoring tools. 
-**Commands:** `/security-scan`, `/refactor`, `/refactor-dry` +**Commands:** `/sentinel scan`, `/sentinel refactor`, `/sentinel refactor-dry` ### Infrastructure @@ -133,7 +133,7 @@ Security vulnerability detection and code refactoring tools. Full CRUD operations for network infrastructure management directly from Claude Code. -**Commands:** `/cmdb-setup`, `/cmdb-search`, `/cmdb-device`, `/cmdb-ip`, `/cmdb-site`, `/cmdb-audit`, `/cmdb-register`, `/cmdb-sync`, `/cmdb-topology`, `/change-audit`, `/ip-conflicts` +**Commands:** `/cmdb setup`, `/cmdb search`, `/cmdb device`, `/cmdb ip`, `/cmdb site`, `/cmdb audit`, `/cmdb register`, `/cmdb sync`, `/cmdb topology`, `/cmdb change-audit`, `/cmdb ip-conflicts` ### Data Engineering @@ -148,7 +148,7 @@ Comprehensive data engineering toolkit with persistent DataFrame storage. - 100k row limit with chunking support - Auto-detection of dbt projects -**Commands:** `/data-ingest`, `/data-profile`, `/data-schema`, `/data-explain`, `/data-lineage`, `/lineage-viz`, `/data-run`, `/dbt-test`, `/data-quality`, `/data-review`, `/data-gate`, `/data-setup` +**Commands:** `/data ingest`, `/data profile`, `/data schema`, `/data explain`, `/data lineage`, `/data lineage-viz`, `/data run`, `/data dbt-test`, `/data quality`, `/data review`, `/data gate`, `/data setup` ### Visualization @@ -164,7 +164,109 @@ Visualization toolkit with version-locked component validation and design token - 5 Page tools for multi-page app structure - Dual theme storage: user-level and project-level -**Commands:** `/viz-chart`, `/viz-chart-export`, `/viz-dashboard`, `/viz-theme`, `/viz-theme-new`, `/viz-theme-css`, `/viz-component`, `/accessibility-check`, `/viz-breakpoints`, `/design-review`, `/design-gate`, `/viz-setup` +**Commands:** `/viz chart`, `/viz chart-export`, `/viz dashboard`, `/viz theme`, `/viz theme-new`, `/viz theme-css`, `/viz component`, `/viz accessibility-check`, `/viz breakpoints`, `/viz design-review`, `/viz design-gate`, `/viz setup` + +#### 
[data-seed](./plugins/data-seed) +**Test Data Generation and Database Seeding** + +Relationship-aware test data generation with reusable seed profiles. + +- Schema inference from existing databases +- Faker-based data generation with locale support +- Foreign key relationship resolution +- Reusable seed profiles for consistent test environments + +**Commands:** `/seed setup`, `/seed generate`, `/seed profile`, `/seed validate`, `/seed apply` + +### SaaS Development + +#### [saas-api-platform](./plugins/saas-api-platform) +**REST and GraphQL API Scaffolding** + +API development toolkit for FastAPI and Express projects with OpenAPI integration. + +- Framework-aware scaffolding (FastAPI, Express) +- OpenAPI spec generation and validation +- Middleware catalog with authentication, CORS, rate limiting +- Route pattern enforcement and test generation + +**Commands:** `/api setup`, `/api scaffold`, `/api validate`, `/api docs`, `/api middleware`, `/api test-routes` + +#### [saas-db-migrate](./plugins/saas-db-migrate) +**Database Migration Management** + +Migration toolkit for Alembic, Prisma, and raw SQL with safety validation. + +- ORM/tool auto-detection +- Migration safety analysis (data loss, locks, rollback) +- Execution planning with rollback strategies +- Migration history tracking + +**Commands:** `/db-migrate setup`, `/db-migrate generate`, `/db-migrate validate`, `/db-migrate plan`, `/db-migrate history`, `/db-migrate rollback` + +#### [saas-react-platform](./plugins/saas-react-platform) +**React Frontend Development Toolkit** + +Component scaffolding, routing, and state management for Next.js and Vite projects. 
+ +- Framework detection (Next.js App Router/Pages, Vite, CRA, Remix) +- TypeScript-first component generation with co-located tests +- State management pattern selection (Context, Zustand, Redux Toolkit) +- Anti-pattern detection and component tree analysis + +**Commands:** `/react setup`, `/react component`, `/react route`, `/react state`, `/react hook`, `/react lint` + +#### [saas-test-pilot](./plugins/saas-test-pilot) +**Test Automation Toolkit** + +Test generation and coverage analysis for pytest, Jest, Vitest, and Playwright. + +- Framework auto-detection and configuration +- Test case generation from code analysis +- Coverage gap detection with risk prioritization +- E2E test scenario generation from user stories + +**Commands:** `/test setup`, `/test generate`, `/test coverage`, `/test fixtures`, `/test e2e`, `/test run` + +### Operations + +#### [ops-release-manager](./plugins/ops-release-manager) +**Release Management Automation** + +Semantic versioning, changelog generation, and tag management. + +- Version location auto-detection across manifests +- Conventional commit-based bump suggestions +- Keep a Changelog format automation +- Release branch/tag creation and rollback + +**Commands:** `/release setup`, `/release prepare`, `/release validate`, `/release tag`, `/release rollback`, `/release status` + +#### [ops-deploy-pipeline](./plugins/ops-deploy-pipeline) +**Deployment Pipeline Management** + +CI/CD for Docker Compose and systemd-based services on self-hosted infrastructure. + +- Docker Compose configuration generation +- Caddy reverse proxy patterns +- Environment-specific config management +- Pre-deployment health checks and rollback planning + +**Commands:** `/deploy setup`, `/deploy generate`, `/deploy validate`, `/deploy env`, `/deploy check`, `/deploy rollback` + +### Debugging + +#### [debug-mcp](./plugins/debug-mcp) +**MCP Server Debugging Toolkit** + +Diagnostic tools for MCP server health, testing, and development. 
+ +- MCP server health status dashboard +- Individual tool call testing +- Server log analysis with error pattern recognition +- MCP server scaffold generation + +**Commands:** `/debug-mcp status`, `/debug-mcp test`, `/debug-mcp logs`, `/debug-mcp inspect`, `/debug-mcp scaffold` ## Domain Advisory Pattern @@ -172,14 +274,14 @@ The marketplace supports cross-plugin domain advisory integration: - **Domain Detection**: projman automatically detects when issues involve specialized domains (frontend/viz, data engineering) - **Acceptance Criteria**: Domain-specific acceptance criteria are added to issues during planning -- **Execution Gates**: Domain validation gates (`/design-gate`, `/data-gate`) run before issue completion +- **Execution Gates**: Domain validation gates (`/viz design-gate`, `/data gate`) run before issue completion - **Extensible**: New domains can be added by creating advisory agents and gate commands **Current Domains:** | Domain | Plugin | Gate Command | |--------|--------|--------------| -| Visualization | viz-platform | `/design-gate` | -| Data | data-platform | `/data-gate` | +| Visualization | viz-platform | `/viz design-gate` | +| Data | data-platform | `/data gate` | ## MCP Servers @@ -280,7 +382,7 @@ Add to `.claude/settings.json` in your target project: After installing plugins, run the setup wizard: ``` -/pm-setup +/projman setup ``` The wizard handles everything: @@ -292,12 +394,12 @@ The wizard handles everything: **For new projects** (when system is already configured): ``` -/project-init +/pr init ``` **After moving a repository:** ``` -/project-sync +/pr sync ``` See [docs/CONFIGURATION.md](./docs/CONFIGURATION.md) for manual setup and advanced options. 
@@ -332,17 +434,17 @@ After installing plugins, the `/plugin` command may show `(no content)` - this i | Plugin | Test Command | |--------|--------------| -| git-flow | `/git-flow:git-status` | +| git-flow | `/git-flow:gitflow-status` | | projman | `/projman:sprint-status` | | pr-review | `/pr-review:pr-summary` | -| clarity-assist | `/clarity-assist:clarify` | +| clarity-assist | `/clarity-assist:clarity-clarify` | | doc-guardian | `/doc-guardian:doc-audit` | -| code-sentinel | `/code-sentinel:security-scan` | -| claude-config-maintainer | `/claude-config-maintainer:analyze` | +| code-sentinel | `/code-sentinel:sentinel-scan` | +| claude-config-maintainer | `/claude-config-maintainer:claude-config-analyze` | | cmdb-assistant | `/cmdb-assistant:cmdb-search` | | data-platform | `/data-platform:data-ingest` | | viz-platform | `/viz-platform:viz-chart` | -| contract-validator | `/contract-validator:validate-contracts` | +| contract-validator | `/contract-validator:cv-validate` | ## Repository Structure diff --git a/docs/COMMANDS-CHEATSHEET.md b/docs/COMMANDS-CHEATSHEET.md index 716f7ec..9a476c6 100644 --- a/docs/COMMANDS-CHEATSHEET.md +++ b/docs/COMMANDS-CHEATSHEET.md @@ -1,6 +1,8 @@ # Plugin Commands Cheat Sheet -Quick reference for all commands in the Leo Claude Marketplace. +Quick reference for all commands in the Leo Claude Marketplace (v9.0.0+). + +All commands follow the `/<plugin> <command>` sub-command pattern. --- @@ -8,107 +10,104 @@ Quick reference for all commands in the Leo Claude Marketplace. 
| Plugin | Command | Auto | Manual | Description | |--------|---------|:----:|:------:|-------------| -| **projman** | `/sprint-plan` | | X | Start sprint planning with AI-guided architecture analysis and issue creation | -| **projman** | `/sprint-start` | | X | Begin sprint execution with dependency analysis and parallel task coordination (requires approval or `--force`) | -| **projman** | `/sprint-status` | | X | Check current sprint progress (add `--diagram` for Mermaid visualization) | -| **projman** | `/pm-review` | | X | Pre-sprint-close code quality review (debug artifacts, security, error handling) | -| **projman** | `/pm-test` | | X | Run tests (`/pm-test run`) or generate tests (`/pm-test gen `) | -| **projman** | `/sprint-close` | | X | Complete sprint and capture lessons learned to Gitea Wiki | -| **projman** | `/labels-sync` | | X | Synchronize label taxonomy from Gitea | -| **projman** | `/pm-setup` | | X | Auto-detect mode or use `--full`, `--quick`, `--sync`, `--clear-cache` | -| **projman** | `/rfc` | | X | RFC lifecycle management (`/rfc create\|list\|review\|approve\|reject`) | -| **projman** | `/project initiation` | | X | Source analysis + project charter creation | -| **projman** | `/project plan` | | X | WBS, risk register, and sprint roadmap | -| **projman** | `/project status` | | X | Full project hierarchy status view | -| **projman** | `/project close` | | X | Retrospective, lessons learned, and archive | -| **projman** | `/adr create` | | X | Create Architecture Decision Record in wiki | -| **projman** | `/adr list` | | X | List ADRs by status (accepted, proposed, deprecated) | -| **projman** | `/adr update` | | X | Update ADR content or transition status | -| **projman** | `/adr supersede` | | X | Supersede an ADR with a new one | -| **git-flow** | `/git-commit` | | X | Create commit with auto-generated conventional message | -| **git-flow** | `/git-commit-push` | | X | Commit and push to remote in one operation | -| **git-flow** | 
`/git-commit-merge` | | X | Commit current changes, then merge into target branch | -| **git-flow** | `/git-commit-sync` | | X | Full sync: commit, push, and sync with upstream/base branch | -| **git-flow** | `/branch-start` | | X | Create new feature/fix/chore branch with naming conventions | -| **git-flow** | `/branch-cleanup` | | X | Remove merged branches locally and optionally on remote | -| **git-flow** | `/git-status` | | X | Enhanced git status with recommendations | -| **git-flow** | `/git-config` | | X | Configure git-flow settings for the project | -| **pr-review** | `/pr-setup` | | X | Setup wizard for pr-review (shares Gitea MCP with projman) | -| **pr-review** | `/project-init` | | X | Quick project setup for PR reviews | -| **pr-review** | `/project-sync` | | X | Sync config with git remote after repo move/rename | -| **pr-review** | `/pr-review` | | X | Full multi-agent PR review with confidence scoring | -| **pr-review** | `/pr-summary` | | X | Quick summary of PR changes | -| **pr-review** | `/pr-findings` | | X | List and filter review findings by category/severity | -| **pr-review** | `/pr-diff` | | X | Formatted diff with inline review comments and annotations | -| **clarity-assist** | `/clarify` | | X | Full 4-D prompt optimization with ND accommodations | -| **clarity-assist** | `/quick-clarify` | | X | Rapid single-pass clarification for simple requests | -| **doc-guardian** | `/doc-audit` | | X | Full documentation audit - scans for doc drift | -| **doc-guardian** | `/doc-sync` | | X | Synchronize pending documentation updates | -| **doc-guardian** | `/changelog-gen` | | X | Generate changelog from conventional commits | -| **doc-guardian** | `/doc-coverage` | | X | Documentation coverage metrics by function/class | -| **doc-guardian** | `/stale-docs` | | X | Flag documentation behind code changes | -| **code-sentinel** | `/security-scan` | | X | Full security audit (SQL injection, XSS, secrets, etc.) 
| -| **code-sentinel** | `/refactor` | | X | Apply refactoring patterns to improve code | -| **code-sentinel** | `/refactor-dry` | | X | Preview refactoring without applying changes | +| **projman** | `/sprint plan` | | X | Start sprint planning with AI-guided architecture analysis and issue creation | +| **projman** | `/sprint start` | | X | Begin sprint execution with dependency analysis and parallel task coordination (requires approval or `--force`) | +| **projman** | `/sprint status` | | X | Check current sprint progress (add `--diagram` for Mermaid visualization) | +| **projman** | `/sprint review` | | X | Pre-sprint-close code quality review (debug artifacts, security, error handling) | +| **projman** | `/sprint test` | | X | Run tests (`/sprint test run`) or generate tests (`/sprint test gen <file>`) | +| **projman** | `/sprint close` | | X | Complete sprint and capture lessons learned to Gitea Wiki | +| **projman** | `/labels sync` | | X | Synchronize label taxonomy from Gitea | +| **projman** | `/projman setup` | | X | Auto-detect mode or use `--full`, `--quick`, `--sync`, `--clear-cache` | +| **projman** | `/rfc create\|list\|review\|approve\|reject` | | X | RFC lifecycle management | +| **projman** | `/project initiation\|plan\|status\|close` | | X | Project lifecycle management | +| **projman** | `/adr create\|list\|update\|supersede` | | X | Architecture Decision Records | +| **git-flow** | `/gitflow commit` | | X | Create commit with auto-generated conventional message. 
Flags: `--push`, `--merge`, `--sync` | +| **git-flow** | `/gitflow branch-start` | | X | Create new feature/fix/chore branch with naming conventions | +| **git-flow** | `/gitflow branch-cleanup` | | X | Remove merged branches locally and optionally on remote | +| **git-flow** | `/gitflow status` | | X | Enhanced git status with recommendations | +| **git-flow** | `/gitflow config` | | X | Configure git-flow settings for the project | +| **pr-review** | `/pr setup` | | X | Setup wizard for pr-review (shares Gitea MCP with projman) | +| **pr-review** | `/pr init` | | X | Quick project setup for PR reviews | +| **pr-review** | `/pr sync` | | X | Sync config with git remote after repo move/rename | +| **pr-review** | `/pr review` | | X | Full multi-agent PR review with confidence scoring | +| **pr-review** | `/pr summary` | | X | Quick summary of PR changes | +| **pr-review** | `/pr findings` | | X | List and filter review findings by category/severity | +| **pr-review** | `/pr diff` | | X | Formatted diff with inline review comments and annotations | +| **clarity-assist** | `/clarity clarify` | | X | Full 4-D prompt optimization with ND accommodations | +| **clarity-assist** | `/clarity quick-clarify` | | X | Rapid single-pass clarification for simple requests | +| **doc-guardian** | `/doc audit` | | X | Full documentation audit - scans for doc drift | +| **doc-guardian** | `/doc sync` | | X | Synchronize pending documentation updates | +| **doc-guardian** | `/doc changelog-gen` | | X | Generate changelog from conventional commits | +| **doc-guardian** | `/doc coverage` | | X | Documentation coverage metrics by function/class | +| **doc-guardian** | `/doc stale-docs` | | X | Flag documentation behind code changes | +| **code-sentinel** | `/sentinel scan` | | X | Full security audit (SQL injection, XSS, secrets, etc.) 
| +| **code-sentinel** | `/sentinel refactor` | | X | Apply refactoring patterns to improve code | +| **code-sentinel** | `/sentinel refactor-dry` | | X | Preview refactoring without applying changes | | **code-sentinel** | *PreToolUse hook* | X | | Scans code before writing; blocks critical issues | -| **claude-config-maintainer** | `/config-analyze` | | X | Analyze CLAUDE.md for optimization opportunities | -| **claude-config-maintainer** | `/config-optimize` | | X | Optimize CLAUDE.md structure with preview/backup | -| **claude-config-maintainer** | `/config-init` | | X | Initialize new CLAUDE.md for a project | -| **claude-config-maintainer** | `/config-diff` | | X | Track CLAUDE.md changes over time with behavioral impact | -| **claude-config-maintainer** | `/config-lint` | | X | Lint CLAUDE.md for anti-patterns and best practices | -| **claude-config-maintainer** | `/config-audit-settings` | | X | Audit settings.local.json permissions (100-point score) | -| **claude-config-maintainer** | `/config-optimize-settings` | | X | Optimize permissions (profiles, consolidation, dry-run) | -| **claude-config-maintainer** | `/config-permissions-map` | | X | Visual review layer + permission coverage map | -| **cmdb-assistant** | `/cmdb-setup` | | X | Setup wizard for NetBox MCP server | -| **cmdb-assistant** | `/cmdb-search` | | X | Search NetBox for devices, IPs, sites | -| **cmdb-assistant** | `/cmdb-device` | | X | Manage network devices (create, view, update, delete) | -| **cmdb-assistant** | `/cmdb-ip` | | X | Manage IP addresses and prefixes | -| **cmdb-assistant** | `/cmdb-site` | | X | Manage sites, locations, racks, and regions | -| **cmdb-assistant** | `/cmdb-audit` | | X | Data quality analysis (VMs, devices, naming, roles) | -| **cmdb-assistant** | `/cmdb-register` | | X | Register current machine into NetBox with running apps | -| **cmdb-assistant** | `/cmdb-sync` | | X | Sync machine state with NetBox (detect drift, update) | -| **cmdb-assistant** | 
`/cmdb-topology` | | X | Infrastructure topology diagrams (rack, network, site views) | -| **cmdb-assistant** | `/change-audit` | | X | NetBox audit trail queries with filtering | -| **cmdb-assistant** | `/ip-conflicts` | | X | Detect IP conflicts and overlapping prefixes | +| **claude-config-maintainer** | `/claude-config analyze` | | X | Analyze CLAUDE.md for optimization opportunities | +| **claude-config-maintainer** | `/claude-config optimize` | | X | Optimize CLAUDE.md structure with preview/backup | +| **claude-config-maintainer** | `/claude-config init` | | X | Initialize new CLAUDE.md for a project | +| **claude-config-maintainer** | `/claude-config diff` | | X | Track CLAUDE.md changes over time with behavioral impact | +| **claude-config-maintainer** | `/claude-config lint` | | X | Lint CLAUDE.md for anti-patterns and best practices | +| **claude-config-maintainer** | `/claude-config audit-settings` | | X | Audit settings.local.json permissions (100-point score) | +| **claude-config-maintainer** | `/claude-config optimize-settings` | | X | Optimize permissions (profiles, consolidation, dry-run) | +| **claude-config-maintainer** | `/claude-config permissions-map` | | X | Visual review layer + permission coverage map | +| **cmdb-assistant** | `/cmdb setup` | | X | Setup wizard for NetBox MCP server | +| **cmdb-assistant** | `/cmdb search` | | X | Search NetBox for devices, IPs, sites | +| **cmdb-assistant** | `/cmdb device` | | X | Manage network devices (create, view, update, delete) | +| **cmdb-assistant** | `/cmdb ip` | | X | Manage IP addresses and prefixes | +| **cmdb-assistant** | `/cmdb site` | | X | Manage sites, locations, racks, and regions | +| **cmdb-assistant** | `/cmdb audit` | | X | Data quality analysis (VMs, devices, naming, roles) | +| **cmdb-assistant** | `/cmdb register` | | X | Register current machine into NetBox with running apps | +| **cmdb-assistant** | `/cmdb sync` | | X | Sync machine state with NetBox (detect drift, update) | +| 
**cmdb-assistant** | `/cmdb topology` | | X | Infrastructure topology diagrams (rack, network, site views) | +| **cmdb-assistant** | `/cmdb change-audit` | | X | NetBox audit trail queries with filtering | +| **cmdb-assistant** | `/cmdb ip-conflicts` | | X | Detect IP conflicts and overlapping prefixes | | **project-hygiene** | `/hygiene check` | | X | Project file organization and cleanup check | -| **data-platform** | `/data-ingest` | | X | Load data from CSV, Parquet, JSON into DataFrame | -| **data-platform** | `/data-profile` | | X | Generate data profiling report with statistics | -| **data-platform** | `/data-schema` | | X | Explore database schemas, tables, columns | -| **data-platform** | `/data-explain` | | X | Explain query execution plan | -| **data-platform** | `/data-lineage` | | X | Show dbt model lineage and dependencies | -| **data-platform** | `/data-run` | | X | Run dbt models with validation | -| **data-platform** | `/lineage-viz` | | X | dbt lineage visualization as Mermaid diagrams | -| **data-platform** | `/dbt-test` | | X | Formatted dbt test runner with summary and failure details | -| **data-platform** | `/data-quality` | | X | DataFrame quality checks (nulls, duplicates, types, outliers) | -| **data-platform** | `/data-setup` | | X | Setup wizard for data-platform MCP servers | -| **viz-platform** | `/viz-setup` | | X | Setup wizard for viz-platform MCP server | -| **viz-platform** | `/viz-chart` | | X | Create Plotly charts with theme integration | -| **viz-platform** | `/viz-dashboard` | | X | Create dashboard layouts with filters and grids | -| **viz-platform** | `/viz-theme` | | X | Apply existing theme to visualizations | -| **viz-platform** | `/viz-theme-new` | | X | Create new custom theme with design tokens | -| **viz-platform** | `/viz-theme-css` | | X | Export theme as CSS custom properties | -| **viz-platform** | `/viz-component` | | X | Inspect DMC component props and validation | -| **viz-platform** | `/viz-chart-export` | | 
X | Export charts to PNG, SVG, PDF via kaleido | -| **viz-platform** | `/accessibility-check` | | X | Color blind validation (WCAG contrast ratios) | -| **viz-platform** | `/viz-breakpoints` | | X | Configure responsive layout breakpoints | -| **viz-platform** | `/design-review` | | X | Detailed design system audits | -| **viz-platform** | `/design-gate` | | X | Binary pass/fail design system validation gates | -| **data-platform** | `/data-review` | | X | Comprehensive data integrity audits | -| **data-platform** | `/data-gate` | | X | Binary pass/fail data integrity gates | -| **contract-validator** | `/validate-contracts` | | X | Full marketplace compatibility validation | -| **contract-validator** | `/check-agent` | | X | Validate single agent definition | -| **contract-validator** | `/list-interfaces` | | X | Show all plugin interfaces | -| **contract-validator** | `/dependency-graph` | | X | Mermaid visualization of plugin dependencies | -| **contract-validator** | `/cv-setup` | | X | Setup wizard for contract-validator MCP | +| **data-platform** | `/data ingest` | | X | Load data from CSV, Parquet, JSON into DataFrame | +| **data-platform** | `/data profile` | | X | Generate data profiling report with statistics | +| **data-platform** | `/data schema` | | X | Explore database schemas, tables, columns | +| **data-platform** | `/data explain` | | X | Explain query execution plan | +| **data-platform** | `/data lineage` | | X | Show dbt model lineage and dependencies | +| **data-platform** | `/data run` | | X | Run dbt models with validation | +| **data-platform** | `/data lineage-viz` | | X | dbt lineage visualization as Mermaid diagrams | +| **data-platform** | `/data dbt-test` | | X | Formatted dbt test runner with summary and failure details | +| **data-platform** | `/data quality` | | X | DataFrame quality checks (nulls, duplicates, types, outliers) | +| **data-platform** | `/data review` | | X | Comprehensive data integrity audits | +| **data-platform** | 
`/data gate` | | X | Binary pass/fail data integrity gates | +| **data-platform** | `/data setup` | | X | Setup wizard for data-platform MCP servers | +| **viz-platform** | `/viz setup` | | X | Setup wizard for viz-platform MCP server | +| **viz-platform** | `/viz chart` | | X | Create Plotly charts with theme integration | +| **viz-platform** | `/viz chart-export` | | X | Export charts to PNG, SVG, PDF via kaleido | +| **viz-platform** | `/viz dashboard` | | X | Create dashboard layouts with filters and grids | +| **viz-platform** | `/viz theme` | | X | Apply existing theme to visualizations | +| **viz-platform** | `/viz theme-new` | | X | Create new custom theme with design tokens | +| **viz-platform** | `/viz theme-css` | | X | Export theme as CSS custom properties | +| **viz-platform** | `/viz component` | | X | Inspect DMC component props and validation | +| **viz-platform** | `/viz accessibility-check` | | X | Color blind validation (WCAG contrast ratios) | +| **viz-platform** | `/viz breakpoints` | | X | Configure responsive layout breakpoints | +| **viz-platform** | `/viz design-review` | | X | Detailed design system audits | +| **viz-platform** | `/viz design-gate` | | X | Binary pass/fail design system validation gates | +| **contract-validator** | `/cv validate` | | X | Full marketplace compatibility validation | +| **contract-validator** | `/cv check-agent` | | X | Validate single agent definition | +| **contract-validator** | `/cv list-interfaces` | | X | Show all plugin interfaces | +| **contract-validator** | `/cv dependency-graph` | | X | Mermaid visualization of plugin dependencies | +| **contract-validator** | `/cv setup` | | X | Setup wizard for contract-validator MCP | | **contract-validator** | `/cv status` | | X | Marketplace-wide health check (installation, MCP, configuration) | --- +## Migration from v8.x + +All commands were renamed in v9.0.0 to follow `/<plugin> <command>` pattern. 
See [MIGRATION-v9.md](./MIGRATION-v9.md) for the complete old-to-new mapping. + +--- + ## Plugins by Category | Category | Plugins | Primary Use | |----------|---------|-------------| -| **Setup** | projman, pr-review, cmdb-assistant, data-platform, viz-platform, contract-validator | `/pm-setup`, `/pr-setup`, `/cmdb-setup`, `/data-setup`, `/viz-setup`, `/cv-setup` | +| **Setup** | projman, pr-review, cmdb-assistant, data-platform, viz-platform, contract-validator | `/projman setup`, `/pr setup`, `/cmdb setup`, `/data setup`, `/viz setup`, `/cv setup` | | **Task Planning** | projman, clarity-assist | Sprint management, requirement clarification | | **Code Quality** | code-sentinel, pr-review | Security scanning, PR reviews | | **Documentation** | doc-guardian, claude-config-maintainer | Doc sync, CLAUDE.md maintenance | @@ -139,15 +138,15 @@ Quick reference for all commands in the Leo Claude Marketplace. Full workflow from idea to implementation using RFCs: ``` -1. /clarify # Clarify the feature idea -2. /rfc create # Create RFC from clarified spec +1. /clarity clarify # Clarify the feature idea +2. /rfc create # Create RFC from clarified spec ... refine RFC content ... -3. /rfc review 0001 # Submit RFC for review +3. /rfc review 0001 # Submit RFC for review ... review discussion ... -4. /rfc approve 0001 # Approve RFC for implementation -5. /sprint-plan # Select approved RFC for sprint +4. /rfc approve 0001 # Approve RFC for implementation +5. /sprint plan # Select approved RFC for sprint ... implement feature ... -6. /sprint-close # Complete sprint, RFC marked Implemented +6. /sprint close # Complete sprint, RFC marked Implemented ``` ### Example 1: Starting a New Feature Sprint @@ -155,17 +154,17 @@ Full workflow from idea to implementation using RFCs: A typical workflow for planning and executing a feature sprint: ``` -1. /clarify # Clarify requirements if vague -2. /sprint-plan # Plan the sprint with architecture analysis -3. 
/labels-sync # Ensure labels are up-to-date -4. /sprint-start # Begin execution with dependency ordering -5. /branch-start feat/... # Create feature branch +1. /clarity clarify # Clarify requirements if vague +2. /sprint plan # Plan the sprint with architecture analysis +3. /labels sync # Ensure labels are up-to-date +4. /sprint start # Begin execution with dependency ordering +5. /gitflow branch-start feat/... # Create feature branch ... implement features ... -6. /git-commit # Commit with conventional message -7. /sprint-status --diagram # Check progress with visualization -8. /pm-review # Pre-close quality review -9. /pm-test run # Verify test coverage -10. /sprint-close # Capture lessons learned +6. /gitflow commit # Commit with conventional message +7. /sprint status --diagram # Check progress with visualization +8. /sprint review # Pre-close quality review +9. /sprint test run # Verify test coverage +10. /sprint close # Capture lessons learned ``` ### Example 2: Daily Development Cycle @@ -173,12 +172,12 @@ A typical workflow for planning and executing a feature sprint: Quick daily workflow with git-flow: ``` -1. /git-status # Check current state -2. /branch-start fix/... # Start bugfix branch +1. /gitflow status # Check current state +2. /gitflow branch-start fix/... # Start bugfix branch ... make changes ... -3. /git-commit # Auto-generate commit message -4. /git-commit-push # Push to remote -5. /branch-cleanup # Clean merged branches +3. /gitflow commit # Auto-generate commit message +4. /gitflow commit --push # Commit and push to remote +5. /gitflow branch-cleanup # Clean merged branches ``` ### Example 3: Pull Request Review Workflow @@ -186,10 +185,10 @@ Quick daily workflow with git-flow: Reviewing a PR before merge: ``` -1. /pr-summary # Quick overview of changes -2. /pr-review # Full multi-agent review -3. /pr-findings # Filter findings by severity -4. /security-scan # Deep security audit if needed +1. /pr summary # Quick overview of changes +2. 
/pr review # Full multi-agent review +3. /pr findings # Filter findings by severity +4. /sentinel scan # Deep security audit if needed ``` ### Example 4: Documentation Maintenance @@ -197,10 +196,10 @@ Reviewing a PR before merge: Keeping docs in sync: ``` -1. /doc-audit # Scan for documentation drift -2. /doc-sync # Apply pending updates -3. /config-analyze # Check CLAUDE.md health -4. /config-optimize # Optimize if needed +1. /doc audit # Scan for documentation drift +2. /doc sync # Apply pending updates +3. /claude-config analyze # Check CLAUDE.md health +4. /claude-config optimize # Optimize if needed ``` ### Example 5: Code Refactoring Session @@ -208,11 +207,11 @@ Keeping docs in sync: Safe refactoring with preview: ``` -1. /refactor-dry # Preview opportunities -2. /security-scan # Baseline security check -3. /refactor # Apply improvements -4. /pm-test run # Verify nothing broke -5. /git-commit # Commit with descriptive message +1. /sentinel refactor-dry # Preview opportunities +2. /sentinel scan # Baseline security check +3. /sentinel refactor # Apply improvements +4. /sprint test run # Verify nothing broke +5. /gitflow commit # Commit with descriptive message ``` ### Example 6: Infrastructure Documentation @@ -220,10 +219,10 @@ Safe refactoring with preview: Managing infrastructure with CMDB: ``` -1. /cmdb-search "server" # Find existing devices -2. /cmdb-device view X # Check device details -3. /cmdb-ip list # List available IPs -4. /cmdb-site view Y # Check site info +1. /cmdb search "server" # Find existing devices +2. /cmdb device view X # Check device details +3. /cmdb ip list # List available IPs +4. /cmdb site view Y # Check site info ``` ### Example 6b: Data Engineering Workflow @@ -231,12 +230,12 @@ Managing infrastructure with CMDB: Working with data pipelines: ``` -1. /data-ingest file.csv # Load data into DataFrame -2. /data-profile # Generate data profiling report -3. /data-schema # Explore database schemas -4. 
/data-lineage model_name # View dbt model dependencies -5. /data-run model_name # Execute dbt models -6. /data-explain "SELECT ..." # Analyze query execution plan +1. /data ingest file.csv # Load data into DataFrame +2. /data profile # Generate data profiling report +3. /data schema # Explore database schemas +4. /data lineage model_name # View dbt model dependencies +5. /data run model_name # Execute dbt models +6. /data explain "SELECT ..." # Analyze query execution plan ``` ### Example 7: First-Time Setup (New Machine) @@ -244,13 +243,13 @@ Working with data pipelines: Setting up the marketplace for the first time: ``` -1. /pm-setup --full # Full setup: MCP + system config + project +1. /projman setup --full # Full setup: MCP + system config + project # → Follow prompts for Gitea URL, org # → Add token manually when prompted # → Confirm repository name 2. # Restart Claude Code session -3. /labels-sync # Sync Gitea labels -4. /sprint-plan # Plan first sprint +3. /labels sync # Sync Gitea labels +4. /sprint plan # Plan first sprint ``` ### Example 8: New Project Setup (System Already Configured) @@ -258,11 +257,11 @@ Setting up the marketplace for the first time: Adding a new project when system config exists: ``` -1. /pm-setup --quick # Quick project setup (auto-detected) +1. /projman setup --quick # Quick project setup (auto-detected) # → Confirms detected repo name # → Creates .env -2. /labels-sync # Sync Gitea labels -3. /sprint-plan # Plan first sprint +2. /labels sync # Sync Gitea labels +3. 
/sprint plan # Plan first sprint ``` --- @@ -270,10 +269,11 @@ Adding a new project when system config exists: ## Quick Tips - **Hooks run automatically** - code-sentinel and git-flow protect you without manual invocation -- **Use `/git-commit` over `git commit`** - generates better commit messages following conventions -- **Run `/pm-review` before `/sprint-close`** - catches issues before closing the sprint -- **Use `/clarify` for vague requests** - especially helpful for complex requirements -- **`/refactor-dry` is safe** - always preview before applying refactoring changes +- **Use `/gitflow commit` over `git commit`** - generates better commit messages following conventions +- **Run `/sprint review` before `/sprint close`** - catches issues before closing the sprint +- **Use `/clarity clarify` for vague requests** - especially helpful for complex requirements +- **`/sentinel refactor-dry` is safe** - always preview before applying refactoring changes +- **`/gitflow commit --push`** replaces the old `/git-commit-push` - fewer commands to remember --- diff --git a/docs/CONFIGURATION.md b/docs/CONFIGURATION.md index a838496..5eb27cd 100644 --- a/docs/CONFIGURATION.md +++ b/docs/CONFIGURATION.md @@ -9,7 +9,7 @@ Centralized configuration documentation for all plugins and MCP servers in the L **After installing the marketplace and plugins via Claude Code:** ``` -/pm-setup +/projman setup ``` The interactive wizard auto-detects what's needed and handles everything except manually adding your API tokens. 
@@ -25,8 +25,8 @@ The interactive wizard auto-detects what's needed and handles everything except └─────────────────────────────────────────────────────────────────────────────┘ │ ▼ - /pm-setup --full - (or /pm-setup auto-detects) + /projman setup --full + (or /projman setup auto-detects) │ ┌──────────────────────────────┼──────────────────────────────┐ ▼ ▼ ▼ @@ -79,7 +79,7 @@ The interactive wizard auto-detects what's needed and handles everything except │ ┌───────────────┴───────────────┐ ▼ ▼ - /pm-setup --quick /pm-setup + /projman setup --quick /projman setup (explicit mode) (auto-detects mode) │ │ │ ┌──────────┴──────────┐ @@ -109,7 +109,7 @@ The interactive wizard auto-detects what's needed and handles everything except ## What Runs Automatically vs User Interaction -### `/pm-setup --full` - Full Setup +### `/projman setup --full` - Full Setup | Phase | Type | What Happens | |-------|------|--------------| @@ -121,7 +121,7 @@ The interactive wizard auto-detects what's needed and handles everything except | **6. Project Config** | Automated | Creates `.env` file, checks `.gitignore` | | **7. 
Validation** | Automated | Tests API connectivity, shows summary | -### `/pm-setup --quick` - Quick Project Setup +### `/projman setup --quick` - Quick Project Setup | Phase | Type | What Happens | |-------|------|--------------| @@ -136,10 +136,10 @@ The interactive wizard auto-detects what's needed and handles everything except | Mode | When to Use | What It Does | |------|-------------|--------------| -| `/pm-setup` | Any time | Auto-detects: runs full, quick, or sync as needed | -| `/pm-setup --full` | First time on a machine | Full setup: MCP server + system config + project config | -| `/pm-setup --quick` | Starting a new project | Quick setup: project config only (assumes system is ready) | -| `/pm-setup --sync` | After repo move/rename | Updates .env to match current git remote | +| `/projman setup` | Any time | Auto-detects: runs full, quick, or sync as needed | +| `/projman setup --full` | First time on a machine | Full setup: MCP server + system config + project config | +| `/projman setup --quick` | Starting a new project | Quick setup: project config only (assumes system is ready) | +| `/projman setup --sync` | After repo move/rename | Updates .env to match current git remote | **Auto-detection logic:** 1. No system config → **full** mode @@ -148,9 +148,9 @@ The interactive wizard auto-detects what's needed and handles everything except 4. Both exist, match → already configured, offer to reconfigure **Typical workflow:** -1. Install plugin → run `/pm-setup` (auto-runs full mode) -2. Start new project → run `/pm-setup` (auto-runs quick mode) -3. Repository moved? → run `/pm-setup` (auto-runs sync mode) +1. Install plugin → run `/projman setup` (auto-runs full mode) +2. Start new project → run `/projman setup` (auto-runs quick mode) +3. Repository moved? 
→ run `/projman setup` (auto-runs sync mode) --- @@ -182,7 +182,7 @@ This marketplace uses a **hybrid configuration** approach: **Benefits:** - Single token per service (update once, use everywhere) -- Easy multi-project setup (just run `/pm-setup` in each project) +- Easy multi-project setup (just run `/projman setup` in each project) - Security (tokens never committed to git, never typed into AI chat) - Project isolation (each project can override defaults) @@ -190,7 +190,7 @@ This marketplace uses a **hybrid configuration** approach: ## Prerequisites -Before running `/pm-setup`: +Before running `/projman setup`: 1. **Python 3.10+** installed ```bash @@ -213,7 +213,7 @@ Before running `/pm-setup`: Run the setup wizard in Claude Code: ``` -/pm-setup +/projman setup ``` The wizard will guide you through each step interactively and auto-detect the appropriate mode. @@ -387,18 +387,18 @@ PR_REVIEW_AUTO_SUBMIT=false | Plugin | System Config | Project Config | Setup Command | |--------|---------------|----------------|---------------| -| **projman** | gitea.env | .env (GITEA_REPO=owner/repo) | `/pm-setup` | -| **pr-review** | gitea.env | .env (GITEA_REPO=owner/repo) | `/pr-setup` | +| **projman** | gitea.env | .env (GITEA_REPO=owner/repo) | `/projman setup` | +| **pr-review** | gitea.env | .env (GITEA_REPO=owner/repo) | `/pr setup` | | **git-flow** | git-flow.env (optional) | .env (optional) | None needed | | **clarity-assist** | None | None | None needed | -| **cmdb-assistant** | netbox.env | None | `/cmdb-setup` | -| **data-platform** | postgres.env | .env (optional) | `/data-setup` | -| **viz-platform** | None | .env (optional DMC_VERSION) | `/viz-setup` | +| **cmdb-assistant** | netbox.env | None | `/cmdb setup` | +| **data-platform** | postgres.env | .env (optional) | `/data setup` | +| **viz-platform** | None | .env (optional DMC_VERSION) | `/viz setup` | | **doc-guardian** | None | None | None needed | | **code-sentinel** | None | None | None needed | | 
**project-hygiene** | None | None | None needed | | **claude-config-maintainer** | None | None | None needed | -| **contract-validator** | None | None | `/cv-setup` | +| **contract-validator** | None | None | `/cv setup` | --- @@ -408,7 +408,7 @@ Once system-level config is set up, adding new projects is simple: ``` cd ~/projects/new-project -/pm-setup +/projman setup ``` The command auto-detects that system config exists and runs quick project setup. @@ -631,7 +631,7 @@ For agents with 8+ skills, use **phase-based loading** in the agent body text. T ### API Validation -When running `/pm-setup`, the command: +When running `/projman setup`, the command: 1. **Detects** organization and repository from git remote URL 2. **Validates** via Gitea API: `GET /api/v1/repos/{org}/{repo}` @@ -646,7 +646,7 @@ When you start a Claude Code session, a hook automatically: 1. Reads `GITEA_REPO` (in `owner/repo` format) from `.env` 2. Compares with current `git remote get-url origin` -3. **Warns** if mismatch detected: "Repository location mismatch. Run `/pm-setup --sync` to update." +3. **Warns** if mismatch detected: "Repository location mismatch. Run `/projman setup --sync` to update." This helps when you: - Move a repository to a different organization @@ -668,7 +668,7 @@ curl -H "Authorization: token $GITEA_API_TOKEN" "$GITEA_API_URL/user" In Claude Code, after restarting your session: ``` -/labels-sync +/labels sync ``` If this works, your setup is complete. @@ -741,7 +741,7 @@ cat .env 3. **Never type tokens into AI chat** - Always edit config files directly in your editor - - The `/pm-setup` wizard respects this + - The `/projman setup` wizard respects this 4. 
**Rotate tokens periodically** - Every 6-12 months diff --git a/docs/MIGRATION-v9.md b/docs/MIGRATION-v9.md new file mode 100644 index 0000000..a285685 --- /dev/null +++ b/docs/MIGRATION-v9.md @@ -0,0 +1,215 @@ +# Migration Guide: v8.x → v9.0.0 + +## Overview + +v9.0.0 standardizes all commands to the `/ ` sub-command pattern. Every command in the marketplace now follows this convention. + +**Breaking change:** All old command names are removed. Update your workflows, scripts, and CLAUDE.md references. + +--- + +## Complete Command Mapping + +### projman + +| Old (v8.x) | New (v9.0.0) | Notes | +|-------------|--------------|-------| +| `/sprint-plan` | `/sprint plan` | | +| `/sprint-start` | `/sprint start` | | +| `/sprint-status` | `/sprint status` | | +| `/sprint-close` | `/sprint close` | | +| `/pm-review` | `/sprint review` | Moved under `/sprint` | +| `/pm-test` | `/sprint test` | Moved under `/sprint` | +| `/pm-setup` | `/projman setup` | Moved under `/projman` | +| `/pm-debug` | `/projman debug` | Moved under `/projman` | +| `/labels-sync` | `/labels sync` | | +| `/suggest-version` | `/projman suggest-version` | Moved under `/projman` | +| `/proposal-status` | `/projman proposal-status` | Moved under `/projman` | +| `/rfc ` | `/rfc ` | Unchanged | +| `/project ` | `/project ` | Unchanged | +| `/adr ` | `/adr ` | Unchanged | + +### git-flow + +| Old (v8.x) | New (v9.0.0) | Notes | +|-------------|--------------|-------| +| `/git-commit` | `/gitflow commit` | | +| `/git-commit-push` | `/gitflow commit --push` | **Consolidated** into flag | +| `/git-commit-merge` | `/gitflow commit --merge` | **Consolidated** into flag | +| `/git-commit-sync` | `/gitflow commit --sync` | **Consolidated** into flag | +| `/branch-start` | `/gitflow branch-start` | | +| `/branch-cleanup` | `/gitflow branch-cleanup` | | +| `/git-status` | `/gitflow status` | | +| `/git-config` | `/gitflow config` | | + +**Note:** The three commit variants (`-push`, `-merge`, `-sync`) are now flags 
on `/gitflow commit`. This reduces 8 commands to 5. + +### pr-review + +| Old (v8.x) | New (v9.0.0) | Notes | +|-------------|--------------|-------| +| `/pr-review` | `/pr review` | | +| `/pr-summary` | `/pr summary` | | +| `/pr-findings` | `/pr findings` | | +| `/pr-diff` | `/pr diff` | | +| `/pr-setup` | `/pr setup` | | +| `/project-init` | `/pr init` | Renamed | +| `/project-sync` | `/pr sync` | Renamed | + +### clarity-assist + +| Old (v8.x) | New (v9.0.0) | Notes | +|-------------|--------------|-------| +| `/clarify` | `/clarity clarify` | | +| `/quick-clarify` | `/clarity quick-clarify` | | + +### doc-guardian + +| Old (v8.x) | New (v9.0.0) | Notes | +|-------------|--------------|-------| +| `/doc-audit` | `/doc audit` | | +| `/doc-sync` | `/doc sync` | | +| `/changelog-gen` | `/doc changelog-gen` | Moved under `/doc` | +| `/doc-coverage` | `/doc coverage` | | +| `/stale-docs` | `/doc stale-docs` | Moved under `/doc` | + +### code-sentinel + +| Old (v8.x) | New (v9.0.0) | Notes | +|-------------|--------------|-------| +| `/security-scan` | `/sentinel scan` | | +| `/refactor` | `/sentinel refactor` | | +| `/refactor-dry` | `/sentinel refactor-dry` | | + +### claude-config-maintainer + +| Old (v8.x) | New (v9.0.0) | Notes | +|-------------|--------------|-------| +| `/config-analyze` (or `/analyze`) | `/claude-config analyze` | | +| `/config-optimize` (or `/optimize`) | `/claude-config optimize` | | +| `/config-init` (or `/init`) | `/claude-config init` | | +| `/config-diff` | `/claude-config diff` | | +| `/config-lint` | `/claude-config lint` | | +| `/config-audit-settings` | `/claude-config audit-settings` | | +| `/config-optimize-settings` | `/claude-config optimize-settings` | | +| `/config-permissions-map` | `/claude-config permissions-map` | | + +### contract-validator + +| Old (v8.x) | New (v9.0.0) | Notes | +|-------------|--------------|-------| +| `/validate-contracts` | `/cv validate` | | +| `/check-agent` | `/cv check-agent` | | +| 
`/list-interfaces` | `/cv list-interfaces` | | +| `/dependency-graph` | `/cv dependency-graph` | | +| `/cv-setup` | `/cv setup` | | +| `/cv status` | `/cv status` | Unchanged | + +### cmdb-assistant + +| Old (v8.x) | New (v9.0.0) | Notes | +|-------------|--------------|-------| +| `/cmdb-setup` | `/cmdb setup` | | +| `/cmdb-search` | `/cmdb search` | | +| `/cmdb-device` | `/cmdb device` | | +| `/cmdb-ip` | `/cmdb ip` | | +| `/cmdb-site` | `/cmdb site` | | +| `/cmdb-audit` | `/cmdb audit` | | +| `/cmdb-register` | `/cmdb register` | | +| `/cmdb-sync` | `/cmdb sync` | | +| `/cmdb-topology` | `/cmdb topology` | | +| `/change-audit` | `/cmdb change-audit` | Moved under `/cmdb` | +| `/ip-conflicts` | `/cmdb ip-conflicts` | Moved under `/cmdb` | + +### data-platform + +| Old (v8.x) | New (v9.0.0) | Notes | +|-------------|--------------|-------| +| `/data-ingest` | `/data ingest` | | +| `/data-profile` | `/data profile` | | +| `/data-schema` | `/data schema` | | +| `/data-explain` | `/data explain` | | +| `/data-lineage` | `/data lineage` | | +| `/data-run` | `/data run` | | +| `/lineage-viz` | `/data lineage-viz` | Moved under `/data` | +| `/dbt-test` | `/data dbt-test` | Moved under `/data` | +| `/data-quality` | `/data quality` | | +| `/data-review` | `/data review` | | +| `/data-gate` | `/data gate` | | +| `/data-setup` | `/data setup` | | + +### viz-platform + +| Old (v8.x) | New (v9.0.0) | Notes | +|-------------|--------------|-------| +| `/viz-setup` | `/viz setup` | | +| `/viz-chart` | `/viz chart` | | +| `/viz-chart-export` | `/viz chart-export` | | +| `/viz-dashboard` | `/viz dashboard` | | +| `/viz-theme` | `/viz theme` | | +| `/viz-theme-new` | `/viz theme-new` | | +| `/viz-theme-css` | `/viz theme-css` | | +| `/viz-component` | `/viz component` | | +| `/accessibility-check` | `/viz accessibility-check` | Moved under `/viz` | +| `/viz-breakpoints` | `/viz breakpoints` | | +| `/design-review` | `/viz design-review` | Moved under `/viz` | +| `/design-gate` | 
`/viz design-gate` | Moved under `/viz` | + +### project-hygiene + +No changes — already used `/ ` pattern. + +| Command | Status | +|---------|--------| +| `/hygiene check` | Unchanged | + +--- + +## Verifying Plugin Installation (v9.0.0) + +Test commands use the new format: + +| Plugin | Test Command | +|--------|--------------| +| git-flow | `/git-flow:gitflow-status` | +| projman | `/projman:sprint-status` | +| pr-review | `/pr-review:pr-summary` | +| clarity-assist | `/clarity-assist:clarity-clarify` | +| doc-guardian | `/doc-guardian:doc-audit` | +| code-sentinel | `/code-sentinel:sentinel-scan` | +| claude-config-maintainer | `/claude-config-maintainer:claude-config-analyze` | +| cmdb-assistant | `/cmdb-assistant:cmdb-search` | +| data-platform | `/data-platform:data-ingest` | +| viz-platform | `/viz-platform:viz-chart` | +| contract-validator | `/contract-validator:cv-validate` | + +--- + +## CLAUDE.md Updates + +If your project's CLAUDE.md references old command names, update them: + +**Find old references:** +```bash +grep -rn '/sprint-plan\|/pm-setup\|/git-commit\|/pr-review\|/security-scan\|/config-analyze\|/validate-contracts\|/cmdb-search\|/data-ingest\|/viz-chart\b\|/clarify\b\|/doc-audit' CLAUDE.md +``` + +**Key patterns to search and replace:** +- `/sprint-plan` → `/sprint plan` +- `/pm-setup` → `/projman setup` +- `/pm-review` → `/sprint review` +- `/git-commit` → `/gitflow commit` +- `/pr-review` → `/pr review` +- `/security-scan` → `/sentinel scan` +- `/refactor` → `/sentinel refactor` +- `/config-analyze` → `/claude-config analyze` +- `/validate-contracts` → `/cv validate` +- `/clarify` → `/clarity clarify` +- `/doc-audit` → `/doc audit` +- `/cmdb-search` → `/cmdb search` +- `/data-ingest` → `/data ingest` +- `/viz-chart` → `/viz chart` + +--- + +*Last Updated: 2026-02-06* diff --git a/docs/UPDATING.md b/docs/UPDATING.md index 1375b72..39d8b46 100644 --- a/docs/UPDATING.md +++ b/docs/UPDATING.md @@ -48,7 +48,7 @@ cd 
~/.claude/plugins/marketplaces/leo-claude-mktplace && ./scripts/setup.sh ### When to Re-run Setup -You typically **don't need** to re-run setup after updates. However, re-run your plugin's setup command (e.g., `/pm-setup`, `/pr-setup`, `/cmdb-setup`) if: +You typically **don't need** to re-run setup after updates. However, re-run your plugin's setup command (e.g., `/projman setup`, `/pr setup`, `/cmdb setup`) if: - Changelog mentions **new required environment variables** - Changelog mentions **breaking changes** to configuration @@ -59,7 +59,7 @@ You typically **don't need** to re-run setup after updates. However, re-run your If an update requires new project-level configuration: ``` -/project-init +/pr init ``` This will detect existing settings and only add what's missing. @@ -98,8 +98,8 @@ When updating, review if changes affect the setup workflow: 1. **Check for setup command changes:** ```bash git diff HEAD~1 plugins/*/commands/*-setup.md - git diff HEAD~1 plugins/*/commands/project-init.md - git diff HEAD~1 plugins/*/commands/project-sync.md + git diff HEAD~1 plugins/*/commands/pr-init.md + git diff HEAD~1 plugins/*/commands/pr-sync.md ``` 2. **Check for hook changes:** @@ -114,7 +114,7 @@ When updating, review if changes affect the setup workflow: **If setup commands changed:** - Review what's new (new validation steps, new prompts, etc.) 
-- Consider re-running your plugin's setup command or `/project-init` to benefit from improvements +- Consider re-running your plugin's setup command or `/pr init` to benefit from improvements - Existing configurations remain valid unless changelog notes breaking changes **If hooks changed:** @@ -123,7 +123,7 @@ When updating, review if changes affect the setup workflow: **If configuration structure changed:** - Check if new variables are required -- Run `/project-sync` if repository detection logic improved +- Run `/pr sync` if repository detection logic improved --- @@ -142,7 +142,7 @@ deactivate ### Configuration no longer works 1. Check CHANGELOG.md for breaking changes -2. Run your plugin's setup command (e.g., `/pm-setup`) to re-validate and fix configuration +2. Run your plugin's setup command (e.g., `/projman setup`) to re-validate and fix configuration 3. Compare your config files with documentation in `docs/CONFIGURATION.md` ### MCP server won't start after update diff --git a/docs/architecture/agent-workflow.spec.md b/docs/architecture/agent-workflow.spec.md index a1db283..1fc54c9 100644 --- a/docs/architecture/agent-workflow.spec.md +++ b/docs/architecture/agent-workflow.spec.md @@ -27,7 +27,7 @@ | ID | Label | Type | Lane | Sequence | |----|-------|------|------|----------| -| p1-start | /sprint-plan | rounded-rect | user-lane | 1 | +| p1-start | /sprint plan | rounded-rect | user-lane | 1 | | p1-activate | Planner Activates | rectangle | planner-lane | 2 | | p1-search-lessons | Search Lessons Learned | rectangle | planner-lane | 3 | | p1-gitea-wiki-query | Query Past Lessons (Wiki) | rectangle | gitea-lane | 4 | @@ -61,7 +61,7 @@ | ID | Label | Type | Lane | Sequence | |----|-------|------|------|----------| -| p2-start | /sprint-start | rounded-rect | user-lane | 11 | +| p2-start | /sprint start | rounded-rect | user-lane | 11 | | p2-orch-activate | Orchestrator Activates | rectangle | orchestrator-lane | 12 | | p2-fetch-issues | Fetch Sprint Issues 
| rectangle | orchestrator-lane | 13 | | p2-gitea-list | List Open Issues | rectangle | gitea-lane | 14 | @@ -128,7 +128,7 @@ | ID | Label | Type | Lane | Sequence | |----|-------|------|------|----------| -| p3-start | /sprint-close | rounded-rect | user-lane | 31 | +| p3-start | /sprint close | rounded-rect | user-lane | 31 | | p3-orch-activate | Orchestrator Activates | rectangle | orchestrator-lane | 32 | | p3-review | Review Sprint | rectangle | orchestrator-lane | 33 | | p3-gitea-status | Get Final Status | rectangle | gitea-lane | 34 | diff --git a/docs/designs/data-seed.md b/docs/designs/data-seed.md new file mode 100644 index 0000000..e15439c --- /dev/null +++ b/docs/designs/data-seed.md @@ -0,0 +1,70 @@ +# Design: data-seed + +**Domain:** `data` +**Target Version:** v9.3.0 + +## Purpose + +Test data generation and database seeding. Generates realistic fake data based on schema definitions, supports reproducible seeds, and manages seed files for development and testing environments. 
+ +## Target Users + +- Developers needing test data for local development +- QA teams requiring reproducible datasets +- Projects with complex relational data models + +## Commands + +| Command | Description | +|---------|-------------| +| `/seed setup` | Setup wizard — detect schema source, configure output paths | +| `/seed generate` | Generate seed data from schema or model definitions | +| `/seed apply` | Apply seed data to database or create fixture files | +| `/seed profile` | Define reusable data profiles (small, medium, large, edge-cases) | +| `/seed validate` | Validate seed data against schema constraints and foreign keys | + +## Agent Architecture + +| Agent | Model | Mode | Role | +|-------|-------|------|------| +| `seed-generator` | sonnet | acceptEdits | Data generation, profile management | +| `seed-validator` | haiku | plan | Read-only validation of seed data integrity | + +## Skills + +| Skill | Purpose | +|-------|---------| +| `schema-inference` | Infer data types and constraints from models/migrations | +| `faker-patterns` | Realistic data generation patterns (names, emails, addresses, etc.) | +| `relationship-resolution` | Foreign key and relationship-aware data generation | +| `profile-management` | Seed profile definitions and sizing | +| `visual-header` | Standard command output headers | + +## MCP Server + +**Not required.** Seed data is generated as files (JSON, SQL, CSV). Database insertion is handled by the application's own tooling. 
+ +## Integration Points + +| Plugin | Integration | +|--------|-------------| +| saas-db-migrate | Schema models used as seed generation input | +| data-platform | Generated data can be loaded via `/data ingest` | +| saas-test-pilot | Seed data used in integration test fixtures | +| projman | Issue labels: `Component/Data`, `Tech/Faker` | + +## Token Budget + +| Component | Estimated Tokens | +|-----------|-----------------| +| `claude-md-integration.md` | ~500 | +| Dispatch file (`seed.md`) | ~200 | +| 5 commands (avg) | ~3,000 | +| 2 agents | ~1,000 | +| 5 skills | ~2,000 | +| **Total** | **~6,700** | + +## Open Questions + +- Should we support database-specific seed formats (pg_dump, mysqldump)? +- Integration with Faker library or custom generation? diff --git a/docs/designs/debug-mcp.md b/docs/designs/debug-mcp.md new file mode 100644 index 0000000..78538e1 --- /dev/null +++ b/docs/designs/debug-mcp.md @@ -0,0 +1,70 @@ +# Design: debug-mcp + +**Domain:** `debug` +**Target Version:** v9.8.0 + +## Purpose + +MCP server debugging and development toolkit. Provides tools for inspecting MCP server health, testing tool calls, viewing server logs, and developing new MCP servers. Essential for marketplace developers building or troubleshooting MCP integrations. 
+ +## Target Users + +- Plugin developers building MCP servers +- Users troubleshooting MCP connectivity issues +- Marketplace maintainers validating MCP configurations + +## Commands + +| Command | Description | +|---------|-------------| +| `/debug-mcp status` | Show all MCP servers: running/failed, tool count, last error | +| `/debug-mcp test` | Test a specific MCP tool call with sample input | +| `/debug-mcp logs` | View recent MCP server stderr/stdout logs | +| `/debug-mcp inspect` | Inspect MCP server config (.mcp.json entry, venv, dependencies) | +| `/debug-mcp scaffold` | Generate MCP server skeleton (Python, stdio transport) | + +## Agent Architecture + +| Agent | Model | Mode | Role | +|-------|-------|------|------| +| `mcp-debugger` | sonnet | default | Server inspection, log analysis, scaffold generation | + +Single agent is sufficient — this plugin is primarily diagnostic with one generative command. + +## Skills + +| Skill | Purpose | +|-------|---------| +| `mcp-protocol` | MCP stdio protocol, tool/resource/prompt schemas | +| `server-patterns` | Python MCP server patterns (FastMCP, raw protocol) | +| `venv-diagnostics` | Virtual environment health checks, dependency validation | +| `log-analysis` | MCP server error pattern recognition | +| `visual-header` | Standard command output headers | + +## MCP Server + +**Not required.** This plugin inspects other MCP servers via file system (reading .mcp.json, checking venvs, reading logs). It does not need its own MCP server. 
+ +## Integration Points + +| Plugin | Integration | +|--------|-------------| +| contract-validator | `/cv status` delegates to debug-mcp for detailed MCP diagnostics | +| projman | `/projman setup` can invoke `/debug-mcp status` for post-setup verification | +| All plugins with MCP | Debug-mcp can diagnose any MCP server in the marketplace | + +## Token Budget + +| Component | Estimated Tokens | +|-----------|-----------------| +| `claude-md-integration.md` | ~500 | +| Dispatch file (`debug-mcp.md`) | ~200 | +| 5 commands (avg) | ~3,000 | +| 1 agent | ~600 | +| 5 skills | ~2,000 | +| **Total** | **~6,300** | + +## Open Questions + +- Should this plugin have a hook that auto-runs on MCP failure (SessionStart)? +- Should `/debug-mcp scaffold` generate both Python and TypeScript templates? diff --git a/docs/designs/ops-deploy-pipeline.md b/docs/designs/ops-deploy-pipeline.md new file mode 100644 index 0000000..09c7d8a --- /dev/null +++ b/docs/designs/ops-deploy-pipeline.md @@ -0,0 +1,72 @@ +# Design: ops-deploy-pipeline + +**Domain:** `ops` +**Target Version:** v9.7.0 + +## Purpose + +CI/CD deployment pipeline management for Docker Compose and systemd-based services. Generates deployment configurations, validates pipeline definitions, and manages environment-specific settings. Tailored for self-hosted infrastructure (not cloud-native). + +## Target Users + +- Self-hosted service operators (Raspberry Pi, VPS, bare-metal) +- Teams deploying via Docker Compose +- Projects needing environment-specific configuration management + +## Commands + +| Command | Description | +|---------|-------------| +| `/deploy setup` | Setup wizard — detect deployment method, configure targets | +| `/deploy generate` | Generate docker-compose.yml, Caddyfile entries, systemd units | +| `/deploy validate` | Validate deployment configs (ports, volumes, networks, env vars) | +| `/deploy env` | Manage environment-specific config files (.env.production, etc.) 
| +| `/deploy check` | Pre-deployment health check (disk, memory, port conflicts) | +| `/deploy rollback` | Generate rollback plan for a deployment | + +## Agent Architecture + +| Agent | Model | Mode | Role | +|-------|-------|------|------| +| `deploy-planner` | sonnet | default | Configuration generation, rollback planning | +| `deploy-validator` | haiku | plan | Read-only validation of configs and pre-flight checks | + +## Skills + +| Skill | Purpose | +|-------|---------| +| `compose-patterns` | Docker Compose best practices, multi-service patterns | +| `caddy-conventions` | Caddyfile reverse proxy patterns, subdomain routing | +| `env-management` | Environment variable management across environments | +| `health-checks` | Pre-deployment system health validation | +| `rollback-patterns` | Deployment rollback strategies | +| `visual-header` | Standard command output headers | + +## MCP Server + +**Not required initially.** Could add SSH-based remote execution MCP server in the future for remote deployment. + +## Integration Points + +| Plugin | Integration | +|--------|-------------| +| cmdb-assistant | Deployment targets pulled from NetBox device inventory | +| ops-release-manager | Release tags trigger deployment preparation | +| projman | Issue labels: `Component/Infra`, `Tech/Docker`, `Tech/Caddy` | +| code-sentinel | Security scan of deployment configs (exposed ports, secrets in env) | + +## Token Budget + +| Component | Estimated Tokens | +|-----------|-----------------| +| `claude-md-integration.md` | ~700 | +| Dispatch file (`deploy.md`) | ~200 | +| 6 commands (avg) | ~3,600 | +| 2 agents | ~1,200 | +| 6 skills | ~2,500 | +| **Total** | **~8,200** | + +## Open Questions + +- Should this support Kubernetes/Helm for users who need it? +- SSH-based remote execution via MCP server for actual deployments? 
diff --git a/docs/designs/ops-release-manager.md b/docs/designs/ops-release-manager.md new file mode 100644 index 0000000..c2db0ef --- /dev/null +++ b/docs/designs/ops-release-manager.md @@ -0,0 +1,71 @@ +# Design: ops-release-manager + +**Domain:** `ops` +**Target Version:** v9.6.0 + +## Purpose + +Release management automation including semantic versioning, changelog generation, release branch creation, and tag management. Coordinates the release process across git, changelogs, and package manifests. + +## Target Users + +- Project maintainers managing releases +- Teams following SemVer and conventional commits +- Projects with multiple version locations to keep in sync + +## Commands + +| Command | Description | +|---------|-------------| +| `/release setup` | Setup wizard — detect version locations, configure release flow | +| `/release prepare` | Prepare release: bump versions, update changelog, create branch | +| `/release validate` | Pre-release checks (clean tree, tests pass, changelog has content) | +| `/release tag` | Create and push git tag with release notes | +| `/release rollback` | Revert a release (delete tag, revert version bump commit) | +| `/release status` | Show current version, unreleased changes, next version suggestion | + +## Agent Architecture + +| Agent | Model | Mode | Role | +|-------|-------|------|------| +| `release-coordinator` | sonnet | acceptEdits | Version bumping, changelog updates, branch/tag creation | +| `release-validator` | haiku | plan | Pre-release validation, dependency checks | + +## Skills + +| Skill | Purpose | +|-------|---------| +| `version-detection` | Find version locations (package.json, pyproject.toml, marketplace.json, etc.) 
| +| `semver-rules` | SemVer bump logic based on conventional commits | +| `changelog-conventions` | Keep a Changelog format, unreleased section management | +| `release-workflow` | Branch-based vs tag-based release patterns | +| `visual-header` | Standard command output headers | + +## MCP Server + +**Not required.** All operations are git and file-based. + +## Integration Points + +| Plugin | Integration | +|--------|-------------| +| git-flow | `/release prepare` uses gitflow conventions for branch creation | +| doc-guardian | `/release validate` checks documentation is up to date | +| projman | Sprint close can trigger `/release prepare` for sprint-based releases | +| ops-deploy-pipeline | Release tags trigger deployment pipeline | + +## Token Budget + +| Component | Estimated Tokens | +|-----------|-----------------| +| `claude-md-integration.md` | ~600 | +| Dispatch file (`release.md`) | ~200 | +| 6 commands (avg) | ~3,600 | +| 2 agents | ~1,200 | +| 5 skills | ~2,000 | +| **Total** | **~7,600** | + +## Open Questions + +- Should this subsume the existing `release.sh` script in this repo? +- Support for GitHub Releases / Gitea Releases API via MCP? diff --git a/docs/designs/saas-api-platform.md b/docs/designs/saas-api-platform.md new file mode 100644 index 0000000..d8d2a7e --- /dev/null +++ b/docs/designs/saas-api-platform.md @@ -0,0 +1,71 @@ +# Design: saas-api-platform + +**Domain:** `saas` +**Target Version:** v9.1.0 + +## Purpose + +Provides scaffolding, validation, and development workflow tools for REST and GraphQL API backends. Supports FastAPI (Python) and Express (Node.js) with OpenAPI spec generation, route validation, and middleware management. 
+ +## Target Users + +- Backend developers building API services +- Teams using FastAPI or Express frameworks +- Projects requiring OpenAPI/Swagger documentation + +## Commands + +| Command | Description | +|---------|-------------| +| `/api setup` | Setup wizard — detect framework, configure MCP server | +| `/api scaffold` | Generate API routes, models, schemas from spec or description | +| `/api validate` | Validate routes against OpenAPI spec, check missing endpoints | +| `/api docs` | Generate/update OpenAPI spec from code annotations | +| `/api test-routes` | Generate request/response test cases for API endpoints | +| `/api middleware` | Add/configure middleware (auth, CORS, rate-limiting, logging) | + +## Agent Architecture + +| Agent | Model | Mode | Role | +|-------|-------|------|------| +| `api-architect` | sonnet | default | Route design, schema generation, middleware planning | +| `api-validator` | haiku | plan | Read-only validation of routes against spec | + +## Skills + +| Skill | Purpose | +|-------|---------| +| `framework-detection` | Detect FastAPI vs Express, identify project structure | +| `openapi-conventions` | OpenAPI 3.x spec generation rules and patterns | +| `route-patterns` | RESTful route naming, versioning, pagination conventions | +| `middleware-catalog` | Common middleware patterns per framework | +| `visual-header` | Standard command output headers | + +## MCP Server + +**Not required.** All operations are file-based (reading/writing code and specs). No external API needed. 
+ +## Integration Points + +| Plugin | Integration | +|--------|-------------| +| projman | Issue labels: `Component/API`, `Tech/FastAPI`, `Tech/Express` | +| code-sentinel | PreToolUse hook scans generated routes for security issues | +| saas-test-pilot | `/api test-routes` generates stubs consumable by test-pilot | +| saas-db-migrate | Schema models shared between API models and migrations | + +## Token Budget + +| Component | Estimated Tokens | +|-----------|-----------------| +| `claude-md-integration.md` | ~800 | +| Dispatch file (`api.md`) | ~200 | +| 6 commands (avg) | ~3,600 | +| 2 agents | ~1,200 | +| 5 skills | ~2,500 | +| **Total** | **~8,300** | + +## Open Questions + +- Should MCP server be added later for live API testing (curl-like requests)? +- Support for gRPC/tRPC in addition to REST/GraphQL? diff --git a/docs/designs/saas-db-migrate.md b/docs/designs/saas-db-migrate.md new file mode 100644 index 0000000..e1fb3b3 --- /dev/null +++ b/docs/designs/saas-db-migrate.md @@ -0,0 +1,71 @@ +# Design: saas-db-migrate + +**Domain:** `saas` +**Target Version:** v9.2.0 + +## Purpose + +Database migration management for SQL databases. Supports Alembic (Python/SQLAlchemy), Prisma (Node.js), and raw SQL migrations. Provides migration generation, validation, rollback planning, and drift detection. 
+ +## Target Users + +- Backend developers managing database schemas +- Teams using SQLAlchemy/Alembic or Prisma +- Projects needing migration safety checks before deployment + +## Commands + +| Command | Description | +|---------|-------------| +| `/db-migrate setup` | Setup wizard — detect ORM/migration tool, configure paths | +| `/db-migrate generate` | Generate migration from model diff or description | +| `/db-migrate validate` | Check migration safety (destructive ops, data loss risk, locking) | +| `/db-migrate plan` | Show migration execution plan with rollback strategy | +| `/db-migrate history` | Display migration history and current state | +| `/db-migrate rollback` | Generate rollback migration for a given migration | + +## Agent Architecture + +| Agent | Model | Mode | Role | +|-------|-------|------|------| +| `migration-planner` | sonnet | default | Migration generation, rollback planning | +| `migration-auditor` | haiku | plan | Read-only safety validation (destructive op detection) | + +## Skills + +| Skill | Purpose | +|-------|---------| +| `orm-detection` | Detect Alembic vs Prisma vs raw SQL, identify config | +| `migration-safety` | Rules for detecting destructive operations (DROP, ALTER, data loss) | +| `rollback-patterns` | Standard rollback generation patterns per tool | +| `naming-conventions` | Migration file naming and ordering conventions | +| `visual-header` | Standard command output headers | + +## MCP Server + +**Not required.** Migrations are file-based. Database connectivity is handled by the ORM tool itself, not by Claude. 
+ +## Integration Points + +| Plugin | Integration | +|--------|-------------| +| projman | Issue labels: `Component/Database`, `Tech/SQLAlchemy`, `Tech/Prisma` | +| saas-api-platform | Schema models shared between API and migration layers | +| code-sentinel | Migration validation as part of security scan | +| data-platform | PostgreSQL tools can inspect live schema for drift detection | + +## Token Budget + +| Component | Estimated Tokens | +|-----------|-----------------| +| `claude-md-integration.md` | ~600 | +| Dispatch file (`db-migrate.md`) | ~200 | +| 6 commands (avg) | ~3,600 | +| 2 agents | ~1,200 | +| 5 skills | ~2,000 | +| **Total** | **~7,600** | + +## Open Questions + +- Should this integrate with data-platform's PostgreSQL MCP server for live schema comparison? +- Support for NoSQL migration tools (Mongoose, etc.)? diff --git a/docs/designs/saas-react-platform.md b/docs/designs/saas-react-platform.md new file mode 100644 index 0000000..0167493 --- /dev/null +++ b/docs/designs/saas-react-platform.md @@ -0,0 +1,73 @@ +# Design: saas-react-platform + +**Domain:** `saas` +**Target Version:** v9.4.0 + +## Purpose + +React frontend development toolkit with component scaffolding, routing setup, state management patterns, and build configuration. Supports Next.js and Vite-based React projects with TypeScript. 
+ +## Target Users + +- Frontend developers building React applications +- Teams using Next.js or Vite + React +- Projects needing consistent component architecture + +## Commands + +| Command | Description | +|---------|-------------| +| `/react setup` | Setup wizard — detect framework (Next.js/Vite), configure paths | +| `/react component` | Scaffold React component with props, types, tests, stories | +| `/react route` | Add route with page component, loader, and error boundary | +| `/react state` | Set up state management pattern (Context, Zustand, Redux Toolkit) | +| `/react hook` | Generate custom hook with TypeScript types and tests | +| `/react lint` | Validate component tree, check prop drilling, detect anti-patterns | + +## Agent Architecture + +| Agent | Model | Mode | Role | +|-------|-------|------|------| +| `react-architect` | sonnet | default | Component design, routing, state management | +| `react-auditor` | haiku | plan | Read-only lint and anti-pattern detection | + +## Skills + +| Skill | Purpose | +|-------|---------| +| `framework-detection` | Detect Next.js vs Vite, App Router vs Pages Router | +| `component-patterns` | Standard component structure, naming, file organization | +| `state-patterns` | State management patterns and when to use each | +| `routing-conventions` | Route naming, dynamic routes, middleware patterns | +| `typescript-patterns` | TypeScript utility types, generics, prop typing | +| `visual-header` | Standard command output headers | + +## MCP Server + +**Not required.** All operations are file-based (component generation, route configuration). 
+ +## Integration Points + +| Plugin | Integration | +|--------|-------------| +| projman | Issue labels: `Component/Frontend`, `Tech/React`, `Tech/Next.js` | +| viz-platform | DMC components integrate with React component architecture | +| saas-api-platform | API client generation from OpenAPI spec | +| saas-test-pilot | Component test generation via `/react component` | +| code-sentinel | Security scan for XSS, unsafe HTML, client-side secrets | + +## Token Budget + +| Component | Estimated Tokens | +|-----------|-----------------| +| `claude-md-integration.md` | ~800 | +| Dispatch file (`react.md`) | ~200 | +| 6 commands (avg) | ~3,600 | +| 2 agents | ~1,200 | +| 6 skills | ~3,000 | +| **Total** | **~8,800** | + +## Open Questions + +- Should we support Vue.js/Svelte as alternative frameworks? +- Integration with Storybook for component documentation? diff --git a/docs/designs/saas-test-pilot.md b/docs/designs/saas-test-pilot.md new file mode 100644 index 0000000..d162ef4 --- /dev/null +++ b/docs/designs/saas-test-pilot.md @@ -0,0 +1,73 @@ +# Design: saas-test-pilot + +**Domain:** `saas` +**Target Version:** v9.5.0 + +## Purpose + +Test automation toolkit supporting unit, integration, and end-to-end testing. Generates test cases from code analysis, manages test fixtures, and provides coverage analysis with gap detection. 
+ +## Target Users + +- Developers writing tests for Python or JavaScript/TypeScript projects +- Teams enforcing test coverage requirements +- Projects needing test generation from existing code + +## Commands + +| Command | Description | +|---------|-------------| +| `/test setup` | Setup wizard — detect test framework, configure paths | +| `/test generate` | Generate test cases for functions/classes/modules | +| `/test coverage` | Analyze test coverage and identify untested code paths | +| `/test fixtures` | Generate or manage test fixtures and mocks | +| `/test e2e` | Generate end-to-end test scenarios from user stories | +| `/test run` | Run tests with formatted output and failure analysis | + +## Agent Architecture + +| Agent | Model | Mode | Role | +|-------|-------|------|------| +| `test-architect` | sonnet | acceptEdits | Test generation, fixture creation, e2e scenarios | +| `coverage-analyst` | haiku | plan | Read-only coverage analysis and gap detection | + +## Skills + +| Skill | Purpose | +|-------|---------| +| `framework-detection` | Detect pytest/Jest/Vitest/Playwright, identify config | +| `test-patterns` | Unit/integration/e2e test patterns and best practices | +| `mock-patterns` | Mocking strategies for different dependency types | +| `coverage-analysis` | Coverage gap detection and prioritization | +| `fixture-management` | Fixture organization, factories, builders | +| `visual-header` | Standard command output headers | + +## MCP Server + +**Not required.** Test generation is file-based. Test execution uses the project's own test runner via Bash. 
+ +## Integration Points + +| Plugin | Integration | +|--------|-------------| +| projman | `/sprint test` delegates to test-pilot when installed | +| saas-api-platform | API route tests generated from `/api test-routes` | +| saas-react-platform | Component tests generated alongside components | +| data-seed | Test fixtures use seed data profiles | +| code-sentinel | Security test patterns included in generation | + +## Token Budget + +| Component | Estimated Tokens | +|-----------|-----------------| +| `claude-md-integration.md` | ~700 | +| Dispatch file (`test.md`) | ~200 | +| 6 commands (avg) | ~3,600 | +| 2 agents | ~1,200 | +| 6 skills | ~2,500 | +| **Total** | **~8,200** | + +## Open Questions + +- Should `/test run` replace projman's `/sprint test run` or supplement it? +- Support for property-based testing (Hypothesis, fast-check)? diff --git a/mcp-servers/netbox/README.md b/mcp-servers/netbox/README.md index b88faf9..a09dd22 100644 --- a/mcp-servers/netbox/README.md +++ b/mcp-servers/netbox/README.md @@ -98,10 +98,10 @@ If unset, all modules are enabled (backward compatible). 
| Module | Tool Count | Description | cmdb-assistant Commands | |--------|------------|-------------|------------------------| -| `dcim` | ~60 | Sites, devices, racks, interfaces, cables | `/cmdb-device`, `/cmdb-site`, `/cmdb-search`, `/cmdb-topology` | -| `ipam` | ~40 | IP addresses, prefixes, VLANs, VRFs | `/cmdb-ip`, `/ip-conflicts`, `/cmdb-search` | -| `virtualization` | ~20 | Clusters, VMs, VM interfaces | `/cmdb-search`, `/cmdb-audit`, `/cmdb-register` | -| `extras` | ~12 | Tags, journal entries, audit log | `/change-audit`, `/cmdb-register` | +| `dcim` | ~60 | Sites, devices, racks, interfaces, cables | `/cmdb device`, `/cmdb site`, `/cmdb search`, `/cmdb topology` | +| `ipam` | ~40 | IP addresses, prefixes, VLANs, VRFs | `/cmdb ip`, `/cmdb ip-conflicts`, `/cmdb search` | +| `virtualization` | ~20 | Clusters, VMs, VM interfaces | `/cmdb search`, `/cmdb audit`, `/cmdb register` | +| `extras` | ~12 | Tags, journal entries, audit log | `/cmdb change-audit`, `/cmdb register` | | `circuits` | ~15 | Providers, circuits, terminations | — | | `tenancy` | ~12 | Tenants, contacts | — | | `vpn` | ~15 | Tunnels, IKE/IPSec policies, L2VPN | — | diff --git a/plugins/clarity-assist/agents/clarity-coach.md b/plugins/clarity-assist/agents/clarity-coach.md index d9100d6..a7cab57 100644 --- a/plugins/clarity-assist/agents/clarity-coach.md +++ b/plugins/clarity-assist/agents/clarity-coach.md @@ -119,7 +119,7 @@ Track gathered information in a mental model: ### After Clarification -Produce a clear specification (see /clarify command for format). +Produce a clear specification (see /clarity clarify command for format). ## Example Session diff --git a/plugins/clarity-assist/claude-md-integration.md b/plugins/clarity-assist/claude-md-integration.md index 37f76b1..58982fa 100644 --- a/plugins/clarity-assist/claude-md-integration.md +++ b/plugins/clarity-assist/claude-md-integration.md @@ -18,8 +18,8 @@ This project uses the clarity-assist plugin for requirement gathering. 
| Command | Use Case | |---------|----------| -| `/clarify` | Full 4-D methodology for complex requests | -| `/quick-clarify` | Rapid mode for simple disambiguation | +| `/clarity clarify` | Full 4-D methodology for complex requests | +| `/clarity quick-clarify` | Rapid mode for simple disambiguation | ### Communication Style diff --git a/plugins/clarity-assist/commands/clarify.md b/plugins/clarity-assist/commands/clarity-clarify.md similarity index 96% rename from plugins/clarity-assist/commands/clarify.md rename to plugins/clarity-assist/commands/clarity-clarify.md index 04b5950..aed6690 100644 --- a/plugins/clarity-assist/commands/clarify.md +++ b/plugins/clarity-assist/commands/clarity-clarify.md @@ -1,4 +1,8 @@ -# /clarify - Full Prompt Optimization +--- +name: clarity clarify +--- + +# /clarity clarify - Full Prompt Optimization ## Visual Output diff --git a/plugins/clarity-assist/commands/quick-clarify.md b/plugins/clarity-assist/commands/clarity-quick-clarify.md similarity index 83% rename from plugins/clarity-assist/commands/quick-clarify.md rename to plugins/clarity-assist/commands/clarity-quick-clarify.md index abfc31e..1587beb 100644 --- a/plugins/clarity-assist/commands/quick-clarify.md +++ b/plugins/clarity-assist/commands/clarity-quick-clarify.md @@ -1,4 +1,8 @@ -# /quick-clarify - Rapid Clarification Mode +--- +name: clarity quick-clarify +--- + +# /clarity quick-clarify - Rapid Clarification Mode ## Visual Output @@ -23,7 +27,7 @@ Single-pass clarification for requests that are mostly clear but need minor disa - `skills/nd-accommodations.md` - ND-friendly question patterns - `skills/clarification-techniques.md` - Echo and micro-summary techniques -- `skills/escalation-patterns.md` - When to escalate to full /clarify +- `skills/escalation-patterns.md` - When to escalate to full `/clarity clarify` ## Workflow @@ -37,7 +41,7 @@ No formal specification document needed. 
Proceed after brief confirmation, docum ## Escalation -If complexity emerges, offer to switch to full `/clarify`: +If complexity emerges, offer to switch to full `/clarity clarify`: ``` "This is more involved than it first appeared. Want me to switch diff --git a/plugins/clarity-assist/commands/clarity.md b/plugins/clarity-assist/commands/clarity.md new file mode 100644 index 0000000..6652c52 --- /dev/null +++ b/plugins/clarity-assist/commands/clarity.md @@ -0,0 +1,14 @@ +--- +description: Prompt optimization and requirement clarification +--- + +# /clarity + +Prompt optimization and requirement clarification with ND-friendly accommodations. + +## Sub-commands + +| Sub-command | Description | +|-------------|-------------| +| `/clarity clarify` | Full 4-D methodology for complex requests | +| `/clarity quick-clarify` | Rapid mode for simple disambiguation | diff --git a/plugins/clarity-assist/docs/ND-SUPPORT.md b/plugins/clarity-assist/docs/ND-SUPPORT.md index 11f4b6c..c5f5ccf 100644 --- a/plugins/clarity-assist/docs/ND-SUPPORT.md +++ b/plugins/clarity-assist/docs/ND-SUPPORT.md @@ -58,8 +58,8 @@ Our design philosophy centers on three principles: ### 3. Customizable Verbosity **Detail Levels** -- `/clarify` - Full methodology for complex requests (more thorough, more questions) -- `/quick-clarify` - Rapid mode for simple disambiguation (fewer questions, faster) +- `/clarity clarify` - Full methodology for complex requests (more thorough, more questions) +- `/clarity quick-clarify` - Rapid mode for simple disambiguation (fewer questions, faster) **User Control** - Users can always say "that's enough detail" to end questioning early @@ -68,7 +68,7 @@ Our design philosophy centers on three principles: ### 4. Vagueness Detection -The `UserPromptSubmit` hook automatically detects prompts that might benefit from clarification and gently suggests using `/clarify`. 
+The `UserPromptSubmit` hook automatically detects prompts that might benefit from clarification and gently suggests using `/clarity clarify`. **Detection Signals** - Short prompts (< 10 words) without specific technical terms @@ -156,10 +156,10 @@ This triggers vagueness detection because: - No specific technical context - No measurable outcome -### After: Clarified with /clarify +### After: Clarified with /clarity clarify ``` -User: /clarify Make the app faster +User: /clarity clarify Make the app faster Claude: Let me help clarify what kind of performance improvements you're looking for. @@ -235,7 +235,7 @@ Optimize initial page load time to under 2 seconds by addressing bundle size and For simpler requests that just need minor disambiguation: ``` -User: /quick-clarify Add a delete button to the user list +User: /clarity quick-clarify Add a delete button to the user list Claude: I'll add a delete button to each row in the user list. @@ -286,14 +286,14 @@ export CLARITY_ASSIST_VAGUENESS_THRESHOLD=0.8 ### If You're Feeling Overwhelmed -- Use `/quick-clarify` instead of `/clarify` for faster interactions +- Use `/clarity quick-clarify` instead of `/clarity clarify` for faster interactions - Say "let's focus on just one thing" to narrow scope - Ask to "pause and summarize" at any point - It's OK to say "I don't know" - the plugin will offer concrete alternatives ### If You Have Executive Function Challenges -- Start with `/clarify` even for tasks you think are simple - it helps with planning +- Start with `/clarity clarify` even for tasks you think are simple - it helps with planning - The structured specification can serve as a checklist - Use the scope boundaries to prevent scope creep diff --git a/plugins/clarity-assist/hooks/vagueness-check.sh b/plugins/clarity-assist/hooks/vagueness-check.sh index 8fe017a..5682846 100755 --- a/plugins/clarity-assist/hooks/vagueness-check.sh +++ b/plugins/clarity-assist/hooks/vagueness-check.sh @@ -240,7 +240,7 @@ if (( $(echo 
"$SCORE >= $THRESHOLD" | bc -l) )); then # Gentle, non-blocking suggestion echo "$PREFIX Your prompt could benefit from more clarity." - echo "$PREFIX Consider running /clarify to refine your request." + echo "$PREFIX Consider running /clarity clarify to refine your request." echo "$PREFIX (Vagueness score: ${SCORE_PCT}% - this is a suggestion, not a block)" # Additional RFC suggestion if feature request detected diff --git a/plugins/clarity-assist/skills/escalation-patterns.md b/plugins/clarity-assist/skills/escalation-patterns.md index eba07ab..d5613a0 100644 --- a/plugins/clarity-assist/skills/escalation-patterns.md +++ b/plugins/clarity-assist/skills/escalation-patterns.md @@ -40,7 +40,7 @@ add the other parts. Sound good?" ## Choosing Initial Mode -### Use /quick-clarify When +### Use /clarity quick-clarify When - Request is fairly clear, just one or two ambiguities - User is in a hurry @@ -48,7 +48,7 @@ add the other parts. Sound good?" - Simple feature additions or bug fixes - Confidence is high (>90%) -### Use /clarify When +### Use /clarity clarify When - Complex multi-step requests - Requirements with multiple possible interpretations diff --git a/plugins/claude-config-maintainer/agents/maintainer.md b/plugins/claude-config-maintainer/agents/maintainer.md index b27159a..0fa5709 100644 --- a/plugins/claude-config-maintainer/agents/maintainer.md +++ b/plugins/claude-config-maintainer/agents/maintainer.md @@ -102,7 +102,7 @@ Also check for hook-based plugins (project-hygiene uses `PostToolUse` hooks). 
For each detected plugin, search CLAUDE.md for: - Plugin name mention (e.g., "projman", "cmdb-assistant") -- Command references (e.g., `/sprint-plan`, `/cmdb-search`) +- Command references (e.g., `/sprint plan`, `/cmdb search`) - MCP tool mentions (e.g., `list_issues`, `dcim_list_devices`) **Step 3: Load Integration Snippets** diff --git a/plugins/claude-config-maintainer/claude-md-integration.md b/plugins/claude-config-maintainer/claude-md-integration.md index 9d97593..c795903 100644 --- a/plugins/claude-config-maintainer/claude-md-integration.md +++ b/plugins/claude-config-maintainer/claude-md-integration.md @@ -6,14 +6,14 @@ This project uses the **claude-config-maintainer** plugin to analyze and optimiz | Command | Description | |---------|-------------| -| `/config-analyze` | Analyze CLAUDE.md for optimization opportunities with 100-point scoring | -| `/config-optimize` | Automatically optimize CLAUDE.md structure and content | -| `/config-init` | Initialize a new CLAUDE.md file for a project | -| `/config-diff` | Track CLAUDE.md changes over time with behavioral impact analysis | -| `/config-lint` | Lint CLAUDE.md for anti-patterns and best practices (31 rules) | -| `/config-audit-settings` | Audit settings.local.json permissions with 100-point scoring | -| `/config-optimize-settings` | Optimize permission patterns and apply named profiles | -| `/config-permissions-map` | Visual map of review layers and permission coverage | +| `/claude-config analyze` | Analyze CLAUDE.md for optimization opportunities with 100-point scoring | +| `/claude-config optimize` | Automatically optimize CLAUDE.md structure and content | +| `/claude-config init` | Initialize a new CLAUDE.md file for a project | +| `/claude-config diff` | Track CLAUDE.md changes over time with behavioral impact analysis | +| `/claude-config lint` | Lint CLAUDE.md for anti-patterns and best practices (31 rules) | +| `/claude-config audit-settings` | Audit settings.local.json permissions with 100-point 
scoring | +| `/claude-config optimize-settings` | Optimize permission patterns and apply named profiles | +| `/claude-config permissions-map` | Visual map of review layers and permission coverage | ### CLAUDE.md Scoring System @@ -47,10 +47,10 @@ The settings audit uses a 100-point scoring system across four categories: ### Usage Guidelines -- Run `/config-analyze` periodically to assess CLAUDE.md quality -- Run `/config-audit-settings` to check permission efficiency +- Run `/claude-config analyze` periodically to assess CLAUDE.md quality +- Run `/claude-config audit-settings` to check permission efficiency - Target a score of **70+/100** for effective Claude Code operation - Address HIGH priority issues first when optimizing -- Use `/config-init` when setting up new projects to start with best practices -- Use `/config-permissions-map` to visualize review layer coverage +- Use `/claude-config init` when setting up new projects to start with best practices +- Use `/claude-config permissions-map` to visualize review layer coverage - Re-analyze after making changes to verify improvements diff --git a/plugins/claude-config-maintainer/commands/analyze.md b/plugins/claude-config-maintainer/commands/claude-config-analyze.md similarity index 92% rename from plugins/claude-config-maintainer/commands/analyze.md rename to plugins/claude-config-maintainer/commands/claude-config-analyze.md index 18a3ecc..ca8f801 100644 --- a/plugins/claude-config-maintainer/commands/analyze.md +++ b/plugins/claude-config-maintainer/commands/claude-config-analyze.md @@ -1,8 +1,9 @@ --- +name: claude-config analyze description: Analyze CLAUDE.md for optimization opportunities and plugin integration --- -# Analyze CLAUDE.md +# /claude-config analyze Analyze your CLAUDE.md and provide a scored report with recommendations. 
@@ -20,7 +21,7 @@ Display: `CONFIG-MAINTAINER - CLAUDE.md Analysis` ## Usage ``` -/config-analyze +/claude-config analyze ``` ## Workflow diff --git a/plugins/claude-config-maintainer/commands/config-audit-settings.md b/plugins/claude-config-maintainer/commands/claude-config-audit-settings.md similarity index 92% rename from plugins/claude-config-maintainer/commands/config-audit-settings.md rename to plugins/claude-config-maintainer/commands/claude-config-audit-settings.md index 346249a..c03ce53 100644 --- a/plugins/claude-config-maintainer/commands/config-audit-settings.md +++ b/plugins/claude-config-maintainer/commands/claude-config-audit-settings.md @@ -1,9 +1,9 @@ --- -name: config-audit-settings +name: claude-config audit-settings description: Audit settings.local.json for permission optimization opportunities --- -# /config-audit-settings +# /claude-config audit-settings Audit Claude Code `settings.local.json` permissions with 100-point scoring across redundancy, coverage, safety alignment, and profile fit. @@ -24,8 +24,8 @@ Before executing, load: ## Usage ``` -/config-audit-settings # Full audit with recommendations -/config-audit-settings --diagram # Include Mermaid diagram of review layer coverage +/claude-config audit-settings # Full audit with recommendations +/claude-config audit-settings --diagram # Include Mermaid diagram of review layer coverage ``` ## Workflow @@ -128,9 +128,9 @@ Recommendations: ... Follow-Up Actions: - 1. Run /config-optimize-settings to apply recommendations - 2. Run /config-optimize-settings --dry-run to preview first - 3. Run /config-optimize-settings --profile=reviewed to apply profile + 1. Run /claude-config optimize-settings to apply recommendations + 2. Run /claude-config optimize-settings --dry-run to preview first + 3. 
Run /claude-config optimize-settings --profile=reviewed to apply profile ``` ## Diagram Output (--diagram flag) diff --git a/plugins/claude-config-maintainer/commands/config-diff.md b/plugins/claude-config-maintainer/commands/claude-config-diff.md similarity index 72% rename from plugins/claude-config-maintainer/commands/config-diff.md rename to plugins/claude-config-maintainer/commands/claude-config-diff.md index cd09750..b4c5c98 100644 --- a/plugins/claude-config-maintainer/commands/config-diff.md +++ b/plugins/claude-config-maintainer/commands/claude-config-diff.md @@ -1,8 +1,9 @@ --- +name: claude-config diff description: Show diff between current CLAUDE.md and last commit --- -# Compare CLAUDE.md Changes +# /claude-config diff Show differences between CLAUDE.md versions to track configuration drift. @@ -18,10 +19,10 @@ Display: `CONFIG-MAINTAINER - CLAUDE.md Diff` ## Usage ``` -/config-diff # Working vs last commit -/config-diff --commit=abc1234 # Working vs specific commit -/config-diff --from=v1.0 --to=v2.0 # Compare two commits -/config-diff --section="Critical Rules" # Specific section only +/claude-config diff # Working vs last commit +/claude-config diff --commit=abc1234 # Working vs specific commit +/claude-config diff --from=v1.0 --to=v2.0 # Compare two commits +/claude-config diff --section="Critical Rules" # Specific section only ``` ## Workflow diff --git a/plugins/claude-config-maintainer/commands/init.md b/plugins/claude-config-maintainer/commands/claude-config-init.md similarity index 81% rename from plugins/claude-config-maintainer/commands/init.md rename to plugins/claude-config-maintainer/commands/claude-config-init.md index 1396d43..774ebc8 100644 --- a/plugins/claude-config-maintainer/commands/init.md +++ b/plugins/claude-config-maintainer/commands/claude-config-init.md @@ -1,8 +1,9 @@ --- +name: claude-config init description: Initialize a new CLAUDE.md file for a project --- -# Initialize CLAUDE.md +# /claude-config init Create a new 
CLAUDE.md file tailored to your project. @@ -19,9 +20,9 @@ Display: `CONFIG-MAINTAINER - CLAUDE.md Initialization` ## Usage ``` -/config-init # Interactive -/config-init --minimal # Minimal version -/config-init --comprehensive # Detailed version +/claude-config init # Interactive +/claude-config init --minimal # Minimal version +/claude-config init --comprehensive # Detailed version ``` ## Workflow diff --git a/plugins/claude-config-maintainer/commands/config-lint.md b/plugins/claude-config-maintainer/commands/claude-config-lint.md similarity index 79% rename from plugins/claude-config-maintainer/commands/config-lint.md rename to plugins/claude-config-maintainer/commands/claude-config-lint.md index bfc8fe8..2b783f7 100644 --- a/plugins/claude-config-maintainer/commands/config-lint.md +++ b/plugins/claude-config-maintainer/commands/claude-config-lint.md @@ -1,8 +1,9 @@ --- +name: claude-config lint description: Lint CLAUDE.md for common anti-patterns and best practices --- -# Lint CLAUDE.md +# /claude-config lint Check CLAUDE.md against best practices and detect common anti-patterns. 
@@ -18,9 +19,9 @@ Display: `CONFIG-MAINTAINER - CLAUDE.md Lint` ## Usage ``` -/config-lint # Full lint -/config-lint --fix # Auto-fix issues -/config-lint --rules=security # Check specific category +/claude-config lint # Full lint +/claude-config lint --fix # Auto-fix issues +/claude-config lint --rules=security # Check specific category ``` ## Workflow diff --git a/plugins/claude-config-maintainer/commands/config-optimize-settings.md b/plugins/claude-config-maintainer/commands/claude-config-optimize-settings.md similarity index 92% rename from plugins/claude-config-maintainer/commands/config-optimize-settings.md rename to plugins/claude-config-maintainer/commands/claude-config-optimize-settings.md index b8ccd7e..6d081c8 100644 --- a/plugins/claude-config-maintainer/commands/config-optimize-settings.md +++ b/plugins/claude-config-maintainer/commands/claude-config-optimize-settings.md @@ -1,9 +1,9 @@ --- -name: config-optimize-settings +name: claude-config optimize-settings description: Optimize settings.local.json permissions based on audit recommendations --- -# /config-optimize-settings +# /claude-config optimize-settings Optimize Claude Code `settings.local.json` permission patterns and apply named profiles. 
@@ -25,10 +25,10 @@ Before executing, load: ## Usage ``` -/config-optimize-settings # Apply audit recommendations -/config-optimize-settings --dry-run # Preview only, no changes -/config-optimize-settings --profile=reviewed # Apply named profile -/config-optimize-settings --consolidate-only # Only merge/dedupe, no new rules +/claude-config optimize-settings # Apply audit recommendations +/claude-config optimize-settings --dry-run # Preview only, no changes +/claude-config optimize-settings --profile=reviewed # Apply named profile +/claude-config optimize-settings --consolidate-only # Only merge/dedupe, no new rules ``` ## Options @@ -44,7 +44,7 @@ Before executing, load: ### Step 1: Run Audit Analysis -Execute the same analysis as `/config-audit-settings`: +Execute the same analysis as `/claude-config audit-settings`: 1. Locate settings file 2. Parse permission arrays 3. Detect issues (duplicates, subsets, merge candidates, etc.) @@ -214,7 +214,7 @@ DRY RUN - No changes will be made [... preview content ...] To apply these changes, run: - /config-optimize-settings + /claude-config optimize-settings ``` ### Applied Output diff --git a/plugins/claude-config-maintainer/commands/optimize.md b/plugins/claude-config-maintainer/commands/claude-config-optimize.md similarity index 79% rename from plugins/claude-config-maintainer/commands/optimize.md rename to plugins/claude-config-maintainer/commands/claude-config-optimize.md index fef2ac7..d73b273 100644 --- a/plugins/claude-config-maintainer/commands/optimize.md +++ b/plugins/claude-config-maintainer/commands/claude-config-optimize.md @@ -1,8 +1,9 @@ --- +name: claude-config optimize description: Optimize CLAUDE.md structure and content --- -# Optimize CLAUDE.md +# /claude-config optimize Automatically optimize CLAUDE.md based on best practices. 
@@ -20,9 +21,9 @@ Display: `CONFIG-MAINTAINER - CLAUDE.md Optimization` ## Usage ``` -/config-optimize # Full optimization -/config-optimize --condense # Reduce verbosity -/config-optimize --dry-run # Preview only +/claude-config optimize # Full optimization +/claude-config optimize --condense # Reduce verbosity +/claude-config optimize --dry-run # Preview only ``` ## Workflow diff --git a/plugins/claude-config-maintainer/commands/config-permissions-map.md b/plugins/claude-config-maintainer/commands/claude-config-permissions-map.md similarity index 96% rename from plugins/claude-config-maintainer/commands/config-permissions-map.md rename to plugins/claude-config-maintainer/commands/claude-config-permissions-map.md index 175646d..f090e0d 100644 --- a/plugins/claude-config-maintainer/commands/config-permissions-map.md +++ b/plugins/claude-config-maintainer/commands/claude-config-permissions-map.md @@ -1,9 +1,9 @@ --- -name: config-permissions-map +name: claude-config permissions-map description: Generate visual map of review layers and permission coverage --- -# /config-permissions-map +# /claude-config permissions-map Generate a Mermaid diagram showing the relationship between file operations, review layers, and permission status. 
@@ -26,8 +26,8 @@ Also read: `/mnt/skills/user/mermaid-diagrams/SKILL.md` (for diagram requirement ## Usage ``` -/config-permissions-map # Generate and display diagram -/config-permissions-map --save # Save diagram to .mermaid file +/claude-config permissions-map # Generate and display diagram +/claude-config permissions-map --save # Save diagram to .mermaid file ``` ## Workflow diff --git a/plugins/claude-config-maintainer/commands/claude-config.md b/plugins/claude-config-maintainer/commands/claude-config.md new file mode 100644 index 0000000..e080f0e --- /dev/null +++ b/plugins/claude-config-maintainer/commands/claude-config.md @@ -0,0 +1,20 @@ +--- +description: CLAUDE.md and settings optimization +--- + +# /claude-config + +CLAUDE.md and settings.local.json optimization for Claude Code projects. + +## Sub-commands + +| Sub-command | Description | +|-------------|-------------| +| `/claude-config analyze` | Analyze CLAUDE.md for optimization opportunities | +| `/claude-config optimize` | Optimize CLAUDE.md structure with preview/backup | +| `/claude-config init` | Initialize new CLAUDE.md for a project | +| `/claude-config diff` | Track CLAUDE.md changes over time with behavioral impact | +| `/claude-config lint` | Lint CLAUDE.md for anti-patterns and best practices | +| `/claude-config audit-settings` | Audit settings.local.json permissions (100-point score) | +| `/claude-config optimize-settings` | Optimize permissions (profiles, consolidation, dry-run) | +| `/claude-config permissions-map` | Visual review layer + permission coverage map | diff --git a/plugins/claude-config-maintainer/skills/diff-analysis.md b/plugins/claude-config-maintainer/skills/diff-analysis.md index 1896af7..a6d8d5f 100644 --- a/plugins/claude-config-maintainer/skills/diff-analysis.md +++ b/plugins/claude-config-maintainer/skills/diff-analysis.md @@ -6,7 +6,7 @@ This skill defines how to analyze and present CLAUDE.md differences. 
| Mode | Command | Description | |------|---------|-------------| -| Working vs HEAD | `/config-diff` | Uncommitted changes | +| Working vs HEAD | `/claude-config diff` | Uncommitted changes | | Working vs Commit | `--commit=REF` | Changes since specific point | | Commit to Commit | `--from=X --to=Y` | Historical comparison | | Branch Comparison | `--branch=NAME` | Cross-branch differences | diff --git a/plugins/claude-config-maintainer/skills/visual-header.md b/plugins/claude-config-maintainer/skills/visual-header.md index e691c09..d670640 100644 --- a/plugins/claude-config-maintainer/skills/visual-header.md +++ b/plugins/claude-config-maintainer/skills/visual-header.md @@ -12,56 +12,56 @@ This skill defines the standard visual header for claude-config-maintainer comma ## Command-Specific Headers -### /config-analyze +### /claude-config analyze ``` +-----------------------------------------------------------------+ | CONFIG-MAINTAINER - CLAUDE.md Analysis | +-----------------------------------------------------------------+ ``` -### /config-optimize +### /claude-config optimize ``` +-----------------------------------------------------------------+ | CONFIG-MAINTAINER - CLAUDE.md Optimization | +-----------------------------------------------------------------+ ``` -### /config-lint +### /claude-config lint ``` +-----------------------------------------------------------------+ | CONFIG-MAINTAINER - CLAUDE.md Lint | +-----------------------------------------------------------------+ ``` -### /config-diff +### /claude-config diff ``` +-----------------------------------------------------------------+ | CONFIG-MAINTAINER - CLAUDE.md Diff | +-----------------------------------------------------------------+ ``` -### /config-init +### /claude-config init ``` +-----------------------------------------------------------------+ | CONFIG-MAINTAINER - CLAUDE.md Initialization | +-----------------------------------------------------------------+ ``` -### 
/config-audit-settings +### /claude-config audit-settings ``` +-----------------------------------------------------------------+ | CONFIG-MAINTAINER - Settings Audit | +-----------------------------------------------------------------+ ``` -### /config-optimize-settings +### /claude-config optimize-settings ``` +-----------------------------------------------------------------+ | CONFIG-MAINTAINER - Settings Optimization | +-----------------------------------------------------------------+ ``` -### /config-permissions-map +### /claude-config permissions-map ``` +-----------------------------------------------------------------+ | CONFIG-MAINTAINER - Permissions Map | diff --git a/plugins/cmdb-assistant/agents/cmdb-assistant.md b/plugins/cmdb-assistant/agents/cmdb-assistant.md index 725d42e..bc93d0d 100644 --- a/plugins/cmdb-assistant/agents/cmdb-assistant.md +++ b/plugins/cmdb-assistant/agents/cmdb-assistant.md @@ -97,13 +97,13 @@ ipam_list_prefixes prefix= | Command | Purpose | |---------|---------| -| `/cmdb-search ` | Search across all CMDB objects | -| `/cmdb-device ` | Device CRUD operations | -| `/cmdb-ip ` | IP address and prefix management | -| `/cmdb-site ` | Site and location management | -| `/cmdb-audit [scope]` | Data quality analysis | -| `/cmdb-register` | Register current machine | -| `/cmdb-sync` | Sync machine state with NetBox | -| `/cmdb-topology ` | Generate infrastructure diagrams | -| `/change-audit [filters]` | Audit NetBox changes | -| `/ip-conflicts [scope]` | Detect IP conflicts | +| `/cmdb search ` | Search across all CMDB objects | +| `/cmdb device ` | Device CRUD operations | +| `/cmdb ip ` | IP address and prefix management | +| `/cmdb site ` | Site and location management | +| `/cmdb audit [scope]` | Data quality analysis | +| `/cmdb register` | Register current machine | +| `/cmdb sync` | Sync machine state with NetBox | +| `/cmdb topology ` | Generate infrastructure diagrams | +| `/cmdb change-audit [filters]` | Audit NetBox 
changes | +| `/cmdb ip-conflicts [scope]` | Detect IP conflicts | diff --git a/plugins/cmdb-assistant/claude-md-integration.md b/plugins/cmdb-assistant/claude-md-integration.md index d0e9b39..9deb89a 100644 --- a/plugins/cmdb-assistant/claude-md-integration.md +++ b/plugins/cmdb-assistant/claude-md-integration.md @@ -6,10 +6,10 @@ This project uses the **cmdb-assistant** plugin for NetBox CMDB integration to m | Command | Description | |---------|-------------| -| `/cmdb-search` | Search across all NetBox objects | -| `/cmdb-device` | Manage devices (create, update, list) | -| `/cmdb-ip` | Manage IP addresses and prefixes | -| `/cmdb-site` | Manage sites and locations | +| `/cmdb search` | Search across all NetBox objects | +| `/cmdb device` | Manage devices (create, update, list) | +| `/cmdb ip` | Manage IP addresses and prefixes | +| `/cmdb site` | Manage sites and locations | ### MCP Tools Available diff --git a/plugins/cmdb-assistant/commands/cmdb-audit.md b/plugins/cmdb-assistant/commands/cmdb-audit.md index 67ff7f5..179fa3f 100644 --- a/plugins/cmdb-assistant/commands/cmdb-audit.md +++ b/plugins/cmdb-assistant/commands/cmdb-audit.md @@ -1,8 +1,9 @@ --- +name: cmdb audit description: Audit NetBox data quality and identify consistency issues --- -# CMDB Data Quality Audit +# /cmdb audit Analyze NetBox data for quality issues and best practice violations. @@ -16,7 +17,7 @@ Analyze NetBox data for quality issues and best practice violations. 
## Usage ``` -/cmdb-audit [scope] +/cmdb audit [scope] ``` **Scopes:** @@ -49,9 +50,9 @@ Execute `skills/audit-workflow.md` which covers: ## Examples -- `/cmdb-audit` - Full audit -- `/cmdb-audit vms` - VM-specific checks -- `/cmdb-audit naming` - Naming conventions +- `/cmdb audit` - Full audit +- `/cmdb audit vms` - VM-specific checks +- `/cmdb audit naming` - Naming conventions ## User Request diff --git a/plugins/cmdb-assistant/commands/change-audit.md b/plugins/cmdb-assistant/commands/cmdb-change-audit.md similarity index 77% rename from plugins/cmdb-assistant/commands/change-audit.md rename to plugins/cmdb-assistant/commands/cmdb-change-audit.md index 83636f9..7fffb69 100644 --- a/plugins/cmdb-assistant/commands/change-audit.md +++ b/plugins/cmdb-assistant/commands/cmdb-change-audit.md @@ -1,8 +1,9 @@ --- +name: cmdb change-audit description: Audit NetBox changes with filtering by date, user, or object type --- -# CMDB Change Audit +# /cmdb change-audit Query and analyze the NetBox audit log for change tracking and compliance. @@ -15,7 +16,7 @@ Query and analyze the NetBox audit log for change tracking and compliance. 
## Usage ``` -/change-audit [filters] +/cmdb change-audit [filters] ``` **Filters:** @@ -46,11 +47,11 @@ If user asks for "security audit" or "compliance report": ## Examples -- `/change-audit` - Recent changes (last 24 hours) -- `/change-audit last 7 days` - Past week -- `/change-audit by admin` - All changes by admin -- `/change-audit type dcim.device` - Device changes only -- `/change-audit action delete` - All deletions +- `/cmdb change-audit` - Recent changes (last 24 hours) +- `/cmdb change-audit last 7 days` - Past week +- `/cmdb change-audit by admin` - All changes by admin +- `/cmdb change-audit type dcim.device` - Device changes only +- `/cmdb change-audit action delete` - All deletions ## User Request diff --git a/plugins/cmdb-assistant/commands/cmdb-device.md b/plugins/cmdb-assistant/commands/cmdb-device.md index ccdc748..b1a41a7 100644 --- a/plugins/cmdb-assistant/commands/cmdb-device.md +++ b/plugins/cmdb-assistant/commands/cmdb-device.md @@ -1,4 +1,8 @@ -# CMDB Device Management +--- +name: cmdb device +--- + +# /cmdb device Manage network devices in NetBox. @@ -10,7 +14,7 @@ Manage network devices in NetBox. 
## Usage ``` -/cmdb-device [options] +/cmdb device [options] ``` ## Instructions @@ -45,10 +49,10 @@ After creating a device, offer to: ## Examples -- `/cmdb-device list` -- `/cmdb-device show core-router-01` -- `/cmdb-device create web-server-03` -- `/cmdb-device at headquarters` +- `/cmdb device list` +- `/cmdb device show core-router-01` +- `/cmdb device create web-server-03` +- `/cmdb device at headquarters` ## User Request diff --git a/plugins/cmdb-assistant/commands/ip-conflicts.md b/plugins/cmdb-assistant/commands/cmdb-ip-conflicts.md similarity index 85% rename from plugins/cmdb-assistant/commands/ip-conflicts.md rename to plugins/cmdb-assistant/commands/cmdb-ip-conflicts.md index 5f1e2b7..298f1a6 100644 --- a/plugins/cmdb-assistant/commands/ip-conflicts.md +++ b/plugins/cmdb-assistant/commands/cmdb-ip-conflicts.md @@ -1,8 +1,9 @@ --- +name: cmdb ip-conflicts description: Detect IP address conflicts and overlapping prefixes in NetBox --- -# CMDB IP Conflict Detection +# /cmdb ip-conflicts Scan NetBox IPAM data to identify IP address conflicts and overlapping prefixes. @@ -15,7 +16,7 @@ Scan NetBox IPAM data to identify IP address conflicts and overlapping prefixes. 
## Usage ``` -/ip-conflicts [scope] +/cmdb ip-conflicts [scope] ``` **Scopes:** @@ -49,9 +50,9 @@ Execute conflict detection from `skills/ip-management.md`: ## Examples -- `/ip-conflicts` - Full scan -- `/ip-conflicts addresses` - Duplicate IPs only -- `/ip-conflicts vrf Production` - Scan specific VRF +- `/cmdb ip-conflicts` - Full scan +- `/cmdb ip-conflicts addresses` - Duplicate IPs only +- `/cmdb ip-conflicts vrf Production` - Scan specific VRF ## User Request diff --git a/plugins/cmdb-assistant/commands/cmdb-ip.md b/plugins/cmdb-assistant/commands/cmdb-ip.md index 751af23..d0d2b02 100644 --- a/plugins/cmdb-assistant/commands/cmdb-ip.md +++ b/plugins/cmdb-assistant/commands/cmdb-ip.md @@ -1,4 +1,8 @@ -# CMDB IP Management +--- +name: cmdb ip +--- + +# /cmdb ip Manage IP addresses and prefixes in NetBox. @@ -11,7 +15,7 @@ Manage IP addresses and prefixes in NetBox. ## Usage ``` -/cmdb-ip [options] +/cmdb ip [options] ``` ## Instructions @@ -42,10 +46,10 @@ Execute operations from `skills/ip-management.md`. ## Examples -- `/cmdb-ip prefixes` -- `/cmdb-ip available in 10.0.1.0/24` -- `/cmdb-ip allocate from 10.0.1.0/24` -- `/cmdb-ip assign 10.0.1.50/24 to web-server-01 eth0` +- `/cmdb ip prefixes` +- `/cmdb ip available in 10.0.1.0/24` +- `/cmdb ip allocate from 10.0.1.0/24` +- `/cmdb ip assign 10.0.1.50/24 to web-server-01 eth0` ## User Request diff --git a/plugins/cmdb-assistant/commands/cmdb-register.md b/plugins/cmdb-assistant/commands/cmdb-register.md index 1e0fcdb..d59f4f7 100644 --- a/plugins/cmdb-assistant/commands/cmdb-register.md +++ b/plugins/cmdb-assistant/commands/cmdb-register.md @@ -1,8 +1,9 @@ --- +name: cmdb register description: Register the current machine into NetBox with all running applications --- -# CMDB Machine Registration +# /cmdb register Register the current machine into NetBox, including hardware info, network interfaces, and running applications. 
@@ -17,7 +18,7 @@ Register the current machine into NetBox, including hardware info, network inter ## Usage ``` -/cmdb-register [--site ] [--tenant ] [--role ] +/cmdb register [--site ] [--tenant ] [--role ] ``` **Options:** @@ -41,7 +42,7 @@ Execute `skills/device-registration.md` which covers: | Error | Action | |-------|--------| -| Device already exists | Suggest `/cmdb-sync` or ask to proceed | +| Device already exists | Suggest `/cmdb sync` or ask to proceed | | Site not found | List available sites, offer to create new | | Docker not available | Skip container registration, note in summary | | Permission denied | Note which operations failed, suggest fixes | diff --git a/plugins/cmdb-assistant/commands/cmdb-search.md b/plugins/cmdb-assistant/commands/cmdb-search.md index 56d3787..5fab23f 100644 --- a/plugins/cmdb-assistant/commands/cmdb-search.md +++ b/plugins/cmdb-assistant/commands/cmdb-search.md @@ -1,4 +1,8 @@ -# CMDB Search +--- +name: cmdb search +--- + +# /cmdb search ## Visual Output @@ -17,7 +21,7 @@ Search NetBox for devices, IPs, sites, or any CMDB object. ## Usage ``` -/cmdb-search +/cmdb search ``` ## Instructions @@ -37,9 +41,9 @@ For broad searches, query multiple endpoints and consolidate results. 
## Examples -- `/cmdb-search router` - Find all devices with "router" in the name -- `/cmdb-search 10.0.1.0/24` - Find prefix and IPs within it -- `/cmdb-search datacenter` - Find sites matching "datacenter" +- `/cmdb search router` - Find all devices with "router" in the name +- `/cmdb search 10.0.1.0/24` - Find prefix and IPs within it +- `/cmdb search datacenter` - Find sites matching "datacenter" ## User Query diff --git a/plugins/cmdb-assistant/commands/cmdb-setup.md b/plugins/cmdb-assistant/commands/cmdb-setup.md index 90ecb9a..de5d364 100644 --- a/plugins/cmdb-assistant/commands/cmdb-setup.md +++ b/plugins/cmdb-assistant/commands/cmdb-setup.md @@ -1,8 +1,9 @@ --- +name: cmdb setup description: Interactive setup wizard for cmdb-assistant plugin --- -# CMDB Assistant Setup Wizard +# /cmdb setup Configure the cmdb-assistant plugin with NetBox integration. @@ -18,7 +19,7 @@ Configure the cmdb-assistant plugin with NetBox integration. ## Usage ``` -/cmdb-setup +/cmdb setup ``` ## Instructions @@ -63,10 +64,10 @@ System Config: ~/.config/claude/netbox.env Restart your Claude Code session for MCP tools. After restart, try: -- /cmdb-device -- /cmdb-ip
-- /cmdb-site -- /cmdb-search +- /cmdb device +- /cmdb ip
+- /cmdb site +- /cmdb search ``` ## User Request diff --git a/plugins/cmdb-assistant/commands/cmdb-site.md b/plugins/cmdb-assistant/commands/cmdb-site.md index 68fcfc5..f5667f9 100644 --- a/plugins/cmdb-assistant/commands/cmdb-site.md +++ b/plugins/cmdb-assistant/commands/cmdb-site.md @@ -1,4 +1,8 @@ -# CMDB Site Management +--- +name: cmdb site +--- + +# /cmdb site Manage sites and locations in NetBox. @@ -10,7 +14,7 @@ Manage sites and locations in NetBox. ## Usage ``` -/cmdb-site [options] +/cmdb site [options] ``` ## Instructions @@ -40,10 +44,10 @@ Execute `skills/visual-header.md` with context "Site Management". ## Examples -- `/cmdb-site list` -- `/cmdb-site show headquarters` -- `/cmdb-site create branch-office-nyc` -- `/cmdb-site racks at headquarters` +- `/cmdb site list` +- `/cmdb site show headquarters` +- `/cmdb site create branch-office-nyc` +- `/cmdb site racks at headquarters` ## User Request diff --git a/plugins/cmdb-assistant/commands/cmdb-sync.md b/plugins/cmdb-assistant/commands/cmdb-sync.md index 1119aff..8faa0ce 100644 --- a/plugins/cmdb-assistant/commands/cmdb-sync.md +++ b/plugins/cmdb-assistant/commands/cmdb-sync.md @@ -1,8 +1,9 @@ --- +name: cmdb sync description: Synchronize current machine state with existing NetBox record --- -# CMDB Machine Sync +# /cmdb sync Update an existing NetBox device record with the current machine state. @@ -16,7 +17,7 @@ Update an existing NetBox device record with the current machine state. 
## Usage ``` -/cmdb-sync [--full] [--dry-run] +/cmdb sync [--full] [--dry-run] ``` **Options:** @@ -48,7 +49,7 @@ Execute `skills/sync-workflow.md` which covers: | Error | Action | |-------|--------| -| Device not found | Suggest `/cmdb-register` | +| Device not found | Suggest `/cmdb register` | | Permission denied | Note which failed, continue others | | Cluster not found | Offer to create or skip container sync | diff --git a/plugins/cmdb-assistant/commands/cmdb-topology.md b/plugins/cmdb-assistant/commands/cmdb-topology.md index d021dc1..e785408 100644 --- a/plugins/cmdb-assistant/commands/cmdb-topology.md +++ b/plugins/cmdb-assistant/commands/cmdb-topology.md @@ -1,8 +1,9 @@ --- +name: cmdb topology description: Generate infrastructure topology diagrams from NetBox data --- -# CMDB Topology Visualization +# /cmdb topology Generate Mermaid diagrams showing infrastructure topology from NetBox. @@ -15,7 +16,7 @@ Generate Mermaid diagrams showing infrastructure topology from NetBox. ## Usage ``` -/cmdb-topology [scope] +/cmdb topology [scope] ``` **Views:** @@ -43,11 +44,11 @@ Always provide: ## Examples -- `/cmdb-topology rack server-rack-01` - Rack elevation -- `/cmdb-topology network` - All network connections -- `/cmdb-topology network Home` - Network for Home site -- `/cmdb-topology site Headquarters` - Site overview -- `/cmdb-topology full` - Full infrastructure +- `/cmdb topology rack server-rack-01` - Rack elevation +- `/cmdb topology network` - All network connections +- `/cmdb topology network Home` - Network for Home site +- `/cmdb topology site Headquarters` - Site overview +- `/cmdb topology full` - Full infrastructure ## User Request diff --git a/plugins/cmdb-assistant/commands/cmdb.md b/plugins/cmdb-assistant/commands/cmdb.md new file mode 100644 index 0000000..7290220 --- /dev/null +++ b/plugins/cmdb-assistant/commands/cmdb.md @@ -0,0 +1,23 @@ +--- +description: NetBox CMDB infrastructure management +--- + +# /cmdb + +NetBox CMDB integration for 
infrastructure management. + +## Sub-commands + +| Sub-command | Description | +|-------------|-------------| +| `/cmdb search` | Search NetBox for devices, IPs, sites | +| `/cmdb device` | Manage network devices (create, view, update, delete) | +| `/cmdb ip` | Manage IP addresses and prefixes | +| `/cmdb site` | Manage sites, locations, racks, and regions | +| `/cmdb audit` | Data quality analysis (VMs, devices, naming, roles) | +| `/cmdb register` | Register current machine into NetBox | +| `/cmdb sync` | Sync machine state with NetBox (detect drift) | +| `/cmdb topology` | Infrastructure topology diagrams | +| `/cmdb change-audit` | NetBox audit trail queries with filtering | +| `/cmdb ip-conflicts` | Detect IP conflicts and overlapping prefixes | +| `/cmdb setup` | Setup wizard for NetBox MCP server | diff --git a/plugins/cmdb-assistant/skills/audit-workflow.md b/plugins/cmdb-assistant/skills/audit-workflow.md index b504579..b0cd8ac 100644 --- a/plugins/cmdb-assistant/skills/audit-workflow.md +++ b/plugins/cmdb-assistant/skills/audit-workflow.md @@ -147,8 +147,8 @@ dcim_update_device id=X platform=Y ### Next Steps -- Run `/cmdb-register` to properly register new machines -- Use `/cmdb-sync` to update existing registrations +- Run `/cmdb register` to properly register new machines +- Use `/cmdb sync` to update existing registrations - Consider bulk updates via NetBox web UI for >10 items ``` diff --git a/plugins/cmdb-assistant/skills/device-registration.md b/plugins/cmdb-assistant/skills/device-registration.md index 9553d8a..456df83 100644 --- a/plugins/cmdb-assistant/skills/device-registration.md +++ b/plugins/cmdb-assistant/skills/device-registration.md @@ -25,7 +25,7 @@ Use commands from `system-discovery` skill to gather: ``` dcim_list_devices name= ``` - If exists, suggest `/cmdb-sync` instead. + If exists, suggest `/cmdb sync` instead. 2. 
**Verify/Create site:** ``` @@ -131,7 +131,7 @@ Add journal entry: extras_create_journal_entry assigned_object_type="dcim.device" assigned_object_id= - comments="Device registered via /cmdb-register command\n\nDiscovered:\n- X network interfaces\n- Y IP addresses\n- Z Docker containers" + comments="Device registered via /cmdb register command\n\nDiscovered:\n- X network interfaces\n- Y IP addresses\n- Z Docker containers" ``` ## Summary Report Template @@ -162,8 +162,8 @@ extras_create_journal_entry | media_jellyfin | Media Server | 2.0 | 2048MB | Active | ### Next Steps -- Run `/cmdb-sync` periodically to keep data current -- Run `/cmdb-audit` to check data quality +- Run `/cmdb sync` periodically to keep data current +- Run `/cmdb audit` to check data quality - Add tags for classification ``` @@ -171,7 +171,7 @@ extras_create_journal_entry | Error | Action | |-------|--------| -| Device already exists | Suggest `/cmdb-sync` or ask to proceed | +| Device already exists | Suggest `/cmdb sync` or ask to proceed | | Site not found | List available sites, offer to create new | | Docker not available | Skip container registration, note in summary | | Permission denied | Note which operations failed, suggest fixes | diff --git a/plugins/cmdb-assistant/skills/sync-workflow.md b/plugins/cmdb-assistant/skills/sync-workflow.md index f6b053f..3c9d63b 100644 --- a/plugins/cmdb-assistant/skills/sync-workflow.md +++ b/plugins/cmdb-assistant/skills/sync-workflow.md @@ -16,7 +16,7 @@ Load these skills: dcim_list_devices name= ``` -If not found, suggest `/cmdb-register` first. +If not found, suggest `/cmdb register` first. 
If found: - Store device ID and current field values @@ -167,7 +167,7 @@ virt_update_vm id= status="offline" extras_create_journal_entry assigned_object_type="dcim.device" assigned_object_id= - comments="Device synced via /cmdb-sync command\n\nChanges applied:\n- " + comments="Device synced via /cmdb sync command\n\nChanges applied:\n- " ``` ## Sync Modes @@ -185,7 +185,7 @@ extras_create_journal_entry | Error | Action | |-------|--------| -| Device not found | Suggest `/cmdb-register` | +| Device not found | Suggest `/cmdb register` | | Permission denied | Note which failed, continue others | | Cluster not found | Offer to create or skip container sync | | API errors | Log error, continue with remaining | diff --git a/plugins/cmdb-assistant/skills/visual-header.md b/plugins/cmdb-assistant/skills/visual-header.md index 972ceca..f656924 100644 --- a/plugins/cmdb-assistant/skills/visual-header.md +++ b/plugins/cmdb-assistant/skills/visual-header.md @@ -14,17 +14,17 @@ Standard visual header for cmdb-assistant commands. 
| Command | Context | |---------|---------| -| `/cmdb-search` | Search | -| `/cmdb-device` | Device Management | -| `/cmdb-ip` | IP Management | -| `/cmdb-site` | Site Management | -| `/cmdb-audit` | Data Quality Audit | -| `/cmdb-register` | Machine Registration | -| `/cmdb-sync` | Machine Sync | -| `/cmdb-topology` | Topology | -| `/change-audit` | Change Audit | -| `/ip-conflicts` | IP Conflict Detection | -| `/cmdb-setup` | Setup Wizard | +| `/cmdb search` | Search | +| `/cmdb device` | Device Management | +| `/cmdb ip` | IP Management | +| `/cmdb site` | Site Management | +| `/cmdb audit` | Data Quality Audit | +| `/cmdb register` | Machine Registration | +| `/cmdb sync` | Machine Sync | +| `/cmdb topology` | Topology | +| `/cmdb change-audit` | Change Audit | +| `/cmdb ip-conflicts` | IP Conflict Detection | +| `/cmdb setup` | Setup Wizard | | Agent mode | Infrastructure Management | ## Usage diff --git a/plugins/code-sentinel/claude-md-integration.md b/plugins/code-sentinel/claude-md-integration.md index 88d11ba..7e71372 100644 --- a/plugins/code-sentinel/claude-md-integration.md +++ b/plugins/code-sentinel/claude-md-integration.md @@ -16,11 +16,11 @@ PreToolUse hooks scan all code changes for: Critical issues are blocked. Warnings are noted but allowed. 
### Commands -- `/security-scan` - Full project security audit -- `/refactor ` - Apply refactoring pattern -- `/refactor-dry ` - Preview refactoring opportunities +- `/sentinel scan` - Full project security audit +- `/sentinel refactor ` - Apply refactoring pattern +- `/sentinel refactor-dry ` - Preview refactoring opportunities ### Severity Levels -- 🔴 Critical: Must fix immediately -- 🟠 High: Fix before release -- 🟡 Medium: Improve when possible +- Critical: Must fix immediately +- High: Fix before release +- Medium: Improve when possible diff --git a/plugins/code-sentinel/commands/refactor-dry.md b/plugins/code-sentinel/commands/sentinel-refactor-dry.md similarity index 87% rename from plugins/code-sentinel/commands/refactor-dry.md rename to plugins/code-sentinel/commands/sentinel-refactor-dry.md index 4ae5bab..a8e870e 100644 --- a/plugins/code-sentinel/commands/refactor-dry.md +++ b/plugins/code-sentinel/commands/sentinel-refactor-dry.md @@ -1,8 +1,9 @@ --- +name: sentinel refactor-dry description: Preview refactoring changes without applying them --- -# Refactor Dry Run +# /sentinel refactor-dry Analyze and preview refactoring opportunities without making changes. @@ -16,7 +17,7 @@ Analyze and preview refactoring opportunities without making changes. ## Usage ``` -/refactor-dry [--all] +/sentinel refactor-dry [--all] ``` **Target:** File path, function name, or "." for current file @@ -41,7 +42,7 @@ Analyze and preview refactoring opportunities without making changes. ### Recommended (High Impact, Low Risk) 1. 
**pattern** at lines X-Y - Impact: High | Risk: Low - - Run: `/refactor --pattern=` + - Run: `/sentinel refactor --pattern=` ### Optional - Lower priority items diff --git a/plugins/code-sentinel/commands/refactor.md b/plugins/code-sentinel/commands/sentinel-refactor.md similarity index 92% rename from plugins/code-sentinel/commands/refactor.md rename to plugins/code-sentinel/commands/sentinel-refactor.md index 1c8d5e8..6f53259 100644 --- a/plugins/code-sentinel/commands/refactor.md +++ b/plugins/code-sentinel/commands/sentinel-refactor.md @@ -1,8 +1,9 @@ --- +name: sentinel refactor description: Apply refactoring patterns to improve code structure and maintainability --- -# Refactor +# /sentinel refactor Apply refactoring transformations to specified code. @@ -16,7 +17,7 @@ Apply refactoring transformations to specified code. ## Usage ``` -/refactor [--pattern=] +/sentinel refactor [--pattern=] ``` **Target:** File path, function name, or "." for current context diff --git a/plugins/code-sentinel/commands/security-scan.md b/plugins/code-sentinel/commands/sentinel-scan.md similarity index 96% rename from plugins/code-sentinel/commands/security-scan.md rename to plugins/code-sentinel/commands/sentinel-scan.md index 6f9348c..5289043 100644 --- a/plugins/code-sentinel/commands/security-scan.md +++ b/plugins/code-sentinel/commands/sentinel-scan.md @@ -1,8 +1,9 @@ --- +name: sentinel scan description: Full security audit of codebase - scans all files for vulnerability patterns --- -# Security Scan +# /sentinel scan Comprehensive security audit of the project. diff --git a/plugins/code-sentinel/commands/sentinel.md b/plugins/code-sentinel/commands/sentinel.md new file mode 100644 index 0000000..a407fea --- /dev/null +++ b/plugins/code-sentinel/commands/sentinel.md @@ -0,0 +1,15 @@ +--- +description: Security scanning and code refactoring +--- + +# /sentinel + +Security scanning and safe code refactoring tools. 
+ +## Sub-commands + +| Sub-command | Description | +|-------------|-------------| +| `/sentinel scan` | Full security audit (SQL injection, XSS, secrets, etc.) | +| `/sentinel refactor` | Apply refactoring patterns to improve code | +| `/sentinel refactor-dry` | Preview refactoring without applying changes | diff --git a/plugins/code-sentinel/skills/dry-run-workflow.md b/plugins/code-sentinel/skills/dry-run-workflow.md index 4f0c27d..e158e75 100644 --- a/plugins/code-sentinel/skills/dry-run-workflow.md +++ b/plugins/code-sentinel/skills/dry-run-workflow.md @@ -60,7 +60,7 @@ High impact, low risk opportunities: - Description of the change - Impact: High/Medium/Low (specific metric improvement) - Risk: Low/Medium/High (why) - - Run: `/refactor --pattern=` + - Run: `/sentinel refactor --pattern=` ``` ### Optional Section diff --git a/plugins/contract-validator/agents/agent-check.md b/plugins/contract-validator/agents/agent-check.md index 8715f3b..a26ea3b 100644 --- a/plugins/contract-validator/agents/agent-check.md +++ b/plugins/contract-validator/agents/agent-check.md @@ -91,7 +91,7 @@ You are an agent definition validator. Your role is to verify that a specific ag ## Example Interaction -**User**: /check-agent Orchestrator +**User**: /cv check-agent Orchestrator **Agent**: 1. Parses CLAUDE.md, finds Orchestrator agent @@ -101,7 +101,7 @@ You are an agent definition validator. Your role is to verify that a specific ag 5. Validates data flow: no data producers/consumers used 6. Reports: "Agent Orchestrator: VALID - all 3 tool references found" -**User**: /check-agent InvalidAgent +**User**: /cv check-agent InvalidAgent **Agent**: 1. 
Parses CLAUDE.md, agent not found diff --git a/plugins/contract-validator/agents/full-validation.md b/plugins/contract-validator/agents/full-validation.md index 7acd9c1..6476e55 100644 --- a/plugins/contract-validator/agents/full-validation.md +++ b/plugins/contract-validator/agents/full-validation.md @@ -93,7 +93,7 @@ You are a contract validation specialist. Your role is to perform comprehensive ## Example Interaction -**User**: /validate-contracts ~/claude-plugins-work +**User**: /cv validate ~/claude-plugins-work **Agent**: 1. Discovers 12 plugins in marketplace diff --git a/plugins/contract-validator/claude-md-integration.md b/plugins/contract-validator/claude-md-integration.md index e2b396a..32219e9 100644 --- a/plugins/contract-validator/claude-md-integration.md +++ b/plugins/contract-validator/claude-md-integration.md @@ -13,15 +13,15 @@ This marketplace uses the contract-validator plugin for cross-plugin compatibili | Command | Purpose | |---------|---------| -| `/validate-contracts` | Full marketplace compatibility validation | -| `/check-agent` | Validate single agent definition | -| `/list-interfaces` | Show all plugin interfaces | +| `/cv validate` | Full marketplace compatibility validation | +| `/cv check-agent` | Validate single agent definition | +| `/cv list-interfaces` | Show all plugin interfaces | ### Validation Workflow Run before merging plugin changes: -1. `/validate-contracts` - Check for conflicts +1. `/cv validate` - Check for conflicts 2. Review errors (must fix) and warnings (should review) 3. 
Fix issues before merging @@ -91,7 +91,7 @@ Avoid generic names that may conflict: | `/setup` | Setup wizard | # GOOD - Plugin-specific prefix -| `/data-setup` | Data platform setup wizard | +| `/data setup` | Data platform setup wizard | ``` ### Document All Tools @@ -125,20 +125,20 @@ This agent uses tools from: ``` # Before merging new plugin -/validate-contracts +/cv validate # Check specific agent after changes -/check-agent Orchestrator +/cv check-agent Orchestrator ``` ### Plugin Development ``` # See what interfaces exist -/list-interfaces +/cv list-interfaces # After adding new command, verify no conflicts -/validate-contracts +/cv validate ``` ### CI/CD Integration @@ -148,5 +148,5 @@ Add to your pipeline: ```yaml - name: Validate Plugin Contracts run: | - claude --skill contract-validator:validate-contracts --args "${{ github.workspace }}" + claude --skill contract-validator:cv-validate --args "${{ github.workspace }}" ``` diff --git a/plugins/contract-validator/commands/check-agent.md b/plugins/contract-validator/commands/cv-check-agent.md similarity index 80% rename from plugins/contract-validator/commands/check-agent.md rename to plugins/contract-validator/commands/cv-check-agent.md index 54f57b6..fed6273 100644 --- a/plugins/contract-validator/commands/check-agent.md +++ b/plugins/contract-validator/commands/cv-check-agent.md @@ -1,4 +1,8 @@ -# /check-agent - Validate Agent Definition +--- +name: cv check-agent +--- + +# /cv check-agent ## Skills to Load - skills/visual-output.md @@ -9,7 +13,7 @@ ## Usage ``` -/check-agent [claude_md_path] +/cv check-agent [claude_md_path] ``` ## Parameters @@ -38,7 +42,7 @@ ## Examples ``` -/check-agent Planner -/check-agent Orchestrator ./CLAUDE.md -/check-agent data-analysis ~/project/CLAUDE.md +/cv check-agent Planner +/cv check-agent Orchestrator ./CLAUDE.md +/cv check-agent data-analysis ~/project/CLAUDE.md ``` diff --git a/plugins/contract-validator/commands/dependency-graph.md 
b/plugins/contract-validator/commands/cv-dependency-graph.md similarity index 74% rename from plugins/contract-validator/commands/dependency-graph.md rename to plugins/contract-validator/commands/cv-dependency-graph.md index 8fe3f3c..a768a24 100644 --- a/plugins/contract-validator/commands/dependency-graph.md +++ b/plugins/contract-validator/commands/cv-dependency-graph.md @@ -1,4 +1,8 @@ -# /dependency-graph - Generate Dependency Visualization +--- +name: cv dependency-graph +--- + +# /cv dependency-graph ## Skills to Load - skills/visual-output.md @@ -10,7 +14,7 @@ ## Usage ``` -/dependency-graph [marketplace_path] [--format ] [--show-tools] +/cv dependency-graph [marketplace_path] [--format ] [--show-tools] ``` ## Parameters @@ -41,15 +45,15 @@ ## Examples ``` -/dependency-graph -/dependency-graph --show-tools -/dependency-graph --format text -/dependency-graph ~/claude-plugins-work +/cv dependency-graph +/cv dependency-graph --show-tools +/cv dependency-graph --format text +/cv dependency-graph ~/claude-plugins-work ``` ## Integration -Use with `/validate-contracts`: -1. Run `/dependency-graph` to visualize -2. Run `/validate-contracts` to find issues +Use with `/cv validate`: +1. Run `/cv dependency-graph` to visualize +2. Run `/cv validate` to find issues 3. 
Fix and regenerate diff --git a/plugins/contract-validator/commands/list-interfaces.md b/plugins/contract-validator/commands/cv-list-interfaces.md similarity index 84% rename from plugins/contract-validator/commands/list-interfaces.md rename to plugins/contract-validator/commands/cv-list-interfaces.md index dce1ed7..240ad4d 100644 --- a/plugins/contract-validator/commands/list-interfaces.md +++ b/plugins/contract-validator/commands/cv-list-interfaces.md @@ -1,4 +1,8 @@ -# /list-interfaces - Show Plugin Interfaces +--- +name: cv list-interfaces +--- + +# /cv list-interfaces ## Skills to Load - skills/visual-output.md @@ -9,7 +13,7 @@ ## Usage ``` -/list-interfaces [marketplace_path] +/cv list-interfaces [marketplace_path] ``` ## Parameters @@ -41,6 +45,6 @@ ## Examples ``` -/list-interfaces -/list-interfaces ~/claude-plugins-work +/cv list-interfaces +/cv list-interfaces ~/claude-plugins-work ``` diff --git a/plugins/contract-validator/commands/cv-setup.md b/plugins/contract-validator/commands/cv-setup.md index ec0f467..e260ecd 100644 --- a/plugins/contract-validator/commands/cv-setup.md +++ b/plugins/contract-validator/commands/cv-setup.md @@ -1,8 +1,9 @@ --- +name: cv setup description: Interactive setup wizard for contract-validator plugin --- -# /cv-setup - Contract Validator Setup Wizard +# /cv setup ## Skills to Load - skills/visual-output.md @@ -40,9 +41,9 @@ description: Interactive setup wizard for contract-validator plugin ## Post-Setup Commands -- `/validate-contracts` - Full marketplace validation -- `/check-agent` - Validate single agent -- `/list-interfaces` - Show all plugin interfaces +- `/cv validate` - Full marketplace validation +- `/cv check-agent` - Validate single agent +- `/cv list-interfaces` - Show all plugin interfaces ## No Configuration Required diff --git a/plugins/contract-validator/commands/cv-status.md b/plugins/contract-validator/commands/cv-status.md index f624fb2..490fb25 100644 --- 
a/plugins/contract-validator/commands/cv-status.md +++ b/plugins/contract-validator/commands/cv-status.md @@ -1,4 +1,5 @@ --- +name: cv status description: Marketplace-wide health check across all installed plugins --- diff --git a/plugins/contract-validator/commands/validate-contracts.md b/plugins/contract-validator/commands/cv-validate.md similarity index 87% rename from plugins/contract-validator/commands/validate-contracts.md rename to plugins/contract-validator/commands/cv-validate.md index 1f01079..efc0adf 100644 --- a/plugins/contract-validator/commands/validate-contracts.md +++ b/plugins/contract-validator/commands/cv-validate.md @@ -1,4 +1,8 @@ -# /validate-contracts - Full Contract Validation +--- +name: cv validate +--- + +# /cv validate ## Skills to Load - skills/visual-output.md @@ -10,7 +14,7 @@ ## Usage ``` -/validate-contracts [marketplace_path] +/cv validate [marketplace_path] ``` ## Parameters @@ -40,6 +44,6 @@ ## Examples ``` -/validate-contracts -/validate-contracts ~/claude-plugins-work +/cv validate +/cv validate ~/claude-plugins-work ``` diff --git a/plugins/contract-validator/commands/cv.md b/plugins/contract-validator/commands/cv.md new file mode 100644 index 0000000..054d76f --- /dev/null +++ b/plugins/contract-validator/commands/cv.md @@ -0,0 +1,18 @@ +--- +description: Cross-plugin compatibility validation +--- + +# /cv + +Cross-plugin compatibility validation and agent verification. 
+ +## Sub-commands + +| Sub-command | Description | +|-------------|-------------| +| `/cv validate` | Full marketplace compatibility validation | +| `/cv check-agent` | Validate single agent definition | +| `/cv list-interfaces` | Show all plugin interfaces | +| `/cv dependency-graph` | Mermaid visualization of plugin dependencies | +| `/cv setup` | Setup wizard for contract-validator MCP | +| `/cv status` | Marketplace-wide health check | diff --git a/plugins/contract-validator/skills/mcp-tools-reference.md b/plugins/contract-validator/skills/mcp-tools-reference.md index 4802edd..597d947 100644 --- a/plugins/contract-validator/skills/mcp-tools-reference.md +++ b/plugins/contract-validator/skills/mcp-tools-reference.md @@ -65,6 +65,6 @@ Available MCP tools for contract-validator operations. ## Error Handling If MCP tools fail: -1. Check if `/cv-setup` has been run +1. Check if `/cv setup` has been run 2. Verify session was restarted after setup 3. Check MCP server venv exists and is valid diff --git a/plugins/data-platform/agents/data-advisor.md b/plugins/data-platform/agents/data-advisor.md index 1d00581..1a8b4a9 100644 --- a/plugins/data-platform/agents/data-advisor.md +++ b/plugins/data-platform/agents/data-advisor.md @@ -23,8 +23,8 @@ You are a strict data integrity auditor. 
Your role is to review code for proper ## Trigger Conditions Activate this agent when: -- User runs `/data-review ` -- User runs `/data-gate ` +- User runs `/data review ` +- User runs `/data gate ` - Projman orchestrator requests data domain gate check - Code review includes database operations, dbt models, or data pipelines @@ -78,7 +78,7 @@ Activate this agent when: ### Review Mode (default) -Triggered by `/data-review ` +Triggered by `/data review ` **Characteristics:** - Produces detailed report with all findings @@ -89,7 +89,7 @@ Triggered by `/data-review ` ### Gate Mode -Triggered by `/data-gate ` or projman orchestrator domain gate +Triggered by `/data gate ` or projman orchestrator domain gate **Characteristics:** - Binary PASS/FAIL output @@ -203,7 +203,7 @@ Blocking Issues (2): 2. portfolio_app/toronto/loaders/census.py:67 - References table 'census_raw' which does not exist Fix: Table was renamed to 'census_demographics' in migration 003. -Run /data-review for full audit report. +Run /data review for full audit report. ``` ### Review Mode Output @@ -292,7 +292,7 @@ When called as a domain gate by projman orchestrator: ## Example Interactions -**User**: `/data-review dbt/models/staging/` +**User**: `/data review dbt/models/staging/` **Agent**: 1. Scans all .sql files in staging/ 2. Runs dbt_parse to validate project @@ -301,7 +301,7 @@ When called as a domain gate by projman orchestrator: 5. Cross-references test coverage 6. Returns detailed report -**User**: `/data-gate portfolio_app/toronto/` +**User**: `/data gate portfolio_app/toronto/` **Agent**: 1. Scans for Python files with pg_query/pg_execute 2. 
Checks if referenced tables exist diff --git a/plugins/data-platform/claude-md-integration.md b/plugins/data-platform/claude-md-integration.md index 15da3c9..efbf89a 100644 --- a/plugins/data-platform/claude-md-integration.md +++ b/plugins/data-platform/claude-md-integration.md @@ -18,12 +18,12 @@ This project uses the data-platform plugin for data engineering workflows. | Command | Purpose | |---------|---------| -| `/data-ingest` | Load data from files or database | -| `/data-profile` | Generate statistical profile | -| `/data-schema` | Show schema information | -| `/data-explain` | Explain dbt model | -| `/data-lineage` | Show data lineage | -| `/data-run` | Execute dbt models | +| `/data ingest` | Load data from files or database | +| `/data profile` | Generate statistical profile | +| `/data schema` | Show schema information | +| `/data explain` | Explain dbt model | +| `/data lineage` | Show data lineage | +| `/data run` | Execute dbt models | ### data_ref Convention @@ -36,9 +36,9 @@ DataFrames are stored with references. Use meaningful names: ### dbt Workflow -1. Always validate before running: `/data-run` includes automatic `dbt_parse` +1. Always validate before running: `/data run` includes automatic `dbt_parse` 2. For dbt 1.9+, check for deprecated syntax before commits -3. Use `/data-lineage` to understand impact of changes +3. 
Use `/data lineage` to understand impact of changes ### Database Access @@ -69,22 +69,22 @@ DATA_PLATFORM_MAX_ROWS=100000 ### Data Exploration ``` -/data-ingest data/raw_customers.csv -/data-profile raw_customers -/data-schema +/data ingest data/raw_customers.csv +/data profile raw_customers +/data schema ``` ### ETL Development ``` -/data-schema orders # Understand source -/data-explain stg_orders # Understand transformation -/data-run stg_orders # Test the model -/data-lineage fct_orders # Check downstream impact +/data schema orders # Understand source +/data explain stg_orders # Understand transformation +/data run stg_orders # Test the model +/data lineage fct_orders # Check downstream impact ``` ### Database Analysis ``` -/data-schema # List all tables +/data schema # List all tables pg_columns orders # Detailed schema st_tables # Find spatial data ``` diff --git a/plugins/data-platform/commands/dbt-test.md b/plugins/data-platform/commands/data-dbt-test.md similarity index 69% rename from plugins/data-platform/commands/dbt-test.md rename to plugins/data-platform/commands/data-dbt-test.md index e32fc50..5b6ea20 100644 --- a/plugins/data-platform/commands/dbt-test.md +++ b/plugins/data-platform/commands/data-dbt-test.md @@ -1,4 +1,8 @@ -# /dbt-test - Run dbt Tests +--- +name: data dbt-test +--- + +# /data dbt-test - Run dbt Tests ## Skills to Load - skills/dbt-workflow.md @@ -12,7 +16,7 @@ Display header: `DATA-PLATFORM - dbt Tests` ## Usage ``` -/dbt-test [selection] [--warn-only] +/data dbt-test [selection] [--warn-only] ``` ## Workflow @@ -32,9 +36,9 @@ Execute `skills/dbt-workflow.md` test workflow: ## Examples ``` -/dbt-test # Run all tests -/dbt-test dim_customers # Tests for specific model -/dbt-test tag:critical # Run critical tests only +/data dbt-test # Run all tests +/data dbt-test dim_customers # Tests for specific model +/data dbt-test tag:critical # Run critical tests only ``` ## Required MCP Tools diff --git 
a/plugins/data-platform/commands/data-explain.md b/plugins/data-platform/commands/data-explain.md index 8a38d7f..d88b589 100644 --- a/plugins/data-platform/commands/data-explain.md +++ b/plugins/data-platform/commands/data-explain.md @@ -1,4 +1,8 @@ -# /data-explain - dbt Model Explanation +--- +name: data explain +--- + +# /data explain - dbt Model Explanation ## Skills to Load - skills/dbt-workflow.md @@ -13,7 +17,7 @@ Display header: `DATA-PLATFORM - Model Explanation` ## Usage ``` -/data-explain +/data explain ``` ## Workflow @@ -26,8 +30,8 @@ Display header: `DATA-PLATFORM - Model Explanation` ## Examples ``` -/data-explain dim_customers -/data-explain fct_orders +/data explain dim_customers +/data explain fct_orders ``` ## Required MCP Tools diff --git a/plugins/data-platform/commands/data-gate.md b/plugins/data-platform/commands/data-gate.md index 5014c25..6ba3ee5 100644 --- a/plugins/data-platform/commands/data-gate.md +++ b/plugins/data-platform/commands/data-gate.md @@ -1,4 +1,5 @@ --- +name: data gate description: Data integrity compliance gate (pass/fail) for sprint execution gate_contract: v1 arguments: @@ -7,21 +8,21 @@ arguments: required: true --- -# /data-gate +# /data gate Binary pass/fail validation for data integrity compliance. Used by projman orchestrator during sprint execution to gate issue completion. ## Usage ``` -/data-gate +/data gate ``` **Examples:** ``` -/data-gate ./dbt/models/staging/ -/data-gate ./portfolio_app/toronto/parsers/ -/data-gate ./dbt/ +/data gate ./dbt/models/staging/ +/data gate ./portfolio_app/toronto/parsers/ +/data gate ./dbt/ ``` ## What It Does @@ -63,7 +64,7 @@ Blocking Issues (2): 2. portfolio_app/toronto/loaders/census.py:67 - References table 'census_raw' which does not exist Fix: Table was renamed to 'census_demographics' in migration 003. -Run /data-review for full audit report. +Run /data review for full audit report. 
``` ## Integration with projman @@ -78,9 +79,9 @@ This command is automatically invoked by the projman orchestrator when: - PASS: Issue can be marked complete - FAIL: Issue stays open, blocker comment added with failure details -## Differences from /data-review +## Differences from /data review -| Aspect | /data-gate | /data-review | +| Aspect | /data gate | /data review | |--------|------------|--------------| | Output | Binary PASS/FAIL | Detailed report with all severities | | Severity | FAIL only | FAIL + WARN + INFO | @@ -95,7 +96,7 @@ This command is automatically invoked by the projman orchestrator when: - **Quick validation**: Fast pass/fail without full report - **Pre-merge checks**: Verify data changes before integration -For detailed findings including warnings and suggestions, use `/data-review` instead. +For detailed findings including warnings and suggestions, use `/data review` instead. ## Requirements diff --git a/plugins/data-platform/commands/data-ingest.md b/plugins/data-platform/commands/data-ingest.md index ffc423f..b1f725c 100644 --- a/plugins/data-platform/commands/data-ingest.md +++ b/plugins/data-platform/commands/data-ingest.md @@ -1,4 +1,8 @@ -# /data-ingest - Data Ingestion +--- +name: data ingest +--- + +# /data ingest - Data Ingestion ## Skills to Load - skills/mcp-tools-reference.md @@ -11,7 +15,7 @@ Display header: `DATA-PLATFORM - Ingest` ## Usage ``` -/data-ingest [source] +/data ingest [source] ``` ## Workflow @@ -31,9 +35,9 @@ Display header: `DATA-PLATFORM - Ingest` ## Examples ``` -/data-ingest data/sales.csv -/data-ingest data/customers.parquet -/data-ingest "SELECT * FROM orders WHERE created_at > '2024-01-01'" +/data ingest data/sales.csv +/data ingest data/customers.parquet +/data ingest "SELECT * FROM orders WHERE created_at > '2024-01-01'" ``` ## Required MCP Tools diff --git a/plugins/data-platform/commands/lineage-viz.md b/plugins/data-platform/commands/data-lineage-viz.md similarity index 74% rename from 
plugins/data-platform/commands/lineage-viz.md rename to plugins/data-platform/commands/data-lineage-viz.md index 2879021..f728df8 100644 --- a/plugins/data-platform/commands/lineage-viz.md +++ b/plugins/data-platform/commands/data-lineage-viz.md @@ -1,4 +1,8 @@ -# /lineage-viz - Mermaid Lineage Visualization +--- +name: data lineage-viz +--- + +# /data lineage-viz - Mermaid Lineage Visualization ## Skills to Load - skills/lineage-analysis.md @@ -12,7 +16,7 @@ Display header: `DATA-PLATFORM - Lineage Visualization` ## Usage ``` -/lineage-viz [--direction TB|LR] [--depth N] +/data lineage-viz [--direction TB|LR] [--depth N] ``` ## Workflow @@ -31,9 +35,9 @@ Display header: `DATA-PLATFORM - Lineage Visualization` ## Examples ``` -/lineage-viz dim_customers -/lineage-viz fct_orders --direction TB -/lineage-viz rpt_revenue --depth 2 +/data lineage-viz dim_customers +/data lineage-viz fct_orders --direction TB +/data lineage-viz rpt_revenue --depth 2 ``` ## Required MCP Tools diff --git a/plugins/data-platform/commands/data-lineage.md b/plugins/data-platform/commands/data-lineage.md index 087c814..b8a4003 100644 --- a/plugins/data-platform/commands/data-lineage.md +++ b/plugins/data-platform/commands/data-lineage.md @@ -1,4 +1,8 @@ -# /data-lineage - Data Lineage Visualization +--- +name: data lineage +--- + +# /data lineage - Data Lineage Visualization ## Skills to Load - skills/lineage-analysis.md @@ -12,7 +16,7 @@ Display header: `DATA-PLATFORM - Lineage` ## Usage ``` -/data-lineage [--depth N] +/data lineage [--depth N] ``` ## Workflow @@ -25,8 +29,8 @@ Display header: `DATA-PLATFORM - Lineage` ## Examples ``` -/data-lineage dim_customers -/data-lineage fct_orders --depth 3 +/data lineage dim_customers +/data lineage fct_orders --depth 3 ``` ## Required MCP Tools diff --git a/plugins/data-platform/commands/data-profile.md b/plugins/data-platform/commands/data-profile.md index 100cf9b..d570e3a 100644 --- a/plugins/data-platform/commands/data-profile.md +++ 
b/plugins/data-platform/commands/data-profile.md @@ -1,4 +1,8 @@ -# /data-profile - Data Profiling +--- +name: data profile +--- + +# /data profile - Data Profiling ## Skills to Load - skills/data-profiling.md @@ -12,7 +16,7 @@ Display header: `DATA-PLATFORM - Data Profile` ## Usage ``` -/data-profile +/data profile ``` ## Workflow @@ -27,8 +31,8 @@ Execute `skills/data-profiling.md` profiling workflow: ## Examples ``` -/data-profile sales_data -/data-profile df_a1b2c3d4 +/data profile sales_data +/data profile df_a1b2c3d4 ``` ## Required MCP Tools diff --git a/plugins/data-platform/commands/data-quality.md b/plugins/data-platform/commands/data-quality.md index fb2c211..e474d8e 100644 --- a/plugins/data-platform/commands/data-quality.md +++ b/plugins/data-platform/commands/data-quality.md @@ -1,4 +1,8 @@ -# /data-quality - Data Quality Assessment +--- +name: data quality +--- + +# /data quality - Data Quality Assessment ## Skills to Load - skills/data-profiling.md @@ -12,7 +16,7 @@ Display header: `DATA-PLATFORM - Data Quality` ## Usage ``` -/data-quality [--strict] +/data quality [--strict] ``` ## Workflow @@ -33,8 +37,8 @@ Execute `skills/data-profiling.md` quality assessment: ## Examples ``` -/data-quality sales_data -/data-quality df_customers --strict +/data quality sales_data +/data quality df_customers --strict ``` ## Quality Thresholds diff --git a/plugins/data-platform/commands/data-review.md b/plugins/data-platform/commands/data-review.md index 0d450a0..09ebd11 100644 --- a/plugins/data-platform/commands/data-review.md +++ b/plugins/data-platform/commands/data-review.md @@ -1,4 +1,5 @@ --- +name: data review description: Audit data integrity, schema validity, and dbt compliance arguments: - name: path @@ -6,21 +7,21 @@ arguments: required: true --- -# /data-review +# /data review Comprehensive data integrity audit producing a detailed report with findings at all severity levels. For human review and standalone codebase auditing. 
## Usage ``` -/data-review +/data review ``` **Examples:** ``` -/data-review ./dbt/ -/data-review ./portfolio_app/toronto/ -/data-review ./dbt/models/marts/ +/data review ./dbt/ +/data review ./portfolio_app/toronto/ +/data review ./dbt/models/marts/ ``` ## What It Does @@ -79,46 +80,46 @@ VERDICT: PASS | FAIL (N blocking issues) ### Before Sprint Planning Audit data layer health to identify tech debt and inform sprint scope. ``` -/data-review ./dbt/ +/data review ./dbt/ ``` ### During Code Review Get detailed data integrity findings alongside code review comments. ``` -/data-review ./dbt/models/staging/stg_new_source.sql +/data review ./dbt/models/staging/stg_new_source.sql ``` ### After Migrations Verify schema changes didn't break anything downstream. ``` -/data-review ./migrations/ +/data review ./migrations/ ``` ### Periodic Health Checks Regular data infrastructure audits for proactive maintenance. ``` -/data-review ./data_pipeline/ +/data review ./data_pipeline/ ``` ### New Project Onboarding Understand the current state of data architecture. ``` -/data-review . +/data review . ``` ## Severity Levels | Level | Meaning | Gate Impact | |-------|---------|-------------| -| **FAIL** | Blocking issues that will cause runtime errors | Would block `/data-gate` | +| **FAIL** | Blocking issues that will cause runtime errors | Would block `/data gate` | | **WARN** | Quality issues that should be addressed | Does not block gate | | **INFO** | Suggestions for improvement | Does not block gate | -## Differences from /data-gate +## Differences from /data gate -`/data-review` gives you the full picture. `/data-gate` gives the orchestrator a yes/no. +`/data review` gives you the full picture. `/data gate` gives the orchestrator a yes/no. 
-| Aspect | /data-gate | /data-review | +| Aspect | /data gate | /data review | |--------|------------|--------------| | Output | Binary PASS/FAIL | Detailed report | | Severity | FAIL only | FAIL + WARN + INFO | @@ -126,8 +127,8 @@ Understand the current state of data architecture. | Verbosity | Minimal | Comprehensive | | Speed | Fast (skips INFO) | Thorough | -Use `/data-review` when you want to understand. -Use `/data-gate` when you want to automate. +Use `/data review` when you want to understand. +Use `/data gate` when you want to automate. ## Requirements @@ -144,6 +145,6 @@ Use `/data-gate` when you want to automate. ## Related Commands -- `/data-gate` - Binary pass/fail for automation -- `/data-lineage` - Visualize dbt model dependencies -- `/data-schema` - Explore database schema +- `/data gate` - Binary pass/fail for automation +- `/data lineage` - Visualize dbt model dependencies +- `/data schema` - Explore database schema diff --git a/plugins/data-platform/commands/data-run.md b/plugins/data-platform/commands/data-run.md index e1df4b1..fea853b 100644 --- a/plugins/data-platform/commands/data-run.md +++ b/plugins/data-platform/commands/data-run.md @@ -1,4 +1,8 @@ -# /data-run - Execute dbt Models +--- +name: data run +--- + +# /data run - Execute dbt Models ## Skills to Load - skills/dbt-workflow.md @@ -12,7 +16,7 @@ Display header: `DATA-PLATFORM - dbt Run` ## Usage ``` -/data-run [model_selection] [--full-refresh] +/data run [model_selection] [--full-refresh] ``` ## Workflow @@ -30,11 +34,11 @@ See `skills/dbt-workflow.md` for full selection patterns. 
## Examples ``` -/data-run # Run all models -/data-run dim_customers # Run specific model -/data-run +fct_orders # Run model and upstream -/data-run tag:daily # Run models with tag -/data-run --full-refresh # Rebuild incremental models +/data run # Run all models +/data run dim_customers # Run specific model +/data run +fct_orders # Run model and upstream +/data run tag:daily # Run models with tag +/data run --full-refresh # Rebuild incremental models ``` ## Required MCP Tools diff --git a/plugins/data-platform/commands/data-schema.md b/plugins/data-platform/commands/data-schema.md index e22c874..dee48ba 100644 --- a/plugins/data-platform/commands/data-schema.md +++ b/plugins/data-platform/commands/data-schema.md @@ -1,4 +1,8 @@ -# /data-schema - Schema Exploration +--- +name: data schema +--- + +# /data schema - Schema Exploration ## Skills to Load - skills/mcp-tools-reference.md @@ -11,7 +15,7 @@ Display header: `DATA-PLATFORM - Schema Explorer` ## Usage ``` -/data-schema [table_name | data_ref] +/data schema [table_name | data_ref] ``` ## Workflow @@ -30,9 +34,9 @@ Display header: `DATA-PLATFORM - Schema Explorer` ## Examples ``` -/data-schema # List all tables and DataFrames -/data-schema customers # Show table schema -/data-schema sales_data # Show DataFrame schema +/data schema # List all tables and DataFrames +/data schema customers # Show table schema +/data schema sales_data # Show DataFrame schema ``` ## Required MCP Tools diff --git a/plugins/data-platform/commands/data-setup.md b/plugins/data-platform/commands/data-setup.md index 5aced28..bb39d18 100644 --- a/plugins/data-platform/commands/data-setup.md +++ b/plugins/data-platform/commands/data-setup.md @@ -1,4 +1,8 @@ -# /data-setup - Data Platform Setup Wizard +--- +name: data setup +--- + +# /data setup - Data Platform Setup Wizard ## Skills to Load - skills/setup-workflow.md @@ -11,7 +15,7 @@ Display header: `DATA-PLATFORM - Setup Wizard` ## Usage ``` -/data-setup +/data setup ``` ## Workflow diff 
--git a/plugins/data-platform/commands/data.md b/plugins/data-platform/commands/data.md new file mode 100644 index 0000000..40de7aa --- /dev/null +++ b/plugins/data-platform/commands/data.md @@ -0,0 +1,24 @@ +--- +description: Data engineering tools with pandas, PostgreSQL, and dbt +--- + +# /data + +Data engineering tools with pandas, PostgreSQL/PostGIS, and dbt integration. + +## Sub-commands + +| Sub-command | Description | +|-------------|-------------| +| `/data ingest` | Load data from CSV, Parquet, JSON into DataFrame | +| `/data profile` | Generate data profiling report with statistics | +| `/data schema` | Explore database schemas, tables, columns | +| `/data explain` | Explain dbt model and its transformation | +| `/data lineage` | Show dbt model lineage and dependencies | +| `/data lineage-viz` | dbt lineage visualization as Mermaid diagrams | +| `/data run` | Run dbt models with validation | +| `/data dbt-test` | Formatted dbt test runner with summary | +| `/data quality` | DataFrame quality checks | +| `/data review` | Comprehensive data integrity audits | +| `/data gate` | Binary pass/fail data integrity gates | +| `/data setup` | Setup wizard for data-platform MCP servers | diff --git a/plugins/data-platform/skills/data-integrity-audit.md b/plugins/data-platform/skills/data-integrity-audit.md index 7c3394e..60fef2a 100644 --- a/plugins/data-platform/skills/data-integrity-audit.md +++ b/plugins/data-platform/skills/data-integrity-audit.md @@ -215,7 +215,7 @@ Blocking Issues (N): 2. - Fix: -Run /data-review for full audit report. +Run /data review for full audit report. ``` ### Review Mode (Detailed) @@ -293,7 +293,7 @@ Do not flag violations in: When called as a domain gate: 1. Orchestrator detects `Domain/Data` label on issue 2. Orchestrator identifies changed files -3. Orchestrator invokes `/data-gate ` +3. Orchestrator invokes `/data gate ` 4. Agent runs gate mode scan 5. Returns PASS/FAIL to orchestrator 6.
Orchestrator decides whether to complete issue @@ -301,7 +301,7 @@ When called as a domain gate: ### Standalone Usage For manual audits: -1. User runs `/data-review ` +1. User runs `/data review ` 2. Agent runs full review mode scan 3. Returns detailed report with all severity levels 4. User decides on actions diff --git a/plugins/data-seed/.claude-plugin/plugin.json b/plugins/data-seed/.claude-plugin/plugin.json new file mode 100644 index 0000000..659325d --- /dev/null +++ b/plugins/data-seed/.claude-plugin/plugin.json @@ -0,0 +1,25 @@ +{ + "name": "data-seed", + "version": "1.0.0", + "description": "Test data generation and database seeding with reproducible profiles", + "author": { + "name": "Leo Miranda", + "email": "leobmiranda@gmail.com" + }, + "homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/data-seed/README.md", + "repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git", + "license": "MIT", + "keywords": [ + "test-data", + "seeding", + "faker", + "fixtures", + "schema", + "database", + "reproducible" + ], + "commands": [ + "./commands/" + ], + "domain": "data" +} diff --git a/plugins/data-seed/README.md b/plugins/data-seed/README.md new file mode 100644 index 0000000..26c56e8 --- /dev/null +++ b/plugins/data-seed/README.md @@ -0,0 +1,74 @@ +# data-seed Plugin + +Test data generation and database seeding with reproducible profiles for Claude Code. + +## Overview + +The data-seed plugin generates realistic test data from schema definitions. It supports multiple ORM dialects (SQLAlchemy, Prisma, Django ORM, raw SQL DDL), handles foreign key dependencies automatically, and produces output in SQL, JSON, or CSV formats. 
+ +Key features: +- **Schema-first**: Parses your existing schema — no manual configuration needed +- **Realistic data**: Locale-aware faker providers for names, emails, addresses, and more +- **Reproducible**: Deterministic generation from seed profiles +- **Dependency-aware**: Resolves FK relationships and generates in correct insertion order +- **Profile-based**: Reusable profiles for small (unit tests), medium (development), and large (stress tests) + +## Installation + +This plugin is part of the Leo Claude Marketplace. Install via the marketplace or copy the `plugins/data-seed/` directory to your Claude Code plugins path. + +## Commands + +| Command | Description | +|---------|-------------| +| `/seed setup` | Setup wizard — detect schema source, configure output format | +| `/seed generate` | Generate seed data from schema or models | +| `/seed apply` | Apply seed data to database or create fixture files | +| `/seed profile` | Define and manage reusable data profiles | +| `/seed validate` | Validate seed data against schema constraints | + +## Quick Start + +``` +/seed setup # Detect schema, configure output +/seed generate # Generate data with medium profile +/seed validate # Verify generated data integrity +/seed apply # Write fixture files +``` + +## Agents + +| Agent | Model | Role | +|-------|-------|------| +| `seed-generator` | Sonnet | Data generation, profile management, and seed application | +| `seed-validator` | Haiku | Read-only validation of seed data integrity | + +## Skills + +| Skill | Purpose | +|-------|---------| +| `schema-inference` | Parse ORM models and SQL DDL into normalized schema | +| `faker-patterns` | Map columns to realistic faker providers | +| `relationship-resolution` | FK dependency ordering and circular dependency handling | +| `profile-management` | Seed profile CRUD and configuration | +| `visual-header` | Standard visual output formatting | + +## Supported Schema Sources + +- SQLAlchemy models (2.0+ and legacy 1.x) +- 
Prisma schema +- Django ORM models +- Raw SQL DDL (CREATE TABLE statements) +- JSON Schema definitions + +## Output Formats + +- SQL INSERT statements +- JSON fixtures (Django-compatible) +- CSV files +- Prisma seed scripts +- Python factory objects + +## License + +MIT License — Part of the Leo Claude Marketplace. diff --git a/plugins/data-seed/agents/seed-generator.md b/plugins/data-seed/agents/seed-generator.md new file mode 100644 index 0000000..fe07755 --- /dev/null +++ b/plugins/data-seed/agents/seed-generator.md @@ -0,0 +1,96 @@ +--- +name: seed-generator +description: Data generation, profile management, and seed application. Use when generating test data, managing seed profiles, or applying fixtures to databases. +model: sonnet +permissionMode: acceptEdits +--- + +# Seed Generator Agent + +You are a test data generation specialist. Your role is to create realistic, schema-compliant seed data for databases and fixture files using faker patterns, profile-based configuration, and dependency-aware insertion ordering. 
+ +## Visual Output Requirements + +**MANDATORY: Display header at start of every response.** + +``` ++----------------------------------------------------------------------+ +| DATA-SEED - [Command Name] | +| [Context Line] | ++----------------------------------------------------------------------+ +``` + +## Trigger Conditions + +Activate this agent when: +- User runs `/seed setup` +- User runs `/seed generate [options]` +- User runs `/seed apply [options]` +- User runs `/seed profile [action]` + +## Skills to Load + +- skills/schema-inference.md +- skills/faker-patterns.md +- skills/relationship-resolution.md +- skills/profile-management.md +- skills/visual-header.md + +## Core Principles + +### Schema-First Approach +Always derive data generation rules from the schema definition, never from assumptions: +- Parse the actual schema source (SQLAlchemy, Prisma, Django, raw SQL) +- Respect every constraint: NOT NULL, UNIQUE, CHECK, foreign keys, defaults +- Map types precisely — do not generate strings for integer columns or vice versa + +### Reproducibility +- Seed the random number generator from the profile name + table name for deterministic output +- Same profile + same schema = same data every time +- Document the seed value in output metadata for reproducibility + +### Realistic Data +- Use locale-aware faker providers for names, addresses, phone numbers +- Generate plausible relationships (not every user has exactly one order) +- Include edge cases at configurable ratios (empty strings, boundary integers, unicode) +- Distribute enum values with realistic skew (not uniform) + +### Safety +- Never modify schema or drop tables +- Database operations always wrapped in transactions +- TRUNCATE operations require explicit user confirmation +- Display execution plan before applying to database + +## Operating Modes + +### Setup Mode +- Detect project ORM/schema type +- Configure output format and directory +- Initialize default profiles + +### Generate Mode +- 
Parse schema, resolve dependencies, generate data +- Output to configured format (SQL, JSON, CSV, factory objects) + +### Apply Mode +- Read generated seed data +- Apply to database or write framework-specific fixture files +- Support clean (TRUNCATE) + seed workflow + +### Profile Mode +- CRUD operations on data profiles +- Configure row counts, edge case ratios, custom overrides + +## Error Handling + +| Error | Response | +|-------|----------| +| Schema source not found | Prompt user to run `/seed setup` | +| Circular FK dependency detected | Use deferred constraint strategy, explain to user | +| UNIQUE constraint collision after 100 retries | FAIL: report column and suggest increasing uniqueness pool | +| Database connection failed (apply mode) | Report error, suggest using file target instead | +| Unsupported ORM dialect | WARN: fall back to raw SQL DDL parsing | + +## Communication Style + +Clear and structured. Show what will be generated before generating it. Display progress per table during generation. Summarize output with file paths and row counts. For errors, explain the constraint that was violated and suggest a fix. diff --git a/plugins/data-seed/agents/seed-validator.md b/plugins/data-seed/agents/seed-validator.md new file mode 100644 index 0000000..57f770b --- /dev/null +++ b/plugins/data-seed/agents/seed-validator.md @@ -0,0 +1,106 @@ +--- +name: seed-validator +description: Read-only validation of seed data integrity and schema compliance. Use when verifying generated test data against constraints and referential integrity. +model: haiku +permissionMode: plan +disallowedTools: Write, Edit, MultiEdit +--- + +# Seed Validator Agent + +You are a strict seed data integrity auditor. Your role is to validate generated test data against schema definitions, checking type constraints, referential integrity, uniqueness, and statistical properties. You never modify files or data — analysis and reporting only. 
+ +## Visual Output Requirements + +**MANDATORY: Display header at start of every response.** + +``` ++----------------------------------------------------------------------+ +| DATA-SEED - Validate | +| [Profile Name or Target Path] | ++----------------------------------------------------------------------+ +``` + +## Trigger Conditions + +Activate this agent when: +- User runs `/seed validate [options]` +- Generator agent requests post-generation validation + +## Skills to Load + +- skills/schema-inference.md +- skills/relationship-resolution.md +- skills/visual-header.md + +## Validation Categories + +### Type Constraints (FAIL on violation) +- Integer columns must contain valid integers within type range +- String columns must not exceed declared max length +- Date/datetime columns must contain parseable ISO 8601 values +- Boolean columns must contain only true/false/null +- Decimal columns must respect declared precision and scale +- UUID columns must match UUID v4 format +- Enum columns must contain only declared valid values + +### Referential Integrity (FAIL on violation) +- Every foreign key value must reference an existing parent row +- Self-referential keys must reference rows in the same table +- Many-to-many through tables must have valid references on both sides +- Cascading dependency chains must be intact + +### Uniqueness (FAIL on violation) +- Single-column UNIQUE constraints: no duplicates +- Composite unique constraints: no duplicate tuples +- Primary key uniqueness across all rows + +### NOT NULL (FAIL on violation) +- Required columns must not contain null values in any row + +### Statistical Properties (WARN level, --strict only) +- Null ratio within tolerance of profile target +- Edge case ratio within tolerance of profile target +- Value distribution not unrealistically uniform for enum/category columns +- Date ranges within reasonable bounds +- Numeric values within sensible ranges for domain + +## Report Format + +``` 
++----------------------------------------------------------------------+ +| DATA-SEED - Validate | +| Profile: [name] | ++----------------------------------------------------------------------+ + +Tables Validated: N +Rows Checked: N +Constraints Verified: N + +FAIL (N) + 1. [table.column] Description of violation + Fix: Suggested corrective action + +WARN (N) + 1. [table.column] Description of concern + Suggestion: Recommended improvement + +INFO (N) + 1. [table] Statistical observation + Note: Context + +VERDICT: PASS | FAIL (N blocking issues) +``` + +## Error Handling + +| Error | Response | +|-------|----------| +| No seed data found | Report error, suggest running `/seed generate` | +| Schema source missing | Report error, suggest running `/seed setup` | +| Malformed seed file | FAIL: report file path and parse error | +| Profile not found | Use default profile, WARN about missing profile | + +## Communication Style + +Precise and concise. Report exact locations of violations with table name, column name, and row numbers where applicable. Group findings by severity. Always include a clear PASS/FAIL verdict at the end. diff --git a/plugins/data-seed/claude-md-integration.md b/plugins/data-seed/claude-md-integration.md new file mode 100644 index 0000000..648bbe9 --- /dev/null +++ b/plugins/data-seed/claude-md-integration.md @@ -0,0 +1,93 @@ +# data-seed Plugin - CLAUDE.md Integration + +Add this section to your project's CLAUDE.md to enable data-seed plugin features. + +## Suggested CLAUDE.md Section + +```markdown +## Test Data Generation (data-seed) + +This project uses the data-seed plugin for test data generation and database seeding. 
+ +### Configuration + +**Schema Source**: Auto-detected from project ORM (SQLAlchemy, Prisma, Django, raw SQL) +**Output Directory**: `seeds/` or `fixtures/` (configurable via `/seed setup`) +**Profiles**: `seed-profiles.json` in output directory + +### Available Commands + +| Command | Purpose | +|---------|---------| +| `/seed setup` | Configure schema source and output format | +| `/seed generate` | Generate test data from schema | +| `/seed apply` | Apply seed data to database or fixture files | +| `/seed profile` | Manage data profiles (small, medium, large) | +| `/seed validate` | Validate seed data against schema constraints | + +### Data Profiles + +| Profile | Rows/Table | Edge Cases | Use Case | +|---------|------------|------------|----------| +| `small` | 10 | None | Unit tests | +| `medium` | 100 | 10% | Development | +| `large` | 1000 | 5% | Performance testing | + +### Typical Workflow + +``` +/seed setup # First-time configuration +/seed generate --profile medium # Generate development data +/seed validate # Verify integrity +/seed apply --target file # Write fixture files +``` + +### Custom Profiles + +Create custom profiles for project-specific needs: +``` +/seed profile create staging +``` + +Override row counts per table and set custom value pools for enum columns. 
+``` + +## Environment Variables + +Add to project `.env` if needed: + +```env +# Seed data configuration +SEED_OUTPUT_DIR=./seeds +SEED_DEFAULT_PROFILE=medium +SEED_DEFAULT_LOCALE=en_US +``` + +## Typical Workflows + +### Initial Setup +``` +/seed setup # Detect schema, configure output +/seed generate # Generate with default profile +/seed validate # Verify data integrity +``` + +### CI/CD Integration +``` +/seed generate --profile small # Fast, minimal data for tests +/seed apply --target file # Write fixtures +# Run test suite with fixtures +``` + +### Development Environment +``` +/seed generate --profile medium # Realistic development data +/seed apply --target database --clean # Clean and seed database +``` + +### Performance Testing +``` +/seed generate --profile large # High-volume data +/seed apply --target database # Load into test database +# Run performance benchmarks +``` diff --git a/plugins/data-seed/commands/seed-apply.md b/plugins/data-seed/commands/seed-apply.md new file mode 100644 index 0000000..6496496 --- /dev/null +++ b/plugins/data-seed/commands/seed-apply.md @@ -0,0 +1,70 @@ +--- +name: seed apply +--- + +# /seed apply - Apply Seed Data + +## Skills to Load +- skills/profile-management.md +- skills/visual-header.md + +## Visual Output + +Display header: `DATA-SEED - Apply` + +## Usage + +``` +/seed apply [--profile <name>] [--target <database|file>] [--clean] [--dry-run] +``` + +## Workflow + +### 1. Locate Seed Data +- Look for generated seed files in configured output directory +- If no seed data found, prompt user to run `/seed generate` first +- Display available seed datasets with timestamps and profiles + +### 2.
Determine Target +- `--target database`: Apply directly to connected database via SQL execution +- `--target file` (default): Write fixture files for framework consumption +- Auto-detect framework for file output: + - Django: `fixtures/` directory as JSON fixtures compatible with `loaddata` + - SQLAlchemy: Python factory files or SQL insert scripts + - Prisma: `prisma/seed.ts` compatible format + - Generic: SQL insert statements or CSV files + +### 3. Pre-Apply Validation +- If targeting database: verify connection, check table existence +- If `--clean` specified: generate TRUNCATE/DELETE statements for affected tables (respecting FK order) +- Display execution plan showing table order, row counts, and clean operations +- If `--dry-run`: display plan and exit without applying + +### 4. Apply Data +- Execute in dependency order (parents before children) +- If targeting database: wrap in transaction, rollback on error +- If targeting files: write all files atomically +- Track progress: display per-table status during application + +### 5. 
Post-Apply Summary +- Report rows inserted per table +- Report any errors or skipped rows +- Display total execution time +- If database target: verify row counts match expectations + +## Examples + +``` +/seed apply # Write fixture files (default) +/seed apply --target database # Insert directly into database +/seed apply --profile small --clean # Clean + apply small dataset +/seed apply --dry-run # Preview without applying +/seed apply --target database --clean # Truncate then seed database +``` + +## Safety + +- Database operations always use transactions +- `--clean` requires explicit confirmation before executing TRUNCATE +- Never drops tables or modifies schema — seed data only +- `--dry-run` is always safe and produces no side effects diff --git a/plugins/data-seed/commands/seed-generate.md b/plugins/data-seed/commands/seed-generate.md new file mode 100644 index 0000000..936094c --- /dev/null +++ b/plugins/data-seed/commands/seed-generate.md @@ -0,0 +1,71 @@ +--- +name: seed generate +--- + +# /seed generate - Generate Seed Data + +## Skills to Load +- skills/schema-inference.md +- skills/faker-patterns.md +- skills/relationship-resolution.md +- skills/visual-header.md + +## Visual Output + +Display header: `DATA-SEED - Generate` + +## Usage + +``` +/seed generate [table_name] [--profile <name>] [--rows <count>] [--format <sql|json|csv|factory>] [--locale <locale>] +``` + +## Workflow + +### 1. Parse Schema +- Load schema from configured source (see `/seed setup`) +- Extract tables, columns, types, constraints, and relationships +- Use `skills/schema-inference.md` to normalize types across ORM dialects + +### 2. Resolve Generation Order +- Build dependency graph from foreign key relationships +- Use `skills/relationship-resolution.md` to determine insertion order +- Handle circular dependencies via deferred constraint resolution +- If specific `table_name` provided, generate only that table plus its dependencies + +### 3.
Select Profile +- Load profile from `seed-profiles.json` (default: `medium`) +- Override row count if `--rows` specified +- Apply profile-specific edge case ratios and custom value overrides + +### 4. Generate Data +- For each table in dependency order: + - Map column types to faker providers using `skills/faker-patterns.md` + - Respect NOT NULL constraints (never generate null for required fields) + - Respect UNIQUE constraints (track generated values, retry on collision) + - Generate foreign key values from previously generated parent rows + - Apply locale-specific patterns for names, addresses, phone numbers + - Handle enum/check constraints by selecting from valid values only + - Include edge cases per profile settings (empty strings, boundary values, unicode) + +### 5. Output Results +- Write generated data in requested format to configured output directory +- Display summary: tables generated, row counts, file paths +- Report any constraint violations or generation warnings + +## Examples + +``` +/seed generate # All tables, medium profile +/seed generate users # Only users table + dependencies +/seed generate --profile large # All tables, 1000 rows each +/seed generate orders --rows 50 # 50 order rows +/seed generate --format json # Output as JSON fixtures +/seed generate --locale pt_BR # Brazilian Portuguese data +``` + +## Edge Cases + +- Self-referential foreign keys (e.g., `manager_id` on `employees`): generate root rows first, then assign managers from existing rows +- Many-to-many through tables: generate both sides first, then populate junction table +- Nullable foreign keys: generate null values at the profile's configured null ratio diff --git a/plugins/data-seed/commands/seed-profile.md b/plugins/data-seed/commands/seed-profile.md new file mode 100644 index 0000000..e4ed68c --- /dev/null +++ b/plugins/data-seed/commands/seed-profile.md @@ -0,0 +1,86 @@ +--- +name: seed profile +--- + +# /seed profile - Manage Data Profiles + +## Skills to Load +- 
skills/profile-management.md +- skills/visual-header.md + +## Visual Output + +Display header: `DATA-SEED - Profile Management` + +## Usage + +``` +/seed profile list +/seed profile show <name> +/seed profile create <name> +/seed profile edit <name> +/seed profile delete <name> +``` + +## Workflow + +### list — Show All Profiles +- Read `seed-profiles.json` from configured output directory +- Display table: name, row counts per table, edge case ratio, description +- Highlight the default profile + +### show — Profile Details +- Display full profile definition including: + - Per-table row counts + - Edge case configuration (null ratio, boundary values, unicode strings) + - Custom value overrides per column + - Locale settings + - Relationship density settings + +### create — New Profile +- Ask user for profile name and description +- Ask for base row count (applies to all tables unless overridden) +- Ask for per-table overrides (optional) +- Ask for edge case ratio (0.0 = no edge cases, 1.0 = all edge cases) +- Ask for custom column overrides (e.g., `users.role` always "admin") +- Save to `seed-profiles.json` + +### edit — Modify Profile +- Load existing profile, display current values +- Allow user to modify any field interactively +- Save updated profile + +### delete — Remove Profile +- Confirm deletion with user +- Cannot delete the last remaining profile +- Remove from `seed-profiles.json` + +## Profile Schema + +```json +{ + "name": "medium", + "description": "Realistic dataset for development and manual testing", + "default_rows": 100, + "table_overrides": { + "users": 50, + "orders": 200, + "order_items": 500 + }, + "edge_case_ratio": 0.1, + "null_ratio": 0.05, + "locale": "en_US", + "custom_values": { + "users.status": ["active", "active", "active", "inactive"], + "users.role": ["user", "user", "user", "admin"] + } +} +``` + +## Built-in Profiles + +| Profile | Rows | Edge Cases | Use Case | +|---------|------|------------|----------| +| `small` | 10 | 0% | Unit tests, quick
validation | +| `medium` | 100 | 10% | Development, manual testing | +| `large` | 1000 | 5% | Performance testing, stress testing | diff --git a/plugins/data-seed/commands/seed-setup.md b/plugins/data-seed/commands/seed-setup.md new file mode 100644 index 0000000..ad70203 --- /dev/null +++ b/plugins/data-seed/commands/seed-setup.md @@ -0,0 +1,59 @@ +--- +name: seed setup +--- + +# /seed setup - Data Seed Setup Wizard + +## Skills to Load +- skills/schema-inference.md +- skills/visual-header.md + +## Visual Output + +Display header: `DATA-SEED - Setup Wizard` + +## Usage + +``` +/seed setup +``` + +## Workflow + +### Phase 1: Environment Detection +- Detect project type: Python (SQLAlchemy, Django ORM), Node.js (Prisma, TypeORM), or raw SQL +- Check for existing schema files: `schema.prisma`, `models.py`, `*.sql` DDL files +- Identify package manager and installed ORM libraries + +### Phase 2: Schema Source Configuration +- Ask user to confirm detected schema source or specify manually +- Supported sources: + - SQLAlchemy models (`models.py`, `models/` directory) + - Prisma schema (`prisma/schema.prisma`) + - Django models (`models.py` with Django imports) + - Raw SQL DDL files (`*.sql` with CREATE TABLE statements) + - JSON Schema definitions (`*.schema.json`) +- Store schema source path for future commands + +### Phase 3: Output Configuration +- Ask preferred output format: SQL inserts, JSON fixtures, CSV files, or ORM factory objects +- Ask preferred output directory (default: `seeds/` or `fixtures/`) +- Ask default locale for faker data (default: `en_US`) + +### Phase 4: Profile Initialization +- Create default profiles if none exist: + - `small` — 10 rows per table, minimal relationships + - `medium` — 100 rows per table, realistic relationships + - `large` — 1000 rows per table, stress-test volume +- Store profiles in `seed-profiles.json` in output directory + +### Phase 5: Validation +- Verify schema can be parsed from detected source +- Display summary with 
detected tables, column counts, and relationship map +- Inform user of available commands + +## Important Notes + +- Uses Bash, Read, Write, AskUserQuestion tools +- Does not require database connection (schema-first approach) +- Profile definitions are portable across environments diff --git a/plugins/data-seed/commands/seed-validate.md b/plugins/data-seed/commands/seed-validate.md new file mode 100644 index 0000000..39df615 --- /dev/null +++ b/plugins/data-seed/commands/seed-validate.md @@ -0,0 +1,98 @@ +--- +name: seed validate +--- + +# /seed validate - Validate Seed Data + +## Skills to Load +- skills/schema-inference.md +- skills/relationship-resolution.md +- skills/visual-header.md + +## Visual Output + +Display header: `DATA-SEED - Validate` + +## Usage + +``` +/seed validate [--profile <name>] [--strict] +``` + +## Workflow + +### 1. Load Schema and Seed Data +- Parse schema from configured source using `skills/schema-inference.md` +- Load generated seed data from output directory +- If no seed data found, report error and suggest running `/seed generate` + +### 2. Type Constraint Validation +- For each column in each table, verify generated values match declared type: + - Integer columns contain only integers within range (INT, BIGINT, SMALLINT) + - String columns respect max length constraints (VARCHAR(N)) + - Date/datetime columns contain parseable date values + - Boolean columns contain only true/false/null + - Decimal columns respect precision and scale + - UUID columns contain valid UUID format + - Enum columns contain only declared valid values + +### 3. Referential Integrity Validation +- Use `skills/relationship-resolution.md` to build FK dependency graph +- For every foreign key value in child tables, verify parent row exists +- For self-referential keys, verify referenced row exists in same table +- For many-to-many through tables, verify both sides exist +- Report orphaned references as FAIL + +### 4.
Constraint Compliance +- NOT NULL: verify no null values in required columns +- UNIQUE: verify no duplicate values in unique columns or unique-together groups +- CHECK constraints: evaluate check expressions against generated data +- Default values: verify defaults are applied where column value is omitted + +### 5. Statistical Validation (--strict mode) +- Verify null ratio matches profile configuration within tolerance +- Verify edge case ratio matches profile configuration +- Verify row counts match profile specification +- Verify distribution of enum/category values is not unrealistically uniform +- Verify date ranges are within reasonable bounds (not year 9999) + +### 6. Report +- Display validation results grouped by severity: + - **FAIL**: Type mismatch, FK violation, NOT NULL violation, UNIQUE violation + - **WARN**: Unrealistic distributions, unexpected null ratios, date range issues + - **INFO**: Statistics summary, coverage metrics + +``` ++----------------------------------------------------------------------+ +| DATA-SEED - Validate | +| Profile: medium | ++----------------------------------------------------------------------+ + +Tables Validated: 8 +Rows Checked: 1,450 +Constraints Verified: 42 + +FAIL (0) + No blocking violations found. + +WARN (2) + 1. [orders.created_at] Date range spans 200 years + Suggestion: Constrain date generator to recent years + + 2. [users.email] 3 duplicate values detected + Suggestion: Increase faker uniqueness retry count + +INFO (1) + 1. [order_items] Null ratio 0.12 (profile target: 0.10) + Within acceptable tolerance. 
+ +VERDICT: PASS (0 blocking issues) +``` + +## Examples + +``` +/seed validate # Standard validation +/seed validate --profile large # Validate large profile data +/seed validate --strict # Include statistical checks +``` diff --git a/plugins/data-seed/commands/seed.md b/plugins/data-seed/commands/seed.md new file mode 100644 index 0000000..56517b4 --- /dev/null +++ b/plugins/data-seed/commands/seed.md @@ -0,0 +1,17 @@ +--- +description: Test data generation — create realistic fake data from schema definitions +--- + +# /seed + +Test data generation and database seeding with reproducible profiles. + +## Sub-commands + +| Sub-command | Description | +|-------------|-------------| +| `/seed setup` | Setup wizard for data-seed configuration | +| `/seed generate` | Generate seed data from schema or models | +| `/seed apply` | Apply seed data to database or create fixture files | +| `/seed profile` | Define reusable data profiles (small, medium, large) | +| `/seed validate` | Validate seed data against schema constraints | diff --git a/plugins/data-seed/skills/faker-patterns.md b/plugins/data-seed/skills/faker-patterns.md new file mode 100644 index 0000000..b4c0961 --- /dev/null +++ b/plugins/data-seed/skills/faker-patterns.md @@ -0,0 +1,90 @@ +--- +name: faker-patterns +description: Realistic data generation patterns using faker providers with locale awareness +--- + +# Faker Patterns + +## Purpose + +Map schema column types and naming conventions to appropriate faker data generators. This skill ensures generated test data is realistic, locale-aware, and respects type constraints. 
+ +--- + +## Column Name to Provider Mapping + +Use column name heuristics to select the most realistic faker provider: + +| Column Name Pattern | Faker Provider | Example Output | +|---------------------|---------------|----------------| +| `*name`, `first_name` | `faker.name()` / `faker.first_name()` | "Alice Johnson" | +| `*last_name`, `surname` | `faker.last_name()` | "Rodriguez" | +| `*email` | `faker.email()` | "alice@example.com" | +| `*phone*`, `*tel*` | `faker.phone_number()` | "+1-555-0123" | +| `*address*`, `*street*` | `faker.street_address()` | "742 Evergreen Terrace" | +| `*city` | `faker.city()` | "Toronto" | +| `*state*`, `*province*` | `faker.state()` | "Ontario" | +| `*country*` | `faker.country()` | "Canada" | +| `*zip*`, `*postal*` | `faker.postcode()` | "M5V 2H1" | +| `*url*`, `*website*` | `faker.url()` | "https://example.com" | +| `*company*`, `*org*` | `faker.company()` | "Acme Corp" | +| `*title*`, `*subject*` | `faker.sentence(nb_words=5)` | "Updated quarterly report summary" | +| `*description*`, `*bio*`, `*body*` | `faker.paragraph()` | Multi-sentence text | +| `*created*`, `*updated*`, `*_at` | `faker.date_time_between(start_date='-2y')` | "2024-06-15T10:30:00" | +| `*date*`, `*dob*`, `*birth*` | `faker.date_of_birth(minimum_age=18)` | "1990-03-22" | +| `*price*`, `*amount*`, `*cost*` | `faker.pydecimal(min_value=0.01, max_value=9999.99)` | 49.99 | +| `*quantity*`, `*count*` | `faker.random_int(min=1, max=100)` | 7 | +| `*status*` | Random from enum or `["active", "inactive", "pending"]` | "active" | +| `*uuid*`, `*guid*` | `faker.uuid4()` | "550e8400-e29b-41d4-a716-446655440000" | +| `*ip*`, `*ip_address*` | `faker.ipv4()` | "192.168.1.42" | +| `*color*`, `*colour*` | `faker.hex_color()` | "#3498db" | +| `*password*`, `*hash*` | `faker.sha256()` | Hash string (never plaintext) | +| `*image*`, `*avatar*`, `*photo*` | `faker.image_url()` | "https://picsum.photos/200" | +| `*slug*` | `faker.slug()` | "updated-quarterly-report" | +| 
`*username*`, `*login*` | `faker.user_name()` | "alice_johnson42" | + +## Type Fallback Mapping + +When column name does not match any pattern, fall back to type-based generation: + +| Canonical Type | Generator | +|----------------|-----------| +| `string` | `faker.pystr(max_chars=max_length)` | +| `integer` | `faker.random_int(min=0, max=2147483647)` | +| `float` | `faker.pyfloat(min_value=0, max_value=10000)` | +| `decimal` | `faker.pydecimal(left_digits=precision-scale, right_digits=scale)` | +| `boolean` | `faker.pybool()` | +| `datetime` | `faker.date_time_between(start_date='-2y', end_date='now')` | +| `date` | `faker.date_between(start_date='-2y', end_date='today')` | +| `uuid` | `faker.uuid4()` | +| `json` | `{"key": faker.word(), "value": faker.sentence()}` | + +## Locale Support + +Supported locales affect names, addresses, phone formats, and postal codes: + +| Locale | Names | Addresses | Phone | Currency | +|--------|-------|-----------|-------|----------| +| `en_US` | English names | US addresses | US format | USD | +| `en_CA` | English names | Canadian addresses | CA format | CAD | +| `en_GB` | English names | UK addresses | UK format | GBP | +| `pt_BR` | Portuguese names | Brazilian addresses | BR format | BRL | +| `fr_FR` | French names | French addresses | FR format | EUR | +| `de_DE` | German names | German addresses | DE format | EUR | +| `ja_JP` | Japanese names | Japanese addresses | JP format | JPY | +| `es_ES` | Spanish names | Spanish addresses | ES format | EUR | + +Default locale: `en_US`. Override per-profile or per-command with `--locale`. 
+ +## Edge Case Values + +Include at configurable ratio (default 10%): + +| Type | Edge Cases | +|------|------------| +| `string` | Empty string `""`, max-length string, unicode characters, emoji, SQL special chars `'; DROP TABLE --` | +| `integer` | 0, -1, MAX_INT, MIN_INT | +| `float` | 0.0, -0.0, very small (0.0001), very large (999999.99) | +| `date` | Today, yesterday, epoch (1970-01-01), leap day (2024-02-29) | +| `boolean` | null (if nullable) | +| `email` | Plus-addressed `user+tag@example.com`, long domain, subdomain email | diff --git a/plugins/data-seed/skills/profile-management.md b/plugins/data-seed/skills/profile-management.md new file mode 100644 index 0000000..97b36bd --- /dev/null +++ b/plugins/data-seed/skills/profile-management.md @@ -0,0 +1,116 @@ +--- +name: profile-management +description: Seed profile definitions with row counts, edge case ratios, and custom value overrides +--- + +# Profile Management + +## Purpose + +Define and manage reusable seed data profiles that control how much data is generated, what edge cases are included, and what custom overrides apply. Profiles enable reproducible, consistent test data across environments. + +--- + +## Profile Storage + +Profiles are stored in `seed-profiles.json` in the configured output directory (default: `seeds/` or `fixtures/`). 
+ +## Profile Schema + +```json +{ + "profiles": [ + { + "name": "profile-name", + "description": "Human-readable description", + "default_rows": 100, + "table_overrides": { + "table_name": 200 + }, + "edge_case_ratio": 0.1, + "null_ratio": 0.05, + "locale": "en_US", + "seed_value": 42, + "custom_values": { + "table.column": ["value1", "value2", "value3"] + }, + "relationship_density": { + "many_to_many": 0.3, + "self_ref_max_depth": 3 + } + } + ], + "default_profile": "medium" +} +``` + +## Field Definitions + +| Field | Type | Required | Description | +|-------|------|----------|-------------| +| `name` | string | Yes | Unique profile identifier (lowercase, hyphens allowed) | +| `description` | string | No | What this profile is for | +| `default_rows` | integer | Yes | Row count for tables without explicit override | +| `table_overrides` | object | No | Per-table row count overrides | +| `edge_case_ratio` | float | No | Fraction of rows with edge case values (0.0 to 1.0, default 0.1) | +| `null_ratio` | float | No | Fraction of nullable columns set to null (0.0 to 1.0, default 0.05) | +| `locale` | string | No | Faker locale for name/address generation (default "en_US") | +| `seed_value` | integer | No | Random seed for reproducibility (default: hash of profile name) | +| `custom_values` | object | No | Column-specific value pools (table.column -> array of values) | +| `relationship_density` | object | No | Controls many-to-many fill ratio and self-referential depth | + +## Built-in Profiles + +### small +- `default_rows`: 10 +- `edge_case_ratio`: 0.0 +- `null_ratio`: 0.0 +- Use case: unit tests, schema validation, quick smoke tests +- Characteristics: minimal data, no edge cases, all required fields populated + +### medium +- `default_rows`: 100 +- `edge_case_ratio`: 0.1 +- `null_ratio`: 0.05 +- Use case: development, manual testing, demo environments +- Characteristics: realistic volume, occasional edge cases, some nulls + +### large +- `default_rows`: 1000 +- 
`edge_case_ratio`: 0.05 +- `null_ratio`: 0.03 +- Use case: performance testing, pagination testing, stress testing +- Characteristics: high volume, lower edge case ratio to avoid noise + +## Custom Value Overrides + +Override the faker generator for specific columns with a weighted value pool: + +```json +{ + "custom_values": { + "users.role": ["user", "user", "user", "admin"], + "orders.status": ["completed", "completed", "pending", "cancelled", "refunded"], + "products.currency": ["USD"] + } +} +``` + +Values are selected randomly with replacement. Duplicate entries in the array increase that value's probability (e.g., "user" appears 3x = 75% probability). + +## Profile Operations + +### Resolution Order +When determining row count for a table: +1. Command-line `--rows` flag (highest priority) +2. Profile `table_overrides` for that specific table +3. Profile `default_rows` +4. Built-in default: 100 + +### Validation Rules +- Profile name must be unique within `seed-profiles.json` +- `default_rows` must be >= 1 +- `edge_case_ratio` must be between 0.0 and 1.0 +- `null_ratio` must be between 0.0 and 1.0 +- Custom value arrays must not be empty +- Cannot delete the last remaining profile diff --git a/plugins/data-seed/skills/relationship-resolution.md b/plugins/data-seed/skills/relationship-resolution.md new file mode 100644 index 0000000..610ef91 --- /dev/null +++ b/plugins/data-seed/skills/relationship-resolution.md @@ -0,0 +1,118 @@ +--- +name: relationship-resolution +description: Foreign key resolution, dependency ordering, and circular dependency handling for seed data +--- + +# Relationship Resolution + +## Purpose + +Determine the correct order for generating and inserting seed data across tables with foreign key dependencies. Handle edge cases including circular dependencies, self-referential relationships, and many-to-many through tables. 
+ +--- + +## Dependency Graph Construction + +### Step 1: Extract Foreign Keys +For each table, identify all columns with foreign key constraints: +- Direct FK references to other tables +- Self-referential FKs (same table) +- Composite FKs spanning multiple columns + +### Step 2: Build Directed Graph +- Each table is a node +- Each FK creates a directed edge: child -> parent (child depends on parent) +- Self-referential edges are noted but excluded from ordering (handled separately) + +### Step 3: Topological Sort +- Apply topological sort to determine insertion order +- Tables with no dependencies come first +- Tables depending on others come after their dependencies +- Result: ordered list where every table's dependencies appear before it + +## Insertion Order Example + +Given schema: +``` +users (no FK) +categories (no FK) +products (FK -> categories) +orders (FK -> users) +order_items (FK -> orders, FK -> products) +reviews (FK -> users, FK -> products) +``` + +Insertion order: `users, categories, products, orders, order_items, reviews` + +Deletion order (reverse): `reviews, order_items, orders, products, categories, users` + +## Circular Dependency Handling + +When topological sort detects a cycle: + +### Strategy 1: Nullable FK Deferral +If one FK in the cycle is nullable: +1. Insert rows with nullable FK set to NULL +2. Complete the cycle for the other table +3. UPDATE the nullable FK to point to the now-existing rows + +Example: `departments.manager_id -> employees`, `employees.department_id -> departments` +1. Insert departments with `manager_id = NULL` +2. Insert employees referencing departments +3. UPDATE departments to set `manager_id` to an employee + +### Strategy 2: Deferred Constraints +If database supports deferred constraints (PostgreSQL): +1. Set FK constraints to DEFERRED within transaction +2. Insert all rows in any order +3. Constraints checked at COMMIT time + +### Strategy 3: Two-Pass Generation +If neither strategy works: +1. 
First pass: generate all rows without cross-cycle FK values +2. Second pass: update FK values to reference generated rows from the other table + +## Self-Referential Relationships + +Common pattern: `employees.manager_id -> employees.id` + +### Generation Strategy +1. Generate root rows first (manager_id = NULL) — these are top-level managers +2. Generate second tier referencing root rows +3. Generate remaining rows referencing any previously generated row +4. Depth distribution controlled by profile (default: max depth 3, pyramid shape) + +### Configuration +```json +{ + "self_ref_null_ratio": 0.1, + "self_ref_max_depth": 3, + "self_ref_distribution": "pyramid" +} +``` + +## Many-to-Many Through Tables + +Detection: a table with exactly two FK columns and no non-FK data columns (excluding PK and timestamps). + +### Generation Strategy +1. Generate both parent tables first +2. Generate through table rows pairing random parents +3. Respect uniqueness on the (FK1, FK2) composite — no duplicate pairings +4. Density controlled by profile: sparse (10% of possible pairs), medium (30%), dense (60%) + +## Deletion Order + +When `--clean` is specified for `/seed apply`: +1. Reverse the insertion order +2. TRUNCATE or DELETE in this order to avoid FK violations +3. 
For circular dependencies: disable FK checks, truncate, re-enable (with user confirmation) + +## Error Handling + +| Scenario | Response | +|----------|----------| +| Unresolvable cycle (no nullable FKs, no deferred constraints) | FAIL: report cycle, suggest schema modification | +| Missing parent table in schema | FAIL: report orphaned FK reference | +| FK references non-existent column | FAIL: report schema inconsistency | +| Through table detection false positive | WARN: ask user to confirm junction table identification | diff --git a/plugins/data-seed/skills/schema-inference.md b/plugins/data-seed/skills/schema-inference.md new file mode 100644 index 0000000..55cde93 --- /dev/null +++ b/plugins/data-seed/skills/schema-inference.md @@ -0,0 +1,81 @@ +--- +name: schema-inference +description: Infer data types, constraints, and relationships from ORM models or raw SQL DDL +--- + +# Schema Inference + +## Purpose + +Parse and normalize schema definitions from multiple ORM dialects into a unified internal representation. This skill enables data generation and validation commands to work across SQLAlchemy, Prisma, Django ORM, and raw SQL DDL without dialect-specific logic in every command. 
+ +--- + +## Supported Schema Sources + +| Source | Detection | File Patterns | +|--------|-----------|---------------| +| SQLAlchemy | `from sqlalchemy import`, `Column(`, `mapped_column(` | `models.py`, `models/*.py` | +| Prisma | `model` blocks with `@id`, `@relation` | `prisma/schema.prisma` | +| Django ORM | `from django.db import models`, `models.CharField` | `models.py` with Django imports | +| Raw SQL DDL | `CREATE TABLE` statements | `*.sql`, `schema.sql`, `migrations/*.sql` | +| JSON Schema | `"type": "object"`, `"properties":` | `*.schema.json` | + +## Type Normalization + +Map dialect-specific types to a canonical set: + +| Canonical Type | SQLAlchemy | Prisma | Django | SQL | +|----------------|------------|--------|--------|-----| +| `string` | `String(N)`, `Text` | `String` | `CharField`, `TextField` | `VARCHAR(N)`, `TEXT` | +| `integer` | `Integer`, `BigInteger`, `SmallInteger` | `Int`, `BigInt` | `IntegerField`, `BigIntegerField` | `INT`, `BIGINT`, `SMALLINT` | +| `float` | `Float`, `Numeric` | `Float` | `FloatField` | `FLOAT`, `REAL`, `DOUBLE` | +| `decimal` | `Numeric(P,S)` | `Decimal` | `DecimalField` | `DECIMAL(P,S)`, `NUMERIC(P,S)` | +| `boolean` | `Boolean` | `Boolean` | `BooleanField` | `BOOLEAN`, `BIT` | +| `datetime` | `DateTime` | `DateTime` | `DateTimeField` | `TIMESTAMP`, `DATETIME` | +| `date` | `Date` | `DateTime` | `DateField` | `DATE` | +| `uuid` | `UUID` | `String @default(uuid())` | `UUIDField` | `UUID` | +| `json` | `JSON` | `Json` | `JSONField` | `JSON`, `JSONB` | +| `enum` | `Enum(...)` | `enum` block | `choices=` | `ENUM(...)`, `CHECK IN (...)` | + +## Constraint Extraction + +For each column, extract: +- **nullable**: Whether NULL values are allowed (default: true unless PK or explicit NOT NULL) +- **unique**: Whether values must be unique +- **max_length**: For string types, the maximum character length +- **precision/scale**: For decimal types +- **default**: Default value expression +- **check**: CHECK constraint 
expressions (e.g., `age >= 0`) +- **primary_key**: Whether this column is part of the primary key + +## Relationship Extraction + +Identify foreign key relationships: +- **parent_table**: The referenced table +- **parent_column**: The referenced column (usually PK) +- **on_delete**: CASCADE, SET NULL, RESTRICT, NO ACTION +- **self_referential**: True if FK references same table +- **many_to_many**: Detected from junction/through tables with two FKs and no additional non-FK columns + +## Output Format + +Internal representation used by other skills: + +```json +{ + "tables": { + "users": { + "columns": { + "id": {"type": "integer", "primary_key": true, "nullable": false}, + "email": {"type": "string", "max_length": 255, "unique": true, "nullable": false}, + "name": {"type": "string", "max_length": 100, "nullable": false}, + "manager_id": {"type": "integer", "nullable": true, "foreign_key": {"table": "users", "column": "id"}} + }, + "relationships": [ + {"type": "self_referential", "column": "manager_id", "references": "users.id"} + ] + } + } +} +``` diff --git a/plugins/data-seed/skills/visual-header.md b/plugins/data-seed/skills/visual-header.md new file mode 100644 index 0000000..2bb4992 --- /dev/null +++ b/plugins/data-seed/skills/visual-header.md @@ -0,0 +1,27 @@ +# Visual Header Skill + +Standard visual header for data-seed commands. 
+ +## Header Template + +``` ++----------------------------------------------------------------------+ +| DATA-SEED - [Context] | ++----------------------------------------------------------------------+ +``` + +## Context Values by Command + +| Command | Context | +|---------|---------| +| `/seed setup` | Setup Wizard | +| `/seed generate` | Generate | +| `/seed apply` | Apply | +| `/seed profile` | Profile Management | +| `/seed validate` | Validate | +| Agent mode (seed-generator) | Data Generation | +| Agent mode (seed-validator) | Validation | + +## Usage + +Display header at the start of every command response before proceeding with the operation. diff --git a/plugins/debug-mcp/.claude-plugin/plugin.json b/plugins/debug-mcp/.claude-plugin/plugin.json new file mode 100644 index 0000000..08303ba --- /dev/null +++ b/plugins/debug-mcp/.claude-plugin/plugin.json @@ -0,0 +1,25 @@ +{ + "name": "debug-mcp", + "version": "1.0.0", + "description": "MCP server debugging, inspection, and development toolkit", + "author": { + "name": "Leo Miranda", + "email": "leobmiranda@gmail.com" + }, + "homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/debug-mcp/README.md", + "repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git", + "license": "MIT", + "keywords": [ + "mcp", + "debugging", + "inspection", + "development", + "server", + "protocol", + "diagnostics" + ], + "commands": [ + "./commands/" + ], + "domain": "debug" +} diff --git a/plugins/debug-mcp/README.md b/plugins/debug-mcp/README.md new file mode 100644 index 0000000..234ad50 --- /dev/null +++ b/plugins/debug-mcp/README.md @@ -0,0 +1,62 @@ +# debug-mcp + +MCP server debugging, inspection, and development toolkit. + +## Overview + +This plugin provides tools for diagnosing MCP server issues, testing tool invocations, analyzing server logs, inspecting configurations and dependencies, and scaffolding new MCP servers. 
It is essential for maintaining and developing MCP integrations in the Leo Claude Marketplace. + +## Commands + +| Command | Description | +|---------|-------------| +| `/debug-mcp status` | Show all MCP servers with health status | +| `/debug-mcp test` | Test a specific MCP tool call | +| `/debug-mcp logs` | View recent MCP server logs and errors | +| `/debug-mcp inspect` | Inspect MCP server config and dependencies | +| `/debug-mcp scaffold` | Generate MCP server skeleton project | + +## Agent + +| Agent | Model | Mode | Purpose | +|-------|-------|------|---------| +| mcp-debugger | sonnet | default | All debug-mcp operations: inspection, testing, log analysis, scaffolding | + +## Skills + +| Skill | Description | +|-------|-------------| +| mcp-protocol | MCP stdio protocol specification, JSON-RPC messages, tool/resource/prompt definitions | +| server-patterns | Python MCP server directory structure, FastMCP pattern, config loader, entry points | +| venv-diagnostics | Virtual environment health checks: existence, Python binary, packages, imports | +| log-analysis | Common MCP error patterns with root causes and fixes | +| visual-header | Standard command output header | + +## Architecture + +``` +plugins/debug-mcp/ +├── .claude-plugin/ +│ └── plugin.json +├── commands/ +│ ├── debug-mcp.md # Dispatch file +│ ├── debug-mcp-status.md +│ ├── debug-mcp-test.md +│ ├── debug-mcp-logs.md +│ ├── debug-mcp-inspect.md +│ └── debug-mcp-scaffold.md +├── agents/ +│ └── mcp-debugger.md +├── skills/ +│ ├── mcp-protocol.md +│ ├── server-patterns.md +│ ├── venv-diagnostics.md +│ ├── log-analysis.md +│ └── visual-header.md +├── claude-md-integration.md +└── README.md +``` + +## License + +MIT License - Part of the Leo Claude Marketplace. 
diff --git a/plugins/debug-mcp/agents/mcp-debugger.md b/plugins/debug-mcp/agents/mcp-debugger.md new file mode 100644 index 0000000..2e81f6b --- /dev/null +++ b/plugins/debug-mcp/agents/mcp-debugger.md @@ -0,0 +1,95 @@ +--- +name: mcp-debugger +description: MCP server inspection, log analysis, and scaffold generation. Use for debugging MCP connectivity issues, testing tools, inspecting server configs, and creating new MCP servers. +model: sonnet +permissionMode: default +--- + +# MCP Debugger Agent + +You are an MCP (Model Context Protocol) server specialist. You diagnose MCP server issues, inspect configurations, analyze logs, test tool invocations, and scaffold new servers. + +## Skills to Load + +- `skills/visual-header.md` +- `skills/mcp-protocol.md` +- `skills/server-patterns.md` +- `skills/venv-diagnostics.md` +- `skills/log-analysis.md` + +## Visual Output Requirements + +**MANDATORY: Display header at start of every response.** + +``` ++----------------------------------------------------------------------+ +| DEBUG-MCP - [Context] | ++----------------------------------------------------------------------+ +``` + +## Core Knowledge + +### .mcp.json Structure + +The `.mcp.json` file in the project root defines all MCP servers: + +```json +{ + "mcpServers": { + "server-name": { + "command": "path/to/.venv/bin/python", + "args": ["-m", "mcp_server.server"], + "cwd": "path/to/server/dir" + } + } +} +``` + +### MCP Server Lifecycle + +1. Claude Code reads `.mcp.json` at session start +2. For each server, spawns the command as a subprocess +3. Communication happens over stdio (JSON-RPC) +4. Server registers tools, resources, and prompts +5. 
Claude Code makes tool calls as needed during conversation + +### Common Failure Points + +| Failure | Symptom | Root Cause | +|---------|---------|------------| +| "X MCP servers failed" | Session start warning | Broken venv, missing deps, bad config | +| Tool not found | Tool call returns error | Server not loaded, tool name wrong | +| Timeout | Tool call hangs | Server crashed, infinite loop, network | +| Permission denied | API errors | Invalid token, expired credentials | + +## Behavior Guidelines + +### Diagnostics + +1. **Always start with .mcp.json** - Read it first to understand the server landscape +2. **Check venvs systematically** - Use `skills/venv-diagnostics.md` patterns +3. **Read actual error messages** - Parse logs rather than guessing +4. **Test incrementally** - Verify executable, then import, then tool call + +### Scaffolding + +1. **Follow existing patterns** - Match the structure of existing servers in `mcp-servers/` +2. **Use FastMCP** - Prefer the decorator-based pattern for new servers +3. **Include config.py** - Always generate a configuration loader with env file support +4. **Register in .mcp.json** - Show the user the entry to add, confirm before writing + +### Security + +1. **Never display full API tokens** - Mask all but last 4 characters +2. **Check .gitignore** - Ensure credential files are excluded from version control +3. 
**Validate SSL settings** - Warn if SSL verification is disabled + +## Available Commands + +| Command | Purpose | +|---------|---------| +| `/debug-mcp status` | Server health overview | +| `/debug-mcp test` | Test a specific tool call | +| `/debug-mcp logs` | View and analyze server logs | +| `/debug-mcp inspect` | Deep server inspection | +| `/debug-mcp scaffold` | Generate new server skeleton | diff --git a/plugins/debug-mcp/claude-md-integration.md b/plugins/debug-mcp/claude-md-integration.md new file mode 100644 index 0000000..75052c8 --- /dev/null +++ b/plugins/debug-mcp/claude-md-integration.md @@ -0,0 +1,34 @@ +# Debug MCP Integration + +Add to your project's CLAUDE.md: + +## MCP Server Debugging (debug-mcp) + +This project uses the **debug-mcp** plugin for diagnosing and developing MCP server integrations. + +### Available Commands + +| Command | Description | +|---------|-------------| +| `/debug-mcp status` | Show health status of all configured MCP servers | +| `/debug-mcp test` | Test a specific MCP tool call with parameters | +| `/debug-mcp logs` | View and analyze recent MCP server error logs | +| `/debug-mcp inspect` | Deep inspection of server config, dependencies, and tools | +| `/debug-mcp scaffold` | Generate a new MCP server project skeleton | + +### Usage Guidelines + +- Run `/debug-mcp status` when Claude Code reports MCP server failures at session start +- Use `/debug-mcp inspect --deps` to diagnose missing package issues +- Use `/debug-mcp test ` to verify individual tool functionality +- Use `/debug-mcp logs --errors-only` to quickly find error patterns +- Use `/debug-mcp scaffold` when creating a new MCP server integration + +### Common Troubleshooting + +| Symptom | Command | +|---------|---------| +| "X MCP servers failed" at startup | `/debug-mcp status` | +| Tool call returns error | `/debug-mcp test ` | +| ImportError in server | `/debug-mcp inspect --deps` | +| Unknown server errors | `/debug-mcp logs --server=` | diff --git 
a/plugins/debug-mcp/commands/debug-mcp-inspect.md b/plugins/debug-mcp/commands/debug-mcp-inspect.md new file mode 100644 index 0000000..0fec606 --- /dev/null +++ b/plugins/debug-mcp/commands/debug-mcp-inspect.md @@ -0,0 +1,129 @@ +--- +name: debug-mcp inspect +description: Inspect MCP server configuration, dependencies, and tool definitions +--- + +# /debug-mcp inspect + +Deep inspection of an MCP server's configuration, dependencies, and tool definitions. + +## Skills to Load + +- `skills/visual-header.md` +- `skills/venv-diagnostics.md` +- `skills/mcp-protocol.md` + +## Agent + +Delegate to `agents/mcp-debugger.md`. + +## Usage + +``` +/debug-mcp inspect [--tools] [--deps] [--config] +``` + +**Arguments:** +- `server_name` - Name of the MCP server from .mcp.json + +**Options:** +- `--tools` - List all registered tools with their schemas +- `--deps` - Show dependency analysis (installed vs required) +- `--config` - Show configuration files and environment variables +- (no flags) - Show all sections + +## Instructions + +Execute `skills/visual-header.md` with context "Server Inspection". + +### Phase 1: Configuration + +1. Read `.mcp.json` and extract the server definition +2. Display: + - Server name + - Command and arguments + - Working directory + - Environment variable references + +``` +## Server: gitea + +### Configuration (.mcp.json) +- Command: /path/to/mcp-servers/gitea/.venv/bin/python +- Args: ["-m", "mcp_server.server"] +- CWD: /path/to/mcp-servers/gitea +- Env file: ~/.config/claude/gitea.env +``` + +### Phase 2: Dependencies (--deps) + +Apply `skills/venv-diagnostics.md`: + +1. Read `requirements.txt` from the server's cwd +2. Compare with installed packages: + ```bash + cd && .venv/bin/pip freeze + ``` +3. 
Identify: + - Missing packages (in requirements but not installed) + - Version mismatches (installed version differs from required) + - Extra packages (installed but not in requirements) + +``` +### Dependencies + +| Package | Required | Installed | Status | +|---------|----------|-----------|--------| +| mcp | >=1.0.0 | 1.2.3 | OK | +| httpx | >=0.24 | 0.25.0 | OK | +| pynetbox | >=7.0 | — | MISSING | + +- Missing: 1 package +- Mismatched: 0 packages +``` + +### Phase 3: Tools (--tools) + +Parse the server source code to extract tool definitions: + +1. Find Python files with `@mcp.tool` decorators or `server.add_tool()` calls +2. Extract tool name, description, and parameter schema +3. Group by module/category if applicable + +``` +### Tools (42 registered) + +#### Issues (6 tools) +| Tool | Description | Params | +|------|-------------|--------| +| list_issues | List issues from repository | state?, labels?, repo? | +| get_issue | Get specific issue | issue_number (required) | +| create_issue | Create new issue | title (required), body (required) | +... +``` + +### Phase 4: Environment Configuration (--config) + +1. Locate env file referenced in .mcp.json +2. Read the file (mask secret values) +3. 
Check for missing required variables + +``` +### Environment Configuration + +File: ~/.config/claude/gitea.env +| Variable | Value | Status | +|----------|-------|--------| +| GITEA_API_URL | https://gitea.example.com/api/v1 | OK | +| GITEA_API_TOKEN | ****...a1b2 | OK | + +File: .env (project level) +| Variable | Value | Status | +|----------|-------|--------| +| GITEA_ORG | personal-projects | OK | +| GITEA_REPO | leo-claude-mktplace | OK | +``` + +## User Request + +$ARGUMENTS diff --git a/plugins/debug-mcp/commands/debug-mcp-logs.md b/plugins/debug-mcp/commands/debug-mcp-logs.md new file mode 100644 index 0000000..ad9d71f --- /dev/null +++ b/plugins/debug-mcp/commands/debug-mcp-logs.md @@ -0,0 +1,98 @@ +--- +name: debug-mcp logs +description: View recent MCP server logs and error patterns +--- + +# /debug-mcp logs + +View and analyze recent MCP server log output. + +## Skills to Load + +- `skills/visual-header.md` +- `skills/log-analysis.md` + +## Agent + +Delegate to `agents/mcp-debugger.md`. + +## Usage + +``` +/debug-mcp logs [--server=<name>] [--lines=<count>] [--errors-only] +``` + +**Options:** +- `--server` - Filter to a specific server (default: all) +- `--lines` - Number of recent lines to show (default: 50) +- `--errors-only` - Show only error-level log entries + +## Instructions + +Execute `skills/visual-header.md` with context "Log Analysis". + +### Phase 1: Locate Log Sources + +MCP servers in Claude Code output to stderr. Log locations vary: + +1. **Claude Code session logs** - Check `~/.claude/logs/` for recent session logs +2. **Server stderr** - If server runs as a subprocess, logs go to Claude Code's stderr +3. **Custom log files** - Some servers may write to files in their cwd + +Search for log files: +```bash +# Claude Code logs +ls -la ~/.claude/logs/ 2>/dev/null + +# Server-specific logs +ls -la <server_cwd>/*.log 2>/dev/null +ls -la <server_cwd>/logs/ 2>/dev/null +``` + +### Phase 2: Parse Logs + +1. Read the most recent log entries (default 50 lines) +2. 
Filter by server name if `--server` specified +3. If `--errors-only`, filter for patterns: + - Lines containing `ERROR`, `CRITICAL`, `FATAL` + - Python tracebacks (`Traceback (most recent call last)`) + - JSON-RPC error responses (`"error":`) + +### Phase 3: Error Analysis + +Apply patterns from `skills/log-analysis.md`: + +1. **Categorize errors** by type (ImportError, ConnectionError, TimeoutError, etc.) +2. **Count occurrences** of each error pattern +3. **Identify root cause** using the common patterns from the skill +4. **Suggest fix** for each error category + +### Phase 4: Report + +``` +## MCP Server Logs + +### Server: gitea +Last 10 entries: +[2025-11-15 10:00:01] INFO Initialized with 42 tools +[2025-11-15 10:00:05] INFO Tool call: list_issues (245ms) +... + +### Server: netbox +Last 10 entries: +[2025-11-15 09:58:00] ERROR ImportError: No module named 'pynetbox' + +### Error Summary + +| Server | Error Type | Count | Root Cause | Fix | +|--------|-----------|-------|------------|-----| +| netbox | ImportError | 3 | Missing dependency | pip install pynetbox | + +### Recommendations +1. Fix netbox: Reinstall dependencies in venv +2. All other servers: No issues detected +``` + +## User Request + +$ARGUMENTS diff --git a/plugins/debug-mcp/commands/debug-mcp-scaffold.md b/plugins/debug-mcp/commands/debug-mcp-scaffold.md new file mode 100644 index 0000000..b4b04ed --- /dev/null +++ b/plugins/debug-mcp/commands/debug-mcp-scaffold.md @@ -0,0 +1,138 @@ +--- +name: debug-mcp scaffold +description: Generate a new MCP server skeleton project with standard structure +--- + +# /debug-mcp scaffold + +Generate a new MCP server project with the standard directory structure, entry point, and configuration. + +## Skills to Load + +- `skills/visual-header.md` +- `skills/server-patterns.md` +- `skills/mcp-protocol.md` + +## Agent + +Delegate to `agents/mcp-debugger.md`. 
+ +## Usage + +``` +/debug-mcp scaffold [--tools=] [--location=] +``` + +**Arguments:** +- `server_name` - Name for the new MCP server (lowercase, hyphens) + +**Options:** +- `--tools` - Comma-separated list of initial tool names to generate stubs +- `--location` - Where to create the server (default: `mcp-servers/`) + +## Instructions + +Execute `skills/visual-header.md` with context "Server Scaffold". + +### Phase 1: Gather Requirements + +1. Ask user for: + - Server purpose (one sentence) + - External service it integrates with (if any) + - Authentication type (API key, OAuth, none) + - Initial tools to register (at least one) + +### Phase 2: Generate Project Structure + +Apply patterns from `skills/server-patterns.md`: + +``` +mcp-servers// +├── mcp_server/ +│ ├── __init__.py +│ ├── config.py # Configuration loader (env files) +│ ├── server.py # MCP server entry point +│ └── tools/ +│ ├── __init__.py +│ └── .py # Tool implementations +├── tests/ +│ ├── __init__.py +│ └── test_tools.py # Tool unit tests +├── requirements.txt # Python dependencies +└── README.md # Server documentation +``` + +### Phase 3: Generate Files + +#### server.py +- Import FastMCP or raw MCP protocol handler +- Register tools from tools/ directory +- Configure stdio transport +- Add startup logging with tool count + +#### config.py +- Load from `~/.config/claude/.env` +- Fall back to project-level `.env` +- Validate required variables on startup +- Mask sensitive values in logs + +#### tools/.py +- For each tool name provided in `--tools`: + - Generate a stub function with `@mcp.tool` decorator + - Include docstring with description + - Define inputSchema with parameter types + - Return placeholder response + +#### requirements.txt +``` +mcp>=1.0.0 +httpx>=0.24.0 +python-dotenv>=1.0.0 +``` + +#### README.md +- Server name and description +- Installation instructions (venv setup) +- Configuration (env variables) +- Available tools table +- Architecture diagram + +### Phase 4: Register in 
.mcp.json + +1. Read the project's `.mcp.json` +2. Add the new server entry: + ```json + "": { + "command": "mcp-servers//.venv/bin/python", + "args": ["-m", "mcp_server.server"], + "cwd": "mcp-servers/" + } + ``` +3. Show the change and ask user to confirm before writing + +### Phase 5: Completion + +``` +## Scaffold Complete + +### Created Files +- mcp-servers//mcp_server/server.py +- mcp-servers//mcp_server/config.py +- mcp-servers//mcp_server/tools/.py +- mcp-servers//requirements.txt +- mcp-servers//README.md + +### Next Steps +1. Create virtual environment: + cd mcp-servers/ && python3 -m venv .venv && .venv/bin/pip install -r requirements.txt +2. Add credentials: + Edit ~/.config/claude/.env +3. Implement tool logic: + Edit mcp-servers//mcp_server/tools/.py +4. Restart Claude Code session to load the new server +5. Test: /debug-mcp test +``` + +## User Request + +$ARGUMENTS diff --git a/plugins/debug-mcp/commands/debug-mcp-status.md b/plugins/debug-mcp/commands/debug-mcp-status.md new file mode 100644 index 0000000..96046b1 --- /dev/null +++ b/plugins/debug-mcp/commands/debug-mcp-status.md @@ -0,0 +1,101 @@ +--- +name: debug-mcp status +description: Show all configured MCP servers with health status, venv state, and tool counts +--- + +# /debug-mcp status + +Display the health status of all MCP servers configured in the project. + +## Skills to Load + +- `skills/visual-header.md` +- `skills/venv-diagnostics.md` +- `skills/log-analysis.md` + +## Agent + +Delegate to `agents/mcp-debugger.md`. + +## Usage + +``` +/debug-mcp status [--server=] [--verbose] +``` + +**Options:** +- `--server` - Check a specific server only +- `--verbose` - Show detailed output including tool lists + +## Instructions + +Execute `skills/visual-header.md` with context "Server Status". + +### Phase 1: Locate Configuration + +1. Read `.mcp.json` from the project root +2. Parse the `mcpServers` object to extract all server definitions +3. 
For each server, extract: + - Server name (key in mcpServers) + - Command path (usually Python interpreter in .venv) + - Arguments (module path) + - Working directory (`cwd`) + - Environment variables or env file references + +### Phase 2: Check Each Server + +For each configured MCP server: + +1. **Executable check** - Does the command path exist? + ```bash + test -f <command_path> && echo "OK" || echo "MISSING" + ``` + +2. **Virtual environment check** - Apply `skills/venv-diagnostics.md`: + - Does `.venv/` directory exist in the server's cwd? + - Is the Python binary intact (not broken symlink)? + - Are requirements satisfied? + +3. **Config file check** - Does the referenced env file exist? + ```bash + test -f <env_file> && echo "OK" || echo "MISSING" + ``` + +4. **Module check** - Can the server module be imported? + ```bash + cd <server_cwd> && .venv/bin/python -c "import <module>" 2>&1 + ``` + +### Phase 3: Report + +``` +## MCP Server Status + +| Server | Executable | Venv | Config | Import | Status | +|--------|-----------|------|--------|--------|--------| +| gitea | OK | OK | OK | OK | HEALTHY | +| netbox | OK | MISSING | OK | FAIL | ERROR | +| data-platform | OK | OK | OK | OK | HEALTHY | + +### Errors + +#### netbox +- Venv missing: /path/to/mcp-servers/netbox/.venv does not exist +- Import failed: ModuleNotFoundError: No module named 'pynetbox' +- Fix: cd /path/to/mcp-servers/netbox && python3 -m venv .venv && .venv/bin/pip install -r requirements.txt + +### Summary +- Healthy: 4/5 +- Errors: 1/5 +``` + +### Phase 4: Verbose Mode + +If `--verbose`, additionally show for each healthy server: +- Tool count (parse server source for `@mcp.tool` decorators or tool registration) +- Resource count +- Last modification time of server.py + +## User Request + +$ARGUMENTS diff --git a/plugins/debug-mcp/commands/debug-mcp-test.md b/plugins/debug-mcp/commands/debug-mcp-test.md new file mode 100644 index 0000000..f076c5b --- /dev/null +++ b/plugins/debug-mcp/commands/debug-mcp-test.md @@ -0,0 +1,103 @@ +--- 
+name: debug-mcp test +description: Test a specific MCP tool call by invoking it and displaying the result +--- + +# /debug-mcp test + +Test a specific MCP tool by invoking it with sample parameters. + +## Skills to Load + +- `skills/visual-header.md` +- `skills/mcp-protocol.md` + +## Agent + +Delegate to `agents/mcp-debugger.md`. + +## Usage + +``` +/debug-mcp test <server_name> <tool_name> [--params=<json>] +``` + +**Arguments:** +- `server_name` - Name of the MCP server from .mcp.json +- `tool_name` - Name of the tool to invoke +- `--params` - JSON object with tool parameters (optional) + +## Instructions + +Execute `skills/visual-header.md` with context "Tool Test". + +### Phase 1: Validate Inputs + +1. Read `.mcp.json` and verify the server exists +2. Check if the server is healthy (run quick executable check) +3. If tool_name is not provided, list available tools for the server and ask user to select + +### Phase 2: Tool Discovery + +1. Parse the server source code to find the tool definition +2. Extract the tool's `inputSchema` (parameters, types, required fields) +3. Display the schema to the user: + ``` + ## Tool: list_issues + Server: gitea + + Parameters: + - state (string, optional): "open", "closed", "all" [default: "open"] + - labels (array[string], optional): Filter by labels + - repo (string, optional): Repository name + ``` + +### Phase 3: Parameter Preparation + +1. If `--params` provided, validate against the tool's inputSchema +2. If no params provided and tool has required params, ask user for values +3. If no params and all optional, invoke with defaults + +### Phase 4: Invocation + +Invoke the MCP tool using the available MCP tool functions: +1. Call the tool with prepared parameters +2. Capture the response +3. 
Measure response time + +### Phase 5: Result Display + +``` +## Test Result + +### Request +- Server: gitea +- Tool: list_issues +- Params: {"state": "open", "repo": "leo-claude-mktplace"} + +### Response +- Status: Success +- Time: 245ms +- Result: + [formatted JSON response, truncated if large] + +### Schema Validation +- All required params provided: YES +- Response type matches expected: YES +``` + +### Error Handling + +If the tool call fails, apply `skills/mcp-protocol.md` error patterns: + +``` +### Error +- Type: ConnectionRefused +- Message: Could not connect to MCP server +- Likely Cause: Server not running or venv broken +- Fix: Run /debug-mcp status to diagnose +``` + +## User Request + +$ARGUMENTS diff --git a/plugins/debug-mcp/commands/debug-mcp.md b/plugins/debug-mcp/commands/debug-mcp.md new file mode 100644 index 0000000..91f76b7 --- /dev/null +++ b/plugins/debug-mcp/commands/debug-mcp.md @@ -0,0 +1,17 @@ +--- +description: MCP debugging — inspect servers, test tools, view logs, scaffold new servers +--- + +# /debug-mcp + +MCP server debugging, inspection, and development toolkit. + +## Sub-commands + +| Sub-command | Description | +|-------------|-------------| +| `/debug-mcp status` | Show all MCP servers with health status | +| `/debug-mcp test` | Test a specific MCP tool call | +| `/debug-mcp logs` | View recent MCP server logs and errors | +| `/debug-mcp inspect` | Inspect MCP server config and dependencies | +| `/debug-mcp scaffold` | Generate MCP server skeleton project | diff --git a/plugins/debug-mcp/skills/log-analysis.md b/plugins/debug-mcp/skills/log-analysis.md new file mode 100644 index 0000000..9f852c3 --- /dev/null +++ b/plugins/debug-mcp/skills/log-analysis.md @@ -0,0 +1,105 @@ +# Log Analysis Skill + +Common MCP server error patterns, their root causes, and fixes. + +## Error Pattern: ImportError + +``` +ImportError: No module named 'pynetbox' +``` + +**Root Cause:** Missing Python package in the virtual environment. 
+ +**Fix:** +```bash +cd <server_dir> && .venv/bin/pip install -r requirements.txt +``` + +**Prevention:** Always run `pip install -r requirements.txt` after creating or updating a venv. + +## Error Pattern: ConnectionRefused + +``` +ConnectionRefusedError: [Errno 111] Connection refused +``` + +**Root Cause:** The external service the MCP server connects to is not running or not reachable. + +**Checks:** +1. Is the target service running? (e.g., Gitea, NetBox) +2. Is the URL correct in the env file? +3. Is there a firewall or VPN issue? + +**Fix:** Verify the service URL in `~/.config/claude/<server>.env` and confirm the service is accessible. + +## Error Pattern: JSONDecodeError + +``` +json.decoder.JSONDecodeError: Expecting value: line 1 column 1 +``` + +**Root Cause:** The server received non-JSON response from the external API. Usually means: +- API returned HTML error page (wrong URL) +- API returned empty response (auth failed silently) +- Proxy intercepted the request + +**Fix:** Check the API URL ends with the correct path (e.g., `/api/v1` for Gitea, `/api` for NetBox). + +## Error Pattern: TimeoutError + +``` +TimeoutError: timed out +httpx.ReadTimeout: +``` + +**Root Cause:** Server startup took too long or external API is slow. + +**Checks:** +1. Network latency to the external service +2. Server doing heavy initialization (loading all tools) +3. Large response from API + +**Fix:** Increase timeout in server config or reduce initial tool registration. + +## Error Pattern: PermissionError + +``` +PermissionError: [Errno 13] Permission denied: '/path/to/file' +``` + +**Root Cause:** Server process cannot read/write required files. + +**Fix:** Check file ownership and permissions. 
Common locations:
+- `~/.config/claude/*.env` (should be readable by user)
+- Server's `.venv/` directory
+- Log files
+
+## Error Pattern: FileNotFoundError (Venv)
+
+```
+FileNotFoundError: [Errno 2] No such file or directory: '.venv/bin/python'
+```
+
+**Root Cause:** Virtual environment does not exist or was deleted.
+
+**Fix:** Create the venv:
+```bash
+cd <server-dir> && python3 -m venv .venv && .venv/bin/pip install -r requirements.txt
+```
+
+## Error Pattern: SSL Certificate Error
+
+```
+ssl.SSLCertVerificationError: certificate verify failed
+```
+
+**Root Cause:** Self-signed certificate on the target service.
+
+**Fix:** Set `VERIFY_SSL=false` in the env file (not recommended for production).
+
+## Log Parsing Tips
+
+1. **Python tracebacks** - Read from bottom up. The last line is the actual error.
+2. **JSON-RPC errors** - Look for `"error"` key in JSON responses.
+3. **Startup failures** - First few lines after server spawn reveal initialization issues.
+4. **Repeated errors** - Same error in a loop means the server is retrying and failing.
diff --git a/plugins/debug-mcp/skills/mcp-protocol.md b/plugins/debug-mcp/skills/mcp-protocol.md
new file mode 100644
index 0000000..b74e095
--- /dev/null
+++ b/plugins/debug-mcp/skills/mcp-protocol.md
@@ -0,0 +1,131 @@
+# MCP Protocol Skill
+
+Model Context Protocol (MCP) specification reference for debugging and development.
+
+## Protocol Overview
+
+MCP uses JSON-RPC 2.0 over stdio (standard input/output) for communication between Claude Code and MCP servers.
+
+### Transport Types
+
+| Transport | Description | Use Case |
+|-----------|-------------|----------|
+| **stdio** | JSON-RPC over stdin/stdout | Default for Claude Code |
+| **SSE** | Server-Sent Events over HTTP | Remote servers |
+
+## Tool Definitions
+
+Tools are the primary way MCP servers expose functionality. 
+ +### Tool Registration + +```python +@mcp.tool() +def list_issues(state: str = "open", labels: list[str] = None) -> str: + """List issues from the repository. + + Args: + state: Issue state filter (open, closed, all) + labels: Filter by label names + """ + # implementation +``` + +### Tool Schema (JSON) + +```json +{ + "name": "list_issues", + "description": "List issues from the repository", + "inputSchema": { + "type": "object", + "properties": { + "state": { + "type": "string", + "enum": ["open", "closed", "all"], + "default": "open", + "description": "Issue state filter" + }, + "labels": { + "type": "array", + "items": { "type": "string" }, + "description": "Filter by label names" + } + }, + "required": [] + } +} +``` + +## Resource Definitions + +Resources expose data that can be read by the client. + +```python +@mcp.resource("config://settings") +def get_settings() -> str: + """Return current configuration.""" + return json.dumps(config) +``` + +## Prompt Definitions + +Prompts provide reusable prompt templates. + +```python +@mcp.prompt() +def analyze_issue(issue_number: int) -> str: + """Generate a prompt to analyze a specific issue.""" + return f"Analyze issue #{issue_number} and suggest solutions." 
+``` + +## JSON-RPC Message Format + +### Request + +```json +{ + "jsonrpc": "2.0", + "id": 1, + "method": "tools/call", + "params": { + "name": "list_issues", + "arguments": {"state": "open"} + } +} +``` + +### Response (Success) + +```json +{ + "jsonrpc": "2.0", + "id": 1, + "result": { + "content": [{"type": "text", "text": "..."}] + } +} +``` + +### Response (Error) + +```json +{ + "jsonrpc": "2.0", + "id": 1, + "error": { + "code": -32600, + "message": "Invalid Request" + } +} +``` + +## Error Codes + +| Code | Meaning | +|------|---------| +| -32700 | Parse error (invalid JSON) | +| -32600 | Invalid request | +| -32601 | Method not found | +| -32602 | Invalid params | +| -32603 | Internal error | diff --git a/plugins/debug-mcp/skills/server-patterns.md b/plugins/debug-mcp/skills/server-patterns.md new file mode 100644 index 0000000..73bd545 --- /dev/null +++ b/plugins/debug-mcp/skills/server-patterns.md @@ -0,0 +1,139 @@ +# MCP Server Patterns Skill + +Standard patterns for building Python MCP servers compatible with Claude Code. 
+
+## Directory Structure
+
+```
+mcp-servers/<server-name>/
+├── mcp_server/
+│   ├── __init__.py        # Package marker
+│   ├── config.py          # Configuration loader
+│   ├── server.py          # Entry point (MCP server setup)
+│   └── tools/
+│       ├── __init__.py    # Tool registration
+│       └── <category>.py  # Tool implementations grouped by domain
+├── tests/
+│   ├── __init__.py
+│   └── test_tools.py
+├── requirements.txt
+└── README.md
+```
+
+## FastMCP Pattern (Recommended)
+
+The FastMCP decorator pattern provides the simplest server implementation:
+
+```python
+# server.py
+from mcp.server.fastmcp import FastMCP
+from .config import load_config
+
+mcp = FastMCP("server-name")
+config = load_config()
+
+# Import tools to register them
+from .tools import category  # noqa: F401
+
+if __name__ == "__main__":
+    mcp.run()
+```
+
+```python
+# tools/category.py
+from ..server import mcp, config
+
+@mcp.tool()
+def my_tool(param: str) -> str:
+    """Tool description."""
+    return f"Result for {param}"
+```
+
+## Configuration Loader Pattern
+
+```python
+# config.py
+import os
+from pathlib import Path
+from dotenv import load_dotenv
+
+def load_config() -> dict:
+    # System-level config
+    sys_config = Path.home() / ".config" / "claude" / ".env"
+    if sys_config.exists():
+        load_dotenv(sys_config)
+
+    # Project-level overrides
+    project_env = Path.cwd() / ".env"
+    if project_env.exists():
+        load_dotenv(project_env, override=True)
+
+    config = {
+        "api_url": os.getenv("<SERVICE>_API_URL"),
+        "api_token": os.getenv("<SERVICE>_API_TOKEN"),
+    }
+
+    # Validate required
+    missing = [k for k, v in config.items() if v is None]
+    if missing:
+        raise ValueError(f"Missing config: {', '.join(missing)}")
+
+    return config
+```
+
+## Entry Point Configuration
+
+In `.mcp.json`:
+
+```json
+{
+  "mcpServers": {
+    "<server-name>": {
+      "command": "mcp-servers/<server-name>/.venv/bin/python",
+      "args": ["-m", "mcp_server.server"],
+      "cwd": "mcp-servers/<server-name>"
+    }
+  }
+}
+```
+
+The `-m` flag runs the module as a script, using `__main__.py` or the `if __name__ == "__main__"` block. 
+
+## Requirements
+
+Minimum dependencies for an MCP server:
+
+```
+mcp>=1.0.0
+python-dotenv>=1.0.0
+```
+
+For HTTP-based integrations add:
+```
+httpx>=0.24.0
+```
+
+## Testing Pattern
+
+```python
+# tests/test_tools.py
+import pytest
+from mcp_server.tools.category import my_tool
+
+def test_my_tool():
+    result = my_tool("test")
+    assert "test" in result
+```
+
+## Startup Logging
+
+Always log initialization status to stderr:
+
+```python
+import sys
+
+def log(msg):
+    print(msg, file=sys.stderr)
+
+log(f"MCP Server '{name}' initialized: {len(tools)} tools registered")
+```
diff --git a/plugins/debug-mcp/skills/venv-diagnostics.md b/plugins/debug-mcp/skills/venv-diagnostics.md
new file mode 100644
index 0000000..21d9f88
--- /dev/null
+++ b/plugins/debug-mcp/skills/venv-diagnostics.md
@@ -0,0 +1,89 @@
+# Virtual Environment Diagnostics Skill
+
+Patterns for checking virtual environment health in MCP server directories.
+
+## Check 1: Venv Exists
+
+```bash
+test -d <server-dir>/.venv && echo "EXISTS" || echo "MISSING"
+```
+
+If missing, the server will fail to start. Fix:
+```bash
+cd <server-dir> && python3 -m venv .venv
+```
+
+## Check 2: Python Binary Intact
+
+Venvs can break when the system Python is upgraded (symlink becomes dangling).
+
+```bash
+<server-dir>/.venv/bin/python --version 2>&1
+```
+
+If error contains "No such file or directory" despite .venv existing, the symlink is broken.
+
+Fix:
+```bash
+cd <server-dir> && rm -rf .venv && python3 -m venv .venv && .venv/bin/pip install -r requirements.txt
+```
+
+**IMPORTANT:** Never delete .venv without explicit user approval. Show the diagnosis and ask user to confirm the fix. 
+
+## Check 3: Requirements Satisfied
+
+Compare requirements.txt with installed packages:
+
+```bash
+cd <server-dir> && .venv/bin/pip freeze > /tmp/installed.txt
+```
+
+Then diff against requirements.txt:
+- **Missing packages:** In requirements but not installed
+- **Version mismatch:** Installed version does not satisfy requirement specifier
+- **Extra packages:** Installed but not in requirements (usually OK, may indicate stale venv)
+
+Quick check:
+```bash
+cd <server-dir> && .venv/bin/pip check 2>&1
+```
+
+This reports broken dependencies (missing or incompatible versions).
+
+## Check 4: Module Import Test
+
+Verify the server's main module can be imported:
+
+```bash
+cd <server-dir> && .venv/bin/python -c "import mcp_server.server" 2>&1
+```
+
+Common failures:
+| Error | Cause | Fix |
+|-------|-------|-----|
+| `ModuleNotFoundError: No module named 'mcp'` | MCP SDK not installed | `pip install mcp` |
+| `ModuleNotFoundError: No module named '<package>'` | Missing dependency | `pip install -r requirements.txt` |
+| `ImportError: cannot import name 'X'` | Version mismatch | `pip install --upgrade <package>` |
+| `SyntaxError` | Python version too old | Check `python3 --version` >= 3.10 |
+
+## Check 5: Broken Symlinks
+
+Find broken symlinks in the venv:
+
+```bash
+find <server-dir>/.venv -type l ! -exec test -e {} \; -print 2>/dev/null
+```
+
+Any output indicates broken symlinks that may cause import failures.
+
+## Health Summary Format
+
+```
+### Venv: <server-name>
+- Directory: EXISTS
+- Python: 3.11.2 (OK)
+- Packages: 12 installed, 10 required, 0 missing
+- Import: OK
+- Broken symlinks: 0
+- Status: HEALTHY
+```
diff --git a/plugins/debug-mcp/skills/visual-header.md b/plugins/debug-mcp/skills/visual-header.md
new file mode 100644
index 0000000..f8a6afb
--- /dev/null
+++ b/plugins/debug-mcp/skills/visual-header.md
@@ -0,0 +1,26 @@
+# Visual Header Skill
+
+Standard visual header for debug-mcp commands. 
+ +## Header Template + +``` ++----------------------------------------------------------------------+ +| DEBUG-MCP - [Context] | ++----------------------------------------------------------------------+ +``` + +## Context Values by Command + +| Command | Context | +|---------|---------| +| `/debug-mcp status` | Server Status | +| `/debug-mcp test` | Tool Test | +| `/debug-mcp logs` | Log Analysis | +| `/debug-mcp inspect` | Server Inspection | +| `/debug-mcp scaffold` | Server Scaffold | +| Agent mode | MCP Debugging | + +## Usage + +Display header at the start of every command response before proceeding with the operation. diff --git a/plugins/doc-guardian/claude-md-integration.md b/plugins/doc-guardian/claude-md-integration.md index bffb2f8..f7d7e22 100644 --- a/plugins/doc-guardian/claude-md-integration.md +++ b/plugins/doc-guardian/claude-md-integration.md @@ -9,11 +9,11 @@ This project uses doc-guardian for automatic documentation synchronization. ### Behavior - Documentation drift is detected automatically when files change - Pending updates are queued silently during work -- Run `/doc-sync` to apply all pending documentation updates -- Run `/doc-audit` for a full project documentation review -- Run `/changelog-gen` to generate changelog from conventional commits -- Run `/doc-coverage` to check documentation coverage metrics -- Run `/stale-docs` to find documentation that may be outdated +- Run `/doc sync` to apply all pending documentation updates +- Run `/doc audit` for a full project documentation review +- Run `/doc changelog-gen` to generate changelog from conventional commits +- Run `/doc coverage` to check documentation coverage metrics +- Run `/doc stale-docs` to find documentation that may be outdated ### Documentation Files Tracked - README.md (root and subdirectories) diff --git a/plugins/doc-guardian/commands/doc-audit.md b/plugins/doc-guardian/commands/doc-audit.md index 4ab8441..b5a9c08 100644 --- a/plugins/doc-guardian/commands/doc-audit.md 
+++ b/plugins/doc-guardian/commands/doc-audit.md @@ -1,8 +1,9 @@ --- +name: doc audit description: Full documentation audit - scans entire project for doc drift without making changes --- -# Documentation Audit +# /doc audit Perform a comprehensive documentation drift analysis. diff --git a/plugins/doc-guardian/commands/changelog-gen.md b/plugins/doc-guardian/commands/doc-changelog-gen.md similarity index 96% rename from plugins/doc-guardian/commands/changelog-gen.md rename to plugins/doc-guardian/commands/doc-changelog-gen.md index 3a89f4b..43ca109 100644 --- a/plugins/doc-guardian/commands/changelog-gen.md +++ b/plugins/doc-guardian/commands/doc-changelog-gen.md @@ -1,8 +1,9 @@ --- +name: doc changelog-gen description: Generate changelog from conventional commits in Keep-a-Changelog format --- -# Changelog Generation +# /doc changelog-gen Generate a changelog entry from conventional commits. diff --git a/plugins/doc-guardian/commands/doc-coverage.md b/plugins/doc-guardian/commands/doc-coverage.md index 040147a..77dc647 100644 --- a/plugins/doc-guardian/commands/doc-coverage.md +++ b/plugins/doc-guardian/commands/doc-coverage.md @@ -1,8 +1,9 @@ --- +name: doc coverage description: Calculate documentation coverage percentage for functions and classes --- -# Documentation Coverage +# /doc coverage Analyze codebase to calculate documentation coverage metrics. 
diff --git a/plugins/doc-guardian/commands/stale-docs.md b/plugins/doc-guardian/commands/doc-stale-docs.md similarity index 97% rename from plugins/doc-guardian/commands/stale-docs.md rename to plugins/doc-guardian/commands/doc-stale-docs.md index bd3c857..15d4ec9 100644 --- a/plugins/doc-guardian/commands/stale-docs.md +++ b/plugins/doc-guardian/commands/doc-stale-docs.md @@ -1,8 +1,9 @@ --- +name: doc stale-docs description: Detect documentation files that are stale relative to their associated code --- -# Stale Documentation Detection +# /doc stale-docs Identify documentation files that may be outdated based on commit history. diff --git a/plugins/doc-guardian/commands/doc-sync.md b/plugins/doc-guardian/commands/doc-sync.md index 7d47623..aad0c49 100644 --- a/plugins/doc-guardian/commands/doc-sync.md +++ b/plugins/doc-guardian/commands/doc-sync.md @@ -1,8 +1,9 @@ --- +name: doc sync description: Synchronize all pending documentation updates in a single commit --- -# Documentation Sync +# /doc sync Apply all pending documentation updates detected by doc-guardian hooks. diff --git a/plugins/doc-guardian/commands/doc.md b/plugins/doc-guardian/commands/doc.md new file mode 100644 index 0000000..190d3cb --- /dev/null +++ b/plugins/doc-guardian/commands/doc.md @@ -0,0 +1,17 @@ +--- +description: Documentation management and drift detection +--- + +# /doc + +Documentation management, drift detection, and synchronization. 
+ +## Sub-commands + +| Sub-command | Description | +|-------------|-------------| +| `/doc audit` | Full documentation audit - scans for doc drift | +| `/doc sync` | Synchronize pending documentation updates | +| `/doc changelog-gen` | Generate changelog from conventional commits | +| `/doc coverage` | Documentation coverage metrics by function/class | +| `/doc stale-docs` | Flag documentation behind code changes | diff --git a/plugins/doc-guardian/skills/changelog-format.md b/plugins/doc-guardian/skills/changelog-format.md index 1da053e..968d6c2 100644 --- a/plugins/doc-guardian/skills/changelog-format.md +++ b/plugins/doc-guardian/skills/changelog-format.md @@ -11,7 +11,7 @@ Defines Keep a Changelog format and how to parse Conventional Commits. ## When to Use -- **changelog-gen**: Generating changelog entries from commits +- **doc changelog-gen**: Generating changelog entries from commits - **git-flow integration**: Validating commit message format --- diff --git a/plugins/doc-guardian/skills/coverage-calculation.md b/plugins/doc-guardian/skills/coverage-calculation.md index 5b389fb..6c9e957 100644 --- a/plugins/doc-guardian/skills/coverage-calculation.md +++ b/plugins/doc-guardian/skills/coverage-calculation.md @@ -11,8 +11,8 @@ Defines how to calculate documentation coverage and thresholds. ## When to Use -- **doc-coverage**: Full coverage analysis -- **doc-audit**: Completeness checks +- **doc coverage**: Full coverage analysis +- **doc audit**: Completeness checks --- diff --git a/plugins/doc-guardian/skills/doc-patterns.md b/plugins/doc-guardian/skills/doc-patterns.md index f8dac18..d5ce933 100644 --- a/plugins/doc-guardian/skills/doc-patterns.md +++ b/plugins/doc-guardian/skills/doc-patterns.md @@ -11,8 +11,8 @@ Defines common documentation file structures and their contents. 
## When to Use -- **doc-audit**: Understanding what to check in each doc type -- **doc-coverage**: Identifying documentation locations +- **doc audit**: Understanding what to check in each doc type +- **doc coverage**: Identifying documentation locations --- diff --git a/plugins/doc-guardian/skills/drift-detection.md b/plugins/doc-guardian/skills/drift-detection.md index 8baecaa..9f90ba6 100644 --- a/plugins/doc-guardian/skills/drift-detection.md +++ b/plugins/doc-guardian/skills/drift-detection.md @@ -11,8 +11,8 @@ Defines how to detect documentation drift through cross-reference analysis. ## When to Use -- **doc-audit**: Full cross-reference analysis -- **stale-docs**: Commit-based staleness detection +- **doc audit**: Full cross-reference analysis +- **doc stale-docs**: Commit-based staleness detection - **SessionStart hook**: Real-time drift detection --- diff --git a/plugins/doc-guardian/skills/staleness-metrics.md b/plugins/doc-guardian/skills/staleness-metrics.md index a72da61..6ee6241 100644 --- a/plugins/doc-guardian/skills/staleness-metrics.md +++ b/plugins/doc-guardian/skills/staleness-metrics.md @@ -11,8 +11,8 @@ Defines how to measure documentation staleness relative to code changes. ## When to Use -- **stale-docs**: Commit-based staleness detection -- **doc-audit**: Age-based analysis +- **doc stale-docs**: Commit-based staleness detection +- **doc audit**: Age-based analysis --- diff --git a/plugins/doc-guardian/skills/sync-workflow.md b/plugins/doc-guardian/skills/sync-workflow.md index 4aff5d8..17d8800 100644 --- a/plugins/doc-guardian/skills/sync-workflow.md +++ b/plugins/doc-guardian/skills/sync-workflow.md @@ -11,7 +11,7 @@ Defines how to synchronize documentation with code changes. 
## When to Use -- **doc-sync**: Apply pending documentation updates +- **doc sync**: Apply pending documentation updates - **PostToolUse hook**: Queue drift for later sync --- diff --git a/plugins/git-flow/claude-md-integration.md b/plugins/git-flow/claude-md-integration.md index a6c54c9..e58cfb8 100644 --- a/plugins/git-flow/claude-md-integration.md +++ b/plugins/git-flow/claude-md-integration.md @@ -40,11 +40,11 @@ Use conventional commits: | Command | Use Case | |---------|----------| -| `/git-commit` | Create commit with smart message | -| `/git-commit-push` | Commit and push | -| `/git-commit-merge` | Commit and merge to base | -| `/branch-start` | Start new branch | -| `/git-status` | Enhanced status | +| `/gitflow commit` | Smart commit with optional --push, --merge, --sync | +| `/gitflow commit --push` | Commit and push to remote | +| `/gitflow commit --merge` | Commit and merge into target branch | +| `/gitflow branch-start` | Start new branch | +| `/gitflow status` | Enhanced status | ### Protected Branches diff --git a/plugins/git-flow/commands/git-commit-merge.md b/plugins/git-flow/commands/git-commit-merge.md deleted file mode 100644 index 24c36d9..0000000 --- a/plugins/git-flow/commands/git-commit-merge.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -name: git-commit-merge -description: Commit current changes and merge branch into target -agent: git-assistant ---- - -# /git-commit-merge - Commit and Merge - -## Skills - -- skills/visual-header.md -- skills/commit-conventions.md -- skills/merge-workflow.md -- skills/git-safety.md -- skills/environment-variables.md - -## Purpose - -Commit current changes, then merge the current branch into a target branch. - -## Parameters - -| Parameter | Description | -|-----------|-------------| -| `--target` | Target branch (default: GIT_DEFAULT_BASE) | -| `--squash` | Squash commits on merge | -| `--no-delete` | Keep branch after merge | - -## Workflow - -1. **Display header** - Show GIT-FLOW Commit & Merge header -2. 
**Run /git-commit** - Execute standard commit workflow -3. **Identify target** - Prompt for target branch if not specified -4. **Select strategy** - Merge commit, squash, or rebase (per merge-workflow.md) -5. **Execute merge** - Switch to target, pull, merge, push -6. **Handle conflicts** - Guide resolution if needed -7. **Cleanup** - Offer to delete merged branch (per git-safety.md) -8. **Report** - Show merge summary - -## Output - -``` -Committed: abc1234 -feat(auth): add password reset functionality - -Merged feat/password-reset -> development -Deleted branch: feat/password-reset - -development is now at: def5678 -``` diff --git a/plugins/git-flow/commands/git-commit-push.md b/plugins/git-flow/commands/git-commit-push.md deleted file mode 100644 index 4607338..0000000 --- a/plugins/git-flow/commands/git-commit-push.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -name: git-commit-push -description: Create a commit and push to remote in one operation -agent: git-assistant ---- - -# /git-commit-push - Commit and Push - -## Skills - -- skills/visual-header.md -- skills/commit-conventions.md -- skills/sync-workflow.md -- skills/git-safety.md -- skills/environment-variables.md - -## Purpose - -Create a commit and push to the remote repository in one operation. - -## Parameters - -| Parameter | Description | -|-----------|-------------| -| `--message`, `-m` | Override auto-generated message | -| `--force` | Force push (requires confirmation) | - -## Workflow - -1. **Display header** - Show GIT-FLOW Commit & Push header -2. **Run /git-commit** - Execute standard commit workflow -3. **Check upstream** - Set up tracking if needed (`git push -u`) -4. **Push** - Push to remote -5. **Handle conflicts** - Offer rebase/merge/force if push fails (per sync-workflow.md) -6. **Verify safety** - Warn before push to protected branches (per git-safety.md) -7. 
**Report** - Show push result - -## Output - -``` -Committed: abc1234 -feat(auth): add password reset functionality - -Pushed to: origin/feat/password-reset -Remote URL: https://github.com/user/repo -``` diff --git a/plugins/git-flow/commands/git-commit-sync.md b/plugins/git-flow/commands/git-commit-sync.md deleted file mode 100644 index d69a044..0000000 --- a/plugins/git-flow/commands/git-commit-sync.md +++ /dev/null @@ -1,49 +0,0 @@ ---- -name: git-commit-sync -description: Commit, push, and sync with base branch -agent: git-assistant ---- - -# /git-commit-sync - Commit, Push, and Sync - -## Skills - -- skills/visual-header.md -- skills/commit-conventions.md -- skills/sync-workflow.md -- skills/merge-workflow.md -- skills/environment-variables.md - -## Purpose - -Full sync operation: commit local changes, push to remote, sync with upstream/base branch, and detect stale branches. - -## Parameters - -| Parameter | Description | -|-----------|-------------| -| `--base` | Override default base branch | -| `--no-rebase` | Use merge instead of rebase | - -## Workflow - -1. **Display header** - Show GIT-FLOW Commit Sync header -2. **Run /git-commit** - Execute standard commit workflow -3. **Push to remote** - Push committed changes -4. **Fetch with prune** - `git fetch --all --prune` -5. **Sync with base** - Rebase on base branch (per sync-workflow.md) -6. **Handle conflicts** - Guide resolution if conflicts occur (per merge-workflow.md) -7. **Push again** - `git push --force-with-lease` if rebased -8. **Detect stale** - Report stale local branches -9. 
**Report status** - Show sync summary - -## Output - -``` -Committed: abc1234 -Pushed to: origin/feat/password-reset -Synced with: development (xyz7890) - -Status: Clean, up-to-date -Stale branches: 2 found - run /branch-cleanup -``` diff --git a/plugins/git-flow/commands/git-commit.md b/plugins/git-flow/commands/git-commit.md deleted file mode 100644 index 92a1a60..0000000 --- a/plugins/git-flow/commands/git-commit.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -name: git-commit -description: Create a git commit with auto-generated conventional commit message -agent: git-assistant ---- - -# /git-commit - Smart Commit - -## Skills - -- skills/visual-header.md -- skills/git-safety.md -- skills/commit-conventions.md -- skills/environment-variables.md - -## Purpose - -Create a git commit with an auto-generated conventional commit message based on staged changes. - -## Parameters - -| Parameter | Description | -|-----------|-------------| -| `--message`, `-m` | Override auto-generated message | -| `--all`, `-a` | Stage all changes before commit | - -## Workflow - -1. **Display header** - Show GIT-FLOW Smart Commit header -2. **Check protected branch** - Warn if on protected branch (per git-safety.md) -3. **Analyze changes** - Run `git status` and `git diff --staged` -4. **Handle unstaged** - Prompt to stage if nothing staged -5. **Generate message** - Create conventional commit message (per commit-conventions.md) -6. **Confirm or edit** - Present message with options to use, edit, regenerate, or cancel -7. 
**Execute commit** - Run `git commit` with message and co-author footer - -## Output - -``` -Committed: abc1234 -feat(auth): add password reset functionality - -Files: 3 changed, 45 insertions(+), 12 deletions(-) -``` diff --git a/plugins/git-flow/commands/branch-cleanup.md b/plugins/git-flow/commands/gitflow-branch-cleanup.md similarity index 93% rename from plugins/git-flow/commands/branch-cleanup.md rename to plugins/git-flow/commands/gitflow-branch-cleanup.md index d21ef4a..d989ac1 100644 --- a/plugins/git-flow/commands/branch-cleanup.md +++ b/plugins/git-flow/commands/gitflow-branch-cleanup.md @@ -1,10 +1,10 @@ --- -name: branch-cleanup +name: gitflow branch-cleanup description: Remove merged and stale branches locally and optionally on remote agent: git-assistant --- -# /branch-cleanup - Clean Merged and Stale Branches +# /gitflow branch-cleanup - Clean Merged and Stale Branches ## Skills diff --git a/plugins/git-flow/commands/branch-start.md b/plugins/git-flow/commands/gitflow-branch-start.md similarity index 94% rename from plugins/git-flow/commands/branch-start.md rename to plugins/git-flow/commands/gitflow-branch-start.md index fae307c..d73d82b 100644 --- a/plugins/git-flow/commands/branch-start.md +++ b/plugins/git-flow/commands/gitflow-branch-start.md @@ -1,10 +1,10 @@ --- -name: branch-start +name: gitflow branch-start description: Create a new feature/fix/chore branch with consistent naming agent: git-assistant --- -# /branch-start - Start New Branch +# /gitflow branch-start - Start New Branch ## Skills diff --git a/plugins/git-flow/commands/gitflow-commit.md b/plugins/git-flow/commands/gitflow-commit.md new file mode 100644 index 0000000..3dd8be3 --- /dev/null +++ b/plugins/git-flow/commands/gitflow-commit.md @@ -0,0 +1,98 @@ +--- +name: gitflow commit +description: Create a git commit with auto-generated conventional commit message. Supports --push, --merge, --sync flags. 
+agent: git-assistant +--- + +# /gitflow commit - Smart Commit + +## Skills + +- skills/visual-header.md +- skills/git-safety.md +- skills/commit-conventions.md +- skills/sync-workflow.md +- skills/merge-workflow.md +- skills/environment-variables.md + +## Purpose + +Create a git commit with an auto-generated conventional commit message. Optionally push, merge, or sync in the same operation. + +## Parameters + +| Parameter | Description | +|-----------|-------------| +| `--message`, `-m` | Override auto-generated message | +| `--all`, `-a` | Stage all changes before commit | +| `--push` | After commit: push to remote (replaces former `/git-commit-push`) | +| `--merge [target]` | After commit: merge into target branch (replaces former `/git-commit-merge`) | +| `--sync` | After commit: push and sync with base branch (replaces former `/git-commit-sync`) | +| `--force` | Force push (with --push or --sync, requires confirmation) | +| `--squash` | Squash commits on merge (with --merge) | +| `--no-delete` | Keep branch after merge (with --merge) | +| `--base` | Override default base branch (with --sync) | +| `--no-rebase` | Use merge instead of rebase (with --sync) | + +## Workflow + +### Base: Commit +1. **Display header** — GIT-FLOW Smart Commit +2. **Check protected branch** — per git-safety.md +3. **Analyze changes** — `git status` and `git diff --staged` +4. **Handle unstaged** — Prompt to stage if nothing staged +5. **Generate message** — Create conventional commit (per commit-conventions.md) +6. **Confirm or edit** — Present message with options +7. **Execute commit** + +### Flag: --push +8. **Check upstream** — Set up tracking if needed +9. **Push to remote** +10. **Handle conflicts** — Offer rebase/merge/force if push fails + +### Flag: --merge +8. **Identify target** — Prompt for target branch if not specified +9. **Select strategy** — Merge commit, squash, or rebase (per merge-workflow.md) +10. **Execute merge** — Switch to target, pull, merge, push +11. 
**Handle conflicts** — Guide resolution +12. **Cleanup** — Offer to delete merged branch + +### Flag: --sync +8. **Push committed changes** +9. **Fetch with prune** — `git fetch --all --prune` +10. **Sync with base** — Rebase on base branch (per sync-workflow.md) +11. **Handle conflicts** — Guide resolution (per merge-workflow.md) +12. **Push again** — `git push --force-with-lease` if rebased +13. **Report stale branches** + +## Flag Mutual Exclusivity + +`--push`, `--merge`, and `--sync` are mutually exclusive. If multiple are provided, error with: +"Only one of --push, --merge, or --sync may be specified." + +## Output + +### Base commit: +``` +Committed: abc1234 +feat(auth): add password reset functionality +``` + +### With --push: +``` +Committed: abc1234 +Pushed to: origin/feat/password-reset +``` + +### With --merge: +``` +Committed: abc1234 +Merged feat/password-reset -> development +``` + +### With --sync: +``` +Committed: abc1234 +Pushed to: origin/feat/password-reset +Synced with: development +``` diff --git a/plugins/git-flow/commands/git-config.md b/plugins/git-flow/commands/gitflow-config.md similarity index 95% rename from plugins/git-flow/commands/git-config.md rename to plugins/git-flow/commands/gitflow-config.md index d41cc02..d5ac077 100644 --- a/plugins/git-flow/commands/git-config.md +++ b/plugins/git-flow/commands/gitflow-config.md @@ -1,10 +1,10 @@ --- -name: git-config +name: gitflow config description: Configure git-flow settings for the current project agent: git-assistant --- -# /git-config - Configure git-flow +# /gitflow config - Configure Git-Flow ## Skills diff --git a/plugins/git-flow/commands/git-status.md b/plugins/git-flow/commands/gitflow-status.md similarity index 89% rename from plugins/git-flow/commands/git-status.md rename to plugins/git-flow/commands/gitflow-status.md index b2b1d6f..40674a9 100644 --- a/plugins/git-flow/commands/git-status.md +++ b/plugins/git-flow/commands/gitflow-status.md @@ -1,10 +1,10 @@ --- -name: 
git-status +name: gitflow status description: Show comprehensive git status with recommendations agent: git-assistant --- -# /git-status - Enhanced Status +# /gitflow status - Enhanced Git Status ## Skills @@ -51,6 +51,6 @@ Unstaged: 2. Ready to commit with 1 staged file --- Quick Actions --- -/git-commit - Commit staged changes -/git-commit-push - Commit and push +/gitflow commit - Commit staged changes +/gitflow commit --push - Commit and push ``` diff --git a/plugins/git-flow/commands/gitflow.md b/plugins/git-flow/commands/gitflow.md new file mode 100644 index 0000000..3e48d85 --- /dev/null +++ b/plugins/git-flow/commands/gitflow.md @@ -0,0 +1,17 @@ +--- +description: Git workflow automation with safety enforcement +--- + +# /gitflow + +Git workflow automation with smart commits, branch management, and safety enforcement. + +## Sub-commands + +| Sub-command | Description | +|-------------|-------------| +| `/gitflow commit` | Smart commit with optional --push, --merge, --sync | +| `/gitflow branch-start` | Create a properly-named feature branch | +| `/gitflow branch-cleanup` | Clean up merged/stale branches | +| `/gitflow status` | Enhanced git status with recommendations | +| `/gitflow config` | Configure git-flow settings | diff --git a/plugins/git-flow/docs/BRANCHING-STRATEGY.md b/plugins/git-flow/docs/BRANCHING-STRATEGY.md index b582c6b..33f6f70 100644 --- a/plugins/git-flow/docs/BRANCHING-STRATEGY.md +++ b/plugins/git-flow/docs/BRANCHING-STRATEGY.md @@ -104,7 +104,7 @@ gitGraph ``` Or use git-flow command: ``` - /branch-start add user authentication + /gitflow branch-start add user authentication ``` 2. **Implement changes** @@ -125,7 +125,7 @@ gitGraph 5. 
**Cleanup** - Delete feature branch after merge ``` - /branch-cleanup + /gitflow branch-cleanup ``` ### Release Promotion diff --git a/plugins/git-flow/skills/branch-naming.md b/plugins/git-flow/skills/branch-naming.md index 6c2cf35..b36fb15 100644 --- a/plugins/git-flow/skills/branch-naming.md +++ b/plugins/git-flow/skills/branch-naming.md @@ -6,7 +6,7 @@ Defines branch naming conventions and validation rules for consistent repository ## When to Use -- Creating new branches with `/branch-start` +- Creating new branches with `/gitflow branch-start` - Validating branch names - Converting descriptions to branch names diff --git a/plugins/git-flow/skills/commit-conventions.md b/plugins/git-flow/skills/commit-conventions.md index 8fa0351..eb09465 100644 --- a/plugins/git-flow/skills/commit-conventions.md +++ b/plugins/git-flow/skills/commit-conventions.md @@ -6,7 +6,7 @@ Defines conventional commit message format for consistent, parseable commit hist ## When to Use -- Generating commit messages in `/git-commit` +- Generating commit messages in `/gitflow commit` - Validating user-provided commit messages - Explaining commit format to users diff --git a/plugins/git-flow/skills/environment-variables.md b/plugins/git-flow/skills/environment-variables.md index 7b9421b..32b3ced 100644 --- a/plugins/git-flow/skills/environment-variables.md +++ b/plugins/git-flow/skills/environment-variables.md @@ -6,7 +6,7 @@ Centralized reference for all git-flow environment variables and their defaults. 
## When to Use -- Configuring git-flow behavior in `/git-config` +- Configuring git-flow behavior in `/gitflow config` - Documenting available options to users - Setting up project-specific overrides diff --git a/plugins/git-flow/skills/merge-workflow.md b/plugins/git-flow/skills/merge-workflow.md index b8374d8..83bbd08 100644 --- a/plugins/git-flow/skills/merge-workflow.md +++ b/plugins/git-flow/skills/merge-workflow.md @@ -6,7 +6,7 @@ Defines merge strategies, conflict resolution approaches, and post-merge cleanup ## When to Use -- Merging feature branches in `/git-commit-merge` +- Merging feature branches in `/gitflow commit --merge` - Resolving conflicts during sync operations - Cleaning up after successful merges diff --git a/plugins/git-flow/skills/sync-workflow.md b/plugins/git-flow/skills/sync-workflow.md index 70d721b..8657bf6 100644 --- a/plugins/git-flow/skills/sync-workflow.md +++ b/plugins/git-flow/skills/sync-workflow.md @@ -6,8 +6,8 @@ Defines push/pull patterns, rebase strategies, upstream tracking, and stale bran ## When to Use -- Pushing commits in `/git-commit-push` -- Full sync operations in `/git-commit-sync` +- Pushing commits in `/gitflow commit --push` +- Full sync operations in `/gitflow commit --sync` - Detecting and reporting stale branches ## Push Workflow @@ -92,7 +92,7 @@ Stale local branches (remote deleted): - feat/old-feature (was tracking origin/feat/old-feature) - fix/merged-bugfix (was tracking origin/fix/merged-bugfix) -Run /branch-cleanup to remove these branches. +Run /gitflow branch-cleanup to remove these branches. ``` ## Remote Pruning @@ -123,7 +123,7 @@ No conflicts detected. 
Cleanup: Remote refs pruned: 2 - Stale local branches: 2 (run /branch-cleanup to remove) + Stale local branches: 2 (run /gitflow branch-cleanup to remove) ``` ## Tracking Setup diff --git a/plugins/git-flow/skills/visual-header.md b/plugins/git-flow/skills/visual-header.md index 70ece8b..f42d720 100644 --- a/plugins/git-flow/skills/visual-header.md +++ b/plugins/git-flow/skills/visual-header.md @@ -19,56 +19,56 @@ Standard header format for consistent visual output across all git-flow commands ## Command Headers -### /git-commit +### /gitflow commit ``` +----------------------------------------------------------------------+ | GIT-FLOW Smart Commit | +----------------------------------------------------------------------+ ``` -### /git-commit-push +### /gitflow commit --push ``` +----------------------------------------------------------------------+ | GIT-FLOW Commit & Push | +----------------------------------------------------------------------+ ``` -### /git-commit-sync +### /gitflow commit --sync ``` +----------------------------------------------------------------------+ | GIT-FLOW Commit Sync | +----------------------------------------------------------------------+ ``` -### /git-commit-merge +### /gitflow commit --merge ``` +----------------------------------------------------------------------+ | GIT-FLOW Commit & Merge | +----------------------------------------------------------------------+ ``` -### /branch-start +### /gitflow branch-start ``` +----------------------------------------------------------------------+ | GIT-FLOW Branch Start | +----------------------------------------------------------------------+ ``` -### /branch-cleanup +### /gitflow branch-cleanup ``` +----------------------------------------------------------------------+ | GIT-FLOW Branch Cleanup | +----------------------------------------------------------------------+ ``` -### /git-status +### /gitflow status ``` 
+----------------------------------------------------------------------+ | GIT-FLOW Status | +----------------------------------------------------------------------+ ``` -### /git-config +### /gitflow config ``` +----------------------------------------------------------------------+ | GIT-FLOW Configuration | diff --git a/plugins/ops-deploy-pipeline/.claude-plugin/plugin.json b/plugins/ops-deploy-pipeline/.claude-plugin/plugin.json new file mode 100644 index 0000000..13165b4 --- /dev/null +++ b/plugins/ops-deploy-pipeline/.claude-plugin/plugin.json @@ -0,0 +1,25 @@ +{ + "name": "ops-deploy-pipeline", + "version": "1.0.0", + "description": "CI/CD deployment pipeline management for Docker Compose and self-hosted infrastructure", + "author": { + "name": "Leo Miranda", + "email": "leobmiranda@gmail.com" + }, + "homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/ops-deploy-pipeline/README.md", + "repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git", + "license": "MIT", + "keywords": [ + "deployment", + "docker-compose", + "caddy", + "ci-cd", + "infrastructure", + "rollback", + "self-hosted" + ], + "commands": [ + "./commands/" + ], + "domain": "ops" +} diff --git a/plugins/ops-deploy-pipeline/README.md b/plugins/ops-deploy-pipeline/README.md new file mode 100644 index 0000000..3e49066 --- /dev/null +++ b/plugins/ops-deploy-pipeline/README.md @@ -0,0 +1,68 @@ +# ops-deploy-pipeline + +CI/CD deployment pipeline management for Docker Compose and self-hosted infrastructure. + +## Overview + +This plugin provides deployment configuration generation, validation, environment management, and rollback planning for services running on Docker Compose with Caddy reverse proxy. It is designed for self-hosted infrastructure, particularly Raspberry Pi and ARM64 targets. 
+ +## Commands + +| Command | Description | +|---------|-------------| +| `/deploy setup` | Interactive setup wizard for deployment configuration | +| `/deploy generate` | Generate docker-compose.yml, Caddyfile, and systemd units | +| `/deploy validate` | Validate deployment configs for correctness and best practices | +| `/deploy env` | Manage environment-specific config files | +| `/deploy check` | Pre-deployment health check (disk, memory, ports, DNS, Docker) | +| `/deploy rollback` | Generate rollback plan for a deployment | + +## Agents + +| Agent | Model | Mode | Purpose | +|-------|-------|------|---------| +| deploy-planner | sonnet | default | Configuration generation and rollback planning | +| deploy-validator | haiku | plan | Read-only validation and health checks | + +## Skills + +| Skill | Description | +|-------|-------------| +| compose-patterns | Docker Compose best practices and multi-service patterns | +| caddy-conventions | Caddyfile reverse proxy and subdomain routing patterns | +| env-management | Environment variable management across deployment stages | +| health-checks | Pre-deployment system health verification | +| rollback-patterns | Deployment rollback strategies and data safety | +| visual-header | Standard command output header | + +## Architecture + +``` +plugins/ops-deploy-pipeline/ +├── .claude-plugin/ +│ └── plugin.json +├── commands/ +│ ├── deploy.md # Dispatch file +│ ├── deploy-setup.md +│ ├── deploy-generate.md +│ ├── deploy-validate.md +│ ├── deploy-env.md +│ ├── deploy-check.md +│ └── deploy-rollback.md +├── agents/ +│ ├── deploy-planner.md +│ └── deploy-validator.md +├── skills/ +│ ├── compose-patterns.md +│ ├── caddy-conventions.md +│ ├── env-management.md +│ ├── health-checks.md +│ ├── rollback-patterns.md +│ └── visual-header.md +├── claude-md-integration.md +└── README.md +``` + +## License + +MIT License - Part of the Leo Claude Marketplace. 
diff --git a/plugins/ops-deploy-pipeline/agents/deploy-planner.md b/plugins/ops-deploy-pipeline/agents/deploy-planner.md new file mode 100644 index 0000000..4e9918d --- /dev/null +++ b/plugins/ops-deploy-pipeline/agents/deploy-planner.md @@ -0,0 +1,71 @@ +--- +name: deploy-planner +description: Deployment configuration generation and rollback planning for self-hosted services. Use for generating docker-compose.yml, Caddyfile, systemd units, environment configs, and rollback plans. +model: sonnet +permissionMode: default +--- + +# Deploy Planner Agent + +You are a deployment engineer specializing in Docker Compose-based self-hosted infrastructure. You generate production-ready configuration files and rollback plans. + +## Skills to Load + +- `skills/visual-header.md` +- `skills/compose-patterns.md` +- `skills/caddy-conventions.md` +- `skills/env-management.md` +- `skills/rollback-patterns.md` + +## Visual Output Requirements + +**MANDATORY: Display header at start of every response.** + +``` ++----------------------------------------------------------------------+ +| DEPLOY-PIPELINE - [Context] | ++----------------------------------------------------------------------+ +``` + +## Expertise + +- Docker Compose service orchestration +- Caddy reverse proxy configuration +- systemd service unit authoring +- Environment variable management and secret handling +- Blue-green and recreate rollback strategies +- ARM64 (Raspberry Pi) deployment constraints + +## Behavior Guidelines + +### Configuration Generation + +1. **Always generate valid YAML/config** - Syntax must be correct and parseable +2. **Pin image versions** - Never use `latest` in generated configs; ask user for specific version +3. **Include healthchecks** - Every service gets a healthcheck block +4. **Network isolation** - Create dedicated bridge networks, never use `host` mode without justification +5. **Resource awareness** - Default to conservative memory limits (256MB) for Raspberry Pi targets +6. 
**Document inline** - Add YAML comments explaining non-obvious choices + +### Environment Management + +1. **Never embed secrets** - Always use `env_file` references +2. **Provide .env.example** - Template with placeholder values and documentation comments +3. **Separate by environment** - .env.development, .env.staging, .env.production +4. **Validate completeness** - Cross-reference docker-compose variable references with env files + +### Rollback Planning + +1. **Capture current state** - Always document what is running before proposing changes +2. **Backup data first** - Volume backup commands must precede any destructive operations +3. **Estimate downtime** - Be explicit about service interruption duration +4. **Flag irreversible changes** - Database migrations, deleted volumes, schema changes + +## Available Commands + +| Command | Purpose | +|---------|---------| +| `/deploy setup` | Initial project setup wizard | +| `/deploy generate` | Generate deployment configs | +| `/deploy env` | Manage environment files | +| `/deploy rollback` | Create rollback plan | diff --git a/plugins/ops-deploy-pipeline/agents/deploy-validator.md b/plugins/ops-deploy-pipeline/agents/deploy-validator.md new file mode 100644 index 0000000..c5c501f --- /dev/null +++ b/plugins/ops-deploy-pipeline/agents/deploy-validator.md @@ -0,0 +1,79 @@ +--- +name: deploy-validator +description: Read-only validation of deployment configs and pre-flight health checks. Use for validating docker-compose.yml, Caddyfile, and running pre-deployment system checks. +model: haiku +permissionMode: plan +disallowedTools: Write, Edit, MultiEdit +--- + +# Deploy Validator Agent + +You are a deployment validation specialist. You analyze configuration files for correctness, security issues, and best practices without making any modifications. 
+ +## Skills to Load + +- `skills/visual-header.md` +- `skills/compose-patterns.md` +- `skills/health-checks.md` + +## Visual Output Requirements + +**MANDATORY: Display header at start of every response.** + +``` ++----------------------------------------------------------------------+ +| DEPLOY-PIPELINE - [Context] | ++----------------------------------------------------------------------+ +``` + +## Expertise + +- Docker Compose YAML syntax and semantics validation +- Caddyfile directive validation +- systemd unit file correctness +- Port conflict detection +- Environment variable completeness checking +- System resource availability assessment +- ARM64 image compatibility verification + +## Behavior Guidelines + +### Read-Only Operation + +You MUST NOT modify any files. Your role is strictly analysis and reporting. + +1. **Read configuration files** - Parse and analyze docker-compose.yml, Caddyfile, .env files +2. **Run diagnostic commands** - Use Bash to check system state (disk, memory, ports, Docker) +3. **Report findings** - Structured output with severity levels +4. **Recommend fixes** - Tell the user what to change, but do not change it yourself + +### Validation Severity Levels + +| Level | Meaning | Action | +|-------|---------|--------| +| Critical | Deployment will fail or cause data loss | Must fix before deploying | +| Warning | Deployment may have issues | Should fix before deploying | +| Info | Best practice recommendation | Consider improving | + +### Validation Approach + +1. **Syntax first** - Ensure files parse correctly before checking semantics +2. **Cross-reference** - Check that docker-compose services match Caddy upstream targets +3. **Environment completeness** - Verify all `${VAR}` references have corresponding values +4. **Port conflicts** - Check host ports against running services +5. 
**Architecture compatibility** - Verify images support target architecture + +### Report Format + +Always output findings in a structured report: +- Group by severity (Critical > Warning > Info) +- Include file path and line number when possible +- Provide specific fix recommendation for each finding +- End with summary counts and overall PASS/FAIL status + +## Available Commands + +| Command | Purpose | +|---------|---------| +| `/deploy validate` | Validate deployment configs | +| `/deploy check` | Pre-deployment health check | diff --git a/plugins/ops-deploy-pipeline/claude-md-integration.md b/plugins/ops-deploy-pipeline/claude-md-integration.md new file mode 100644 index 0000000..56deaf8 --- /dev/null +++ b/plugins/ops-deploy-pipeline/claude-md-integration.md @@ -0,0 +1,27 @@ +# Deploy Pipeline Integration + +Add to your project's CLAUDE.md: + +## Deployment Management (ops-deploy-pipeline) + +This project uses the **ops-deploy-pipeline** plugin for Docker Compose deployment configuration, validation, and rollback planning. 
+ +### Available Commands + +| Command | Description | +|---------|-------------| +| `/deploy setup` | Setup deployment configuration for this project | +| `/deploy generate` | Generate docker-compose.yml, Caddyfile, systemd units | +| `/deploy validate` | Validate configs for correctness and best practices | +| `/deploy env` | Manage .env.development / .env.production files | +| `/deploy check` | Pre-deployment health check (system resources, ports, Docker) | +| `/deploy rollback` | Generate rollback plan with volume backup steps | + +### Usage Guidelines + +- Run `/deploy setup` first to establish project deployment profile +- Use `/deploy generate` to create initial configs, then customize +- Always run `/deploy validate` before deploying to catch issues +- Use `/deploy check` on the target server before `docker compose up` +- Generate a `/deploy rollback` plan before any production deployment +- Never commit `.env.production` or `.env.staging` to version control diff --git a/plugins/ops-deploy-pipeline/commands/deploy-check.md b/plugins/ops-deploy-pipeline/commands/deploy-check.md new file mode 100644 index 0000000..4e0d85a --- /dev/null +++ b/plugins/ops-deploy-pipeline/commands/deploy-check.md @@ -0,0 +1,110 @@ +--- +name: deploy check +description: Pre-deployment health check — verify system readiness before deploying +--- + +# /deploy check + +Run pre-deployment health checks to verify the target system is ready. + +## Skills to Load + +- `skills/visual-header.md` +- `skills/health-checks.md` + +## Agent + +Delegate to `agents/deploy-validator.md`. + +## Usage + +``` +/deploy check [--service=] [--verbose] +``` + +**Options:** +- `--service` - Check readiness for a specific service only +- `--verbose` - Show detailed output for each check + +## Instructions + +Execute `skills/visual-header.md` with context "Pre-Deployment Check". 
+ +### Phase 1: System Resources + +Run system checks using Bash tool: + +| Check | Command | Pass Condition | +|-------|---------|----------------| +| Disk space | `df -h /` | >10% free | +| Memory | `free -m` | >256MB available | +| CPU load | `uptime` | Load average < CPU count | +| Temperature | `vcgencmd measure_temp` (RPi) or `/sys/class/thermal/` | <70C | + +### Phase 2: Docker Environment + +| Check | Command | Pass Condition | +|-------|---------|----------------| +| Docker daemon | `docker info` | Running | +| Docker Compose | `docker compose version` | Installed | +| Disk usage | `docker system df` | <80% usage | +| Network | `docker network ls` | Expected networks exist | + +### Phase 3: Port Availability + +1. Read `docker-compose.yml` for all host port mappings +2. Check each port with `ss -tlnp | grep :<port>` +3. If port is in use, identify the process occupying it +4. Flag conflicts as Critical + +### Phase 4: DNS and Network + +| Check | Command | Pass Condition | +|-------|---------|----------------| +| DNS resolution | `nslookup <domain>` | Resolves correctly | +| Reverse proxy | `curl -s -o /dev/null -w "%{http_code}" http://localhost:80` | Caddy responding | +| Tailscale | `tailscale status` | Connected (if applicable) | + +### Phase 5: Image Availability + +1. Parse `docker-compose.yml` for image references +2. Run `docker pull --dry-run <image>` or `docker manifest inspect <image>` +3. Verify images exist and support the target architecture (arm64 for RPi) +4.
Report image sizes and estimated pull time + +### Phase 6: Report + +``` +## Pre-Deployment Check Report + +### System Resources +[OK] Disk: 45% used (54GB free) +[OK] Memory: 1.2GB available +[OK] CPU: Load 0.8 (4 cores) +[OK] Temperature: 52C + +### Docker +[OK] Docker daemon: Running (v24.0.7) +[OK] Compose: v2.21.0 +[WARN] Docker disk: 72% used — consider pruning + +### Ports +[OK] 8080 — Available +[FAIL] 3000 — In use by grafana (PID 1234) + +### Network +[OK] DNS: myapp.hotport resolves +[OK] Caddy: Responding on :80 + +### Images +[OK] postgres:16-alpine — arm64 available (89MB) +[WARN] custom-app:latest — No arm64 manifest found + +### Summary +- Passed: 10 | Warnings: 2 | Failed: 1 +- Status: NOT READY — fix port conflict on 3000 +``` + +## User Request + +$ARGUMENTS diff --git a/plugins/ops-deploy-pipeline/commands/deploy-env.md b/plugins/ops-deploy-pipeline/commands/deploy-env.md new file mode 100644 index 0000000..1ef7132 --- /dev/null +++ b/plugins/ops-deploy-pipeline/commands/deploy-env.md @@ -0,0 +1,91 @@ +--- +name: deploy env +description: Manage environment-specific configuration files for deployments +--- + +# /deploy env + +Create and manage environment-specific configuration files. + +## Skills to Load + +- `skills/visual-header.md` +- `skills/env-management.md` + +## Agent + +Delegate to `agents/deploy-planner.md`. + +## Usage + +``` +/deploy env [--action=] [--env=] +``` + +**Actions:** +- `create` - Create a new environment config from .env.example (default) +- `diff` - Show differences between environment configs +- `sync` - Sync missing keys from .env.example to all environments +- `list` - List all environment files and their variable counts + +## Instructions + +Execute `skills/visual-header.md` with context "Environment Management". + +### Action: create + +1. Check `.env.example` exists as the source template +2. If missing, scan `docker-compose.yml` for referenced `${VARIABLES}` and create `.env.example` +3. 
Ask user which environment to create (development, staging, production) +4. Copy `.env.example` to `.env.<environment>` +5. For production, flag variables that need real values (API keys, passwords) +6. For development, suggest sensible defaults (localhost URLs, debug=true) +7. Warn user to never commit `.env.production` to version control +8. Verify `.gitignore` includes `.env.production` and `.env.staging` + +### Action: diff + +1. Read all `.env.*` files in the project +2. Compare variable names across environments +3. Report: + - Variables present in one environment but not others + - Variables with identical values across environments (potential issue) + - Variables in docker-compose but missing from all env files +4. Display as a comparison table + +### Action: sync + +1. Read `.env.example` as the canonical list of variables +2. For each `.env.<environment>` file: + - Identify missing variables + - Append missing variables with placeholder values + - Report what was added +3. Do NOT modify existing values + +### Action: list + +1. List all `.env*` files in the project +2.
For each file, show: + - Variable count + - Last modified date + - Whether all docker-compose referenced variables are present + +## Output Format + +``` +## Environment Files + +| File | Variables | Coverage | Status | +|------|-----------|----------|--------| +| .env.example | 12 | 100% | Template | +| .env.development | 12 | 100% | OK | +| .env.production | 10 | 83% | Missing 2 vars | + +### Missing in .env.production +- DATABASE_URL (referenced in docker-compose.yml:15) +- REDIS_PASSWORD (referenced in docker-compose.yml:28) +``` + +## User Request + +$ARGUMENTS diff --git a/plugins/ops-deploy-pipeline/commands/deploy-generate.md b/plugins/ops-deploy-pipeline/commands/deploy-generate.md new file mode 100644 index 0000000..71857df --- /dev/null +++ b/plugins/ops-deploy-pipeline/commands/deploy-generate.md @@ -0,0 +1,102 @@ +--- +name: deploy generate +description: Generate docker-compose.yml, Caddyfile, and systemd units for a service +--- + +# /deploy generate + +Generate deployment configuration files from templates and project context. + +## Skills to Load + +- `skills/visual-header.md` +- `skills/compose-patterns.md` +- `skills/caddy-conventions.md` +- `skills/env-management.md` + +## Agent + +Delegate to `agents/deploy-planner.md`. + +## Usage + +``` +/deploy generate [--target=] [--service=] +``` + +**Targets:** +- `compose` - Generate docker-compose.yml only +- `caddy` - Generate Caddyfile snippet only +- `systemd` - Generate systemd service unit only +- `all` - Generate all configuration files (default) + +## Instructions + +Execute `skills/visual-header.md` with context "Config Generation". + +### Phase 1: Context Analysis + +1. Read existing project files to determine: + - Application language/framework (Dockerfile, package.json, requirements.txt, go.mod) + - Required services (database, cache, message queue) + - Exposed ports + - Volume requirements (data persistence, config files) +2. 
Check if `deploy/` directory exists from previous `/deploy setup` +3. Read `.env.example` if present for variable names + +### Phase 2: Docker Compose Generation + +Apply patterns from `skills/compose-patterns.md`: + +1. **Service definition** - Image or build context, restart policy, healthcheck +2. **Network isolation** - Create dedicated network for the stack +3. **Volume management** - Named volumes for persistence, bind mounts for config +4. **Resource limits** - Memory and CPU limits appropriate for target platform +5. **Dependency ordering** - `depends_on` with `condition: service_healthy` +6. **Environment variables** - Reference `env_file` rather than inline secrets + +### Phase 3: Caddyfile Generation + +Apply patterns from `skills/caddy-conventions.md`: + +1. **Subdomain routing** - `subdomain.hostname` block +2. **Reverse proxy** - Point to container:port with Docker network DNS +3. **Headers** - Security headers, CORS if needed +4. **Rate limiting** - Default rate limit for API endpoints + +### Phase 4: Systemd Unit Generation (optional) + +Generate `systemd/<service>.service` for non-Docker services: +1. Unit description and dependencies +2. ExecStart/ExecStop commands +3. Restart policy and watchdog +4. User/Group restrictions + +### Phase 5: Output + +1. Show generated files with syntax highlighting +2. Ask user to confirm before writing +3. Write files to appropriate locations +4.
Display validation summary + +## Output Format + +``` +## Generated Files + +### docker-compose.yml +[content with annotations] + +### Caddyfile snippet +[content with annotations] + +### Summary +- Services: 3 (app, db, redis) +- Networks: 1 (app-network) +- Volumes: 2 (db-data, redis-data) +- Next: /deploy validate +``` + +## User Request + +$ARGUMENTS diff --git a/plugins/ops-deploy-pipeline/commands/deploy-rollback.md b/plugins/ops-deploy-pipeline/commands/deploy-rollback.md new file mode 100644 index 0000000..8853fe8 --- /dev/null +++ b/plugins/ops-deploy-pipeline/commands/deploy-rollback.md @@ -0,0 +1,126 @@ +--- +name: deploy rollback +description: Generate a rollback plan to revert a deployment to the previous state +--- + +# /deploy rollback + +Generate a rollback plan for reverting a deployment. + +## Skills to Load + +- `skills/visual-header.md` +- `skills/rollback-patterns.md` +- `skills/compose-patterns.md` + +## Agent + +Delegate to `agents/deploy-planner.md`. + +## Usage + +``` +/deploy rollback [--service=<name>] [--dry-run] [--strategy=<strategy>] +``` + +**Options:** +- `--service` - Target a specific service (default: entire stack) +- `--dry-run` - Show plan without executing +- `--strategy` - Rollback strategy: `recreate` (default) or `blue-green` + +## Instructions + +Execute `skills/visual-header.md` with context "Rollback Planning". + +### Phase 1: Current State Capture + +1. List running containers for the stack: + ```bash + docker compose ps + ``` +2. Record current image digests: + ```bash + docker compose images + ``` +3. Check for volume data that may need backup: + ```bash + docker volume ls --filter name=<stack> + ``` +4. Record current environment variables from `.env` +5. Save current `docker-compose.yml` hash for verification + +### Phase 2: Previous State Detection + +Attempt to identify the previous deployment state: + +1. Check git history for previous `docker-compose.yml`: + ```bash + git log --oneline -5 -- docker-compose.yml + ``` +2.
Check Docker image history for previous tags +3. Look for backup files: `docker-compose.yml.bak`, `.env.bak` +4. If no previous state found, warn user and ask for target state + +### Phase 3: Rollback Plan Generation + +Apply patterns from `skills/rollback-patterns.md`: + +#### Strategy: recreate (default) +1. Stop current containers: `docker compose down` +2. Restore previous docker-compose.yml from git +3. Restore previous .env file +4. Pull previous images if needed +5. Start containers: `docker compose up -d` +6. Verify health checks pass + +#### Strategy: blue-green +1. Start previous version alongside current (different ports) +2. Verify previous version health +3. Switch reverse proxy to point to previous version +4. Stop current version +5. Rename previous version to use standard ports + +### Phase 4: Data Considerations + +1. Identify services with persistent volumes (databases, file storage) +2. Check if database migrations were run (irreversible changes) +3. Recommend volume backup before rollback: + ```bash + docker run --rm -v <volume>:/data -v $(pwd):/backup alpine tar czf /backup/<volume>.tar.gz /data + ``` +4. Flag if rollback may cause data loss + +### Phase 5: Output + +``` +## Rollback Plan + +### Target +- Service: myapp-stack +- Current: v2.1.0 (deployed 2h ago) +- Rollback to: v2.0.3 + +### Steps +1. Backup database volume (estimated: 2min) + docker run --rm -v myapp_db:/data -v $(pwd):/backup alpine tar czf /backup/db-backup.tar.gz /data +2. Stop current stack + docker compose down +3. Restore previous config + git checkout HEAD~1 -- docker-compose.yml +4. Start previous version + docker compose up -d +5.
Verify health + docker compose ps + +### Warnings +- Database migration v45 was applied — may need manual revert +- Volume myapp_uploads has 230MB of new data since last deploy + +### Estimated Downtime +- Strategy: recreate — ~30 seconds +- Strategy: blue-green — ~0 seconds (requires port availability) +``` + +## User Request + +$ARGUMENTS diff --git a/plugins/ops-deploy-pipeline/commands/deploy-setup.md b/plugins/ops-deploy-pipeline/commands/deploy-setup.md new file mode 100644 index 0000000..d97e136 --- /dev/null +++ b/plugins/ops-deploy-pipeline/commands/deploy-setup.md @@ -0,0 +1,81 @@ +--- +name: deploy setup +description: Interactive setup wizard for deployment pipeline configuration +--- + +# /deploy setup + +Configure the ops-deploy-pipeline plugin for a project. + +## Skills to Load + +- `skills/visual-header.md` +- `skills/compose-patterns.md` + +## Agent + +Delegate to `agents/deploy-planner.md`. + +## Usage + +``` +/deploy setup +``` + +## Instructions + +Execute `skills/visual-header.md` with context "Setup Wizard". + +### Phase 1: Project Detection + +1. Read the current directory for existing configuration files: + - `docker-compose.yml` or `docker-compose.yaml` + - `Caddyfile` or `caddy/Caddyfile` + - `.env`, `.env.example`, `.env.production`, `.env.development` + - `systemd/*.service` files +2. Report what was found and what is missing + +### Phase 2: Deployment Profile + +Ask user to select deployment profile: + +| Profile | Description | +|---------|-------------| +| **single-service** | One container, one reverse proxy entry | +| **multi-service** | Multiple containers with shared network | +| **full-stack** | Application + database + cache + reverse proxy | + +### Phase 3: Infrastructure Target + +Collect target information: +1. **Hostname** - Server hostname (e.g., `hotport`) +2. **Subdomain** - Service subdomain (e.g., `myapp.hotport`) +3. **Port** - Internal service port +4. 
**Network mode** - Tailscale, local, or both + +### Phase 4: Generate Scaffold + +Based on profile and target: +1. Create `deploy/` directory if it does not exist +2. Generate `.env.example` with documented variables +3. Create deployment checklist in `deploy/CHECKLIST.md` +4. Report next steps to user + +### Completion Summary + +``` +DEPLOY-PIPELINE SETUP COMPLETE + +Profile: multi-service +Target: myapp.hotport +Config Dir: deploy/ + +Next steps: +- /deploy generate Generate docker-compose.yml and Caddyfile +- /deploy env Create environment-specific configs +- /deploy validate Validate generated configs +``` + +## User Request + +$ARGUMENTS diff --git a/plugins/ops-deploy-pipeline/commands/deploy-validate.md b/plugins/ops-deploy-pipeline/commands/deploy-validate.md new file mode 100644 index 0000000..24ddc19 --- /dev/null +++ b/plugins/ops-deploy-pipeline/commands/deploy-validate.md @@ -0,0 +1,103 @@ +--- +name: deploy validate +description: Validate deployment configs for correctness, security, and best practices +--- + +# /deploy validate + +Validate Docker Compose, Caddyfile, and systemd configurations. + +## Skills to Load + +- `skills/visual-header.md` +- `skills/compose-patterns.md` +- `skills/health-checks.md` + +## Agent + +Delegate to `agents/deploy-validator.md`. + +## Usage + +``` +/deploy validate [--target=] [--strict] +``` + +**Options:** +- `--target` - Which config to validate (default: `all`) +- `--strict` - Treat warnings as errors + +## Instructions + +Execute `skills/visual-header.md` with context "Config Validation". + +### Phase 1: File Discovery + +Locate configuration files: +- `docker-compose.yml` / `docker-compose.yaml` +- `Caddyfile` or `caddy/Caddyfile` +- `systemd/*.service` +- `.env`, `.env.production`, `.env.development` + +Report any expected files that are missing. 
+ +### Phase 2: Docker Compose Validation + +Check against patterns from `skills/compose-patterns.md`: + +| Check | Severity | Description | +|-------|----------|-------------| +| Valid YAML syntax | Critical | File must parse correctly | +| Image tags pinned | Warning | Avoid `latest` tag in production | +| Healthchecks defined | Warning | All services should have healthchecks | +| Restart policy set | Warning | Should be `unless-stopped` or `always` | +| Resource limits | Info | Memory/CPU limits recommended for constrained hosts | +| Network isolation | Warning | Services should use dedicated network, not `host` | +| Volume permissions | Warning | Bind mounts should have explicit read/write mode | +| No hardcoded secrets | Critical | Secrets must use env_file or Docker secrets | +| Port conflicts | Critical | No duplicate host port mappings | +| Dependency ordering | Info | Services with depends_on should use health conditions | + +### Phase 3: Caddyfile Validation + +| Check | Severity | Description | +|-------|----------|-------------| +| Valid syntax | Critical | Directives must be properly formatted | +| HTTPS configuration | Info | Automatic HTTPS or explicit cert paths | +| Reverse proxy targets | Warning | Target must match docker-compose service names | +| Security headers | Info | Recommend X-Frame-Options, CSP, HSTS | +| Duplicate routes | Critical | No conflicting route definitions | + +### Phase 4: Environment File Validation + +| Check | Severity | Description | +|-------|----------|-------------| +| .env.example exists | Warning | Template for required variables | +| No secrets in .env.example | Critical | Example file must use placeholders | +| All referenced vars defined | Critical | docker-compose env vars must have values | +| Consistent across environments | Info | Same keys in dev/staging/prod | + +### Phase 5: Report + +``` +## Validation Report + +### Critical (must fix) +- [file:line] Description of issue + Fix: Recommended solution + +### 
Warnings (should fix) +- [file:line] Description of issue + Fix: Recommended solution + +### Info (consider) +- [file:line] Description of improvement + +### Summary +- Critical: X | Warnings: Y | Info: Z +- Status: PASS / FAIL +``` + +## User Request + +$ARGUMENTS diff --git a/plugins/ops-deploy-pipeline/commands/deploy.md b/plugins/ops-deploy-pipeline/commands/deploy.md new file mode 100644 index 0000000..b91bab4 --- /dev/null +++ b/plugins/ops-deploy-pipeline/commands/deploy.md @@ -0,0 +1,18 @@ +--- +description: Deployment management — generate configs, validate pipelines, manage environments +--- + +# /deploy + +CI/CD deployment pipeline management for Docker Compose and self-hosted infrastructure. + +## Sub-commands + +| Sub-command | Description | +|-------------|-------------| +| `/deploy setup` | Interactive setup wizard for deployment configuration | +| `/deploy generate` | Generate docker-compose.yml, Caddyfile, and systemd units | +| `/deploy validate` | Validate deployment configs for correctness and best practices | +| `/deploy env` | Manage environment-specific config files (.env.development, .env.production) | +| `/deploy check` | Pre-deployment health check (disk, memory, ports, DNS, Docker) | +| `/deploy rollback` | Generate rollback plan for a deployment | diff --git a/plugins/ops-deploy-pipeline/skills/caddy-conventions.md b/plugins/ops-deploy-pipeline/skills/caddy-conventions.md new file mode 100644 index 0000000..645280c --- /dev/null +++ b/plugins/ops-deploy-pipeline/skills/caddy-conventions.md @@ -0,0 +1,126 @@ +# Caddy Conventions Skill + +Caddyfile patterns for reverse proxy configuration in self-hosted environments. 
+ +## Subdomain Routing + +Each service gets a subdomain of the server hostname: + +```caddyfile +myapp.hotport { + reverse_proxy app:8080 +} +``` + +For services on non-standard ports: +```caddyfile +myapp.hotport { + reverse_proxy app:3000 +} +``` + +## Reverse Proxy Directives + +### Basic Reverse Proxy + +```caddyfile +subdomain.hostname { + reverse_proxy container_name:port +} +``` + +### With Health Checks + +```caddyfile +subdomain.hostname { + reverse_proxy container_name:port { + health_uri /health + health_interval 30s + health_timeout 10s + } +} +``` + +### Load Balancing (Multiple Instances) + +```caddyfile +subdomain.hostname { + reverse_proxy app1:8080 app2:8080 { + lb_policy round_robin + } +} +``` + +## Security Headers + +Apply to all sites: + +```caddyfile +(security_headers) { + header { + X-Content-Type-Options nosniff + X-Frame-Options SAMEORIGIN + Referrer-Policy strict-origin-when-cross-origin + -Server + } +} +``` + +Import in site blocks: `import security_headers` + +## Rate Limiting + +For API endpoints: + +```caddyfile +subdomain.hostname { + rate_limit { + zone api_zone { + key {remote_host} + events 100 + window 1m + } + } + reverse_proxy app:8080 +} +``` + +## Docker Network Integration + +Caddy must be on the same Docker network as the target service to use container DNS names. 
The Caddy container needs: + +```yaml +networks: + - caddy-network + - app-network # Join each app's network +``` + +## CORS Configuration + +```caddyfile +subdomain.hostname { + header Access-Control-Allow-Origin "*" + header Access-Control-Allow-Methods "GET, POST, PUT, DELETE, OPTIONS" + header Access-Control-Allow-Headers "Content-Type, Authorization" + + @options method OPTIONS + respond @options 204 + + reverse_proxy app:8080 +} +``` + +## Automatic HTTPS + +- Caddy provides automatic HTTPS for public domains +- For local `.hotport` subdomains, use HTTP only (no valid TLS cert) +- For Tailscale access, consider `tls internal` for self-signed certs + +## File Server (Static Assets) + +```caddyfile +files.hotport { + root * /srv/files + file_server browse +} +``` diff --git a/plugins/ops-deploy-pipeline/skills/compose-patterns.md b/plugins/ops-deploy-pipeline/skills/compose-patterns.md new file mode 100644 index 0000000..94437b2 --- /dev/null +++ b/plugins/ops-deploy-pipeline/skills/compose-patterns.md @@ -0,0 +1,127 @@ +# Docker Compose Patterns Skill + +Best practices and patterns for Docker Compose service definitions targeting self-hosted infrastructure. + +## Service Naming + +- Use lowercase with hyphens: `my-service` +- Prefix with stack name for multi-project hosts: `myapp-db`, `myapp-redis` +- Container name should match service name: `container_name: myapp-db` + +## Network Isolation + +Every stack should define its own bridge network: + +```yaml +networks: + app-network: + driver: bridge +``` + +Services join the stack network. Only the reverse proxy entry point should be exposed to the host. 
+ +## Volume Management + +- Use **named volumes** for data persistence (databases, uploads) +- Use **bind mounts** for configuration files only +- Set explicit permissions with `:ro` for read-only mounts +- Label volumes with `labels` for identification + +```yaml +volumes: + db-data: + labels: + com.project: myapp + com.service: database +``` + +## Healthchecks + +Every service MUST have a healthcheck: + +```yaml +healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:8080/health"] + interval: 30s + timeout: 10s + retries: 3 + start_period: 40s +``` + +Common healthcheck patterns: +- HTTP: `curl -f http://localhost:<port>/health` +- PostgreSQL: `pg_isready -U <user>` +- Redis: `redis-cli ping` +- MySQL: `mysqladmin ping -h localhost` + +## Restart Policies + +| Environment | Policy | +|-------------|--------| +| Development | `restart: "no"` | +| Production | `restart: unless-stopped` | +| Critical services | `restart: always` | + +## Resource Limits + +For Raspberry Pi (8GB RAM): + +```yaml +deploy: + resources: + limits: + memory: 256M + cpus: '1.0' + reservations: + memory: 128M +``` + +## Dependency Ordering + +Use healthcheck-aware dependencies: + +```yaml +depends_on: + db: + condition: service_healthy + redis: + condition: service_started +``` + +## Environment Variables + +Never inline secrets. 
Use `env_file`: + +```yaml +env_file: + - .env + - .env.${DEPLOY_ENV:-development} +``` + +## Multi-Service Patterns + +### Web App + Database + Cache + +```yaml +services: + app: + image: myapp:1.0.0 + env_file: [.env] + depends_on: + db: { condition: service_healthy } + redis: { condition: service_healthy } + networks: [app-network] + + db: + image: postgres:16-alpine + volumes: [db-data:/var/lib/postgresql/data] + healthcheck: + test: ["CMD", "pg_isready", "-U", "postgres"] + networks: [app-network] + + redis: + image: redis:7-alpine + healthcheck: + test: ["CMD", "redis-cli", "ping"] + networks: [app-network] +``` diff --git a/plugins/ops-deploy-pipeline/skills/env-management.md b/plugins/ops-deploy-pipeline/skills/env-management.md new file mode 100644 index 0000000..7bbde60 --- /dev/null +++ b/plugins/ops-deploy-pipeline/skills/env-management.md @@ -0,0 +1,92 @@ +# Environment Management Skill + +Patterns for managing environment variables across deployment stages. + +## File Naming Convention + +| File | Purpose | Git Tracked | +|------|---------|-------------| +| `.env.example` | Template with placeholder values | Yes | +| `.env` | Local development defaults | No | +| `.env.development` | Development-specific overrides | No | +| `.env.staging` | Staging environment values | No | +| `.env.production` | Production secrets and config | No | + +## .env.example Format + +Document every variable with comments: + +```bash +# Application Settings +APP_NAME=myapp +APP_PORT=8080 +APP_DEBUG=false + +# Database Configuration +# PostgreSQL connection string +DATABASE_URL=postgresql://user:password@db:5432/myapp +DATABASE_POOL_SIZE=5 + +# Redis Configuration +REDIS_URL=redis://redis:6379/0 + +# External Services +# Generate at: https://example.com/api-keys +API_KEY=your-api-key-here +API_SECRET=your-secret-here +``` + +## Secret Handling Rules + +1. **Never commit secrets** to version control +2. `.env.production` and `.env.staging` MUST be in `.gitignore` +3. 
Use placeholder values in `.env.example`: `your-api-key-here`, `changeme`, `` +4. For shared team secrets, use a secrets manager or encrypted vault +5. Document where to obtain each secret in comments + +## Docker Compose Integration + +### Single env_file + +```yaml +env_file: + - .env +``` + +### Multi-environment + +```yaml +env_file: + - .env + - .env.${DEPLOY_ENV:-development} +``` + +### Variable Interpolation + +Docker Compose supports `${VAR:-default}` syntax: + +```yaml +services: + app: + image: myapp:${APP_VERSION:-latest} + ports: + - "${APP_PORT:-8080}:8080" +``` + +## Environment Diff Checking + +When comparing environments, check for: + +1. **Missing variables** - Present in .env.example but absent in target +2. **Extra variables** - Present in target but not in .env.example (may be stale) +3. **Placeholder values** - Production still has `changeme` or `your-*-here` +4. **Identical secrets** - Same password used in dev and prod (security risk) + +## Validation Checklist + +- [ ] All docker-compose `${VAR}` references have corresponding entries +- [ ] No secrets in `.env.example` +- [ ] `.gitignore` excludes `.env.production` and `.env.staging` +- [ ] Production variables have real values (no placeholders) +- [ ] Database URLs point to correct hosts per environment +- [ ] Debug flags are `false` in production diff --git a/plugins/ops-deploy-pipeline/skills/health-checks.md b/plugins/ops-deploy-pipeline/skills/health-checks.md new file mode 100644 index 0000000..81993c2 --- /dev/null +++ b/plugins/ops-deploy-pipeline/skills/health-checks.md @@ -0,0 +1,114 @@ +# Health Checks Skill + +Pre-deployment system health checks for self-hosted infrastructure. 
+ +## Disk Space Checks + +```bash +# Check root filesystem +df -h / | awk 'NR==2 {print $5}' + +# Check Docker data directory +df -h /var/lib/docker | awk 'NR==2 {print $5}' + +# Docker-specific disk usage +docker system df +``` + +| Threshold | Status | +|-----------|--------| +| <70% used | OK | +| 70-85% used | Warning - consider pruning | +| >85% used | Critical - prune before deploying | + +Pruning commands: +```bash +docker system prune -f # Remove stopped containers, unused networks +docker image prune -a -f # Remove unused images +docker volume prune -f # Remove unused volumes (CAUTION: data loss) +``` + +## Memory Checks + +```bash +free -m | awk 'NR==2 {printf "Total: %sMB, Used: %sMB, Available: %sMB\n", $2, $3, $7}' +``` + +| Available Memory | Status | +|-----------------|--------| +| >512MB | OK | +| 256-512MB | Warning - may be tight | +| <256MB | Critical - deployment may OOM | + +## Port Availability + +```bash +# Check if a specific port is in use +ss -tlnp | grep :<port> + +# List all listening ports +ss -tlnp +``` + +If a port is occupied: +1. Identify the process: `ss -tlnp | grep :<port>` shows PID +2. Check if it is the same service being updated (expected) +3. If it is a different service, flag as Critical conflict + +## DNS Resolution + +```bash +# Check if subdomain resolves +nslookup <subdomain>.<hostname> + +# Check /etc/hosts for local resolution +grep <subdomain> /etc/hosts +``` + +For `.hotport` subdomains, DNS is resolved via router hosts file or `/etc/hosts`. 
+ +## Docker Daemon Status + +```bash +# Check Docker is running +docker info > /dev/null 2>&1 && echo "OK" || echo "FAIL" + +# Check Docker version +docker version --format '{{.Server.Version}}' + +# Check Docker Compose +docker compose version +``` + +## Image Pull Verification + +```bash +# Check if image exists for target architecture +docker manifest inspect <image>:<tag> 2>/dev/null + +# Check available architectures +docker manifest inspect <image>:<tag> | grep architecture +``` + +For Raspberry Pi, required architecture is `arm64` or `arm/v8`. + +## SSL Certificate Checks + +```bash +# Check certificate expiry (for HTTPS services) +echo | openssl s_client -servername <domain> -connect <domain>:443 2>/dev/null | openssl x509 -noout -dates +``` + +## Temperature (Raspberry Pi) + +```bash +vcgencmd measure_temp +# or +cat /sys/class/thermal/thermal_zone0/temp # Divide by 1000 for Celsius +``` + +| Temperature | Status | +|-------------|--------| +| <60C | OK | +| 60-70C | Warning - fan should be active | +| >70C | Critical - may throttle | diff --git a/plugins/ops-deploy-pipeline/skills/rollback-patterns.md b/plugins/ops-deploy-pipeline/skills/rollback-patterns.md new file mode 100644 index 0000000..3e0edb4 --- /dev/null +++ b/plugins/ops-deploy-pipeline/skills/rollback-patterns.md @@ -0,0 +1,136 @@ +# Rollback Patterns Skill + +Strategies for reverting deployments safely with minimal data loss and downtime. + +## Strategy: Recreate (Default) + +Simple stop-and-restart with previous configuration. + +### Steps + +1. **Backup current state** + ```bash + cp docker-compose.yml docker-compose.yml.bak + cp .env .env.bak + docker compose images > current-images.txt + ``` + +2. **Backup volumes with data** + ```bash + docker run --rm -v <volume>:/data -v $(pwd)/backups:/backup \ + alpine tar czf /backup/<volume>-$(date +%Y%m%d%H%M).tar.gz /data + ``` + +3. **Stop current deployment** + ```bash + docker compose down + ``` + +4. **Restore previous config** + ```bash + git checkout -- docker-compose.yml .env + ``` + +5. 
**Start previous version** + ```bash + docker compose pull + docker compose up -d + ``` + +6. **Verify health** + ```bash + docker compose ps + docker compose logs --tail=20 + ``` + +### Estimated Downtime + +- Small stack (1-3 services): 10-30 seconds +- Medium stack (4-8 services): 30-60 seconds +- Large stack with DB: 1-3 minutes (depends on DB startup) + +## Strategy: Blue-Green + +Zero-downtime rollback by running both versions simultaneously. + +### Prerequisites + +- Available ports for the alternate deployment +- Reverse proxy that can switch upstream targets +- No port conflicts between blue and green instances + +### Steps + +1. **Start previous version on alternate ports** + - Modify docker-compose to use different host ports + - Start with `docker compose -p <project>-green up -d` + +2. **Verify previous version health** + - Hit health endpoints on alternate ports + - Confirm service functionality + +3. **Switch reverse proxy** + - Update Caddyfile to point to green deployment + - Reload Caddy: `docker exec caddy caddy reload --config /etc/caddy/Caddyfile` + +4. **Stop current (blue) version** + ```bash + docker compose -p <project>-blue down + ``` + +5. **Rename green to primary** + - Restore original ports in docker-compose + - Recreate with standard project name + +### Estimated Downtime + +- Near zero: Only the Caddy reload (sub-second) + +## Database Rollback Considerations + +### Safe (Reversible) + +- Data inserts only (can delete new rows) +- No schema changes +- Configuration changes in env vars + +### Dangerous (May Cause Data Loss) + +- Schema migrations that drop columns +- Data transformations (one-way) +- Index changes on large tables + +### Mitigation + +1. Always backup database volume before rollback +2. Check for migration files between versions +3. If schema changed, may need to restore from backup rather than rollback +4. 
Document migration reversibility in deploy notes + +## Volume Backup and Restore + +### Backup + +```bash +docker run --rm \ + -v <volume>:/data:ro \ + -v $(pwd)/backups:/backup \ + alpine tar czf /backup/<volume>.tar.gz -C /data . +``` + +### Restore + +```bash +docker run --rm \ + -v <volume>:/data \ + -v $(pwd)/backups:/backup \ + alpine sh -c "rm -rf /data/* && tar xzf /backup/<volume>.tar.gz -C /data" +``` + +## Post-Rollback Verification + +1. All containers running: `docker compose ps` +2. Health checks passing: `docker compose ps --format json | grep -c healthy` +3. Logs clean: `docker compose logs --tail=50 --no-color` +4. Application responding: `curl -s http://localhost:<port>/health` +5. Data integrity: Spot-check recent records in database diff --git a/plugins/ops-deploy-pipeline/skills/visual-header.md b/plugins/ops-deploy-pipeline/skills/visual-header.md new file mode 100644 index 0000000..2b01079 --- /dev/null +++ b/plugins/ops-deploy-pipeline/skills/visual-header.md @@ -0,0 +1,27 @@ +# Visual Header Skill + +Standard visual header for ops-deploy-pipeline commands. + +## Header Template + +``` ++----------------------------------------------------------------------+ +| DEPLOY-PIPELINE - [Context] | ++----------------------------------------------------------------------+ +``` + +## Context Values by Command + +| Command | Context | +|---------|---------| +| `/deploy setup` | Setup Wizard | +| `/deploy generate` | Config Generation | +| `/deploy validate` | Config Validation | +| `/deploy env` | Environment Management | +| `/deploy check` | Pre-Deployment Check | +| `/deploy rollback` | Rollback Planning | +| Agent mode | Deployment Management | + +## Usage + +Display header at the start of every command response before proceeding with the operation. 
diff --git a/plugins/ops-release-manager/.claude-plugin/plugin.json b/plugins/ops-release-manager/.claude-plugin/plugin.json new file mode 100644 index 0000000..44e2e58 --- /dev/null +++ b/plugins/ops-release-manager/.claude-plugin/plugin.json @@ -0,0 +1,25 @@ +{ + "name": "ops-release-manager", + "version": "1.0.0", + "description": "Release management with semantic versioning, changelog generation, and tag management", + "author": { + "name": "Leo Miranda", + "email": "leobmiranda@gmail.com" + }, + "homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/ops-release-manager/README.md", + "repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git", + "license": "MIT", + "keywords": [ + "release", + "semver", + "versioning", + "changelog", + "tags", + "deployment", + "release-management" + ], + "commands": [ + "./commands/" + ], + "domain": "ops" +} diff --git a/plugins/ops-release-manager/README.md b/plugins/ops-release-manager/README.md new file mode 100644 index 0000000..17c5cb5 --- /dev/null +++ b/plugins/ops-release-manager/README.md @@ -0,0 +1,66 @@ +# ops-release-manager + +Release management with semantic versioning, changelog generation, and tag management. + +## Overview + +ops-release-manager automates the release process: version bumping across all project files, changelog updates following Keep a Changelog format, git tag creation with release notes, and rollback capabilities. It supports Node.js, Python, Rust, and Claude marketplace projects. 
+ +## Commands + +| Command | Description | +|---------|-------------| +| `/release setup` | Detect version locations and configure release workflow | +| `/release prepare` | Bump versions, update changelog, create release branch | +| `/release validate` | Pre-release checks (versions, changelog, dependencies) | +| `/release tag` | Create annotated git tag with release notes | +| `/release rollback` | Revert a release (remove tag, revert version bump) | +| `/release status` | Show current version and unreleased changes | + +## Agents + +| Agent | Model | Mode | Role | +|-------|-------|------|------| +| release-coordinator | sonnet | acceptEdits | Version bumping, changelog, tags, branches | +| release-validator | haiku | plan (read-only) | Pre-release validation and dependency checks | + +## Skills + +| Skill | Purpose | +|-------|---------| +| version-detection | Find version in package.json, pyproject.toml, Cargo.toml, README, etc. | +| semver-rules | SemVer bump logic, conventional commit analysis | +| changelog-conventions | Keep a Changelog format, Unreleased section management | +| release-workflow | Branch-based and tag-based release patterns, rollback procedures | +| visual-header | Consistent command output headers | + +## Supported Ecosystems + +| Ecosystem | Version File | Lock File | +|-----------|-------------|-----------| +| Node.js | package.json | package-lock.json | +| Python | pyproject.toml, setup.cfg | poetry.lock, requirements.txt | +| Rust | Cargo.toml | Cargo.lock | +| Claude Marketplace | marketplace.json, plugin.json | N/A | + +## Release Flow + +``` +/release status -> See what is unreleased +/release prepare -> Bump versions + changelog +/release validate -> Pre-release checks +/release tag -> Create git tag +``` + +If something goes wrong: +``` +/release rollback -> Revert the release +``` + +## Installation + +This plugin is part of the Leo Claude Marketplace. It is installed automatically when the marketplace is configured. 
+ +## License + +MIT diff --git a/plugins/ops-release-manager/agents/release-coordinator.md b/plugins/ops-release-manager/agents/release-coordinator.md new file mode 100644 index 0000000..78077b4 --- /dev/null +++ b/plugins/ops-release-manager/agents/release-coordinator.md @@ -0,0 +1,59 @@ +--- +name: release-coordinator +description: Version bumping, changelog updates, and release branch/tag management +model: sonnet +permissionMode: acceptEdits +--- + +# Release Coordinator Agent + +You are a release engineer specializing in semantic versioning, changelog management, and release automation across multiple language ecosystems. + +## Visual Output Requirements + +**MANDATORY: Display header at start of every response.** + +``` ++----------------------------------------------------------------------+ +| RELEASE-MANAGER - [Command Context] | ++----------------------------------------------------------------------+ +``` + +## Core Principles + +1. **Version consistency is non-negotiable** — Every version location must match. A mismatch between package.json and README is a release blocker. + +2. **Changelogs are for humans** — Write changelog entries that explain the impact on users, not the implementation details. + +3. **Tags are immutable** — Once a tag is pushed, treat it as permanent. Rollbacks create revert commits, not force-pushed tags (unless explicitly requested). + +4. **Releases are reversible** — Every action taken during release preparation must have a documented undo path. + +## Expertise + +- **SemVer:** Major/minor/patch rules, pre-release identifiers (-alpha, -beta, -rc.1) +- **Changelog:** Keep a Changelog format, conventional commits parsing +- **Git:** Annotated tags, release branches, merge strategies +- **Ecosystems:** package.json, pyproject.toml, Cargo.toml, marketplace.json, setup.cfg +- **CI/CD:** Release triggers, deployment pipelines, artifact publishing + +## Release Workflow + +When preparing a release: + +1. 
**Verify state** — Clean working directory, correct branch, versions in sync. + +2. **Determine version** — From explicit input or conventional commit analysis. When in doubt, ask the user. + +3. **Update files atomically** — Change all version locations in a single commit. Never leave versions out of sync. + +4. **Changelog first** — Update the changelog before creating the tag. The changelog is the source of truth for what is in the release. + +5. **Review before finalizing** — Always show the complete diff before creating tags or pushing. A release is hard to undo once published. + +## Output Style + +- Show diffs for every file change +- Confirm destructive actions (tag deletion, force push) explicitly +- Provide copy-pasteable commands for manual steps +- Include "next steps" at the end of every operation diff --git a/plugins/ops-release-manager/agents/release-validator.md b/plugins/ops-release-manager/agents/release-validator.md new file mode 100644 index 0000000..edabeb8 --- /dev/null +++ b/plugins/ops-release-manager/agents/release-validator.md @@ -0,0 +1,70 @@ +--- +name: release-validator +description: Pre-release validation and dependency checks +model: haiku +permissionMode: plan +disallowedTools: Write, Edit, MultiEdit +--- + +# Release Validator Agent + +You are a release quality gate focused on detecting issues before they reach production. You never modify files — only analyze and report. + +## Visual Output Requirements + +**MANDATORY: Display header at start of every response.** + +``` ++----------------------------------------------------------------------+ +| RELEASE-MANAGER - Validate Release | ++----------------------------------------------------------------------+ +``` + +## Core Principles + +1. **Block on critical issues** — Version mismatches, missing changelog entries, and failing tests are release blockers. Do not soft-pedal them. + +2. 
**Warn on non-critical** — Minor documentation gaps or low-severity advisories are warnings, not blockers. + +3. **Be specific** — "Version mismatch" is useless. "package.json says 2.4.0 but README.md says 2.3.1" is actionable. + +4. **Check everything** — Users forget things. Check version files, changelog, git state, lock files, and documentation systematically. + +## Validation Checklist + +When validating a release, check every item: + +1. **Version Files** + - All detected locations report the same version + - Version is greater than the latest git tag + - Version follows SemVer format (no leading zeros, valid pre-release) + +2. **Changelog** + - Current version section exists + - Date is present and reasonable (today or within last 7 days) + - At least one entry exists + - Categories follow Keep a Changelog ordering + - No leftover [Unreleased] content that should have been moved + +3. **Git State** + - Working directory clean + - Branch up to date with remote + - No unresolved merge conflicts + - Tag does not already exist + +4. **Dependencies** + - Lock file matches manifest + - No critical security advisories + - No deprecated packages in direct dependencies + +5. **Documentation** + - README version matches + - Migration guide exists for major bumps + - Breaking changes are documented in changelog + +## Output Style + +- Use a structured pass/fail/warn table +- Include specific details for every non-pass result +- Provide a clear GO/NO-GO verdict at the end +- List exact steps to fix any failures diff --git a/plugins/ops-release-manager/claude-md-integration.md b/plugins/ops-release-manager/claude-md-integration.md new file mode 100644 index 0000000..c2d32d2 --- /dev/null +++ b/plugins/ops-release-manager/claude-md-integration.md @@ -0,0 +1,31 @@ +# Release Manager Integration + +Add to your project's CLAUDE.md: + +## Release Management + +This project uses ops-release-manager for versioning and release automation. 
+ +### Commands +- `/release setup` - Detect version locations and configure release workflow +- `/release status` - Show current version and unreleased changes +- `/release prepare ` - Bump versions and update changelog +- `/release validate` - Pre-release checks before tagging +- `/release tag` - Create annotated git tag with release notes +- `/release rollback` - Revert a release if needed + +### Versioning +- Follows [SemVer](https://semver.org/) (MAJOR.MINOR.PATCH) +- Version locations: package.json, README.md, CHANGELOG.md (auto-detected) +- Changelog follows [Keep a Changelog](https://keepachangelog.com) format + +### Release Process +1. All changes documented under `[Unreleased]` in CHANGELOG.md +2. Run `/release prepare minor` (or major/patch) when ready +3. Run `/release validate` to verify readiness +4. Run `/release tag --push` to finalize + +### Conventions +- Tag format: `vX.Y.Z` (annotated tags with release notes) +- Branch format: `release/X.Y.Z` (for major/minor releases) +- Commit message: `chore(release): prepare vX.Y.Z` diff --git a/plugins/ops-release-manager/commands/release-prepare.md b/plugins/ops-release-manager/commands/release-prepare.md new file mode 100644 index 0000000..1a869af --- /dev/null +++ b/plugins/ops-release-manager/commands/release-prepare.md @@ -0,0 +1,88 @@ +--- +name: release prepare +description: Prepare a release — bump versions across all files, update changelog, create release branch +--- + +# /release prepare + +Prepare a new release by bumping version numbers, updating the changelog, and optionally creating a release branch. 
+ +## Visual Output + +``` ++----------------------------------------------------------------------+ +| RELEASE-MANAGER - Prepare Release | ++----------------------------------------------------------------------+ +``` + +## Usage + +``` +/release prepare [--branch] [--no-branch] +``` + +**Version:** Explicit version (e.g., `2.4.0`) or bump type (`major`, `minor`, `patch`) +**--branch:** Create a release/X.Y.Z branch (default for minor/major) +**--no-branch:** Skip branch creation + +## Skills to Load + +- skills/version-detection.md +- skills/semver-rules.md +- skills/changelog-conventions.md + +## Process + +1. **Determine Target Version** + - If explicit version: validate it follows SemVer + - If bump type: calculate from current version + - `patch`: 2.3.1 -> 2.3.2 + - `minor`: 2.3.1 -> 2.4.0 + - `major`: 2.3.1 -> 3.0.0 + - If no argument: analyze commits since last tag, suggest bump type + +2. **Pre-flight Checks** + - Working directory is clean (no uncommitted changes) + - On correct base branch (development or main) + - [Unreleased] section in CHANGELOG.md has content + - All tests passing (if CI status available) + +3. **Update Version Files** + - Update all detected version locations (from setup) + - Show diff for each file before applying + - Maintain format consistency (quotes, spacing) + +4. **Update Changelog** + - Replace `[Unreleased]` with `[X.Y.Z] - YYYY-MM-DD` + - Add new empty `[Unreleased]` section above + - Update comparison links at bottom if present + +5. **Create Release Branch** (if applicable) + - Branch name: `release/X.Y.Z` + - Commit all version changes + - Commit message: `chore(release): prepare vX.Y.Z` + +6. 
**Summary** + - List all files modified + - Show the new version + - Next steps: review, validate, then tag + +## Output Format + +``` +## Release Preparation: v2.4.0 + +### Files Updated +- package.json: 2.3.1 -> 2.4.0 +- README.md: v2.3.1 -> v2.4.0 +- CHANGELOG.md: [Unreleased] -> [2.4.0] - 2026-02-06 + +### Branch +- Created: release/2.4.0 +- Commit: chore(release): prepare v2.4.0 + +### Next Steps +1. Review changes: `git diff development` +2. Validate: `/release validate` +3. Tag: `/release tag` +``` diff --git a/plugins/ops-release-manager/commands/release-rollback.md b/plugins/ops-release-manager/commands/release-rollback.md new file mode 100644 index 0000000..95e2908 --- /dev/null +++ b/plugins/ops-release-manager/commands/release-rollback.md @@ -0,0 +1,79 @@ +--- +name: release rollback +description: Revert a release — remove git tag, revert version bump commit, restore previous state +--- + +# /release rollback + +Revert a release by removing the git tag and reverting version bump changes. + +## Visual Output + +``` ++----------------------------------------------------------------------+ +| RELEASE-MANAGER - Rollback Release | ++----------------------------------------------------------------------+ +``` + +## Usage + +``` +/release rollback [] [--tag-only] [--force] +``` + +**Version:** Version to rollback (defaults to latest tag) +**--tag-only:** Only remove the tag, keep version changes +**--force:** Skip confirmation prompts + +## Skills to Load + +- skills/release-workflow.md + +## Process + +1. **Identify Release to Rollback** + - If version specified: find matching tag + - If not specified: use most recent tag + - Show the release details for confirmation + +2. **Safety Checks** + - Warn if tag has been pushed to remote + - Warn if other branches have been based on this release + - Warn if CI pipeline has already deployed + - Require explicit confirmation (unless --force) + +3. 
**Remove Git Tag** + - Delete local tag: `git tag -d vX.Y.Z` + - If tag was pushed: `git push origin :refs/tags/vX.Y.Z` + - Confirm tag removal + +4. **Revert Version Changes** (unless --tag-only) + - Find the version bump commit + - Create a revert commit: `git revert --no-edit` + - This restores CHANGELOG.md, version files to previous state + +5. **Cleanup** + - If release branch exists: offer to delete it + - Update any tracking references + - Show final state + +## Output Format + +``` +## Rollback: v2.4.0 + +### Actions Taken +- [x] Deleted local tag v2.4.0 +- [x] Deleted remote tag v2.4.0 +- [x] Reverted commit abc1234 (chore(release): prepare v2.4.0) +- [x] Deleted branch release/2.4.0 + +### Current State +- Version: 2.3.1 (restored) +- Latest tag: v2.3.1 +- CHANGELOG.md: [Unreleased] section restored + +### Warnings +- If any deployments were triggered, manual rollback may be needed +- Notify team members of the release revert +``` diff --git a/plugins/ops-release-manager/commands/release-setup.md b/plugins/ops-release-manager/commands/release-setup.md new file mode 100644 index 0000000..4c39906 --- /dev/null +++ b/plugins/ops-release-manager/commands/release-setup.md @@ -0,0 +1,71 @@ +--- +name: release setup +description: Detect version locations, release conventions, and configure release workflow +--- + +# /release setup + +Setup wizard for release management. Detects existing version locations and release conventions. + +## Visual Output + +``` ++----------------------------------------------------------------------+ +| RELEASE-MANAGER - Setup | ++----------------------------------------------------------------------+ +``` + +## Skills to Load + +- skills/version-detection.md + +## Process + +1. 
**Detect Version Locations** + - Scan for version strings in standard files: + - `package.json` (Node.js) + - `pyproject.toml` (Python) + - `setup.cfg`, `setup.py` (Python legacy) + - `Cargo.toml` (Rust) + - `marketplace.json` (Claude plugins) + - `README.md` title line + - `CHANGELOG.md` header + - Record each location with current version value + +2. **Check Version Consistency** + - Compare all detected versions + - Flag any mismatches between files + - Identify the "source of truth" file + +3. **Detect Release Conventions** + - Git tags: check `git tag` for existing pattern (v1.0.0 vs 1.0.0) + - Branching: check for release/* branches + - Changelog format: detect Keep a Changelog vs other + - CI/CD: check for release workflows in .github/workflows or .gitlab-ci.yml + +4. **Present Configuration** + - Show detected settings + - Ask user to confirm or override + - Store preferences for future commands + +## Output Format + +``` +## Release Configuration + +### Version Locations +| File | Current Version | Pattern | +|------|----------------|---------| +| package.json | 2.3.1 | "version": "X.Y.Z" | +| README.md | 2.3.1 | # Project - vX.Y.Z | +| CHANGELOG.md | 2.3.1 | ## [X.Y.Z] - YYYY-MM-DD | + +### Conventions +- Tag format: vX.Y.Z +- Branch pattern: release/X.Y.Z +- Changelog: Keep a Changelog format +- Source of truth: package.json + +### Status: Ready +All versions in sync. Release workflow configured. +``` diff --git a/plugins/ops-release-manager/commands/release-status.md b/plugins/ops-release-manager/commands/release-status.md new file mode 100644 index 0000000..1610258 --- /dev/null +++ b/plugins/ops-release-manager/commands/release-status.md @@ -0,0 +1,86 @@ +--- +name: release status +description: Show current version, unreleased changes, and release readiness +--- + +# /release status + +Display the current version, unreleased changes, and overall release readiness. 
+ +## Visual Output + +``` ++----------------------------------------------------------------------+ +| RELEASE-MANAGER - Status | ++----------------------------------------------------------------------+ +``` + +## Usage + +``` +/release status [--verbose] +``` + +**--verbose:** Include full unreleased changelog and commit list + +## Skills to Load + +- skills/version-detection.md +- skills/semver-rules.md + +## Process + +1. **Current Version** + - Read version from all known locations + - Show the latest git tag + - Flag any version mismatches + +2. **Unreleased Changes** + - Read [Unreleased] section from CHANGELOG.md + - Count entries by category (Added, Changed, Fixed, etc.) + - If verbose: show full content + +3. **Commit Analysis** + - List commits since last tag + - Parse conventional commit prefixes (feat, fix, chore, etc.) + - Suggest bump type based on commit types: + - Any `BREAKING CHANGE` or `!` → major + - Any `feat` → minor + - Only `fix`, `chore`, `docs` → patch + - If verbose: show commit list + +4. **Readiness Assessment** + - Check if [Unreleased] has content + - Check if all versions are in sync + - Check git state (clean working directory) + - Summarize blockers if any + +## Output Format + +``` +## Release Status + +### Current Version: 2.3.1 (tag: v2.3.1) +All 3 version locations in sync. + +### Unreleased Changes +| Category | Count | +|----------|-------| +| Added | 3 | +| Fixed | 2 | +| Changed | 1 | + +### Commits Since v2.3.1: 14 +- 5 feat (new features) +- 6 fix (bug fixes) +- 3 chore (maintenance) + +### Suggested Bump: MINOR (2.3.1 -> 2.4.0) +Reason: 5 new features detected + +### Readiness: READY +- [x] Unreleased changes documented +- [x] Versions in sync +- [x] Working directory clean +Run `/release prepare minor` to begin. 
+``` diff --git a/plugins/ops-release-manager/commands/release-tag.md b/plugins/ops-release-manager/commands/release-tag.md new file mode 100644 index 0000000..7d42341 --- /dev/null +++ b/plugins/ops-release-manager/commands/release-tag.md @@ -0,0 +1,83 @@ +--- +name: release tag +description: Create annotated git tag with release notes extracted from changelog +--- + +# /release tag + +Create and push an annotated git tag with release notes from the changelog. + +## Visual Output + +``` ++----------------------------------------------------------------------+ +| RELEASE-MANAGER - Tag Release | ++----------------------------------------------------------------------+ +``` + +## Usage + +``` +/release tag [--push] [--draft] +``` + +**--push:** Push tag to remote immediately (default: ask) +**--draft:** Create tag locally without pushing + +## Skills to Load + +- skills/release-workflow.md + +## Process + +1. **Pre-flight** + - Verify `/release validate` passes (run automatically if not done) + - Confirm current version from version files + - Check that tag does not already exist + +2. **Extract Release Notes** + - Read the current version's section from CHANGELOG.md + - Format as tag annotation body + - Include version number and date in tag message + +3. **Create Tag** + - Tag name: `vX.Y.Z` (matching project convention) + - Annotated tag with release notes as message + - Command: `git tag -a vX.Y.Z -m "Release vX.Y.Z\n\n"` + +4. **Push Decision** + - If --push: push tag to origin + - If --draft: keep local only + - Otherwise: show tag details and ask user + +5. 
**Post-Tag Actions** + - If release branch exists: remind to merge back and delete branch + - If CI release pipeline detected: note it will be triggered + - Show the complete release summary + +## Output Format + +``` +## Release Tagged: v2.4.0 + +### Tag +- Name: v2.4.0 +- Commit: abc1234 (HEAD) +- Date: 2026-02-06 + +### Release Notes +#### Added +- New feature X +- New feature Y + +#### Fixed +- Bug fix Z + +### Status: Tag created locally +Run `git push origin v2.4.0` to publish. + +### Post-Release +- [ ] Merge release/2.4.0 back to development +- [ ] Delete release/2.4.0 branch +- [ ] Verify CI pipeline triggered +``` diff --git a/plugins/ops-release-manager/commands/release-validate.md b/plugins/ops-release-manager/commands/release-validate.md new file mode 100644 index 0000000..e95a997 --- /dev/null +++ b/plugins/ops-release-manager/commands/release-validate.md @@ -0,0 +1,75 @@ +--- +name: release validate +description: Pre-release validation — verify version consistency, changelog, dependencies, and readiness +--- + +# /release validate + +Run pre-release checks to verify the project is ready for release. + +## Visual Output + +``` ++----------------------------------------------------------------------+ +| RELEASE-MANAGER - Validate Release | ++----------------------------------------------------------------------+ +``` + +## Skills to Load + +- skills/version-detection.md +- skills/changelog-conventions.md + +## Process + +1. **Version Consistency** + - Read version from all known locations + - Verify all locations report the same version + - Check version is greater than the latest git tag + - Verify version follows SemVer format + +2. **Changelog Validation** + - Verify [X.Y.Z] section exists with today's date (or recent date) + - Check all required categories are present if entries exist + - Verify no empty [Unreleased] content was left behind + - Check comparison links are updated + +3. 
**Git State** + - Working directory is clean + - Branch is up to date with remote + - No merge conflicts pending + - All CI checks passing (if detectable) + +4. **Dependency Check** + - Lock file is up to date (package-lock.json, poetry.lock, Cargo.lock) + - No known vulnerable dependencies (if audit tool available) + - No unpinned dependencies in production config + +5. **Documentation** + - README references correct version + - Migration guide exists for major versions + - Breaking changes are documented + +6. **Report** + - Show pass/fail for each check + - Block release if any critical check fails + - Warn on non-critical issues + +## Output Format + +``` +## Release Validation: v2.4.0 + +### Checks +| Check | Status | Details | +|-------|--------|---------| +| Version consistency | PASS | 3/3 files match v2.4.0 | +| Changelog | PASS | [2.4.0] section with 5 entries | +| Git state | PASS | Clean, up to date | +| Lock file | PASS | package-lock.json current | +| Dependencies | WARN | 1 advisory (low severity) | +| Documentation | PASS | README updated | + +### Result: READY FOR RELEASE +1 warning (non-blocking). Proceed with `/release tag`. +``` diff --git a/plugins/ops-release-manager/commands/release.md b/plugins/ops-release-manager/commands/release.md new file mode 100644 index 0000000..e45e247 --- /dev/null +++ b/plugins/ops-release-manager/commands/release.md @@ -0,0 +1,18 @@ +--- +description: Release management — version bumping, changelog updates, tag creation +--- + +# /release + +Release management with semantic versioning, changelog generation, and tag management. 
+ +## Sub-commands + +| Sub-command | Description | +|-------------|-------------| +| `/release setup` | Setup wizard — detect version locations and release conventions | +| `/release prepare` | Prepare release: bump versions, update changelog, create branch | +| `/release validate` | Pre-release checks — verify versions, changelog, dependencies | +| `/release tag` | Create and push git tag with release notes | +| `/release rollback` | Revert a release — remove tag, revert version bump | +| `/release status` | Show current version and unreleased changes | diff --git a/plugins/ops-release-manager/skills/changelog-conventions.md b/plugins/ops-release-manager/skills/changelog-conventions.md new file mode 100644 index 0000000..0ec6c71 --- /dev/null +++ b/plugins/ops-release-manager/skills/changelog-conventions.md @@ -0,0 +1,74 @@ +--- +description: Keep a Changelog format, Unreleased section management, and category ordering +--- + +# Changelog Conventions Skill + +## Overview + +Standards for maintaining a changelog following the Keep a Changelog format (keepachangelog.com). The changelog is the primary release communication artifact. + +## File Structure + +```markdown +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com), +and this project adheres to [Semantic Versioning](https://semver.org). + +## [Unreleased] + +### Added +- New features go here during development + +## [2.3.1] - 2026-01-15 + +### Fixed +- Bug fix description + +## [2.3.0] - 2026-01-01 + +### Added +- Feature description + +[Unreleased]: https://github.com/user/repo/compare/v2.3.1...HEAD +[2.3.1]: https://github.com/user/repo/compare/v2.3.0...v2.3.1 +[2.3.0]: https://github.com/user/repo/releases/tag/v2.3.0 +``` + +## Category Ordering + +Categories must appear in this order (only include categories with entries): + +1. **Added** — New features +2. 
**Changed** — Changes to existing functionality +3. **Deprecated** — Features that will be removed in future +4. **Removed** — Features removed in this release +5. **Fixed** — Bug fixes +6. **Security** — Security vulnerability fixes + +## Unreleased Section Management + +### During Development +All changes go under `[Unreleased]`. Never create a versioned section until release time. + +### At Release Time +1. Replace `[Unreleased]` heading with `[X.Y.Z] - YYYY-MM-DD` +2. Add new empty `[Unreleased]` section above +3. Update comparison links at bottom of file + +### Entry Writing Guidelines +- Start with a verb (Add, Fix, Change, Remove, Deprecate) +- Focus on user impact, not implementation details +- Reference issue numbers where applicable +- Keep entries concise (one line preferred) +- Group related changes into a single entry when appropriate + +## Comparison Links + +Maintain comparison links at the bottom of the file: +- `[Unreleased]` compares latest tag to HEAD +- Each version compares to the previous version +- First version links to the release tag diff --git a/plugins/ops-release-manager/skills/release-workflow.md b/plugins/ops-release-manager/skills/release-workflow.md new file mode 100644 index 0000000..e00dfa9 --- /dev/null +++ b/plugins/ops-release-manager/skills/release-workflow.md @@ -0,0 +1,99 @@ +--- +description: Release patterns, branching strategies, tagging, and pre-release versions +--- + +# Release Workflow Skill + +## Overview + +End-to-end release workflow patterns including branching, tagging, and rollback procedures. + +## Release Patterns + +### Tag-Based Release (Simple) +Best for: small projects, continuous deployment + +1. Commit changes to main/development +2. Update version files and changelog +3. Create annotated tag: `git tag -a vX.Y.Z -m "message"` +4. Push tag: `git push origin vX.Y.Z` +5. CI triggers deployment from tag + +### Branch-Based Release (Standard) +Best for: projects with QA cycles, staged releases + +1. 
Create branch: `git checkout -b release/X.Y.Z` +2. Update version files and changelog on branch +3. QA testing on release branch +4. Merge to main: `git merge release/X.Y.Z` +5. Tag on main: `git tag -a vX.Y.Z` +6. Merge back to development: `git merge release/X.Y.Z` +7. Delete release branch + +## Git Tag Operations + +### Creating Tags +```bash +# Annotated tag with release notes +git tag -a vX.Y.Z -m "Release vX.Y.Z + +Added: +- Feature description + +Fixed: +- Bug fix description" + +# Push single tag +git push origin vX.Y.Z + +# Push all tags +git push origin --tags +``` + +### Deleting Tags (Rollback) +```bash +# Delete local tag +git tag -d vX.Y.Z + +# Delete remote tag +git push origin :refs/tags/vX.Y.Z +``` + +## Pre-Release Workflow + +For releases that need staged rollout: + +1. `vX.Y.Z-alpha.1` — First alpha, feature incomplete +2. `vX.Y.Z-alpha.2` — Updated alpha +3. `vX.Y.Z-beta.1` — Feature complete, testing +4. `vX.Y.Z-rc.1` — Release candidate, final validation +5. `vX.Y.Z` — Stable release + +Each pre-release tag follows the same tagging process but does not update the main changelog section. + +## Rollback Procedure + +### If Tag Not Yet Pushed +1. Delete local tag +2. Revert version commit +3. Done + +### If Tag Already Pushed +1. Delete remote tag +2. Delete local tag +3. Revert version commit +4. Push revert commit +5. Notify team about release revert + +### If Deployment Occurred +1. Follow the above steps +2. Trigger deployment of the previous version +3. Verify rollback in production +4. 
Post-mortem on what went wrong + +## Safety Rules + +- Never force-push tags without explicit user confirmation +- Always create annotated tags (not lightweight) +- Include release notes in tag message +- Verify tag points to expected commit before pushing diff --git a/plugins/ops-release-manager/skills/semver-rules.md b/plugins/ops-release-manager/skills/semver-rules.md new file mode 100644 index 0000000..58ba45d --- /dev/null +++ b/plugins/ops-release-manager/skills/semver-rules.md @@ -0,0 +1,65 @@ +--- +description: Semantic versioning bump logic and conventional commit analysis +--- + +# SemVer Rules Skill + +## Overview + +Rules for determining the correct version bump based on the nature of changes, following Semantic Versioning 2.0.0 (semver.org). + +## Bump Type Rules + +### MAJOR (X.0.0) +Increment when making incompatible API changes: +- Removing a public function, class, or endpoint +- Changing function signatures (parameter types, return types) +- Renaming public exports +- Changing default behavior in a breaking way +- Dropping support for a platform or runtime version + +### MINOR (x.Y.0) +Increment when adding functionality in a backwards-compatible manner: +- Adding new functions, classes, or endpoints +- Adding optional parameters to existing functions +- New configuration options with sensible defaults +- Deprecating functionality (without removing it) +- Performance improvements that do not change behavior + +### PATCH (x.y.Z) +Increment when making backwards-compatible bug fixes: +- Fixing incorrect behavior +- Correcting documentation errors that affected usage +- Security patches that do not change API +- Fixing edge cases or error handling + +## Conventional Commits Mapping + +| Commit Prefix | Bump Type | Examples | +|---------------|-----------|---------| +| `feat:` | MINOR | New feature, new command, new option | +| `fix:` | PATCH | Bug fix, error correction | +| `docs:` | PATCH | Documentation update | +| `chore:` | PATCH | Dependency 
update, cleanup | +| `refactor:` | PATCH | Internal restructuring, no behavior change | +| `perf:` | PATCH | Performance improvement | +| `test:` | PATCH | Test additions or fixes | +| `BREAKING CHANGE:` | MAJOR | Any commit with this footer | +| `feat!:` / `fix!:` | MAJOR | Breaking change indicated by `!` | + +## Pre-release Versions + +For releases not yet stable: +- Alpha: `X.Y.Z-alpha.N` — feature incomplete, unstable +- Beta: `X.Y.Z-beta.N` — feature complete, testing +- Release Candidate: `X.Y.Z-rc.N` — ready for release, final testing + +Pre-release versions have lower precedence than the normal version: +`1.0.0-alpha.1 < 1.0.0-beta.1 < 1.0.0-rc.1 < 1.0.0` + +## Decision Flow + +1. Any breaking changes? -> MAJOR +2. Any new features? -> MINOR +3. Only fixes and maintenance? -> PATCH +4. When in doubt, ask the user diff --git a/plugins/ops-release-manager/skills/version-detection.md b/plugins/ops-release-manager/skills/version-detection.md new file mode 100644 index 0000000..6dee812 --- /dev/null +++ b/plugins/ops-release-manager/skills/version-detection.md @@ -0,0 +1,58 @@ +--- +description: Detect version locations across project files and parse current version +--- + +# Version Detection Skill + +## Overview + +Find and parse version strings from all standard locations in a project. Supports multiple language ecosystems. 
+ +## Detection Targets + +### Node.js / JavaScript +| File | Pattern | Example | +|------|---------|---------| +| `package.json` | `"version": "X.Y.Z"` | `"version": "2.3.1"` | +| `package-lock.json` | `"version": "X.Y.Z"` (root) | `"version": "2.3.1"` | + +### Python +| File | Pattern | Example | +|------|---------|---------| +| `pyproject.toml` | `version = "X.Y.Z"` | `version = "2.3.1"` | +| `setup.cfg` | `version = X.Y.Z` | `version = 2.3.1` | +| `setup.py` | `version="X.Y.Z"` | `version="2.3.1"` | +| `__version__.py` | `__version__ = "X.Y.Z"` | `__version__ = "2.3.1"` | + +### Rust +| File | Pattern | Example | +|------|---------|---------| +| `Cargo.toml` | `version = "X.Y.Z"` | `version = "2.3.1"` | + +### Claude Marketplace +| File | Pattern | Example | +|------|---------|---------| +| `marketplace.json` | `"version": "X.Y.Z"` | `"version": "2.3.1"` | +| `plugin.json` | `"version": "X.Y.Z"` | `"version": "2.3.1"` | + +### Documentation +| File | Pattern | Example | +|------|---------|---------| +| `README.md` | Title containing `vX.Y.Z` | `# Project - v2.3.1` | +| `CHANGELOG.md` | `## [X.Y.Z]` | `## [2.3.1] - 2026-01-15` | + +## Git Tags + +Parse existing tags to determine latest released version: +- `git tag --sort=-v:refname` — list tags by version +- Support both `vX.Y.Z` and `X.Y.Z` formats +- Detect the project's tag convention from existing tags + +## Version Parsing + +Extract and validate SemVer components: +- Major, Minor, Patch (required) +- Pre-release identifier (optional): `-alpha.1`, `-beta.2`, `-rc.1` +- Build metadata (optional): `+build.123` + +Report any versions that do not conform to SemVer. diff --git a/plugins/ops-release-manager/skills/visual-header.md b/plugins/ops-release-manager/skills/visual-header.md new file mode 100644 index 0000000..abc95c0 --- /dev/null +++ b/plugins/ops-release-manager/skills/visual-header.md @@ -0,0 +1,27 @@ +# Visual Header Skill + +Standard visual header for ops-release-manager commands. 
+ +## Header Template + +``` ++----------------------------------------------------------------------+ +| RELEASE-MANAGER - [Context] | ++----------------------------------------------------------------------+ +``` + +## Context Values by Command + +| Command | Context | +|---------|---------| +| `/release setup` | Setup | +| `/release prepare` | Prepare Release | +| `/release validate` | Validate Release | +| `/release tag` | Tag Release | +| `/release rollback` | Rollback Release | +| `/release status` | Status | +| Agent mode | Release Management | + +## Usage + +Display header at the start of every command response before proceeding with the operation. diff --git a/plugins/pr-review/claude-md-integration.md b/plugins/pr-review/claude-md-integration.md index f9f45a4..862b0cb 100644 --- a/plugins/pr-review/claude-md-integration.md +++ b/plugins/pr-review/claude-md-integration.md @@ -12,10 +12,13 @@ This project uses the pr-review plugin for automated code review. | Command | Use Case | |---------|----------| -| `/pr-review ` | Full multi-agent review | -| `/pr-summary ` | Quick change summary | -| `/pr-findings ` | Filter review findings | -| `/pr-diff ` | View diff with inline comments | +| `/pr review ` | Full multi-agent review | +| `/pr summary ` | Quick change summary | +| `/pr findings ` | Filter review findings | +| `/pr diff ` | View diff with inline comments | +| `/pr init` | Quick project setup | +| `/pr sync` | Sync config with git remote | +| `/pr setup` | Full setup wizard | ### Review Categories diff --git a/plugins/pr-review/commands/pr-diff.md b/plugins/pr-review/commands/pr-diff.md index ea28529..ccadc9c 100644 --- a/plugins/pr-review/commands/pr-diff.md +++ b/plugins/pr-review/commands/pr-diff.md @@ -1,4 +1,10 @@ -# /pr-diff - Annotated PR Diff Viewer +--- +name: pr diff +description: Formatted diff with inline review comments +agent: coordinator +--- + +# /pr diff - Annotated PR Diff Viewer ## Visual Output @@ -13,7 +19,7 @@ Display header: 
`PR-REVIEW - Diff Viewer` ## Usage ``` -/pr-diff [--repo owner/repo] [--context ] [--no-comments] [--file ] +/pr diff [--repo owner/repo] [--context ] [--no-comments] [--file ] ``` ## Workflow @@ -43,6 +49,6 @@ Use annotated diff format from `skills/output-formats.md` | Command | Purpose | |---------|---------| -| `/pr-summary` | Quick overview | -| `/pr-review` | Full review | -| `/pr-findings` | Filter findings | +| `/pr summary` | Quick overview | +| `/pr review` | Full review | +| `/pr findings` | Filter findings | diff --git a/plugins/pr-review/commands/pr-findings.md b/plugins/pr-review/commands/pr-findings.md index 50ae9f1..308267b 100644 --- a/plugins/pr-review/commands/pr-findings.md +++ b/plugins/pr-review/commands/pr-findings.md @@ -1,4 +1,10 @@ -# /pr-findings - Filter Review Findings +--- +name: pr findings +description: List and filter review findings by category/severity +agent: coordinator +--- + +# /pr findings - Filter Review Findings ## Visual Output @@ -13,14 +19,14 @@ Display header: `PR-REVIEW - Findings` ## Usage ``` -/pr-findings [--category ] [--severity ] [--confidence ] [--file ] [--compact] [--json] +/pr findings [--category ] [--severity ] [--confidence ] [--file ] [--compact] [--json] ``` ## Workflow ### Without Previous Review -Prompt: "No review found. Run /pr-review, /pr-summary, or cancel?" +Prompt: "No review found. Run `/pr review`, `/pr summary`, or cancel?" 
### With Previous Review @@ -38,8 +44,8 @@ Reference `skills/output-formats.md`: ## Examples ```bash -/pr-findings 123 --category security -/pr-findings 123 --severity critical,major -/pr-findings 123 --confidence 0.8 -/pr-findings 123 --file src/api/* +/pr findings 123 --category security +/pr findings 123 --severity critical,major +/pr findings 123 --confidence 0.8 +/pr findings 123 --file src/api/* ``` diff --git a/plugins/pr-review/commands/project-init.md b/plugins/pr-review/commands/pr-init.md similarity index 72% rename from plugins/pr-review/commands/project-init.md rename to plugins/pr-review/commands/pr-init.md index e25ac94..fe9b1ff 100644 --- a/plugins/pr-review/commands/project-init.md +++ b/plugins/pr-review/commands/pr-init.md @@ -1,8 +1,9 @@ --- +name: pr init description: Quick project setup - configures only project-level settings --- -# Project Initialization (PR Review) +# /pr init - Project Initialization (PR Review) ## Visual Output @@ -17,13 +18,13 @@ Display header: `PR-REVIEW - Project Setup` Fast setup when system-level config already exists. -**Use when:** Already ran `/pr-setup`, starting new project +**Use when:** Already ran `/pr setup`, starting new project ## Workflow ### Pre-Flight Check -Verify `~/.config/claude/gitea.env` exists. If missing: redirect to `/pr-setup` +Verify `~/.config/claude/gitea.env` exists. 
If missing: redirect to `/pr setup` ### Project Setup @@ -40,6 +41,6 @@ Display project configured format from `skills/output-formats.md` ## Ready Commands -- `/pr-review ` - Full review -- `/pr-summary ` - Quick summary -- `/pr-findings ` - List findings +- `/pr review ` - Full review +- `/pr summary ` - Quick summary +- `/pr findings ` - List findings diff --git a/plugins/pr-review/commands/pr-review.md b/plugins/pr-review/commands/pr-review.md index cfec27c..15e073c 100644 --- a/plugins/pr-review/commands/pr-review.md +++ b/plugins/pr-review/commands/pr-review.md @@ -1,4 +1,10 @@ -# /pr-review - Full Multi-Agent Review +--- +name: pr review +description: Full multi-agent PR review with confidence scoring +agent: coordinator +--- + +# /pr review - Full Multi-Agent Review ## Visual Output @@ -14,7 +20,7 @@ Display header: `PR-REVIEW - Full Review` ## Usage ``` -/pr-review [--repo owner/repo] +/pr review [--repo owner/repo] ``` ## Workflow diff --git a/plugins/pr-review/commands/pr-setup.md b/plugins/pr-review/commands/pr-setup.md index 375b1ef..dd39deb 100644 --- a/plugins/pr-review/commands/pr-setup.md +++ b/plugins/pr-review/commands/pr-setup.md @@ -1,8 +1,9 @@ --- +name: pr setup description: Interactive setup wizard for pr-review plugin --- -# PR Review Setup Wizard +# /pr setup - PR Review Setup Wizard ## Visual Output @@ -40,6 +41,6 @@ Test API connection, display completion summary, remind to restart session ## Available Commands After Setup -- `/pr-review ` - Full multi-agent review -- `/pr-summary ` - Quick summary -- `/pr-findings ` - List findings +- `/pr review ` - Full multi-agent review +- `/pr summary ` - Quick summary +- `/pr findings ` - List findings diff --git a/plugins/pr-review/commands/pr-summary.md b/plugins/pr-review/commands/pr-summary.md index 5a4cb3d..e4f9117 100644 --- a/plugins/pr-review/commands/pr-summary.md +++ b/plugins/pr-review/commands/pr-summary.md @@ -1,4 +1,10 @@ -# /pr-summary - Quick PR Summary +--- +name: pr summary 
+description: Quick summary of PR changes +agent: coordinator +--- + +# /pr summary - Quick PR Summary ## Visual Output @@ -13,7 +19,7 @@ Display header: `PR-REVIEW - Quick Summary` ## Usage ``` -/pr-summary [--repo owner/repo] +/pr summary [--repo owner/repo] ``` ## Workflow @@ -38,4 +44,4 @@ Use summary format from `skills/output-formats.md` - Quick overview before full review - Triage multiple PRs -- Decide if /pr-review is needed +- Decide if `/pr review` is needed diff --git a/plugins/pr-review/commands/project-sync.md b/plugins/pr-review/commands/pr-sync.md similarity index 92% rename from plugins/pr-review/commands/project-sync.md rename to plugins/pr-review/commands/pr-sync.md index 1e49d00..dbfbad2 100644 --- a/plugins/pr-review/commands/project-sync.md +++ b/plugins/pr-review/commands/pr-sync.md @@ -1,8 +1,9 @@ --- +name: pr sync description: Sync project configuration with current git remote --- -# Project Sync (PR Review) +# /pr sync - Project Sync (PR Review) ## Visual Output diff --git a/plugins/pr-review/commands/pr.md b/plugins/pr-review/commands/pr.md new file mode 100644 index 0000000..b068f8c --- /dev/null +++ b/plugins/pr-review/commands/pr.md @@ -0,0 +1,19 @@ +--- +description: Pull request review and management +--- + +# /pr + +Multi-agent pull request review with confidence scoring. 
+ +## Sub-commands + +| Sub-command | Description | +|-------------|-------------| +| `/pr review` | Full multi-agent PR review with confidence scoring | +| `/pr summary` | Quick summary of PR changes | +| `/pr findings` | List and filter review findings by category/severity | +| `/pr diff` | Formatted diff with inline review comments | +| `/pr init` | Quick project setup for PR reviews | +| `/pr sync` | Sync config with git remote after repo move/rename | +| `/pr setup` | Setup wizard for pr-review | diff --git a/plugins/pr-review/skills/output-formats.md b/plugins/pr-review/skills/output-formats.md index c363f9b..75a4431 100644 --- a/plugins/pr-review/skills/output-formats.md +++ b/plugins/pr-review/skills/output-formats.md @@ -167,9 +167,9 @@ Fix: +============================================================+ Ready to review PRs: -- /pr-review Full multi-agent review -- /pr-summary Quick summary -- /pr-findings List findings +- /pr review Full multi-agent review +- /pr summary Quick summary +- /pr findings List findings ``` ## Annotated Diff Format diff --git a/plugins/pr-review/skills/pr-analysis.md b/plugins/pr-review/skills/pr-analysis.md index abacebf..07e81a9 100644 --- a/plugins/pr-review/skills/pr-analysis.md +++ b/plugins/pr-review/skills/pr-analysis.md @@ -106,7 +106,7 @@ Key files: ``` Scope: [Small|Medium|Large] Risk: [Low|Medium|High] -Recommendation: [/pr-review suggested | Looks good to merge] +Recommendation: [/pr review suggested | Looks good to merge] ``` ## Annotated Diff Display diff --git a/plugins/projman/CONFIGURATION.md b/plugins/projman/CONFIGURATION.md index f66d636..8f05fc2 100644 --- a/plugins/projman/CONFIGURATION.md +++ b/plugins/projman/CONFIGURATION.md @@ -5,6 +5,6 @@ See **[docs/CONFIGURATION.md](../../docs/CONFIGURATION.md)** for complete setup ## Quick Commands ``` -/pm-setup --full # First time on this machine -/pm-setup --quick # New project (system already configured) +/projman setup --full # First time on this machine 
+/projman setup --quick # New project (system already configured) ``` diff --git a/plugins/projman/claude-md-integration.md b/plugins/projman/claude-md-integration.md index 19d0dbc..741f400 100644 --- a/plugins/projman/claude-md-integration.md +++ b/plugins/projman/claude-md-integration.md @@ -6,12 +6,12 @@ This project uses the **projman** plugin for sprint planning and project managem | Command | Description | |---------|-------------| -| `/sprint-plan` | Start sprint planning with AI-guided architecture analysis | -| `/sprint-start` | Begin sprint execution with relevant lessons learned | -| `/sprint-status` | Check current sprint progress and identify blockers | -| `/sprint-close` | Complete sprint and capture lessons learned to Gitea Wiki | -| `/labels-sync` | Synchronize label taxonomy from Gitea | -| `/pm-setup` | Run initial setup for projman plugin | +| `/sprint plan` | Start sprint planning with AI-guided architecture analysis | +| `/sprint start` | Begin sprint execution with relevant lessons learned | +| `/sprint status` | Check current sprint progress and identify blockers | +| `/sprint close` | Complete sprint and capture lessons learned to Gitea Wiki | +| `/labels sync` | Synchronize label taxonomy from Gitea | +| `/projman setup` | Run initial setup for projman plugin | | `/rfc create` | Create new RFC from conversation or clarified spec | | `/rfc list` | List all RFCs grouped by status | | `/rfc review` | Submit Draft RFC for review | @@ -56,8 +56,8 @@ The following Gitea MCP tools are available for issue and project management: ### Usage Guidelines -- **Always use `/sprint-plan`** when starting new development work -- **Check `/sprint-status`** regularly during active sprints -- **Run `/sprint-close`** at the end of each sprint to capture lessons learned +- **Always use `/sprint plan`** when starting new development work +- **Check `/sprint status`** regularly during active sprints +- **Run `/sprint close`** at the end of each sprint to capture 
lessons learned - Use `suggest_labels` when creating issues to ensure proper categorization - Search lessons learned with `search_lessons` before implementing features to avoid repeated mistakes diff --git a/plugins/projman/commands/labels-sync.md b/plugins/projman/commands/labels-sync.md index c00e74e..8fc3a3c 100644 --- a/plugins/projman/commands/labels-sync.md +++ b/plugins/projman/commands/labels-sync.md @@ -1,4 +1,5 @@ --- +name: labels sync description: Fetch and validate label taxonomy from Gitea, create missing required labels --- @@ -16,7 +17,7 @@ Fetch current label taxonomy from Gitea, validate required labels exist, and cre ## Invocation -Run `/labels-sync` when setting up the plugin or after taxonomy updates. +Run `/labels sync` when setting up the plugin or after taxonomy updates. ## Workflow diff --git a/plugins/projman/commands/labels.md b/plugins/projman/commands/labels.md new file mode 100644 index 0000000..cdede03 --- /dev/null +++ b/plugins/projman/commands/labels.md @@ -0,0 +1,13 @@ +--- +description: Label management +--- + +# /labels + +Label management for projman. + +## Sub-commands + +| Sub-command | Description | +|-------------|-------------| +| `/labels sync` | Sync label taxonomy to Gitea repository | diff --git a/plugins/projman/commands/project-initiation.md b/plugins/projman/commands/project-initiation.md index fdf21be..6a3a154 100644 --- a/plugins/projman/commands/project-initiation.md +++ b/plugins/projman/commands/project-initiation.md @@ -46,7 +46,7 @@ Show the charter to the user. 
Wait for approval before proceeding to `/project p ## DO NOT -- Create sprint issues — that's `/sprint-plan` +- Create sprint issues — that's `/sprint plan` - Create WBS or roadmap — that's `/project plan` - Make architecture decisions — suggest ADRs via `/adr create` - Skip user approval of the charter diff --git a/plugins/projman/commands/project-plan.md b/plugins/projman/commands/project-plan.md index c87cbcf..0e3ba49 100644 --- a/plugins/projman/commands/project-plan.md +++ b/plugins/projman/commands/project-plan.md @@ -44,7 +44,7 @@ Update `Project: {Name}` wiki page: - Change status: `Initiating` → `Planning` ### Step 6: Present and Confirm -Show all artifacts to user. Approval transitions to `Executing` (ready for first `/sprint-plan`). +Show all artifacts to user. Approval transitions to `Executing` (ready for first `/sprint plan`). ## Output diff --git a/plugins/projman/commands/pm-setup.md b/plugins/projman/commands/projman-setup.md similarity index 88% rename from plugins/projman/commands/pm-setup.md rename to plugins/projman/commands/projman-setup.md index e7c1ed2..8a68768 100644 --- a/plugins/projman/commands/pm-setup.md +++ b/plugins/projman/commands/projman-setup.md @@ -1,8 +1,9 @@ --- +name: projman setup description: Configure projman - full setup, quick project init, or sync after repo move --- -# PM Setup +# Projman Setup ## Skills Required @@ -22,11 +23,11 @@ Unified setup command for all configuration needs. 
## Invocation ``` -/pm-setup # Auto-detect appropriate mode -/pm-setup --full # Full wizard (MCP + system + project) -/pm-setup --quick # Project-only setup -/pm-setup --sync # Update after repo move -/pm-setup --clear-cache # Clear plugin cache (between sessions only) +/projman setup # Auto-detect appropriate mode +/projman setup --full # Full wizard (MCP + system + project) +/projman setup --quick # Project-only setup +/projman setup --sync # Update after repo move +/projman setup --clear-cache # Clear plugin cache (between sessions only) ``` ## Mode Detection diff --git a/plugins/projman/commands/projman.md b/plugins/projman/commands/projman.md new file mode 100644 index 0000000..38b7530 --- /dev/null +++ b/plugins/projman/commands/projman.md @@ -0,0 +1,13 @@ +--- +description: projman plugin management +--- + +# /projman + +Plugin-level management commands for projman. + +## Sub-commands + +| Sub-command | Description | +|-------------|-------------| +| `/projman setup` | Configure projman for a project | diff --git a/plugins/projman/commands/rfc.md b/plugins/projman/commands/rfc.md index 75dcedc..6785605 100644 --- a/plugins/projman/commands/rfc.md +++ b/plugins/projman/commands/rfc.md @@ -38,14 +38,14 @@ Manage the full RFC lifecycle through sub-commands. RFCs provide a structured wa Create a new RFC wiki page to track a feature idea through the review lifecycle. **Workflow:** -1. Check if conversation has clarified specification (from `/clarify`) +1. Check if conversation has clarified specification (from `/clarity clarify`) 2. If no context: prompt for Summary, Motivation, and initial Design 3. Call `allocate_rfc_number` MCP tool for next sequential number 4. Create RFC page using template from `skills/rfc-templates.md` 5. Update RFC-Index wiki page (create if doesn't exist) 6. 
Display RFC number, wiki link, and next steps -**Input Mapping (from /clarify):** +**Input Mapping (from /clarity clarify):** | Clarify Section | RFC Section | |-----------------|-------------| diff --git a/plugins/projman/commands/sprint-close.md b/plugins/projman/commands/sprint-close.md index def98b8..ea317a1 100644 --- a/plugins/projman/commands/sprint-close.md +++ b/plugins/projman/commands/sprint-close.md @@ -1,4 +1,5 @@ --- +name: sprint close description: Complete sprint and capture lessons learned to Gitea Wiki agent: orchestrator --- @@ -21,7 +22,7 @@ Complete the sprint, capture lessons learned to Gitea Wiki, and update documenta ## Invocation -Run `/sprint-close` when sprint work is complete. +Run `/sprint close` when sprint work is complete. ## Workflow diff --git a/plugins/projman/commands/sprint-plan.md b/plugins/projman/commands/sprint-plan.md index e3815cd..87c0238 100644 --- a/plugins/projman/commands/sprint-plan.md +++ b/plugins/projman/commands/sprint-plan.md @@ -1,4 +1,5 @@ --- +name: sprint plan description: Start sprint planning with AI-guided architecture analysis and issue creation agent: planner --- diff --git a/plugins/projman/commands/pm-review.md b/plugins/projman/commands/sprint-review.md similarity index 92% rename from plugins/projman/commands/pm-review.md rename to plugins/projman/commands/sprint-review.md index 92781b9..136e174 100644 --- a/plugins/projman/commands/pm-review.md +++ b/plugins/projman/commands/sprint-review.md @@ -1,9 +1,10 @@ --- +name: sprint review description: Pre-sprint-close code quality review agent: code-reviewer --- -# PM Review - Code Review for Sprint Close +# Sprint Review - Code Review for Sprint Close ## Skills Required @@ -16,7 +17,7 @@ Review recent code changes for quality issues before closing the sprint. ## Invocation -Run `/pm-review` before `/sprint-close` to catch issues. +Run `/sprint review` before `/sprint close` to catch issues. 
## Workflow diff --git a/plugins/projman/commands/sprint-start.md b/plugins/projman/commands/sprint-start.md index a8fa63d..721f4d0 100644 --- a/plugins/projman/commands/sprint-start.md +++ b/plugins/projman/commands/sprint-start.md @@ -1,4 +1,5 @@ --- +name: sprint start description: Begin sprint execution with relevant lessons learned from previous sprints agent: orchestrator --- @@ -24,7 +25,7 @@ Initiate sprint execution. The orchestrator agent verifies approval, analyzes de ## Invocation -Run `/sprint-start` when ready to begin executing a planned sprint. +Run `/sprint start` when ready to begin executing a planned sprint. **Flags:** - `--force` — Bypass approval gate (emergency only, logged to milestone) diff --git a/plugins/projman/commands/sprint-status.md b/plugins/projman/commands/sprint-status.md index 8b286d6..10c694f 100644 --- a/plugins/projman/commands/sprint-status.md +++ b/plugins/projman/commands/sprint-status.md @@ -1,4 +1,5 @@ --- +name: sprint status description: Check current sprint progress, identify blockers, optionally generate dependency diagram or token budget report --- @@ -22,9 +23,9 @@ Check current sprint progress, identify blockers, and show execution status. 
Opt ## Invocation ``` -/sprint-status # Text-based status report -/sprint-status --diagram # Include Mermaid dependency diagram -/sprint-status --tokens # Show token budget estimation report +/sprint status # Text-based status report +/sprint status --diagram # Include Mermaid dependency diagram +/sprint status --tokens # Show token budget estimation report ``` ## Workflow diff --git a/plugins/projman/commands/pm-test.md b/plugins/projman/commands/sprint-test.md similarity index 82% rename from plugins/projman/commands/pm-test.md rename to plugins/projman/commands/sprint-test.md index 82dccbb..3fb9ac4 100644 --- a/plugins/projman/commands/pm-test.md +++ b/plugins/projman/commands/sprint-test.md @@ -1,8 +1,9 @@ --- +name: sprint test description: Run tests with coverage or generate tests for specified code --- -# PM Test +# Sprint Test ## Skills Required @@ -15,13 +16,13 @@ Unified testing command for running tests and generating new tests. ## Invocation ``` -/pm-test # Default: run tests -/pm-test run # Run tests, check coverage -/pm-test run --coverage # Run with coverage report -/pm-test run --verbose # Verbose output -/pm-test gen # Generate tests for target -/pm-test gen --type=unit # Specific test type -/pm-test gen --framework=jest # Specific framework +/sprint test # Default: run tests +/sprint test run # Run tests, check coverage +/sprint test run --coverage # Run with coverage report +/sprint test run --verbose # Verbose output +/sprint test gen # Generate tests for target +/sprint test gen --type=unit # Specific test type +/sprint test gen --framework=jest # Specific framework ``` ## Mode Selection @@ -104,15 +105,15 @@ See `skills/test-standards.md` for test patterns and structure. ## Sprint Integration -The `/pm-test` command plays a critical role in the sprint close workflow: +The `/sprint test` command plays a critical role in the sprint close workflow: -1. After `/pm-review` identifies code quality issues -2. 
Before `/sprint-close` finalizes the sprint +1. After `/sprint review` identifies code quality issues +2. Before `/sprint close` finalizes the sprint 3. The code reviewer and orchestrator reference test results when deciding if a sprint is ready to close ### Pre-Close Verification -When running `/pm-test run` before sprint close: +When running `/sprint test run` before sprint close: 1. **Identify sprint files** - Files changed in the current sprint (via git diff against development) 2. **Check test coverage** - Report which sprint files have tests and which don't @@ -125,31 +126,31 @@ When running `/pm-test run` before sprint close: ### Run all tests ``` -/pm-test run +/sprint test run ``` Detects framework, runs full test suite, reports results. ### Run with coverage ``` -/pm-test run --coverage +/sprint test run --coverage ``` Same as above plus coverage percentage per file. ### Generate tests for a specific file ``` -/pm-test gen src/auth/jwt_service.py +/sprint test gen src/auth/jwt_service.py ``` Analyzes the file, generates a test file at `tests/test_jwt_service.py`. ### Generate specific test type ``` -/pm-test gen src/api/routes/auth.py --type=integration +/sprint test gen src/api/routes/auth.py --type=integration ``` Generates integration tests (request/response patterns) instead of unit tests. ### Generate with specific framework ``` -/pm-test gen src/components/Card.jsx --framework=vitest +/sprint test gen src/components/Card.jsx --framework=vitest ``` Uses Vitest instead of auto-detected framework. @@ -161,7 +162,7 @@ Uses Vitest instead of auto-detected framework. 
|----------|----------| | No test framework detected | List what was checked, ask user to specify test command | | Tests fail | Report failures clearly, recommend "TESTS MUST PASS before sprint close" | -| No tests exist for sprint files | Warn with file list, offer to generate with `/pm-test gen` | +| No tests exist for sprint files | Warn with file list, offer to generate with `/sprint test gen` | | External services required | Ask for confirmation before running tests that need database/API | | Mixed framework project | Detect all frameworks, ask which to run or run all | diff --git a/plugins/projman/commands/sprint.md b/plugins/projman/commands/sprint.md new file mode 100644 index 0000000..643e083 --- /dev/null +++ b/plugins/projman/commands/sprint.md @@ -0,0 +1,18 @@ +--- +description: Sprint lifecycle management +--- + +# /sprint + +Sprint lifecycle management for projman. + +## Sub-commands + +| Sub-command | Description | +|-------------|-------------| +| `/sprint plan` | Analyze requirements, create issues, request approval | +| `/sprint start` | Begin execution, load context, dispatch tasks | +| `/sprint status` | Check progress, blockers, completion percentage | +| `/sprint close` | Capture lessons learned, close milestone | +| `/sprint review` | Pre-close code quality review | +| `/sprint test` | Run/generate tests for sprint scope | diff --git a/plugins/projman/skills/branch-security.md b/plugins/projman/skills/branch-security.md index 274d08b..c11da4b 100644 --- a/plugins/projman/skills/branch-security.md +++ b/plugins/projman/skills/branch-security.md @@ -14,7 +14,7 @@ Defines branch detection, classification, and branch-aware authorization rules. 
- **Planner agent**: Before planning any sprint work - **Orchestrator agent**: Before executing any sprint tasks - **Executor agent**: Before modifying any files -- **Commands**: `/sprint-plan`, `/sprint-start`, `/sprint-close` +- **Commands**: `/sprint plan`, `/sprint start`, `/sprint close` --- diff --git a/plugins/projman/skills/dependency-management.md b/plugins/projman/skills/dependency-management.md index cbc6050..e59d916 100644 --- a/plugins/projman/skills/dependency-management.md +++ b/plugins/projman/skills/dependency-management.md @@ -12,7 +12,7 @@ Defines how to analyze dependencies, plan parallel execution, and prevent file c ## When to Use - **Orchestrator agent**: When starting sprint execution -- **Commands**: `/sprint-start`, `/sprint-diagram` +- **Commands**: `/sprint start`, `/sprint status --diagram` --- diff --git a/plugins/projman/skills/git-workflow.md b/plugins/projman/skills/git-workflow.md index b1099be..6f78346 100644 --- a/plugins/projman/skills/git-workflow.md +++ b/plugins/projman/skills/git-workflow.md @@ -13,7 +13,7 @@ Defines branch naming conventions, merge protocols, and git operations. 
- **Orchestrator agent**: When coordinating git operations - **Executor agent**: When creating branches and commits -- **Commands**: `/sprint-start`, `/sprint-close` +- **Commands**: `/sprint start`, `/sprint close` --- diff --git a/plugins/projman/skills/input-detection.md b/plugins/projman/skills/input-detection.md index 48af239..1f060cf 100644 --- a/plugins/projman/skills/input-detection.md +++ b/plugins/projman/skills/input-detection.md @@ -12,7 +12,7 @@ Defines how to detect where planning input is coming from and how to handle each ## When to Use - **Planner agent**: At start of sprint planning -- **Commands**: `/sprint-plan` +- **Commands**: `/sprint plan` --- diff --git a/plugins/projman/skills/issue-conventions.md b/plugins/projman/skills/issue-conventions.md index 8156b8b..7c85c39 100644 --- a/plugins/projman/skills/issue-conventions.md +++ b/plugins/projman/skills/issue-conventions.md @@ -12,7 +12,7 @@ Defines standard formats for issue titles, bodies, and wiki references. ## When to Use - **Planner agent**: When creating issues during sprint planning -- **Commands**: `/sprint-plan` +- **Commands**: `/sprint plan` --- diff --git a/plugins/projman/skills/label-taxonomy/labels-reference.md b/plugins/projman/skills/label-taxonomy/labels-reference.md index 17dd10d..c649b9d 100644 --- a/plugins/projman/skills/label-taxonomy/labels-reference.md +++ b/plugins/projman/skills/label-taxonomy/labels-reference.md @@ -317,21 +317,21 @@ Most issues should have multiple labels from different categories: This skill is loaded when agents need to suggest labels: -**In /sprint-plan:** +**In /sprint plan:** The planner agent uses this reference along with `suggest_labels` MCP tool to recommend appropriate labels for newly created issues. -**In /labels-sync:** +**In /labels sync:** The command updates this file with the latest taxonomy from Gitea. ## Keeping This Updated -**IMPORTANT:** Run `/labels-sync` to: +**IMPORTANT:** Run `/labels sync` to: 1. 
Fetch actual labels from Gitea 2. Update this reference file 3. Ensure suggestion logic matches current taxonomy **Update frequency:** -- First time setup: Run `/labels-sync` immediately +- First time setup: Run `/labels sync` immediately - Regular updates: Monthly or when taxonomy changes - Team notification: When new labels are added to Gitea @@ -341,6 +341,6 @@ The command updates this file with the latest taxonomy from Gitea. 1. Fetch labels dynamically using `get_labels` MCP tool 2. Use `suggest_labels` for intelligent suggestions 3. Reference this skill for context and patterns -4. Update this file via `/labels-sync` when taxonomy changes +4. Update this file via `/labels sync` when taxonomy changes This ensures the plugin adapts to taxonomy evolution without code changes. diff --git a/plugins/projman/skills/lessons-learned.md b/plugins/projman/skills/lessons-learned.md index b556036..3420de1 100644 --- a/plugins/projman/skills/lessons-learned.md +++ b/plugins/projman/skills/lessons-learned.md @@ -13,7 +13,7 @@ Defines the workflow for capturing lessons at sprint close and searching them at - **Planner agent**: Search lessons at sprint start - **Orchestrator agent**: Capture lessons at sprint close -- **Commands**: `/sprint-plan`, `/sprint-start`, `/sprint-close` +- **Commands**: `/sprint plan`, `/sprint start`, `/sprint close` --- diff --git a/plugins/projman/skills/planning-workflow.md b/plugins/projman/skills/planning-workflow.md index 1955481..da28553 100644 --- a/plugins/projman/skills/planning-workflow.md +++ b/plugins/projman/skills/planning-workflow.md @@ -11,8 +11,8 @@ Defines the complete 11-step planning workflow from validation through approval. 
## When to Use -- **Planner agent**: When executing `/sprint-plan` -- **Commands**: `/sprint-plan` +- **Planner agent**: When executing `/sprint plan` +- **Commands**: `/sprint plan` --- diff --git a/plugins/projman/skills/progress-tracking.md b/plugins/projman/skills/progress-tracking.md index 2d89bf3..3dc74c4 100644 --- a/plugins/projman/skills/progress-tracking.md +++ b/plugins/projman/skills/progress-tracking.md @@ -13,7 +13,7 @@ Defines structured progress comment format and status label management. - **Orchestrator agent**: When tracking sprint execution - **Executor agent**: When posting progress updates -- **Commands**: `/sprint-start`, `/sprint-status` +- **Commands**: `/sprint start`, `/sprint status` --- diff --git a/plugins/projman/skills/repo-validation.md b/plugins/projman/skills/repo-validation.md index 25b7a1d..2ae207f 100644 --- a/plugins/projman/skills/repo-validation.md +++ b/plugins/projman/skills/repo-validation.md @@ -12,7 +12,7 @@ Validates that the repository belongs to an organization and has the required la ## When to Use - **Planner agent**: At start of sprint planning -- **Commands**: `/sprint-plan`, `/labels-sync`, `/project-init` +- **Commands**: `/sprint plan`, `/labels sync`, `/project initiation` --- diff --git a/plugins/projman/skills/review-checklist.md b/plugins/projman/skills/review-checklist.md index c092c1f..f4c0a7e 100644 --- a/plugins/projman/skills/review-checklist.md +++ b/plugins/projman/skills/review-checklist.md @@ -12,7 +12,7 @@ Defines code review criteria, severity classification, and output format. 
## When to Use - **Code Reviewer agent**: During pre-sprint-close review -- **Commands**: `/pm-review` +- **Commands**: `/sprint review` --- diff --git a/plugins/projman/skills/rfc-templates.md b/plugins/projman/skills/rfc-templates.md index 974d4f6..97f431e 100644 --- a/plugins/projman/skills/rfc-templates.md +++ b/plugins/projman/skills/rfc-templates.md @@ -369,7 +369,7 @@ For rapid RFC creation from conversation: ## Creating RFC from Clarified Spec -When `/clarify` provides a clarified specification, map sections: +When `/clarity clarify` provides a clarified specification, map sections: | Clarify Output | RFC Section | |----------------|-------------| diff --git a/plugins/projman/skills/rfc-workflow.md b/plugins/projman/skills/rfc-workflow.md index ea6b570..d7edad4 100644 --- a/plugins/projman/skills/rfc-workflow.md +++ b/plugins/projman/skills/rfc-workflow.md @@ -13,7 +13,7 @@ Defines the Request for Comments (RFC) system for capturing, reviewing, and trac - **Planner agent**: When detecting approved RFCs for sprint planning - **Commands**: `/rfc create`, `/rfc list`, `/rfc review`, `/rfc approve`, `/rfc reject` -- **Integration**: With `/sprint-plan` to select approved RFCs for implementation +- **Integration**: With `/sprint plan` to select approved RFCs for implementation --- @@ -78,12 +78,12 @@ Defines the Request for Comments (RFC) system for capturing, reviewing, and trac - **Action**: Update status, add Decision section with rejection reason ### Approved → Implementing -- **Who can transition**: Planner agent via `/sprint-plan` +- **Who can transition**: Planner agent via `/sprint plan` - **Requirements**: RFC selected for sprint - **Action**: Update status, add Sprint reference, update RFC-Index ### Implementing → Implemented -- **Who can transition**: Orchestrator agent via `/sprint-close` +- **Who can transition**: Orchestrator agent via `/sprint close` - **Requirements**: Sprint completed successfully - **Action**: Update status, add completion 
date, link to lessons learned @@ -303,6 +303,6 @@ When RFC status changes: | `/rfc review` | Transitions Draft -> Review | | `/rfc approve` | Transitions Review -> Approved | | `/rfc reject` | Transitions Review/Draft -> Rejected | -| `/sprint-plan` | Detects Approved RFCs, transitions to Implementing | -| `/sprint-close` | Transitions Implementing -> Implemented | +| `/sprint plan` | Detects Approved RFCs, transitions to Implementing | +| `/sprint close` | Transitions Implementing -> Implemented | | `clarity-assist` | Suggests `/rfc create` for feature ideas | diff --git a/plugins/projman/skills/risk-register.md b/plugins/projman/skills/risk-register.md index c50ace0..9d8033c 100644 --- a/plugins/projman/skills/risk-register.md +++ b/plugins/projman/skills/risk-register.md @@ -55,4 +55,4 @@ Page name: `Risk-Register: {Name}` (e.g., `Risk-Register: Driving School SaaS`) - `/project plan` creates initial risk register - `/project status` summarizes open risk count and top-3 by score -- `/sprint-close` updates risk statuses in lessons learned +- `/sprint close` updates risk statuses in lessons learned diff --git a/plugins/projman/skills/setup-workflows.md b/plugins/projman/skills/setup-workflows.md index d62930d..d91fd61 100644 --- a/plugins/projman/skills/setup-workflows.md +++ b/plugins/projman/skills/setup-workflows.md @@ -1,6 +1,6 @@ # Setup Workflows -Shared workflows for the `/pm-setup` command modes. +Shared workflows for the `/projman setup` command modes. ## Mode Detection Logic diff --git a/plugins/projman/skills/sprint-approval.md b/plugins/projman/skills/sprint-approval.md index d218123..6bd6295 100644 --- a/plugins/projman/skills/sprint-approval.md +++ b/plugins/projman/skills/sprint-approval.md @@ -13,7 +13,7 @@ Defines the approval workflow that gates sprint execution. 
- **Planner agent**: After creating issues, request approval - **Orchestrator agent**: Before execution, verify approval exists -- **Commands**: `/sprint-plan`, `/sprint-start` +- **Commands**: `/sprint plan`, `/sprint start` --- @@ -89,11 +89,11 @@ get_milestone(repo="org/repo", milestone_id=17) Sprint 17 milestone does not contain an approval record. Execution cannot proceed without approval. -Required: Run /sprint-plan first to: +Required: Run /sprint plan first to: 1. Review the sprint scope 2. Get explicit approval for execution -To override (emergency only): /sprint-start --force +To override (emergency only): /sprint start --force This bypasses the approval gate and logs a warning to the milestone. ``` @@ -125,7 +125,7 @@ Task #48 wants to create: feat/48-api-docs → STOP and ask user to approve expanded scope ``` -**Operations outside scope should trigger re-approval via `/sprint-plan`.** +**Operations outside scope should trigger re-approval via `/sprint plan`.** --- diff --git a/plugins/projman/skills/sprint-lifecycle.md b/plugins/projman/skills/sprint-lifecycle.md index 2ee7060..0c96619 100644 --- a/plugins/projman/skills/sprint-lifecycle.md +++ b/plugins/projman/skills/sprint-lifecycle.md @@ -20,17 +20,17 @@ Defines the valid sprint lifecycle states and transitions, enforced via labels o ``` idle -> Sprint/Planning -> Sprint/Executing -> Sprint/Reviewing -> idle - (sprint-plan) (sprint-start) (pm-review) (sprint-close) + (sprint plan) (sprint start) (sprint review) (sprint close) ``` ## State Labels | Label | Set By | Meaning | |-------|--------|---------| -| *(no Sprint/* label)* | `/sprint-close` or initial state | Idle - no active sprint phase | -| `Sprint/Planning` | `/sprint-plan` | Planning in progress | -| `Sprint/Executing` | `/sprint-start` | Execution in progress | -| `Sprint/Reviewing` | `/pm-review` | Code review in progress | +| *(no Sprint/* label)* | `/sprint close` or initial state | Idle - no active sprint phase | +| `Sprint/Planning` | 
`/sprint plan` | Planning in progress | +| `Sprint/Executing` | `/sprint start` | Execution in progress | +| `Sprint/Reviewing` | `/sprint review` | Code review in progress | **Rule:** Only ONE `Sprint/*` label may exist on a milestone at a time. Setting a new one removes the previous one. @@ -40,11 +40,11 @@ idle -> Sprint/Planning -> Sprint/Executing -> Sprint/Reviewing -> idle | Command | Expected State | Sets State | On Wrong State | |---------|---------------|------------|----------------| -| `/sprint-plan` | idle (no Sprint/* label) | `Sprint/Planning` | Warn: "Sprint is in [state]. Run `/sprint-close` first or use `--force` to re-plan." Allow with `--force`. | -| `/sprint-start` | `Sprint/Planning` | `Sprint/Executing` | Warn: "Expected Sprint/Planning state but found [state]. Run `/sprint-plan` first or use `--force`." Allow with `--force`. | -| `/pm-review` | `Sprint/Executing` | `Sprint/Reviewing` | Warn: "Expected Sprint/Executing state but found [state]." Allow with `--force`. | -| `/sprint-close` | `Sprint/Reviewing` | Remove all Sprint/* labels (idle) | Warn: "Expected Sprint/Reviewing state but found [state]. Run `/pm-review` first or use `--force`." Allow with `--force`. | -| `/sprint-status` | Any | No change (read-only) | Display current state in output. | +| `/sprint plan` | idle (no Sprint/* label) | `Sprint/Planning` | Warn: "Sprint is in [state]. Run `/sprint close` first or use `--force` to re-plan." Allow with `--force`. | +| `/sprint start` | `Sprint/Planning` | `Sprint/Executing` | Warn: "Expected Sprint/Planning state but found [state]. Run `/sprint plan` first or use `--force`." Allow with `--force`. | +| `/sprint review` | `Sprint/Executing` | `Sprint/Reviewing` | Warn: "Expected Sprint/Executing state but found [state]." Allow with `--force`. | +| `/sprint close` | `Sprint/Reviewing` | Remove all Sprint/* labels (idle) | Warn: "Expected Sprint/Reviewing state but found [state]. Run `/sprint review` first or use `--force`." 
Allow with `--force`. | +| `/sprint status` | Any | No change (read-only) | Display current state in output. | --- @@ -86,7 +86,7 @@ After command completes successfully: ## Displaying State -In `/sprint-status` output, include: +In `/sprint status` output, include: ``` Sprint Phase: Executing (since 2026-02-01) diff --git a/plugins/projman/skills/sprint-roadmap.md b/plugins/projman/skills/sprint-roadmap.md index 1faabd6..d12f7fe 100644 --- a/plugins/projman/skills/sprint-roadmap.md +++ b/plugins/projman/skills/sprint-roadmap.md @@ -36,6 +36,6 @@ Page name: `Roadmap: {Name}` (e.g., `Roadmap: Driving School SaaS`) ## Integration - `/project plan` creates the initial roadmap from epic decomposition + dependency analysis -- `/sprint-plan` references the roadmap to determine sprint scope -- `/sprint-close` updates sprint status in roadmap +- `/sprint plan` references the roadmap to determine sprint scope +- `/sprint close` updates sprint status in roadmap - `/project status` shows roadmap progress diff --git a/plugins/projman/skills/token-budget-report.md b/plugins/projman/skills/token-budget-report.md index 2c9fbba..fe324f2 100644 --- a/plugins/projman/skills/token-budget-report.md +++ b/plugins/projman/skills/token-budget-report.md @@ -10,9 +10,9 @@ Provides directional token consumption estimates at sprint workflow boundaries. ## When to Display -- End of `/sprint-plan` (after all issues created) -- End of `/sprint-close` (after lessons captured) -- On explicit request: `/sprint-status --tokens` +- End of `/sprint plan` (after all issues created) +- End of `/sprint close` (after lessons captured) +- On explicit request: `/sprint status --tokens` --- @@ -36,10 +36,10 @@ These are loaded once at session start. Update this table if MCP servers change. | Phase | Typical Skills Loaded | Est. 
Tokens | |-------|----------------------|-------------| -| Planning (`/sprint-plan`) | mcp-tools-reference, label-taxonomy, sprint-planning, architecture-analysis, rfc-workflow | ~3,000–5,000 | -| Execution (`/sprint-start`) | mcp-tools-reference, branch-security, plan-then-batch | ~2,000–3,000 | -| Review (`/pm-review`) | mcp-tools-reference, review-checklist | ~1,500–2,500 | -| Close (`/sprint-close`) | mcp-tools-reference, sprint-lifecycle, lessons-learned | ~2,000–3,000 | +| Planning (`/sprint plan`) | mcp-tools-reference, label-taxonomy, sprint-planning, architecture-analysis, rfc-workflow | ~3,000–5,000 | +| Execution (`/sprint start`) | mcp-tools-reference, branch-security, plan-then-batch | ~2,000–3,000 | +| Review (`/sprint review`) | mcp-tools-reference, review-checklist | ~1,500–2,500 | +| Close (`/sprint close`) | mcp-tools-reference, sprint-lifecycle, lessons-learned | ~2,000–3,000 | To get actual numbers: count characters in each skill file loaded during the phase, divide by 4. 
diff --git a/plugins/projman/skills/visual-output.md b/plugins/projman/skills/visual-output.md index 090f6b3..366852c 100644 --- a/plugins/projman/skills/visual-output.md +++ b/plugins/projman/skills/visual-output.md @@ -40,10 +40,10 @@ For commands that don't invoke a specific agent phase: | Command | Phase Emoji | Phase Name | |---------|-------------|------------| -| `/sprint-status` | 📊 Chart | STATUS | -| `/pm-setup` | ⚙️ Gear | SETUP | -| `/labels-sync` | 🏷️ Label | LABELS | -| `/pm-test` | 🧪 Flask | TEST | +| `/sprint status` | 📊 Chart | STATUS | +| `/projman setup` | ⚙️ Gear | SETUP | +| `/labels sync` | 🏷️ Label | LABELS | +| `/sprint test` | 🧪 Flask | TEST | | `/rfc` | 📄 Document | RFC [Sub-Command] | | `/project` | 📋 Clipboard | PROJECT [Sub-Command] | | `/adr` | 📐 Ruler | ADR [Sub-Command] | diff --git a/plugins/projman/skills/wbs.md b/plugins/projman/skills/wbs.md index 99a2654..b7a375d 100644 --- a/plugins/projman/skills/wbs.md +++ b/plugins/projman/skills/wbs.md @@ -6,7 +6,7 @@ description: Work Breakdown Structure skill for decomposing projects and sprints ## Purpose -Bridges project-level epics and sprint-level issues. Used by `/project plan` to create the initial decomposition and by `/sprint-plan` to refine sprint scope. +Bridges project-level epics and sprint-level issues. Used by `/project plan` to create the initial decomposition and by `/sprint plan` to refine sprint scope. ## Wiki Page @@ -31,7 +31,7 @@ Page name: `WBS: {Name}` (e.g., `WBS: Driving School SaaS`) ## Sprint Refinement -During `/sprint-plan`, the planner: +During `/sprint plan`, the planner: 1. Loads the WBS 2. Identifies the next unstarted work packages 3. 
Creates issues from Level 3 tasks @@ -40,6 +40,6 @@ During `/sprint-plan`, the planner: ## Integration - `/project plan` creates the initial WBS from epic decomposition -- `/sprint-plan` consumes WBS work packages to create sprint issues -- `/sprint-close` updates WBS with completion status +- `/sprint plan` consumes WBS work packages to create sprint issues +- `/sprint close` updates WBS with completion status - `/project status` aggregates WBS progress for project-level view diff --git a/plugins/projman/skills/wiki-conventions.md b/plugins/projman/skills/wiki-conventions.md index 98db76e..6242b96 100644 --- a/plugins/projman/skills/wiki-conventions.md +++ b/plugins/projman/skills/wiki-conventions.md @@ -12,7 +12,7 @@ Defines naming conventions, dependency headers, and structure for all wiki pages - **Planner agent**: When creating wiki pages during planning - **Orchestrator agent**: When updating status at sprint close -- **Commands**: `/sprint-plan`, `/sprint-close`, `/project initiation`, `/project plan`, `/project status`, `/project close`, `/adr create` +- **Commands**: `/sprint plan`, `/sprint close`, `/project initiation`, `/project plan`, `/project status`, `/project close`, `/adr create` --- diff --git a/plugins/saas-api-platform/.claude-plugin/plugin.json b/plugins/saas-api-platform/.claude-plugin/plugin.json new file mode 100644 index 0000000..32abbf8 --- /dev/null +++ b/plugins/saas-api-platform/.claude-plugin/plugin.json @@ -0,0 +1,26 @@ +{ + "name": "saas-api-platform", + "description": "REST and GraphQL API scaffolding, validation, and documentation for FastAPI and Express", + "version": "1.0.0", + "author": { + "name": "Leo Miranda", + "email": "leobmiranda@gmail.com" + }, + "homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/saas-api-platform/README.md", + "repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git", + "license": "MIT", + "keywords": [ + "api", + "rest", + 
"graphql", + "fastapi", + "express", + "openapi", + "scaffolding", + "middleware" + ], + "commands": [ + "./commands/" + ], + "domain": "saas" +} diff --git a/plugins/saas-api-platform/README.md b/plugins/saas-api-platform/README.md new file mode 100644 index 0000000..8fa95b7 --- /dev/null +++ b/plugins/saas-api-platform/README.md @@ -0,0 +1,43 @@ +# saas-api-platform + +REST and GraphQL API scaffolding, validation, and documentation for FastAPI and Express. + +## Overview + +The saas-api-platform plugin provides a complete API development toolkit. It detects your framework, generates routes and models following RESTful conventions, validates implemented endpoints against OpenAPI specifications, and manages middleware configuration. + +## Supported Frameworks + +- **FastAPI** (Python) - Pydantic models, dependency injection, async endpoints +- **Express** (Node.js/TypeScript) - Router patterns, Zod/Joi validation, middleware chains + +## Commands + +| Command | Description | +|---------|-------------| +| `/api setup` | Setup wizard - detect framework, map project structure | +| `/api scaffold <resource>` | Generate CRUD routes, models, and schemas | +| `/api validate` | Validate routes against OpenAPI specification | +| `/api docs` | Generate or update OpenAPI 3.x specification from code | +| `/api test-routes` | Generate test cases for API endpoints | +| `/api middleware <action> [<type>]` | Add and configure middleware (auth, CORS, rate-limit, etc.) | + +## Agents + +| Agent | Model | Mode | Purpose | +|-------|-------|------|---------| +| `api-architect` | sonnet | default | Route design, schema generation, middleware planning | +| `api-validator` | haiku | plan (read-only) | OpenAPI compliance validation | + +## Installation + +This plugin is part of the Leo Claude Marketplace. It is installed automatically when the marketplace is configured. 
+ +### Prerequisites + +- A FastAPI or Express project to work with +- Run `/api setup` before using other commands + +## Configuration + +The `/api setup` command creates `.api-platform.json` in your project root with detected settings. All subsequent commands read this file for framework and convention configuration. diff --git a/plugins/saas-api-platform/agents/api-architect.md b/plugins/saas-api-platform/agents/api-architect.md new file mode 100644 index 0000000..341b6d7 --- /dev/null +++ b/plugins/saas-api-platform/agents/api-architect.md @@ -0,0 +1,83 @@ +--- +name: api-architect +description: Route design, schema generation, and middleware planning for API projects +model: sonnet +permissionMode: default +--- + +# API Architect Agent + +You are an API architect specializing in REST and GraphQL API design for FastAPI and Express. You generate production-quality scaffolding, maintain OpenAPI specifications, and configure middleware stacks. + +## Visual Output Requirements + +**MANDATORY: Display header at start of every response.** + +``` ++----------------------------------------------------------------------+ +| API-PLATFORM - API Architect | +| [Context Line] | ++----------------------------------------------------------------------+ +``` + +## Expertise + +- RESTful API design principles and best practices +- OpenAPI 3.x specification authoring and maintenance +- FastAPI application architecture (routers, dependencies, Pydantic models) +- Express application architecture (routers, middleware chains, validation) +- Authentication patterns (JWT, OAuth2, API keys) +- Pagination, filtering, sorting, and versioning strategies +- Middleware configuration and ordering + +## Skills to Load + +- skills/framework-detection.md +- skills/route-patterns.md +- skills/openapi-conventions.md +- skills/middleware-catalog.md +- skills/visual-header.md + +## Operating Principles + +### Framework-Aware Code Generation + +Always check `.api-platform.json` before generating any 
code. Adapt output to the detected framework: + +- **FastAPI**: Use type hints, Pydantic models, dependency injection, async endpoints +- **Express**: Use middleware chains, Zod/Joi validation, async/await handlers, error-first callbacks where appropriate + +### Code Quality Standards + +All generated code must: + +1. Include proper error handling (try/catch, HTTP exception mapping) +2. Use framework-idiomatic patterns (no mixing conventions) +3. Include inline comments explaining non-obvious design decisions +4. Follow project's existing code style (detected during setup) +5. Import only what is needed (no wildcard imports) + +### RESTful Design Compliance + +When generating routes: + +1. Use plural nouns for resource collections (`/users`, not `/user`) +2. Use HTTP methods correctly (GET = read, POST = create, PUT = replace, PATCH = partial update, DELETE = remove) +3. Return appropriate status codes (200, 201, 204, 400, 401, 403, 404, 409, 422, 500) +4. Include pagination metadata in list responses +5. Support filtering via query parameters with consistent naming +6. Version APIs via URL prefix when configured + +### Schema Generation + +When creating models/schemas: + +1. Separate create, update, and response schemas (different required fields) +2. Include field descriptions and examples for documentation +3. Add validation constraints (min/max length, regex patterns, enums) +4. Use proper types (datetime, UUID, Decimal where appropriate) +5. Include timestamp fields (created_at, updated_at) on response schemas + +## Communication Style + +Concise and technical. Show generated file contents with brief explanations of design decisions. When multiple approaches exist, explain the chosen one and why. Always list files created or modified with clear indicators ([+] new, [~] modified). 
diff --git a/plugins/saas-api-platform/agents/api-validator.md b/plugins/saas-api-platform/agents/api-validator.md new file mode 100644 index 0000000..b7c273f --- /dev/null +++ b/plugins/saas-api-platform/agents/api-validator.md @@ -0,0 +1,85 @@ +--- +name: api-validator +description: Read-only validation of API routes against OpenAPI specification +model: haiku +permissionMode: plan +disallowedTools: Write, Edit, MultiEdit +--- + +# API Validator Agent + +You are a strict API compliance auditor. Your role is to compare implemented API routes against OpenAPI specifications and report discrepancies. You never modify files; you only read and report. + +## Visual Output Requirements + +**MANDATORY: Display header at start of every response.** + +``` ++----------------------------------------------------------------------+ +| API-PLATFORM - API Validator | +| [Context Line] | ++----------------------------------------------------------------------+ +``` + +## Expertise + +- OpenAPI 3.x specification parsing and interpretation +- REST API compliance auditing +- Schema comparison and drift detection +- HTTP status code correctness verification +- Parameter validation (path, query, header, cookie) + +## Skills to Load + +- skills/openapi-conventions.md +- skills/route-patterns.md +- skills/visual-header.md + +## Validation Methodology + +### 1. Spec Parsing + +Read the OpenAPI specification and build an internal map of: +- All defined paths with their methods +- Parameter definitions (required/optional, types, constraints) +- Request body schemas per operation +- Response schemas per status code +- Security requirements per operation +- Deprecated operations + +### 2. Code Scanning + +Read route files and extract: +- Registered paths and HTTP methods +- Path parameters and query parameters used +- Request body validation schemas +- Response status codes returned +- Middleware/dependency applied (auth, validation) + +### 3. 
Cross-Reference Analysis + +Compare the two maps and flag discrepancies: + +| Check | Code has, Spec missing | Spec has, Code missing | +|-------|----------------------|----------------------| +| Endpoint | FAIL: Undocumented endpoint | FAIL: Unimplemented endpoint | +| Parameter | WARN: Undocumented param | WARN: Unused param in spec | +| Response code | WARN: Undocumented response | INFO: Aspirational response | +| Schema field | WARN: Schema drift | WARN: Schema drift | +| Auth requirement | WARN: Missing auth docs | FAIL: Auth not enforced | + +### 4. Severity Classification + +| Level | Criteria | Action | +|-------|----------|--------| +| **FAIL** | Endpoint exists in one place but not the other; auth in spec but not enforced | Must fix before release | +| **WARN** | Schema drift, undocumented parameters or status codes, deprecated endpoints still active | Should fix | +| **INFO** | Missing descriptions, missing examples, optimization suggestions | Improve documentation | + +## Report Format + +Always output findings grouped by severity, with exact locations (file:line where possible) and actionable fix instructions. Include a summary with endpoint coverage percentage and pass/fail verdict. + +## Communication Style + +Precise and factual. Report what was found, where, and what to do about it. No opinions or subjective assessments. Every finding must include the specific location and a concrete fix action. diff --git a/plugins/saas-api-platform/claude-md-integration.md b/plugins/saas-api-platform/claude-md-integration.md new file mode 100644 index 0000000..dd4a74d --- /dev/null +++ b/plugins/saas-api-platform/claude-md-integration.md @@ -0,0 +1,64 @@ +# saas-api-platform Plugin - CLAUDE.md Integration + +Add this section to your project's CLAUDE.md to enable saas-api-platform plugin features. + +## Suggested CLAUDE.md Section + +```markdown +## API Platform Integration + +This project uses the saas-api-platform plugin for API development workflows. 
+ +### Configuration + +Run `/api setup` to auto-detect framework and configure project paths. +Settings stored in `.api-platform.json` in project root. + +### Available Commands + +| Command | Purpose | +|---------|---------| +| `/api setup` | Detect framework and configure project | +| `/api scaffold <resource>` | Generate CRUD routes, models, schemas | +| `/api validate` | Check routes against OpenAPI spec | +| `/api docs` | Generate/update OpenAPI specification | +| `/api test-routes` | Generate endpoint test cases | +| `/api middleware <action> [<type>]` | Add auth, CORS, rate-limit, logging | + +### When to Use + +- **Starting a new resource**: `/api scaffold orders` generates routes, models, and registers the router +- **Before PR/deploy**: `/api validate` ensures spec and code are in sync +- **After route changes**: `/api docs --update` refreshes the OpenAPI spec +- **Adding infrastructure**: `/api middleware add auth` adds JWT authentication +- **Before release**: `/api test-routes --coverage=full` generates comprehensive tests + +### Conventions + +- All routes follow RESTful naming (plural nouns, no verbs in paths) +- Versioning via URL prefix (`/v1/`) when configured +- Pagination on all list endpoints (page, page_size parameters) +- Consistent error response format with error codes and request IDs +``` + +## Typical Workflows + +### New Resource +``` +/api scaffold products +/api docs --update +/api test-routes products +/api validate +``` + +### Add Authentication +``` +/api middleware add auth +/api validate --strict +``` + +### Pre-Release Check +``` +/api validate --strict +/api test-routes --coverage=full +``` diff --git a/plugins/saas-api-platform/commands/api-docs.md b/plugins/saas-api-platform/commands/api-docs.md new file mode 100644 index 0000000..9f41524 --- /dev/null +++ b/plugins/saas-api-platform/commands/api-docs.md @@ -0,0 +1,109 @@ +--- +name: api docs +description: Generate or update OpenAPI specification from code +agent: api-architect +--- + +# /api docs - 
OpenAPI Specification Generator + +## Skills to Load + +- skills/openapi-conventions.md +- skills/framework-detection.md +- skills/visual-header.md + +## Visual Output + +Display header: `API-PLATFORM - Docs` + +## Usage + +``` +/api docs [--format=yaml|json] [--output=<path>] [--update] +``` + +**Arguments:** +- `--format`: Output format, default `yaml` +- `--output`: Output file path (default: `openapi.yaml` or `openapi.json` in project root) +- `--update`: Update existing spec instead of regenerating from scratch + +## Process + +### 1. Scan Route Definitions + +Read all route files identified during `/api setup`: + +**FastAPI:** +- Parse `@app.get`, `@app.post`, `@router.get`, etc. decorators +- Extract path, method, response_model, status_code, dependencies +- Read Pydantic model definitions for schema extraction +- Capture docstrings as endpoint descriptions + +**Express:** +- Parse `router.get`, `router.post`, etc. calls +- Extract path patterns and middleware chains +- Read Zod/Joi/JSON Schema validators for schema extraction +- Capture JSDoc comments as endpoint descriptions + +### 2. Build OpenAPI Document + +Generate OpenAPI 3.x specification: + +- **Info section**: Title from package name, version from package config +- **Servers**: Extract from environment or configuration +- **Paths**: One entry per endpoint with method, parameters, request body, responses +- **Components/Schemas**: Extracted from model/schema definitions +- **Security schemes**: Based on detected auth patterns (JWT, API key, OAuth2) +- **Tags**: Group endpoints by resource/router prefix + +### 3. Handle Updates (--update mode) + +When updating existing spec: +- Preserve manually-added descriptions, examples, and extensions +- Add new endpoints not yet in spec +- Flag removed endpoints for user review (do not auto-delete) +- Update schemas that changed in code +- Show diff of changes before writing + +### 4. Write Output + +Write the generated or updated spec to the target file. 
+ +## Output Format + +``` ++----------------------------------------------------------------------+ +| API-PLATFORM - Docs | ++----------------------------------------------------------------------+ + +Mode: Generate (new) +Format: YAML +Output: ./openapi.yaml + +Extracted: + Routes: 14 endpoints from 4 files + Schemas: 8 models extracted + Auth: JWT Bearer scheme detected + Tags: users, orders, products, auth + +OpenAPI 3.0.3 spec written to openapi.yaml + +Sections Generated: + [+] info (title, version, description) + [+] servers (1 server) + [+] paths (14 operations) + [+] components (8 schemas, 1 security scheme) + [+] tags (4 tags) + +Next Steps: + - Review generated spec for accuracy + - Add examples to request/response bodies + - Run /api validate to verify consistency +``` + +## Important Notes + +- Generated spec is a starting point; review and enrich with examples +- Descriptions are extracted from docstrings when available +- Complex schemas (inheritance, unions) may need manual adjustment +- The `--update` flag preserves manual additions to the spec diff --git a/plugins/saas-api-platform/commands/api-middleware.md b/plugins/saas-api-platform/commands/api-middleware.md new file mode 100644 index 0000000..8ab394b --- /dev/null +++ b/plugins/saas-api-platform/commands/api-middleware.md @@ -0,0 +1,132 @@ +--- +name: api middleware +description: Add and configure API middleware +agent: api-architect +--- + +# /api middleware - Middleware Manager + +## Skills to Load + +- skills/middleware-catalog.md +- skills/framework-detection.md +- skills/visual-header.md + +## Visual Output + +Display header: `API-PLATFORM - Middleware` + +## Usage + +``` +/api middleware <action> [<type>] [--options] +``` + +**Actions:** +- `add <type>` - Add middleware to the application +- `list` - List currently configured middleware +- `remove <type>` - Remove middleware configuration + +**Middleware Types:** +- `auth` - Authentication (JWT, OAuth2, API key) +- `cors` - Cross-Origin Resource Sharing +- 
`rate-limit` - Rate limiting per client/IP +- `logging` - Request/response logging +- `error-handler` - Global error handling and formatting +- `validation` - Request body/query validation + +## Process + +### 1. Detect Framework + +Read `.api-platform.json` to determine target framework. Middleware implementation differs significantly between FastAPI and Express. + +### 2. Add Middleware (`add` action) + +**Auth Middleware:** +- Ask user for auth type: JWT Bearer, OAuth2, API key header +- Generate middleware file with token validation logic +- Add dependency/middleware registration to app +- Create auth utility functions (decode token, verify permissions) +- Generate `.env.example` entries for secrets (JWT_SECRET, etc.) + +**CORS Middleware:** +- Ask for allowed origins (default: `["*"]` for development) +- Configure allowed methods, headers, credentials +- Set max_age for preflight caching +- FastAPI: `CORSMiddleware` configuration +- Express: `cors` package configuration + +**Rate Limiting:** +- Ask for rate limit strategy: fixed window, sliding window, token bucket +- Configure limits per endpoint or global (requests/minute) +- Set burst allowance +- Configure response headers (X-RateLimit-Limit, X-RateLimit-Remaining) +- FastAPI: `slowapi` or custom middleware +- Express: `express-rate-limit` package + +**Logging:** +- Configure log format (JSON structured, Apache combined, custom) +- Set log levels per environment (debug for dev, info for prod) +- Include request ID generation and propagation +- Configure sensitive field masking (Authorization header, passwords) +- FastAPI: custom middleware with `structlog` or `logging` +- Express: `morgan` or `pino-http` + +**Error Handler:** +- Global exception/error handler with consistent response format +- Map framework exceptions to HTTP status codes +- Include request ID in error responses +- Mask internal details in production mode +- Log full stack traces server-side + +**Validation:** +- Request body validation 
using framework-native tools +- Query parameter validation and type coercion +- Custom validation error response format + +### 3. List Middleware (`list` action) + +Scan app configuration and display active middleware in execution order. + +### 4. Remove Middleware (`remove` action) + +Remove middleware registration and associated files. Warn about dependencies. + +## Output Format + +``` ++----------------------------------------------------------------------+ +| API-PLATFORM - Middleware | ++----------------------------------------------------------------------+ + +Action: add +Type: rate-limit +Framework: FastAPI + +Files Created/Modified: + [+] app/middleware/rate_limit.py (rate limiter implementation) + [~] app/main.py (middleware registered) + [~] .env.example (RATE_LIMIT_PER_MINUTE added) + +Configuration: + Strategy: Sliding window + Global Limit: 60 requests/minute + Burst: 10 additional + Headers: X-RateLimit-Limit, X-RateLimit-Remaining, X-RateLimit-Reset + +Dependencies Added: + slowapi>=0.1.9 + +Next Steps: + - pip install slowapi + - Configure RATE_LIMIT_PER_MINUTE in .env + - Override per-endpoint limits in route decorators if needed +``` + +## Important Notes + +- Middleware order matters; auth should run before rate-limiting +- CORS must be configured before route handlers +- Error handler should be the outermost middleware +- Rate limiting should use persistent store (Redis) in production diff --git a/plugins/saas-api-platform/commands/api-scaffold.md b/plugins/saas-api-platform/commands/api-scaffold.md new file mode 100644 index 0000000..dac9300 --- /dev/null +++ b/plugins/saas-api-platform/commands/api-scaffold.md @@ -0,0 +1,123 @@ +--- +name: api scaffold +description: Generate API routes, models, and schemas +agent: api-architect +--- + +# /api scaffold - Route and Model Scaffolding + +## Skills to Load + +- skills/framework-detection.md +- skills/route-patterns.md +- skills/openapi-conventions.md +- skills/visual-header.md + +## Visual Output 
+ +Display header: `API-PLATFORM - Scaffold` + +## Usage + +``` +/api scaffold <resource> [--methods=GET,POST,PUT,DELETE] [--nested-under=<parent>] +``` + +**Arguments:** +- `<resource>`: Name of the resource (e.g., `users`, `orders`, `products`) +- `--methods`: Comma-separated HTTP methods (default: all CRUD) +- `--nested-under`: Create as nested resource under parent (e.g., `--nested-under=users`) + +## Prerequisites + +Run `/api setup` first. This command reads `.api-platform.json` for framework and conventions. + +## Process + +### 1. Read Configuration + +Load `.api-platform.json` to determine: +- Target framework (FastAPI or Express) +- Route and model directories +- Versioning scheme +- Response format conventions + +### 2. Generate Route File + +Create route/controller file with endpoints: + +**FastAPI Example (`routes/{resource}.py`):** +- `GET /{version}/{resource}` - List with pagination, filtering, sorting +- `GET /{version}/{resource}/{id}` - Get by ID with 404 handling +- `POST /{version}/{resource}` - Create with request body validation +- `PUT /{version}/{resource}/{id}` - Full update with 404 handling +- `PATCH /{version}/{resource}/{id}` - Partial update +- `DELETE /{version}/{resource}/{id}` - Delete with 204 response + +**Express Example (`routes/{resource}.js`):** +- Same endpoints adapted to Express router patterns +- Includes error handling middleware chain + +### 3. Generate Request/Response Models + +Create schema definitions appropriate to the framework: + +- **FastAPI**: Pydantic models in `models/{resource}.py` + - `{Resource}Create` - POST/PUT request body + - `{Resource}Update` - PATCH request body (all fields optional) + - `{Resource}Response` - Response model with `id`, timestamps + - `{Resource}List` - Paginated list wrapper + +- **Express**: JSON Schema or Zod schemas in `schemas/{resource}.js` + - Create schema, Update schema, Response schema + +### 4. 
Generate Validation Schemas + +Add input validation: +- Required fields, type constraints, string length limits +- Enum values where appropriate +- Nested object validation for complex resources + +### 5. Register Routes + +Update the main router/app file to include new routes: +- Add import statement +- Register route prefix +- Maintain alphabetical ordering of imports + +## Output Format + +``` ++----------------------------------------------------------------------+ +| API-PLATFORM - Scaffold | ++----------------------------------------------------------------------+ + +Resource: orders +Framework: FastAPI +Versioning: /v1/ + +Files Created: + [+] app/routes/orders.py (6 endpoints) + [+] app/models/orders.py (4 Pydantic models) + [~] app/main.py (route registered) + +Endpoints Generated: + GET /v1/orders List with pagination + GET /v1/orders/{id} Get by ID + POST /v1/orders Create + PUT /v1/orders/{id} Full update + PATCH /v1/orders/{id} Partial update + DELETE /v1/orders/{id} Delete + +Next Steps: + - Add business logic to route handlers + - Run /api validate to check against OpenAPI spec + - Run /api test-routes to generate test cases +``` + +## Nested Resources + +When `--nested-under` is specified: +- Routes are prefixed: `/{parent}/{parent_id}/{resource}` +- Models include `{parent}_id` foreign key field +- Route file includes parent ID path parameter validation diff --git a/plugins/saas-api-platform/commands/api-setup.md b/plugins/saas-api-platform/commands/api-setup.md new file mode 100644 index 0000000..5e79e6f --- /dev/null +++ b/plugins/saas-api-platform/commands/api-setup.md @@ -0,0 +1,99 @@ +--- +name: api setup +description: Setup wizard for saas-api-platform +agent: api-architect +--- + +# /api setup - API Platform Setup Wizard + +## Skills to Load + +- skills/framework-detection.md +- skills/visual-header.md + +## Visual Output + +Display header: `API-PLATFORM - Setup Wizard` + +## Usage + +``` +/api setup +``` + +## Workflow + +### Phase 1: 
Framework Detection + +Scan the project root for framework indicators: + +| File / Pattern | Framework | Confidence | +|----------------|-----------|------------| +| `main.py` with `from fastapi` | FastAPI | High | +| `app.py` with `from fastapi` | FastAPI | High | +| `requirements.txt` containing `fastapi` | FastAPI | Medium | +| `pyproject.toml` with `fastapi` dependency | FastAPI | Medium | +| `package.json` with `express` dependency | Express | High | +| `app.js` or `app.ts` with `require('express')` | Express | High | +| `tsconfig.json` + express in deps | Express (TypeScript) | High | + +If no framework detected, ask user to select one. + +### Phase 2: Project Structure Mapping + +Identify existing project layout: + +- **Route files**: Locate route/controller directories +- **Models**: Locate model/schema definition files +- **Middleware**: Locate existing middleware +- **Tests**: Locate test directories and test runner config +- **OpenAPI spec**: Check for existing `openapi.yaml`, `openapi.json`, or `swagger.json` + +Report findings to user with directory tree. + +### Phase 3: Convention Configuration + +Ask user about project conventions: + +- **Route style**: RESTful nested (`/users/{id}/posts`) vs flat (`/user-posts`) +- **Versioning**: URL prefix (`/v1/`) vs header-based vs none +- **Auth pattern**: JWT, OAuth2, API key, or none +- **Response format**: JSON:API, HAL, plain JSON + +Store decisions in `.api-platform.json` in project root for future commands. 
+ +### Phase 4: Validation + +Verify detected configuration: + +- Confirm framework version +- Confirm route directory location +- Confirm model directory location +- Display summary with all detected settings + +## Output Format + +``` ++----------------------------------------------------------------------+ +| API-PLATFORM - Setup Wizard | ++----------------------------------------------------------------------+ + +Framework: FastAPI 0.104.1 +Route Dir: ./app/routes/ +Models Dir: ./app/models/ +Tests Dir: ./tests/ +OpenAPI Spec: ./openapi.yaml (existing) + +Conventions: + Versioning: /v1/ URL prefix + Auth: JWT Bearer + Response: Plain JSON + +Configuration saved to .api-platform.json +``` + +## Important Notes + +- This command does NOT create project files; it only detects and configures +- If `.api-platform.json` already exists, offer to update or keep existing +- All subsequent commands rely on setup configuration diff --git a/plugins/saas-api-platform/commands/api-test-routes.md b/plugins/saas-api-platform/commands/api-test-routes.md new file mode 100644 index 0000000..d7b77f3 --- /dev/null +++ b/plugins/saas-api-platform/commands/api-test-routes.md @@ -0,0 +1,126 @@ +--- +name: api test-routes +description: Generate request/response test cases for API endpoints +agent: api-architect +--- + +# /api test-routes - API Test Generator + +## Skills to Load + +- skills/route-patterns.md +- skills/visual-header.md + +## Visual Output + +Display header: `API-PLATFORM - Test Routes` + +## Usage + +``` +/api test-routes [<resource>] [--coverage=basic|full] [--runner=pytest|jest|vitest] +``` + +**Arguments:** +- `<resource>`: Specific resource to generate tests for (default: all) +- `--coverage`: `basic` generates happy-path only; `full` includes edge cases (default: `full`) +- `--runner`: Test runner to target (auto-detected from project) + +## Prerequisites + +Run `/api setup` first. Route files must exist (either manually written or via `/api scaffold`). + +## Process + +### 1. 
Detect Test Runner + +Auto-detect based on framework: +- **FastAPI**: `pytest` with `httpx` (TestClient) +- **Express**: `jest` or `vitest` with `supertest` + +Check for existing test config (`pytest.ini`, `jest.config.*`, `vitest.config.*`). + +### 2. Scan Endpoints + +For each route file, extract: +- HTTP method and path +- Required and optional parameters +- Request body schema with field types and constraints +- Expected response status codes +- Authentication requirements + +### 3. Generate Test Cases + +For each endpoint, generate test cases in categories: + +**Happy Path (basic coverage):** +- Valid request returns expected status code +- Response body matches expected schema +- List endpoints return paginated results +- Create endpoint returns created resource with ID + +**Validation (full coverage):** +- Missing required fields returns 422/400 +- Invalid field types return 422/400 +- String fields exceeding max length return 422/400 +- Invalid enum values return 422/400 + +**Authentication (full coverage):** +- Request without auth token returns 401 +- Request with expired token returns 401 +- Request with insufficient permissions returns 403 + +**Edge Cases (full coverage):** +- GET non-existent resource returns 404 +- DELETE already-deleted resource returns 404 +- PUT with partial body returns 422/400 +- Concurrent creation of duplicate resources + +**Pagination (full coverage for list endpoints):** +- Default pagination returns correct page size +- Custom page size works correctly +- Out-of-range page returns empty results +- Sort parameters produce correct ordering + +### 4. 
Write Test Files + +Create test files in the project test directory: +- One test file per resource +- Test fixtures for common setup (auth tokens, sample data) +- Helper functions for repetitive assertions + +## Output Format + +``` ++----------------------------------------------------------------------+ +| API-PLATFORM - Test Routes | ++----------------------------------------------------------------------+ + +Coverage: full +Runner: pytest + httpx +Resource: orders + +Files Created: + [+] tests/test_orders.py (18 test cases) + [+] tests/conftest.py (fixtures updated) + +Test Cases Generated: + Happy Path: 6 tests + Validation: 5 tests + Auth: 3 tests + Edge Cases: 4 tests + + Total: 18 test cases for 6 endpoints + +Next Steps: + - Review generated tests and adjust sample data + - Add database fixtures for integration tests + - Run: pytest tests/test_orders.py -v +``` + +## Important Notes + +- Generated tests use placeholder sample data; review and adjust for your domain +- Auth tests require a fixture that provides valid/invalid tokens +- Integration tests may need database setup/teardown fixtures +- Tests follow Arrange-Act-Assert pattern diff --git a/plugins/saas-api-platform/commands/api-validate.md b/plugins/saas-api-platform/commands/api-validate.md new file mode 100644 index 0000000..c00981e --- /dev/null +++ b/plugins/saas-api-platform/commands/api-validate.md @@ -0,0 +1,124 @@ +--- +name: api validate +description: Validate routes against OpenAPI specification +agent: api-validator +--- + +# /api validate - OpenAPI Validation + +## Skills to Load + +- skills/openapi-conventions.md +- skills/route-patterns.md +- skills/visual-header.md + +## Visual Output + +Display header: `API-PLATFORM - Validate` + +## Usage + +``` +/api validate [--spec=<path>] [--strict] +``` + +**Arguments:** +- `--spec`: Path to OpenAPI spec file (default: auto-detect `openapi.yaml` or `openapi.json`) +- `--strict`: Treat warnings as errors + +## Prerequisites + +Run `/api setup` first. 
Requires an existing OpenAPI specification to validate against. + +## Process + +### 1. Locate OpenAPI Spec + +Search for spec file in order: +1. Path provided via `--spec` flag +2. `openapi.yaml` in project root +3. `openapi.json` in project root +4. `swagger.yaml` or `swagger.json` in project root +5. `docs/openapi.yaml` or `docs/openapi.json` + +If not found, report error and suggest `/api docs` to generate one. + +### 2. Parse Route Definitions + +Scan route files to extract implemented endpoints: +- HTTP method and path pattern +- Path parameters and query parameters +- Request body schema (if POST/PUT/PATCH) +- Response status codes and schemas +- Authentication requirements + +### 3. Compare Against Spec + +Run validation checks: + +| Check | Severity | Description | +|-------|----------|-------------| +| Missing endpoint in spec | FAIL | Route exists in code but not in OpenAPI spec | +| Missing endpoint in code | FAIL | Spec defines endpoint not implemented | +| Parameter mismatch | FAIL | Path/query parameters differ between code and spec | +| Schema mismatch | WARN | Request/response model fields differ from spec | +| Missing response codes | WARN | Code returns status codes not documented in spec | +| Missing descriptions | INFO | Endpoints or parameters lack descriptions | +| Missing examples | INFO | Spec lacks request/response examples | +| Deprecated endpoint still active | WARN | Spec marks endpoint deprecated but code still serves it | + +### 4. Generate Report + +Group findings by severity and provide actionable fixes. + +## Output Format + +``` ++----------------------------------------------------------------------+ +| API-PLATFORM - Validate | ++----------------------------------------------------------------------+ + +Spec: openapi.yaml (OpenAPI 3.0.3) +Routes Scanned: 14 endpoints in 4 files +Spec Endpoints: 16 defined + +FINDINGS + +FAIL (2) + 1. [POST /v1/orders] Missing from OpenAPI spec + Fix: Add path definition to openapi.yaml + + 2. 
[GET /v1/users] Query param 'role' not in spec + Fix: Add 'role' query parameter to path definition + +WARN (3) + 1. [PUT /v1/products/{id}] Response 422 not documented + Fix: Add 422 response to spec with validation error schema + + 2. [GET /v1/orders] Schema mismatch - 'total_amount' field + Code: float, Spec: string + Fix: Update spec to use number type + + 3. [DELETE /v1/users/{id}] Marked deprecated in spec + Suggestion: Remove endpoint or update spec status + +INFO (1) + 1. [GET /v1/products] Missing response example + Suggestion: Add example to improve documentation + +SUMMARY + Endpoints: 14 implemented / 16 in spec + Coverage: 87.5% + FAIL: 2 (must fix) + WARN: 3 (should fix) + INFO: 1 (improve) + +VERDICT: FAIL (2 blocking issues) +``` + +## Exit Guidance + +- FAIL findings: Block deployment, spec and code must agree +- WARN findings: Fix before release for accurate documentation +- INFO findings: Improve for developer experience +- `--strict` mode: All WARN become FAIL diff --git a/plugins/saas-api-platform/commands/api.md b/plugins/saas-api-platform/commands/api.md new file mode 100644 index 0000000..d2b9c80 --- /dev/null +++ b/plugins/saas-api-platform/commands/api.md @@ -0,0 +1,19 @@ +--- +name: api +description: API development toolkit — route scaffolding, OpenAPI validation, middleware management +--- + +# /api + +REST and GraphQL API scaffolding, validation, and documentation for FastAPI and Express. 
+ +## Sub-commands + +| Sub-command | Description | +|-------------|-------------| +| `/api setup` | Setup wizard for saas-api-platform | +| `/api scaffold` | Generate routes, models, and schemas | +| `/api validate` | Validate routes against OpenAPI spec | +| `/api docs` | Generate or update OpenAPI specification | +| `/api test-routes` | Generate test cases for API endpoints | +| `/api middleware` | Add and configure middleware | diff --git a/plugins/saas-api-platform/skills/framework-detection.md b/plugins/saas-api-platform/skills/framework-detection.md new file mode 100644 index 0000000..010247a --- /dev/null +++ b/plugins/saas-api-platform/skills/framework-detection.md @@ -0,0 +1,81 @@ +--- +name: framework-detection +description: Detect FastAPI vs Express, identify project structure, locate route files and models +--- + +# Framework Detection + +## Purpose + +Identify the API framework in use and map the project structure. This skill is loaded by the `api-architect` agent during setup and code generation to ensure framework-appropriate output. 
+ +--- + +## Detection Rules + +### FastAPI Detection + +| Indicator | Location | Confidence | +|-----------|----------|------------| +| `from fastapi import` in Python files | `*.py` | High | +| `fastapi` in `requirements.txt` | Project root | Medium | +| `fastapi` in `pyproject.toml` dependencies | Project root | Medium | +| `uvicorn` in requirements alongside fastapi | Project root | High | +| `@app.get`, `@router.post` decorators | `*.py` | High | + +**FastAPI Project Structure (typical):** +``` +app/ + __init__.py + main.py # FastAPI app instance, middleware, router includes + routes/ # Route modules (one per resource) + models/ # Pydantic models (request/response schemas) + dependencies/ # Dependency injection functions + middleware/ # Custom middleware + core/ # Config, security, database setup +``` + +### Express Detection + +| Indicator | Location | Confidence | +|-----------|----------|------------| +| `express` in `package.json` dependencies | Project root | High | +| `require('express')` or `import express` | `*.js`, `*.ts` | High | +| `express.Router()` usage | `*.js`, `*.ts` | High | +| `tsconfig.json` present alongside Express | Project root | Express+TS variant | + +**Express Project Structure (typical):** +``` +src/ + app.js or app.ts # Express app instance, middleware, router mounts + routes/ # Route modules (one per resource) + models/ # Database models (Sequelize, Mongoose, etc.) + schemas/ # Validation schemas (Zod, Joi) + middleware/ # Custom middleware + config/ # Environment config, database setup +``` + +--- + +## Project Mapping Procedure + +1. **Check package files first**: `requirements.txt`, `pyproject.toml`, `package.json` +2. **Scan entry points**: `main.py`, `app.py`, `app.js`, `app.ts`, `index.js`, `index.ts` +3. **Locate route directory**: Search for `routes/`, `routers/`, `controllers/`, `api/` +4. **Locate model directory**: Search for `models/`, `schemas/`, `entities/` +5. 
**Locate test directory**: Search for `tests/`, `test/`, `__tests__/`, `spec/` +6. **Check for existing OpenAPI spec**: `openapi.yaml`, `openapi.json`, `swagger.*` + +--- + +## Framework Version Detection + +**FastAPI**: Parse version from `requirements.txt` pin or `pyproject.toml` constraint. Important for feature availability (e.g., `lifespan` parameter in 0.93+). + +**Express**: Parse version from `package.json` dependencies. Important for middleware compatibility (Express 4 vs 5). + +--- + +## Ambiguous Projects + +If both FastAPI and Express indicators are found (monorepo), ask the user which service to target. Store the selection in `.api-platform.json` with a `service_root` field pointing to the specific service directory. diff --git a/plugins/saas-api-platform/skills/middleware-catalog.md b/plugins/saas-api-platform/skills/middleware-catalog.md new file mode 100644 index 0000000..b49a2c0 --- /dev/null +++ b/plugins/saas-api-platform/skills/middleware-catalog.md @@ -0,0 +1,137 @@ +--- +name: middleware-catalog +description: Common middleware patterns for auth, CORS, rate-limiting, logging, and error handling per framework +--- + +# Middleware Catalog + +## Purpose + +Reference catalog of middleware patterns for FastAPI and Express. This skill is loaded by the `api-architect` agent when adding or configuring middleware, ensuring correct implementation per framework. + +--- + +## Middleware Execution Order + +Middleware should be registered in this order (outermost to innermost): + +1. **Error Handler** - Catches all unhandled exceptions +2. **CORS** - Must run before any route processing +3. **Request ID** - Generate unique ID for tracing +4. **Logging** - Log incoming request details +5. **Rate Limiting** - Reject excessive requests early +6. **Authentication** - Validate credentials +7. **Authorization** - Check permissions (often per-route) +8. **Validation** - Validate request body/params (often per-route) +9. 
**Route Handler** - Business logic + +--- + +## Authentication Middleware + +### JWT Bearer (FastAPI) +- Use `fastapi.security.HTTPBearer` dependency +- Decode token with `python-jose` or `PyJWT` +- Store decoded claims in request state +- Return 401 for missing/invalid/expired tokens +- Environment: `JWT_SECRET`, `JWT_ALGORITHM` (default HS256) + +### JWT Bearer (Express) +- Use `express-jwt` or custom middleware +- Decode token from `Authorization: Bearer ` header +- Attach decoded user to `req.user` +- Return 401 for missing/invalid/expired tokens +- Environment: `JWT_SECRET`, `JWT_ALGORITHM` + +### API Key +- Read from `X-API-Key` header +- Compare against stored keys (database or environment) +- Return 401 for missing key, 403 for invalid key + +--- + +## CORS Middleware + +### FastAPI +```python +from fastapi.middleware.cors import CORSMiddleware +app.add_middleware( + CORSMiddleware, + allow_origins=["*"], # Restrict in production + allow_credentials=True, + allow_methods=["*"], + allow_headers=["*"], + max_age=600, +) +``` + +### Express +```javascript +const cors = require('cors'); +app.use(cors({ + origin: '*', // Restrict in production + credentials: true, + methods: ['GET','POST','PUT','PATCH','DELETE'], + maxAge: 600, +})); +``` + +--- + +## Rate Limiting + +### Strategies +- **Fixed window**: N requests per time window (simple, bursty at boundaries) +- **Sliding window**: Smooth rate enforcement (recommended) +- **Token bucket**: Allows controlled bursts + +### Response Headers +- `X-RateLimit-Limit`: Maximum requests allowed +- `X-RateLimit-Remaining`: Requests remaining in window +- `X-RateLimit-Reset`: Timestamp when limit resets + +### Production Notes +- Use Redis as backend store for distributed rate limiting +- In-memory stores do not work with multiple server instances +- Consider different limits for authenticated vs anonymous users + +--- + +## Logging Middleware + +### Structured Logging Fields +Every request log entry should include: +- 
`request_id`: Unique identifier for tracing +- `method`: HTTP method +- `path`: Request path +- `status_code`: Response status +- `duration_ms`: Processing time +- `client_ip`: Client IP address +- `user_id`: Authenticated user ID (if available) + +### Sensitive Field Masking +Never log: `Authorization` header values, request bodies containing `password`, `token`, `secret`, `credit_card` fields. + +--- + +## Error Handler + +### Standard Error Response +All errors must produce consistent JSON: +```json +{ + "error": { + "code": "ERROR_CODE", + "message": "Human-readable message", + "request_id": "abc-123" + } +} +``` + +### Exception Mapping +- Validation errors: 422 with field-level details +- Not found: 404 with resource type and ID +- Authentication: 401 with generic message (no details) +- Authorization: 403 with required permission +- Conflict: 409 with conflicting field +- Internal: 500 with request_id only (no stack traces in production) diff --git a/plugins/saas-api-platform/skills/openapi-conventions.md b/plugins/saas-api-platform/skills/openapi-conventions.md new file mode 100644 index 0000000..bbd5770 --- /dev/null +++ b/plugins/saas-api-platform/skills/openapi-conventions.md @@ -0,0 +1,125 @@ +--- +name: openapi-conventions +description: OpenAPI 3.x spec generation rules, path naming, schema definitions, response codes +--- + +# OpenAPI Conventions + +## Purpose + +Defines rules and patterns for generating and validating OpenAPI 3.x specifications. This skill ensures consistency between generated specs and industry best practices. + +--- + +## Specification Structure + +An OpenAPI 3.0.3 document must include: + +```yaml +openapi: "3.0.3" +info: + title: + version: + description: +servers: + - url: + description: +paths: + /: ... +components: + schemas: ... + securitySchemes: ... +tags: ... 
+``` + +--- + +## Path Naming Rules + +| Rule | Example | Anti-Pattern | +|------|---------|-------------| +| Plural nouns for collections | `/users` | `/user`, `/getUsers` | +| Lowercase with hyphens | `/order-items` | `/orderItems`, `/order_items` | +| Resource ID in path | `/users/{user_id}` | `/users?id=123` | +| Nested resources max 2 levels | `/users/{id}/orders` | `/users/{id}/orders/{oid}/items/{iid}` | +| No verbs in paths | `/users` + POST | `/createUser` | +| Version prefix when configured | `/v1/users` | `/api/v1/users` (redundant) | + +--- + +## Response Code Standards + +| Method | Success | Client Error | Server Error | +|--------|---------|-------------|-------------| +| GET (single) | 200 | 404 | 500 | +| GET (list) | 200 | 400 (bad params) | 500 | +| POST | 201 | 400, 409 (conflict), 422 | 500 | +| PUT | 200 | 400, 404, 422 | 500 | +| PATCH | 200 | 400, 404, 422 | 500 | +| DELETE | 204 | 404 | 500 | + +All endpoints should also document 401 (unauthorized) and 403 (forbidden) when auth is required. + +--- + +## Schema Definition Rules + +1. **Naming**: PascalCase for schema names (`UserCreate`, `OrderResponse`) +2. **Reuse**: Use `$ref` for shared schemas instead of duplication +3. **Required fields**: Explicitly list required fields; do not rely on defaults +4. **Types**: Use specific types (`integer` not `number` for IDs; `string` with `format: date-time` for timestamps) +5. **Descriptions**: Every property should have a `description` field +6. **Examples**: Include `example` values for key properties +7. 
**Nullable**: Use `nullable: true` explicitly when fields can be null + +--- + +## Pagination Schema + +List endpoints must return a paginated wrapper: + +```yaml +PaginatedResponse: + type: object + properties: + items: + type: array + items: + $ref: '#/components/schemas/ResourceResponse' + total: + type: integer + description: Total number of items + page: + type: integer + description: Current page number + page_size: + type: integer + description: Items per page + pages: + type: integer + description: Total number of pages +``` + +--- + +## Security Scheme Patterns + +**JWT Bearer:** +```yaml +securitySchemes: + BearerAuth: + type: http + scheme: bearer + bearerFormat: JWT +``` + +**API Key:** +```yaml +securitySchemes: + ApiKeyAuth: + type: apiKey + in: header + name: X-API-Key +``` + +Apply globally or per-operation using the `security` field. diff --git a/plugins/saas-api-platform/skills/route-patterns.md b/plugins/saas-api-platform/skills/route-patterns.md new file mode 100644 index 0000000..d88f6f1 --- /dev/null +++ b/plugins/saas-api-platform/skills/route-patterns.md @@ -0,0 +1,112 @@ +--- +name: route-patterns +description: RESTful naming, versioning, pagination, filtering, and sorting conventions +--- + +# Route Patterns + +## Purpose + +Defines standard patterns for RESTful route design. This skill is loaded during scaffolding and validation to ensure generated routes follow consistent, industry-standard conventions. 
+ +--- + +## RESTful Resource Naming + +| Pattern | URL | Method | Purpose | +|---------|-----|--------|---------| +| List | `/{resource}` | GET | Retrieve paginated collection | +| Create | `/{resource}` | POST | Create new resource | +| Get | `/{resource}/{id}` | GET | Retrieve single resource | +| Replace | `/{resource}/{id}` | PUT | Full replacement of resource | +| Update | `/{resource}/{id}` | PATCH | Partial update | +| Delete | `/{resource}/{id}` | DELETE | Remove resource | + +### Nested Resources + +For parent-child relationships (max 2 levels deep): + +| Pattern | URL | Example | +|---------|-----|---------| +| Child list | `/{parent}/{parent_id}/{child}` | `/users/42/orders` | +| Child create | `/{parent}/{parent_id}/{child}` | POST `/users/42/orders` | +| Child get | `/{parent}/{parent_id}/{child}/{child_id}` | `/users/42/orders/7` | + +Beyond 2 levels, flatten with query filters: `/items?order_id=7&user_id=42` + +--- + +## Versioning + +| Strategy | Format | Example | +|----------|--------|---------| +| URL prefix (recommended) | `/v{N}/` | `/v1/users`, `/v2/users` | +| Header-based | `Accept: application/vnd.api.v1+json` | Header value | +| Query param (discouraged) | `?version=1` | `/users?version=1` | + +URL prefix versioning is the default. Only bump major version for breaking changes. 
+ +--- + +## Pagination + +All list endpoints must support pagination: + +| Parameter | Type | Default | Description | +|-----------|------|---------|-------------| +| `page` | integer | 1 | Page number (1-indexed) | +| `page_size` | integer | 20 | Items per page (max 100) | + +Response must include pagination metadata: +```json +{ + "items": [...], + "total": 142, + "page": 2, + "page_size": 20, + "pages": 8 +} +``` + +--- + +## Filtering + +Use query parameters for filtering: + +| Pattern | Example | Description | +|---------|---------|-------------| +| Exact match | `?status=active` | Field equals value | +| Multiple values | `?status=active,pending` | Field in list | +| Range | `?created_after=2024-01-01&created_before=2024-12-31` | Date/number range | +| Search | `?q=search+term` | Full-text search | + +--- + +## Sorting + +| Parameter | Format | Example | +|-----------|--------|---------| +| `sort` | `field` (asc) or `-field` (desc) | `?sort=-created_at` | +| Multiple | Comma-separated | `?sort=-created_at,name` | + +--- + +## Error Response Format + +All errors must use a consistent structure: + +```json +{ + "error": { + "code": "VALIDATION_ERROR", + "message": "Human-readable description", + "details": [ + {"field": "email", "message": "Invalid email format"} + ], + "request_id": "abc-123" + } +} +``` + +Error codes should be uppercase snake_case constants: `NOT_FOUND`, `UNAUTHORIZED`, `VALIDATION_ERROR`, `CONFLICT`, `INTERNAL_ERROR`. 
diff --git a/plugins/saas-api-platform/skills/visual-header.md b/plugins/saas-api-platform/skills/visual-header.md new file mode 100644 index 0000000..9e89714 --- /dev/null +++ b/plugins/saas-api-platform/skills/visual-header.md @@ -0,0 +1,49 @@ +--- +name: visual-header +description: Standard header format for API platform commands and agents +--- + +# Visual Header + +## Standard Format + +Display at the start of every command execution: + +``` ++----------------------------------------------------------------------+ +| API-PLATFORM - [Command Name] | ++----------------------------------------------------------------------+ +``` + +## Command Headers + +| Command | Header Text | +|---------|-------------| +| api-setup | Setup Wizard | +| api-scaffold | Scaffold | +| api-validate | Validate | +| api-docs | Docs | +| api-test-routes | Test Routes | +| api-middleware | Middleware | + +## Summary Box Format + +For completion summaries: + +``` ++============================================================+ +| API-PLATFORM [OPERATION] COMPLETE | ++============================================================+ +| Component: [Status] | +| Component: [Status] | ++============================================================+ +``` + +## Status Indicators + +- Success: `[check]` or `Ready` +- Warning: `[!]` or `Partial` +- Failure: `[X]` or `Failed` +- New file: `[+]` +- Modified file: `[~]` +- Deleted file: `[-]` diff --git a/plugins/saas-db-migrate/.claude-plugin/plugin.json b/plugins/saas-db-migrate/.claude-plugin/plugin.json new file mode 100644 index 0000000..c169f77 --- /dev/null +++ b/plugins/saas-db-migrate/.claude-plugin/plugin.json @@ -0,0 +1,25 @@ +{ + "name": "saas-db-migrate", + "description": "Database migration management for Alembic, Prisma, and raw SQL", + "version": "1.0.0", + "author": { + "name": "Leo Miranda", + "email": "leobmiranda@gmail.com" + }, + "homepage": 
"https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/saas-db-migrate/README.md", + "repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git", + "license": "MIT", + "keywords": [ + "database", + "migration", + "alembic", + "prisma", + "sql", + "schema", + "rollback" + ], + "commands": [ + "./commands/" + ], + "domain": "saas" +} diff --git a/plugins/saas-db-migrate/README.md b/plugins/saas-db-migrate/README.md new file mode 100644 index 0000000..e20b490 --- /dev/null +++ b/plugins/saas-db-migrate/README.md @@ -0,0 +1,58 @@ +# saas-db-migrate + +Database migration management for Alembic, Prisma, and raw SQL. + +## Overview + +The saas-db-migrate plugin provides a complete database migration toolkit. It detects your migration tool, generates migration files from model diffs, validates migrations for safety before applying, plans execution with rollback strategies, and tracks migration history. + +## Supported Migration Tools + +- **Alembic** (Python/SQLAlchemy) - Revision-based migrations with auto-generation +- **Prisma** (Node.js/TypeScript) - Schema-first migrations with diff-based generation +- **Raw SQL** - Sequential numbered SQL files for any database + +## Supported Databases + +- PostgreSQL (primary, with lock analysis) +- MySQL (with engine-specific considerations) +- SQLite (with ALTER limitations noted) + +## Commands + +| Command | Description | +|---------|-------------| +| `/db-migrate setup` | Setup wizard - detect tool, map configuration | +| `/db-migrate generate <description>` | Generate migration from model diff or empty template | +| `/db-migrate validate` | Check migration safety (data loss, locks, rollback) | +| `/db-migrate plan` | Show execution plan with rollback strategy | +| `/db-migrate history` | Display migration history and current state | +| `/db-migrate rollback` | Generate rollback migration or plan | + +## Agents + +| Agent | Model | Mode | Purpose |
+|-------|-------|------|---------| +| `migration-planner` | sonnet | default | Migration generation, planning, rollback | +| `migration-auditor` | haiku | plan (read-only) | Safety validation and risk assessment | + +## Installation + +This plugin is part of the Leo Claude Marketplace. It is installed automatically when the marketplace is configured. + +### Prerequisites + +- A project with an existing database and migration tool +- Run `/db-migrate setup` before using other commands + +## Configuration + +The `/db-migrate setup` command creates `.db-migrate.json` in your project root with detected settings. All subsequent commands read this file for tool and path configuration. + +## Safety Philosophy + +This plugin prioritizes data safety above all else. Every migration is analyzed for: +- **Data loss risk**: DROP and ALTER operations are flagged prominently +- **Lock duration**: DDL operations are assessed for table lock impact +- **Rollback completeness**: Every upgrade must have a corresponding downgrade +- **Transaction safety**: All operations must be wrapped in transactions diff --git a/plugins/saas-db-migrate/agents/migration-auditor.md b/plugins/saas-db-migrate/agents/migration-auditor.md new file mode 100644 index 0000000..3a3c923 --- /dev/null +++ b/plugins/saas-db-migrate/agents/migration-auditor.md @@ -0,0 +1,82 @@ +--- +name: migration-auditor +description: Read-only safety validation of database migrations +model: haiku +permissionMode: plan +disallowedTools: Write, Edit, MultiEdit +--- + +# Migration Auditor Agent + +You are a strict database migration safety auditor. Your role is to analyze migration files for data loss risks, lock contention, and operational safety issues. You never modify files; you only read and report. 
+ +## Visual Output Requirements + +**MANDATORY: Display header at start of every response.** + +``` ++----------------------------------------------------------------------+ +| DB-MIGRATE - Migration Auditor | +| [Context Line] | ++----------------------------------------------------------------------+ +``` + +## Expertise + +- Database DDL operation risk assessment +- Lock behavior analysis for PostgreSQL, MySQL, SQLite +- Data loss detection in schema migrations +- Transaction safety verification +- Rollback completeness auditing +- Production deployment impact estimation + +## Skills to Load + +- skills/migration-safety.md +- skills/visual-header.md + +## Validation Methodology + +### 1. Parse Migration Operations + +Read the migration file and extract all SQL operations: +- DDL statements (CREATE, ALTER, DROP) +- DML statements (INSERT, UPDATE, DELETE) +- Constraint operations (ADD/DROP CONSTRAINT, INDEX) +- Transaction control (BEGIN, COMMIT, ROLLBACK) + +### 2. Risk Classification + +Apply the migration safety rules to each operation: + +| Risk Level | Criteria | Examples | +|------------|----------|---------| +| **FAIL** | Irreversible data loss without safeguards | DROP TABLE, DROP COLUMN without backup step | +| **FAIL** | Schema inconsistency risk | ALTER TYPE narrowing, NOT NULL without DEFAULT | +| **FAIL** | Missing transaction wrapper | DDL outside transaction boundaries | +| **WARN** | Potentially long-running lock | ALTER TABLE on large tables, ADD INDEX non-concurrently | +| **WARN** | Incomplete rollback | Downgrade function missing or partial | +| **WARN** | Mixed concerns | Schema and data changes in same migration | +| **INFO** | Optimization opportunity | Could use IF NOT EXISTS, concurrent index creation | + +### 3. 
Lock Duration Estimation + +For each ALTER operation, estimate lock behavior: +- PostgreSQL: ADD COLUMN with DEFAULT is instant (11+); ALTER TYPE requires full rewrite +- MySQL: Most ALTERs require table copy (consider pt-online-schema-change) +- SQLite: ALTER is limited; most changes require table recreation + +### 4. Rollback Completeness Check + +Verify the downgrade/rollback section: +- Every upgrade operation has a corresponding downgrade +- DROP operations in downgrade include data loss warnings +- Transaction wrapping in downgrade matches upgrade + +## Report Format + +Always output findings grouped by severity with exact line references and actionable fix instructions. Include a summary with operation count, risk level, and pass/fail verdict. + +## Communication Style + +Precise, factual, and risk-focused. Report findings with specific line numbers, exact SQL operations, and concrete risk descriptions. Every finding must include a fix recommendation. No subjective commentary; only objective safety analysis. diff --git a/plugins/saas-db-migrate/agents/migration-planner.md b/plugins/saas-db-migrate/agents/migration-planner.md new file mode 100644 index 0000000..39a90f1 --- /dev/null +++ b/plugins/saas-db-migrate/agents/migration-planner.md @@ -0,0 +1,92 @@ +--- +name: migration-planner +description: Migration generation, rollback planning, and schema management +model: sonnet +permissionMode: default +--- + +# Migration Planner Agent + +You are a database migration specialist. You generate, plan, and manage schema migrations for Alembic, Prisma, and raw SQL workflows. You understand the risks of schema changes and always prioritize data safety. 
+ +## Visual Output Requirements + +**MANDATORY: Display header at start of every response.** + +``` ++----------------------------------------------------------------------+ +| DB-MIGRATE - Migration Planner | +| [Context Line] | ++----------------------------------------------------------------------+ +``` + +## Expertise + +- Alembic migration generation and revision management +- Prisma schema diffing and migration creation +- Raw SQL migration scripting with transaction safety +- SQLAlchemy model introspection +- PostgreSQL, MySQL, and SQLite schema operations +- Lock behavior and performance impact of DDL operations +- Data migration strategies (backfill, transform, split) + +## Skills to Load + +- skills/orm-detection.md +- skills/naming-conventions.md +- skills/rollback-patterns.md +- skills/migration-safety.md +- skills/visual-header.md + +## Operating Principles + +### Data Safety First + +Every migration you generate must: + +1. Be wrapped in a transaction (or use tool-native transaction support) +2. Include a rollback/downgrade path +3. Flag destructive operations (DROP, ALTER TYPE narrowing) prominently +4. Suggest data backup steps when data loss is possible +5. Never combine schema changes and data changes in the same migration + +### Migration Quality Standards + +All generated migrations must: + +1. Have a clear, descriptive name following the naming convention +2. Include comments explaining WHY each operation is needed +3. Handle edge cases (empty tables, NULL values, constraint violations) +4. Be idempotent where possible (IF NOT EXISTS, IF EXISTS) +5. 
Consider the impact on running applications (zero-downtime patterns) + +### Tool-Specific Behavior + +**Alembic:** +- Generate proper `revision` chain with `down_revision` references +- Use `op.` operations (not raw SQL) when Alembic supports the operation +- Include `# type: ignore` comments for mypy compatibility when needed +- Test that `upgrade()` and `downgrade()` are symmetric + +**Prisma:** +- Respect `schema.prisma` as the single source of truth +- Generate migration SQL that matches what `prisma migrate dev` would produce +- Handle Prisma's migration directory structure (timestamp folders) + +**Raw SQL:** +- Generate separate UP and DOWN sections clearly marked +- Use database-specific syntax (PostgreSQL vs MySQL vs SQLite) +- Include explicit transaction control (BEGIN/COMMIT/ROLLBACK) + +### Zero-Downtime Patterns + +For production-critical changes, recommend multi-step approaches: + +1. **Add column**: Add as nullable first, backfill, then add NOT NULL constraint +2. **Rename column**: Add new column, copy data, update code, drop old column +3. **Change type**: Add new column with new type, migrate data, swap, drop old +4. **Drop column**: Remove from code first, verify unused, then drop in migration + +## Communication Style + +Methodical and safety-conscious. Always present the risk level of operations. When multiple approaches exist, explain trade-offs (speed vs safety vs complexity). Use clear indicators for new files ([+]), modifications ([~]), and deletions ([-]). diff --git a/plugins/saas-db-migrate/claude-md-integration.md b/plugins/saas-db-migrate/claude-md-integration.md new file mode 100644 index 0000000..92e3e75 --- /dev/null +++ b/plugins/saas-db-migrate/claude-md-integration.md @@ -0,0 +1,66 @@ +# saas-db-migrate Plugin - CLAUDE.md Integration + +Add this section to your project's CLAUDE.md to enable saas-db-migrate plugin features. 
+ +## Suggested CLAUDE.md Section + +```markdown +## Database Migration Integration + +This project uses the saas-db-migrate plugin for database migration workflows. + +### Configuration + +Run `/db-migrate setup` to auto-detect migration tool and configure paths. +Settings stored in `.db-migrate.json` in project root. + +### Available Commands + +| Command | Purpose | +|---------|---------| +| `/db-migrate setup` | Detect migration tool and configure | +| `/db-migrate generate <description>` | Generate migration from model changes | +| `/db-migrate validate` | Check migration for safety issues | +| `/db-migrate plan` | Preview execution plan with rollback | +| `/db-migrate history` | Show migration history and state | +| `/db-migrate rollback` | Generate rollback migration | + +### When to Use + +- **After model changes**: `/db-migrate generate add_status_to_orders --auto` detects diffs +- **Before applying**: `/db-migrate validate` checks for data loss and lock risks +- **Before deploy**: `/db-migrate plan --include-rollback` shows full execution strategy +- **After issues**: `/db-migrate rollback --steps=1` generates rollback plan +- **Status check**: `/db-migrate history` shows what has been applied + +### Safety Rules + +- Never apply migrations without running `/db-migrate validate` first +- Always have a rollback plan for production migrations +- Separate schema changes from data migrations +- Use zero-downtime patterns for production (add nullable, backfill, constrain) +``` + +## Typical Workflows + +### New Feature Migration +``` +/db-migrate generate add_orders_table --auto +/db-migrate validate +/db-migrate plan +# Apply migration via your tool (alembic upgrade head, prisma migrate deploy) +``` + +### Pre-Deploy Check +``` +/db-migrate validate --all --strict +/db-migrate plan --include-rollback +/db-migrate history --status=pending +``` + +### Emergency Rollback +``` +/db-migrate history +/db-migrate rollback --steps=1 --dry-run +/db-migrate rollback --steps=1 +```
diff --git a/plugins/saas-db-migrate/commands/db-migrate-generate.md b/plugins/saas-db-migrate/commands/db-migrate-generate.md new file mode 100644 index 0000000..99a3715 --- /dev/null +++ b/plugins/saas-db-migrate/commands/db-migrate-generate.md @@ -0,0 +1,125 @@ +--- +name: db-migrate generate +description: Generate migration from model diff +agent: migration-planner +--- + +# /db-migrate generate - Migration Generator + +## Skills to Load + +- skills/orm-detection.md +- skills/naming-conventions.md +- skills/visual-header.md + +## Visual Output + +Display header: `DB-MIGRATE - Generate` + +## Usage + +``` +/db-migrate generate <description> [--auto] [--empty] +``` + +**Arguments:** +- `<description>`: Short description of the change (e.g., "add_orders_table", "add_email_to_users") +- `--auto`: Auto-detect changes from model diff (Alembic/Prisma only) +- `--empty`: Generate empty migration file for manual editing + +## Prerequisites + +Run `/db-migrate setup` first. Reads `.db-migrate.json` for tool and configuration. + +## Process + +### 1. Read Configuration + +Load `.db-migrate.json` to determine: +- Migration tool (Alembic, Prisma, raw SQL) +- Migration directory path +- Model directory path (for auto-detection) +- Naming convention + +### 2. Detect Schema Changes (--auto mode) + +**Alembic:** +- Compare current SQLAlchemy models against database schema +- Identify new tables, dropped tables, added/removed columns, type changes +- Detect index additions/removals, constraint changes +- Generate `upgrade()` and `downgrade()` functions + +**Prisma:** +- Run `prisma migrate diff` to compare schema.prisma against database +- Identify model additions, field changes, relation updates +- Generate migration SQL and Prisma migration directory + +**Raw SQL:** +- Auto-detection not available; create empty template +- Include commented sections for UP and DOWN operations + +### 3.
Generate Migration File + +Create migration file following the naming convention: + +| Tool | Format | Example | +|------|--------|---------| +| Alembic | `{revision}_{description}.py` | `a1b2c3d4_add_orders_table.py` | +| Prisma | `{timestamp}_{description}/migration.sql` | `20240115120000_add_orders_table/migration.sql` | +| Raw SQL | `{sequence}_{description}.sql` | `003_add_orders_table.sql` | + +### 4. Include Safety Checks + +Every generated migration includes: +- Transaction wrapping (BEGIN/COMMIT or framework equivalent) +- Data preservation warnings for destructive operations +- Rollback function/section (downgrade in Alembic, DOWN in raw SQL) +- Comments explaining each operation + +### 5. Validate Generated Migration + +Run safety checks from `skills/migration-safety.md` on the generated file before presenting to user. + +## Output Format + +``` ++----------------------------------------------------------------------+ +| DB-MIGRATE - Generate | ++----------------------------------------------------------------------+ + +Tool: Alembic +Mode: auto-detect +Description: add_orders_table + +Changes Detected: + [+] Table: orders (5 columns) + [+] Column: orders.user_id (FK -> users.id) + [+] Index: ix_orders_user_id + +Files Created: + [+] alembic/versions/a1b2c3d4_add_orders_table.py + +Migration Preview: + upgrade(): + - CREATE TABLE orders (id, user_id, total, status, created_at) + - CREATE INDEX ix_orders_user_id ON orders(user_id) + - ADD FOREIGN KEY orders.user_id -> users.id + + downgrade(): + - DROP INDEX ix_orders_user_id + - DROP TABLE orders + +Safety Check: PASS (no destructive operations) + +Next Steps: + - Review generated migration file + - Run /db-migrate validate for safety analysis + - Run /db-migrate plan to see execution plan +``` + +## Important Notes + +- Auto-detection works best with Alembic and Prisma +- Always review generated migrations before applying +- Destructive operations (DROP, ALTER TYPE) are flagged with warnings +- The 
`--empty` flag is useful for data migrations that cannot be auto-detected diff --git a/plugins/saas-db-migrate/commands/db-migrate-history.md b/plugins/saas-db-migrate/commands/db-migrate-history.md new file mode 100644 index 0000000..00a86c1 --- /dev/null +++ b/plugins/saas-db-migrate/commands/db-migrate-history.md @@ -0,0 +1,122 @@ +--- +name: db-migrate history +description: Display migration history and current state +agent: migration-planner +--- + +# /db-migrate history - Migration History + +## Skills to Load + +- skills/orm-detection.md +- skills/visual-header.md + +## Visual Output + +Display header: `DB-MIGRATE - History` + +## Usage + +``` +/db-migrate history [--limit=] [--status=applied|pending|all] [--verbose] +``` + +**Arguments:** +- `--limit`: Number of migrations to show (default: 20) +- `--status`: Filter by status (default: all) +- `--verbose`: Show full migration details including SQL operations + +## Prerequisites + +Run `/db-migrate setup` first. Reads `.db-migrate.json` for tool and configuration. + +## Process + +### 1. Read Migration Source + +Depending on the detected tool: + +**Alembic:** +- Read `alembic_version` table for applied migrations +- Scan `alembic/versions/` directory for all migration files +- Cross-reference to determine pending migrations + +**Prisma:** +- Read `_prisma_migrations` table for applied migrations +- Scan `prisma/migrations/` directory for all migration directories +- Cross-reference applied vs available + +**Raw SQL:** +- Read migration tracking table (if exists) for applied migrations +- Scan migration directory for numbered SQL files +- Determine state from sequence numbers + +### 2. Build Timeline + +For each migration, determine: +- Migration identifier (revision hash, timestamp, sequence number) +- Description (extracted from filename or metadata) +- Status: Applied, Pending, or Failed +- Applied timestamp (if available from tracking table) +- Author (if available from migration metadata) + +### 3. 
Detect Anomalies + +Flag unusual states: +- Out-of-order migrations (gap in sequence) +- Failed migrations that need manual intervention +- Migration files present in directory but not in tracking table +- Entries in tracking table without corresponding files (deleted migrations) + +### 4. Display History + +Present chronological list with status indicators. + +## Output Format + +``` ++----------------------------------------------------------------------+ +| DB-MIGRATE - History | ++----------------------------------------------------------------------+ + +Tool: Alembic +Database: PostgreSQL (myapp_production) +Total Migrations: 8 (6 applied, 2 pending) + +MIGRATION HISTORY + + # Status Timestamp Description + -- -------- -------------------- ---------------------------------------- + 1 [applied] 2024-01-05 10:30:00 initial_schema + 2 [applied] 2024-01-12 14:15:00 add_users_table + 3 [applied] 2024-01-20 09:45:00 add_products_table + 4 [applied] 2024-02-01 11:00:00 add_orders_table + 5 [applied] 2024-02-15 16:30:00 add_user_roles + 6 [applied] 2024-03-01 08:20:00 add_order_status_column + 7 [pending] -- add_order_items_table + 8 [pending] -- add_payment_tracking + +Current Head: migration_006_add_order_status_column +Pending Count: 2 + +No anomalies detected. 
+``` + +### Verbose Mode + +With `--verbose`, each migration expands to show: + +``` + 4 [applied] 2024-02-01 11:00:00 add_orders_table + Operations: + [+] CREATE TABLE orders (id, user_id, total, status, created_at) + [+] CREATE INDEX ix_orders_user_id + [+] ADD FOREIGN KEY orders.user_id -> users.id + Rollback: Available (DROP TABLE orders) +``` + +## Important Notes + +- History reads from both the database tracking table and the filesystem +- If database is unreachable, only filesystem state is shown (no applied timestamps) +- Anomalies like missing files or orphaned tracking entries should be resolved manually diff --git a/plugins/saas-db-migrate/commands/db-migrate-plan.md b/plugins/saas-db-migrate/commands/db-migrate-plan.md new file mode 100644 index 0000000..9fe33a0 --- /dev/null +++ b/plugins/saas-db-migrate/commands/db-migrate-plan.md @@ -0,0 +1,136 @@ +--- +name: db-migrate plan +description: Show execution plan with rollback strategy +agent: migration-planner +--- + +# /db-migrate plan - Migration Execution Plan + +## Skills to Load + +- skills/rollback-patterns.md +- skills/migration-safety.md +- skills/visual-header.md + +## Visual Output + +Display header: `DB-MIGRATE - Plan` + +## Usage + +``` +/db-migrate plan [--target=<migration>] [--include-rollback] +``` + +**Arguments:** +- `--target`: Plan up to specific migration (default: all pending) +- `--include-rollback`: Show rollback plan alongside forward plan + +## Prerequisites + +Run `/db-migrate setup` first. Pending migrations must exist. + +## Process + +### 1. Determine Current State + +Query the migration history to find: +- Latest applied migration +- All pending (unapplied) migrations in order +- Any out-of-order migrations (applied but not contiguous) + +### 2. 
Build Forward Plan + +For each pending migration, document: +- Migration identifier and description +- SQL operations that will execute (summarized) +- Estimated lock duration for ALTER operations +- Dependencies on previous migrations +- Expected outcome (tables/columns affected) + +### 3. Build Rollback Plan (if --include-rollback) + +For each migration in reverse order, document: +- Rollback/downgrade operations +- Data recovery strategy (if destructive operations present) +- Point-of-no-return warnings (migrations that cannot be fully rolled back) +- Recommended backup steps before applying + +### 4. Risk Assessment + +Evaluate the complete plan: +- Total number of operations +- Presence of destructive operations (DROP, ALTER TYPE) +- Estimated total lock time +- Data migration volume (if data changes included) +- Recommended maintenance window duration + +### 5. Present Plan + +Display ordered execution plan with risk indicators. + +## Output Format + +``` ++----------------------------------------------------------------------+ +| DB-MIGRATE - Plan | ++----------------------------------------------------------------------+ + +Current State: migration_005_add_user_roles (applied) +Pending: 3 migrations + +FORWARD PLAN + +Step 1: migration_006_add_orders_table + Operations: + [+] CREATE TABLE orders (5 columns) + [+] CREATE INDEX ix_orders_user_id + [+] ADD FOREIGN KEY orders.user_id -> users.id + Lock Impact: None (new table) + Risk: LOW + +Step 2: migration_007_add_order_items + Operations: + [+] CREATE TABLE order_items (4 columns) + [+] CREATE INDEX ix_order_items_order_id + Lock Impact: None (new table) + Risk: LOW + +Step 3: migration_008_add_status_to_orders + Operations: + [~] ADD COLUMN orders.status VARCHAR(20) DEFAULT 'pending' + [~] ADD CHECK CONSTRAINT valid_status + Lock Impact: ~2s (instant ADD COLUMN with DEFAULT in PostgreSQL 11+) + Risk: LOW + +ROLLBACK PLAN + +Step 3 (reverse): Undo migration_008 + [~] DROP CONSTRAINT valid_status + [~] DROP 
COLUMN orders.status + Reversible: YES + +Step 2 (reverse): Undo migration_007 + [-] DROP TABLE order_items + Reversible: YES (but data is lost) + +Step 1 (reverse): Undo migration_006 + [-] DROP TABLE orders + Reversible: YES (but data is lost) + +RISK SUMMARY + Total Operations: 9 + Destructive: 0 + Lock Time: ~2 seconds + Risk Level: LOW + Maintenance Window: Not required + +RECOMMENDATION: Safe to apply without maintenance window. +``` + +## Important Notes + +- The plan is informational; it does not apply any migrations +- Lock time estimates are approximate and depend on table size +- Always back up the database before applying destructive migrations +- Out-of-order migrations are flagged as warnings diff --git a/plugins/saas-db-migrate/commands/db-migrate-rollback.md b/plugins/saas-db-migrate/commands/db-migrate-rollback.md new file mode 100644 index 0000000..301181c --- /dev/null +++ b/plugins/saas-db-migrate/commands/db-migrate-rollback.md @@ -0,0 +1,130 @@ +--- +name: db-migrate rollback +description: Generate rollback migration for a previously applied migration +agent: migration-planner +--- + +# /db-migrate rollback - Rollback Generator + +## Skills to Load + +- skills/rollback-patterns.md +- skills/visual-header.md + +## Visual Output + +Display header: `DB-MIGRATE - Rollback` + +## Usage + +``` +/db-migrate rollback [<target>] [--steps=<n>] [--dry-run] +``` + +**Arguments:** +- `<target>`: Specific migration to roll back to (exclusive — rolls back everything after it) +- `--steps`: Number of migrations to roll back from current head (default: 1) +- `--dry-run`: Show what would be rolled back without generating files + +## Prerequisites + +Run `/db-migrate setup` first. Target migrations must have rollback/downgrade operations defined. + +## Process + +### 1. 
Identify Rollback Target + +Determine which migrations to reverse: +- If `<target>` specified: roll back all migrations applied after it +- If `--steps=N`: roll back the last N applied migrations +- Default: roll back the single most recent migration + +### 2. Check Rollback Feasibility + +For each migration to roll back, verify: + +| Check | Result | Action | +|-------|--------|--------| +| Downgrade function exists | Yes | Proceed | +| Downgrade function exists | No | FAIL: Cannot auto-rollback; manual intervention needed | +| Migration contains DROP TABLE | N/A | WARN: Data cannot be restored by rollback | +| Migration contains data changes | N/A | WARN: DML changes may not be fully reversible | +| Later migrations depend on this | Yes | Must roll back dependents first | + +### 3. Generate Rollback + +Depending on the tool: + +**Alembic:** +- Generate `alembic downgrade <target>` command +- Show the downgrade SQL that will execute +- If downgrade function is incomplete, generate supplementary migration + +**Prisma:** +- Generate rollback migration SQL based on diff +- Create new migration directory with rollback operations + +**Raw SQL:** +- Generate new numbered migration file with reverse operations +- Include transaction wrapping and safety checks + +### 4. Data Recovery Plan + +If rolled-back migrations included destructive operations: +- Recommend backup restoration for lost data +- Suggest data export before rollback +- Identify tables/columns that will be recreated empty + +### 5. Present Rollback Plan + +Show the complete rollback strategy with warnings. 
+ +## Output Format + +``` ++----------------------------------------------------------------------+ +| DB-MIGRATE - Rollback | ++----------------------------------------------------------------------+ + +Mode: Roll back 2 steps +Tool: Alembic + +ROLLBACK PLAN + +Step 1: Undo migration_008_add_status_to_orders + Operations: + [-] DROP CONSTRAINT valid_status + [-] DROP COLUMN orders.status + Data Impact: Column data will be lost (12,450 rows) + Reversible: Partially (column recreated empty on re-apply) + +Step 2: Undo migration_007_add_order_items + Operations: + [-] DROP TABLE order_items + Data Impact: Table and all data will be lost (3,200 rows) + Reversible: Partially (table recreated empty on re-apply) + +WARNINGS + [!] 2 operations will cause data loss + [!] Back up affected tables before proceeding + +COMMANDS TO EXECUTE + alembic downgrade -2 + # Or: alembic downgrade migration_006_add_orders_table + +Generated Files: + (No new files — Alembic uses existing downgrade functions) + +RECOMMENDED PRE-ROLLBACK STEPS + 1. pg_dump --table=order_items myapp > order_items_backup.sql + 2. pg_dump --table=orders --column-inserts myapp > orders_status_backup.sql + 3. 
Review downgrade SQL with /db-migrate plan --include-rollback +``` + +## Important Notes + +- Rollback does NOT execute migrations; it generates the plan and/or files +- Always back up data before rolling back destructive migrations +- Some migrations are irreversible (data-only changes without backup) +- Use `--dry-run` to preview without creating any files +- After rollback, verify application compatibility with the older schema diff --git a/plugins/saas-db-migrate/commands/db-migrate-setup.md b/plugins/saas-db-migrate/commands/db-migrate-setup.md new file mode 100644 index 0000000..f14a4e9 --- /dev/null +++ b/plugins/saas-db-migrate/commands/db-migrate-setup.md @@ -0,0 +1,92 @@ +--- +name: db-migrate setup +description: Setup wizard for migration tool detection and configuration +agent: migration-planner +--- + +# /db-migrate setup - Migration Platform Setup Wizard + +## Skills to Load + +- skills/orm-detection.md +- skills/visual-header.md + +## Visual Output + +Display header: `DB-MIGRATE - Setup Wizard` + +## Usage + +``` +/db-migrate setup +``` + +## Workflow + +### Phase 1: Migration Tool Detection + +Scan the project for migration tool indicators: + +| File / Pattern | Tool | Confidence | +|----------------|------|------------| +| `alembic.ini` in project root | Alembic | High | +| `alembic/` directory | Alembic | High | +| `sqlalchemy` in requirements | Alembic (likely) | Medium | +| `prisma/schema.prisma` | Prisma | High | +| `@prisma/client` in package.json | Prisma | High | +| `migrations/` with numbered `.sql` files | Raw SQL | Medium | +| `flyway.conf` | Flyway (raw SQL) | High | +| `knexfile.js` or `knexfile.ts` | Knex | High | + +If no tool detected, ask user to select one. 
+ +### Phase 2: Configuration Mapping + +Identify existing migration configuration: + +- **Migration directory**: Where migration files live +- **Model directory**: Where ORM models are defined (for auto-generation) +- **Database URL**: Connection string location (env var, config file) +- **Naming convention**: How migration files are named +- **Current state**: Latest applied migration + +### Phase 3: Database Connection Test + +Attempt to verify database connectivity: + +- Read connection string from detected location +- Test connection (read-only) +- Report database type (PostgreSQL, MySQL, SQLite) +- Report current schema version if detectable + +### Phase 4: Validation + +Display detected configuration summary and ask for confirmation. + +## Output Format + +``` ++----------------------------------------------------------------------+ +| DB-MIGRATE - Setup Wizard | ++----------------------------------------------------------------------+ + +Migration Tool: Alembic 1.13.1 +ORM: SQLAlchemy 2.0.25 +Database: PostgreSQL 16.1 +Migration Dir: ./alembic/versions/ +Model Dir: ./app/models/ +DB URL Source: DATABASE_URL env var + +Current State: + Latest Migration: 2024_01_15_add_orders_table (applied) + Pending: 0 migrations + +Configuration saved to .db-migrate.json +``` + +## Important Notes + +- This command does NOT run any migrations; it only detects and configures +- Database connection test is read-only (SELECT 1) +- If `.db-migrate.json` already exists, offer to update or keep +- All subsequent commands rely on this configuration diff --git a/plugins/saas-db-migrate/commands/db-migrate-validate.md b/plugins/saas-db-migrate/commands/db-migrate-validate.md new file mode 100644 index 0000000..3c36610 --- /dev/null +++ b/plugins/saas-db-migrate/commands/db-migrate-validate.md @@ -0,0 +1,127 @@ +--- +name: db-migrate validate +description: Check migration safety before applying +agent: migration-auditor +--- + +# /db-migrate validate - Migration Safety Validator + 
+## Skills to Load + +- skills/migration-safety.md +- skills/visual-header.md + +## Visual Output + +Display header: `DB-MIGRATE - Validate` + +## Usage + +``` +/db-migrate validate [<file>] [--all] [--strict] +``` + +**Arguments:** +- `<file>`: Specific migration to validate (default: latest unapplied) +- `--all`: Validate all unapplied migrations +- `--strict`: Treat warnings as errors + +## Prerequisites + +Run `/db-migrate setup` first. Migration files must exist in the configured directory. + +## Process + +### 1. Identify Target Migrations + +Determine which migrations to validate: +- Specific file if provided +- All unapplied migrations if `--all` +- Latest unapplied migration by default + +### 2. Parse Migration Operations + +Read each migration file and extract SQL operations: +- Table creation/deletion +- Column additions, modifications, removals +- Index operations +- Constraint changes +- Data manipulation (INSERT, UPDATE, DELETE) +- Custom SQL blocks + +### 3. Safety Analysis + +Apply safety rules from `skills/migration-safety.md`: + +| Check | Severity | Description | +|-------|----------|-------------| +| DROP TABLE | FAIL | Permanent data loss; requires explicit acknowledgment | +| DROP COLUMN | FAIL | Data loss; must confirm column is unused | +| ALTER COLUMN type (narrowing) | FAIL | Data truncation risk (e.g., VARCHAR(255) to VARCHAR(50)) | +| ALTER COLUMN type (widening) | WARN | Safe but verify application handles new type | +| ALTER COLUMN NOT NULL (existing data) | FAIL | May fail if NULLs exist; needs DEFAULT or backfill | +| RENAME TABLE/COLUMN | WARN | Application code must be updated simultaneously | +| Large table ALTER | WARN | May lock table for extended time; consider batching | +| Missing transaction wrapper | FAIL | Partial migrations leave inconsistent state | +| Missing rollback/downgrade | WARN | Cannot undo if problems occur | +| Data migration in schema migration | WARN | Should be separate migration | +| No-op migration | INFO | Migration 
has no effect | + +### 4. Lock Duration Estimation + +For ALTER operations on existing tables, estimate lock impact: +- Table size (if database connection available) +- Operation type (ADD COLUMN is instant in PostgreSQL, ALTER TYPE is not) +- Concurrent operation risk + +### 5. Generate Report + +Group findings by severity with actionable recommendations. + +## Output Format + +``` ++----------------------------------------------------------------------+ +| DB-MIGRATE - Validate | ++----------------------------------------------------------------------+ + +Target: alembic/versions/b3c4d5e6_drop_legacy_columns.py +Tool: Alembic + +FINDINGS + +FAIL (2) + 1. DROP COLUMN users.legacy_email + Risk: Permanent data loss for 12,450 rows with values + Fix: Verify column is unused, add data backup step, or + rename column first and drop in a future migration + + 2. ALTER COLUMN orders.total VARCHAR(10) -> VARCHAR(5) + Risk: Data truncation for values longer than 5 characters + Fix: Check max actual length: SELECT MAX(LENGTH(total)) FROM orders + If safe, document in migration comment + +WARN (1) + 1. Missing downgrade for DROP COLUMN + Risk: Cannot rollback this migration + Fix: Add downgrade() that re-creates column (data will be lost) + +INFO (1) + 1. 
Migration includes both schema and data changes + Suggestion: Separate into two migrations for cleaner rollback + +SUMMARY + Operations: 4 (2 DDL, 2 DML) + FAIL: 2 (must fix before applying) + WARN: 1 (should fix) + INFO: 1 (improve) + +VERDICT: FAIL (2 blocking issues) +``` + +## Exit Guidance + +- FAIL: Do not apply migration until issues are resolved +- WARN: Review carefully; proceed with caution +- INFO: Suggestions for improvement; safe to proceed +- `--strict`: All WARN become FAIL diff --git a/plugins/saas-db-migrate/commands/db-migrate.md b/plugins/saas-db-migrate/commands/db-migrate.md new file mode 100644 index 0000000..ed8b74c --- /dev/null +++ b/plugins/saas-db-migrate/commands/db-migrate.md @@ -0,0 +1,19 @@ +--- +name: db-migrate +description: Database migration toolkit — generate, validate, and manage schema migrations +--- + +# /db-migrate + +Database migration management for Alembic, Prisma, and raw SQL. + +## Sub-commands + +| Sub-command | Description | +|-------------|-------------| +| `/db-migrate setup` | Setup wizard for migration tool detection | +| `/db-migrate generate` | Generate migration from model diff | +| `/db-migrate validate` | Check migration safety | +| `/db-migrate plan` | Show execution plan with rollback strategy | +| `/db-migrate history` | Display migration history | +| `/db-migrate rollback` | Generate rollback migration | diff --git a/plugins/saas-db-migrate/skills/migration-safety.md b/plugins/saas-db-migrate/skills/migration-safety.md new file mode 100644 index 0000000..2057436 --- /dev/null +++ b/plugins/saas-db-migrate/skills/migration-safety.md @@ -0,0 +1,119 @@ +--- +name: migration-safety +description: Rules for detecting destructive operations, data loss risks, and long-running locks +--- + +# Migration Safety + +## Purpose + +Defines safety rules for analyzing database migrations. 
This skill is loaded by both the `migration-planner` (during generation) and `migration-auditor` (during validation) agents to ensure migrations do not cause data loss or operational issues. + +--- + +## Destructive Operations + +### FAIL-Level (Block Migration) + +| Operation | Risk | Detection Pattern | +|-----------|------|-------------------| +| `DROP TABLE` | Complete data loss | `DROP TABLE` without preceding backup/export | +| `DROP COLUMN` | Column data loss | `DROP COLUMN` without verification step | +| `ALTER COLUMN` type narrowing | Data truncation | VARCHAR(N) to smaller N, INTEGER to SMALLINT | +| `ALTER COLUMN` SET NOT NULL | Failure if NULLs exist | `SET NOT NULL` without DEFAULT or backfill | +| `TRUNCATE TABLE` | All rows deleted | `TRUNCATE` in migration file | +| `DELETE FROM` without WHERE | All rows deleted | `DELETE FROM table` without WHERE clause | +| Missing transaction | Partial migration risk | DDL statements outside BEGIN/COMMIT | + +### WARN-Level (Report, Continue) + +| Operation | Risk | Detection Pattern | +|-----------|------|-------------------| +| `RENAME TABLE` | App code must update | `ALTER TABLE ... RENAME TO` | +| `RENAME COLUMN` | App code must update | `ALTER TABLE ... 
RENAME COLUMN` | +| `ALTER COLUMN` type widening | Usually safe but verify | INTEGER to BIGINT, VARCHAR to TEXT | +| `CREATE INDEX` (non-concurrent) | Table lock during build | `CREATE INDEX` without `CONCURRENTLY` | +| Large table ALTER | Extended lock time | Any ALTER on tables with 100K+ rows | +| Mixed schema + data migration | Complex rollback | DML and DDL in same migration file | +| Missing downgrade/rollback | Cannot undo | No downgrade function or DOWN section | + +### INFO-Level (Suggestions) + +| Operation | Suggestion | Detection Pattern | +|-----------|-----------|-------------------| +| No-op migration | Remove or document why | Empty upgrade function | +| Missing IF EXISTS/IF NOT EXISTS | Add for idempotency | `CREATE TABLE` without `IF NOT EXISTS` | +| Non-concurrent index on PostgreSQL | Use CONCURRENTLY | `CREATE INDEX` could be `CREATE INDEX CONCURRENTLY` | + +--- + +## Lock Duration Rules + +### PostgreSQL + +| Operation | Lock Type | Duration | +|-----------|-----------|----------| +| ADD COLUMN (no default) | ACCESS EXCLUSIVE | Instant (metadata only) | +| ADD COLUMN with DEFAULT | ACCESS EXCLUSIVE | Instant (PG 11+) | +| ALTER COLUMN TYPE | ACCESS EXCLUSIVE | Full table rewrite | +| DROP COLUMN | ACCESS EXCLUSIVE | Instant (metadata only) | +| CREATE INDEX | SHARE | Proportional to table size | +| CREATE INDEX CONCURRENTLY | SHARE UPDATE EXCLUSIVE | Longer but non-blocking | +| ADD CONSTRAINT (CHECK) | ACCESS EXCLUSIVE | Scans entire table | +| ADD CONSTRAINT NOT VALID + VALIDATE | Split: instant + non-blocking | Recommended for large tables | + +### MySQL + +| Operation | Lock Type | Duration | +|-----------|-----------|----------| +| Most ALTER TABLE | Table copy | Proportional to table size | +| ADD COLUMN (last position) | Instant (8.0+ some cases) | Depends on engine | +| CREATE INDEX | Table copy or instant | Engine-dependent | + +--- + +## Recommended Patterns + +### Safe Column Addition +```sql +-- Good: nullable column, no lock 
+ALTER TABLE users ADD COLUMN middle_name VARCHAR(100); + +-- Then backfill in batches (separate migration): +UPDATE users SET middle_name = '' WHERE middle_name IS NULL; + +-- Then add constraint (separate migration): +ALTER TABLE users ALTER COLUMN middle_name SET NOT NULL; +``` + +### Safe Column Removal +```sql +-- Step 1: Remove from application code first +-- Step 2: Verify column is unused (no queries reference it) +-- Step 3: Drop in migration +ALTER TABLE users DROP COLUMN IF EXISTS legacy_field; +``` + +### Safe Type Change +```sql +-- Step 1: Add new column +ALTER TABLE orders ADD COLUMN amount_new NUMERIC(10,2); +-- Step 2: Backfill (separate migration) +UPDATE orders SET amount_new = amount::NUMERIC(10,2); +-- Step 3: Swap columns (separate migration) +ALTER TABLE orders DROP COLUMN amount; +ALTER TABLE orders RENAME COLUMN amount_new TO amount; +``` + +--- + +## Pre-Migration Checklist + +Before applying any migration in production: + +1. Database backup completed and verified +2. Migration validated with `/db-migrate validate` +3. Execution plan reviewed with `/db-migrate plan` +4. Rollback strategy documented and tested +5. Maintenance window scheduled (if required by lock analysis) +6. Application deployment coordinated (if schema change affects code) diff --git a/plugins/saas-db-migrate/skills/naming-conventions.md b/plugins/saas-db-migrate/skills/naming-conventions.md new file mode 100644 index 0000000..5e4d7d8 --- /dev/null +++ b/plugins/saas-db-migrate/skills/naming-conventions.md @@ -0,0 +1,104 @@ +--- +name: naming-conventions +description: Migration file naming rules — timestamp prefixes, descriptive suffixes, ordering +--- + +# Naming Conventions + +## Purpose + +Defines standard naming patterns for migration files across all supported tools. This skill ensures consistent, descriptive, and correctly ordered migration files. + +--- + +## General Rules + +1. **Use lowercase with underscores** for descriptions (snake_case) +2. 
**Be descriptive but concise**: Describe WHAT changes, not WHY +3. **Use verb prefixes**: `add_`, `drop_`, `rename_`, `alter_`, `create_`, `remove_` +4. **Include the table name** when the migration affects a single table +5. **Never use generic names** like `migration_1`, `update`, `fix` + +--- + +## Tool-Specific Patterns + +### Alembic + +**Format:** `{revision_hash}_{description}.py` + +The revision hash is auto-generated by Alembic. The description is provided by the user. + +| Example | Description | +|---------|-------------| +| `a1b2c3d4_create_users_table.py` | Initial table creation | +| `e5f6g7h8_add_email_to_users.py` | Add column | +| `i9j0k1l2_drop_legacy_users_columns.py` | Remove columns | +| `m3n4o5p6_rename_orders_total_to_amount.py` | Rename column | +| `q7r8s9t0_add_index_on_orders_user_id.py` | Add index | + +**Description rules for Alembic:** +- Max 60 characters for the description portion +- No spaces (use underscores) +- Alembic auto-generates the revision hash + +### Prisma + +**Format:** `{YYYYMMDDHHMMSS}_{description}/migration.sql` + +| Example | Description | +|---------|-------------| +| `20240115120000_create_users_table/migration.sql` | Initial table | +| `20240120093000_add_email_to_users/migration.sql` | Add column | +| `20240201110000_add_orders_table/migration.sql` | New table | + +**Description rules for Prisma:** +- Prisma generates the timestamp automatically with `prisma migrate dev` +- Description is the `--name` argument +- Use snake_case, no spaces + +### Raw SQL + +**Format:** `{NNN}_{description}.sql` + +Sequential numbering with zero-padded prefix: + +| Example | Description | +|---------|-------------| +| `001_create_users_table.sql` | First migration | +| `002_add_email_to_users.sql` | Second migration | +| `003_create_orders_table.sql` | Third migration | + +**Numbering rules:** +- Zero-pad to 3 digits minimum (001, 002, ..., 999) +- If project exceeds 999, use 4 digits (0001, 0002, ...) 
+- Never reuse numbers, even for deleted migrations +- Gaps in sequence are acceptable (001, 002, 005 is fine) + +--- + +## Description Verb Prefixes + +| Prefix | Use When | +|--------|----------| +| `create_` | New table | +| `add_` | New column, index, or constraint to existing table | +| `drop_` | Remove table | +| `remove_` | Remove column, index, or constraint | +| `rename_` | Rename table or column | +| `alter_` | Change column type or constraint | +| `backfill_` | Data-only migration (populate column values) | +| `merge_` | Combine tables or columns | +| `split_` | Separate table or column into multiple | + +--- + +## Anti-Patterns + +| Bad Name | Problem | Better Name | +|----------|---------|-------------| +| `migration_1.py` | Not descriptive | `create_users_table.py` | +| `update.sql` | Too vague | `add_status_to_orders.sql` | +| `fix_bug.py` | Describes why, not what | `alter_email_column_length.py` | +| `changes.sql` | Not descriptive | `add_index_on_users_email.sql` | +| `final_migration.py` | Nothing is ever final | `remove_deprecated_columns.py` | diff --git a/plugins/saas-db-migrate/skills/orm-detection.md b/plugins/saas-db-migrate/skills/orm-detection.md new file mode 100644 index 0000000..0c90105 --- /dev/null +++ b/plugins/saas-db-migrate/skills/orm-detection.md @@ -0,0 +1,94 @@ +--- +name: orm-detection +description: Detect Alembic, Prisma, or raw SQL migration tools and locate configuration files +--- + +# ORM Detection + +## Purpose + +Identify the database migration tool in use and map its configuration. This skill is loaded by the `migration-planner` agent during setup and migration generation to ensure tool-appropriate output. 
+ +--- + +## Detection Rules + +### Alembic Detection + +| Indicator | Location | Confidence | +|-----------|----------|------------| +| `alembic.ini` file | Project root | High | +| `alembic/` directory with `env.py` | Project root | High | +| `alembic/versions/` directory | Within alembic dir | High | +| `sqlalchemy` + `alembic` in requirements | `requirements.txt`, `pyproject.toml` | Medium | +| `from alembic import op` in Python files | `*.py` in versions dir | High | + +**Alembic Configuration Files:** +- `alembic.ini` — Main config (database URL, migration directory) +- `alembic/env.py` — Migration environment (model imports, target metadata) +- `alembic/versions/` — Migration files directory + +**Model Location:** +- Look for `Base = declarative_base()` or `class Base(DeclarativeBase)` in Python files +- Check `target_metadata` in `env.py` to find the models module +- Common locations: `app/models/`, `models/`, `src/models/` + +### Prisma Detection + +| Indicator | Location | Confidence | +|-----------|----------|------------| +| `prisma/schema.prisma` file | Project root | High | +| `@prisma/client` in package.json | `package.json` | High | +| `prisma/migrations/` directory | Within prisma dir | High | +| `npx prisma` in scripts | `package.json` scripts | Medium | + +**Prisma Configuration Files:** +- `prisma/schema.prisma` — Schema definition (models, datasource, generator) +- `prisma/migrations/` — Migration directories (timestamp-named) +- `.env` — `DATABASE_URL` connection string + +### Raw SQL Detection + +| Indicator | Location | Confidence | +|-----------|----------|------------| +| `migrations/` dir with numbered `.sql` files | Project root | Medium | +| `flyway.conf` | Project root | High (Flyway) | +| `knexfile.js` or `knexfile.ts` | Project root | High (Knex) | +| `db/migrate/` directory | Project root | Medium (Rails-style) | + +**Raw SQL Configuration:** +- Migration directory location +- Naming pattern (sequential numbers, timestamps) +- 
Tracking table name (if database-tracked) + +--- + +## Database Connection Detection + +Look for connection strings in this order: + +1. `DATABASE_URL` environment variable +2. `.env` file in project root +3. `alembic.ini` `sqlalchemy.url` setting +4. `prisma/schema.prisma` `datasource` block +5. Application config files (`config.py`, `config.js`, `settings.py`) + +**Database Type Detection:** +- `postgresql://` or `postgres://` — PostgreSQL +- `mysql://` — MySQL +- `sqlite:///` — SQLite +- `mongodb://` — MongoDB (not supported for SQL migrations) + +--- + +## Version Detection + +**Alembic**: Parse from `pip show alembic` or `requirements.txt` pin +**Prisma**: Parse from `package.json` `@prisma/client` version +**SQLAlchemy**: Parse from requirements; important for feature compatibility (1.4 vs 2.0 API) + +--- + +## Ambiguous Projects + +If multiple migration tools are detected (e.g., Alembic for backend + Prisma for a separate service), ask the user which one to target. Store selection in `.db-migrate.json`. diff --git a/plugins/saas-db-migrate/skills/rollback-patterns.md b/plugins/saas-db-migrate/skills/rollback-patterns.md new file mode 100644 index 0000000..cf0227e --- /dev/null +++ b/plugins/saas-db-migrate/skills/rollback-patterns.md @@ -0,0 +1,157 @@ +--- +name: rollback-patterns +description: Standard rollback generation patterns, reverse operations, and data backup strategies +--- + +# Rollback Patterns + +## Purpose + +Defines patterns for generating safe rollback migrations. This skill is loaded by the `migration-planner` agent when generating migrations (to include downgrade sections) and when creating explicit rollback plans. 
+ +--- + +## Reverse Operation Map + +| Forward Operation | Reverse Operation | Data Preserved | +|-------------------|-------------------|----------------| +| CREATE TABLE | DROP TABLE | No (all data lost) | +| DROP TABLE | CREATE TABLE (empty) | No (must restore from backup) | +| ADD COLUMN | DROP COLUMN | No (column data lost) | +| DROP COLUMN | ADD COLUMN (nullable) | No (must restore from backup) | +| RENAME TABLE | RENAME TABLE (back) | Yes | +| RENAME COLUMN | RENAME COLUMN (back) | Yes | +| ADD INDEX | DROP INDEX | Yes (data unaffected) | +| DROP INDEX | CREATE INDEX | Yes (data unaffected) | +| ADD CONSTRAINT | DROP CONSTRAINT | Yes | +| DROP CONSTRAINT | ADD CONSTRAINT | Yes (if data still valid) | +| ALTER COLUMN TYPE | ALTER COLUMN TYPE (back) | Depends on conversion | +| INSERT rows | DELETE matching rows | Yes (if identifiable) | +| UPDATE rows | UPDATE with original values | Only if originals saved | +| DELETE rows | INSERT saved rows | Only if backed up | + +--- + +## Rollback Classification + +### Fully Reversible (Green) + +Operations that can be undone with no data loss: +- RENAME operations (table, column) +- ADD/DROP INDEX +- ADD/DROP CONSTRAINT (when data satisfies constraint) +- ADD COLUMN (drop it in rollback) + +### Partially Reversible (Yellow) + +Operations where structure is restored but data is lost: +- CREATE TABLE (rollback = DROP TABLE; data lost) +- DROP COLUMN (rollback = ADD COLUMN; column data gone) +- ALTER COLUMN TYPE narrowing then widening (precision lost) + +### Irreversible (Red) + +Operations that cannot be meaningfully undone: +- DROP TABLE without backup (data permanently gone) +- TRUNCATE TABLE without backup +- DELETE without WHERE without backup +- Data transformation that loses information (e.g., hash, round) + +--- + +## Backup Strategies + +### Pre-Migration Table Backup + +For migrations that will cause data loss, generate backup commands: + +**PostgreSQL:** +```sql +-- Full table backup +CREATE TABLE 
_backup_users_20240115 AS SELECT * FROM users; + +-- Column-only backup +CREATE TABLE _backup_users_email_20240115 AS + SELECT id, legacy_email FROM users; +``` + +**Export to file:** +```bash +pg_dump --table=users --column-inserts dbname > users_backup_20240115.sql +``` + +### Restoration Commands + +Include restoration commands in rollback section: + +```sql +-- Restore from backup table +INSERT INTO users (id, legacy_email) + SELECT id, legacy_email FROM _backup_users_email_20240115; + +-- Clean up backup +DROP TABLE IF EXISTS _backup_users_email_20240115; +``` + +--- + +## Alembic Downgrade Patterns + +```python +def downgrade(): + # Reverse of upgrade, in opposite order + op.drop_index('ix_orders_user_id', table_name='orders') + op.drop_table('orders') +``` + +For complex downgrades with data restoration: +```python +def downgrade(): + # Re-create dropped column + op.add_column('users', sa.Column('legacy_email', sa.String(255), nullable=True)) + # Note: Data cannot be restored automatically + # Restore from backup: _backup_users_email_YYYYMMDD +``` + +--- + +## Prisma Rollback Patterns + +Prisma does not have native downgrade support. Generate a new migration that reverses the operations: + +```sql +-- Rollback: undo add_orders_table +DROP TABLE IF EXISTS "order_items"; +DROP TABLE IF EXISTS "orders"; +``` + +--- + +## Raw SQL Rollback Patterns + +Always include DOWN section in migration files: + +```sql +-- UP +CREATE TABLE orders ( + id SERIAL PRIMARY KEY, + user_id INTEGER REFERENCES users(id), + total DECIMAL(10,2) NOT NULL +); + +-- DOWN +DROP TABLE IF EXISTS orders; +``` + +--- + +## Point-of-No-Return Identification + +Flag migrations that cross the point of no return: + +1. **Data deletion without backup step**: Mark as irreversible +2. **Type narrowing that truncates data**: Data is permanently altered +3. **Hash/encrypt transformations**: Original values unrecoverable +4. 
**Aggregate/merge operations**: Individual records lost + +When a migration includes irreversible operations, the rollback section must clearly state: "This migration cannot be fully rolled back. Data backup is required before applying." diff --git a/plugins/saas-db-migrate/skills/visual-header.md b/plugins/saas-db-migrate/skills/visual-header.md new file mode 100644 index 0000000..bcba765 --- /dev/null +++ b/plugins/saas-db-migrate/skills/visual-header.md @@ -0,0 +1,56 @@ +--- +name: visual-header +description: Standard header format for db-migrate commands and agents +--- + +# Visual Header + +## Standard Format + +Display at the start of every command execution: + +``` ++----------------------------------------------------------------------+ +| DB-MIGRATE - [Command Name] | ++----------------------------------------------------------------------+ +``` + +## Command Headers + +| Command | Header Text | +|---------|-------------| +| db-migrate-setup | Setup Wizard | +| db-migrate-generate | Generate | +| db-migrate-validate | Validate | +| db-migrate-plan | Plan | +| db-migrate-history | History | +| db-migrate-rollback | Rollback | + +## Summary Box Format + +For completion summaries: + +``` ++============================================================+ +| DB-MIGRATE [OPERATION] COMPLETE | ++============================================================+ +| Component: [Status] | +| Component: [Status] | ++============================================================+ +``` + +## Status Indicators + +- Success: `[check]` or `Ready` +- Warning: `[!]` or `Partial` +- Failure: `[X]` or `Failed` +- New file: `[+]` +- Modified file: `[~]` +- Deleted file: `[-]` + +## Risk Level Indicators + +- LOW: Safe operation, no data loss risk +- MEDIUM: Reversible but requires attention +- HIGH: Potential data loss, backup required +- CRITICAL: Irreversible data loss, explicit approval required diff --git a/plugins/saas-react-platform/.claude-plugin/plugin.json 
b/plugins/saas-react-platform/.claude-plugin/plugin.json new file mode 100644 index 0000000..7948e92 --- /dev/null +++ b/plugins/saas-react-platform/.claude-plugin/plugin.json @@ -0,0 +1,26 @@ +{ + "name": "saas-react-platform", + "version": "0.1.0", + "description": "React frontend development toolkit with component scaffolding, routing, and state management", + "author": { + "name": "Leo Miranda", + "email": "leobmiranda@gmail.com" + }, + "homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/saas-react-platform/README.md", + "repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git", + "license": "MIT", + "keywords": [ + "react", + "nextjs", + "vite", + "typescript", + "components", + "routing", + "state-management", + "frontend" + ], + "commands": [ + "./commands/" + ], + "domain": "saas" +} diff --git a/plugins/saas-react-platform/README.md b/plugins/saas-react-platform/README.md new file mode 100644 index 0000000..a421927 --- /dev/null +++ b/plugins/saas-react-platform/README.md @@ -0,0 +1,73 @@ +# saas-react-platform Plugin + +React frontend development toolkit with component scaffolding, routing, state management, and anti-pattern detection for Claude Code. + +## Overview + +The saas-react-platform plugin provides a complete React development toolkit that adapts to your project's framework (Next.js, Vite, CRA, Remix). It generates TypeScript-first components, configures routing, sets up state management patterns, and audits your component tree for anti-patterns. 
+ +Key features: +- **Framework-aware**: Detects Next.js (App Router/Pages Router), Vite, CRA, Remix +- **TypeScript-first**: Every generated file includes proper types, generics, and interfaces +- **Component scaffolding**: UI, page, layout, and form component templates with tests +- **Routing setup**: File-based routing (Next.js) and client-side routing (React Router) +- **State patterns**: Context, Zustand, and Redux Toolkit with guided selection +- **Anti-pattern detection**: Component tree analysis, hook compliance, TypeScript strictness + +## Installation + +This plugin is part of the Leo Claude Marketplace. Install via the marketplace or copy the `plugins/saas-react-platform/` directory to your Claude Code plugins path. + +## Commands + +| Command | Description | +|---------|-------------| +| `/react setup` | Setup wizard — detect framework, TypeScript, CSS approach | +| `/react component` | Scaffold component with props, types, and tests | +| `/react route` | Add route with page component, layout, and error boundary | +| `/react state` | Set up state management (Context, Zustand, Redux Toolkit) | +| `/react hook` | Generate custom hook with types and tests | +| `/react lint` | Validate component tree and detect anti-patterns | + +## Quick Start + +``` +/react setup # Detect project configuration +/react component UserProfile --type page # Scaffold a page component +/react route dashboard --protected # Add protected route +/react state auth --pattern context # Set up auth context +/react hook useDebounce --type lifecycle # Generate custom hook +/react lint # Audit component tree +``` + +## Agents + +| Agent | Model | Role | +|-------|-------|------| +| `react-architect` | Sonnet | Component design, routing, state management, hook generation | +| `react-auditor` | Haiku | Read-only component tree analysis and anti-pattern detection | + +## Skills + +| Skill | Purpose | +|-------|---------| +| `framework-detection` | Detect Next.js, Vite, CRA, Remix, 
TypeScript, CSS approach | +| `component-patterns` | Functional components, prop typing, exports, co-located tests | +| `state-patterns` | React Context, Zustand, Redux Toolkit selection and templates | +| `routing-conventions` | File-based and client-side routing patterns | +| `typescript-patterns` | Utility types, generics, discriminated unions for React | +| `visual-header` | Standard visual output formatting | + +## Supported Frameworks + +| Framework | Version | Routing | Status | +|-----------|---------|---------|--------| +| Next.js (App Router) | 13.4+ | File-based (`app/`) | Full support | +| Next.js (Pages Router) | 12+ | File-based (`pages/`) | Full support | +| Vite + React | 4+ | React Router | Full support | +| Create React App | 5+ | React Router | Full support | +| Remix | 2+ | File-based (`routes/`) | Basic support | + +## License + +MIT License — Part of the Leo Claude Marketplace. diff --git a/plugins/saas-react-platform/agents/react-architect.md b/plugins/saas-react-platform/agents/react-architect.md new file mode 100644 index 0000000..0243bf0 --- /dev/null +++ b/plugins/saas-react-platform/agents/react-architect.md @@ -0,0 +1,115 @@ +--- +name: react-architect +description: Component design, routing setup, and state management for React projects. Use when scaffolding components, adding routes, setting up state patterns, or generating custom hooks. +model: sonnet +permissionMode: default +--- + +# React Architect Agent + +You are a React frontend architecture specialist. Your role is to scaffold components, configure routing, set up state management patterns, and generate custom hooks following modern React best practices with full TypeScript support. 
 + +## Visual Output Requirements + +**MANDATORY: Display header at start of every response.** + +``` ++----------------------------------------------------------------------+ +| REACT-PLATFORM - [Command Name] | +| [Context Line] | ++----------------------------------------------------------------------+ +``` + +## Trigger Conditions + +Activate this agent when: +- User runs `/react setup` +- User runs `/react component <ComponentName>` +- User runs `/react route <path>` +- User runs `/react state <name>` +- User runs `/react hook <useHookName>` + +## Skills to Load + +- skills/framework-detection.md +- skills/component-patterns.md +- skills/state-patterns.md +- skills/routing-conventions.md +- skills/typescript-patterns.md +- skills/visual-header.md + +## Core Principles + +### Framework Awareness +Always adapt output to the detected framework: +- Next.js App Router: server components by default, `'use client'` directive when needed +- Next.js Pages Router: `getServerSideProps`/`getStaticProps` for data fetching +- Vite + React: client-side routing with React Router or TanStack Router +- Use project-specific conventions detected during `/react setup` + +### TypeScript First +- Every component gets a typed props interface +- Every hook gets typed parameters and return values +- Use generics for reusable patterns (e.g., `useLocalStorage`) +- Prefer discriminated unions over optional props for variant states +- Avoid `any` — use `unknown` with type guards when type is uncertain + +### Co-location +- Tests next to components (`Button.test.tsx` beside `Button.tsx`) +- Types in same file unless complex (then `Button.types.ts`) +- Styles co-located (CSS Modules, Tailwind, or styled-components) +- Stories co-located if Storybook detected (`Button.stories.tsx`) + +### Composition Over Inheritance +- Functional components exclusively (no class components) +- Composition via `children` prop and render props +- Custom hooks for shared logic extraction +- Higher-order components only as last resort (prefer hooks) + 
+### Performance by Default +- Use `React.memo` for expensive pure components +- Use `useCallback` for handlers passed as props to memoized children +- Use `useMemo` for expensive computations +- Lazy load page components with `React.lazy()` and `Suspense` +- Avoid unnecessary re-renders: extract static JSX outside component + +## Operating Modes + +### Setup Mode +- Detect framework, TypeScript, CSS approach, test runner +- Analyze existing project structure and conventions +- Store configuration for consistent scaffolding + +### Component Mode +- Generate component file with typed props +- Generate test file with render and interaction tests +- Update barrel files if applicable + +### Route Mode +- Create page component at correct path for routing system +- Add layout, error boundary, loading state as needed +- Update router config for client-side routing + +### State Mode +- Scaffold Context, Zustand, or Redux Toolkit store +- Generate typed actions, selectors, hooks +- Wire into application provider tree + +### Hook Mode +- Generate custom hook with full TypeScript types +- Include cleanup, error handling, loading states +- Generate test with renderHook + +## Error Handling + +| Error | Response | +|-------|----------| +| Not a React project | "No React dependency found in package.json. Run `npm create vite@latest` or `npx create-next-app` first." | +| TypeScript not configured | WARN: generate `.jsx` files, suggest adding TypeScript | +| Component name conflict | Ask user to confirm overwrite or choose different name | +| Unknown CSS framework | Default to inline styles, suggest configuring via `/react setup` | +| State library not installed | Display install command, ask user to install first | + +## Communication Style + +Practical and instructive. Show the generated code with clear comments explaining each section. After scaffolding, display a usage example showing how to import and use the created component/hook/route in the project. 
Mention any manual steps required (e.g., adding navigation links, installing dependencies). diff --git a/plugins/saas-react-platform/agents/react-auditor.md b/plugins/saas-react-platform/agents/react-auditor.md new file mode 100644 index 0000000..9902305 --- /dev/null +++ b/plugins/saas-react-platform/agents/react-auditor.md @@ -0,0 +1,119 @@ +--- +name: react-auditor +description: Read-only analysis of React component tree and anti-pattern detection. Use when linting components, detecting code smells, or auditing TypeScript usage. +model: haiku +permissionMode: plan +disallowedTools: Write, Edit, MultiEdit +--- + +# React Auditor Agent + +You are a strict React code quality auditor. Your role is to analyze component trees, detect anti-patterns, validate TypeScript usage, and report issues with actionable fixes. You never modify files — analysis and reporting only. + +## Visual Output Requirements + +**MANDATORY: Display header at start of every response.** + +``` ++----------------------------------------------------------------------+ +| REACT-PLATFORM - Lint | +| [Target Path] | ++----------------------------------------------------------------------+ +``` + +## Trigger Conditions + +Activate this agent when: +- User runs `/react lint [path]` +- Architect agent requests component tree validation + +## Skills to Load + +- skills/component-patterns.md +- skills/typescript-patterns.md +- skills/visual-header.md + +## Audit Categories + +### Component Quality (Always Checked) +- Missing or incomplete prop types +- Missing key prop in list rendering +- Index used as key in dynamic lists +- Inline function definitions in JSX props +- Components exceeding 200 lines +- Mixed concerns (data fetching + rendering in one component) +- Missing error boundaries on page components +- Unused props declared but never referenced + +### Hook Compliance (Always Checked) +- Hooks called conditionally or inside loops +- Missing cleanup functions in effects with subscriptions +- Stale 
closures from missing dependency array entries +- Over-specified dependency arrays causing unnecessary re-renders + +### State Architecture (Always Checked) +- Prop drilling (same prop through 3+ levels) +- State that could be derived from other state/props +- Multiple sequential setState calls in event handlers +- Context providers wrapping entire app for localized state +- Unnecessary global state for ephemeral UI values + +### TypeScript Strictness (--strict Mode Only) +- Explicit or implicit `any` usage +- Components without explicit return type annotation +- Non-null assertion operator (`!`) usage +- Excessive type assertions (`as`) indicating design issues +- Missing generic constraints on reusable components + +## Severity Definitions + +| Level | Criteria | Action Required | +|-------|----------|-----------------| +| **FAIL** | Missing key prop, conditional hook calls, broken Rules of Hooks | Must fix — these cause runtime errors | +| **WARN** | Inline functions, large components, prop drilling, missing cleanup | Should fix — affects performance or maintainability | +| **INFO** | Missing displayName, missing return type, derived state opportunities | Consider for improvement | + +## Report Format + +``` ++----------------------------------------------------------------------+ +| REACT-PLATFORM - Lint | +| [path] | ++----------------------------------------------------------------------+ + +Files Scanned: N +Components Analyzed: N +Hooks Analyzed: N + +FAIL (N) + 1. [file:line] Description + Found: problematic code + Fix: corrective action + +WARN (N) + 1. [file:line] Description + Suggestion: improvement + +INFO (N) + 1. [file] Description + Note: context + +SUMMARY + Components: N clean, N with issues + Hooks: N clean, N with issues + +VERDICT: PASS | FAIL (N blocking issues) +``` + +## Error Handling + +| Error | Response | +|-------|----------| +| No React files found | "No .tsx/.jsx files found in target path." 
| +| Invalid path | "Path not found: {path}" | +| Parse error in file | WARN: "Could not parse {file}: {error}" — skip file, continue | +| Empty directory | "No files to analyze in {path}" | + +## Communication Style + +Precise and actionable. Every finding includes: exact file and line, what was found, and how to fix it. Group findings by severity. Prioritize FAIL issues that cause runtime errors over style issues. Include a clear PASS/FAIL verdict at the end. diff --git a/plugins/saas-react-platform/claude-md-integration.md b/plugins/saas-react-platform/claude-md-integration.md new file mode 100644 index 0000000..24a6aff --- /dev/null +++ b/plugins/saas-react-platform/claude-md-integration.md @@ -0,0 +1,81 @@ +# saas-react-platform Plugin - CLAUDE.md Integration + +Add this section to your project's CLAUDE.md to enable saas-react-platform plugin features. + +## Suggested CLAUDE.md Section + +```markdown +## React Development (saas-react-platform) + +This project uses the saas-react-platform plugin for React frontend development. 
+ +### Configuration + +**Framework**: Auto-detected from package.json and project structure +**TypeScript**: Auto-detected from tsconfig.json +**CSS Approach**: Auto-detected (Tailwind, CSS Modules, styled-components) + +### Available Commands + +| Command | Purpose | +|---------|---------| +| `/react setup` | Configure framework detection and conventions | +| `/react component` | Scaffold component with types and tests | +| `/react route` | Add route with page, layout, error boundary | +| `/react state` | Set up state management pattern | +| `/react hook` | Generate custom hook with types and tests | +| `/react lint` | Audit component tree for anti-patterns | + +### Component Conventions + +- Functional components only (no class components) +- TypeScript interfaces for all props +- Co-located tests (`Component.test.tsx` beside `Component.tsx`) +- Barrel files (`index.ts`) for clean imports + +### State Management Guide + +| Complexity | Pattern | Example | +|------------|---------|---------| +| Simple (theme, locale) | React Context | `/react state theme --pattern context` | +| Medium (cart, filters) | Zustand | `/react state cart --pattern zustand` | +| Complex (entities, async) | Redux Toolkit | `/react state products --pattern redux` | + +### Typical Workflow + +``` +/react setup # First-time detection +/react component DataTable --type ui # Scaffold component +/react route dashboard --layout DashboardLayout # Add route +/react lint src/components/ # Check for issues +``` +``` + +## Environment Variables + +No environment variables required. All configuration is auto-detected from the project structure. 
+ +## Typical Workflows + +### New Feature Development +``` +/react component FeatureName --type page # Create page component +/react route feature-name --layout MainLayout # Add route +/react state featureData --pattern zustand # Set up state +/react hook useFeatureData --type data # Create data hook +/react lint src/features/feature-name/ # Validate +``` + +### Component Library +``` +/react component Button --type ui # Presentational component +/react component Modal --type ui # Another component +/react lint src/components/ # Audit all components +``` + +### Form Development +``` +/react component ContactForm --type form # Form with validation +/react hook useContactForm --type form # Form logic hook +/react lint src/components/ContactForm/ # Check form patterns +``` diff --git a/plugins/saas-react-platform/commands/react-component.md b/plugins/saas-react-platform/commands/react-component.md new file mode 100644 index 0000000..9efc3a0 --- /dev/null +++ b/plugins/saas-react-platform/commands/react-component.md @@ -0,0 +1,74 @@ +--- +name: react component +--- + +# /react component - Scaffold React Component + +## Skills to Load +- skills/component-patterns.md +- skills/typescript-patterns.md +- skills/visual-header.md + +## Visual Output + +Display header: `REACT-PLATFORM - Component` + +## Usage + +``` +/react component [--type ] [--props ] [--no-test] +``` + +## Workflow + +### 1. Validate Component Name +- Must be PascalCase (e.g., `UserProfile`, `DataTable`) +- Reject reserved React names (`Component`, `Fragment`, `Suspense`) +- Check for existing component with same name — prompt before overwriting + +### 2. Determine Component Type +- `ui` (default): Presentational component — props in, JSX out, no side effects +- `page`: Page-level component with data fetching, loading/error states +- `layout`: Layout wrapper with children prop and optional sidebar/header slots +- `form`: Form component with controlled inputs, validation, submit handler + +### 3. 
Generate Component File +Using `skills/component-patterns.md` and `skills/typescript-patterns.md`: +- Create functional component with typed props interface +- Apply component type template (ui, page, layout, form) +- Use project's CSS approach (Tailwind classes, CSS modules import, styled-components) +- Include JSDoc comment block with `@component` and `@example` +- Export according to project convention (default or named) + +### 4. Generate Test File +Unless `--no-test` specified: +- Create co-located test file (`ComponentName.test.tsx`) +- Include basic render test +- Include props variation tests for each required prop +- Include accessibility test if `@testing-library/jest-dom` is available +- Use project's test runner (Jest or Vitest) + +### 5. Generate Types File (if complex props) +If more than 5 props or nested types: +- Create separate types file (`ComponentName.types.ts`) +- Export props interface and any supporting types +- Import types in component file + +### 6. Update Barrel File +If project uses barrel files (`index.ts`): +- Add export to nearest `index.ts` +- Create `index.ts` if component is in its own directory + +### 7. 
Summary +- Display created files with paths +- Show component usage example in JSX + +## Examples + +``` +/react component Button # Basic UI component +/react component UserProfile --type page # Page with data fetching +/react component DashboardLayout --type layout # Layout wrapper +/react component LoginForm --type form # Form with validation +/react component DataTable --props data:T[],columns:Column[],onSort:Function +``` diff --git a/plugins/saas-react-platform/commands/react-hook.md b/plugins/saas-react-platform/commands/react-hook.md new file mode 100644 index 0000000..f912ffa --- /dev/null +++ b/plugins/saas-react-platform/commands/react-hook.md @@ -0,0 +1,80 @@ +--- +name: react hook +--- + +# /react hook - Generate Custom Hook + +## Skills to Load +- skills/typescript-patterns.md +- skills/component-patterns.md +- skills/visual-header.md + +## Visual Output + +Display header: `REACT-PLATFORM - Custom Hook` + +## Usage + +``` +/react hook [--type ] [--no-test] +``` + +## Workflow + +### 1. Validate Hook Name +- Must start with `use` (e.g., `useAuth`, `useDebounce`, `useLocalStorage`) +- Must be camelCase after the `use` prefix +- Check for existing hook with same name — prompt before overwriting + +### 2. 
Determine Hook Type +- `data` (default): Hooks that manage data fetching, caching, or transformation + - Template includes: state for data/loading/error, fetch function, cleanup + - Example: `useUsers`, `useApiCall`, `useInfiniteScroll` +- `ui`: Hooks that manage UI state or DOM interactions + - Template includes: ref handling, event listeners, cleanup on unmount + - Example: `useMediaQuery`, `useClickOutside`, `useIntersectionObserver` +- `form`: Hooks that manage form state and validation + - Template includes: values state, errors state, handlers, validation, submit + - Example: `useForm`, `useFieldValidation`, `useMultiStepForm` +- `lifecycle`: Hooks that wrap React lifecycle patterns + - Template includes: effect with cleanup, dependency management + - Example: `useDebounce`, `useInterval`, `usePrevious` + +### 3. Generate Hook File +- Create hook file in project's hooks directory (`src/hooks/` or `hooks/`) +- Include TypeScript generics where appropriate (e.g., `useLocalStorage`) +- Include proper cleanup in `useEffect` return functions +- Follow React hooks rules: no conditional hooks, stable dependency arrays +- Include JSDoc with `@param`, `@returns`, and `@example` + +### 4. Generate Test File +Unless `--no-test` specified: +- Create test file using `@testing-library/react-hooks` or `renderHook` from `@testing-library/react` +- Test initial state +- Test state transitions after actions +- Test cleanup behavior +- Test error states (for data hooks) +- Use `act()` wrapper for state updates + +### 5. Generate Types +- Export hook parameter types and return type as named interfaces +- Use generics for reusable hooks (e.g., `useLocalStorage(key: string, initialValue: T): [T, (value: T) => void]`) +- Include discriminated union types for loading/error/success states + +### 6. Update Barrel File +If hooks directory has `index.ts`: +- Add export for new hook + +### 7. 
Summary +- Display created files +- Show usage example in a component + +## Examples + +``` +/react hook useAuth --type data # Auth state management +/react hook useDebounce --type lifecycle # Debounced value hook +/react hook useClickOutside --type ui # Click outside detection +/react hook useContactForm --type form # Form with validation +/react hook useLocalStorage # Generic localStorage hook +``` diff --git a/plugins/saas-react-platform/commands/react-lint.md b/plugins/saas-react-platform/commands/react-lint.md new file mode 100644 index 0000000..190d743 --- /dev/null +++ b/plugins/saas-react-platform/commands/react-lint.md @@ -0,0 +1,127 @@ +--- +name: react lint +--- + +# /react lint - Component Tree Analysis + +## Skills to Load +- skills/component-patterns.md +- skills/typescript-patterns.md +- skills/visual-header.md + +## Visual Output + +Display header: `REACT-PLATFORM - Lint` + +## Usage + +``` +/react lint [path] [--fix] [--strict] +``` + +## Workflow + +### 1. Scan Target +- Default path: project root (all `.tsx`, `.ts`, `.jsx`, `.js` files) +- If specific path provided, scan only that directory/file +- Exclude: `node_modules/`, `dist/`, `build/`, `.next/`, `coverage/` + +### 2. 
Component Structure Analysis +Check each component file for: + +| Check | Severity | Description | +|-------|----------|-------------| +| Missing prop types | FAIL | Components without TypeScript interface or PropTypes | +| Unused props | WARN | Props declared in interface but never used in JSX | +| Missing display name | INFO | Components without `displayName` (matters for DevTools) | +| Inline function props | WARN | Functions defined inline in JSX (`onClick={() => ...}`) | +| Missing key prop | FAIL | List rendering without `key` prop | +| Index as key | WARN | Using array index as `key` in dynamic lists | +| Large component | WARN | Component exceeds 200 lines (suggest splitting) | +| Mixed concerns | WARN | Data fetching + rendering in same component | +| Missing error boundary | INFO | Page components without error boundary | + +### 3. Hook Analysis +Check custom hooks for: + +| Check | Severity | Description | +|-------|----------|-------------| +| Missing cleanup | WARN | `useEffect` with subscription/listener but no cleanup return | +| Stale closure | WARN | State variables used in effect without being in dependency array | +| Conditional hook call | FAIL | Hook called inside condition, loop, or after early return | +| Missing dependency | WARN | Values used in effect but missing from dependency array | + +### 4. State Management Analysis +Check state patterns for: + +| Check | Severity | Description | +|-------|----------|-------------| +| Prop drilling | WARN | Same prop passed through 3+ component levels | +| Unnecessary state | INFO | State that could be derived from existing state or props | +| Multiple setState calls | INFO | Sequential `setState` calls that could be a single update | +| Context overuse | WARN | Context provider wrapping entire app for localized state | + +### 5. 
TypeScript Analysis (--strict mode) +Additional checks when `--strict` enabled: + +| Check | Severity | Description | +|-------|----------|-------------| +| `any` type usage | WARN | Explicit or implicit `any` in component code | +| Missing return type | INFO | Components without explicit return type | +| Non-null assertion | WARN | Use of `!` operator instead of proper null checking | +| Type assertion overuse | WARN | Frequent `as` casts suggesting type design issues | + +### 6. Report + +``` ++----------------------------------------------------------------------+ +| REACT-PLATFORM - Lint | +| /src/components | ++----------------------------------------------------------------------+ + +Files Scanned: 24 +Components Analyzed: 18 +Hooks Analyzed: 6 + +FAIL (2) + 1. [src/components/UserList.tsx:45] Missing key prop + Found: inside .map() + Fix: Add key={user.id} to + + 2. [src/hooks/useData.ts:12] Conditional hook call + Found: if (enabled) { useState(...) } + Fix: Move hook before condition, use enabled as guard inside + +WARN (3) + 1. [src/components/Dashboard.tsx] Large component (287 lines) + Suggestion: Extract chart section into DashboardCharts component + + 2. [src/components/Form.tsx:23] Inline function prop + Found: onChange={() => setValue(e.target.value)} + Suggestion: Extract to named handler function + + 3. [src/hooks/useWebSocket.ts:18] Missing cleanup + Found: useEffect with addEventListener but no removeEventListener + Fix: Return cleanup function from useEffect + +INFO (1) + 1. 
[src/components/Card.tsx] Missing displayName + Suggestion: Add Card.displayName = 'Card' + +SUMMARY + Components: 16 clean, 2 with issues + Hooks: 5 clean, 1 with issues + Anti-patterns: 0 FAIL, 3 WARN, 1 INFO + +VERDICT: FAIL (2 blocking issues) +``` + +## Examples + +``` +/react lint # Lint entire project +/react lint src/components/ # Lint specific directory +/react lint src/components/Form.tsx # Lint single file +/react lint --strict # Include TypeScript checks +/react lint --fix # Auto-fix where possible +``` diff --git a/plugins/saas-react-platform/commands/react-route.md b/plugins/saas-react-platform/commands/react-route.md new file mode 100644 index 0000000..14e1463 --- /dev/null +++ b/plugins/saas-react-platform/commands/react-route.md @@ -0,0 +1,78 @@ +--- +name: react route +--- + +# /react route - Add Route + +## Skills to Load +- skills/routing-conventions.md +- skills/framework-detection.md +- skills/visual-header.md + +## Visual Output + +Display header: `REACT-PLATFORM - Route` + +## Usage + +``` +/react route [--dynamic ] [--layout ] [--protected] [--error-boundary] +``` + +## Workflow + +### 1. Detect Routing System +Using `skills/framework-detection.md` and `skills/routing-conventions.md`: +- Next.js App Router: file-based routing in `app/` directory +- Next.js Pages Router: file-based routing in `pages/` directory +- React Router (Vite/CRA): route definitions in router config +- Remix: file-based routing with loader/action conventions + +### 2. Create Page Component +- Generate page component file at the correct path for the routing system: + - App Router: `app//page.tsx` + - Pages Router: `pages/.tsx` or `pages//index.tsx` + - React Router: `src/pages/.tsx` +- Include loading and error state handling +- If `--dynamic `: create dynamic route segment (`[param]` or `:param`) +- If `--protected`: wrap with authentication check or redirect + +### 3. 
Create Layout (if requested) +If `--layout` specified: +- App Router: create `app//layout.tsx` +- Pages Router: create layout component and wrap page +- React Router: create layout route component with `` + +### 4. Create Error Boundary +If `--error-boundary` or for page-type routes by default: +- App Router: create `app//error.tsx` +- Other frameworks: create ErrorBoundary wrapper component +- Include fallback UI with retry action +- Log errors to console (placeholder for error reporting service) + +### 5. Create Loading State +For App Router projects: +- Create `app//loading.tsx` with skeleton UI +For other frameworks: +- Include Suspense boundary in page component + +### 6. Update Router Config (if applicable) +For React Router projects: +- Add route entry to router configuration file +- Include lazy loading with `React.lazy()` for code splitting +- Wire up layout if specified + +### 7. Summary +- Display created files with paths +- Show route URL pattern +- Note any manual steps required (e.g., adding navigation links) + +## Examples + +``` +/react route dashboard # /dashboard page +/react route users --dynamic id # /users/:id dynamic route +/react route settings --layout SettingsLayout # /settings with layout +/react route admin --protected # /admin with auth guard +/react route products --dynamic slug --error-boundary # Full setup +``` diff --git a/plugins/saas-react-platform/commands/react-setup.md b/plugins/saas-react-platform/commands/react-setup.md new file mode 100644 index 0000000..e732996 --- /dev/null +++ b/plugins/saas-react-platform/commands/react-setup.md @@ -0,0 +1,65 @@ +--- +name: react setup +--- + +# /react setup - React Project Setup Wizard + +## Skills to Load +- skills/framework-detection.md +- skills/visual-header.md + +## Visual Output + +Display header: `REACT-PLATFORM - Setup Wizard` + +## Usage + +``` +/react setup +``` + +## Workflow + +### Phase 1: Framework Detection +- Scan for `package.json` to confirm Node.js project +- Detect 
framework using `skills/framework-detection.md`: + - Next.js (check for `next` in dependencies, `next.config.*`) + - Vite + React (check for `vite` and `@vitejs/plugin-react`) + - Create React App (check for `react-scripts`) + - Remix (check for `@remix-run/react`) +- Detect App Router vs Pages Router for Next.js projects +- Identify TypeScript usage (`tsconfig.json`, `.tsx` files) + +### Phase 2: Project Structure Analysis +- Scan directory structure for existing patterns: + - Component directory: `src/components/`, `components/`, `app/components/` + - Page directory: `src/pages/`, `app/`, `src/app/` + - Hook directory: `src/hooks/`, `hooks/` + - Test patterns: `__tests__/`, `*.test.tsx`, `*.spec.tsx` +- Detect existing barrel files (`index.ts` re-exports) +- Check for existing state management (Redux store, Zustand stores, Context providers) + +### Phase 3: Convention Configuration +- Confirm or override detected patterns: + - Component naming: PascalCase (default) + - File naming: PascalCase (`Button.tsx`) or kebab-case (`button.tsx`) + - Test co-location: same directory (`Button.test.tsx`) or `__tests__/` subdirectory + - CSS approach: CSS Modules, Tailwind, styled-components, vanilla extract + - Export style: named exports, default exports, or barrel files + +### Phase 4: Summary +- Display detected configuration: + - Framework and version + - TypeScript: yes/no + - Component directory + - Routing pattern + - State management (if detected) + - CSS approach + - Test runner (Jest, Vitest) +- Store configuration for future commands + +## Important Notes + +- Uses Bash, Read, Write, AskUserQuestion tools +- Does not install packages — only detects and configures +- Configuration stored in project for consistent scaffolding diff --git a/plugins/saas-react-platform/commands/react-state.md b/plugins/saas-react-platform/commands/react-state.md new file mode 100644 index 0000000..e5cdf54 --- /dev/null +++ b/plugins/saas-react-platform/commands/react-state.md @@ -0,0 
+1,77 @@ +--- +name: react state +--- + +# /react state - State Management Setup + +## Skills to Load +- skills/state-patterns.md +- skills/visual-header.md + +## Visual Output + +Display header: `REACT-PLATFORM - State Management` + +## Usage + +``` +/react state [--pattern ] [--actions ] +``` + +## Workflow + +### 1. Analyze State Requirements +- Ask user about the state scope: + - **Local**: Component-level state (suggest `useState`/`useReducer` — no scaffolding needed) + - **Shared**: Cross-component state within a feature (suggest Context or Zustand) + - **Global**: App-wide state with complex logic (suggest Redux Toolkit or Zustand) +- If `--pattern` specified, skip detection and use requested pattern +- Check `package.json` for existing state libraries + +### 2. Select Pattern +Using `skills/state-patterns.md`: +- **React Context**: For simple shared state (theme, auth, locale). No additional dependencies. +- **Zustand**: For medium complexity. Minimal boilerplate, good DevTools support. +- **Redux Toolkit**: For complex state with middleware, async thunks, entity adapters. +- If library not installed, ask user to install it first (display exact `npm install` command) + +### 3. 
Generate Store + +#### Context Pattern +- Create context file with typed state interface +- Create provider component with `useReducer` for state + dispatch +- Create custom hook (`use`) with context validation +- Create action types and reducer function + +#### Zustand Pattern +- Create store file with typed state and actions +- Include DevTools middleware (if zustand version supports it) +- Create selector hooks for computed/derived state +- Include persist middleware setup (optional, ask user) + +#### Redux Toolkit Pattern +- Create slice file with `createSlice` (state, reducers, extraReducers) +- Create async thunks with `createAsyncThunk` if API calls needed +- Create typed hooks (`useAppDispatch`, `useAppSelector`) if not existing +- Add slice to root store configuration +- Create selector functions for memoized state access + +### 4. Generate Actions +If `--actions` specified: +- Create action creators/reducers for each named action +- Type the action payloads +- Include in the store/slice definition + +### 5. Summary +- Display created files +- Show usage example with import and hook usage +- List available actions/selectors + +## Examples + +``` +/react state auth --pattern context --actions login,logout,setUser +/react state cart --pattern zustand --actions addItem,removeItem,clearCart +/react state products --pattern redux --actions fetchAll,fetchById,updateProduct +/react state theme --pattern context --actions toggle,setMode +``` diff --git a/plugins/saas-react-platform/commands/react.md b/plugins/saas-react-platform/commands/react.md new file mode 100644 index 0000000..90a8b7c --- /dev/null +++ b/plugins/saas-react-platform/commands/react.md @@ -0,0 +1,18 @@ +--- +description: React development toolkit — component scaffolding, routing, state management, and linting +--- + +# /react + +React frontend development toolkit with component scaffolding, routing, state management, and anti-pattern detection. 
+ +## Sub-commands + +| Sub-command | Description | +|-------------|-------------| +| `/react setup` | Setup wizard for React project detection and configuration | +| `/react component` | Scaffold component with props, types, and tests | +| `/react route` | Add route with page component and error boundary | +| `/react state` | Set up state management pattern (Context, Zustand, Redux Toolkit) | +| `/react hook` | Generate custom hook with types and tests | +| `/react lint` | Validate component tree and detect anti-patterns | diff --git a/plugins/saas-react-platform/skills/component-patterns.md b/plugins/saas-react-platform/skills/component-patterns.md new file mode 100644 index 0000000..bfe53ed --- /dev/null +++ b/plugins/saas-react-platform/skills/component-patterns.md @@ -0,0 +1,134 @@ +--- +name: component-patterns +description: Component structure conventions including functional components, prop typing, exports, and co-located tests +--- + +# Component Patterns + +## Purpose + +Define standard patterns for React component scaffolding. This skill ensures all generated components follow consistent structure, typing, export conventions, and test co-location. + +--- + +## Component File Structure + +Every component file follows this order: + +```typescript +// 1. Imports (external first, then internal, then styles) +import { type FC } from 'react'; +import { Button } from '@/components/ui/Button'; +import styles from './ComponentName.module.css'; + +// 2. Types (inline for simple, separate file for complex) +interface ComponentNameProps { + title: string; + onAction: () => void; + children?: React.ReactNode; +} + +// 3. Component definition +/** + * Brief description of what this component does. + * + * @component + * @example + * console.log('clicked')} /> + */ +const ComponentName: FC = ({ title, onAction, children }) => { + return ( +
+    <div className={styles.container}>
+      <h2>{title}</h2>
+      <button onClick={onAction}>
+        {children}
+      </button>
+    </div>
+ ); +}; + +// 4. Display name (for DevTools) +ComponentName.displayName = 'ComponentName'; + +// 5. Export +export default ComponentName; +``` + +## Component Type Templates + +### UI Component (presentational) +- Props in, JSX out — no side effects, no data fetching +- Pure function: same props always produce same output +- Accept `className` prop for style override flexibility +- Accept `children` if component is a container/wrapper + +### Page Component +- Includes data fetching (server component in App Router, `useEffect` in client) +- Loading state with skeleton placeholder +- Error state with retry action +- `'use client'` directive only if client interactivity required (App Router) + +### Layout Component +- Accepts `children: React.ReactNode` as required prop +- Optional slot props for sidebar, header, footer +- Handles responsive behavior +- Wraps with error boundary + +### Form Component +- Controlled inputs with `useState` or form library (`react-hook-form`) +- Typed form values interface +- Validation schema (Zod recommended) +- Submit handler with loading state +- Error display per field and form-level + +## Test Co-location Patterns + +Test file sits next to component file: + +``` +src/components/Button/ + Button.tsx + Button.test.tsx + Button.module.css (if CSS Modules) + index.ts (barrel file) +``` + +### Minimum Test Coverage + +```typescript +import { render, screen } from '@testing-library/react'; +import userEvent from '@testing-library/user-event'; +import ComponentName from './ComponentName'; + +describe('ComponentName', () => { + it('renders without crashing', () => { + render( {}} />); + expect(screen.getByText('Test')).toBeInTheDocument(); + }); + + it('calls onAction when button clicked', async () => { + const onAction = vi.fn(); + render(); + await userEvent.click(screen.getByRole('button')); + expect(onAction).toHaveBeenCalledOnce(); + }); +}); +``` + +## Barrel File Convention + +Each component directory exports through `index.ts`: + 
+```typescript +export { default as ComponentName } from './ComponentName'; +export type { ComponentNameProps } from './ComponentName'; +``` + +## Anti-Patterns to Avoid + +| Pattern | Why | Alternative | +|---------|-----|-------------| +| Class components | Legacy API, verbose | Functional components + hooks | +| `React.FC` with children | Children always optional, incorrect type narrowing | Explicit `children` prop in interface | +| Prop spreading `{...props}` | Obscures expected interface | Explicitly destructure needed props | +| `useEffect` for derived state | Unnecessary render cycle | Compute during render or `useMemo` | +| `forwardRef` without `displayName` | Unnamed in DevTools | Always set `displayName` | diff --git a/plugins/saas-react-platform/skills/framework-detection.md b/plugins/saas-react-platform/skills/framework-detection.md new file mode 100644 index 0000000..0210f85 --- /dev/null +++ b/plugins/saas-react-platform/skills/framework-detection.md @@ -0,0 +1,96 @@ +--- +name: framework-detection +description: Detect React framework, TypeScript configuration, routing structure, and project conventions +--- + +# Framework Detection + +## Purpose + +Analyze a project's `package.json`, configuration files, and directory structure to determine the React framework, TypeScript usage, routing pattern, and CSS approach. This skill is loaded at the start of setup and routing commands to adapt output to the project's specific toolchain. 
+ +--- + +## Framework Detection Rules + +Check `package.json` dependencies and devDependencies in this order (first match wins): + +| Framework | Detection Criteria | Routing | +|-----------|-------------------|---------| +| **Next.js (App Router)** | `next` in deps + `app/` directory exists | File-based (`app/page.tsx`) | +| **Next.js (Pages Router)** | `next` in deps + `pages/` directory exists (no `app/`) | File-based (`pages/index.tsx`) | +| **Remix** | `@remix-run/react` in deps | File-based with loaders (`routes/`) | +| **Vite + React** | `vite` in deps + `@vitejs/plugin-react` | Client-side (react-router or tanstack-router) | +| **Create React App** | `react-scripts` in deps | Client-side (react-router) | +| **Gatsby** | `gatsby` in deps | File-based (`src/pages/`) | + +## TypeScript Detection + +| Signal | Conclusion | +|--------|------------| +| `tsconfig.json` exists | TypeScript project | +| `typescript` in devDependencies | TypeScript project | +| `.tsx` files in `src/` or `app/` | TypeScript project | +| None of the above | JavaScript project — generate `.jsx` files | + +## CSS Approach Detection + +Check in order: + +| Signal | Approach | +|--------|----------| +| `tailwindcss` in deps + `tailwind.config.*` | Tailwind CSS | +| `*.module.css` or `*.module.scss` files exist | CSS Modules | +| `styled-components` in deps | styled-components | +| `@emotion/react` in deps | Emotion | +| `vanilla-extract` in deps | Vanilla Extract | +| None detected | Plain CSS or inline styles | + +## Test Runner Detection + +| Signal | Runner | +|--------|--------| +| `vitest` in devDependencies | Vitest | +| `jest` in devDependencies or `jest.config.*` exists | Jest | +| `@testing-library/react` in deps | Testing Library (works with both) | +| `cypress` in deps | Cypress (E2E, not unit) | + +## State Management Detection + +| Signal | Library | +|--------|---------| +| `zustand` in dependencies | Zustand | +| `@reduxjs/toolkit` in dependencies | Redux Toolkit | +| 
`recoil` in dependencies | Recoil | +| `jotai` in dependencies | Jotai | +| `mobx-react` in dependencies | MobX | +| Files with `createContext` + `useReducer` pattern | React Context (built-in) | + +## Directory Structure Patterns + +Common patterns to detect and respect: + +| Pattern | Typical Path | Detection | +|---------|-------------|-----------| +| Feature-based | `src/features//components/` | `features/` directory with subdirectories | +| Component-based | `src/components//` | `components/` with PascalCase subdirectories | +| Flat components | `src/components/*.tsx` | `components/` with files only, no subdirectories | +| Atomic design | `src/components/atoms/`, `molecules/`, `organisms/` | Atomic naming directories | + +## Output + +Store detected configuration as a reference object for other skills: + +```json +{ + "framework": "nextjs-app", + "typescript": true, + "css_approach": "tailwind", + "test_runner": "vitest", + "state_management": "zustand", + "component_dir": "src/components", + "pages_dir": "app", + "hooks_dir": "src/hooks", + "structure_pattern": "feature-based" +} +``` diff --git a/plugins/saas-react-platform/skills/routing-conventions.md b/plugins/saas-react-platform/skills/routing-conventions.md new file mode 100644 index 0000000..77b8651 --- /dev/null +++ b/plugins/saas-react-platform/skills/routing-conventions.md @@ -0,0 +1,174 @@ +--- +name: routing-conventions +description: File-based routing (Next.js), react-router conventions, dynamic routes, layouts, and middleware +--- + +# Routing Conventions + +## Purpose + +Define routing patterns for each supported framework. This skill ensures route scaffolding produces the correct file structure, naming conventions, and framework-specific boilerplate. 
+ +--- + +## Next.js App Router (v13.4+) + +### File Conventions + +| File | Purpose | +|------|---------| +| `page.tsx` | Route UI — required to make segment publicly accessible | +| `layout.tsx` | Shared layout wrapping child pages — persists across navigations | +| `loading.tsx` | Loading UI shown while page is loading (Suspense boundary) | +| `error.tsx` | Error UI shown when page throws (must be client component) | +| `not-found.tsx` | 404 UI for segment | +| `route.ts` | API route handler (GET, POST, etc.) | + +### Route Patterns + +``` +app/ + page.tsx # / + about/page.tsx # /about + blog/page.tsx # /blog + blog/[slug]/page.tsx # /blog/:slug (dynamic) + dashboard/ + layout.tsx # Shared dashboard layout + page.tsx # /dashboard + settings/page.tsx # /dashboard/settings + (marketing)/ # Route group (no URL segment) + pricing/page.tsx # /pricing +``` + +### Dynamic Routes + +| Pattern | File Path | URL Match | +|---------|-----------|-----------| +| Dynamic segment | `[id]/page.tsx` | `/users/123` | +| Catch-all | `[...slug]/page.tsx` | `/docs/a/b/c` | +| Optional catch-all | `[[...slug]]/page.tsx` | `/docs` or `/docs/a/b` | + +### Server vs Client Components + +- Pages are Server Components by default +- Add `'use client'` directive only when using: `useState`, `useEffect`, `onClick`, browser APIs +- Pass data from server to client via props, not through context + +## Next.js Pages Router (Legacy) + +### File Conventions + +``` +pages/ + index.tsx # / + about.tsx # /about + blog/index.tsx # /blog + blog/[slug].tsx # /blog/:slug + _app.tsx # App wrapper (layouts) + _document.tsx # HTML document customization + 404.tsx # Custom 404 page + api/users.ts # API route: /api/users +``` + +### Data Fetching + +| Method | When | Use Case | +|--------|------|----------| +| `getServerSideProps` | Every request | Dynamic data, auth-gated pages | +| `getStaticProps` | Build time | Blog posts, marketing pages | +| `getStaticPaths` | Build time | Dynamic routes with static 
generation | + +## React Router (v6+) + +### Route Definition + +```typescript +// router.tsx +import { createBrowserRouter, RouterProvider } from 'react-router-dom'; +import { lazy, Suspense } from 'react'; + +const Dashboard = lazy(() => import('./pages/Dashboard')); +const Settings = lazy(() => import('./pages/Settings')); + +const router = createBrowserRouter([ + { + path: '/', + element: , + errorElement: , + children: [ + { index: true, element: }, + { + path: 'dashboard', + element: }>, + }, + { + path: 'users/:id', + element: , + loader: userLoader, + }, + ], + }, +]); +``` + +### Layout Pattern + +```typescript +// layouts/RootLayout.tsx +import { Outlet } from 'react-router-dom'; + +export function RootLayout() { + return ( +
+    <div>
+      <header>{/* shared navigation */}</header>
+      <main>
+        <Outlet />
+      </main>
+    </div>
+ ); +} +``` + +## Protected Routes + +### Pattern: Auth Guard Component + +```typescript +function ProtectedRoute({ children }: { children: React.ReactNode }) { + const { isAuthenticated, isLoading } = useAuth(); + + if (isLoading) return ; + if (!isAuthenticated) return ; + + return <>{children}; +} +``` + +### App Router: Middleware + +```typescript +// middleware.ts (project root) +import { NextResponse } from 'next/server'; +import type { NextRequest } from 'next/server'; + +export function middleware(request: NextRequest) { + const token = request.cookies.get('session'); + if (!token && request.nextUrl.pathname.startsWith('/dashboard')) { + return NextResponse.redirect(new URL('/login', request.url)); + } + return NextResponse.next(); +} + +export const config = { matcher: ['/dashboard/:path*'] }; +``` + +## Error Boundaries + +Every page route should have an error boundary: + +- App Router: `error.tsx` file in route segment (automatically client component) +- React Router: `errorElement` prop on route definition +- Fallback: Generic `ErrorBoundary` component wrapping page content + +Include retry functionality and user-friendly error message. Log error details to console (placeholder for error reporting service). diff --git a/plugins/saas-react-platform/skills/state-patterns.md b/plugins/saas-react-platform/skills/state-patterns.md new file mode 100644 index 0000000..a4d3363 --- /dev/null +++ b/plugins/saas-react-platform/skills/state-patterns.md @@ -0,0 +1,203 @@ +--- +name: state-patterns +description: State management patterns — React Context for simple, Zustand for medium, Redux Toolkit for complex +--- + +# State Management Patterns + +## Purpose + +Guide state management decisions and provide scaffolding templates for React Context, Zustand, and Redux Toolkit. This skill helps select the right pattern based on complexity and generates consistent store implementations. 
+ +--- + +## Decision Framework + +| Criteria | Context | Zustand | Redux Toolkit | +|----------|---------|---------|---------------| +| **Scope** | Single feature, few consumers | Multiple features, medium consumers | App-wide, many consumers | +| **Complexity** | Simple values (theme, locale, auth) | Medium (cart, form wizard, filters) | Complex (normalized entities, async workflows) | +| **Async logic** | Manual with `useEffect` | Built-in with async actions | `createAsyncThunk` with lifecycle | +| **DevTools** | None built-in | Optional middleware | Full Redux DevTools integration | +| **Dependencies** | None (built-in React) | ~2KB, zero config | ~12KB, more boilerplate | +| **Learning curve** | Low | Low | Medium-High | + +### Quick Decision + +- Need to share a simple value across a few components? **Context** +- Need a store with some async logic and moderate complexity? **Zustand** +- Need normalized state, middleware, complex async flows, or strict patterns? **Redux Toolkit** + +## React Context Template + +```typescript +// stores/auth-context.tsx +import { createContext, useContext, useReducer, type ReactNode } from 'react'; + +// State type +interface AuthState { + user: User | null; + isAuthenticated: boolean; + isLoading: boolean; +} + +// Action types +type AuthAction = + | { type: 'LOGIN'; payload: User } + | { type: 'LOGOUT' } + | { type: 'SET_LOADING'; payload: boolean }; + +// Initial state +const initialState: AuthState = { + user: null, + isAuthenticated: false, + isLoading: false, +}; + +// Reducer +function authReducer(state: AuthState, action: AuthAction): AuthState { + switch (action.type) { + case 'LOGIN': + return { ...state, user: action.payload, isAuthenticated: true, isLoading: false }; + case 'LOGOUT': + return { ...state, user: null, isAuthenticated: false }; + case 'SET_LOADING': + return { ...state, isLoading: action.payload }; + default: + return state; + } +} + +// Context +const AuthContext = createContext<{ + state: AuthState; 
+ dispatch: React.Dispatch; +} | null>(null); + +// Provider +export function AuthProvider({ children }: { children: ReactNode }) { + const [state, dispatch] = useReducer(authReducer, initialState); + return ( + + {children} + + ); +} + +// Hook with validation +export function useAuth() { + const context = useContext(AuthContext); + if (!context) { + throw new Error('useAuth must be used within AuthProvider'); + } + return context; +} +``` + +## Zustand Template + +```typescript +// stores/cart-store.ts +import { create } from 'zustand'; +import { devtools, persist } from 'zustand/middleware'; + +interface CartItem { + id: string; + name: string; + price: number; + quantity: number; +} + +interface CartState { + items: CartItem[]; + addItem: (item: Omit) => void; + removeItem: (id: string) => void; + clearCart: () => void; + totalPrice: () => number; +} + +export const useCartStore = create()( + devtools( + persist( + (set, get) => ({ + items: [], + addItem: (item) => set((state) => { + const existing = state.items.find((i) => i.id === item.id); + if (existing) { + return { items: state.items.map((i) => + i.id === item.id ? 
{ ...i, quantity: i.quantity + 1 } : i + )}; + } + return { items: [...state.items, { ...item, quantity: 1 }] }; + }), + removeItem: (id) => set((state) => ({ + items: state.items.filter((i) => i.id !== id), + })), + clearCart: () => set({ items: [] }), + totalPrice: () => get().items.reduce( + (sum, item) => sum + item.price * item.quantity, 0 + ), + }), + { name: 'cart-storage' } + ) + ) +); +``` + +## Redux Toolkit Template + +```typescript +// store/slices/productsSlice.ts +import { createSlice, createAsyncThunk, type PayloadAction } from '@reduxjs/toolkit'; + +// Async thunk +export const fetchProducts = createAsyncThunk( + 'products/fetchAll', + async (_, { rejectWithValue }) => { + try { + const response = await fetch('/api/products'); + return await response.json(); + } catch (error) { + return rejectWithValue('Failed to fetch products'); + } + } +); + +// Slice +const productsSlice = createSlice({ + name: 'products', + initialState: { + items: [] as Product[], + status: 'idle' as 'idle' | 'loading' | 'succeeded' | 'failed', + error: null as string | null, + }, + reducers: { + updateProduct: (state, action: PayloadAction) => { + const index = state.items.findIndex((p) => p.id === action.payload.id); + if (index !== -1) state.items[index] = action.payload; + }, + }, + extraReducers: (builder) => { + builder + .addCase(fetchProducts.pending, (state) => { state.status = 'loading'; }) + .addCase(fetchProducts.fulfilled, (state, action) => { + state.status = 'succeeded'; + state.items = action.payload; + }) + .addCase(fetchProducts.rejected, (state, action) => { + state.status = 'failed'; + state.error = action.payload as string; + }); + }, +}); + +export const { updateProduct } = productsSlice.actions; +export default productsSlice.reducer; +``` + +## When NOT to Use Global State + +- Form input values (use local `useState` or `react-hook-form`) +- UI toggle state (modal open/close) unless shared across routes +- Computed values derivable from existing state 
(compute inline or `useMemo`) +- Server cache data (use TanStack Query or SWR instead of Redux) diff --git a/plugins/saas-react-platform/skills/typescript-patterns.md b/plugins/saas-react-platform/skills/typescript-patterns.md new file mode 100644 index 0000000..e993aa7 --- /dev/null +++ b/plugins/saas-react-platform/skills/typescript-patterns.md @@ -0,0 +1,137 @@ +--- +name: typescript-patterns +description: Utility types, generics for components, discriminated unions for props, and strict null checks +--- + +# TypeScript Patterns for React + +## Purpose + +Define TypeScript patterns specific to React component development. This skill ensures generated code uses idiomatic TypeScript with proper generic constraints, discriminated unions, and utility types. + +--- + +## Props Interface Conventions + +### Basic Props +```typescript +interface ButtonProps { + label: string; + onClick: () => void; + variant?: 'primary' | 'secondary' | 'ghost'; + disabled?: boolean; + className?: string; +} +``` + +### Props with Children +```typescript +interface CardProps { + title: string; + children: React.ReactNode; // Explicit, not via FC +} +``` + +### Discriminated Union Props +Use when a component has mutually exclusive modes: +```typescript +type AlertProps = + | { variant: 'success'; message: string } + | { variant: 'error'; message: string; retry: () => void } + | { variant: 'loading'; progress?: number }; +``` + +### Extending HTML Element Props +```typescript +interface InputProps extends Omit, 'size'> { + label: string; + error?: string; + size?: 'sm' | 'md' | 'lg'; // Custom size, not HTML size +} +``` + +## Generic Component Patterns + +### Generic List Component +```typescript +interface ListProps { + items: T[]; + renderItem: (item: T, index: number) => React.ReactNode; + keyExtractor: (item: T) => string; + emptyMessage?: string; +} + +function List({ items, renderItem, keyExtractor, emptyMessage }: ListProps) { + if (items.length === 0) return
+    <p>{emptyMessage ?? 'No items'}</p>
; + return
+    <ul>
+      {items.map((item, i) => (
+        <li key={keyExtractor(item)}>{renderItem(item, i)}</li>
+      ))}
+    </ul>
; +} +``` + +### Generic Hook +```typescript +function useLocalStorage(key: string, initialValue: T): [T, (value: T | ((prev: T) => T)) => void] { + // Implementation +} +``` + +## Utility Types for React + +| Type | Use Case | Example | +|------|----------|---------| +| `React.ReactNode` | Any renderable content | `children: React.ReactNode` | +| `React.ReactElement` | JSX element only (not string/number) | `icon: React.ReactElement` | +| `React.ComponentPropsWithRef<'div'>` | All div props including ref | Extending native elements | +| `React.MouseEventHandler` | Typed event handler | `onClick: React.MouseEventHandler` | +| `React.ChangeEvent` | Input change event | `(e: React.ChangeEvent) => void` | +| `React.FormEvent` | Form submit event | `onSubmit: React.FormEventHandler` | +| `React.CSSProperties` | Inline style object | `style?: React.CSSProperties` | + +## Common Utility Patterns + +### Required Pick +Make specific properties required from an otherwise optional interface: +```typescript +type RequiredName = Required> & Omit; +``` + +### Extract Prop Types from Component +```typescript +type ButtonProps = React.ComponentProps; +``` + +### Async State Pattern +```typescript +type AsyncState = + | { status: 'idle' } + | { status: 'loading' } + | { status: 'success'; data: T } + | { status: 'error'; error: string }; +``` + +## Strict Null Checking Patterns + +### Guard Hooks +```typescript +function useRequiredContext(context: React.Context, name: string): T { + const value = useContext(context); + if (value === null) throw new Error(`${name} must be used within its Provider`); + return value; +} +``` + +### Narrowing with Type Guards +```typescript +function isUser(value: unknown): value is User { + return typeof value === 'object' && value !== null && 'id' in value && 'email' in value; +} +``` + +## Things to Avoid + +| Anti-Pattern | Why | Alternative | +|-------------|-----|-------------| +| `React.FC` | Implicit children, no generics | Explicit typed 
function | +| `any` for event handlers | Loses type safety | `React.MouseEvent` | +| `as` for DOM queries | Runtime type mismatch risk | Type guards or `instanceof` | +| `!` non-null assertion | Hides potential null bugs | Conditional rendering or optional chaining | +| `enum` for prop variants | Not tree-shakeable, numeric by default | String union types | diff --git a/plugins/saas-react-platform/skills/visual-header.md b/plugins/saas-react-platform/skills/visual-header.md new file mode 100644 index 0000000..1638976 --- /dev/null +++ b/plugins/saas-react-platform/skills/visual-header.md @@ -0,0 +1,28 @@ +# Visual Header Skill + +Standard visual header for saas-react-platform commands. + +## Header Template + +``` ++----------------------------------------------------------------------+ +| REACT-PLATFORM - [Context] | ++----------------------------------------------------------------------+ +``` + +## Context Values by Command + +| Command | Context | +|---------|---------| +| `/react setup` | Setup Wizard | +| `/react component` | Component | +| `/react route` | Route | +| `/react state` | State Management | +| `/react hook` | Custom Hook | +| `/react lint` | Lint | +| Agent mode (react-architect) | Architecture | +| Agent mode (react-auditor) | Audit | + +## Usage + +Display header at the start of every command response before proceeding with the operation. 
diff --git a/plugins/saas-test-pilot/.claude-plugin/plugin.json b/plugins/saas-test-pilot/.claude-plugin/plugin.json new file mode 100644 index 0000000..e5a3ac9 --- /dev/null +++ b/plugins/saas-test-pilot/.claude-plugin/plugin.json @@ -0,0 +1,26 @@ +{ + "name": "saas-test-pilot", + "version": "1.0.0", + "description": "Test automation toolkit for unit, integration, and end-to-end testing", + "author": { + "name": "Leo Miranda", + "email": "leobmiranda@gmail.com" + }, + "homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/saas-test-pilot/README.md", + "repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git", + "license": "MIT", + "keywords": [ + "testing", + "unit-tests", + "integration-tests", + "e2e", + "coverage", + "fixtures", + "mocks", + "test-generation" + ], + "commands": [ + "./commands/" + ], + "domain": "saas" +} diff --git a/plugins/saas-test-pilot/README.md b/plugins/saas-test-pilot/README.md new file mode 100644 index 0000000..a18d5b2 --- /dev/null +++ b/plugins/saas-test-pilot/README.md @@ -0,0 +1,58 @@ +# saas-test-pilot + +Test automation toolkit for unit, integration, and end-to-end testing. + +## Overview + +saas-test-pilot provides intelligent test generation, coverage analysis, fixture management, and E2E scenario creation. It detects your project's test framework automatically and generates tests following best practices for pytest, Jest, Vitest, Playwright, and Cypress. 
+ +## Commands + +| Command | Description | +|---------|-------------| +| `/test setup` | Detect framework, configure test runner, initialize test structure | +| `/test generate` | Generate test cases for functions, classes, or modules | +| `/test coverage` | Analyze coverage and identify untested paths by risk | +| `/test fixtures` | Generate or manage test fixtures, factories, and mocks | +| `/test e2e` | Generate end-to-end test scenarios with page objects | +| `/test run` | Run tests with formatted output and failure analysis | + +## Agents + +| Agent | Model | Mode | Role | +|-------|-------|------|------| +| test-architect | sonnet | acceptEdits | Test generation, fixtures, E2E design | +| coverage-analyst | haiku | plan (read-only) | Coverage analysis and gap detection | + +## Skills + +| Skill | Purpose | +|-------|---------| +| framework-detection | Auto-detect pytest/Jest/Vitest/Playwright and config files | +| test-patterns | AAA, BDD, page object model, and other test design patterns | +| mock-patterns | Mocking strategies: mock vs stub vs spy, DI patterns | +| coverage-analysis | Gap detection, risk scoring, prioritization | +| fixture-management | conftest.py patterns, factory_boy, shared fixtures | +| visual-header | Consistent command output headers | + +## Supported Frameworks + +### Unit / Integration +- **Python:** pytest, unittest +- **JavaScript/TypeScript:** Jest, Vitest, Mocha + +### End-to-End +- **Playwright** (recommended) +- **Cypress** + +### Coverage +- **Python:** pytest-cov (coverage.py) +- **JavaScript:** istanbul/nyc, c8, vitest built-in + +## Installation + +This plugin is part of the Leo Claude Marketplace. It is installed automatically when the marketplace is configured. 
+ +## License + +MIT diff --git a/plugins/saas-test-pilot/agents/coverage-analyst.md b/plugins/saas-test-pilot/agents/coverage-analyst.md new file mode 100644 index 0000000..c1db26a --- /dev/null +++ b/plugins/saas-test-pilot/agents/coverage-analyst.md @@ -0,0 +1,60 @@ +--- +name: coverage-analyst +description: Read-only test coverage analysis and gap detection +model: haiku +permissionMode: plan +disallowedTools: Write, Edit, MultiEdit +--- + +# Coverage Analyst Agent + +You are a test coverage specialist focused on identifying untested code paths and prioritizing test gaps by risk. + +## Visual Output Requirements + +**MANDATORY: Display header at start of every response.** + +``` ++----------------------------------------------------------------------+ +| TEST-PILOT - Coverage Analysis | ++----------------------------------------------------------------------+ +``` + +## Core Principles + +1. **Coverage is a metric, not a goal** — 100% coverage does not mean correct code. Focus on meaningful coverage of critical paths. + +2. **Risk-based prioritization** — Not all uncovered code is equally important. Auth, payments, and data persistence gaps matter more than formatting helpers. + +3. **Branch coverage over line coverage** — Line coverage hides untested conditional branches. Always report branch coverage when available. + +4. **Actionable recommendations** — Every gap reported must include a concrete suggestion for what test to write. + +## Analysis Approach + +When analyzing coverage: + +1. **Parse coverage data** — Read `.coverage`, `coverage.xml`, `lcov.info`, or equivalent reports. Extract per-file and per-function metrics. + +2. **Identify gap categories:** + - Uncovered error handlers (catch/except blocks) + - Untested conditional branches + - Dead code (unreachable paths) + - Missing integration test coverage + - Untested configuration variations + +3. 
**Risk-score each gap:** + - **Critical (5):** Authentication, authorization, data mutation, payment processing + - **High (4):** API endpoints, input validation, data transformation + - **Medium (3):** Business logic, workflow transitions + - **Low (2):** Logging, formatting, display helpers + - **Informational (1):** Comments, documentation generation + +4. **Report with context** — Show the uncovered code, explain why it matters, and suggest the test to write. + +## Output Style + +- Present findings as a prioritized table +- Include file paths and line numbers +- Quantify the coverage impact of suggested tests +- Never suggest deleting code just to improve coverage numbers diff --git a/plugins/saas-test-pilot/agents/test-architect.md b/plugins/saas-test-pilot/agents/test-architect.md new file mode 100644 index 0000000..76a383e --- /dev/null +++ b/plugins/saas-test-pilot/agents/test-architect.md @@ -0,0 +1,62 @@ +--- +name: test-architect +description: Test generation, fixture creation, and e2e scenario design +model: sonnet +permissionMode: acceptEdits +--- + +# Test Architect Agent + +You are a senior test engineer specializing in test design, generation, and automation across Python and JavaScript/TypeScript ecosystems. + +## Visual Output Requirements + +**MANDATORY: Display header at start of every response.** + +``` ++----------------------------------------------------------------------+ +| TEST-PILOT - [Command Context] | ++----------------------------------------------------------------------+ +``` + +## Core Principles + +1. **Tests are documentation** — Every test should clearly communicate what behavior it verifies and why that behavior matters. + +2. **Isolation first** — Tests must not depend on execution order, shared mutable state, or external services unless explicitly testing integration. + +3. **Realistic data** — Use representative data that exercises real code paths. Avoid trivial values like "test" or "foo" that miss edge cases. + +4. 
**One assertion per concept** — Each test should verify a single logical behavior. Multiple assertions are fine when they validate the same concept. + +## Expertise + +- **Python:** pytest, unittest, pytest-mock, factory_boy, hypothesis, pytest-asyncio +- **JavaScript/TypeScript:** Jest, Vitest, Testing Library, Playwright, Cypress +- **Patterns:** Arrange-Act-Assert, Given-When-Then, Page Object Model, Test Data Builder +- **Coverage:** Branch coverage analysis, mutation testing concepts, risk-based prioritization + +## Test Generation Approach + +When generating tests: + +1. **Read the source code thoroughly** — Understand all branches, error paths, and edge cases before writing any test. + +2. **Map the dependency graph** — Identify what needs mocking vs what can be tested directly. Prefer real implementations when feasible. + +3. **Start with the happy path** — Establish the baseline behavior before testing error conditions. + +4. **Cover boundaries systematically:** + - Empty/null/undefined inputs + - Type boundaries (int max, string length limits) + - Collection boundaries (empty, single, many) + - Temporal boundaries (expired, concurrent, sequential) + +5. **Name tests descriptively** — `test_login_with_expired_token_returns_401` over `test_login_3`. + +## Output Style + +- Show generated code with clear comments +- Explain non-obvious mock choices +- Note any assumptions about the code under test +- Flag areas where manual review is recommended diff --git a/plugins/saas-test-pilot/claude-md-integration.md b/plugins/saas-test-pilot/claude-md-integration.md new file mode 100644 index 0000000..a9660e4 --- /dev/null +++ b/plugins/saas-test-pilot/claude-md-integration.md @@ -0,0 +1,36 @@ +# Test Pilot Integration + +Add to your project's CLAUDE.md: + +## Test Automation + +This project uses saas-test-pilot for test generation and coverage analysis. 
+ +### Commands +- `/test setup` - Detect framework and configure test environment +- `/test generate <target>` - Generate tests for a function, class, or module +- `/test coverage` - Analyze coverage gaps prioritized by risk +- `/test fixtures generate <model>` - Create fixtures and factories +- `/test e2e <target>` - Generate E2E test scenarios +- `/test run` - Execute tests with formatted output + +### Supported Frameworks +- Python: pytest, unittest +- JavaScript/TypeScript: Jest, Vitest +- E2E: Playwright, Cypress + +### Test Organization +Tests follow the standard structure: +``` +tests/ + conftest.py # Shared fixtures + unit/ # Unit tests (fast, isolated) + integration/ # Integration tests (database, APIs) + e2e/ # End-to-end tests (browser, full stack) + fixtures/ # Shared test data and response mocks +``` + +### Coverage Targets +- coverage-analyst provides risk-based gap analysis +- Focus on branch coverage, not just line coverage +- Critical modules (auth, payments) require higher thresholds diff --git a/plugins/saas-test-pilot/commands/test-coverage.md b/plugins/saas-test-pilot/commands/test-coverage.md new file mode 100644 index 0000000..c68a0f4 --- /dev/null +++ b/plugins/saas-test-pilot/commands/test-coverage.md @@ -0,0 +1,83 @@ +--- +name: test coverage +description: Analyze test coverage, identify untested paths, and prioritize gaps by risk +--- + +# /test coverage + +Analyze test coverage and identify gaps prioritized by risk. + +## Visual Output + +``` ++----------------------------------------------------------------------+ +| TEST-PILOT - Coverage Analysis | ++----------------------------------------------------------------------+ +``` + +## Usage + +``` +/test coverage [<target>] [--threshold=80] [--format=summary|detailed] +``` + +**Target:** File, directory, or module to analyze (defaults to entire project) +**Threshold:** Minimum acceptable coverage percentage +**Format:** Output detail level + +## Skills to Load + +- skills/coverage-analysis.md + +## Process + +1. 
**Discover Coverage Data** + - Look for existing coverage reports: `.coverage`, `coverage.xml`, `lcov.info`, `coverage/` + - If no report exists, attempt to run coverage: `pytest --cov`, `npx vitest --coverage` + - Parse coverage data into structured format + +2. **Analyze Gaps** + - Identify uncovered lines, branches, and functions + - Classify gaps by type: + - Error handling paths (catch/except blocks) + - Conditional branches (if/else, switch/case) + - Edge case logic (boundary checks, null guards) + - Integration points (API calls, database queries) + +3. **Risk Assessment** + - Score each gap by: + - Complexity of uncovered code (cyclomatic complexity) + - Criticality of the module (auth, payments, data persistence) + - Frequency of changes (git log analysis) + - Proximity to user input (trust boundary distance) + +4. **Generate Report** + - Overall coverage metrics + - Per-file breakdown + - Prioritized gap list with risk scores + - Suggested test cases for top gaps + +## Output Format + +``` +## Coverage Report + +### Overall: 74% lines | 61% branches + +### Files Below Threshold (80%) +| File | Lines | Branches | Risk | +|------|-------|----------|------| +| src/auth/login.py | 52% | 38% | HIGH | +| src/api/handlers.py | 67% | 55% | MEDIUM | + +### Top 5 Coverage Gaps (by risk) +1. **src/auth/login.py:45-62** — OAuth error handling + Risk: HIGH | Uncovered: 18 lines | Suggestion: test invalid token flow +2. 
**src/api/handlers.py:89-104** — Rate limit branch + Risk: MEDIUM | Uncovered: 16 lines | Suggestion: test 429 response + +### Recommendations +- Focus on auth module — highest risk, lowest coverage +- Add branch coverage to CI threshold +- 12 new test cases would bring coverage to 85% +``` diff --git a/plugins/saas-test-pilot/commands/test-e2e.md b/plugins/saas-test-pilot/commands/test-e2e.md new file mode 100644 index 0000000..6ee8b7e --- /dev/null +++ b/plugins/saas-test-pilot/commands/test-e2e.md @@ -0,0 +1,86 @@ +--- +name: test e2e +description: Generate end-to-end test scenarios with page object models and user flows +--- + +# /test e2e + +Generate end-to-end test scenarios for web applications or API workflows. + +## Visual Output + +``` ++----------------------------------------------------------------------+ +| TEST-PILOT - E2E Tests | ++----------------------------------------------------------------------+ +``` + +## Usage + +``` +/test e2e [--framework=playwright|cypress] [--flow=] +``` + +**Target:** Application area, URL path, or feature name +**Framework:** E2E framework (auto-detected if not specified) +**Flow:** Specific user flow to test (e.g., "login", "checkout", "signup") + +## Skills to Load + +- skills/test-patterns.md + +## Process + +1. **Analyze Application** + - Detect E2E framework from config files + - Identify routes/pages from router configuration + - Map user-facing features and critical paths + - Detect authentication requirements + +2. **Design Test Scenarios** + - Map user journeys (happy path first) + - Identify critical business flows: + - Authentication (login, logout, password reset) + - Data creation (forms, uploads, submissions) + - Navigation (routing, deep links, breadcrumbs) + - Error states (404, network failures, validation) + - Define preconditions and test data needs + +3. 
**Generate Page Objects** + - Create page object classes for each page/component + - Encapsulate selectors and interactions + - Keep assertions in test files, not page objects + - Use data-testid attributes where possible + +4. **Write Test Files** + - One test file per user flow or feature area + - Include setup (authentication, test data) and teardown (cleanup) + - Use descriptive test names that read as user stories + - Add retry logic for flaky network operations + - Include screenshot capture on failure + +5. **Verify** + - Check selectors reference valid elements + - Confirm test data setup is complete + - Validate timeout values are reasonable + +## Output Format + +``` +## E2E Tests: Login Flow + +### Page Objects Created +- pages/LoginPage.ts — login form interactions +- pages/DashboardPage.ts — post-login verification + +### Test Scenarios (5) +1. test_successful_login_redirects_to_dashboard +2. test_invalid_credentials_shows_error +3. test_empty_form_shows_validation +4. test_remember_me_persists_session +5. test_locked_account_shows_message + +### Test Data Requirements +- Valid user credentials (use test seed) +- Locked account fixture +``` diff --git a/plugins/saas-test-pilot/commands/test-fixtures.md b/plugins/saas-test-pilot/commands/test-fixtures.md new file mode 100644 index 0000000..1394bd1 --- /dev/null +++ b/plugins/saas-test-pilot/commands/test-fixtures.md @@ -0,0 +1,87 @@ +--- +name: test fixtures +description: Generate or manage test fixtures, factories, and mock data +--- + +# /test fixtures + +Generate and organize test fixtures, factories, and mock data. 
+ +## Visual Output + +``` ++----------------------------------------------------------------------+ +| TEST-PILOT - Fixtures | ++----------------------------------------------------------------------+ +``` + +## Usage + +``` +/test fixtures <action> [<target>] +``` + +**Actions:** +- `generate <model>` — Create fixture/factory for a data model +- `list` — Show existing fixtures and their usage +- `audit` — Find unused or duplicate fixtures +- `organize` — Restructure fixtures into standard layout + +## Skills to Load + +- skills/fixture-management.md +- skills/mock-patterns.md + +## Process + +### Generate + +1. **Analyze Target Model** + - Read model/schema definition (ORM model, Pydantic, TypeScript interface) + - Map field types, constraints, and relationships + - Identify required vs optional fields + +2. **Create Fixture** + - Python: generate conftest.py fixture or factory_boy factory + - JavaScript: generate factory function or test helper + - Include realistic sample data (not just "test123") + - Handle relationships (foreign keys, nested objects) + - Create variants (minimal, full, edge-case) + +3. **Place Fixture** + - Follow project conventions for fixture location + - Add to appropriate conftest.py or fixtures directory + - Import from shared location, not duplicated per test + +### List + +1. Scan test directories for fixture definitions +2. Map each fixture to its consumers (which tests use it) +3. Display fixture tree with usage counts + +### Audit + +1. Find fixtures with zero consumers +2. Detect duplicate/near-duplicate fixtures +3. 
Identify fixtures with hardcoded data that should be parameterized + +## Output Format + +``` +## Fixture: UserFactory + +### Generated for: models.User +### Location: tests/conftest.py + +### Variants +- user_factory() — standard user with defaults +- admin_factory() — user with is_admin=True +- minimal_user() — only required fields + +### Fields +| Field | Type | Default | Notes | +|-------|------|---------|-------| +| email | str | faker.email() | unique | +| name | str | faker.name() | — | +| role | enum | "viewer" | — | +``` diff --git a/plugins/saas-test-pilot/commands/test-generate.md b/plugins/saas-test-pilot/commands/test-generate.md new file mode 100644 index 0000000..4a5c07b --- /dev/null +++ b/plugins/saas-test-pilot/commands/test-generate.md @@ -0,0 +1,84 @@ +--- +name: test generate +description: Generate test cases for functions, classes, or modules with appropriate patterns +--- + +# /test generate + +Generate comprehensive test cases for specified code targets. + +## Visual Output + +``` ++----------------------------------------------------------------------+ +| TEST-PILOT - Generate Tests | ++----------------------------------------------------------------------+ +``` + +## Usage + +``` +/test generate [--type=unit|integration] [--style=aaa|bdd] +``` + +**Target:** File path, class name, function name, or module path +**Type:** Test type — defaults to unit +**Style:** Test style — defaults to arrange-act-assert (aaa) + +## Skills to Load + +- skills/test-patterns.md +- skills/mock-patterns.md +- skills/framework-detection.md + +## Process + +1. **Analyze Target** + - Read the target source code + - Identify public functions, methods, and classes + - Map input types, return types, and exceptions + - Detect dependencies that need mocking + +2. 
**Determine Test Strategy** + - Pure functions: direct input/output tests + - Functions with side effects: mock external calls + - Class methods: test through public interface + - Integration points: setup/teardown with real or fake dependencies + +3. **Generate Test Cases** + - Happy path: standard inputs produce expected outputs + - Edge cases: empty inputs, None/null, boundary values + - Error paths: invalid inputs, exceptions, error conditions + - Type variations: different valid types if applicable + +4. **Write Test File** + - Follow project conventions for test file location + - Use detected framework syntax (pytest/Jest/Vitest) + - Include docstrings explaining each test case + - Group related tests in classes or describe blocks + +5. **Verify** + - Check test file compiles/parses + - Verify imports are correct + - Confirm mock targets match actual module paths + +## Output Format + +``` +## Generated Tests for `module.function_name` + +### Test File: tests/unit/test_module.py + +### Test Cases (7 total) +1. test_function_returns_expected_for_valid_input +2. test_function_handles_empty_input +3. test_function_raises_on_invalid_type +4. test_function_boundary_values +5. test_function_none_input +6. test_function_large_input +7. test_function_concurrent_calls (if applicable) + +### Dependencies Mocked +- database.connection (unittest.mock.patch) +- external_api.client (fixture) +``` diff --git a/plugins/saas-test-pilot/commands/test-run.md b/plugins/saas-test-pilot/commands/test-run.md new file mode 100644 index 0000000..f299a37 --- /dev/null +++ b/plugins/saas-test-pilot/commands/test-run.md @@ -0,0 +1,90 @@ +--- +name: test run +description: Run tests with formatted output, filtering, and failure analysis +--- + +# /test run + +Execute tests with structured output and intelligent failure analysis. 
+ +## Visual Output + +``` ++----------------------------------------------------------------------+ +| TEST-PILOT - Run Tests | ++----------------------------------------------------------------------+ +``` + +## Usage + +``` +/test run [] [--type=unit|integration|e2e|all] [--verbose] [--failfast] +``` + +**Target:** File, directory, test name pattern, or marker/tag +**Type:** Test category to run (defaults to unit) +**Verbose:** Show full output including passing tests +**Failfast:** Stop on first failure + +## Skills to Load + +- skills/framework-detection.md + +## Process + +1. **Detect Test Runner** + - Identify framework from project configuration + - Build appropriate command: + - pytest: `pytest -v --tb=short` + - Jest: `npx jest --verbose` + - Vitest: `npx vitest run ` + - Apply type filter if specified (markers, tags, directories) + +2. **Execute Tests** + - Run the test command + - Capture stdout, stderr, and exit code + - Parse test results into structured data + +3. **Format Results** + - Group by status: passed, failed, skipped, errors + - Show failure details with: + - Test name and location + - Assertion message + - Relevant code snippet + - Suggested fix if pattern is recognizable + +4. **Analyze Failures** + - Common patterns: + - Import errors: missing dependency or wrong path + - Assertion errors: expected vs actual mismatch + - Timeout errors: slow operation or missing mock + - Setup errors: missing fixture or database state + - Suggest corrective action for each failure type + +5. **Summary** + - Total/passed/failed/skipped counts + - Duration + - Coverage delta if coverage is enabled + +## Output Format + +``` +## Test Results + +### Summary: 45 passed, 2 failed, 1 skipped (12.3s) + +### Failures + +1. FAIL test_user_login_with_expired_token (tests/test_auth.py:67) + AssertionError: Expected 401, got 200 + Cause: Token expiry check not applied before validation + Fix: Verify token_service.is_expired() is called in login handler + +2. 
FAIL test_export_csv_large_dataset (tests/test_export.py:134) + TimeoutError: Operation timed out after 30s + Cause: No pagination in export query + Fix: Add batch processing or mock the database call + +### Skipped +- test_redis_cache_eviction — requires Redis (marker: @needs_redis) +``` diff --git a/plugins/saas-test-pilot/commands/test-setup.md b/plugins/saas-test-pilot/commands/test-setup.md new file mode 100644 index 0000000..5b28f03 --- /dev/null +++ b/plugins/saas-test-pilot/commands/test-setup.md @@ -0,0 +1,70 @@ +--- +name: test setup +description: Detect test framework, configure test runner, and initialize test structure +--- + +# /test setup + +Setup wizard for test automation. Detects existing frameworks or helps choose one. + +## Visual Output + +``` ++----------------------------------------------------------------------+ +| TEST-PILOT - Setup | ++----------------------------------------------------------------------+ +``` + +## Skills to Load + +- skills/framework-detection.md + +## Process + +1. **Project Detection** + - Scan for existing test directories (`tests/`, `test/`, `__tests__/`, `spec/`) + - Detect language from file extensions and config files + - Identify existing test framework configuration + +2. **Framework Detection** + - Python: check for pytest.ini, setup.cfg [tool.pytest], pyproject.toml [tool.pytest], conftest.py, unittest patterns + - JavaScript/TypeScript: check for jest.config.js/ts, vitest.config.ts, .mocharc.yml, karma.conf.js + - E2E: check for playwright.config.ts, cypress.config.js, selenium configs + +3. **Configuration Review** + - Show detected framework and version + - Show test directory structure + - Show coverage configuration if present + - Show CI/CD test integration if found + +4. 
**Recommendations** + - If no framework detected: recommend based on language and project type + - If framework found but no coverage: suggest coverage setup + - If no test directory structure: propose standard layout + - If missing conftest/setup files: offer to create them + +## Output Format + +``` +## Test Environment + +### Detected Framework +- Language: Python 3.x +- Framework: pytest 8.x +- Config: pyproject.toml [tool.pytest.ini_options] + +### Test Structure +tests/ + conftest.py + unit/ + integration/ + +### Coverage +- Tool: pytest-cov +- Current: 72% line coverage + +### Recommendations +- [ ] Add conftest.py fixtures for database connection +- [ ] Configure pytest-xdist for parallel execution +- [ ] Add coverage threshold to CI pipeline +``` diff --git a/plugins/saas-test-pilot/commands/test.md b/plugins/saas-test-pilot/commands/test.md new file mode 100644 index 0000000..53ec9b8 --- /dev/null +++ b/plugins/saas-test-pilot/commands/test.md @@ -0,0 +1,18 @@ +--- +description: Test automation — generate tests, analyze coverage, manage fixtures +--- + +# /test + +Test automation toolkit for unit, integration, and end-to-end testing. 
+ +## Sub-commands + +| Sub-command | Description | +|-------------|-------------| +| `/test setup` | Setup wizard — detect framework, configure test runner | +| `/test generate` | Generate test cases for functions, classes, or modules | +| `/test coverage` | Analyze coverage and identify untested paths | +| `/test fixtures` | Generate or manage test fixtures and mocks | +| `/test e2e` | Generate end-to-end test scenarios | +| `/test run` | Run tests with formatted output | diff --git a/plugins/saas-test-pilot/skills/coverage-analysis.md b/plugins/saas-test-pilot/skills/coverage-analysis.md new file mode 100644 index 0000000..a1d72f2 --- /dev/null +++ b/plugins/saas-test-pilot/skills/coverage-analysis.md @@ -0,0 +1,63 @@ +--- +description: Coverage gap detection, risk scoring, and prioritization +--- + +# Coverage Analysis Skill + +## Overview + +Systematic approach to identifying, scoring, and prioritizing test coverage gaps. Coverage data is a tool for finding untested behavior, not a target to maximize blindly. + +## Coverage Types + +| Type | Measures | Tool Support | +|------|----------|-------------| +| **Line** | Which lines executed | All tools | +| **Branch** | Which conditional paths taken | pytest-cov, istanbul, c8 | +| **Function** | Which functions called | istanbul, c8 | +| **Statement** | Which statements executed | istanbul | + +Branch coverage is the minimum useful metric. Line coverage alone hides untested else-branches and short-circuit evaluations. 
+ +## Gap Classification + +### By Code Pattern + +| Pattern | Risk Level | Priority | +|---------|------------|----------| +| Exception handlers (catch/except) | HIGH | Test both the trigger and the handling | +| Auth/permission checks | CRITICAL | Must test both allowed and denied | +| Input validation branches | HIGH | Test valid, invalid, and boundary | +| Default/fallback cases | MEDIUM | Often untested but triggered in production | +| Configuration variations | MEDIUM | Test with different config values | +| Logging/metrics code | LOW | Usually not worth dedicated tests | + +### By Module Criticality + +Score modules 1-5 based on: +- **Data integrity** — Does it write to database/files? (+2) +- **Security boundary** — Does it handle auth/authz? (+2) +- **User-facing** — Does failure affect users directly? (+1) +- **Frequency of change** — Changed often in git log? (+1) +- **Dependency count** — Many callers depend on it? (+1) + +## Prioritization Formula + +``` +Priority = (Module Criticality * 2) + (Gap Risk Level) - (Test Complexity) +``` + +Where Test Complexity: +- 1: Simple unit test, no mocks needed +- 2: Requires basic mocking +- 3: Requires complex setup (database, fixtures) +- 4: Requires infrastructure (message queue, external service) +- 5: Requires E2E or manual testing + +## Reporting Guidelines + +- Always show current coverage alongside target +- Group gaps by module, sorted by priority +- For each gap: file, line range, description, suggested test +- Estimate coverage improvement if top-N gaps are addressed +- Never recommend deleting code to improve coverage diff --git a/plugins/saas-test-pilot/skills/fixture-management.md b/plugins/saas-test-pilot/skills/fixture-management.md new file mode 100644 index 0000000..57db814 --- /dev/null +++ b/plugins/saas-test-pilot/skills/fixture-management.md @@ -0,0 +1,88 @@ +--- +description: Fixture organization, factories, shared test data, and conftest patterns +--- + +# Fixture Management Skill + +## 
Overview + +Patterns for organizing test fixtures, factories, and shared test data. Well-structured fixtures reduce test maintenance and improve readability. + +## Python Fixtures (pytest) + +### conftest.py Hierarchy + +``` +tests/ + conftest.py # Shared across all tests (db connection, auth) + unit/ + conftest.py # Unit-specific fixtures (mocked services) + integration/ + conftest.py # Integration-specific (real db, test server) +``` + +Fixtures in parent conftest.py are available to all child directories. Keep fixtures at the narrowest scope possible. + +### Fixture Scopes + +| Scope | Lifetime | Use For | +|-------|----------|---------| +| `function` | Each test | Default. Mutable data, unique state | +| `class` | Each test class | Shared setup within a class | +| `module` | Each test file | Expensive setup shared across file | +| `session` | Entire test run | Database connection, compiled assets | + +### Factory Pattern (factory_boy) + +Use factories for complex model creation: +- Define a factory per model with sensible defaults +- Override only what the specific test needs +- Use `SubFactory` for relationships +- Use `LazyAttribute` for computed fields +- Use `Sequence` for unique values + +## JavaScript Fixtures + +### Factory Functions + +``` +function createUser(overrides = {}) { + return { + id: generateId(), + name: "Test User", + email: "test@example.com", + ...overrides + }; +} +``` + +### Shared Test Data + +- Place in `__tests__/fixtures/` or `test/fixtures/` +- Export factory functions, not static objects (avoid mutation between tests) +- Use builder pattern for complex objects with many optional fields + +## Database Fixtures + +### Seeding Strategies + +| Strategy | Speed | Isolation | Complexity | +|----------|-------|-----------|------------| +| Transaction rollback | Fast | Good | Medium | +| Truncate + re-seed | Medium | Perfect | Low | +| Separate test database | Fast | Perfect | High | +| In-memory database | Fastest | Perfect | Medium | + 
+### API Response Fixtures + +- Store in `tests/fixtures/responses/` as JSON files +- Name by endpoint and scenario: `get_user_200.json`, `get_user_404.json` +- Update fixtures when API contracts change +- Use fixture loading helpers to avoid hardcoded paths + +## Anti-Patterns + +- Global mutable fixtures shared between tests +- Fixtures that depend on other fixtures in unpredictable order +- Overly specific fixtures that break when models change +- Fixtures with magic values whose meaning is unclear diff --git a/plugins/saas-test-pilot/skills/framework-detection.md b/plugins/saas-test-pilot/skills/framework-detection.md new file mode 100644 index 0000000..4a014e4 --- /dev/null +++ b/plugins/saas-test-pilot/skills/framework-detection.md @@ -0,0 +1,56 @@ +--- +description: Detect test frameworks, locate config files, and identify test runner +--- + +# Framework Detection Skill + +## Overview + +Detect the test framework and runner used by the current project based on configuration files, dependencies, and directory structure. 
+ +## Detection Matrix + +### Python + +| Indicator | Framework | Confidence | +|-----------|-----------|------------| +| `pytest.ini` | pytest | HIGH | +| `pyproject.toml` with `[tool.pytest.ini_options]` | pytest | HIGH | +| `setup.cfg` with `[tool:pytest]` | pytest | HIGH | +| `conftest.py` in project root or tests/ | pytest | HIGH | +| `tests/test_*.py` with `import unittest` | unittest | MEDIUM | +| `tox.ini` with pytest commands | pytest | MEDIUM | + +### JavaScript / TypeScript + +| Indicator | Framework | Confidence | +|-----------|-----------|------------| +| `jest.config.js` or `jest.config.ts` | Jest | HIGH | +| `package.json` with `"jest"` config | Jest | HIGH | +| `vitest.config.ts` or `vitest.config.js` | Vitest | HIGH | +| `.mocharc.yml` or `.mocharc.json` | Mocha | HIGH | +| `karma.conf.js` | Karma | MEDIUM | + +### E2E Frameworks + +| Indicator | Framework | Confidence | +|-----------|-----------|------------| +| `playwright.config.ts` | Playwright | HIGH | +| `cypress.config.js` or `cypress.config.ts` | Cypress | HIGH | +| `cypress/` directory | Cypress | MEDIUM | + +## Config File Locations + +Search order for each framework: +1. Project root +2. `tests/` or `test/` directory +3. 
Inside `pyproject.toml`, `package.json`, or `setup.cfg` (inline config) + +## Output + +When detection completes, report: +- Detected framework name and version (from lock file or dependency list) +- Config file path +- Test directory path +- Coverage tool if configured (pytest-cov, istanbul, c8) +- CI integration if found (.github/workflows, .gitlab-ci.yml, Jenkinsfile) diff --git a/plugins/saas-test-pilot/skills/mock-patterns.md b/plugins/saas-test-pilot/skills/mock-patterns.md new file mode 100644 index 0000000..46efb4d --- /dev/null +++ b/plugins/saas-test-pilot/skills/mock-patterns.md @@ -0,0 +1,83 @@ +--- +description: Mocking, stubbing, and dependency injection strategies for tests +--- + +# Mock Patterns Skill + +## Overview + +Mocking strategies and best practices for isolating code under test from external dependencies. + +## When to Mock + +| Situation | Mock? | Reason | +|-----------|-------|--------| +| External API calls | Yes | Unreliable, slow, costs money | +| Database queries | Depends | Mock for unit, real for integration | +| File system | Depends | Mock for unit, tmpdir for integration | +| Time/date functions | Yes | Deterministic tests | +| Random/UUID generation | Yes | Reproducible tests | +| Pure utility functions | No | Fast, deterministic, no side effects | +| Internal business logic | No | Test the real thing | + +## Python Mocking + +### unittest.mock / pytest-mock + +``` +patch("module.path.to.dependency") # Replaces at import location +patch.object(MyClass, "method") # Replaces on specific class +MagicMock(return_value=expected) # Creates callable mock +MagicMock(side_effect=Exception("e")) # Raises on call +``` + +**Critical rule:** Patch where the dependency is USED, not where it is DEFINED. +- If `views.py` imports `from services import send_email`, patch `views.send_email`, NOT `services.send_email`. 
+ +### pytest-mock (preferred) + +Use the `mocker` fixture for cleaner syntax: +- `mocker.patch("module.function")` — auto-cleanup after test +- `mocker.spy(obj, "method")` — record calls without replacing + +## JavaScript Mocking + +### Jest + +``` +jest.mock("./module") // Auto-mock entire module +jest.spyOn(object, "method") // Spy without replacing +jest.fn().mockReturnValue(value) // Create mock function +``` + +### Vitest + +``` +vi.mock("./module") // Same API as Jest +vi.spyOn(object, "method") +vi.fn().mockReturnValue(value) +``` + +## Mock vs Stub vs Spy + +| Type | Behavior | Use When | +|------|----------|----------| +| **Mock** | Replace entirely, return fake data | Isolating from external service | +| **Stub** | Provide canned responses | Controlling specific return values | +| **Spy** | Record calls, keep real behavior | Verifying interactions without changing behavior | + +## Dependency Injection Patterns + +Prefer DI over mocking when possible: +- Constructor injection: pass dependencies as constructor args +- Function parameters: accept collaborators as arguments with defaults +- Context managers: swap implementations via context + +DI makes tests simpler and avoids brittle mock paths. + +## Anti-Patterns + +- Mocking too deep (mock chains: `mock.return_value.method.return_value`) +- Asserting on mock call counts instead of outcomes +- Mocking the system under test +- Not resetting mocks between tests (use autouse fixtures or afterEach) diff --git a/plugins/saas-test-pilot/skills/test-patterns.md b/plugins/saas-test-pilot/skills/test-patterns.md new file mode 100644 index 0000000..170e080 --- /dev/null +++ b/plugins/saas-test-pilot/skills/test-patterns.md @@ -0,0 +1,83 @@ +--- +description: Test design patterns for unit, integration, and e2e tests +--- + +# Test Patterns Skill + +## Overview + +Standard test design patterns organized by test type. Use these as templates when generating tests. 
+ +## Unit Test Patterns + +### Arrange-Act-Assert (AAA) + +The default pattern for unit tests: + +``` +Arrange: Set up test data and dependencies +Act: Call the function under test +Assert: Verify the result matches expectations +``` + +- Keep Arrange minimal — only what this specific test needs +- Act should be a single function call +- Assert one logical concept (multiple assertions allowed if same concept) + +### Parameterized Tests + +Use when testing the same logic with different inputs: +- pytest: `@pytest.mark.parametrize("input,expected", [...])` +- Jest: `test.each([...])("description %s", (input, expected) => {...})` + +Best for: validation functions, parsers, formatters, math operations. + +### Exception Testing + +Verify error conditions explicitly: +- pytest: `with pytest.raises(ValueError, match="expected message")` +- Jest: `expect(() => fn()).toThrow("expected message")` + +Always assert the exception type AND message content. + +## Integration Test Patterns + +### Setup/Teardown + +Use fixtures or beforeEach/afterEach for: +- Database connections and seeded data +- Temporary files and directories +- Mock server instances +- Environment variable overrides + +### Transaction Rollback + +For database integration tests, wrap each test in a transaction that rolls back: +- Ensures test isolation without slow re-seeding +- pytest: `@pytest.fixture(autouse=True)` with session-scoped DB and function-scoped transaction + +## E2E Test Patterns + +### Page Object Model + +Encapsulate page interactions in reusable classes: +- One class per page or significant component +- Methods return page objects for chaining +- Selectors defined as class properties +- No assertions inside page objects + +### User Flow Pattern + +Structure E2E tests as user stories: +1. Setup — authenticate, navigate to starting point +2. Action — perform the user's workflow steps +3. Verification — check the final state +4. 
Cleanup — reset any created data + +## Anti-Patterns to Avoid + +- Testing implementation details instead of behavior +- Mocking the thing you are testing +- Tests that depend on execution order +- Assertions on exact error messages from third-party libraries +- Sleeping instead of waiting for conditions diff --git a/plugins/saas-test-pilot/skills/visual-header.md b/plugins/saas-test-pilot/skills/visual-header.md new file mode 100644 index 0000000..32d5354 --- /dev/null +++ b/plugins/saas-test-pilot/skills/visual-header.md @@ -0,0 +1,27 @@ +# Visual Header Skill + +Standard visual header for saas-test-pilot commands. + +## Header Template + +``` ++----------------------------------------------------------------------+ +| TEST-PILOT - [Context] | ++----------------------------------------------------------------------+ +``` + +## Context Values by Command + +| Command | Context | +|---------|---------| +| `/test setup` | Setup | +| `/test generate` | Generate Tests | +| `/test coverage` | Coverage Analysis | +| `/test fixtures` | Fixtures | +| `/test e2e` | E2E Tests | +| `/test run` | Run Tests | +| Agent mode | Test Automation | + +## Usage + +Display header at the start of every command response before proceeding with the operation. diff --git a/plugins/viz-platform/agents/design-reviewer.md b/plugins/viz-platform/agents/design-reviewer.md index e552ab0..305d3e1 100644 --- a/plugins/viz-platform/agents/design-reviewer.md +++ b/plugins/viz-platform/agents/design-reviewer.md @@ -24,8 +24,8 @@ You are a strict design system compliance auditor. 
Your role is to review code f ## Trigger Conditions Activate this agent when: -- User runs `/design-review ` -- User runs `/design-gate ` +- User runs `/viz design-review ` +- User runs `/viz design-gate ` - Projman orchestrator requests design domain gate check - Code review includes DMC/Dash components @@ -48,7 +48,7 @@ Activate this agent when: ### Review Mode (default) -Triggered by `/design-review ` +Triggered by `/viz design-review ` **Characteristics:** - Produces detailed report with all findings @@ -59,7 +59,7 @@ Triggered by `/design-review ` ### Gate Mode -Triggered by `/design-gate ` or projman orchestrator domain gate +Triggered by `/viz design-gate ` or projman orchestrator domain gate **Characteristics:** - Binary PASS/FAIL output @@ -136,7 +136,7 @@ Blocking Issues (2): 2. app/components/nav.py:12 - Component 'dmc.Navbar' not found Fix: Use 'dmc.AppShell.Navbar' (DMC v0.14+) -Run /design-review for full audit report. +Run /viz design-review for full audit report. ``` ### Review Mode Output @@ -270,7 +270,7 @@ When called as a domain gate by projman orchestrator: ## Example Interactions -**User**: `/design-review src/pages/` +**User**: `/viz design-review src/pages/` **Agent**: 1. Scans all .py files in src/pages/ 2. Identifies DMC component usage @@ -279,7 +279,7 @@ When called as a domain gate by projman orchestrator: 5. Runs accessibility validation 6. Returns full review report -**User**: `/design-gate src/` +**User**: `/viz design-gate src/` **Agent**: 1. Scans all .py files 2. Identifies FAIL-level issues only diff --git a/plugins/viz-platform/claude-md-integration.md b/plugins/viz-platform/claude-md-integration.md index c09f03c..51bc53f 100644 --- a/plugins/viz-platform/claude-md-integration.md +++ b/plugins/viz-platform/claude-md-integration.md @@ -10,12 +10,12 @@ Add this snippet to your project's CLAUDE.md to enable viz-platform capabilities This project uses viz-platform for Dash Mantine Components dashboards. 
### Available Commands -- `/viz-component {name}` - Inspect DMC component props -- `/viz-chart {type}` - Create Plotly charts (line, bar, scatter, pie, area, histogram, box, heatmap, sunburst, treemap) -- `/viz-dashboard {template}` - Create layouts (basic, sidebar, tabs, split) -- `/viz-theme {name}` - Apply a theme -- `/viz-theme-new {name}` - Create custom theme -- `/viz-theme-css {name}` - Export theme as CSS +- `/viz component {name}` - Inspect DMC component props +- `/viz chart {type}` - Create Plotly charts (line, bar, scatter, pie, area, histogram, box, heatmap, sunburst, treemap) +- `/viz dashboard {template}` - Create layouts (basic, sidebar, tabs, split) +- `/viz theme {name}` - Apply a theme +- `/viz theme-new {name}` - Create custom theme +- `/viz theme-css {name}` - Export theme as CSS ### MCP Tools Available - **DMC**: list_components, get_component_props, validate_component @@ -44,13 +44,13 @@ If using with data-platform, add this section: ## Data + Visualization Workflow ### Data Loading (data-platform) -- `/data-ingest {file}` - Load CSV, Parquet, or JSON -- `/data-schema {table}` - View database schema -- `/data-profile {data_ref}` - Statistical summary +- `/data ingest {file}` - Load CSV, Parquet, or JSON +- `/data schema {table}` - View database schema +- `/data profile {data_ref}` - Statistical summary ### Visualization (viz-platform) -- `/viz-chart {type}` - Create charts from loaded data -- `/viz-dashboard {template}` - Build dashboard layouts +- `/viz chart {type}` - Create charts from loaded data +- `/viz dashboard {template}` - Build dashboard layouts ### Workflow Pattern 1. 
Load data: `read_csv("data.csv")` → returns `data_ref` diff --git a/plugins/viz-platform/commands/accessibility-check.md b/plugins/viz-platform/commands/viz-accessibility-check.md similarity index 80% rename from plugins/viz-platform/commands/accessibility-check.md rename to plugins/viz-platform/commands/viz-accessibility-check.md index 3a95785..0b8ca1e 100644 --- a/plugins/viz-platform/commands/accessibility-check.md +++ b/plugins/viz-platform/commands/viz-accessibility-check.md @@ -1,8 +1,9 @@ --- +name: viz accessibility-check description: Validate color accessibility for color blind users --- -# Accessibility Check +# /viz accessibility-check ## Skills to Load - skills/mcp-tools-reference.md @@ -21,7 +22,7 @@ Validate theme or chart colors for color blind accessibility. ## Usage ``` -/accessibility-check {target} +/viz accessibility-check {target} ``` ## Arguments @@ -41,5 +42,5 @@ accessibility_validate_theme(theme_name="corporate") ## Related Commands -- `/viz-theme-new {name}` - Create accessible theme -- `/viz-chart {type}` - Create chart (check colors after) +- `/viz theme-new {name}` - Create accessible theme +- `/viz chart {type}` - Create chart (check colors after) diff --git a/plugins/viz-platform/commands/viz-breakpoints.md b/plugins/viz-platform/commands/viz-breakpoints.md index 5672814..9ada745 100644 --- a/plugins/viz-platform/commands/viz-breakpoints.md +++ b/plugins/viz-platform/commands/viz-breakpoints.md @@ -1,8 +1,9 @@ --- +name: viz breakpoints description: Configure responsive breakpoints for dashboard layouts --- -# Viz Breakpoints +# /viz breakpoints ## Skills to Load - skills/mcp-tools-reference.md @@ -21,7 +22,7 @@ Configure responsive breakpoints for mobile-first design across screen sizes. ## Usage ``` -/viz-breakpoints {layout_ref} +/viz breakpoints {layout_ref} ``` ## Arguments @@ -30,7 +31,7 @@ Configure responsive breakpoints for mobile-first design across screen sizes. ## Workflow -1. 
**User invokes**: `/viz-breakpoints my-dashboard` +1. **User invokes**: `/viz breakpoints my-dashboard` 2. **Agent asks**: Which breakpoints to customize? (shows current settings) 3. **Agent asks**: Mobile column count? (xs, typically 1-2) 4. **Agent asks**: Tablet column count? (md, typically 4-6) @@ -39,5 +40,5 @@ Configure responsive breakpoints for mobile-first design across screen sizes. ## Related Commands -- `/viz-dashboard {template}` - Create layout with default breakpoints -- `/viz-theme {name}` - Theme includes default spacing values +- `/viz dashboard {template}` - Create layout with default breakpoints +- `/viz theme {name}` - Theme includes default spacing values diff --git a/plugins/viz-platform/commands/viz-chart-export.md b/plugins/viz-platform/commands/viz-chart-export.md index 1f98799..b865b5a 100644 --- a/plugins/viz-platform/commands/viz-chart-export.md +++ b/plugins/viz-platform/commands/viz-chart-export.md @@ -1,8 +1,9 @@ --- +name: viz chart-export description: Export a Plotly chart to PNG, SVG, or PDF format --- -# Viz Chart Export +# /viz chart-export ## Skills to Load - skills/mcp-tools-reference.md @@ -21,7 +22,7 @@ Export a Plotly chart to static image formats. 
## Usage ``` -/viz-chart-export {format} +/viz chart-export {format} ``` ## Arguments @@ -38,5 +39,5 @@ Requires `kaleido` package: `pip install kaleido` ## Related Commands -- `/viz-chart {type}` - Create a chart -- `/viz-theme {name}` - Apply theme before export +- `/viz chart {type}` - Create a chart +- `/viz theme {name}` - Apply theme before export diff --git a/plugins/viz-platform/commands/viz-chart.md b/plugins/viz-platform/commands/viz-chart.md index 0dc130a..7edebf1 100644 --- a/plugins/viz-platform/commands/viz-chart.md +++ b/plugins/viz-platform/commands/viz-chart.md @@ -1,8 +1,9 @@ --- +name: viz chart description: Create a Plotly chart with theme integration --- -# Viz Chart +# /viz chart ## Skills to Load - skills/mcp-tools-reference.md @@ -21,7 +22,7 @@ Create a Plotly chart with automatic theme token application. ## Usage ``` -/viz-chart {type} +/viz chart {type} ``` ## Arguments @@ -36,6 +37,6 @@ chart_create(chart_type="line", data_ref="df", x="date", y="value", theme=None) ## Related Commands -- `/viz-chart-export {format}` - Export chart to image -- `/viz-theme {name}` - Apply theme to charts -- `/viz-dashboard` - Create layout with charts +- `/viz chart-export {format}` - Export chart to image +- `/viz theme {name}` - Apply theme to charts +- `/viz dashboard` - Create layout with charts diff --git a/plugins/viz-platform/commands/viz-component.md b/plugins/viz-platform/commands/viz-component.md index c32f284..12b0fe0 100644 --- a/plugins/viz-platform/commands/viz-component.md +++ b/plugins/viz-platform/commands/viz-component.md @@ -1,8 +1,9 @@ --- +name: viz component description: Inspect Dash Mantine Component props and validation --- -# Viz Component +# /viz component ## Skills to Load - skills/mcp-tools-reference.md @@ -21,7 +22,7 @@ Inspect a DMC component's props, types, and defaults. 
## Usage ``` -/viz-component {name} +/viz component {name} ``` ## Arguments @@ -38,5 +39,5 @@ validate_component(component="Button", props={"variant": "filled"}) ## Related Commands -- `/viz-chart {type}` - Create charts -- `/viz-dashboard {template}` - Create layouts +- `/viz chart {type}` - Create charts +- `/viz dashboard {template}` - Create layouts diff --git a/plugins/viz-platform/commands/viz-dashboard.md b/plugins/viz-platform/commands/viz-dashboard.md index 1ffd99c..76033d5 100644 --- a/plugins/viz-platform/commands/viz-dashboard.md +++ b/plugins/viz-platform/commands/viz-dashboard.md @@ -1,8 +1,9 @@ --- +name: viz dashboard description: Create a dashboard layout with the layout-builder agent --- -# Viz Dashboard +# /viz dashboard ## Skills to Load - skills/mcp-tools-reference.md @@ -21,7 +22,7 @@ Create a dashboard layout with filters, grids, and sections. ## Usage ``` -/viz-dashboard {template} +/viz dashboard {template} ``` ## Arguments @@ -37,5 +38,5 @@ Activates **layout-builder** agent which orchestrates: ## Related Commands -- `/viz-breakpoints {layout}` - Configure responsive breakpoints -- `/viz-chart {type}` - Add charts to layout +- `/viz breakpoints {layout}` - Configure responsive breakpoints +- `/viz chart {type}` - Add charts to layout diff --git a/plugins/viz-platform/commands/design-gate.md b/plugins/viz-platform/commands/viz-design-gate.md similarity index 80% rename from plugins/viz-platform/commands/design-gate.md rename to plugins/viz-platform/commands/viz-design-gate.md index 127e737..6767a23 100644 --- a/plugins/viz-platform/commands/design-gate.md +++ b/plugins/viz-platform/commands/viz-design-gate.md @@ -1,4 +1,5 @@ --- +name: viz design-gate description: Design system compliance gate (pass/fail) for sprint execution gate_contract: v1 arguments: @@ -7,20 +8,20 @@ arguments: required: true --- -# /design-gate +# /viz design-gate Binary pass/fail validation for design system compliance. 
Used by projman orchestrator during sprint execution to gate issue completion. ## Usage ``` -/design-gate +/viz design-gate ``` **Examples:** ``` -/design-gate ./app/pages/dashboard.py -/design-gate ./app/components/ +/viz design-gate ./app/pages/dashboard.py +/viz design-gate ./app/components/ ``` ## What It Does @@ -56,7 +57,7 @@ Blocking Issues (2): 2. app/components/nav.py:12 - Component 'dmc.Navbar' not found Fix: Use 'dmc.AppShell.Navbar' (DMC v0.14+) -Run /design-review for full audit report. +Run /viz design-review for full audit report. ``` ## Integration with projman @@ -68,13 +69,13 @@ This command is automatically invoked by the projman orchestrator when: 3. The orchestrator passes the path of changed files **Gate behavior:** -- PASS → Issue can be marked complete -- FAIL → Issue stays open, blocker comment added +- PASS -> Issue can be marked complete +- FAIL -> Issue stays open, blocker comment added -## Differences from /design-review +## Differences from /viz design-review -| Aspect | /design-gate | /design-review | -|--------|--------------|----------------| +| Aspect | /viz design-gate | /viz design-review | +|--------|------------------|---------------------| | Output | Binary PASS/FAIL | Detailed report | | Severity | FAIL only | FAIL + WARN + INFO | | Purpose | Automation gate | Human review | @@ -86,7 +87,7 @@ This command is automatically invoked by the projman orchestrator when: - **Sprint execution**: Automatic quality gates - **Quick validation**: Fast pass/fail without full report -For detailed findings, use `/design-review` instead. +For detailed findings, use `/viz design-review` instead. 
## Requirements diff --git a/plugins/viz-platform/commands/design-review.md b/plugins/viz-platform/commands/viz-design-review.md similarity index 81% rename from plugins/viz-platform/commands/design-review.md rename to plugins/viz-platform/commands/viz-design-review.md index 172e730..a19b7ff 100644 --- a/plugins/viz-platform/commands/design-review.md +++ b/plugins/viz-platform/commands/viz-design-review.md @@ -1,4 +1,5 @@ --- +name: viz design-review description: Audit codebase for design system compliance arguments: - name: path @@ -6,21 +7,21 @@ arguments: required: true --- -# /design-review +# /viz design-review Scans target path for Dash Mantine Components usage and validates against design system standards. ## Usage ``` -/design-review +/viz design-review ``` **Examples:** ``` -/design-review ./app/pages/ -/design-review ./app/components/dashboard.py -/design-review . +/viz design-review ./app/pages/ +/viz design-review ./app/components/dashboard.py +/viz design-review . ``` ## What It Does @@ -60,9 +61,9 @@ Each finding includes: ## Related Commands -- `/design-gate` - Binary pass/fail for sprint execution (no detailed report) -- `/viz-component` - Inspect individual DMC component props -- `/viz-theme` - Check active theme configuration +- `/viz design-gate` - Binary pass/fail for sprint execution (no detailed report) +- `/viz component` - Inspect individual DMC component props +- `/viz theme` - Check active theme configuration ## Requirements diff --git a/plugins/viz-platform/commands/viz-setup.md b/plugins/viz-platform/commands/viz-setup.md index 2ac4cf1..4d13aaf 100644 --- a/plugins/viz-platform/commands/viz-setup.md +++ b/plugins/viz-platform/commands/viz-setup.md @@ -1,8 +1,9 @@ --- +name: viz setup description: Interactive setup wizard for viz-platform plugin --- -# Viz Setup +# /viz setup ## Visual Output @@ -41,5 +42,5 @@ Verify MCP server loads, display summary, prompt session restart. 
## Related Commands -- `/viz-component {name}` - Inspect component props -- `/viz-chart {type}` - Create a chart +- `/viz component {name}` - Inspect component props +- `/viz chart {type}` - Create a chart diff --git a/plugins/viz-platform/commands/viz-theme-css.md b/plugins/viz-platform/commands/viz-theme-css.md index 0522d1a..dfccff0 100644 --- a/plugins/viz-platform/commands/viz-theme-css.md +++ b/plugins/viz-platform/commands/viz-theme-css.md @@ -1,8 +1,9 @@ --- +name: viz theme-css description: Export a theme as CSS custom properties --- -# Viz Theme CSS +# /viz theme-css ## Skills to Load - skills/mcp-tools-reference.md @@ -21,7 +22,7 @@ Export a theme's design tokens as CSS custom properties. ## Usage ``` -/viz-theme-css {name} +/viz theme-css {name} ``` ## Arguments @@ -38,5 +39,5 @@ Use cases: external CSS, design handoff, documentation, other frameworks. ## Related Commands -- `/viz-theme {name}` - Apply a theme -- `/viz-theme-new {name}` - Create a new theme +- `/viz theme {name}` - Apply a theme +- `/viz theme-new {name}` - Create a new theme diff --git a/plugins/viz-platform/commands/viz-theme-new.md b/plugins/viz-platform/commands/viz-theme-new.md index c569615..3458108 100644 --- a/plugins/viz-platform/commands/viz-theme-new.md +++ b/plugins/viz-platform/commands/viz-theme-new.md @@ -1,8 +1,9 @@ --- +name: viz theme-new description: Create a new custom theme with design tokens --- -# Viz Theme New +# /viz theme-new ## Skills to Load - skills/mcp-tools-reference.md @@ -21,7 +22,7 @@ Create a new custom theme with specified design tokens. 
## Usage ``` -/viz-theme-new {name} +/viz theme-new {name} ``` ## Arguments @@ -37,5 +38,5 @@ theme_validate(theme_name="corporate") ## Related Commands -- `/viz-theme {name}` - Apply a theme -- `/viz-theme-css {name}` - Export theme as CSS +- `/viz theme {name}` - Apply a theme +- `/viz theme-css {name}` - Export theme as CSS diff --git a/plugins/viz-platform/commands/viz-theme.md b/plugins/viz-platform/commands/viz-theme.md index dae6781..4ccc2a0 100644 --- a/plugins/viz-platform/commands/viz-theme.md +++ b/plugins/viz-platform/commands/viz-theme.md @@ -1,8 +1,9 @@ --- +name: viz theme description: Apply an existing theme to the current context --- -# Viz Theme +# /viz theme ## Skills to Load - skills/mcp-tools-reference.md @@ -21,7 +22,7 @@ Apply an existing theme to activate its design tokens. ## Usage ``` -/viz-theme {name} +/viz theme {name} ``` ## Arguments @@ -41,5 +42,5 @@ When activated, new charts/layouts automatically use theme tokens. ## Related Commands -- `/viz-theme-new {name}` - Create a new theme -- `/viz-theme-css {name}` - Export theme as CSS +- `/viz theme-new {name}` - Create a new theme +- `/viz theme-css {name}` - Export theme as CSS diff --git a/plugins/viz-platform/commands/viz.md b/plugins/viz-platform/commands/viz.md new file mode 100644 index 0000000..6d1bdea --- /dev/null +++ b/plugins/viz-platform/commands/viz.md @@ -0,0 +1,24 @@ +--- +description: Visualization tools with DMC validation, charts, and theming +--- + +# /viz + +Visualization tools with Dash Mantine Components validation, Plotly charts, and theming. 
+ +## Sub-commands + +| Sub-command | Description | +|-------------|-------------| +| `/viz theme` | Apply existing theme to visualizations | +| `/viz theme-new` | Create new custom theme with design tokens | +| `/viz theme-css` | Export theme as CSS custom properties | +| `/viz component` | Inspect DMC component props and validation | +| `/viz dashboard` | Create dashboard layouts with filters and grids | +| `/viz chart` | Create Plotly charts with theme integration | +| `/viz chart-export` | Export charts to PNG, SVG, PDF via kaleido | +| `/viz breakpoints` | Configure responsive layout breakpoints | +| `/viz accessibility-check` | Color blind validation (WCAG contrast ratios) | +| `/viz design-review` | Detailed design system audits | +| `/viz design-gate` | Binary pass/fail design system validation | +| `/viz setup` | Setup wizard for viz-platform MCP server |