1 Commit

249f8794ad feat(projman): add domain gate checks to orchestrator agent
Add domain-consultation skill and gate check responsibilities to the
orchestrator agent. Before marking issues complete, the orchestrator
now checks for Domain/* labels and invokes the appropriate gate command
(/design-gate for Domain/Viz, /data-gate for Domain/Data).

Includes graceful degradation when gate commands are unavailable.

Closes #362

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
2026-02-01 18:11:58 -05:00
161 changed files with 2240 additions and 9156 deletions
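
The gate-check behavior described in the commit message can be sketched roughly as follows. The orchestrator is a markdown agent, not a script, so this bash sketch only illustrates the decision flow; `get_issue_labels` is a hypothetical helper standing in for the Gitea MCP label lookup, and `design-gate`/`data-gate` stand in for the `/design-gate` and `/data-gate` commands.

```bash
# Hedged sketch of the domain gate check; illustrative only.
run_domain_gates() {
  local issue="$1"
  local labels
  labels="$(get_issue_labels "$issue")"    # hypothetical helper

  if grep -q "Domain/Viz" <<<"$labels"; then
    if command -v design-gate >/dev/null 2>&1; then
      design-gate "$issue" || return 1     # gate failed: do not mark issue complete
    else
      echo "WARN: /design-gate unavailable, skipping gate" >&2   # graceful degradation
    fi
  fi

  if grep -q "Domain/Data" <<<"$labels"; then
    if command -v data-gate >/dev/null 2>&1; then
      data-gate "$issue" || return 1
    else
      echo "WARN: /data-gate unavailable, skipping gate" >&2
    fi
  fi
}
```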

View File

@@ -1,205 +0,0 @@
{
"name": "leo-claude-mktplace",
"owner": {
"name": "Leo Miranda",
"email": "leobmiranda@gmail.com"
},
"metadata": {
"description": "Project management plugins with Gitea and NetBox integrations",
"version": "7.0.0"
},
"plugins": [
{
"name": "projman",
"version": "3.4.0",
"description": "Sprint planning and project management with Gitea integration",
"source": "./plugins/projman",
"author": {
"name": "Leo Miranda",
"email": "leobmiranda@gmail.com"
},
"homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/projman/README.md",
"repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
"hooks": ["./hooks/hooks.json"],
"category": "development",
"tags": ["sprint", "agile", "gitea", "project-management"],
"license": "MIT"
},
{
"name": "doc-guardian",
"version": "1.1.0",
"description": "Automatic documentation drift detection and synchronization",
"source": "./plugins/doc-guardian",
"author": {
"name": "Leo Miranda",
"email": "leobmiranda@gmail.com"
},
"homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/doc-guardian/README.md",
"repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
"hooks": ["./hooks/hooks.json"],
"category": "productivity",
"tags": ["documentation", "drift-detection", "sync"],
"license": "MIT"
},
{
"name": "code-sentinel",
"version": "1.0.1",
"description": "Security scanning and code refactoring tools",
"source": "./plugins/code-sentinel",
"author": {
"name": "Leo Miranda",
"email": "leobmiranda@gmail.com"
},
"homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/code-sentinel/README.md",
"repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
"hooks": ["./hooks/hooks.json"],
"category": "security",
"tags": ["security-scan", "refactoring", "vulnerabilities"],
"license": "MIT"
},
{
"name": "project-hygiene",
"version": "0.1.0",
"description": "Post-task cleanup hook that removes temp files and manages orphaned files",
"source": "./plugins/project-hygiene",
"author": {
"name": "Leo Miranda",
"email": "leobmiranda@gmail.com"
},
"homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/project-hygiene/README.md",
"repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
"hooks": ["./hooks/hooks.json"],
"category": "productivity",
"tags": ["cleanup", "automation", "hygiene"],
"license": "MIT"
},
{
"name": "cmdb-assistant",
"version": "1.2.0",
"description": "NetBox CMDB integration with data quality validation and machine registration",
"source": "./plugins/cmdb-assistant",
"author": {
"name": "Leo Miranda",
"email": "leobmiranda@gmail.com"
},
"homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/cmdb-assistant/README.md",
"repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
"hooks": ["./hooks/hooks.json"],
"category": "infrastructure",
"tags": ["cmdb", "netbox", "dcim", "ipam", "data-quality", "validation"],
"license": "MIT"
},
{
"name": "claude-config-maintainer",
"version": "1.2.0",
"description": "CLAUDE.md and settings.local.json optimization for Claude Code projects",
"source": "./plugins/claude-config-maintainer",
"author": {
"name": "Leo Miranda",
"email": "leobmiranda@gmail.com"
},
"homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/claude-config-maintainer/README.md",
"repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
"hooks": ["./hooks/hooks.json"],
"category": "development",
"tags": ["claude-md", "configuration", "optimization"],
"license": "MIT"
},
{
"name": "clarity-assist",
"version": "1.2.0",
"description": "Prompt optimization and requirement clarification with ND-friendly accommodations",
"source": "./plugins/clarity-assist",
"author": {
"name": "Leo Miranda",
"email": "leobmiranda@gmail.com"
},
"homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/clarity-assist/README.md",
"repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
"hooks": ["./hooks/hooks.json"],
"category": "productivity",
"tags": ["prompts", "requirements", "clarification", "nd-friendly"],
"license": "MIT"
},
{
"name": "git-flow",
"version": "1.2.0",
"description": "Git workflow automation with intelligent commit messages and branch management",
"source": "./plugins/git-flow",
"author": {
"name": "Leo Miranda",
"email": "leobmiranda@gmail.com"
},
"homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/git-flow/README.md",
"repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
"hooks": ["./hooks/hooks.json"],
"category": "development",
"tags": ["git", "workflow", "commits", "branching"],
"license": "MIT"
},
{
"name": "pr-review",
"version": "1.1.0",
"description": "Multi-agent pull request review with confidence scoring and actionable feedback",
"source": "./plugins/pr-review",
"author": {
"name": "Leo Miranda",
"email": "leobmiranda@gmail.com"
},
"homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/pr-review/README.md",
"repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
"hooks": ["./hooks/hooks.json"],
"category": "development",
"tags": ["code-review", "pull-requests", "security", "quality"],
"license": "MIT"
},
{
"name": "data-platform",
"version": "1.3.0",
"description": "Data engineering tools with pandas, PostgreSQL/PostGIS, and dbt integration",
"source": "./plugins/data-platform",
"author": {
"name": "Leo Miranda",
"email": "leobmiranda@gmail.com"
},
"homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/data-platform/README.md",
"repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
"hooks": ["./hooks/hooks.json"],
"category": "data",
"tags": ["pandas", "postgresql", "postgis", "dbt", "data-engineering", "etl"],
"license": "MIT"
},
{
"name": "viz-platform",
"version": "1.1.0",
"description": "Visualization tools with Dash Mantine Components validation, Plotly charts, and theming",
"source": "./plugins/viz-platform",
"author": {
"name": "Leo Miranda",
"email": "leobmiranda@gmail.com"
},
"homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/viz-platform/README.md",
"repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
"hooks": ["./hooks/hooks.json"],
"category": "visualization",
"tags": ["dash", "plotly", "mantine", "charts", "dashboards", "theming", "dmc"],
"license": "MIT"
},
{
"name": "contract-validator",
"version": "1.2.0",
"description": "Cross-plugin compatibility validation and Claude.md agent verification",
"source": "./plugins/contract-validator",
"author": {
"name": "Leo Miranda",
"email": "leobmiranda@gmail.com"
},
"homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/contract-validator/README.md",
"repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
"hooks": ["./hooks/hooks.json"],
"category": "development",
"tags": ["validation", "contracts", "compatibility", "agents", "interfaces", "cross-plugin"],
"license": "MIT"
}
]
}

View File

@@ -1,109 +0,0 @@
{
"name": "leo-claude-mktplace",
"owner": {
"name": "Leo Miranda",
"email": "leobmiranda@gmail.com"
},
"metadata": {
"description": "Project management plugins with Gitea and NetBox integrations",
"version": "7.0.0"
},
"plugins": [
{
"name": "projman",
"version": "3.4.0",
"description": "Sprint planning and project management with Gitea integration",
"source": "./plugins/projman",
"author": {
"name": "Leo Miranda",
"email": "leobmiranda@gmail.com"
},
"homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/projman/README.md",
"repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
"hooks": ["./hooks/hooks.json"],
"category": "development",
"tags": ["sprint", "agile", "gitea", "project-management"],
"license": "MIT"
},
{
"name": "git-flow",
"version": "1.2.0",
"description": "Git workflow automation with intelligent commit messages and branch management",
"source": "./plugins/git-flow",
"author": {
"name": "Leo Miranda",
"email": "leobmiranda@gmail.com"
},
"homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/git-flow/README.md",
"repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
"hooks": ["./hooks/hooks.json"],
"category": "development",
"tags": ["git", "workflow", "commits", "branching"],
"license": "MIT"
},
{
"name": "pr-review",
"version": "1.1.0",
"description": "Multi-agent pull request review with confidence scoring and actionable feedback",
"source": "./plugins/pr-review",
"author": {
"name": "Leo Miranda",
"email": "leobmiranda@gmail.com"
},
"homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/pr-review/README.md",
"repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
"hooks": ["./hooks/hooks.json"],
"category": "development",
"tags": ["code-review", "pull-requests", "security", "quality"],
"license": "MIT"
},
{
"name": "clarity-assist",
"version": "1.2.0",
"description": "Prompt optimization and requirement clarification with ND-friendly accommodations",
"source": "./plugins/clarity-assist",
"author": {
"name": "Leo Miranda",
"email": "leobmiranda@gmail.com"
},
"homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/clarity-assist/README.md",
"repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
"hooks": ["./hooks/hooks.json"],
"category": "productivity",
"tags": ["prompts", "requirements", "clarification", "nd-friendly"],
"license": "MIT"
},
{
"name": "code-sentinel",
"version": "1.0.1",
"description": "Security scanning and code refactoring tools",
"source": "./plugins/code-sentinel",
"author": {
"name": "Leo Miranda",
"email": "leobmiranda@gmail.com"
},
"homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/code-sentinel/README.md",
"repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
"hooks": ["./hooks/hooks.json"],
"category": "security",
"tags": ["security-scan", "refactoring", "vulnerabilities"],
"license": "MIT"
},
{
"name": "doc-guardian",
"version": "1.1.0",
"description": "Automatic documentation drift detection and synchronization",
"source": "./plugins/doc-guardian",
"author": {
"name": "Leo Miranda",
"email": "leobmiranda@gmail.com"
},
"homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/doc-guardian/README.md",
"repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
"hooks": ["./hooks/hooks.json"],
"category": "productivity",
"tags": ["documentation", "drift-detection", "sync"],
"license": "MIT"
}
]
}

View File

@@ -6,7 +6,7 @@
},
"metadata": {
"description": "Project management plugins with Gitea and NetBox integrations",
"version": "7.0.0"
"version": "5.5.0"
},
"plugins": [
{
@@ -25,6 +25,102 @@
"tags": ["sprint", "agile", "gitea", "project-management"],
"license": "MIT"
},
{
"name": "doc-guardian",
"version": "1.1.0",
"description": "Automatic documentation drift detection and synchronization",
"source": "./plugins/doc-guardian",
"author": {
"name": "Leo Miranda",
"email": "leobmiranda@gmail.com"
},
"homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/doc-guardian/README.md",
"repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
"hooks": ["./hooks/hooks.json"],
"category": "productivity",
"tags": ["documentation", "drift-detection", "sync"],
"license": "MIT"
},
{
"name": "code-sentinel",
"version": "1.0.1",
"description": "Security scanning and code refactoring tools",
"source": "./plugins/code-sentinel",
"author": {
"name": "Leo Miranda",
"email": "leobmiranda@gmail.com"
},
"homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/code-sentinel/README.md",
"repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
"hooks": ["./hooks/hooks.json"],
"category": "security",
"tags": ["security-scan", "refactoring", "vulnerabilities"],
"license": "MIT"
},
{
"name": "project-hygiene",
"version": "0.1.0",
"description": "Post-task cleanup hook that removes temp files and manages orphaned files",
"source": "./plugins/project-hygiene",
"author": {
"name": "Leo Miranda",
"email": "leobmiranda@gmail.com"
},
"homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/project-hygiene/README.md",
"repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
"hooks": ["./hooks/hooks.json"],
"category": "productivity",
"tags": ["cleanup", "automation", "hygiene"],
"license": "MIT"
},
{
"name": "cmdb-assistant",
"version": "1.2.0",
"description": "NetBox CMDB integration with data quality validation and machine registration",
"source": "./plugins/cmdb-assistant",
"author": {
"name": "Leo Miranda",
"email": "leobmiranda@gmail.com"
},
"homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/cmdb-assistant/README.md",
"repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
"hooks": ["./hooks/hooks.json"],
"category": "infrastructure",
"tags": ["cmdb", "netbox", "dcim", "ipam", "data-quality", "validation"],
"license": "MIT"
},
{
"name": "claude-config-maintainer",
"version": "1.1.0",
"description": "CLAUDE.md optimization and maintenance for Claude Code projects",
"source": "./plugins/claude-config-maintainer",
"author": {
"name": "Leo Miranda",
"email": "leobmiranda@gmail.com"
},
"homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/claude-config-maintainer/README.md",
"repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
"hooks": ["./hooks/hooks.json"],
"category": "development",
"tags": ["claude-md", "configuration", "optimization"],
"license": "MIT"
},
{
"name": "clarity-assist",
"version": "1.2.0",
"description": "Prompt optimization and requirement clarification with ND-friendly accommodations",
"source": "./plugins/clarity-assist",
"author": {
"name": "Leo Miranda",
"email": "leobmiranda@gmail.com"
},
"homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/clarity-assist/README.md",
"repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
"hooks": ["./hooks/hooks.json"],
"category": "productivity",
"tags": ["prompts", "requirements", "clarification", "nd-friendly"],
"license": "MIT"
},
{
"name": "git-flow",
"version": "1.2.0",
@@ -58,51 +154,51 @@
"license": "MIT"
},
{
"name": "clarity-assist",
"name": "data-platform",
"version": "1.2.0",
"description": "Prompt optimization and requirement clarification with ND-friendly accommodations",
"source": "./plugins/clarity-assist",
"description": "Data engineering tools with pandas, PostgreSQL/PostGIS, and dbt integration",
"source": "./plugins/data-platform",
"author": {
"name": "Leo Miranda",
"email": "leobmiranda@gmail.com"
},
"homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/clarity-assist/README.md",
"homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/data-platform/README.md",
"repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
"hooks": ["./hooks/hooks.json"],
"category": "productivity",
"tags": ["prompts", "requirements", "clarification", "nd-friendly"],
"category": "data",
"tags": ["pandas", "postgresql", "postgis", "dbt", "data-engineering", "etl"],
"license": "MIT"
},
{
"name": "code-sentinel",
"version": "1.0.1",
"description": "Security scanning and code refactoring tools",
"source": "./plugins/code-sentinel",
"author": {
"name": "Leo Miranda",
"email": "leobmiranda@gmail.com"
},
"homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/code-sentinel/README.md",
"repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
"hooks": ["./hooks/hooks.json"],
"category": "security",
"tags": ["security-scan", "refactoring", "vulnerabilities"],
"license": "MIT"
},
{
"name": "doc-guardian",
"name": "viz-platform",
"version": "1.1.0",
"description": "Automatic documentation drift detection and synchronization",
"source": "./plugins/doc-guardian",
"description": "Visualization tools with Dash Mantine Components validation, Plotly charts, and theming",
"source": "./plugins/viz-platform",
"author": {
"name": "Leo Miranda",
"email": "leobmiranda@gmail.com"
},
"homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/doc-guardian/README.md",
"homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/viz-platform/README.md",
"repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
"hooks": ["./hooks/hooks.json"],
"category": "productivity",
"tags": ["documentation", "drift-detection", "sync"],
"category": "visualization",
"tags": ["dash", "plotly", "mantine", "charts", "dashboards", "theming", "dmc"],
"license": "MIT"
},
{
"name": "contract-validator",
"version": "1.2.0",
"description": "Cross-plugin compatibility validation and Claude.md agent verification",
"source": "./plugins/contract-validator",
"author": {
"name": "Leo Miranda",
"email": "leobmiranda@gmail.com"
},
"homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/contract-validator/README.md",
"repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
"hooks": ["./hooks/hooks.json"],
"category": "development",
"tags": ["validation", "contracts", "compatibility", "agents", "interfaces", "cross-plugin"],
"license": "MIT"
}
]

View File

@@ -1,249 +0,0 @@
# CLAUDE.md
This file provides guidance to Claude Code when working with code in this repository.
## Project Overview
**Repository:** leo-claude-mktplace
**Version:** 3.0.1
**Status:** Production Ready
A plugin marketplace for Claude Code containing:
| Plugin | Description | Version |
|--------|-------------|---------|
| `projman` | Sprint planning and project management with Gitea integration | 3.0.0 |
| `git-flow` | Git workflow automation with smart commits and branch management | 1.0.0 |
| `pr-review` | Multi-agent PR review with confidence scoring | 1.0.0 |
| `clarity-assist` | Prompt optimization with ND-friendly accommodations | 1.0.0 |
| `doc-guardian` | Automatic documentation drift detection and synchronization | 1.0.0 |
| `code-sentinel` | Security scanning and code refactoring tools | 1.0.0 |
| `claude-config-maintainer` | CLAUDE.md optimization and maintenance | 1.0.0 |
| `cmdb-assistant` | NetBox CMDB integration for infrastructure management | 1.0.0 |
| `project-hygiene` | Post-task cleanup automation via hooks | 0.1.0 |
## Quick Start
```bash
# Validate marketplace compliance
./scripts/validate-marketplace.sh
# Setup commands (in a target project with plugin installed)
/initial-setup # First time: full setup wizard
/project-init # New project: quick config
/project-sync # After repo move: sync config
# Run projman commands
/sprint-plan # Start sprint planning
/sprint-status # Check progress
/review # Pre-close code quality review
/test-check # Verify tests before close
/sprint-close # Complete sprint
```
## Repository Structure
```
leo-claude-mktplace/
├── .claude-plugin/
│ └── marketplace.json # Marketplace manifest
├── mcp-servers/ # SHARED MCP servers (v3.0.0+)
│ ├── gitea/ # Gitea MCP (issues, PRs, wiki)
│ └── netbox/ # NetBox MCP (CMDB)
├── plugins/
│ ├── projman/ # Sprint management
│ │ ├── .claude-plugin/plugin.json
│ │ ├── .mcp.json
│ │ ├── mcp-servers/gitea -> ../../../mcp-servers/gitea # SYMLINK
│ │ ├── commands/ # 12 commands (incl. setup)
│ │ ├── hooks/ # SessionStart mismatch detection
│ │ ├── agents/ # 4 agents
│ │ └── skills/label-taxonomy/
│ ├── git-flow/ # Git workflow automation
│ │ ├── .claude-plugin/plugin.json
│ │ ├── commands/ # 8 commands
│ │ └── agents/
│ ├── pr-review/ # Multi-agent PR review
│ │ ├── .claude-plugin/plugin.json
│ │ ├── .mcp.json
│ │ ├── mcp-servers/gitea -> ../../../mcp-servers/gitea # SYMLINK
│ │ ├── commands/ # 6 commands (incl. setup)
│ │ ├── hooks/ # SessionStart mismatch detection
│ │ └── agents/ # 5 agents
│ ├── clarity-assist/ # Prompt optimization (NEW v3.0.0)
│ │ ├── .claude-plugin/plugin.json
│ │ ├── commands/ # 2 commands
│ │ └── agents/
│ ├── doc-guardian/ # Documentation drift detection
│ ├── code-sentinel/ # Security scanning & refactoring
│ ├── claude-config-maintainer/
│ ├── cmdb-assistant/
│ └── project-hygiene/
├── scripts/
│ ├── setup.sh, post-update.sh
│ └── validate-marketplace.sh # Marketplace compliance validation
└── docs/
├── CANONICAL-PATHS.md # Single source of truth for paths
└── CONFIGURATION.md # Centralized configuration guide
```
## CRITICAL: Rules You MUST Follow
### File Operations
- **NEVER** create files in repository root unless listed in "Allowed Root Files"
- **NEVER** modify `.gitignore` without explicit permission
- **ALWAYS** use `.scratch/` for temporary/exploratory work
- **ALWAYS** verify paths against `docs/CANONICAL-PATHS.md` before creating files
### Plugin Development
- **plugin.json MUST be in `.claude-plugin/` directory** (not plugin root)
- **Every plugin MUST be listed in marketplace.json**
- **MCP servers are SHARED at root** with symlinks from plugins
- **MCP server venv path**: `${CLAUDE_PLUGIN_ROOT}/mcp-servers/{name}/.venv/bin/python`
- **CLI tools forbidden** - Use MCP tools exclusively (never `tea`, `gh`, etc.)
### Hooks (Valid Events Only)
`PreToolUse`, `PostToolUse`, `UserPromptSubmit`, `SessionStart`, `SessionEnd`, `Notification`, `Stop`, `SubagentStop`, `PreCompact`
**INVALID:** `task-completed`, `file-changed`, `git-commit-msg-needed`
### Allowed Root Files
`CLAUDE.md`, `README.md`, `LICENSE`, `CHANGELOG.md`, `.gitignore`, `.env.example`
### Allowed Root Directories
`.claude/`, `.claude-plugin/`, `.claude-plugins/`, `.scratch/`, `docs/`, `hooks/`, `mcp-servers/`, `plugins/`, `scripts/`
## Architecture
### Four-Agent Model (projman)
| Agent | Personality | Responsibilities |
|-------|-------------|------------------|
| **Planner** | Thoughtful, methodical | Sprint planning, architecture analysis, issue creation, lesson search |
| **Orchestrator** | Concise, action-oriented | Sprint execution, parallel batching, Git operations, lesson capture |
| **Executor** | Implementation-focused | Code implementation, branch management, MR creation |
| **Code Reviewer** | Thorough, practical | Pre-close quality review, security scan, test verification |
### MCP Server Tools (Gitea)
| Category | Tools |
|----------|-------|
| Issues | `list_issues`, `get_issue`, `create_issue`, `update_issue`, `add_comment` |
| Labels | `get_labels`, `suggest_labels`, `create_label` |
| Milestones | `list_milestones`, `get_milestone`, `create_milestone`, `update_milestone` |
| Dependencies | `list_issue_dependencies`, `create_issue_dependency`, `get_execution_order` |
| Wiki | `list_wiki_pages`, `get_wiki_page`, `create_wiki_page`, `create_lesson`, `search_lessons` |
| **Pull Requests** | `list_pull_requests`, `get_pull_request`, `get_pr_diff`, `get_pr_comments`, `create_pr_review`, `add_pr_comment` *(NEW v3.0.0)* |
| Validation | `validate_repo_org`, `get_branch_protection` |
### Hybrid Configuration
| Level | Location | Purpose |
|-------|----------|---------|
| System | `~/.config/claude/gitea.env` | Credentials (GITEA_API_URL, GITEA_API_TOKEN) |
| Project | `.env` in project root | Repository specification (GITEA_ORG, GITEA_REPO) |
**Note:** `GITEA_ORG` is at project level since different projects may belong to different organizations.
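For illustration, the two configuration levels above might look like this (placeholder values only, not taken from the repository):
```bash
# ~/.config/claude/gitea.env : system-level credentials (placeholders)
GITEA_API_URL=https://gitea.example.com/api/v1
GITEA_API_TOKEN=<your-token>

# .env in the project root : repository specification (placeholders)
GITEA_ORG=<organization>
GITEA_REPO=<repository>
```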
### Branch-Aware Security
| Branch Pattern | Mode | Capabilities |
|----------------|------|--------------|
| `development`, `feat/*` | Development | Full access |
| `staging` | Staging | Read-only code, can create issues |
| `main`, `master` | Production | Read-only, emergency only |
## Label Taxonomy
43 labels total: 27 organization + 16 repository
**Organization:** Agent/2, Complexity/3, Efforts/5, Priority/4, Risk/3, Source/4, Type/6
**Repository:** Component/9, Tech/7
Sync with `/labels-sync` command.
## Lessons Learned System
Stored in Gitea Wiki under `lessons-learned/sprints/`.
**Workflow:**
1. Orchestrator captures at sprint close via MCP tools
2. Planner searches at sprint start using `search_lessons`
3. Tags enable cross-project discovery
## Common Operations
### Adding a New Plugin
1. Create `plugins/{name}/.claude-plugin/plugin.json`
2. Add entry to `.claude-plugin/marketplace.json` with category, tags, license
3. Create `README.md` and `claude-md-integration.md`
4. If using MCP server, create symlink: `ln -s ../../../mcp-servers/{server} plugins/{name}/mcp-servers/{server}`
5. Run `./scripts/validate-marketplace.sh`
6. Update `CHANGELOG.md`
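In practice the steps above might look like this (the plugin name `my-plugin` and the choice of the `gitea` server are placeholders):
```bash
# Sketch of the new-plugin workflow; "my-plugin" is a placeholder name.
mkdir -p plugins/my-plugin/.claude-plugin
$EDITOR plugins/my-plugin/.claude-plugin/plugin.json     # plugin manifest
$EDITOR .claude-plugin/marketplace.json                  # add entry: category, tags, license
$EDITOR plugins/my-plugin/README.md plugins/my-plugin/claude-md-integration.md
# Only if the plugin uses a shared MCP server (gitea shown as an example):
mkdir -p plugins/my-plugin/mcp-servers
ln -s ../../../mcp-servers/gitea plugins/my-plugin/mcp-servers/gitea
./scripts/validate-marketplace.sh                        # compliance check
$EDITOR CHANGELOG.md
```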
### Adding a Command to projman
1. Create `plugins/projman/commands/{name}.md`
2. Update `plugins/projman/README.md`
3. Update marketplace description if significant
### Validation
```bash
./scripts/validate-marketplace.sh # Validates all manifests
```
## Path Verification Protocol
**Before creating any file:**
1. Read `docs/CANONICAL-PATHS.md`
2. List all paths to be created/modified
3. Verify each against canonical paths
4. If not in canonical paths, STOP and ask
## Documentation Index
| Document | Purpose |
|----------|---------|
| `docs/CANONICAL-PATHS.md` | **Single source of truth** for paths |
| `docs/COMMANDS-CHEATSHEET.md` | All commands quick reference with workflow examples |
| `docs/CONFIGURATION.md` | Centralized setup guide |
| `docs/UPDATING.md` | Update guide for the marketplace |
| `plugins/projman/CONFIGURATION.md` | Quick reference (links to central) |
| `plugins/projman/README.md` | Projman full documentation |
## Versioning and Changelog Rules
### Version Display
**The marketplace version is displayed ONLY in the main `README.md` title.**
- Format: `# Leo Claude Marketplace - vX.Y.Z`
- Do NOT add version numbers to individual plugin documentation titles
- Do NOT add version numbers to configuration guides
- Do NOT add version numbers to CLAUDE.md or other docs
### Changelog Maintenance (MANDATORY)
**`CHANGELOG.md` is the authoritative source for version history.**
When releasing a new version:
1. Update main `README.md` title with new version
2. Update `CHANGELOG.md` with:
- Version number and date: `## [X.Y.Z] - YYYY-MM-DD`
- **Added**: New features, commands, files
- **Changed**: Modifications to existing functionality
- **Fixed**: Bug fixes
- **Removed**: Deleted features, files, deprecated items
3. Update `marketplace.json` metadata version
4. Update plugin `plugin.json` versions if plugin-specific changes
### Version Format
- Follow [Semantic Versioning](https://semver.org/): MAJOR.MINOR.PATCH
- MAJOR: Breaking changes
- MINOR: New features, backward compatible
- PATCH: Bug fixes, minor improvements
---
**Last Updated:** 2026-01-20

View File

@@ -1,65 +0,0 @@
# Doc Guardian Queue - cleared after sync on 2026-02-02
2026-02-02T11:41:00 | .claude-plugin | /home/lmiranda/claude-plugins-work/.claude-plugin/marketplace.json | CLAUDE.md .claude-plugin/marketplace.json
2026-02-02T13:35:48 | skills | /home/lmiranda/claude-plugins-work/plugins/projman/skills/sprint-approval.md | README.md
2026-02-02T13:36:03 | commands | /home/lmiranda/claude-plugins-work/plugins/projman/commands/sprint-start.md | docs/COMMANDS-CHEATSHEET.md README.md
2026-02-02T13:36:16 | agents | /home/lmiranda/claude-plugins-work/plugins/projman/agents/orchestrator.md | README.md CLAUDE.md
2026-02-02T13:39:07 | commands | /home/lmiranda/claude-plugins-work/plugins/projman/commands/rfc.md | docs/COMMANDS-CHEATSHEET.md README.md
2026-02-02T13:39:15 | commands | /home/lmiranda/claude-plugins-work/plugins/projman/commands/setup.md | docs/COMMANDS-CHEATSHEET.md README.md
2026-02-02T13:39:32 | skills | /home/lmiranda/claude-plugins-work/plugins/projman/skills/rfc-workflow.md | README.md
2026-02-02T13:43:14 | skills | /home/lmiranda/claude-plugins-work/plugins/projman/skills/rfc-templates.md | README.md
2026-02-02T13:44:55 | skills | /home/lmiranda/claude-plugins-work/plugins/projman/skills/sprint-lifecycle.md | README.md
2026-02-02T13:45:04 | skills | /home/lmiranda/claude-plugins-work/plugins/projman/skills/label-taxonomy/labels-reference.md | README.md
2026-02-02T13:45:14 | commands | /home/lmiranda/claude-plugins-work/plugins/projman/commands/sprint-plan.md | docs/COMMANDS-CHEATSHEET.md README.md
2026-02-02T13:45:48 | commands | /home/lmiranda/claude-plugins-work/plugins/projman/commands/review.md | docs/COMMANDS-CHEATSHEET.md README.md
2026-02-02T13:46:07 | commands | /home/lmiranda/claude-plugins-work/plugins/projman/commands/sprint-close.md | docs/COMMANDS-CHEATSHEET.md README.md
2026-02-02T13:46:21 | commands | /home/lmiranda/claude-plugins-work/plugins/projman/commands/sprint-status.md | docs/COMMANDS-CHEATSHEET.md README.md
2026-02-02T13:46:38 | agents | /home/lmiranda/claude-plugins-work/plugins/projman/agents/planner.md | README.md CLAUDE.md
2026-02-02T13:46:57 | agents | /home/lmiranda/claude-plugins-work/plugins/projman/agents/code-reviewer.md | README.md CLAUDE.md
2026-02-02T13:49:13 | commands | /home/lmiranda/claude-plugins-work/plugins/viz-platform/commands/design-gate.md | docs/COMMANDS-CHEATSHEET.md README.md
2026-02-02T13:49:24 | commands | /home/lmiranda/claude-plugins-work/plugins/data-platform/commands/data-gate.md | docs/COMMANDS-CHEATSHEET.md README.md
2026-02-02T13:49:35 | skills | /home/lmiranda/claude-plugins-work/plugins/projman/skills/domain-consultation.md | README.md
2026-02-02T13:50:04 | mcp-servers | /home/lmiranda/claude-plugins-work/mcp-servers/contract-validator/mcp_server/validation_tools.py | docs/COMMANDS-CHEATSHEET.md CLAUDE.md
2026-02-02T13:50:59 | mcp-servers | /home/lmiranda/claude-plugins-work/mcp-servers/contract-validator/mcp_server/server.py | docs/COMMANDS-CHEATSHEET.md CLAUDE.md
2026-02-02T13:51:32 | mcp-servers | /home/lmiranda/claude-plugins-work/mcp-servers/contract-validator/tests/test_validation_tools.py | docs/COMMANDS-CHEATSHEET.md CLAUDE.md
2026-02-02T13:51:49 | skills | /home/lmiranda/claude-plugins-work/plugins/contract-validator/skills/validation-rules.md | README.md
2026-02-02T13:52:07 | skills | /home/lmiranda/claude-plugins-work/plugins/contract-validator/skills/mcp-tools-reference.md | README.md
2026-02-02T13:59:09 | skills | /home/lmiranda/claude-plugins-work/plugins/projman/skills/progress-tracking.md | README.md
2026-02-02T14:01:34 | commands | /home/lmiranda/claude-plugins-work/plugins/projman/commands/test.md | docs/COMMANDS-CHEATSHEET.md README.md
2026-02-03T21:08:38 | commands | /home/lmiranda/claude-plugins-work/plugins/git-flow/commands/git-commit.md | docs/COMMANDS-CHEATSHEET.md README.md
2026-02-03T21:08:39 | commands | /home/lmiranda/claude-plugins-work/plugins/git-flow/commands/git-commit-push.md | docs/COMMANDS-CHEATSHEET.md README.md
2026-02-03T21:08:40 | commands | /home/lmiranda/claude-plugins-work/plugins/git-flow/commands/git-commit-merge.md | docs/COMMANDS-CHEATSHEET.md README.md
2026-02-03T21:08:41 | commands | /home/lmiranda/claude-plugins-work/plugins/git-flow/commands/git-commit-sync.md | docs/COMMANDS-CHEATSHEET.md README.md
2026-02-03T21:08:49 | commands | /home/lmiranda/claude-plugins-work/plugins/cmdb-assistant/commands/cmdb-setup.md | docs/COMMANDS-CHEATSHEET.md README.md
2026-02-03T21:08:50 | commands | /home/lmiranda/claude-plugins-work/plugins/pr-review/commands/project-init.md | docs/COMMANDS-CHEATSHEET.md README.md
2026-02-03T21:08:51 | skills | /home/lmiranda/claude-plugins-work/plugins/cmdb-assistant/skills/visual-header.md | README.md
2026-02-03T21:08:51 | commands | /home/lmiranda/claude-plugins-work/plugins/projman/commands/pm-review.md | docs/COMMANDS-CHEATSHEET.md README.md
2026-02-03T21:08:53 | commands | /home/lmiranda/claude-plugins-work/plugins/projman/commands/pm-test.md | docs/COMMANDS-CHEATSHEET.md README.md
2026-02-03T21:08:54 | skills | /home/lmiranda/claude-plugins-work/plugins/projman/skills/review-checklist.md | README.md
2026-02-03T21:08:55 | skills | /home/lmiranda/claude-plugins-work/plugins/projman/skills/visual-output.md | README.md
2026-02-03T21:08:58 | skills | /home/lmiranda/claude-plugins-work/plugins/projman/skills/setup-workflows.md | README.md
2026-02-03T21:08:59 | skills | /home/lmiranda/claude-plugins-work/plugins/git-flow/skills/sync-workflow.md | README.md
2026-02-03T21:09:00 | skills | /home/lmiranda/claude-plugins-work/plugins/git-flow/skills/commit-conventions.md | README.md
2026-02-03T21:09:00 | skills | /home/lmiranda/claude-plugins-work/plugins/git-flow/skills/merge-workflow.md | README.md
2026-02-03T21:09:08 | commands | /home/lmiranda/claude-plugins-work/plugins/projman/commands/pm-setup.md | docs/COMMANDS-CHEATSHEET.md README.md
2026-02-03T21:09:08 | commands | /home/lmiranda/claude-plugins-work/plugins/data-platform/commands/data-setup.md | docs/COMMANDS-CHEATSHEET.md README.md
2026-02-03T21:09:10 | commands | /home/lmiranda/claude-plugins-work/plugins/data-platform/commands/data-run.md | docs/COMMANDS-CHEATSHEET.md README.md
2026-02-03T21:09:10 | commands | /home/lmiranda/claude-plugins-work/plugins/contract-validator/commands/cv-setup.md | docs/COMMANDS-CHEATSHEET.md README.md
2026-02-03T21:09:11 | commands | /home/lmiranda/claude-plugins-work/plugins/projman/commands/pm-debug.md | docs/COMMANDS-CHEATSHEET.md README.md
2026-02-03T21:09:13 | agents | /home/lmiranda/claude-plugins-work/plugins/contract-validator/agents/full-validation.md | README.md CLAUDE.md
2026-02-03T21:09:14 | commands | /home/lmiranda/claude-plugins-work/plugins/data-platform/commands/data-ingest.md | docs/COMMANDS-CHEATSHEET.md README.md
2026-02-03T21:09:18 | commands | /home/lmiranda/claude-plugins-work/plugins/data-platform/commands/data-profile.md | docs/COMMANDS-CHEATSHEET.md README.md
2026-02-03T21:09:18 | commands | /home/lmiranda/claude-plugins-work/plugins/viz-platform/commands/viz-setup.md | docs/COMMANDS-CHEATSHEET.md README.md
2026-02-03T21:09:20 | commands | /home/lmiranda/claude-plugins-work/plugins/data-platform/commands/data-schema.md | docs/COMMANDS-CHEATSHEET.md README.md
2026-02-03T21:09:20 | commands | /home/lmiranda/claude-plugins-work/plugins/viz-platform/commands/viz-theme.md | docs/COMMANDS-CHEATSHEET.md README.md
2026-02-03T21:09:23 | commands | /home/lmiranda/claude-plugins-work/plugins/viz-platform/commands/viz-theme-new.md | docs/COMMANDS-CHEATSHEET.md README.md
2026-02-03T21:09:24 | commands | /home/lmiranda/claude-plugins-work/plugins/data-platform/commands/data-explain.md | docs/COMMANDS-CHEATSHEET.md README.md
2026-02-03T21:09:26 | commands | /home/lmiranda/claude-plugins-work/plugins/data-platform/commands/data-lineage.md | docs/COMMANDS-CHEATSHEET.md README.md
2026-02-03T21:09:26 | commands | /home/lmiranda/claude-plugins-work/plugins/viz-platform/commands/viz-theme-css.md | docs/COMMANDS-CHEATSHEET.md README.md
2026-02-03T21:09:29 | commands | /home/lmiranda/claude-plugins-work/plugins/viz-platform/commands/viz-chart.md | docs/COMMANDS-CHEATSHEET.md README.md
2026-02-03T21:09:32 | commands | /home/lmiranda/claude-plugins-work/plugins/viz-platform/commands/viz-chart-export.md | docs/COMMANDS-CHEATSHEET.md README.md
2026-02-03T21:09:33 | commands | /home/lmiranda/claude-plugins-work/plugins/data-platform/commands/data-review.md | docs/COMMANDS-CHEATSHEET.md README.md
2026-02-03T21:09:35 | commands | /home/lmiranda/claude-plugins-work/plugins/viz-platform/commands/viz-dashboard.md | docs/COMMANDS-CHEATSHEET.md README.md
2026-02-03T21:09:38 | commands | /home/lmiranda/claude-plugins-work/plugins/viz-platform/commands/viz-component.md | docs/COMMANDS-CHEATSHEET.md README.md
2026-02-03T21:09:40 | commands | /home/lmiranda/claude-plugins-work/plugins/viz-platform/commands/viz-breakpoints.md | docs/COMMANDS-CHEATSHEET.md README.md
2026-02-03T21:09:46 | commands | /home/lmiranda/claude-plugins-work/plugins/viz-platform/commands/design-review.md | docs/COMMANDS-CHEATSHEET.md README.md
2026-02-03T21:10:22 | commands | /home/lmiranda/claude-plugins-work/plugins/viz-platform/commands/accessibility-check.md | docs/COMMANDS-CHEATSHEET.md README.md

.env (1 changed line)
View File

@@ -1 +0,0 @@
GITEA_REPO=personal-projects/leo-claude-mktplace

.gitignore (8 changed lines)
View File

@@ -84,13 +84,6 @@ Thumbs.db
# Claude Code
.claude/settings.local.json
.claude/history/
.claude/backups/
# Doc Guardian transient files
.doc-guardian-queue
# Development convenience links
.marketplaces-link
# Logs
logs/
@@ -132,5 +125,4 @@ site/
*credentials*
*secret*
*token*
!**/token-budget-report.md
!.gitkeep

View File

@@ -1,24 +0,0 @@
{
"mcpServers": {
"gitea": {
"command": "./mcp-servers/gitea/run.sh",
"args": []
},
"netbox": {
"command": "./mcp-servers/netbox/run.sh",
"args": []
},
"viz-platform": {
"command": "./mcp-servers/viz-platform/run.sh",
"args": []
},
"data-platform": {
"command": "./mcp-servers/data-platform/run.sh",
"args": []
},
"contract-validator": {
"command": "./mcp-servers/contract-validator/run.sh",
"args": []
}
}
}

View File

@@ -1,8 +0,0 @@
{
"mcpServers": {
"gitea": {
"command": "./mcp-servers/gitea/run.sh",
"args": []
}
}
}

View File

@@ -3,6 +3,22 @@
"gitea": {
"command": "./mcp-servers/gitea/run.sh",
"args": []
},
"netbox": {
"command": "./mcp-servers/netbox/run.sh",
"args": []
},
"viz-platform": {
"command": "./mcp-servers/viz-platform/run.sh",
"args": []
},
"data-platform": {
"command": "./mcp-servers/data-platform/run.sh",
"args": []
},
"contract-validator": {
"command": "./mcp-servers/contract-validator/run.sh",
"args": []
}
}
}

View File

@@ -4,403 +4,6 @@ All notable changes to the Leo Claude Marketplace will be documented in this fil
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
## [Unreleased]
### Added
- **marketplace:** Lean/full profile switching to reduce token overhead
- New script: `scripts/switch-profile.sh` to toggle between profiles
- Lean profile: 6 plugins (projman, git-flow, pr-review, clarity-assist, code-sentinel, doc-guardian)
- Lean MCP: gitea only (reduces ~12k tokens of MCP server overhead)
- Full profile: all 12 plugins and 5 MCP servers
- New files: `.mcp-lean.json`, `.mcp-full.json`, `marketplace-lean.json`, `marketplace-full.json`
- Default is now the lean profile for reduced daily overhead (usage sketch after this list)
- **projman:** Token usage estimation reporting at sprint workflow boundaries
- New skill: `token-budget-report.md` with MCP overhead and skill loading estimation model
- Token report displayed at end of `/sprint-plan` and `/sprint-close`
- On-demand via `/sprint-status --tokens`
- Helps identify which phases and components consume the most context budget
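A possible invocation of the profile switcher described above (the `lean`/`full` arguments are assumptions; the changelog does not document the script's CLI):
```bash
# Hedged usage sketch; argument names are assumed, not documented here.
./scripts/switch-profile.sh lean   # 6 plugins, gitea MCP only (the new default)
./scripts/switch-profile.sh full   # all 12 plugins and 5 MCP servers
```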
### Changed
- **projman:** `/sprint-status` now uses conditional skill loading for reduced token overhead
- Only loads `mcp-tools-reference.md` by default (~1.5k tokens vs ~5k)
- `--diagram` flag loads `dependency-management.md` and `progress-tracking.md`
- `--tokens` flag loads `token-budget-report.md`
- Estimated savings: ~3.5k tokens per status check
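The flag-dependent loading above translates to, for example:
```bash
# Conditional skill loading for /sprint-status (flags from the entry above)
/sprint-status             # loads only mcp-tools-reference.md (~1.5k tokens)
/sprint-status --diagram   # also loads dependency-management.md and progress-tracking.md
/sprint-status --tokens    # also loads token-budget-report.md
```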
### Fixed
- **docs:** Stale command references in data-platform visual-header.md and viz-platform claude-md-integration.md updated to v7.0.0 namespaced names
- **docs:** git-flow visual-header.md and git-status.md quick actions updated to namespaced commands
- **docs:** projman/CONFIGURATION.md and docs/DEBUGGING-CHECKLIST.md updated with correct command names
---
## [7.0.0] - 2026-02-03
### BREAKING CHANGES
#### Command Namespace Rename
All generic command names are now prefixed with their plugin's namespace to eliminate collisions across the marketplace. This is a **breaking change** for consuming projects — update your CLAUDE.md integration snippets.
**Full Rename Map:**
| Plugin | Old | New |
|--------|-----|-----|
| projman | `/setup` | `/pm-setup` |
| projman | `/review` | `/pm-review` |
| projman | `/test` | `/pm-test` |
| projman | `/debug` | `/pm-debug` |
| git-flow | `/commit` | `/git-commit` |
| git-flow | `/commit-push` | `/git-commit-push` |
| git-flow | `/commit-merge` | `/git-commit-merge` |
| git-flow | `/commit-sync` | `/git-commit-sync` |
| pr-review | `/initial-setup` | `/pr-setup` |
| cmdb-assistant | `/initial-setup` | `/cmdb-setup` |
| data-platform | `/initial-setup` | `/data-setup` |
| data-platform | `/run` | `/data-run` |
| data-platform | `/ingest` | `/data-ingest` |
| data-platform | `/profile` | `/data-profile` |
| data-platform | `/schema` | `/data-schema` |
| data-platform | `/explain` | `/data-explain` |
| data-platform | `/lineage` | `/data-lineage` |
| viz-platform | `/initial-setup` | `/viz-setup` |
| viz-platform | `/theme` | `/viz-theme` |
| viz-platform | `/theme-new` | `/viz-theme-new` |
| viz-platform | `/theme-css` | `/viz-theme-css` |
| viz-platform | `/chart` | `/viz-chart` |
| viz-platform | `/chart-export` | `/viz-chart-export` |
| viz-platform | `/dashboard` | `/viz-dashboard` |
| viz-platform | `/component` | `/viz-component` |
| viz-platform | `/breakpoints` | `/viz-breakpoints` |
| contract-validator | `/initial-setup` | `/cv-setup` |
**Migration:** Update your project's CLAUDE.md integration snippets to use the new command names. Run `/plugin list` to verify installed plugins are using v7.0.0+.
**Unchanged:** Commands already using plugin-namespaced prefixes (`/sprint-*`, `/cmdb-*`, `/labels-sync`, `/branch-*`, `/git-status`, `/git-config`, `/pr-review`, `/pr-summary`, `/pr-findings`, `/pr-diff`, `/project-init`, `/project-sync`, `/config-*`, `/design-*`, `/data-quality`, `/data-review`, `/data-gate`, `/lineage-viz`, `/dbt-test`, `/accessibility-check`, `/validate-contracts`, `/check-agent`, `/list-interfaces`, `/dependency-graph`, `/doc-audit`, `/doc-sync`, `/security-scan`, `/refactor`, `/refactor-dry`, `/clarify`, `/suggest-version`, `/proposal-status`, `/rfc`, `/change-audit`, `/ip-conflicts`) are **not affected**.
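For a consuming project, the migration mentioned above can amount to a search-and-replace over the CLAUDE.md integration snippet. The sed sketch below covers only a few of the renames and is illustrative, not exhaustive; check the full rename map before running anything similar:
```bash
# Illustrative migration of a few renamed git-flow commands (sketch only).
sed -i \
  -e 's|/commit-push|/git-commit-push|g' \
  -e 's|/commit-merge|/git-commit-merge|g' \
  -e 's|/commit-sync|/git-commit-sync|g' \
  CLAUDE.md
# Then, inside Claude Code, verify plugin versions:
/plugin list
```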
### Added
#### Plan-Then-Batch Skill Optimization (projman)
New execution pattern that separates cognitive work from mechanical API operations, reducing skill-related token consumption by ~76-83% during sprint workflows.
- **`skills/batch-execution.md`** — New skill defining the plan-then-batch protocol:
- Phase 1: Cognitive work with all skills loaded
- Phase 2: Execution manifest (structured plan of all API operations)
- Phase 3: Batch execute API calls using only frontmatter skills
- Phase 4: Batch report with success/failure summary
- Error handling: continue on individual failures, report at end
- **Frontmatter skill promotion:**
- Planner agent: `mcp-tools-reference` and `batch-execution` promoted to frontmatter (auto-injected, zero re-read cost)
- Orchestrator agent: same promotion
- Eliminates per-operation skill file re-reads during API execution loops
- **Phase-based skill loading:**
- Planner: 3 phases (validation → analysis → approval) with explicit "read once" instructions
- Orchestrator: 2 phases (startup → dispatch) with same pattern
- New `## Skill Loading Protocol` section replaces flat `## Skills to Load` in agent files
### Changed
- **`planning-workflow.md`** — Steps 8-10 restructured:
- Step 8: "Draft Issue Specifications" (no API calls — resolve all parameters first)
- Step 8a: "Batch Execute Issue Creation" (tight API loop, frontmatter skills only)
- Step 9: Merged into Step 8a (dependencies created in batch)
- Step 10: Milestone creation moved before batch (must exist for assignment)
- **Agent matrix updated:**
- Planner: `body text (14)``frontmatter (2) + body text (12)`
- Orchestrator: `body text (12)``frontmatter (2) + body text (10)`
- **`docs/CONFIGURATION.md`** — New "Phase-Based Skill Loading" subsection documenting the pattern
### Token Impact
| Scenario | Before | After | Savings |
|----------|--------|-------|---------|
| 6-issue sprint (planning) | ~23,800 lines | ~5,600 lines | ~76% |
| 10-issue sprint (planning) | ~35,000 lines | ~7,000 lines | ~80% |
| 8-issue status updates (orchestrator) | ~9,600 lines | ~1,600 lines | ~83% |
---
## [5.10.0] - 2026-02-03
### Added
#### NetBox MCP Server: Module-Based Tool Filtering
Environment-variable-driven module filtering to reduce token consumption:
- **New config option**: `NETBOX_ENABLED_MODULES` in `~/.config/claude/netbox.env`
- **Token savings**: ~15,000 tokens (from ~19,810 to ~4,500) with recommended config
- **Default behavior**: All modules enabled if env var unset (backward compatible)
- **Startup logging**: Shows enabled modules and tool count on initialization
- **Routing guard**: Clear error message when calling disabled module's tools
**Recommended configuration for cmdb-assistant users:**
```bash
NETBOX_ENABLED_MODULES=dcim,ipam,virtualization,extras
```
This enables ~43 tools covering all cmdb-assistant commands while staying well below the 25K token warning threshold.
### Fixed
#### cmdb-assistant Documentation: Incorrect Tool Names
Fixed documentation referencing non-existent `virtualization_*` tool names:
| File | Wrong | Correct |
|------|-------|---------|
| `claude-md-integration.md` | `virtualization_list_virtual_machines` | `virt_list_vms` |
| `claude-md-integration.md` | `virtualization_create_virtual_machine` | `virt_create_vm` |
| `cmdb-search.md` | `virtualization_list_virtual_machines` | `virt_list_vms` |
Also fixed NetBox README.md tool name references for virtualization, wireless, and circuits modules.
#### Gitea MCP Server: Standardized Build Backend
Changed `mcp-servers/gitea/pyproject.toml` from hatchling to setuptools:
- Matches all other MCP servers (contract-validator, viz-platform, data-platform)
- Updated license format to PEP 639 compliance
- Added pytest configuration for consistency
---
## [5.9.0] - 2026-02-03
### Added
#### Plugin Installation Scripts
New scripts for installing marketplace plugins into consumer projects:
- **`scripts/install-plugin.sh`** — Install a plugin to a consumer project
- Adds MCP server entry to target's `.mcp.json` (if plugin has MCP server)
- Appends integration snippet to target's `CLAUDE.md`
- Idempotent: safe to run multiple times
- Validates plugin exists and target path is valid
- **`scripts/uninstall-plugin.sh`** — Remove a plugin from a consumer project
- Removes MCP server entry from `.mcp.json`
- Removes integration section from `CLAUDE.md`
- **`scripts/list-installed.sh`** — Show installed plugins in a project
- Lists fully installed, partially installed, and available plugins
- Shows plugin versions and descriptions
**Usage:**
```bash
./scripts/install-plugin.sh data-platform ~/projects/personal-portfolio
./scripts/list-installed.sh ~/projects/personal-portfolio
./scripts/uninstall-plugin.sh data-platform ~/projects/personal-portfolio
```
**Documentation:** `docs/CONFIGURATION.md` updated with "Installing Plugins to Consumer Projects" section.
### Fixed
#### Plugin Installation Scripts — MCP Mapping & Section Markers
**MCP Server Mapping:**
- Added `mcp_servers` field to plugin.json for plugins that use shared MCP servers
- `projman` and `pr-review` now correctly install `gitea` MCP server
- `cmdb-assistant` now correctly installs `netbox` MCP server
- Scripts read MCP server names from plugin.json instead of assuming plugin name = server name
**CLAUDE.md Section Markers:**
- Install script now wraps integration content with HTML comment markers:
`<!-- BEGIN marketplace-plugin: {name} -->` and `<!-- END marketplace-plugin: {name} -->`
- Uninstall script uses markers for precise section removal (no more code block false positives)
- Backward compatible: falls back to legacy header detection for pre-marker installations
**Plugins updated with `mcp_servers` field:**
- `projman``["gitea"]`
- `pr-review``["gitea"]`
- `cmdb-assistant``["netbox"]`
- `data-platform``["data-platform"]`
- `viz-platform``["viz-platform"]`
- `contract-validator``["contract-validator"]`
#### Agent Model Selection
Per-agent model selection using Claude Code's now-supported `model` frontmatter field.
- All 25 marketplace agents assigned appropriate model (`sonnet`, `haiku`, or `inherit`)
- Model assignment based on reasoning depth, tool complexity, and latency requirements
- Documentation added to `CLAUDE.md` and `docs/CONFIGURATION.md`
**Supported values:** `sonnet` (default), `opus`, `haiku`, `inherit`
**Model assignments:**
| Model | Agent Types |
|-------|-------------|
| sonnet | Planner, Orchestrator, Executor, Code Reviewer, Coordinator, Security Reviewers, Data Advisor, Design Reviewer, etc. |
| haiku | Maintainability Auditor, Test Validator, Component Check, Theme Setup, Git Assistant, Data Ingestion, Agent Check |
### Fixed
#### Agent Frontmatter Standardization
- Fixed viz-platform and data-platform agents using non-standard `agent:` field (now `name:`)
- Removed non-standard `triggers:` field from domain agents (trigger info already in agent body)
- Added missing frontmatter to 13 agents across pr-review, viz-platform, contract-validator, clarity-assist, git-flow, doc-guardian, code-sentinel, cmdb-assistant, and data-platform
- All 25 agents now have consistent `name`, `description`, and `model` fields
### Changed
#### Agent Frontmatter Hardening v3
Comprehensive agent-level configuration using Claude Code's supported frontmatter fields.
**permissionMode added to all 25 agents:**
- `bypassPermissions` (1): Executor — full autonomy with code-sentinel + Code Reviewer safety nets
- `acceptEdits` (7): Orchestrator, Data Ingestion, Theme Setup, Refactor Advisor, Doc Analyzer, Git Assistant, Maintainer
- `default` (7): Planner, Code Reviewer, Data Advisor, Layout Builder, Full Validation, Clarity Coach, CMDB Assistant
- `plan` (10): All pr-review agents (5), Data Analysis, Design Reviewer, Component Check, Agent Check, Security Reviewer (code-sentinel)
**disallowedTools added to 12 agents:**
- All `plan`-mode agents (10) + Code Reviewer + Clarity Coach receive `disallowedTools: Write, Edit, MultiEdit`
- Enforces read-only contracts at platform level (defense-in-depth with `permissionMode`)
**Model promotions:**
- Planner: `sonnet``opus` (architectural reasoning benefits from deeper analysis)
- Code Reviewer: `sonnet``opus` (quality gate benefits from thorough review)
**skills frontmatter on 3 agents:**
- Executor: 7 safety-critical skills auto-injected (branch-security, runaway-detection, etc.)
- Code Reviewer: 4 review skills auto-injected
- Maintainer: 2 config skills auto-injected
- Body text `## Skills to Load` removed for these agents to avoid duplication
**Documentation:**
- `CLAUDE.md` and `docs/CONFIGURATION.md` updated with complete agent configuration matrix
- New subsections: permissionMode Guide, disallowedTools Guide, skills Frontmatter Guide
---
## [5.8.0] - 2026-02-02
### Added
#### claude-config-maintainer v1.2.0 - Settings Audit Feature
New commands for auditing and optimizing `settings.local.json` permission configurations:
- **`/config-audit-settings`** — Audit `settings.local.json` permissions with 100-point scoring across redundancy, coverage, safety alignment, and profile fit
- **`/config-optimize-settings`** — Apply permission optimizations with dry-run, named profiles (`conservative`, `reviewed`, `autonomous`), and consolidation modes
- **`/config-permissions-map`** — Generate Mermaid diagram of review layer coverage and permission gaps
- **`skills/settings-optimization.md`** — Comprehensive skill for permission pattern analysis, consolidation rules, review-layer-aware recommendations, and named profiles
**Key Features:**
- Settings Efficiency Score (100 points) alongside existing CLAUDE.md score
- Review layer verification — agent reads `hooks/hooks.json` from installed plugins before recommending auto-allow patterns
- Three named profiles: `conservative` (prompts for most writes), `reviewed` (for projects with ≥2 review layers), `autonomous` (sandboxed environments)
- Pattern consolidation detection: duplicates, subsets, merge candidates, stale entries, conflicts
#### Projman Hardening Sprint
Targeted improvements to safety gates, command structure, lifecycle tracking, and cross-plugin contracts.
**Sprint Lifecycle State Machine:**
- New `skills/sprint-lifecycle.md` - defines valid states and transitions via milestone metadata
- States: idle -> Sprint/Planning -> Sprint/Executing -> Sprint/Reviewing -> idle
- All sprint commands check and set lifecycle state on entry/exit
- Out-of-order calls produce warnings with guidance, `--force` override available
**Sprint Dispatch Log:**
- Orchestrator now maintains a structured dispatch log during execution
- Records task dispatch, completion, failure, gate checks, and resume events
- Enables timeline reconstruction after interrupted sessions
**Gate Contract Versioning:**
- Gate commands (`/design-gate`, `/data-gate`) declare `gate_contract: v1` in frontmatter
- `domain-consultation.md` Gate Command Reference includes expected contract version
- `validate_workflow_integration` now checks contract version compatibility
- Mismatch produces WARNING, missing contract produces INFO suggestion
**Shared Visual Output Skill:**
- New `skills/visual-output.md` - single source of truth for projman visual headers
- All 4 agent files reference the skill instead of inline templates
- Phase Registry maps agents to emoji and phase names
### Changed
**Sprint Approval Gate Hardened:**
- Approval is now a hard block, not a warning (was "recommended", now required)
- `--force` flag added to bypass in emergencies (logged to milestone)
- Consistent language across sprint-approval.md, sprint-start.md, and orchestrator.md
**RFC Commands Normalized:**
- 5 individual commands (`/rfc-create`, `/rfc-list`, `/rfc-review`, `/rfc-approve`, `/rfc-reject`) consolidated into `/rfc create|list|review|approve|reject`
- `/clear-cache` absorbed into `/setup --clear-cache`
- Command count reduced from 17 to 12
**`/test` Command Documentation Expanded:**
- Sprint integration section (pre-close verification workflow)
- Concrete usage examples for all modes
- Edge cases table
- DO NOT rules for both modes
### Removed
- `plugins/projman/commands/rfc-create.md` (replaced by `/rfc create`)
- `plugins/projman/commands/rfc-list.md` (replaced by `/rfc list`)
- `plugins/projman/commands/rfc-review.md` (replaced by `/rfc review`)
- `plugins/projman/commands/rfc-approve.md` (replaced by `/rfc approve`)
- `plugins/projman/commands/rfc-reject.md` (replaced by `/rfc reject`)
- `plugins/projman/commands/clear-cache.md` (replaced by `/setup --clear-cache`)
---
## [5.7.1] - 2026-02-02
### Added
- **contract-validator**: New `validate_workflow_integration` MCP tool — validates domain plugins expose required advisory interfaces (gate command, review command, advisory agent)
- **contract-validator**: New `MISSING_INTEGRATION` issue type for workflow integration validation
### Fixed
- `scripts/setup.sh` banner version updated from v5.1.0 to v5.7.1
### Reverted
- **marketplace.json**: Removed `integrates_with` field — Claude Code schema does not support custom plugin fields (causes marketplace load failure)
---
## [5.7.0] - 2026-02-02
### Added
- **data-platform**: New `data-advisor` agent for data integrity, schema, and dbt compliance validation
- **data-platform**: New `data-integrity-audit.md` skill defining audit rules, severity levels, and scanning strategies
- **data-platform**: New `/data-gate` command for binary pass/fail data integrity gates (projman integration)
- **data-platform**: New `/data-review` command for comprehensive data integrity audits
### Changed
- Domain Advisory Pattern now fully operational for both Viz and Data domains
- projman orchestrator `Domain/Data` gates now resolve to the live `/data-gate` command (previously fell through to a "gate unavailable" warning)
---
## [5.6.0] - 2026-02-01
### Added
- **Domain Advisory Pattern**: Cross-plugin integration enabling projman to consult domain-specific plugins during sprint lifecycle
- **projman**: New `domain-consultation.md` skill for domain detection and gate protocols
- **viz-platform**: New `design-reviewer` agent for design system compliance auditing
- **viz-platform**: New `design-system-audit.md` skill defining audit rules and severity levels
- **viz-platform**: New `/design-review` command for detailed design system audits
- **viz-platform**: New `/design-gate` command for binary pass/fail validation gates
- **Labels**: New `Domain/Viz` and `Domain/Data` labels for domain routing
### Changed
- **projman planner**: Now loads domain-consultation skill and performs domain detection during planning
- **projman orchestrator**: Now runs domain gates before marking Domain/* labeled issues as complete
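A minimal sketch of that routing, assuming a plain label-to-command mapping (the real logic lives in the orchestrator agent and the domain-consultation skill):

```python
# Gate commands per domain label, as listed in the entries above.
DOMAIN_GATES = {
    "Domain/Viz": "/design-gate",
    "Domain/Data": "/data-gate",
}

def gates_for_issue(labels: list[str]) -> list[str]:
    """Return the gate commands to run before an issue may be marked complete."""
    return [DOMAIN_GATES[label] for label in labels if label in DOMAIN_GATES]

print(gates_for_issue(["Kind/Feature", "Domain/Viz"]))  # ['/design-gate']
```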
---
## [5.5.0] - 2026-02-01
### Added

View File

@@ -129,7 +129,7 @@ These plugins exist in source but are **NOT relevant** to this project's workflo
| **viz-platform** | For dashboard projects (Dash, Plotly) |
| **cmdb-assistant** | For infrastructure projects (NetBox) |
**Do NOT suggest** `/data-ingest`, `/data-profile`, `/viz-chart`, `/cmdb-*` commands - they don't apply here.
**Do NOT suggest** `/ingest`, `/profile`, `/chart`, `/cmdb-*` commands - they don't apply here.
### Key Distinction
@@ -146,7 +146,7 @@ When user says "fix the sprint-plan command", edit the SOURCE code.
## Project Overview
**Repository:** leo-claude-mktplace
**Version:** 7.0.0
**Version:** 5.4.0
**Status:** Production Ready
A plugin marketplace for Claude Code containing:
@@ -161,7 +161,7 @@ A plugin marketplace for Claude Code containing:
| `code-sentinel` | Security scanning and code refactoring tools | 1.0.1 |
| `claude-config-maintainer` | CLAUDE.md optimization and maintenance | 1.0.0 |
| `cmdb-assistant` | NetBox CMDB integration for infrastructure management | 1.2.0 |
| `data-platform` | pandas, PostgreSQL, and dbt integration for data engineering | 1.3.0 |
| `data-platform` | pandas, PostgreSQL, and dbt integration for data engineering | 1.1.0 |
| `viz-platform` | DMC validation, Plotly charts, and theming for dashboards | 1.1.0 |
| `contract-validator` | Cross-plugin compatibility validation and agent verification | 1.1.0 |
| `project-hygiene` | Post-task cleanup automation via hooks | 0.1.0 |
@@ -180,16 +180,16 @@ A plugin marketplace for Claude Code containing:
| Category | Commands |
|----------|----------|
| **Setup** | `/pm-setup` (modes: `--full`, `--quick`, `--sync`) |
| **Setup** | `/setup` (modes: `--full`, `--quick`, `--sync`) |
| **Sprint** | `/sprint-plan`, `/sprint-start`, `/sprint-status` (with `--diagram`), `/sprint-close` |
| **Quality** | `/pm-review`, `/pm-test` (modes: `run`, `gen`) |
| **Quality** | `/review`, `/test` (modes: `run`, `gen`) |
| **Versioning** | `/suggest-version` |
| **PR Review** | `/pr-review`, `/pr-summary`, `/pr-findings`, `/pr-diff` |
| **Docs** | `/doc-audit`, `/doc-sync`, `/changelog-gen`, `/doc-coverage`, `/stale-docs` |
| **Security** | `/security-scan`, `/refactor`, `/refactor-dry` |
| **Config** | `/config-analyze`, `/config-optimize`, `/config-diff`, `/config-lint` |
| **Validation** | `/validate-contracts`, `/check-agent`, `/list-interfaces`, `/dependency-graph` |
| **Debug** | `/pm-debug` (modes: `report`, `review`) |
| **Debug** | `/debug` (modes: `report`, `review`) |
### Plugin Commands - NOT RELEVANT to This Project
@@ -197,8 +197,8 @@ These commands are being developed but don't apply to this project's workflow:
| Category | Commands | For Projects Using |
|----------|----------|-------------------|
| **Data** | `/data-ingest`, `/data-profile`, `/data-schema`, `/data-lineage`, `/dbt-test` | pandas, PostgreSQL, dbt |
| **Visualization** | `/viz-component`, `/viz-chart`, `/viz-dashboard`, `/viz-theme` | Dash, Plotly dashboards |
| **Data** | `/ingest`, `/profile`, `/schema`, `/lineage`, `/dbt-test` | pandas, PostgreSQL, dbt |
| **Visualization** | `/component`, `/chart`, `/dashboard`, `/theme` | Dash, Plotly dashboards |
| **CMDB** | `/cmdb-search`, `/cmdb-device`, `/cmdb-sync` | NetBox infrastructure |
## Repository Structure
@@ -271,61 +271,6 @@ leo-claude-mktplace/
| **Executor** | Implementation-focused | Code implementation, branch management, MR creation |
| **Code Reviewer** | Thorough, practical | Pre-close quality review, security scan, test verification |
### Agent Frontmatter Configuration
Agents specify their configuration in frontmatter using Claude Code's supported fields. Reference: https://code.claude.com/docs/en/sub-agents
**Supported frontmatter fields:**
| Field | Required | Default | Description |
|-------|----------|---------|-------------|
| `name` | Yes | — | Unique identifier, lowercase + hyphens |
| `description` | Yes | — | When Claude should delegate to this subagent |
| `model` | No | `inherit` | `sonnet`, `opus`, `haiku`, or `inherit` |
| `permissionMode` | No | `default` | Controls permission prompts: `default`, `acceptEdits`, `dontAsk`, `bypassPermissions`, `plan` |
| `disallowedTools` | No | none | Comma-separated tools to remove from agent's toolset |
| `skills` | No | none | Comma-separated skills auto-injected into context at startup |
| `hooks` | No | none | Lifecycle hooks scoped to this subagent |
**Complete agent matrix:**
| Plugin | Agent | `model` | `permissionMode` | `disallowedTools` | `skills` |
|--------|-------|---------|-------------------|--------------------|----------|
| projman | planner | opus | default | — | frontmatter (2) + body text (12) |
| projman | orchestrator | sonnet | acceptEdits | — | frontmatter (2) + body text (10) |
| projman | executor | sonnet | bypassPermissions | — | frontmatter (7) |
| projman | code-reviewer | opus | default | Write, Edit, MultiEdit | frontmatter (4) |
| pr-review | coordinator | sonnet | plan | Write, Edit, MultiEdit | — |
| pr-review | security-reviewer | sonnet | plan | Write, Edit, MultiEdit | — |
| pr-review | performance-analyst | sonnet | plan | Write, Edit, MultiEdit | — |
| pr-review | maintainability-auditor | haiku | plan | Write, Edit, MultiEdit | — |
| pr-review | test-validator | haiku | plan | Write, Edit, MultiEdit | — |
| data-platform | data-advisor | sonnet | default | — | — |
| data-platform | data-analysis | sonnet | plan | Write, Edit, MultiEdit | — |
| data-platform | data-ingestion | haiku | acceptEdits | — | — |
| viz-platform | design-reviewer | sonnet | plan | Write, Edit, MultiEdit | — |
| viz-platform | layout-builder | sonnet | default | — | — |
| viz-platform | component-check | haiku | plan | Write, Edit, MultiEdit | — |
| viz-platform | theme-setup | haiku | acceptEdits | — | — |
| contract-validator | full-validation | sonnet | default | — | — |
| contract-validator | agent-check | haiku | plan | Write, Edit, MultiEdit | — |
| code-sentinel | security-reviewer | sonnet | plan | Write, Edit, MultiEdit | — |
| code-sentinel | refactor-advisor | sonnet | acceptEdits | — | — |
| doc-guardian | doc-analyzer | sonnet | acceptEdits | — | — |
| clarity-assist | clarity-coach | sonnet | default | Write, Edit, MultiEdit | — |
| git-flow | git-assistant | haiku | acceptEdits | — | — |
| claude-config-maintainer | maintainer | sonnet | acceptEdits | — | frontmatter (2) |
| cmdb-assistant | cmdb-assistant | sonnet | default | — | — |
**Design principles:**
- `bypassPermissions` is granted to exactly ONE agent (Executor) which has code-sentinel PreToolUse hook + Code Reviewer downstream as safety nets.
- `plan` mode is assigned to all pure analysis agents (pr-review, read-only validators).
- `disallowedTools: Write, Edit, MultiEdit` provides defense-in-depth on agents that should never write files.
- `skills` frontmatter is used for agents with ≤7 skills where guaranteed loading is safety-critical. Agents with 8+ skills use body text `## Skills to Load` for selective loading.
- `hooks` (agent-scoped) is reserved for future use (v6.0+).
Override any field by editing the agent's `.md` file in `plugins/{plugin}/agents/`.
### MCP Server Tools (Gitea)
| Category | Tools |
@@ -449,12 +394,12 @@ See `docs/DEBUGGING-CHECKLIST.md` for systematic troubleshooting.
| Symptom | Likely Cause | Fix |
|---------|--------------|-----|
| "X MCP servers failed" | Missing venv in installed path | `cd ~/.claude/plugins/marketplaces/leo-claude-mktplace && ./scripts/setup.sh` |
| MCP tools not available | Venv missing or .mcp.json misconfigured | Run `/pm-debug report` to diagnose |
| MCP tools not available | Venv missing or .mcp.json misconfigured | Run `/debug report` to diagnose |
| Changes not taking effect | Editing source, not installed | Reinstall plugin or edit installed path |
**Debug Commands:**
- `/pm-debug report` - Run full diagnostics, create issue if needed
- `/pm-debug review` - Investigate and propose fixes
- `/debug report` - Run full diagnostics, create issue if needed
- `/debug review` - Investigate and propose fixes
## Versioning Workflow
@@ -508,4 +453,4 @@ The script will:
---
**Last Updated:** 2026-02-03
**Last Updated:** 2026-01-30

View File

@@ -1,4 +1,4 @@
# Leo Claude Marketplace - v7.0.0
# Leo Claude Marketplace - v5.5.0
A collection of Claude Code plugins for project management, infrastructure automation, and development workflows.
@@ -12,7 +12,6 @@ A collection of Claude Code plugins for project management, infrastructure autom
AI-guided sprint planning with full Gitea integration. Transforms a proven 15-sprint workflow into a distributable plugin.
- Four-agent model: Planner, Orchestrator, Executor, Code Reviewer
- Plan-then-batch execution: skills loaded once per phase, API calls batched for ~80% token savings
- Intelligent label suggestions from 43-label taxonomy
- Lessons learned capture via Gitea Wiki
- Native issue dependencies with parallel execution
@@ -20,7 +19,7 @@ AI-guided sprint planning with full Gitea integration. Transforms a proven 15-sp
- Branch-aware security (development/staging/production)
- Pre-sprint-close code quality review and test verification
**Commands:** `/sprint-plan`, `/sprint-start`, `/sprint-status`, `/sprint-close`, `/labels-sync`, `/pm-setup`, `/pm-review`, `/pm-test`, `/pm-debug`, `/suggest-version`, `/proposal-status`, `/rfc`
**Commands:** `/sprint-plan`, `/sprint-start`, `/sprint-status`, `/sprint-close`, `/labels-sync`, `/setup`, `/review`, `/test`, `/debug`, `/suggest-version`, `/proposal-status`, `/clear-cache`, `/rfc-create`, `/rfc-list`, `/rfc-review`, `/rfc-approve`, `/rfc-reject`
#### [git-flow](./plugins/git-flow) *NEW in v3.0.0*
**Git Workflow Automation**
@@ -33,7 +32,7 @@ Smart git operations with intelligent commit messages and branch management.
- Merge and cleanup automation
- Protected branch awareness
**Commands:** `/git-commit`, `/git-commit-push`, `/git-commit-merge`, `/git-commit-sync`, `/branch-start`, `/branch-cleanup`, `/git-status`, `/git-config`
**Commands:** `/commit`, `/commit-push`, `/commit-merge`, `/commit-sync`, `/branch-start`, `/branch-cleanup`, `/git-status`, `/git-config`
#### [pr-review](./plugins/pr-review) *NEW in v3.0.0*
**Multi-Agent PR Review**
@@ -45,14 +44,14 @@ Comprehensive pull request review using specialized agents.
- Actionable feedback with suggested fixes
- Gitea integration for automated review submission
**Commands:** `/pr-review`, `/pr-summary`, `/pr-findings`, `/pr-diff`, `/pr-setup`, `/project-init`, `/project-sync`
**Commands:** `/pr-review`, `/pr-summary`, `/pr-findings`, `/pr-diff`, `/initial-setup`, `/project-init`, `/project-sync`
#### [claude-config-maintainer](./plugins/claude-config-maintainer)
**CLAUDE.md and Settings Optimization**
**CLAUDE.md Optimization and Maintenance**
Analyze, optimize, and create CLAUDE.md configuration files. Audit and optimize settings.local.json permissions.
Analyze, optimize, and create CLAUDE.md configuration files for Claude Code projects.
**Commands:** `/analyze`, `/optimize`, `/init`, `/config-diff`, `/config-lint`, `/config-audit-settings`, `/config-optimize-settings`, `/config-permissions-map`
**Commands:** `/config-analyze`, `/config-optimize`, `/config-init`, `/config-diff`, `/config-lint`
#### [contract-validator](./plugins/contract-validator) *NEW in v5.0.0*
**Cross-Plugin Compatibility Validation**
@@ -65,7 +64,7 @@ Validate plugin marketplaces for command conflicts, tool overlaps, and broken ag
- Data flow validation for agent sequences
- Markdown or JSON reports with actionable suggestions
**Commands:** `/validate-contracts`, `/check-agent`, `/list-interfaces`, `/dependency-graph`, `/cv-setup`
**Commands:** `/validate-contracts`, `/check-agent`, `/list-interfaces`, `/dependency-graph`, `/initial-setup`
### Productivity
@@ -108,7 +107,7 @@ Security vulnerability detection and code refactoring tools.
Full CRUD operations for network infrastructure management directly from Claude Code.
**Commands:** `/cmdb-setup`, `/cmdb-search`, `/cmdb-device`, `/cmdb-ip`, `/cmdb-site`, `/cmdb-audit`, `/cmdb-register`, `/cmdb-sync`, `/cmdb-topology`, `/change-audit`, `/ip-conflicts`
**Commands:** `/initial-setup`, `/cmdb-search`, `/cmdb-device`, `/cmdb-ip`, `/cmdb-site`, `/cmdb-audit`, `/cmdb-register`, `/cmdb-sync`, `/cmdb-topology`, `/change-audit`, `/ip-conflicts`
### Data Engineering
@@ -123,7 +122,7 @@ Comprehensive data engineering toolkit with persistent DataFrame storage.
- 100k row limit with chunking support
- Auto-detection of dbt projects
**Commands:** `/data-ingest`, `/data-profile`, `/data-schema`, `/data-explain`, `/data-lineage`, `/lineage-viz`, `/data-run`, `/dbt-test`, `/data-quality`, `/data-review`, `/data-gate`, `/data-setup`
**Commands:** `/ingest`, `/profile`, `/schema`, `/explain`, `/lineage`, `/lineage-viz`, `/run`, `/dbt-test`, `/data-quality`, `/initial-setup`
### Visualization
@@ -139,22 +138,7 @@ Visualization toolkit with version-locked component validation and design token
- 5 Page tools for multi-page app structure
- Dual theme storage: user-level and project-level
**Commands:** `/viz-chart`, `/viz-chart-export`, `/viz-dashboard`, `/viz-theme`, `/viz-theme-new`, `/viz-theme-css`, `/viz-component`, `/accessibility-check`, `/viz-breakpoints`, `/design-review`, `/design-gate`, `/viz-setup`
## Domain Advisory Pattern
The marketplace supports cross-plugin domain advisory integration:
- **Domain Detection**: projman automatically detects when issues involve specialized domains (frontend/viz, data engineering)
- **Acceptance Criteria**: Domain-specific acceptance criteria are added to issues during planning
- **Execution Gates**: Domain validation gates (`/design-gate`, `/data-gate`) run before issue completion
- **Extensible**: New domains can be added by creating advisory agents and gate commands
**Current Domains:**
| Domain | Plugin | Gate Command |
|--------|--------|--------------|
| Visualization | viz-platform | `/design-gate` |
| Data | data-platform | `/data-gate` |
**Commands:** `/chart`, `/chart-export`, `/dashboard`, `/theme`, `/theme-new`, `/theme-css`, `/component`, `/accessibility-check`, `/breakpoints`, `/initial-setup`
## MCP Servers
@@ -216,7 +200,7 @@ Cross-plugin compatibility validation tools.
| Category | Tools |
|----------|-------|
| Parse | `parse_plugin_interface`, `parse_claude_md_agents` |
| Validation | `validate_compatibility`, `validate_agent_refs`, `validate_data_flow`, `validate_workflow_integration` |
| Validation | `validate_compatibility`, `validate_agent_refs`, `validate_data_flow` |
| Report | `generate_compatibility_report`, `list_issues` |
## Installation
@@ -255,7 +239,7 @@ Add to `.claude/settings.json` in your target project:
After installing plugins, run the setup wizard:
```
/pm-setup
/initial-setup
```
The wizard handles everything:
@@ -313,10 +297,10 @@ After installing plugins, the `/plugin` command may show `(no content)` - this i
| clarity-assist | `/clarity-assist:clarify` |
| doc-guardian | `/doc-guardian:doc-audit` |
| code-sentinel | `/code-sentinel:security-scan` |
| claude-config-maintainer | `/claude-config-maintainer:analyze` |
| claude-config-maintainer | `/claude-config-maintainer:config-analyze` |
| cmdb-assistant | `/cmdb-assistant:cmdb-search` |
| data-platform | `/data-platform:data-ingest` |
| viz-platform | `/viz-platform:viz-chart` |
| data-platform | `/data-platform:ingest` |
| viz-platform | `/viz-platform:chart` |
| contract-validator | `/contract-validator:validate-contracts` |
## Repository Structure

View File

@@ -182,42 +182,10 @@ MCP servers are **shared at repository root** and configured in `.mcp.json`.
| MCP configuration | `.mcp.json` | `.mcp.json` (at repo root) |
| Shared MCP server | `mcp-servers/{server}/` | `mcp-servers/gitea/` |
| MCP server code | `mcp-servers/{server}/mcp_server/` | `mcp-servers/gitea/mcp_server/` |
| MCP venv (local) | `mcp-servers/{server}/.venv/` | `mcp-servers/gitea/.venv/` |
| MCP venv | `mcp-servers/{server}/.venv/` | `mcp-servers/gitea/.venv/` |
**Note:** Plugins do NOT have their own `mcp-servers/` directories. All MCP servers are shared at root and configured via `.mcp.json`.
### MCP Venv Paths - CRITICAL
**Venvs live in a CACHE directory that SURVIVES marketplace updates.**
When checking for venvs, ALWAYS check in this order:
| Priority | Path | Survives Updates? |
|----------|------|-------------------|
| 1 (CHECK FIRST) | `~/.cache/claude-mcp-venvs/leo-claude-mktplace/{server}/.venv/` | YES |
| 2 (fallback) | `{marketplace}/mcp-servers/{server}/.venv/` | NO |
**Why cache first?**
- Marketplace directory gets WIPED on every update/reinstall
- Cache directory SURVIVES updates
- False "venv missing" errors waste hours of debugging
**Pattern for hooks checking venvs:**
```bash
CACHE_VENV="$HOME/.cache/claude-mcp-venvs/leo-claude-mktplace/{server}/.venv/bin/python"
LOCAL_VENV="$MARKETPLACE_ROOT/mcp-servers/{server}/.venv/bin/python"
if [[ -f "$CACHE_VENV" ]]; then
    VENV_PATH="$CACHE_VENV"
elif [[ -f "$LOCAL_VENV" ]]; then
    VENV_PATH="$LOCAL_VENV"
else
    echo "venv missing"
fi
```
**See lesson learned:** [Startup Hooks Must Check Venv Cache Path First](https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/wiki/lessons/patterns/startup-hooks-must-check-venv-cache-path-first)
### Documentation Paths
| Type | Location |

View File

@@ -9,27 +9,32 @@ Quick reference for all commands in the Leo Claude Marketplace.
| Plugin | Command | Auto | Manual | Description |
|--------|---------|:----:|:------:|-------------|
| **projman** | `/sprint-plan` | | X | Start sprint planning with AI-guided architecture analysis and issue creation |
| **projman** | `/sprint-start` | | X | Begin sprint execution with dependency analysis and parallel task coordination (requires approval or `--force`) |
| **projman** | `/sprint-start` | | X | Begin sprint execution with dependency analysis and parallel task coordination |
| **projman** | `/sprint-status` | | X | Check current sprint progress (add `--diagram` for Mermaid visualization) |
| **projman** | `/pm-review` | | X | Pre-sprint-close code quality review (debug artifacts, security, error handling) |
| **projman** | `/pm-test` | | X | Run tests (`/pm-test run`) or generate tests (`/pm-test gen <target>`) |
| **projman** | `/review` | | X | Pre-sprint-close code quality review (debug artifacts, security, error handling) |
| **projman** | `/test` | | X | Run tests (`/test run`) or generate tests (`/test gen <target>`) |
| **projman** | `/sprint-close` | | X | Complete sprint and capture lessons learned to Gitea Wiki |
| **projman** | `/labels-sync` | | X | Synchronize label taxonomy from Gitea |
| **projman** | `/pm-setup` | | X | Auto-detect mode or use `--full`, `--quick`, `--sync`, `--clear-cache` |
| **projman** | *SessionStart hook* | X | | Detects git remote vs .env mismatch, warns to run `/pm-setup --sync` |
| **projman** | `/pm-debug` | | X | Diagnostics (`/pm-debug report`) or investigate (`/pm-debug review`) |
| **projman** | `/setup` | | X | Auto-detect mode or use `--full`, `--quick`, `--sync` |
| **projman** | *SessionStart hook* | X | | Detects git remote vs .env mismatch, warns to run `/setup --sync` |
| **projman** | `/debug` | | X | Diagnostics (`/debug report`) or investigate (`/debug review`) |
| **projman** | `/suggest-version` | | X | Analyze CHANGELOG and recommend semantic version bump |
| **projman** | `/proposal-status` | | X | View proposal and implementation hierarchy with status |
| **projman** | `/rfc` | | X | RFC lifecycle management (`/rfc create\|list\|review\|approve\|reject`) |
| **git-flow** | `/git-commit` | | X | Create commit with auto-generated conventional message |
| **git-flow** | `/git-commit-push` | | X | Commit and push to remote in one operation |
| **git-flow** | `/git-commit-merge` | | X | Commit current changes, then merge into target branch |
| **git-flow** | `/git-commit-sync` | | X | Full sync: commit, push, and sync with upstream/base branch |
| **projman** | `/clear-cache` | | X | Clear plugin cache to force fresh configuration reload |
| **projman** | `/rfc-create` | | X | Create new RFC from conversation or clarified spec |
| **projman** | `/rfc-list` | | X | List all RFCs grouped by status |
| **projman** | `/rfc-review` | | X | Submit Draft RFC for review |
| **projman** | `/rfc-approve` | | X | Approve RFC in Review status for sprint planning |
| **projman** | `/rfc-reject` | | X | Reject RFC with documented reason |
| **git-flow** | `/commit` | | X | Create commit with auto-generated conventional message |
| **git-flow** | `/commit-push` | | X | Commit and push to remote in one operation |
| **git-flow** | `/commit-merge` | | X | Commit current changes, then merge into target branch |
| **git-flow** | `/commit-sync` | | X | Full sync: commit, push, and sync with upstream/base branch |
| **git-flow** | `/branch-start` | | X | Create new feature/fix/chore branch with naming conventions |
| **git-flow** | `/branch-cleanup` | | X | Remove merged branches locally and optionally on remote |
| **git-flow** | `/git-status` | | X | Enhanced git status with recommendations |
| **git-flow** | `/git-config` | | X | Configure git-flow settings for the project |
| **pr-review** | `/pr-setup` | | X | Setup wizard for pr-review (shares Gitea MCP with projman) |
| **pr-review** | `/initial-setup` | | X | Setup wizard for pr-review (shares Gitea MCP with projman) |
| **pr-review** | `/project-init` | | X | Quick project setup for PR reviews |
| **pr-review** | `/project-sync` | | X | Sync config with git remote after repo move/rename |
| **pr-review** | *SessionStart hook* | X | | Detects git remote vs .env mismatch |
@@ -54,10 +59,7 @@ Quick reference for all commands in the Leo Claude Marketplace.
| **claude-config-maintainer** | `/config-init` | | X | Initialize new CLAUDE.md for a project |
| **claude-config-maintainer** | `/config-diff` | | X | Track CLAUDE.md changes over time with behavioral impact |
| **claude-config-maintainer** | `/config-lint` | | X | Lint CLAUDE.md for anti-patterns and best practices |
| **claude-config-maintainer** | `/config-audit-settings` | | X | Audit settings.local.json permissions (100-point score) |
| **claude-config-maintainer** | `/config-optimize-settings` | | X | Optimize permissions (profiles, consolidation, dry-run) |
| **claude-config-maintainer** | `/config-permissions-map` | | X | Visual review layer + permission coverage map |
| **cmdb-assistant** | `/cmdb-setup` | | X | Setup wizard for NetBox MCP server |
| **cmdb-assistant** | `/initial-setup` | | X | Setup wizard for NetBox MCP server |
| **cmdb-assistant** | `/cmdb-search` | | X | Search NetBox for devices, IPs, sites |
| **cmdb-assistant** | `/cmdb-device` | | X | Manage network devices (create, view, update, delete) |
| **cmdb-assistant** | `/cmdb-ip` | | X | Manage IP addresses and prefixes |
@@ -69,37 +71,33 @@ Quick reference for all commands in the Leo Claude Marketplace.
| **cmdb-assistant** | `/change-audit` | | X | NetBox audit trail queries with filtering |
| **cmdb-assistant** | `/ip-conflicts` | | X | Detect IP conflicts and overlapping prefixes |
| **project-hygiene** | *PostToolUse hook* | X | | Removes temp files, warns about unexpected root files |
| **data-platform** | `/data-ingest` | | X | Load data from CSV, Parquet, JSON into DataFrame |
| **data-platform** | `/data-profile` | | X | Generate data profiling report with statistics |
| **data-platform** | `/data-schema` | | X | Explore database schemas, tables, columns |
| **data-platform** | `/data-explain` | | X | Explain query execution plan |
| **data-platform** | `/data-lineage` | | X | Show dbt model lineage and dependencies |
| **data-platform** | `/data-run` | | X | Run dbt models with validation |
| **data-platform** | `/ingest` | | X | Load data from CSV, Parquet, JSON into DataFrame |
| **data-platform** | `/profile` | | X | Generate data profiling report with statistics |
| **data-platform** | `/schema` | | X | Explore database schemas, tables, columns |
| **data-platform** | `/explain` | | X | Explain query execution plan |
| **data-platform** | `/lineage` | | X | Show dbt model lineage and dependencies |
| **data-platform** | `/run` | | X | Run dbt models with validation |
| **data-platform** | `/lineage-viz` | | X | dbt lineage visualization as Mermaid diagrams |
| **data-platform** | `/dbt-test` | | X | Formatted dbt test runner with summary and failure details |
| **data-platform** | `/data-quality` | | X | DataFrame quality checks (nulls, duplicates, types, outliers) |
| **data-platform** | `/data-setup` | | X | Setup wizard for data-platform MCP servers |
| **data-platform** | `/initial-setup` | | X | Setup wizard for data-platform MCP servers |
| **data-platform** | *SessionStart hook* | X | | Checks PostgreSQL connection (non-blocking warning) |
| **viz-platform** | `/viz-setup` | | X | Setup wizard for viz-platform MCP server |
| **viz-platform** | `/viz-chart` | | X | Create Plotly charts with theme integration |
| **viz-platform** | `/viz-dashboard` | | X | Create dashboard layouts with filters and grids |
| **viz-platform** | `/viz-theme` | | X | Apply existing theme to visualizations |
| **viz-platform** | `/viz-theme-new` | | X | Create new custom theme with design tokens |
| **viz-platform** | `/viz-theme-css` | | X | Export theme as CSS custom properties |
| **viz-platform** | `/viz-component` | | X | Inspect DMC component props and validation |
| **viz-platform** | `/viz-chart-export` | | X | Export charts to PNG, SVG, PDF via kaleido |
| **viz-platform** | `/initial-setup` | | X | Setup wizard for viz-platform MCP server |
| **viz-platform** | `/chart` | | X | Create Plotly charts with theme integration |
| **viz-platform** | `/dashboard` | | X | Create dashboard layouts with filters and grids |
| **viz-platform** | `/theme` | | X | Apply existing theme to visualizations |
| **viz-platform** | `/theme-new` | | X | Create new custom theme with design tokens |
| **viz-platform** | `/theme-css` | | X | Export theme as CSS custom properties |
| **viz-platform** | `/component` | | X | Inspect DMC component props and validation |
| **viz-platform** | `/chart-export` | | X | Export charts to PNG, SVG, PDF via kaleido |
| **viz-platform** | `/accessibility-check` | | X | Color blind validation (WCAG contrast ratios) |
| **viz-platform** | `/viz-breakpoints` | | X | Configure responsive layout breakpoints |
| **viz-platform** | `/design-review` | | X | Detailed design system audits |
| **viz-platform** | `/design-gate` | | X | Binary pass/fail design system validation gates |
| **viz-platform** | `/breakpoints` | | X | Configure responsive layout breakpoints |
| **viz-platform** | *SessionStart hook* | X | | Checks DMC version (non-blocking warning) |
| **data-platform** | `/data-review` | | X | Comprehensive data integrity audits |
| **data-platform** | `/data-gate` | | X | Binary pass/fail data integrity gates |
| **contract-validator** | `/validate-contracts` | | X | Full marketplace compatibility validation |
| **contract-validator** | `/check-agent` | | X | Validate single agent definition |
| **contract-validator** | `/list-interfaces` | | X | Show all plugin interfaces |
| **contract-validator** | `/dependency-graph` | | X | Mermaid visualization of plugin dependencies |
| **contract-validator** | `/cv-setup` | | X | Setup wizard for contract-validator MCP |
| **contract-validator** | `/initial-setup` | | X | Setup wizard for contract-validator MCP |
---
@@ -107,7 +105,7 @@ Quick reference for all commands in the Leo Claude Marketplace.
| Category | Plugins | Primary Use |
|----------|---------|-------------|
| **Setup** | projman, pr-review, cmdb-assistant, data-platform, viz-platform, contract-validator | `/pm-setup`, `/pr-setup`, `/cmdb-setup`, `/data-setup`, `/viz-setup`, `/cv-setup` |
| **Setup** | projman, pr-review, cmdb-assistant, data-platform | `/setup`, `/initial-setup` |
| **Task Planning** | projman, clarity-assist | Sprint management, requirement clarification |
| **Code Quality** | code-sentinel, pr-review | Security scanning, PR reviews |
| **Documentation** | doc-guardian, claude-config-maintainer | Doc sync, CLAUDE.md maintenance |
@@ -142,11 +140,11 @@ Full workflow from idea to implementation using RFCs:
```
1. /clarify # Clarify the feature idea
2. /rfc create # Create RFC from clarified spec
2. /rfc-create # Create RFC from clarified spec
... refine RFC content ...
3. /rfc review 0001 # Submit RFC for review
3. /rfc-review 0001 # Submit RFC for review
... review discussion ...
4. /rfc approve 0001 # Approve RFC for implementation
4. /rfc-approve 0001 # Approve RFC for implementation
5. /sprint-plan # Select approved RFC for sprint
... implement feature ...
6. /sprint-close # Complete sprint, RFC marked Implemented
@@ -163,10 +161,10 @@ A typical workflow for planning and executing a feature sprint:
4. /sprint-start # Begin execution with dependency ordering
5. /branch-start feat/... # Create feature branch
... implement features ...
6. /git-commit # Commit with conventional message
6. /commit # Commit with conventional message
7. /sprint-status --diagram # Check progress with visualization
8. /pm-review # Pre-close quality review
9. /pm-test run # Verify test coverage
8. /review # Pre-close quality review
9. /test run # Verify test coverage
10. /sprint-close # Capture lessons learned
```
@@ -178,8 +176,8 @@ Quick daily workflow with git-flow:
1. /git-status # Check current state
2. /branch-start fix/... # Start bugfix branch
... make changes ...
3. /git-commit # Auto-generate commit message
4. /git-commit-push # Push to remote
3. /commit # Auto-generate commit message
4. /commit-push # Push to remote
5. /branch-cleanup # Clean merged branches
```
@@ -213,8 +211,8 @@ Safe refactoring with preview:
1. /refactor-dry # Preview opportunities
2. /security-scan # Baseline security check
3. /refactor # Apply improvements
4. /pm-test run # Verify nothing broke
5. /git-commit # Commit with descriptive message
4. /test run # Verify nothing broke
5. /commit # Commit with descriptive message
```
### Example 6: Infrastructure Documentation
@@ -233,12 +231,12 @@ Managing infrastructure with CMDB:
Working with data pipelines:
```
1. /data-ingest file.csv # Load data into DataFrame
2. /data-profile # Generate data profiling report
3. /data-schema # Explore database schemas
4. /data-lineage model_name # View dbt model dependencies
5. /data-run model_name # Execute dbt models
6. /data-explain "SELECT ..." # Analyze query execution plan
1. /ingest file.csv # Load data into DataFrame
2. /profile # Generate data profiling report
3. /schema # Explore database schemas
4. /lineage model_name # View dbt model dependencies
5. /run model_name # Execute dbt models
6. /explain "SELECT ..." # Analyze query execution plan
```
### Example 7: First-Time Setup (New Machine)
@@ -246,7 +244,7 @@ Working with data pipelines:
Setting up the marketplace for the first time:
```
1. /pm-setup --full # Full setup: MCP + system config + project
1. /setup --full # Full setup: MCP + system config + project
# → Follow prompts for Gitea URL, org
# → Add token manually when prompted
# → Confirm repository name
@@ -260,7 +258,7 @@ Setting up the marketplace for the first time:
Adding a new project when system config exists:
```
1. /pm-setup --quick # Quick project setup (auto-detected)
1. /setup --quick # Quick project setup (auto-detected)
# → Confirms detected repo name
# → Creates .env
2. /labels-sync # Sync Gitea labels
@@ -272,8 +270,8 @@ Adding a new project when system config exists:
## Quick Tips
- **Hooks run automatically** - doc-guardian and code-sentinel protect you without manual invocation
- **Use `/git-commit` over `git commit`** - generates better commit messages following conventions
- **Run `/pm-review` before `/sprint-close`** - catches issues before closing the sprint
- **Use `/commit` over `git commit`** - generates better commit messages following conventions
- **Run `/review` before `/sprint-close`** - catches issues before closing the sprint
- **Use `/clarify` for vague requests** - especially helpful for complex requirements
- **`/refactor-dry` is safe** - always preview before applying refactoring changes
@@ -296,4 +294,4 @@ Ensure credentials are configured in `~/.config/claude/gitea.env`, `~/.config/cl
---
*Last Updated: 2026-02-02*
*Last Updated: 2026-01-30*

View File

@@ -9,7 +9,7 @@ Centralized configuration documentation for all plugins and MCP servers in the L
**After installing the marketplace and plugins via Claude Code:**
```
/pm-setup
/setup
```
The interactive wizard auto-detects what's needed and handles everything except manually adding your API tokens.
@@ -25,8 +25,8 @@ The interactive wizard auto-detects what's needed and handles everything except
└─────────────────────────────────────────────────────────────────────────────┘
/pm-setup --full
(or /pm-setup auto-detects)
/setup --full
(or /setup auto-detects)
┌──────────────────────────────┼──────────────────────────────┐
▼ ▼ ▼
@@ -79,7 +79,7 @@ The interactive wizard auto-detects what's needed and handles everything except
┌───────────────┴───────────────┐
▼ ▼
/pm-setup --quick /pm-setup
/setup --quick /setup
(explicit mode) (auto-detects mode)
│ │
│ ┌──────────┴──────────┐
@@ -109,7 +109,7 @@ The interactive wizard auto-detects what's needed and handles everything except
## What Runs Automatically vs User Interaction
### `/pm-setup --full` - Full Setup
### `/setup --full` - Full Setup
| Phase | Type | What Happens |
|-------|------|--------------|
@@ -121,7 +121,7 @@ The interactive wizard auto-detects what's needed and handles everything except
| **6. Project Config** | Automated | Creates `.env` file, checks `.gitignore` |
| **7. Validation** | Automated | Tests API connectivity, shows summary |
### `/pm-setup --quick` - Quick Project Setup
### `/setup --quick` - Quick Project Setup
| Phase | Type | What Happens |
|-------|------|--------------|
@@ -136,10 +136,10 @@ The interactive wizard auto-detects what's needed and handles everything except
| Mode | When to Use | What It Does |
|------|-------------|--------------|
| `/pm-setup` | Any time | Auto-detects: runs full, quick, or sync as needed |
| `/pm-setup --full` | First time on a machine | Full setup: MCP server + system config + project config |
| `/pm-setup --quick` | Starting a new project | Quick setup: project config only (assumes system is ready) |
| `/pm-setup --sync` | After repo move/rename | Updates .env to match current git remote |
| `/setup` | Any time | Auto-detects: runs full, quick, or sync as needed |
| `/setup --full` | First time on a machine | Full setup: MCP server + system config + project config |
| `/setup --quick` | Starting a new project | Quick setup: project config only (assumes system is ready) |
| `/setup --sync` | After repo move/rename | Updates .env to match current git remote |
**Auto-detection logic:**
1. No system config → **full** mode
@@ -148,9 +148,9 @@ The interactive wizard auto-detects what's needed and handles everything except
4. Both exist, match → already configured, offer to reconfigure
**Typical workflow:**
1. Install plugin → run `/pm-setup` (auto-runs full mode)
2. Start new project → run `/pm-setup` (auto-runs quick mode)
3. Repository moved? → run `/pm-setup` (auto-runs sync mode)
1. Install plugin → run `/setup` (auto-runs full mode)
2. Start new project → run `/setup` (auto-runs quick mode)
3. Repository moved? → run `/setup` (auto-runs sync mode)
---
@@ -182,7 +182,7 @@ This marketplace uses a **hybrid configuration** approach:
**Benefits:**
- Single token per service (update once, use everywhere)
- Easy multi-project setup (just run `/pm-setup` in each project)
- Easy multi-project setup (just run `/setup` in each project)
- Security (tokens never committed to git, never typed into AI chat)
- Project isolation (each project can override defaults)
@@ -190,7 +190,7 @@ This marketplace uses a **hybrid configuration** approach:
## Prerequisites
Before running `/pm-setup`:
Before running `/setup`:
1. **Python 3.10+** installed
```bash
@@ -213,7 +213,7 @@ Before running `/pm-setup`:
Run the setup wizard in Claude Code:
```
/pm-setup
/setup
```
The wizard will guide you through each step interactively and auto-detect the appropriate mode.
@@ -387,18 +387,17 @@ PR_REVIEW_AUTO_SUBMIT=false
| Plugin | System Config | Project Config | Setup Command |
|--------|---------------|----------------|---------------|
| **projman** | gitea.env | .env (GITEA_REPO=owner/repo) | `/pm-setup` |
| **pr-review** | gitea.env | .env (GITEA_REPO=owner/repo) | `/pr-setup` |
| **projman** | gitea.env | .env (GITEA_REPO=owner/repo) | `/setup` |
| **pr-review** | gitea.env | .env (GITEA_REPO=owner/repo) | `/initial-setup` |
| **git-flow** | git-flow.env (optional) | .env (optional) | None needed |
| **clarity-assist** | None | None | None needed |
| **cmdb-assistant** | netbox.env | None | `/cmdb-setup` |
| **data-platform** | postgres.env | .env (optional) | `/data-setup` |
| **viz-platform** | None | .env (optional DMC_VERSION) | `/viz-setup` |
| **cmdb-assistant** | netbox.env | None | `/initial-setup` |
| **data-platform** | postgres.env | .env (optional) | `/initial-setup` |
| **viz-platform** | None | .env (optional DMC_VERSION) | `/initial-setup` |
| **doc-guardian** | None | None | None needed |
| **code-sentinel** | None | None | None needed |
| **project-hygiene** | None | None | None needed |
| **claude-config-maintainer** | None | None | None needed |
| **contract-validator** | None | None | `/cv-setup` |
---
@@ -408,230 +407,18 @@ Once system-level config is set up, adding new projects is simple:
```
cd ~/projects/new-project
/pm-setup
/setup
```
The command auto-detects that system config exists and runs quick project setup.
---
## Installing Plugins to Consumer Projects
The marketplace provides scripts to install plugins into consumer projects. This sets up the MCP server connections and adds CLAUDE.md integration snippets.
### Install a Plugin
```bash
cd /path/to/leo-claude-mktplace
./scripts/install-plugin.sh <plugin-name> <target-project-path>
```
**Examples:**
```bash
# Install data-platform to a portfolio project
./scripts/install-plugin.sh data-platform ~/projects/personal-portfolio
# Install multiple plugins
./scripts/install-plugin.sh viz-platform ~/projects/personal-portfolio
./scripts/install-plugin.sh projman ~/projects/personal-portfolio
```
**What it does:**
1. Validates the plugin exists in the marketplace
2. Adds MCP server entry to target's `.mcp.json` (if plugin has MCP server)
3. Appends integration snippet to target's `CLAUDE.md`
4. Reports changes and lists available commands
**After installation:** Restart your Claude Code session for MCP tools to become available.
### Uninstall a Plugin
```bash
./scripts/uninstall-plugin.sh <plugin-name> <target-project-path>
```
Removes the MCP server entry and CLAUDE.md integration section.
### List Installed Plugins
```bash
./scripts/list-installed.sh <target-project-path>
```
Shows which marketplace plugins are installed, partially installed, or available.
**Output example:**
```
✓ Fully Installed:
PLUGIN          VERSION  DESCRIPTION
------          -------  -----------
data-platform   1.3.0    pandas, PostgreSQL, and dbt integration...
viz-platform    1.1.0    DMC validation, Plotly charts, and theming...
○ Available (not installed):
projman         3.4.0    Sprint planning and project management...
```
### Plugins with MCP Servers
Not all plugins have MCP servers. The install script handles this automatically:
| Plugin | Has MCP Server | Notes |
|--------|---------------|-------|
| data-platform | ✓ | pandas, PostgreSQL, dbt tools |
| viz-platform | ✓ | DMC validation, chart, theme tools |
| contract-validator | ✓ | Plugin compatibility validation |
| cmdb-assistant | ✓ (via netbox) | NetBox CMDB tools |
| projman | ✓ (via gitea) | Issue, wiki, PR tools |
| pr-review | ✓ (via gitea) | PR review tools |
| git-flow | ✗ | Commands only |
| doc-guardian | ✗ | Commands and hooks only |
| code-sentinel | ✗ | Commands and hooks only |
| clarity-assist | ✗ | Commands only |
### Script Requirements
- **jq** must be installed (`sudo apt install jq`)
- Scripts are idempotent (safe to run multiple times)
---
## Agent Frontmatter Configuration
Agents specify their configuration in frontmatter using Claude Code's supported fields. Reference: https://code.claude.com/docs/en/sub-agents
### Supported Frontmatter Fields
| Field | Required | Default | Description |
|-------|----------|---------|-------------|
| `name` | Yes | — | Unique identifier, lowercase + hyphens |
| `description` | Yes | — | When Claude should delegate to this subagent |
| `model` | No | `inherit` | `sonnet`, `opus`, `haiku`, or `inherit` |
| `permissionMode` | No | `default` | Controls permission prompts: `default`, `acceptEdits`, `dontAsk`, `bypassPermissions`, `plan` |
| `disallowedTools` | No | none | Comma-separated tools to remove from agent's toolset |
| `skills` | No | none | Comma-separated skills auto-injected into context at startup |
| `hooks` | No | none | Lifecycle hooks scoped to this subagent |
### Complete Agent Matrix
| Plugin | Agent | `model` | `permissionMode` | `disallowedTools` | `skills` |
|--------|-------|---------|-------------------|--------------------|----------|
| projman | planner | opus | default | — | frontmatter (2) + body text (12) |
| projman | orchestrator | sonnet | acceptEdits | — | frontmatter (2) + body text (10) |
| projman | executor | sonnet | bypassPermissions | — | frontmatter (7) |
| projman | code-reviewer | opus | default | Write, Edit, MultiEdit | frontmatter (4) |
| pr-review | coordinator | sonnet | plan | Write, Edit, MultiEdit | — |
| pr-review | security-reviewer | sonnet | plan | Write, Edit, MultiEdit | — |
| pr-review | performance-analyst | sonnet | plan | Write, Edit, MultiEdit | — |
| pr-review | maintainability-auditor | haiku | plan | Write, Edit, MultiEdit | — |
| pr-review | test-validator | haiku | plan | Write, Edit, MultiEdit | — |
| data-platform | data-advisor | sonnet | default | — | — |
| data-platform | data-analysis | sonnet | plan | Write, Edit, MultiEdit | — |
| data-platform | data-ingestion | haiku | acceptEdits | — | — |
| viz-platform | design-reviewer | sonnet | plan | Write, Edit, MultiEdit | — |
| viz-platform | layout-builder | sonnet | default | — | — |
| viz-platform | component-check | haiku | plan | Write, Edit, MultiEdit | — |
| viz-platform | theme-setup | haiku | acceptEdits | — | — |
| contract-validator | full-validation | sonnet | default | — | — |
| contract-validator | agent-check | haiku | plan | Write, Edit, MultiEdit | — |
| code-sentinel | security-reviewer | sonnet | plan | Write, Edit, MultiEdit | — |
| code-sentinel | refactor-advisor | sonnet | acceptEdits | — | — |
| doc-guardian | doc-analyzer | sonnet | acceptEdits | — | — |
| clarity-assist | clarity-coach | sonnet | default | Write, Edit, MultiEdit | — |
| git-flow | git-assistant | haiku | acceptEdits | — | — |
| claude-config-maintainer | maintainer | sonnet | acceptEdits | — | frontmatter (2) |
| cmdb-assistant | cmdb-assistant | sonnet | default | — | — |
### Design Principles
- `bypassPermissions` is granted to exactly ONE agent (Executor) which has code-sentinel PreToolUse hook + Code Reviewer downstream as safety nets.
- `plan` mode is assigned to all pure analysis agents (pr-review, read-only validators).
- `disallowedTools: Write, Edit, MultiEdit` provides defense-in-depth on agents that should never write files.
- `skills` frontmatter is used for agents with ≤7 skills where guaranteed loading is safety-critical. Agents with 8+ skills use body text `## Skills to Load` for selective loading.
- `hooks` (agent-scoped) is reserved for future use (v6.0+).
Override any field by editing the agent's `.md` file in `plugins/{plugin}/agents/`.
### permissionMode Guide
| Value | Prompts for file ops? | Prompts for Bash? | Prompts for MCP? | Use when |
|-------|-----------------------|-------------------|-------------------|----------|
| `default` | Yes | Yes | No (MCP bypasses permissions) | You want full visibility |
| `acceptEdits` | No | Yes | No | Core job is file read/write, Bash visibility useful |
| `dontAsk` | No | No (most) | No | Even Bash prompts are friction |
| `bypassPermissions` | No | No | No | Agent has downstream safety layers |
| `plan` | N/A (read-only) | N/A (read-only) | No | Pure analysis, no modifications |
### disallowedTools Guide
Use `disallowedTools` to remove specific tools from an agent's toolset. This is a blacklist — the agent inherits all tools from the main thread, then the listed tools are removed.
Prefer `disallowedTools` over `tools` (whitelist) because:
- New MCP servers are automatically available without updating every agent.
- Less configuration to maintain.
- Easier to audit — you only list what's blocked.
Common patterns:
- `disallowedTools: Write, Edit, MultiEdit` — read-only agent, cannot modify files.
- `disallowedTools: Bash` — no shell access (rare, most agents need at least read-only Bash).
### skills Frontmatter Guide
The `skills` field auto-injects skill file contents into the agent's context window at startup. The agent does NOT need to read the files — they are already present.
**When to use frontmatter `skills`:**
- Agent has ≤7 skills.
- Skills are safety-critical (e.g., `branch-security`, `runaway-detection`).
- You need guaranteed loading — no risk of the agent skipping a skill.
**When to keep body text `## Skills to Load`:**
- Agent has 8+ skills (context window cost too high for full injection).
- Skills are situational — not all needed for every invocation.
- Agent benefits from selective loading based on the specific task.
Skill names in frontmatter are resolved relative to the plugin's `skills/` directory. Use the filename without the `.md` extension.
### Phase-Based Skill Loading (Body Text)
For agents with 8+ skills, use **phase-based loading** in the agent body text. This structures skill reads into logical phases, with explicit instructions to read each skill exactly once.
**Pattern:**
```markdown
## Skill Loading Protocol
**Frontmatter skills (auto-injected, always available — DO NOT re-read these):**
- `skill-a` — description
- `skill-b` — description
**Phase 1 skills — read ONCE at session start:**
- skills/validation-skill.md
- skills/safety-skill.md
**Phase 2 skills — read ONCE when entering main work:**
- skills/workflow-skill.md
- skills/domain-skill.md
**CRITICAL: Read each skill file exactly ONCE. Do NOT re-read skill files between MCP API calls.**
```
**Benefits:**
- Frontmatter skills (always needed) are auto-injected — zero file read cost
- Phase skills are read once at the appropriate time — not re-read per API call
- `batch-execution` skill provides protocol for API-heavy phases
- ~76-83% reduction in skill-related token consumption for typical sprints
**Currently applied to:**
- Planner agent: 2 frontmatter + 12 body text (3 phases)
- Orchestrator agent: 2 frontmatter + 10 body text (2 phases)
---
## Automatic Validation Features
### API Validation
When running `/pm-setup`, the command:
When running `/setup`, the command:
1. **Detects** organization and repository from git remote URL
2. **Validates** via Gitea API: `GET /api/v1/repos/{org}/{repo}`
@@ -646,7 +433,7 @@ When you start a Claude Code session, a hook automatically:
1. Reads `GITEA_REPO` (in `owner/repo` format) from `.env`
2. Compares with current `git remote get-url origin`
3. **Warns** if mismatch detected: "Repository location mismatch. Run `/pm-setup --sync` to update."
3. **Warns** if mismatch detected: "Repository location mismatch. Run `/setup --sync` to update."
This helps when you:
- Move a repository to a different organization
@@ -741,7 +528,7 @@ cat .env
3. **Never type tokens into AI chat**
- Always edit config files directly in your editor
- The `/pm-setup` wizard respects this
- The `/setup` wizard respects this
4. **Rotate tokens periodically**
- Every 6-12 months

View File

@@ -279,8 +279,8 @@ Error: Could not find a suitable TLS CA certificate bundle, invalid path:
Use these commands for automated checking:
- `/pm-debug report` - Run full diagnostics, create issue if problems found
- `/pm-debug review` - Investigate existing diagnostic issues and propose fixes
- `/debug report` - Run full diagnostics, create issue if problems found
- `/debug review` - Investigate existing diagnostic issues and propose fixes
---

View File

@@ -46,9 +46,9 @@ cd ~/.claude/plugins/marketplaces/leo-claude-mktplace && ./scripts/setup.sh
## After Updating: Re-run Setup if Needed
### When to Re-run Setup
### When to Re-run `/initial-setup`
You typically **don't need** to re-run setup after updates. However, re-run your plugin's setup command (e.g., `/pm-setup`, `/pr-setup`, `/cmdb-setup`) if:
You typically **don't need** to re-run setup after updates. However, re-run if:
- Changelog mentions **new required environment variables**
- Changelog mentions **breaking changes** to configuration
@@ -97,7 +97,7 @@ When updating, review if changes affect the setup workflow:
1. **Check for setup command changes:**
```bash
git diff HEAD~1 plugins/*/commands/*-setup.md
git diff HEAD~1 plugins/*/commands/initial-setup.md
git diff HEAD~1 plugins/*/commands/project-init.md
git diff HEAD~1 plugins/*/commands/project-sync.md
```
@@ -114,7 +114,7 @@ When updating, review if changes affect the setup workflow:
**If setup commands changed:**
- Review what's new (new validation steps, new prompts, etc.)
- Consider re-running your plugin's setup command or `/project-init` to benefit from improvements
- Consider re-running `/initial-setup` or `/project-init` to benefit from improvements
- Existing configurations remain valid unless changelog notes breaking changes
**If hooks changed:**
@@ -142,7 +142,7 @@ deactivate
### Configuration no longer works
1. Check CHANGELOG.md for breaking changes
2. Run your plugin's setup command (e.g., `/pm-setup`) to re-validate and fix configuration
2. Run `/initial-setup` to re-validate and fix configuration
3. Compare your config files with documentation in `docs/CONFIGURATION.md`
### MCP server won't start after update

View File

@@ -1,20 +0,0 @@
2026-01-26T14:36:42 | mcp-servers | /home/lmiranda/claude-plugins-work/mcp-servers/contract-validator/mcp_server/parse_tools.py | docs/COMMANDS-CHEATSHEET.md CLAUDE.md
2026-01-26T14:37:38 | mcp-servers | /home/lmiranda/claude-plugins-work/mcp-servers/contract-validator/mcp_server/parse_tools.py | docs/COMMANDS-CHEATSHEET.md CLAUDE.md
2026-01-26T14:37:48 | mcp-servers | /home/lmiranda/claude-plugins-work/mcp-servers/contract-validator/mcp_server/parse_tools.py | docs/COMMANDS-CHEATSHEET.md CLAUDE.md
2026-01-26T14:38:05 | mcp-servers | /home/lmiranda/claude-plugins-work/mcp-servers/contract-validator/mcp_server/parse_tools.py | docs/COMMANDS-CHEATSHEET.md CLAUDE.md
2026-01-26T14:38:55 | mcp-servers | /home/lmiranda/claude-plugins-work/mcp-servers/contract-validator/mcp_server/parse_tools.py | docs/COMMANDS-CHEATSHEET.md CLAUDE.md
2026-01-26T14:39:35 | mcp-servers | /home/lmiranda/claude-plugins-work/mcp-servers/contract-validator/mcp_server/parse_tools.py | docs/COMMANDS-CHEATSHEET.md CLAUDE.md
2026-01-26T14:40:19 | mcp-servers | /home/lmiranda/claude-plugins-work/mcp-servers/contract-validator/mcp_server/parse_tools.py | docs/COMMANDS-CHEATSHEET.md CLAUDE.md
2026-01-26T15:02:30 | mcp-servers | /home/lmiranda/claude-plugins-work/mcp-servers/contract-validator/tests/test_parse_tools.py | docs/COMMANDS-CHEATSHEET.md CLAUDE.md
2026-01-26T15:02:37 | mcp-servers | /home/lmiranda/claude-plugins-work/mcp-servers/contract-validator/tests/test_parse_tools.py | docs/COMMANDS-CHEATSHEET.md CLAUDE.md
2026-01-26T15:03:41 | mcp-servers | /home/lmiranda/claude-plugins-work/mcp-servers/contract-validator/tests/test_report_tools.py | docs/COMMANDS-CHEATSHEET.md CLAUDE.md
2026-02-02T10:56:19 | mcp-servers | /home/lmiranda/claude-plugins-work/mcp-servers/contract-validator/mcp_server/validation_tools.py | docs/COMMANDS-CHEATSHEET.md CLAUDE.md
2026-02-02T10:57:49 | mcp-servers | /home/lmiranda/claude-plugins-work/mcp-servers/contract-validator/tests/test_validation_tools.py | docs/COMMANDS-CHEATSHEET.md CLAUDE.md
2026-02-02T10:58:22 | skills | /home/lmiranda/claude-plugins-work/plugins/contract-validator/skills/mcp-tools-reference.md | README.md
2026-02-02T10:58:38 | skills | /home/lmiranda/claude-plugins-work/plugins/contract-validator/skills/validation-rules.md | README.md
2026-02-02T10:59:13 | .claude-plugin | /home/lmiranda/claude-plugins-work/.claude-plugin/marketplace.json | CLAUDE.md .claude-plugin/marketplace.json
2026-02-02T13:55:33 | skills | /home/lmiranda/claude-plugins-work/plugins/projman/skills/visual-output.md | README.md
2026-02-02T13:55:41 | agents | /home/lmiranda/claude-plugins-work/plugins/projman/agents/planner.md | README.md CLAUDE.md
2026-02-02T13:55:55 | agents | /home/lmiranda/claude-plugins-work/plugins/projman/agents/orchestrator.md | README.md CLAUDE.md
2026-02-02T13:56:14 | agents | /home/lmiranda/claude-plugins-work/plugins/projman/agents/executor.md | README.md CLAUDE.md
2026-02-02T13:56:34 | agents | /home/lmiranda/claude-plugins-work/plugins/projman/agents/code-reviewer.md | README.md CLAUDE.md

View File

@@ -131,28 +131,6 @@ class ContractValidatorMCPServer:
"required": ["agent_name", "claude_md_path"]
}
),
Tool(
name="validate_workflow_integration",
description="Validate that a domain plugin exposes the required advisory interfaces (gate command, review command, advisory agent) expected by projman's domain-consultation skill. Also checks gate contract version compatibility.",
inputSchema={
"type": "object",
"properties": {
"plugin_path": {
"type": "string",
"description": "Path to the domain plugin directory"
},
"domain_label": {
"type": "string",
"description": "The Domain/* label it claims to handle, e.g. Domain/Viz"
},
"expected_contract": {
"type": "string",
"description": "Expected contract version (e.g., 'v1'). If provided, validates the gate command's contract matches."
}
},
"required": ["plugin_path", "domain_label"]
}
),
# Report tools (to be implemented in #188)
Tool(
name="generate_compatibility_report",
@@ -220,8 +198,6 @@ class ContractValidatorMCPServer:
            result = await self._validate_agent_refs(**arguments)
        elif name == "validate_data_flow":
            result = await self._validate_data_flow(**arguments)
        elif name == "validate_workflow_integration":
            result = await self._validate_workflow_integration(**arguments)
        elif name == "generate_compatibility_report":
            result = await self._generate_compatibility_report(**arguments)
        elif name == "list_issues":
@@ -265,17 +241,6 @@ class ContractValidatorMCPServer:
"""Validate agent data flow"""
return await self.validation_tools.validate_data_flow(agent_name, claude_md_path)
async def _validate_workflow_integration(
self,
plugin_path: str,
domain_label: str,
expected_contract: str = None
) -> dict:
"""Validate domain plugin exposes required advisory interfaces"""
return await self.validation_tools.validate_workflow_integration(
plugin_path, domain_label, expected_contract
)
# Report tool implementations (Issue #188)
async def _generate_compatibility_report(self, marketplace_path: str, format: str = "markdown") -> dict:

View File

@@ -26,7 +26,6 @@ class IssueType(str, Enum):
    OPTIONAL_DEPENDENCY = "optional_dependency"
    UNDECLARED_OUTPUT = "undeclared_output"
    INVALID_SEQUENCE = "invalid_sequence"
    MISSING_INTEGRATION = "missing_integration"


class ValidationIssue(BaseModel):
@@ -66,18 +65,6 @@ class DataFlowResult(BaseModel):
    issues: list[ValidationIssue] = []


class WorkflowIntegrationResult(BaseModel):
    """Result of workflow integration validation for domain plugins"""
    plugin_name: str
    domain_label: str
    valid: bool
    gate_command_found: bool
    gate_contract: Optional[str] = None  # Contract version declared by gate command
    review_command_found: bool
    advisory_agent_found: bool
    issues: list[ValidationIssue] = []


class ValidationTools:
    """Tools for validating plugin compatibility and agent references"""
@@ -349,145 +336,3 @@ class ValidationTools:
)
return result.model_dump()
async def validate_workflow_integration(
self,
plugin_path: str,
domain_label: str,
expected_contract: Optional[str] = None
) -> dict:
"""
Validate that a domain plugin exposes required advisory interfaces.
Checks for:
- Gate command (e.g., /design-gate, /data-gate) - REQUIRED
- Gate contract version (gate_contract in frontmatter) - INFO if missing
- Review command (e.g., /design-review, /data-review) - recommended
- Advisory agent referencing the domain label - recommended
Args:
plugin_path: Path to the domain plugin directory
domain_label: The Domain/* label it claims to handle (e.g., Domain/Viz)
expected_contract: Expected contract version (e.g., 'v1'). If provided,
validates the gate command's contract matches.
Returns:
Validation result with found interfaces and issues
"""
import re
plugin_path_obj = Path(plugin_path)
issues = []
# Extract plugin name from path
plugin_name = plugin_path_obj.name
if not plugin_path_obj.exists():
return {
"error": f"Plugin directory not found: {plugin_path}",
"plugin_path": plugin_path,
"domain_label": domain_label
}
# Extract domain short name from label (e.g., "Domain/Viz" -> "viz", "Domain/Data" -> "data")
domain_short = domain_label.split("/")[-1].lower() if "/" in domain_label else domain_label.lower()
# Check for gate command
commands_dir = plugin_path_obj / "commands"
gate_command_found = False
gate_contract = None
gate_patterns = ["pass", "fail", "PASS", "FAIL", "Binary pass/fail", "gate"]
if commands_dir.exists():
for cmd_file in commands_dir.glob("*.md"):
if "gate" in cmd_file.name.lower():
# Verify it's actually a gate command by checking content
content = cmd_file.read_text()
if any(pattern in content for pattern in gate_patterns):
gate_command_found = True
# Parse frontmatter for gate_contract
frontmatter_match = re.match(r'^---\n(.*?)\n---', content, re.DOTALL)
if frontmatter_match:
frontmatter = frontmatter_match.group(1)
contract_match = re.search(r'gate_contract:\s*(\S+)', frontmatter)
if contract_match:
gate_contract = contract_match.group(1)
break
if not gate_command_found:
issues.append(ValidationIssue(
severity=IssueSeverity.ERROR,
issue_type=IssueType.MISSING_INTEGRATION,
message=f"Plugin '{plugin_name}' lacks a gate command for domain '{domain_label}'",
location=str(commands_dir),
suggestion=f"Create commands/{domain_short}-gate.md with binary PASS/FAIL output"
))
# Check for review command
review_command_found = False
if commands_dir.exists():
for cmd_file in commands_dir.glob("*.md"):
if "review" in cmd_file.name.lower() and "gate" not in cmd_file.name.lower():
review_command_found = True
break
if not review_command_found:
issues.append(ValidationIssue(
severity=IssueSeverity.WARNING,
issue_type=IssueType.MISSING_INTEGRATION,
message=f"Plugin '{plugin_name}' lacks a review command for domain '{domain_label}'",
location=str(commands_dir),
suggestion=f"Create commands/{domain_short}-review.md for detailed audits"
))
# Check for advisory agent
agents_dir = plugin_path_obj / "agents"
advisory_agent_found = False
if agents_dir.exists():
for agent_file in agents_dir.glob("*.md"):
content = agent_file.read_text()
# Check if agent references the domain label or gate command
if domain_label in content or f"{domain_short}-gate" in content.lower() or "advisor" in agent_file.name.lower() or "reviewer" in agent_file.name.lower():
advisory_agent_found = True
break
if not advisory_agent_found:
issues.append(ValidationIssue(
severity=IssueSeverity.WARNING,
issue_type=IssueType.MISSING_INTEGRATION,
message=f"Plugin '{plugin_name}' lacks an advisory agent for domain '{domain_label}'",
location=str(agents_dir) if agents_dir.exists() else str(plugin_path_obj),
suggestion=f"Create agents/{domain_short}-advisor.md referencing '{domain_label}'"
))
# Check gate contract version
if gate_command_found:
if not gate_contract:
issues.append(ValidationIssue(
severity=IssueSeverity.INFO,
issue_type=IssueType.MISSING_INTEGRATION,
message=f"Gate command does not declare a contract version",
location=str(commands_dir),
suggestion="Consider adding `gate_contract: v1` to frontmatter for version tracking"
))
elif expected_contract and gate_contract != expected_contract:
issues.append(ValidationIssue(
severity=IssueSeverity.WARNING,
issue_type=IssueType.INTERFACE_MISMATCH,
message=f"Contract version mismatch: gate declares {gate_contract}, projman expects {expected_contract}",
location=str(commands_dir),
suggestion=f"Update domain-consultation.md Gate Command Reference table to {gate_contract}, or update gate command to {expected_contract}"
))
result = WorkflowIntegrationResult(
plugin_name=plugin_name,
domain_label=domain_label,
valid=gate_command_found, # Only gate is required for validity
gate_command_found=gate_command_found,
gate_contract=gate_contract,
review_command_found=review_command_found,
advisory_agent_found=advisory_agent_found,
issues=issues
)
return result.model_dump()
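
A standalone sketch of the gate-contract detection used above, reduced to just the frontmatter regex. The sample command text is invented for illustration and is not one of the repo's actual commands:

```python
import re

# Invented sample of a gate command file with YAML-style frontmatter.
SAMPLE_GATE_COMMAND = """---
description: Design system compliance gate (pass/fail)
gate_contract: v1
---
# /design-gate
Binary pass/fail validation gate.
"""

def extract_gate_contract(content: str) -> str | None:
    """Return the gate_contract value declared in the frontmatter, if any."""
    frontmatter_match = re.match(r'^---\n(.*?)\n---', content, re.DOTALL)
    if not frontmatter_match:
        return None
    contract_match = re.search(r'gate_contract:\s*(\S+)', frontmatter_match.group(1))
    return contract_match.group(1) if contract_match else None

print(extract_gate_contract(SAMPLE_GATE_COMMAND))  # -> v1
```

A YAML parser would tolerate more formatting variation, but the regex keeps the check dependency-free, consistent with the rest of the removed validator.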

View File

@@ -254,261 +254,3 @@ async def test_validate_data_flow_missing_producer(validation_tools, tmp_path):
# Should have warning about missing producer
warning_issues = [i for i in result["issues"] if i["severity"].value == "warning"]
assert len(warning_issues) > 0
# --- Workflow Integration Tests ---
@pytest.fixture
def domain_plugin_complete(tmp_path):
"""Create a complete domain plugin with gate, review, and advisory agent"""
plugin_dir = tmp_path / "viz-platform"
plugin_dir.mkdir()
(plugin_dir / ".claude-plugin").mkdir()
(plugin_dir / "commands").mkdir()
(plugin_dir / "agents").mkdir()
# Gate command with PASS/FAIL pattern
gate_cmd = plugin_dir / "commands" / "design-gate.md"
gate_cmd.write_text("""# /design-gate
Binary pass/fail validation gate for design system compliance.
## Output
- **PASS**: All design system checks passed
- **FAIL**: Design system violations detected
""")
# Review command
review_cmd = plugin_dir / "commands" / "design-review.md"
review_cmd.write_text("""# /design-review
Comprehensive design system audit.
""")
# Advisory agent
agent = plugin_dir / "agents" / "design-reviewer.md"
agent.write_text("""# design-reviewer
Design system compliance auditor.
Handles issues with `Domain/Viz` label.
""")
return str(plugin_dir)
@pytest.fixture
def domain_plugin_missing_gate(tmp_path):
"""Create domain plugin with review and agent but no gate command"""
plugin_dir = tmp_path / "data-platform"
plugin_dir.mkdir()
(plugin_dir / ".claude-plugin").mkdir()
(plugin_dir / "commands").mkdir()
(plugin_dir / "agents").mkdir()
# Review command (but no gate)
review_cmd = plugin_dir / "commands" / "data-review.md"
review_cmd.write_text("""# /data-review
Data integrity audit.
""")
# Advisory agent
agent = plugin_dir / "agents" / "data-advisor.md"
agent.write_text("""# data-advisor
Data integrity advisor for Domain/Data issues.
""")
return str(plugin_dir)
@pytest.fixture
def domain_plugin_minimal(tmp_path):
"""Create minimal plugin with no commands or agents"""
plugin_dir = tmp_path / "minimal-plugin"
plugin_dir.mkdir()
(plugin_dir / ".claude-plugin").mkdir()
readme = plugin_dir / "README.md"
readme.write_text("# Minimal Plugin\n\nNo commands or agents.")
return str(plugin_dir)
@pytest.mark.asyncio
async def test_validate_workflow_integration_complete(validation_tools, domain_plugin_complete):
"""Test complete domain plugin returns valid with all interfaces found"""
result = await validation_tools.validate_workflow_integration(
domain_plugin_complete,
"Domain/Viz"
)
assert "error" not in result
assert result["valid"] is True
assert result["gate_command_found"] is True
assert result["review_command_found"] is True
assert result["advisory_agent_found"] is True
# May have INFO issue about missing contract version (not an error/warning)
error_or_warning = [i for i in result["issues"]
if i["severity"].value in ("error", "warning")]
assert len(error_or_warning) == 0
@pytest.mark.asyncio
async def test_validate_workflow_integration_missing_gate(validation_tools, domain_plugin_missing_gate):
"""Test plugin missing gate command returns invalid with ERROR"""
result = await validation_tools.validate_workflow_integration(
domain_plugin_missing_gate,
"Domain/Data"
)
assert "error" not in result
assert result["valid"] is False
assert result["gate_command_found"] is False
assert result["review_command_found"] is True
assert result["advisory_agent_found"] is True
# Should have one ERROR for missing gate
error_issues = [i for i in result["issues"] if i["severity"].value == "error"]
assert len(error_issues) == 1
assert "gate" in error_issues[0]["message"].lower()
@pytest.mark.asyncio
async def test_validate_workflow_integration_minimal(validation_tools, domain_plugin_minimal):
"""Test minimal plugin returns invalid with multiple issues"""
result = await validation_tools.validate_workflow_integration(
domain_plugin_minimal,
"Domain/Test"
)
assert "error" not in result
assert result["valid"] is False
assert result["gate_command_found"] is False
assert result["review_command_found"] is False
assert result["advisory_agent_found"] is False
# Should have one ERROR (gate) and two WARNINGs (review, agent)
error_issues = [i for i in result["issues"] if i["severity"].value == "error"]
warning_issues = [i for i in result["issues"] if i["severity"].value == "warning"]
assert len(error_issues) == 1
assert len(warning_issues) == 2
@pytest.mark.asyncio
async def test_validate_workflow_integration_nonexistent_plugin(validation_tools, tmp_path):
"""Test error when plugin directory doesn't exist"""
result = await validation_tools.validate_workflow_integration(
str(tmp_path / "nonexistent"),
"Domain/Test"
)
assert "error" in result
assert "not found" in result["error"].lower()
# --- Gate Contract Version Tests ---
@pytest.fixture
def domain_plugin_with_contract(tmp_path):
"""Create domain plugin with gate_contract: v1 in frontmatter"""
plugin_dir = tmp_path / "viz-platform-versioned"
plugin_dir.mkdir()
(plugin_dir / ".claude-plugin").mkdir()
(plugin_dir / "commands").mkdir()
(plugin_dir / "agents").mkdir()
# Gate command with gate_contract in frontmatter
gate_cmd = plugin_dir / "commands" / "design-gate.md"
gate_cmd.write_text("""---
description: Design system compliance gate (pass/fail)
gate_contract: v1
---
# /design-gate
Binary pass/fail validation gate for design system compliance.
## Output
- **PASS**: All design system checks passed
- **FAIL**: Design system violations detected
""")
# Review command
review_cmd = plugin_dir / "commands" / "design-review.md"
review_cmd.write_text("""# /design-review
Comprehensive design system audit.
""")
# Advisory agent
agent = plugin_dir / "agents" / "design-reviewer.md"
agent.write_text("""# design-reviewer
Design system compliance auditor for Domain/Viz issues.
""")
return str(plugin_dir)
@pytest.mark.asyncio
async def test_validate_workflow_contract_match(validation_tools, domain_plugin_with_contract):
"""Test that matching expected_contract produces no warning"""
result = await validation_tools.validate_workflow_integration(
domain_plugin_with_contract,
"Domain/Viz",
expected_contract="v1"
)
assert "error" not in result
assert result["valid"] is True
assert result["gate_contract"] == "v1"
# Should have no warnings about contract mismatch
warning_issues = [i for i in result["issues"] if i["severity"].value == "warning"]
contract_warnings = [i for i in warning_issues if "contract" in i["message"].lower()]
assert len(contract_warnings) == 0
@pytest.mark.asyncio
async def test_validate_workflow_contract_mismatch(validation_tools, domain_plugin_with_contract):
"""Test that mismatched expected_contract produces WARNING"""
result = await validation_tools.validate_workflow_integration(
domain_plugin_with_contract,
"Domain/Viz",
expected_contract="v2" # Gate has v1
)
assert "error" not in result
assert result["valid"] is True # Contract mismatch doesn't affect validity
assert result["gate_contract"] == "v1"
# Should have warning about contract mismatch
warning_issues = [i for i in result["issues"] if i["severity"].value == "warning"]
contract_warnings = [i for i in warning_issues if "contract" in i["message"].lower()]
assert len(contract_warnings) == 1
assert "mismatch" in contract_warnings[0]["message"].lower()
assert "v1" in contract_warnings[0]["message"]
assert "v2" in contract_warnings[0]["message"]
@pytest.mark.asyncio
async def test_validate_workflow_no_contract(validation_tools, domain_plugin_complete):
"""Test that missing gate_contract produces INFO suggestion"""
result = await validation_tools.validate_workflow_integration(
domain_plugin_complete,
"Domain/Viz"
)
assert "error" not in result
assert result["valid"] is True
assert result["gate_contract"] is None
# Should have info issue about missing contract
info_issues = [i for i in result["issues"] if i["severity"].value == "info"]
contract_info = [i for i in info_issues if "contract" in i["message"].lower()]
assert len(contract_info) == 1
assert "does not declare" in contract_info[0]["message"].lower()

View File

@@ -1,6 +0,0 @@
2026-02-03T14:09:25 | mcp-servers | /home/lmiranda/claude-plugins-work/mcp-servers/gitea/tests/test_config.py | docs/COMMANDS-CHEATSHEET.md CLAUDE.md
2026-02-03T14:09:33 | mcp-servers | /home/lmiranda/claude-plugins-work/mcp-servers/gitea/tests/test_gitea_client.py | docs/COMMANDS-CHEATSHEET.md CLAUDE.md
2026-02-03T14:10:22 | mcp-servers | /home/lmiranda/claude-plugins-work/mcp-servers/gitea/tests/test_issues.py | docs/COMMANDS-CHEATSHEET.md CLAUDE.md
2026-02-03T14:17:12 | mcp-servers | /home/lmiranda/claude-plugins-work/mcp-servers/gitea/README.md | docs/COMMANDS-CHEATSHEET.md CLAUDE.md
2026-02-03T14:18:27 | mcp-servers | /home/lmiranda/claude-plugins-work/mcp-servers/gitea/CHANGELOG.md | docs/COMMANDS-CHEATSHEET.md CLAUDE.md
2026-02-03T14:18:41 | mcp-servers | /home/lmiranda/claude-plugins-work/mcp-servers/gitea/TESTING.md | docs/COMMANDS-CHEATSHEET.md CLAUDE.md

View File

@@ -1,92 +0,0 @@
# Changelog
All notable changes to the Gitea MCP Server will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [Unreleased]
## [1.3.0] - 2026-02-03
### Added
- Pull request tools (7 tools):
- `list_pull_requests` - List PRs from repository
- `get_pull_request` - Get specific PR details
- `get_pr_diff` - Get PR diff content
- `get_pr_comments` - Get comments on a PR
- `create_pr_review` - Create PR review (approve/request changes/comment)
- `add_pr_comment` - Add comment to PR
- `create_pull_request` - Create new pull request
- Label creation tools (3 tools):
- `create_label` - Create repo-level label
- `create_org_label` - Create organization-level label
- `create_label_smart` - Auto-detect org vs repo for label creation
- Validation tools (2 tools):
- `validate_repo_org` - Check if repo belongs to organization
- `get_branch_protection` - Get branch protection rules
### Changed
- Total tools increased from 20 to 36
- Updated test suite to 64 tests (was 42)
### Fixed
- Test fixtures updated to use `owner/repo` format
- Fixed aggregate_issues tests to pass required `org` argument
## [1.2.0] - 2026-01-28
### Added
- Milestone management tools (5 tools):
- `list_milestones` - List all milestones
- `get_milestone` - Get specific milestone
- `create_milestone` - Create new milestone
- `update_milestone` - Update existing milestone
- `delete_milestone` - Delete a milestone
- Issue dependency tools (4 tools):
- `list_issue_dependencies` - List blocking issues
- `create_issue_dependency` - Create dependency between issues
- `remove_issue_dependency` - Remove dependency
- `get_execution_order` - Calculate parallelizable execution order
## [1.1.0] - 2026-01-21
### Added
- Wiki and lessons learned tools (7 tools):
- `list_wiki_pages` - List all wiki pages
- `get_wiki_page` - Get specific wiki page content
- `create_wiki_page` - Create new wiki page
- `update_wiki_page` - Update existing wiki page
- `create_lesson` - Create lessons learned entry
- `search_lessons` - Search lessons by query/tags
- `allocate_rfc_number` - Get next available RFC number
- Automatic git remote URL detection for repository configuration
- Support for SSH, HTTPS, and HTTP git URL formats
### Changed
- Configuration now uses `owner/repo` format exclusively
- Removed separate `GITEA_OWNER` configuration (now derived from repo path)
## [1.0.0] - 2025-01-06
### Added
- Initial release with 8 core tools:
- `list_issues` - List issues from repository
- `get_issue` - Get specific issue details
- `create_issue` - Create new issue with labels
- `update_issue` - Update existing issue
- `add_comment` - Add comment to issue
- `get_labels` - Get all labels (org + repo)
- `suggest_labels` - Intelligent label suggestion
- `aggregate_issues` - Cross-repository issue aggregation (PMO mode)
- Hybrid configuration system (system + project level)
- Branch-aware security model
- Mode detection (project vs company/PMO)
- 42 unit tests with mocks
- Comprehensive documentation
[Unreleased]: https://github.com/owner/repo/compare/v1.3.0...HEAD
[1.3.0]: https://github.com/owner/repo/compare/v1.2.0...v1.3.0
[1.2.0]: https://github.com/owner/repo/compare/v1.1.0...v1.2.0
[1.1.0]: https://github.com/owner/repo/compare/v1.0.0...v1.1.0
[1.0.0]: https://github.com/owner/repo/releases/tag/v1.0.0

View File

@@ -19,9 +19,8 @@ The Gitea MCP Server provides Claude Code with direct access to Gitea for issue
- **Hybrid Configuration**: System-level credentials + project-level paths
- **PMO Support**: Multi-repository aggregation for organization-wide views
### Tools Provided (36 total)
### Tools Provided
#### Issue Management (6 tools)
| Tool | Description | Mode |
|------|-------------|------|
| `list_issues` | List issues from repository | Both |
@@ -29,61 +28,9 @@ The Gitea MCP Server provides Claude Code with direct access to Gitea for issue
| `create_issue` | Create new issue with labels | Both |
| `update_issue` | Update existing issue | Both |
| `add_comment` | Add comment to issue | Both |
| `aggregate_issues` | Cross-repository issue aggregation | PMO Only |
#### Label Management (5 tools)
| Tool | Description | Mode |
|------|-------------|------|
| `get_labels` | Get all labels (org + repo) | Both |
| `suggest_labels` | Intelligent label suggestion | Both |
| `create_label` | Create repo-level label | Both |
| `create_org_label` | Create organization-level label | Both |
| `create_label_smart` | Auto-detect org vs repo for label creation | Both |
#### Wiki & Lessons Learned (7 tools)
| Tool | Description | Mode |
|------|-------------|------|
| `list_wiki_pages` | List all wiki pages | Both |
| `get_wiki_page` | Get specific wiki page content | Both |
| `create_wiki_page` | Create new wiki page | Both |
| `update_wiki_page` | Update existing wiki page | Both |
| `create_lesson` | Create lessons learned entry | Both |
| `search_lessons` | Search lessons by query/tags | Both |
| `allocate_rfc_number` | Get next available RFC number | Both |
#### Milestone Management (5 tools)
| Tool | Description | Mode |
|------|-------------|------|
| `list_milestones` | List all milestones | Both |
| `get_milestone` | Get specific milestone | Both |
| `create_milestone` | Create new milestone | Both |
| `update_milestone` | Update existing milestone | Both |
| `delete_milestone` | Delete a milestone | Both |
#### Issue Dependencies (4 tools)
| Tool | Description | Mode |
|------|-------------|------|
| `list_issue_dependencies` | List blocking issues | Both |
| `create_issue_dependency` | Create dependency between issues | Both |
| `remove_issue_dependency` | Remove dependency | Both |
| `get_execution_order` | Calculate parallelizable execution order | Both |
#### Pull Request Tools (7 tools)
| Tool | Description | Mode |
|------|-------------|------|
| `list_pull_requests` | List PRs from repository | Both |
| `get_pull_request` | Get specific PR details | Both |
| `get_pr_diff` | Get PR diff content | Both |
| `get_pr_comments` | Get comments on a PR | Both |
| `create_pr_review` | Create PR review (approve/request changes) | Both |
| `add_pr_comment` | Add comment to PR | Both |
| `create_pull_request` | Create new pull request | Both |
#### Validation Tools (2 tools)
| Tool | Description | Mode |
|------|-------------|------|
| `validate_repo_org` | Check if repo belongs to organization | Both |
| `get_branch_protection` | Get branch protection rules | Both |
| `aggregate_issues` | Cross-repository issue aggregation | PMO Only |
## Architecture
@@ -93,20 +40,15 @@ The Gitea MCP Server provides Claude Code with direct access to Gitea for issue
mcp-servers/gitea/
├── .venv/ # Python virtual environment
├── requirements.txt # Python dependencies
├── run.sh # Entry point script
├── mcp_server/
│ ├── __init__.py
│ ├── server.py # MCP server entry point (36 tools)
│ ├── config.py # Configuration loader with auto-detection
│ ├── server.py # MCP server entry point
│ ├── config.py # Configuration loader
│ ├── gitea_client.py # Gitea API client
│ └── tools/
│ ├── __init__.py
│ ├── issues.py # Issue management tools
│ ├── labels.py # Label management tools
│ ├── wiki.py # Wiki & lessons learned tools
│ ├── milestones.py # Milestone management tools
│ ├── dependencies.py # Issue dependency tools
│ └── pull_requests.py # Pull request tools
│ ├── issues.py # Issue tools
│ └── labels.py # Label tools
├── tests/
│ ├── __init__.py
│ ├── test_config.py
@@ -114,8 +56,7 @@ mcp-servers/gitea/
│ ├── test_issues.py
│ └── test_labels.py
├── README.md # This file
├── TESTING.md # Testing instructions
└── CHANGELOG.md # Version history
└── TESTING.md # Testing instructions
```
### Mode Detection
@@ -170,6 +111,7 @@ mkdir -p ~/.config/claude
cat > ~/.config/claude/gitea.env << EOF
GITEA_API_URL=https://gitea.example.com/api/v1
GITEA_API_TOKEN=your_gitea_token_here
GITEA_OWNER=bandit
EOF
chmod 600 ~/.config/claude/gitea.env
@@ -195,34 +137,14 @@ For company/PMO mode, omit the `.env` file or don't set `GITEA_REPO`.
**Required Variables**:
- `GITEA_API_URL` - Gitea API endpoint (e.g., `https://gitea.example.com/api/v1`)
- `GITEA_API_TOKEN` - Personal access token with repo permissions
- `GITEA_OWNER` - Organization or user name (e.g., `bandit`)
### Project-Level Configuration
**File**: `<project-root>/.env`
**Optional Variables**:
- `GITEA_REPO` - Repository in `owner/repo` format (enables project mode)
### Automatic Repository Detection
If `GITEA_REPO` is not set, the server auto-detects the repository from your git remote:
**Supported URL Formats**:
- SSH: `ssh://git@gitea.example.com:22/owner/repo.git`
- SSH short: `git@gitea.example.com:owner/repo.git`
- HTTPS: `https://gitea.example.com/owner/repo.git`
- HTTP: `http://gitea.example.com/owner/repo.git`
The repository is extracted as `owner/repo` format automatically.
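
A rough sketch of that extraction for the listed formats (illustrative only, not necessarily the server's exact implementation):

```python
import re

def extract_owner_repo(remote_url: str) -> str | None:
    """Extract 'owner/repo' from common git remote URL formats."""
    patterns = [
        r'^ssh://git@[^/]+/(?P<owner>[^/]+)/(?P<repo>[^/]+?)(?:\.git)?$',  # ssh://git@host:22/owner/repo.git
        r'^git@[^:]+:(?P<owner>[^/]+)/(?P<repo>[^/]+?)(?:\.git)?$',        # git@host:owner/repo.git
        r'^https?://[^/]+/(?P<owner>[^/]+)/(?P<repo>[^/]+?)(?:\.git)?$',   # http(s)://host/owner/repo.git
    ]
    for pattern in patterns:
        match = re.match(pattern, remote_url.strip())
        if match:
            return f"{match.group('owner')}/{match.group('repo')}"
    return None

print(extract_owner_repo("ssh://git@gitea.example.com:22/owner/repo.git"))  # owner/repo
print(extract_owner_repo("git@gitea.example.com:owner/repo.git"))           # owner/repo
print(extract_owner_repo("https://gitea.example.com/owner/repo.git"))       # owner/repo
```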
### Project Directory Detection
The server finds your project directory using these strategies (in order; a sketch follows the list):
1. `CLAUDE_PROJECT_DIR` environment variable (highest priority)
2. `PWD` environment variable (if `.git` or `.env` present)
3. Current working directory (if `.git` or `.env` present)
4. Falls back to company/PMO mode if no project found
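
A minimal sketch of that fallback order (illustrative; the real loader may differ in details):

```python
import os
from pathlib import Path

def find_project_dir() -> Path | None:
    """Resolve the project directory using the priority order listed above."""
    env_dir = os.getenv("CLAUDE_PROJECT_DIR")
    if env_dir:
        return Path(env_dir)  # highest priority, used as-is
    for candidate in (os.getenv("PWD"), os.getcwd()):
        # Accept a candidate only if it looks like a project root (.git or .env present).
        if candidate and any((Path(candidate) / marker).exists() for marker in (".git", ".env")):
            return Path(candidate)
    return None  # caller falls back to company/PMO mode
```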
- `GITEA_REPO` - Repository name (enables project mode)
### Generating Gitea API Token
@@ -298,13 +220,13 @@ suggestions = await label_tools.suggest_labels(context)
### Unit Tests
Run all 64 unit tests with mocks:
Run all 42 unit tests with mocks:
```bash
pytest tests/ -v
```
Expected: `64 passed`
Expected: `42 passed in 0.57s`
### Integration Tests
@@ -405,15 +327,11 @@ See [TESTING.md](./TESTING.md#troubleshooting) for more details.
### Project Structure
- `config.py` - Hybrid configuration loader with auto-detection
- `config.py` - Hybrid configuration loader with mode detection
- `gitea_client.py` - Synchronous Gitea API client using requests
- `tools/issues.py` - Issue management with branch detection
- `tools/labels.py` - Label management and intelligent suggestions
- `tools/wiki.py` - Wiki pages and lessons learned
- `tools/milestones.py` - Milestone CRUD operations
- `tools/dependencies.py` - Issue dependency tracking
- `tools/pull_requests.py` - PR review and management
- `server.py` - MCP server with 36 tools over JSON-RPC 2.0 stdio
- `tools/issues.py` - Async wrappers with branch detection
- `tools/labels.py` - Label management and suggestion
- `server.py` - MCP server with JSON-RPC 2.0 over stdio
### Adding New Tools
@@ -456,14 +374,18 @@ def list_issues(self, state='open', labels=None, repo=None):
## Changelog
See [CHANGELOG.md](./CHANGELOG.md) for full version history.
### v1.0.0 (2025-01-06) - Phase 1 Complete
### Recent Updates
- **v1.3.0** - Pull request tools (7 tools), label creation tools (3)
- **v1.2.0** - Milestone management (5 tools), issue dependencies (4 tools)
- **v1.1.0** - Wiki & lessons learned system (7 tools)
- **v1.0.0** - Initial release with core issue/label tools (8 tools)
✅ Initial implementation:
- Configuration management (hybrid system + project)
- Gitea API client with all CRUD operations
- MCP server with 8 tools
- Issue tools with branch detection
- Label tools with intelligent suggestions
- Mode detection (project vs company)
- Branch-aware security model
- 42 unit tests (100% passing)
- Comprehensive documentation
## License
@@ -485,6 +407,6 @@ For issues or questions:
---
**Built for**: Leo Claude Marketplace - Project Management Plugins
**Tools**: 36
**Phase**: 1 (Complete)
**Status**: ✅ Production Ready
**Last Updated**: 2026-02-03
**Last Updated**: 2025-01-06

View File

@@ -28,7 +28,7 @@ source .venv/bin/activate # Linux/Mac
### Running All Tests
Run all 64 unit tests:
Run all 42 unit tests:
```bash
pytest tests/ -v
@@ -36,7 +36,7 @@ pytest tests/ -v
Expected output:
```
============================== 64 passed ==============================
============================== 42 passed in 0.57s ==============================
```
### Running Specific Test Files
@@ -532,7 +532,7 @@ python -m mcp_server.server
After completing all tests, verify:
- ✅ All 64 unit tests pass
- ✅ All 42 unit tests pass
- ✅ MCP server starts without errors
- ✅ Configuration loads correctly
- ✅ Gitea API client connects successfully
@@ -548,7 +548,7 @@ After completing all tests, verify:
Phase 1 is complete when:
1. **All unit tests pass** (64/64)
1. **All unit tests pass** (42/42)
2. **MCP server starts without errors**
3. **Can list issues from Gitea**
4. **Can create issues with labels** (in development mode)

View File

@@ -1,30 +0,0 @@
"""
Gitea MCP Server package.
Provides MCP tools for Gitea integration via JSON-RPC 2.0.
For external consumers (e.g., HTTP transport), use:
from mcp_server import get_tool_definitions, create_tool_dispatcher, GiteaClient
# Get tool schemas
tools = get_tool_definitions()
# Create dispatcher bound to a client
client = GiteaClient()
dispatch = create_tool_dispatcher(client)
result = await dispatch("list_issues", {"state": "open"})
"""
__version__ = "1.0.0"
from .tool_registry import get_tool_definitions, create_tool_dispatcher
from .gitea_client import GiteaClient
from .config import GiteaConfig
__all__ = [
"__version__",
"get_tool_definitions",
"create_tool_dispatcher",
"GiteaClient",
"GiteaConfig",
]

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -1,43 +0,0 @@
[build-system]
requires = ["setuptools>=61.0", "wheel"]
build-backend = "setuptools.build_meta"
[project]
name = "gitea-mcp-server"
version = "1.0.0"
description = "MCP Server for Gitea integration - provides issue, label, wiki, milestone, dependency, and PR tools"
readme = "README.md"
requires-python = ">=3.10"
license = {text = "MIT"}
authors = [
{ name = "Leo Miranda" }
]
keywords = ["mcp", "gitea", "claude", "tools"]
classifiers = [
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
]
dependencies = [
"mcp>=0.9.0",
"python-dotenv>=1.0.0",
"requests>=2.31.0",
"pydantic>=2.5.0",
]
[project.optional-dependencies]
test = [
"pytest>=7.4.3",
"pytest-asyncio>=0.23.0",
]
[tool.setuptools.packages.find]
where = ["."]
include = ["mcp_server*"]
[tool.pytest.ini_options]
asyncio_mode = "auto"
testpaths = ["tests"]

View File

@@ -28,6 +28,7 @@ def test_load_system_config(tmp_path, monkeypatch):
assert result['api_url'] == 'https://test.com/api/v1'
assert result['api_token'] == 'test_token'
assert result['owner'] == 'test_owner'
assert result['mode'] == 'company' # No repo specified
assert result['repo'] is None

View File

@@ -14,7 +14,8 @@ def mock_config():
mock_instance.load.return_value = {
'api_url': 'https://test.com/api/v1',
'api_token': 'test_token',
'repo': 'test_owner/test_repo', # Combined owner/repo format
'owner': 'test_owner',
'repo': 'test_repo',
'mode': 'project'
}
yield mock_cfg
@@ -30,7 +31,8 @@ def test_client_initialization(gitea_client):
"""Test client initializes with correct configuration"""
assert gitea_client.base_url == 'https://test.com/api/v1'
assert gitea_client.token == 'test_token'
assert gitea_client.repo == 'test_owner/test_repo' # Combined format
assert gitea_client.owner == 'test_owner'
assert gitea_client.repo == 'test_repo'
assert gitea_client.mode == 'project'
assert 'Authorization' in gitea_client.session.headers
assert gitea_client.session.headers['Authorization'] == 'token test_token'
@@ -90,20 +92,15 @@ def test_create_issue(gitea_client):
}
mock_response.raise_for_status = Mock()
# Mock is_org_repo to avoid network call during label resolution
with patch.object(gitea_client, 'is_org_repo', return_value=True):
# Mock get_org_labels and get_labels for label resolution
with patch.object(gitea_client, 'get_org_labels', return_value=[{'name': 'Type/Bug', 'id': 1}]):
with patch.object(gitea_client, 'get_labels', return_value=[]):
with patch.object(gitea_client.session, 'post', return_value=mock_response):
issue = gitea_client.create_issue(
title='New Issue',
body='Issue body',
labels=['Type/Bug']
)
with patch.object(gitea_client.session, 'post', return_value=mock_response):
issue = gitea_client.create_issue(
title='New Issue',
body='Issue body',
labels=['Type/Bug']
)
assert issue['title'] == 'New Issue'
gitea_client.session.post.assert_called_once()
assert issue['title'] == 'New Issue'
gitea_client.session.post.assert_called_once()
def test_update_issue(gitea_client):
@@ -164,7 +161,7 @@ def test_get_org_labels(gitea_client):
mock_response.raise_for_status = Mock()
with patch.object(gitea_client.session, 'get', return_value=mock_response):
labels = gitea_client.get_org_labels(org='test_owner')
labels = gitea_client.get_org_labels()
assert len(labels) == 2
@@ -179,7 +176,7 @@ def test_list_repos(gitea_client):
mock_response.raise_for_status = Mock()
with patch.object(gitea_client.session, 'get', return_value=mock_response):
repos = gitea_client.list_repos(org='test_owner')
repos = gitea_client.list_repos()
assert len(repos) == 2
assert repos[0]['name'] == 'repo1'
@@ -199,7 +196,7 @@ def test_aggregate_issues(gitea_client):
[{'number': 2, 'title': 'Issue 2'}] # repo2
])
aggregated = gitea_client.aggregate_issues(org='test_owner', state='open')
aggregated = gitea_client.aggregate_issues(state='open')
assert 'repo1' in aggregated
assert 'repo2' in aggregated
@@ -208,13 +205,14 @@ def test_aggregate_issues(gitea_client):
def test_no_repo_specified_error(gitea_client):
"""Test error when repository not specified or invalid format"""
"""Test error when repository not specified"""
# Create client without repo
with patch('mcp_server.gitea_client.GiteaConfig') as mock_cfg:
mock_instance = mock_cfg.return_value
mock_instance.load.return_value = {
'api_url': 'https://test.com/api/v1',
'api_token': 'test_token',
'owner': 'test_owner',
'repo': None, # No repo
'mode': 'company'
}
@@ -223,7 +221,7 @@ def test_no_repo_specified_error(gitea_client):
with pytest.raises(ValueError) as exc_info:
client.list_issues()
assert "Use 'owner/repo' format" in str(exc_info.value)
assert "Repository not specified" in str(exc_info.value)
# ========================================

View File

@@ -119,26 +119,22 @@ async def test_aggregate_issues_company_mode(issue_tools):
'repo2': [{'number': 2}]
})
aggregated = await issue_tools.aggregate_issues(org='test_owner')
aggregated = await issue_tools.aggregate_issues()
assert 'repo1' in aggregated
assert 'repo2' in aggregated
@pytest.mark.asyncio
async def test_aggregate_issues_project_mode(issue_tools):
"""Test that aggregate_issues works in project mode with org argument"""
async def test_aggregate_issues_project_mode_error(issue_tools):
"""Test that aggregate_issues fails in project mode"""
issue_tools.gitea.mode = 'project'
with patch.object(issue_tools, '_get_current_branch', return_value='development'):
issue_tools.gitea.aggregate_issues = Mock(return_value={
'repo1': [{'number': 1}]
})
with pytest.raises(ValueError) as exc_info:
await issue_tools.aggregate_issues()
# aggregate_issues now works in any mode when org is provided
aggregated = await issue_tools.aggregate_issues(org='test_owner')
assert 'repo1' in aggregated
assert "only available in company mode" in str(exc_info.value)
def test_branch_detection():

View File

@@ -79,69 +79,6 @@ Add to your Claude Code MCP configuration (`~/.config/claude/mcp.json` or projec
1. **System-level** (`~/.config/claude/netbox.env`): Credentials and defaults
2. **Project-level** (`.env` in current directory): Optional overrides
## Module Filtering (Token Optimization)
By default, the NetBox MCP server registers all 182 tools across 8 modules, consuming ~19,810 tokens of context. For most workflows, you only need a subset of modules.
### Configuration
Add `NETBOX_ENABLED_MODULES` to your `~/.config/claude/netbox.env`:
```bash
# Enable only specific modules (comma-separated)
NETBOX_ENABLED_MODULES=dcim,ipam,virtualization,extras
```
If unset, all modules are enabled (backward compatible).
### Available Modules
| Module | Tool Count | Description | cmdb-assistant Commands |
|--------|------------|-------------|------------------------|
| `dcim` | ~60 | Sites, devices, racks, interfaces, cables | `/cmdb-device`, `/cmdb-site`, `/cmdb-search`, `/cmdb-topology` |
| `ipam` | ~40 | IP addresses, prefixes, VLANs, VRFs | `/cmdb-ip`, `/ip-conflicts`, `/cmdb-search` |
| `virtualization` | ~20 | Clusters, VMs, VM interfaces | `/cmdb-search`, `/cmdb-audit`, `/cmdb-register` |
| `extras` | ~12 | Tags, journal entries, audit log | `/change-audit`, `/cmdb-register` |
| `circuits` | ~15 | Providers, circuits, terminations | — |
| `tenancy` | ~12 | Tenants, contacts | — |
| `vpn` | ~15 | Tunnels, IKE/IPSec policies, L2VPN | — |
| `wireless` | ~8 | Wireless LANs, links, groups | — |
### Recommended Configurations
**For cmdb-assistant users** (~43 tools, ~4,500 tokens):
```bash
NETBOX_ENABLED_MODULES=dcim,ipam,virtualization,extras
```
**Basic infrastructure** (~100 tools):
```bash
NETBOX_ENABLED_MODULES=dcim,ipam
```
**Full CMDB** (all modules, ~182 tools):
```bash
# Omit NETBOX_ENABLED_MODULES or set to all modules
NETBOX_ENABLED_MODULES=dcim,ipam,circuits,virtualization,tenancy,vpn,wireless,extras
```
### Startup Logging
On startup, the server logs enabled modules and tool count:
```
NetBox MCP Server initialized: 43 tools registered (modules: dcim, extras, ipam, virtualization)
```
### Disabled Tool Behavior
Calling a tool from a disabled module returns a clear error:
```
Tool 'circuits_list_circuits' is not available (module 'circuits' not enabled).
Enabled modules: dcim, extras, ipam, virtualization
```
## Available Tools
### DCIM (Data Center Infrastructure Management)
@@ -191,18 +128,18 @@ Enabled modules: dcim, extras, ipam, virtualization
| `circuits_create_provider` | Create a provider |
| `circuits_list_circuits` | List circuits |
| `circuits_create_circuit` | Create a circuit |
| `circ_list_terminations` | List terminations |
| `circuits_list_circuit_terminations` | List terminations |
| ... and more |
### Virtualization
| Tool | Description |
|------|-------------|
| `virt_list_clusters` | List clusters |
| `virt_create_cluster` | Create a cluster |
| `virt_list_vms` | List VMs |
| `virt_create_vm` | Create a VM |
| `virt_list_vm_ifaces` | List VM interfaces |
| `virtualization_list_clusters` | List clusters |
| `virtualization_create_cluster` | Create a cluster |
| `virtualization_list_virtual_machines` | List VMs |
| `virtualization_create_virtual_machine` | Create a VM |
| `virtualization_list_vm_interfaces` | List VM interfaces |
| ... and more |
### Tenancy
@@ -230,9 +167,9 @@ Enabled modules: dcim, extras, ipam, virtualization
| Tool | Description |
|------|-------------|
| `wlan_list_lans` | List wireless LANs |
| `wlan_create_lan` | Create a WLAN |
| `wlan_list_links` | List wireless links |
| `wireless_list_wireless_lans` | List wireless LANs |
| `wireless_create_wireless_lan` | Create a WLAN |
| `wireless_list_wireless_links` | List wireless links |
| ... and more |
### Extras

View File

@@ -9,17 +9,11 @@ from pathlib import Path
from dotenv import load_dotenv
import os
import logging
from typing import Dict, List, Optional, Set
from typing import Dict, Optional
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# All available NetBox modules
ALL_MODULES = frozenset([
'dcim', 'ipam', 'circuits', 'virtualization',
'tenancy', 'vpn', 'wireless', 'extras'
])
class NetBoxConfig:
"""Configuration loader for NetBox MCP Server"""
@@ -29,7 +23,6 @@ class NetBoxConfig:
self.api_token: Optional[str] = None
self.verify_ssl: bool = True
self.timeout: int = 30
self.enabled_modules: Set[str] = set(ALL_MODULES)
def load(self) -> Dict[str, any]:
"""
@@ -80,9 +73,6 @@ class NetBoxConfig:
self.timeout = 30
logger.warning(f"Invalid NETBOX_TIMEOUT value '{timeout_str}', using default 30")
# Module filtering
self.enabled_modules = self._load_enabled_modules()
# Validate required variables
self._validate()
@@ -94,8 +84,7 @@ class NetBoxConfig:
'api_url': self.api_url,
'api_token': self.api_token,
'verify_ssl': self.verify_ssl,
'timeout': self.timeout,
'enabled_modules': self.enabled_modules
'timeout': self.timeout
}
def _validate(self) -> None:
@@ -117,40 +106,3 @@ class NetBoxConfig:
f"Missing required configuration: {', '.join(missing)}\n"
"Check your ~/.config/claude/netbox.env file"
)
def _load_enabled_modules(self) -> Set[str]:
"""
Load enabled modules from NETBOX_ENABLED_MODULES environment variable.
Format: Comma-separated list of module names.
Example: NETBOX_ENABLED_MODULES=dcim,ipam,virtualization,extras
Returns:
Set of enabled module names. If env var is unset/empty, returns all modules.
"""
modules_str = os.getenv('NETBOX_ENABLED_MODULES', '').strip()
if not modules_str:
logger.info("NETBOX_ENABLED_MODULES not set, all modules enabled (default)")
return set(ALL_MODULES)
# Parse comma-separated list, strip whitespace
requested = {m.strip().lower() for m in modules_str.split(',') if m.strip()}
# Validate module names
invalid = requested - ALL_MODULES
if invalid:
logger.warning(
f"Unknown modules in NETBOX_ENABLED_MODULES: {', '.join(sorted(invalid))}. "
f"Valid modules: {', '.join(sorted(ALL_MODULES))}"
)
# Return only valid modules
enabled = requested & ALL_MODULES
if not enabled:
logger.warning("No valid modules enabled, falling back to all modules")
return set(ALL_MODULES)
logger.info(f"Enabled modules: {', '.join(sorted(enabled))}")
return enabled

View File

@@ -8,12 +8,11 @@ Tenancy, VPN, Wireless, and Extras.
import asyncio
import logging
import json
from typing import Optional, Set
from mcp.server import Server
from mcp.server.stdio import stdio_server
from mcp.types import Tool, TextContent
from .config import NetBoxConfig, ALL_MODULES
from .config import NetBoxConfig
from .netbox_client import NetBoxClient
from .tools.dcim import DCIMTools
from .tools.ipam import IPAMTools
@@ -1454,49 +1453,6 @@ TOOL_NAME_MAP = {
}
# Map tool name prefixes to module names.
# This handles both full prefixes and shortened prefixes used in TOOL_NAME_MAP.
PREFIX_TO_MODULE = {
'dcim': 'dcim',
'ipam': 'ipam',
'circuits': 'circuits',
'circ': 'circuits', # Shortened prefix
'virtualization': 'virtualization',
'virt': 'virtualization', # Shortened prefix
'tenancy': 'tenancy',
'vpn': 'vpn',
'wireless': 'wireless',
'wlan': 'wireless', # Shortened prefix
'extras': 'extras',
}
def _get_tool_module(tool_name: str) -> Optional[str]:
"""
Determine which module a tool belongs to.
Checks TOOL_NAME_MAP first for shortened names, then falls back to prefix extraction.
Args:
tool_name: The tool name (e.g., 'dcim_list_devices', 'virt_list_vms')
Returns:
Module name (e.g., 'dcim', 'virtualization') or None if unknown
"""
# Check mapped short names first
if tool_name in TOOL_NAME_MAP:
category, _ = TOOL_NAME_MAP[tool_name]
return category
# Fall back to prefix extraction
parts = tool_name.split('_', 1)
if len(parts) < 2:
return None
prefix = parts[0]
return PREFIX_TO_MODULE.get(prefix)
class NetBoxMCPServer:
"""MCP Server for NetBox integration"""
@@ -1504,8 +1460,6 @@ class NetBoxMCPServer:
self.server = Server("netbox-mcp")
self.config = None
self.client = None
self.enabled_modules: Set[str] = set(ALL_MODULES)
# Tool instances - only instantiated for enabled modules
self.dcim_tools = None
self.ipam_tools = None
self.circuits_tools = None
@@ -1520,39 +1474,18 @@ class NetBoxMCPServer:
try:
config_loader = NetBoxConfig()
self.config = config_loader.load()
self.enabled_modules = self.config['enabled_modules']
self.client = NetBoxClient()
self.dcim_tools = DCIMTools(self.client)
self.ipam_tools = IPAMTools(self.client)
self.circuits_tools = CircuitsTools(self.client)
self.virtualization_tools = VirtualizationTools(self.client)
self.tenancy_tools = TenancyTools(self.client)
self.vpn_tools = VPNTools(self.client)
self.wireless_tools = WirelessTools(self.client)
self.extras_tools = ExtrasTools(self.client)
# Conditionally instantiate tool classes for enabled modules only
if 'dcim' in self.enabled_modules:
self.dcim_tools = DCIMTools(self.client)
if 'ipam' in self.enabled_modules:
self.ipam_tools = IPAMTools(self.client)
if 'circuits' in self.enabled_modules:
self.circuits_tools = CircuitsTools(self.client)
if 'virtualization' in self.enabled_modules:
self.virtualization_tools = VirtualizationTools(self.client)
if 'tenancy' in self.enabled_modules:
self.tenancy_tools = TenancyTools(self.client)
if 'vpn' in self.enabled_modules:
self.vpn_tools = VPNTools(self.client)
if 'wireless' in self.enabled_modules:
self.wireless_tools = WirelessTools(self.client)
if 'extras' in self.enabled_modules:
self.extras_tools = ExtrasTools(self.client)
# Count tools that will be registered
tool_count = sum(
1 for name in TOOL_DEFINITIONS
if _get_tool_module(name) in self.enabled_modules
)
modules_str = ', '.join(sorted(self.enabled_modules))
logger.info(
f"NetBox MCP Server initialized: {tool_count} tools registered "
f"(modules: {modules_str})"
)
logger.info(f"NetBox MCP Server initialized for {self.config['api_url']}")
except Exception as e:
logger.error(f"Failed to initialize: {e}")
raise
@@ -1562,14 +1495,9 @@ class NetBoxMCPServer:
@self.server.list_tools()
async def list_tools() -> list[Tool]:
"""Return list of available tools, filtered by enabled modules"""
"""Return list of available tools"""
tools = []
for name, definition in TOOL_DEFINITIONS.items():
# Filter tools by enabled modules
module = _get_tool_module(name)
if module not in self.enabled_modules:
continue
tools.append(Tool(
name=name,
description=definition['description'],
@@ -1604,14 +1532,6 @@ class NetBoxMCPServer:
'virtualization_list_virtual_machines') to meet the 28-character
limit. TOOL_NAME_MAP handles the translation to actual method names.
"""
# Check module is enabled (routing guard)
module = _get_tool_module(name)
if module and module not in self.enabled_modules:
raise ValueError(
f"Tool '{name}' is not available (module '{module}' not enabled). "
f"Enabled modules: {', '.join(sorted(self.enabled_modules))}"
)
# Check if this is a mapped short name
if name in TOOL_NAME_MAP:
category, method_name = TOOL_NAME_MAP[name]

View File

@@ -1,5 +0,0 @@
2026-01-26T11:40:11 | mcp-servers | /home/lmiranda/claude-plugins-work/mcp-servers/viz-platform/registry/dmc_2_5.json | docs/COMMANDS-CHEATSHEET.md CLAUDE.md
2026-01-26T13:46:31 | mcp-servers | /home/lmiranda/claude-plugins-work/mcp-servers/viz-platform/tests/test_chart_tools.py | docs/COMMANDS-CHEATSHEET.md CLAUDE.md
2026-01-26T13:46:32 | mcp-servers | /home/lmiranda/claude-plugins-work/mcp-servers/viz-platform/tests/test_theme_tools.py | docs/COMMANDS-CHEATSHEET.md CLAUDE.md
2026-01-26T13:46:34 | mcp-servers | /home/lmiranda/claude-plugins-work/mcp-servers/viz-platform/tests/test_theme_tools.py | docs/COMMANDS-CHEATSHEET.md CLAUDE.md
2026-01-26T13:46:35 | mcp-servers | /home/lmiranda/claude-plugins-work/mcp-servers/viz-platform/tests/test_theme_tools.py | docs/COMMANDS-CHEATSHEET.md CLAUDE.md

View File

@@ -1,6 +1,6 @@
{
"name": "clarity-assist",
"version": "1.2.0",
"version": "1.0.0",
"description": "Prompt optimization and requirement clarification with ND-friendly accommodations",
"author": {
"name": "Leo Miranda",

View File

@@ -1,11 +1,3 @@
---
name: clarity-coach
description: Patient, structured coach helping users articulate requirements clearly. Uses neurodivergent-friendly communication patterns.
model: sonnet
permissionMode: default
disallowedTools: Write, Edit, MultiEdit
---
# Clarity Coach Agent
## Visual Output Requirements

View File

@@ -1,7 +1,7 @@
{
"name": "claude-config-maintainer",
"version": "1.2.0",
"description": "Maintains and optimizes CLAUDE.md and settings.local.json configuration files for Claude Code projects",
"version": "1.0.0",
"description": "Maintains and optimizes CLAUDE.md configuration files for Claude Code projects",
"author": {
"name": "Leo Miranda",
"email": "leobmiranda@gmail.com"
@@ -14,9 +14,7 @@
"configuration",
"optimization",
"claude-md",
"developer-tools",
"settings",
"permissions"
"developer-tools"
],
"commands": ["./commands/"]
}

View File

@@ -1,9 +1,6 @@
---
name: maintainer
description: CLAUDE.md optimization and maintenance agent
model: sonnet
permissionMode: acceptEdits
skills: visual-header, settings-optimization
---
# CLAUDE.md Maintainer Agent
@@ -117,54 +114,7 @@ Report plugin coverage percentage and offer to add missing integrations:
- Display the integration content that would be added
- Ask user for confirmation before modifying CLAUDE.md
### 2. Audit Settings Files
When auditing settings files, perform:
#### A. Permission Analysis
Read `.claude/settings.local.json` (primary) and check `.claude/settings.json` and `~/.claude.json` project entries (secondary).
Evaluate using `skills/settings-optimization.md`:
**Redundancy:**
- Duplicate entries in allow/deny arrays
- Subset patterns covered by broader patterns
- Patterns that could be merged
**Coverage:**
- Common safe tools missing from allow list
- MCP server tools not covered
- Directory scopes with no matching permission
**Safety Alignment:**
- Deny rules cover secrets and destructive commands
- Allow rules don't bypass active review layers
- No overly broad patterns without justification
**Profile Fit:**
- Compare against recommended profile for the project's review architecture
- Identify specific additions/removals to reach target profile
#### B. Review Layer Verification
Before recommending auto-allow patterns, verify active review layers:
1. Read `plugins/*/hooks/hooks.json` for each installed plugin
2. Map hook types (PreToolUse, PostToolUse) to tool matchers (Write, Edit, Bash)
3. Confirm plugins are listed in `.claude-plugin/marketplace.json`
4. Only recommend auto-allow for scopes covered by ≥2 verified review layers
#### C. Settings Efficiency Score (100 points)
| Category | Points |
|----------|--------|
| Redundancy | 25 |
| Coverage | 25 |
| Safety Alignment | 25 |
| Profile Fit | 25 |
### 3. Optimize CLAUDE.md Structure
### 2. Optimize CLAUDE.md Structure
**Recommended Structure:**
@@ -199,7 +149,7 @@ Common issues and solutions.
- Use headers that scan easily
- Include examples where they add clarity
### 4. Apply Best Practices
### 3. Apply Best Practices
**DO:**
- Use clear, direct language
@@ -216,7 +166,7 @@ Common issues and solutions.
- Add generic advice that applies to all projects
- Use emojis unless project requires them
### 5. Generate Improvement Reports
### 4. Generate Improvement Reports
After analyzing a CLAUDE.md, provide:
@@ -252,7 +202,7 @@ Suggested Actions:
Would you like me to implement these improvements?
```
### 6. Insert Plugin Integrations
### 5. Insert Plugin Integrations
When adding plugin integration content to CLAUDE.md:
@@ -287,7 +237,7 @@ Add this integration to CLAUDE.md?
- Allow users to skip specific plugins they don't want documented
- Preserve existing CLAUDE.md structure and content
### 7. Create New CLAUDE.md Files
### 6. Create New CLAUDE.md Files
When creating a new CLAUDE.md:

View File

@@ -1,6 +1,6 @@
## CLAUDE.md Maintenance (claude-config-maintainer)
This project uses the **claude-config-maintainer** plugin to analyze and optimize CLAUDE.md and settings.local.json configuration files.
This project uses the **claude-config-maintainer** plugin to analyze and optimize CLAUDE.md configuration files.
### Available Commands
@@ -9,13 +9,8 @@ This project uses the **claude-config-maintainer** plugin to analyze and optimiz
| `/config-analyze` | Analyze CLAUDE.md for optimization opportunities with 100-point scoring |
| `/config-optimize` | Automatically optimize CLAUDE.md structure and content |
| `/config-init` | Initialize a new CLAUDE.md file for a project |
| `/config-diff` | Track CLAUDE.md changes over time with behavioral impact analysis |
| `/config-lint` | Lint CLAUDE.md for anti-patterns and best practices (31 rules) |
| `/config-audit-settings` | Audit settings.local.json permissions with 100-point scoring |
| `/config-optimize-settings` | Optimize permission patterns and apply named profiles |
| `/config-permissions-map` | Visual map of review layers and permission coverage |
### CLAUDE.md Scoring System
### Scoring System
The analysis uses a 100-point scoring system across four categories:
@@ -26,31 +21,10 @@ The analysis uses a 100-point scoring system across four categories:
| Completeness | 25 | Overview, quick start, critical rules, workflows |
| Conciseness | 25 | Efficiency, no repetition, appropriate length |
### Settings Scoring System
The settings audit uses a 100-point scoring system across four categories:
| Category | Points | What It Measures |
|----------|--------|------------------|
| Redundancy | 25 | No duplicates, no subset patterns, efficient rules |
| Coverage | 25 | Common tools allowed, MCP servers covered |
| Safety Alignment | 25 | Deny rules for secrets/destructive ops, review layers verified |
| Profile Fit | 25 | Alignment with recommended profile for review layer count |
### Permission Profiles
| Profile | Use Case |
|---------|----------|
| `conservative` | New users, minimal auto-allow, prompts for most writes |
| `reviewed` | Projects with 2+ review layers (code-sentinel, doc-guardian, PR review) |
| `autonomous` | Trusted CI/sandboxed environments only |
### Usage Guidelines
- Run `/config-analyze` periodically to assess CLAUDE.md quality
- Run `/config-audit-settings` to check permission efficiency
- Target a score of **70+/100** for effective Claude Code operation
- Address HIGH priority issues first when optimizing
- Use `/config-init` when setting up new projects to start with best practices
- Use `/config-permissions-map` to visualize review layer coverage
- Re-analyze after making changes to verify improvements

View File

@@ -1,204 +0,0 @@
---
name: config-audit-settings
description: Audit settings.local.json for permission optimization opportunities
---
# /config-audit-settings
Audit Claude Code `settings.local.json` permissions with 100-point scoring across redundancy, coverage, safety alignment, and profile fit.
## Skills to Load
Before executing, load:
- `skills/visual-header.md`
- `skills/settings-optimization.md`
## Visual Output
```
+-----------------------------------------------------------------+
| CONFIG-MAINTAINER - Settings Audit |
+-----------------------------------------------------------------+
```
## Usage
```
/config-audit-settings # Full audit with recommendations
/config-audit-settings --diagram # Include Mermaid diagram of review layer coverage
```
## Workflow
### Step 1: Locate Settings Files
Search in order:
1. `.claude/settings.local.json` (primary target)
2. `.claude/settings.json` (shared config)
3. `~/.claude.json` project entry (legacy)
Report which format is in use.
### Step 2: Parse Permission Arrays
Extract and analyze:
- `permissions.allow` array
- `permissions.deny` array
- `permissions.ask` array (if present)
- Legacy `allowedTools` array (if legacy format)
### Step 3: Run Pattern Consolidation Analysis
Using `settings-optimization.md` Section 3, detect (a sketch of the duplicate/subset checks follows the table):
| Check | Description |
|-------|-------------|
| Duplicates | Exact same pattern appearing multiple times |
| Subsets | Narrower patterns covered by broader ones |
| Merge candidates | 4+ similar patterns that could be consolidated |
| Overly broad | Unscoped tool permissions (e.g., `Bash` without pattern) |
| Stale entries | Patterns referencing non-existent paths |
| Conflicts | Same pattern in both allow and deny |
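
A minimal sketch of the duplicate and subset checks, using invented patterns and a simple glob approximation (the command's real matching rules live in `settings-optimization.md`):

```python
import fnmatch

# Illustrative allow list; real values would come from .claude/settings.local.json.
allow = [
    "Write(plugins/**)",
    "Write(plugins/projman/*)",   # subset of Write(plugins/**)
    "Bash(git *)",
    "Bash(git status)",           # subset of Bash(git *)
    "Bash(git *)",                # exact duplicate
]

# Exact duplicates: the same pattern appearing more than once.
duplicates = {p for p in allow if allow.count(p) > 1}

def is_subset(narrow: str, broad: str) -> bool:
    """True if `narrow` is fully covered by the broader glob-style pattern (approximation)."""
    return narrow != broad and fnmatch.fnmatchcase(narrow, broad.replace("**", "*"))

# Subsets: narrower patterns covered by a broader pattern elsewhere in the list.
subsets = {narrow for narrow in allow for broad in allow if is_subset(narrow, broad)}

print("duplicates:", duplicates)  # duplicates: {'Bash(git *)'}
print("subsets:", subsets)        # subsets: {'Write(plugins/projman/*)', 'Bash(git status)'}
```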
### Step 4: Detect Active Marketplace Hooks
Read `plugins/*/hooks/hooks.json` files:
```bash
# Check each plugin's hooks
plugins/code-sentinel/hooks/hooks.json # PreToolUse security
plugins/doc-guardian/hooks/hooks.json # PostToolUse drift detection
plugins/project-hygiene/hooks/hooks.json # PostToolUse cleanup
plugins/data-platform/hooks/hooks.json # PostToolUse schema diff
plugins/contract-validator/hooks/hooks.json # Plugin validation
```
Parse each to identify (a parsing sketch follows this list):
- Hook event type (PreToolUse, PostToolUse)
- Tool matchers (Write, Edit, MultiEdit, Bash)
- Whether hook is command type (reliable) or prompt type (unreliable)
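
A parsing sketch for this step. The `hooks.json` shape assumed here (event types keyed under `hooks`, each entry carrying a `matcher` and nested hook records with a `type`) is an assumption for illustration; adjust to the actual plugin schema:

```python
import json
from pathlib import Path

def summarize_hooks(plugins_root: str = "plugins") -> dict:
    """List hook event types and tool matchers declared by each installed plugin."""
    summary = {}
    for hooks_file in Path(plugins_root).glob("*/hooks/hooks.json"):
        plugin = hooks_file.parts[-3]  # plugins/<plugin>/hooks/hooks.json
        data = json.loads(hooks_file.read_text())
        entries = []
        for event, matchers in data.get("hooks", {}).items():
            for entry in matchers:
                kinds = {h.get("type", "?") for h in entry.get("hooks", [])}
                entries.append((event, entry.get("matcher", "*"), ",".join(sorted(kinds))))
        summary[plugin] = entries
    return summary

if __name__ == "__main__":
    for plugin, hooks in summarize_hooks().items():
        for event, matcher, kinds in hooks:
            print(f"{plugin}: {event} [{matcher}] ({kinds})")
```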
### Step 5: Map Review Layers to Directory Scopes
For each directory scope in `settings-optimization.md` Section 4:
1. Count how many review layers are verified active
2. Determine if auto-allow is justified (≥2 layers required)
3. Note any scopes that lack coverage
### Step 6: Compare Against Recommended Profile
Based on review layer count:
- 0-1 layers: Recommend `conservative` profile
- 2+ layers: Recommend `reviewed` profile
- CI/sandboxed: May recommend `autonomous` profile
Calculate profile fit percentage.
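
One illustrative way to compute that percentage is set overlap between the current allow list and the target profile's allow list (the patterns below are invented):

```python
def profile_fit(current_allow: set[str], profile_allow: set[str]) -> float:
    """Percentage of the target profile's allow patterns already present."""
    if not profile_allow:
        return 100.0
    return 100.0 * len(current_allow & profile_allow) / len(profile_allow)

current = {"Bash(git *)", "Write(plugins/**)"}
reviewed_profile = {"Bash(git *)", "Write(plugins/**)", "Bash(pytest *)", "Bash(npm *)"}
print(f"{profile_fit(current, reviewed_profile):.0f}%")  # 50%
```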
### Step 7: Generate Scored Report
Calculate scores using `settings-optimization.md` Section 6.
## Output Format
```
Settings Efficiency Score: XX/100
Redundancy: XX/25
Coverage: XX/25
Safety Alignment: XX/25
Profile Fit: XX/25
Current Profile: [closest match or "custom"]
Recommended Profile: [target based on review layers]
Issues Found:
🔴 CRITICAL: [description]
🟠 HIGH: [description]
🟡 MEDIUM: [description]
🔵 LOW: [description]
Active Review Layers Detected:
✓ code-sentinel (PreToolUse: Write|Edit|MultiEdit)
✓ doc-guardian (PostToolUse: Write|Edit|MultiEdit)
✓ project-hygiene (PostToolUse: Write|Edit)
✗ data-platform schema-diff (not detected)
Recommendations:
1. [specific action with pattern]
2. [specific action with pattern]
...
Follow-Up Actions:
1. Run /config-optimize-settings to apply recommendations
2. Run /config-optimize-settings --dry-run to preview first
3. Run /config-optimize-settings --profile=reviewed to apply profile
```
## Diagram Output (--diagram flag)
When `--diagram` is specified, generate a Mermaid flowchart showing:
**Before generating:** Read `/mnt/skills/user/mermaid-diagrams/SKILL.md` for diagram requirements.
**Diagram structure:**
- Left column: File operation types (Write, Edit, Bash)
- Middle: Review layers that intercept each operation
- Right column: Current permission status (auto-allowed, prompted, denied)
**Color coding:**
- PreToolUse hooks: Blue
- PostToolUse hooks: Green
- Sprint Approval: Amber
- PR Review: Purple
Example structure:
```mermaid
flowchart LR
subgraph Operations
W[Write]
E[Edit]
B[Bash]
end
subgraph Review Layers
CS[code-sentinel]
DG[doc-guardian]
PR[pr-review]
end
subgraph Permission
A[Auto-allowed]
P[Prompted]
D[Denied]
end
W --> CS
W --> DG
E --> CS
E --> DG
CS --> A
DG --> A
B --> P
classDef preHook fill:#e3f2fd
classDef postHook fill:#e8f5e9
classDef prReview fill:#f3e5f5
class CS preHook
class DG postHook
class PR prReview
```
## Issue Severity Levels
| Severity | Icon | Examples |
|----------|------|----------|
| CRITICAL | 🔴 | Unscoped `Bash` in allow, missing deny for secrets |
| HIGH | 🟠 | Overly broad patterns, missing MCP coverage |
| MEDIUM | 🟡 | Subset redundancy, merge candidates |
| LOW | 🔵 | Exact duplicates, minor optimizations |
## DO NOT
- Modify any files (this is audit only)
- Recommend `autonomous` profile unless explicitly sandboxed environment
- Recommend auto-allow for scopes with <2 verified review layers
- Skip hook verification before making recommendations

View File

@@ -1,243 +0,0 @@
---
name: config-optimize-settings
description: Optimize settings.local.json permissions based on audit recommendations
---
# /config-optimize-settings
Optimize Claude Code `settings.local.json` permission patterns and apply named profiles.
## Skills to Load
Before executing, load:
- `skills/visual-header.md`
- `skills/settings-optimization.md`
- `skills/pre-change-protocol.md`
## Visual Output
```
+-----------------------------------------------------------------+
| CONFIG-MAINTAINER - Settings Optimization |
+-----------------------------------------------------------------+
```
## Usage
```
/config-optimize-settings # Apply audit recommendations
/config-optimize-settings --dry-run # Preview only, no changes
/config-optimize-settings --profile=reviewed # Apply named profile
/config-optimize-settings --consolidate-only # Only merge/dedupe, no new rules
```
## Options
| Option | Description |
|--------|-------------|
| `--dry-run` | Preview changes without applying |
| `--profile=NAME` | Apply named profile (`conservative`, `reviewed`, `autonomous`) |
| `--consolidate-only` | Only deduplicate and merge patterns, don't add new rules |
| `--no-backup` | Skip backup (not recommended) |
## Workflow
### Step 1: Run Audit Analysis
Execute the same analysis as `/config-audit-settings`:
1. Locate settings file
2. Parse permission arrays
3. Detect issues (duplicates, subsets, merge candidates, etc.)
4. Verify active review layers
5. Calculate current score
### Step 2: Generate Optimization Plan
Based on audit results, create a change plan:
**For `--consolidate-only`:**
- Remove exact duplicates
- Remove subset patterns covered by broader patterns
- Merge similar patterns (4+ threshold)
- Remove stale patterns for non-existent paths
- Remove conflicting allow entries that are already denied
**For `--profile=NAME`:**
- Calculate diff between current permissions and target profile
- Show additions and removals
- Preserve any custom deny rules not in profile
**For default (full optimization):**
- Apply all consolidation changes
- Add recommended patterns based on verified review layers
- Suggest profile alignment if appropriate
### Step 3: Show Before/After Preview
**MANDATORY:** Always show preview before applying changes.
```
Current Settings:
allow: [12 patterns]
deny: [4 patterns]
Proposed Changes:
REMOVE from allow (redundant):
- Write(plugins/projman/*) [covered by Write(plugins/**)]
- Write(plugins/git-flow/*) [covered by Write(plugins/**)]
- Bash(git status) [covered by Bash(git *)]
ADD to allow (recommended):
+ Bash(npm *) [2 review layers active]
+ Bash(pytest *) [2 review layers active]
ADD to deny (security):
+ Bash(curl * | bash*) [missing safety rule]
After Optimization:
allow: [10 patterns]
deny: [5 patterns]
Score Impact: 67/100 → 85/100 (+18 points)
```
### Step 4: Request User Approval
Ask for confirmation before proceeding:
```
Apply these changes to .claude/settings.local.json?
[1] Yes, apply changes
[2] No, cancel
[3] Apply partial (select which changes)
```
### Step 5: Create Backup
**Before any write operation:**
```bash
# Backup location
.claude/backups/settings.local.json.{YYYYMMDD-HHMMSS}
```
Create the `.claude/backups/` directory if it doesn't exist.
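A minimal sketch of this backup step (the timestamp format follows the example path above; error handling is omitted):
```python
import shutil
from datetime import datetime
from pathlib import Path

def backup_settings(settings_path: str = ".claude/settings.local.json") -> Path:
    """Copy the settings file into .claude/backups/ with a YYYYMMDD-HHMMSS suffix."""
    src = Path(settings_path)
    backup_dir = src.parent / "backups"
    backup_dir.mkdir(parents=True, exist_ok=True)
    stamp = datetime.now().strftime("%Y%m%d-%H%M%S")
    dest = backup_dir / f"{src.name}.{stamp}"
    shutil.copy2(src, dest)
    return dest
```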
### Step 6: Apply Changes
Write the optimized `settings.local.json` file.
### Step 7: Verify
Re-read the file and re-calculate the score to confirm improvement.
```
Optimization Complete!
Backup saved: .claude/backups/settings.local.json.20260202-143022
Settings Efficiency Score: 85/100 (+18 from 67)
Redundancy: 25/25 (+8)
Coverage: 22/25 (+5)
Safety Alignment: 23/25 (+3)
Profile Fit: 15/25 (+2)
Changes applied:
- Removed 3 redundant patterns
- Added 2 recommended patterns
- Added 1 safety deny rule
```
## Profile Application
When using `--profile=NAME`:
### `conservative`
```
Switching to conservative profile...
This profile:
- Allows: Read, Glob, Grep, LS, basic Bash commands
- Allows: Write/Edit only for docs/
- Denies: .env*, secrets/, rm -rf, sudo
All other Write/Edit operations will prompt for approval.
```
### `reviewed`
```
Switching to reviewed profile...
Prerequisites verified:
✓ code-sentinel hook active (PreToolUse)
✓ doc-guardian hook active (PostToolUse)
✓ 2+ review layers detected
This profile:
- Allows: All file operations (Edit, Write, MultiEdit)
- Allows: Scoped Bash commands (git, npm, python, etc.)
- Denies: .env*, secrets/, rm -rf, sudo, curl|bash
```
### `autonomous`
```
⚠️ WARNING: Autonomous profile requested
This profile allows unscoped Bash execution.
Only use in fully sandboxed environments (CI, containers).
Confirm this is a sandboxed environment?
[1] Yes, this is sandboxed - apply autonomous profile
[2] No, cancel
```
## Safety Rules
1. **ALWAYS backup before writing** (unless `--no-backup`)
2. **NEVER remove deny rules without explicit confirmation**
3. **NEVER add unscoped `Bash` to allow** — always use scoped patterns
4. **Preview is MANDATORY** before applying changes
5. **Verify review layers** before recommending broad permissions
## Output Format
### Dry Run Output
```
+-----------------------------------------------------------------+
| CONFIG-MAINTAINER - Settings Optimization |
+-----------------------------------------------------------------+
DRY RUN - No changes will be made
[... preview content ...]
To apply these changes, run:
/config-optimize-settings
```
### Applied Output
```
+-----------------------------------------------------------------+
| CONFIG-MAINTAINER - Settings Optimization |
+-----------------------------------------------------------------+
Optimization Applied Successfully
Backup: .claude/backups/settings.local.json.20260202-143022
[... summary of changes ...]
Score: 67/100 → 85/100
```
## DO NOT
- Apply changes without showing preview
- Remove deny rules silently
- Add unscoped `Bash` permission
- Skip backup without explicit `--no-backup` flag
- Apply `autonomous` profile without sandbox confirmation
- Recommend broad permissions without verifying review layers

View File

@@ -1,256 +0,0 @@
---
name: config-permissions-map
description: Generate visual map of review layers and permission coverage
---
# /config-permissions-map
Generate a Mermaid diagram showing the relationship between file operations, review layers, and permission status.
## Skills to Load
Before executing, load:
- `skills/visual-header.md`
- `skills/settings-optimization.md`
Also read: `/mnt/skills/user/mermaid-diagrams/SKILL.md` (for diagram requirements)
## Visual Output
```
+-----------------------------------------------------------------+
| CONFIG-MAINTAINER - Permissions Map |
+-----------------------------------------------------------------+
```
## Usage
```
/config-permissions-map # Generate and display diagram
/config-permissions-map --save # Save diagram to .mermaid file
```
## Workflow
### Step 1: Detect Active Hooks
Read all plugin hooks from the marketplace:
```
plugins/code-sentinel/hooks/hooks.json
plugins/doc-guardian/hooks/hooks.json
plugins/project-hygiene/hooks/hooks.json
plugins/data-platform/hooks/hooks.json
plugins/contract-validator/hooks/hooks.json
plugins/cmdb-assistant/hooks/hooks.json
```
For each hook, extract:
- Event type (PreToolUse, PostToolUse, SessionStart, etc.)
- Tool matchers (Write, Edit, MultiEdit, Bash patterns)
- Hook command/script
### Step 2: Map Hooks to File Scopes
Create a mapping of which review layers cover which operations:
| Operation | PreToolUse Hooks | PostToolUse Hooks | Other Gates |
|-----------|------------------|-------------------|-------------|
| Write | code-sentinel | doc-guardian, project-hygiene | PR review |
| Edit | code-sentinel | doc-guardian, project-hygiene | PR review |
| MultiEdit | code-sentinel | doc-guardian | PR review |
| Bash(git *) | git-flow | — | — |
### Step 3: Read Current Permissions
Load `.claude/settings.local.json` and parse:
- `allow` array → auto-allowed operations
- `deny` array → blocked operations
- `ask` array → always-prompted operations
### Step 4: Generate Mermaid Flowchart
**Diagram requirements (from mermaid-diagrams skill):**
- Use `classDef` for styling
- Maximum 3 colors (blue, green, amber/purple)
- Semantic arrow labels
- Left-to-right flow
**Structure:**
```mermaid
flowchart LR
subgraph ops[File Operations]
direction TB
W[Write]
E[Edit]
ME[MultiEdit]
BG[Bash git]
BN[Bash npm]
BO[Bash other]
end
subgraph pre[PreToolUse Hooks]
direction TB
CS[code-sentinel<br/>Security Scan]
GF[git-flow<br/>Branch Check]
end
subgraph post[PostToolUse Hooks]
direction TB
DG[doc-guardian<br/>Drift Detection]
PH[project-hygiene<br/>Cleanup]
DP[data-platform<br/>Schema Diff]
end
subgraph perm[Permission Status]
direction TB
AA[Auto-Allowed]
PR[Prompted]
DN[Denied]
end
W -->|intercepted| CS
W -->|tracked| DG
E -->|intercepted| CS
E -->|tracked| DG
BG -->|checked| GF
CS -->|passed| AA
DG -->|logged| AA
GF -->|valid| AA
BO -->|no hook| PR
classDef preHook fill:#e3f2fd,stroke:#1976d2
classDef postHook fill:#e8f5e9,stroke:#388e3c
classDef sprint fill:#fff3e0,stroke:#f57c00
classDef prReview fill:#f3e5f5,stroke:#7b1fa2
classDef allowed fill:#c8e6c9,stroke:#2e7d32
classDef prompted fill:#fff9c4,stroke:#f9a825
classDef denied fill:#ffcdd2,stroke:#c62828
class CS,GF preHook
class DG,PH,DP postHook
class AA allowed
class PR prompted
class DN denied
```
### Step 5: Generate Coverage Summary Table
```
Review Layer Coverage Summary
=============================
| Directory Scope | Layers | Status | Recommendation |
|--------------------------|--------|-----------------|----------------|
| plugins/*/commands/*.md | 3 | ✓ Auto-allowed | — |
| plugins/*/skills/*.md | 2 | ✓ Auto-allowed | — |
| mcp-servers/**/*.py | 3 | ✓ Auto-allowed | — |
| docs/** | 2 | ✓ Auto-allowed | — |
| scripts/*.sh | 2 | ⚠ Prompted | Consider auto-allow |
| .env* | 0 | ✗ Denied | Correct - secrets |
| Root directory | 1 | ⚠ Prompted | Keep prompted |
Legend:
✓ = Covered by ≥2 review layers, auto-allowed
⚠ = Fewer than 2 layers or not allowed
✗ = Explicitly denied
```
### Step 6: Identify Gaps
Report any gaps in coverage:
```
Coverage Gaps Detected:
1. Bash(npm *) — not in allow list, but npm operations are common
→ 2 review layers active, could be auto-allowed
2. mcp__data-platform__* — MCP server configured but tools not allowed
→ Add to allow list to avoid prompts
3. scripts/*.sh — 2 review layers but still prompted
→ Consider adding Write(scripts/**) to allow
```
### Step 7: Output Diagram
Display the Mermaid diagram inline.
If `--save` flag is used:
- Save to `.claude/permissions-map.mermaid`
- Report the file path
## Output Format
```
+-----------------------------------------------------------------+
| CONFIG-MAINTAINER - Permissions Map |
+-----------------------------------------------------------------+
Review Layer Status
===================
PreToolUse Hooks (intercept before operation):
✓ code-sentinel — Write, Edit, MultiEdit
✓ git-flow — Bash(git checkout *), Bash(git commit *)
PostToolUse Hooks (track after operation):
✓ doc-guardian — Write, Edit, MultiEdit
✓ project-hygiene — Write, Edit
✗ data-platform — not detected
Other Review Gates:
✓ Sprint Approval (projman milestone workflow)
✓ PR Review (pr-review multi-agent)
Permissions Flow Diagram
========================
```mermaid
[diagram here]
```
Coverage Summary
================
[table here]
Gaps & Recommendations
======================
[gaps list here]
```
## File Output (--save flag)
When `--save` is specified:
```
Diagram saved to: .claude/permissions-map.mermaid
To view:
- Open in VS Code with Mermaid extension
- Paste into https://mermaid.live
- Include in documentation with ```mermaid code fence
```
## Color Scheme
| Element | Color | Hex |
|---------|-------|-----|
| PreToolUse hooks | Blue | #e3f2fd |
| PostToolUse hooks | Green | #e8f5e9 |
| Sprint/Planning gates | Amber | #fff3e0 |
| PR Review | Purple | #f3e5f5 |
| Auto-allowed | Light green | #c8e6c9 |
| Prompted | Light yellow | #fff9c4 |
| Denied | Light red | #ffcdd2 |
## DO NOT
- Generate diagrams without reading the mermaid-diagrams skill
- Use more than 3 primary colors in the diagram
- Skip the coverage summary table
- Fail to identify coverage gaps

View File

@@ -1,377 +0,0 @@
# Settings Optimization Skill
This skill provides comprehensive knowledge for auditing and optimizing Claude Code `settings.local.json` permission configurations.
---
## Section 1: Settings File Locations & Format
Claude Code uses two configuration formats for permissions:
### Newer Format (Recommended)
**Primary target:** `.claude/settings.local.json` (project-local, gitignored)
**Secondary locations:**
- `.claude/settings.json` (shared, committed)
- `~/.claude.json` (legacy global config)
```json
{
"permissions": {
"allow": ["Edit", "Write(plugins/**)", "Bash(git *)"],
"deny": ["Read(.env*)", "Bash(rm *)"],
"ask": ["Bash(pip install *)"]
}
}
```
**Field meanings:**
- `allow`: Operations auto-approved without prompting
- `deny`: Operations blocked entirely
- `ask`: Operations that always prompt (overrides allow)
### Legacy Format
Found in `~/.claude.json` with per-project entries:
```json
{
"projects": {
"/path/to/project": {
"allowedTools": ["Read", "Write", "Bash(git *)"]
}
}
}
```
**Detection strategy:**
1. Check `.claude/settings.local.json` first (primary)
2. Check `.claude/settings.json` (shared)
3. Check `~/.claude.json` for project entry (legacy)
4. Report which format is in use (see the sketch below)
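A small sketch of that detection order, assuming the legacy global config keys projects by absolute path as in the legacy example above:
```python
import json
from pathlib import Path

def locate_settings(project_root: str):
    """Return (path, format) for the first permissions source found, or None."""
    root = Path(project_root)
    candidates = [
        (root / ".claude" / "settings.local.json", "project-local"),
        (root / ".claude" / "settings.json", "shared"),
        (Path.home() / ".claude.json", "legacy"),
    ]
    for path, kind in candidates:
        if not path.is_file():
            continue
        if kind == "legacy":
            # Legacy config only counts if it has an entry for this project
            projects = json.loads(path.read_text()).get("projects", {})
            if str(root.resolve()) not in projects:
                continue
        return path, kind
    return None
```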
---
## Section 2: Permission Rule Syntax Reference
| Pattern | Meaning |
|---------|---------|
| `Tool` or `Tool(*)` | Allow all uses of that tool |
| `Bash(npm run build)` | Exact command match |
| `Bash(npm run test *)` | Prefix match (space+asterisk = word boundary) |
| `Bash(npm*)` | Prefix match without word boundary |
| `Write(plugins/**)` | Glob — all files recursively under `plugins/` |
| `Write(plugins/projman/*)` | Glob — direct children only |
| `Read(.env*)` | Pattern matching `.env`, `.env.local`, etc. |
| `mcp__gitea__*` | All tools from the gitea MCP server |
| `mcp__netbox__list_*` | Specific MCP tool pattern |
| `WebFetch(domain:github.com)` | Domain-restricted web fetch |
### Important Nuances
**Word boundary matching** (approximated in the sketch below):
- `Bash(ls *)` (with space) matches `ls -la` but NOT `lsof`
- `Bash(ls*)` (no space) matches both `ls -la` AND `lsof`
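A rough approximation of these prefix semantics, for illustration only (this is not Claude Code's actual matcher, and whether `Bash(ls *)` also matches a bare `ls` is an assumption here):
```python
def prefix_matches(rule_spec: str, command: str) -> bool:
    """Approximate the word-boundary behaviour described above."""
    if rule_spec.endswith(" *"):                       # space+asterisk: word boundary
        prefix = rule_spec[:-2]
        return command == prefix or command.startswith(prefix + " ")
    if rule_spec.endswith("*"):                        # bare asterisk: raw prefix
        return command.startswith(rule_spec[:-1])
    return command == rule_spec                        # exact command match

# prefix_matches("ls *", "lsof") -> False; prefix_matches("ls*", "lsof") -> True
```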
**Precedence rules:**
- `deny` rules take precedence over `allow` rules
- `ask` rules override both (always prompts even if allowed)
- More specific patterns do NOT override broader patterns
**Command operators:**
- Piped commands (`cmd1 | cmd2`) may not match individual command rules (known Claude Code limitation)
- Shell operators (`&&`, `||`) are recognized by Claude Code, so chained commands cannot use a prefix rule to bypass approval
- Commands with redirects (`>`, `>>`, `<`) are evaluated as complete strings
---
## Section 3: Pattern Consolidation Rules
The audit detects these optimization opportunities:
| Issue | Example | Recommendation |
|-------|---------|----------------|
| **Exact duplicates** | `Write(plugins/**)` listed twice | Remove duplicate |
| **Subset redundancy** | `Write(plugins/projman/*)` when `Write(plugins/**)` exists | Remove the narrower pattern — already covered |
| **Merge candidates** | `Write(plugins/projman/*)`, `Write(plugins/git-flow/*)`, `Write(plugins/pr-review/*)` ... (4+ similar patterns) | Merge to `Write(plugins/**)` |
| **Overly broad** | `Bash` (no specifier = allows ALL bash) | Flag as security concern, suggest scoped patterns |
| **Stale patterns** | `Write(plugins/old-plugin/**)` for a plugin that no longer exists | Remove stale entry |
| **Missing MCP permissions** | MCP servers in `.mcp.json` but no `mcp__servername__*` in allow | Suggest adding if server is trusted |
| **Conflicting rules** | Same pattern in both `allow` and `deny` | Flag conflict — deny wins, but allow is dead weight |
### Consolidation Algorithm
1. **Deduplicate:** Remove exact duplicates from each array
2. **Subset elimination:** For each pattern, check if a broader pattern exists (see the sketch after this list)
- `Write(plugins/projman/*)` is subset of `Write(plugins/**)`
- `Bash(git status)` is subset of `Bash(git *)`
3. **Merge detection:** If 4+ patterns share a common prefix, suggest merge
- Threshold: 4 patterns minimum before suggesting consolidation
4. **Stale detection:** Cross-reference file patterns against actual filesystem
5. **Conflict detection:** Check for patterns appearing in multiple arrays
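A compact sketch of steps 1-2, treating the broader specifier as a glob via `fnmatch`; this covers the examples above but is only a heuristic, not Claude Code's exact matching logic:
```python
from fnmatch import fnmatch

def parse_rule(rule: str) -> tuple[str, str]:
    """Split 'Write(plugins/**)' into ('Write', 'plugins/**'); a bare tool name means any use."""
    if "(" in rule and rule.endswith(")"):
        tool, _, spec = rule.partition("(")
        return tool, spec[:-1]
    return rule, "*"

def is_subset(narrow: str, broad: str) -> bool:
    """Heuristic containment: same tool and the narrow specifier matches the broad glob."""
    n_tool, n_spec = parse_rule(narrow)
    b_tool, b_spec = parse_rule(broad)
    return narrow != broad and n_tool == b_tool and fnmatch(n_spec, b_spec)

def consolidate(allow: list[str]) -> list[str]:
    """Steps 1-2: drop exact duplicates, then drop rules strictly covered by a broader rule."""
    deduped = list(dict.fromkeys(allow))
    return [rule for rule in deduped
            if not any(is_subset(rule, other) and not is_subset(other, rule)
                       for other in deduped)]

# consolidate(['Write(plugins/**)', 'Write(plugins/projman/*)', 'Bash(git *)', 'Bash(git status)'])
# -> ['Write(plugins/**)', 'Bash(git *)']
```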
---
## Section 4: Review-Layer-Aware Recommendations
This is the key section. Map upstream review processes to directory scopes:
| Directory Scope | Active Review Layers | Auto-Allow Recommendation |
|----------------|---------------------|---------------------------|
| `plugins/*/commands/*.md` | Sprint approval, PR review, doc-guardian PostToolUse | `Write(plugins/*/commands/**)` — 3 layers cover this |
| `plugins/*/skills/*.md` | Sprint approval, PR review | `Write(plugins/*/skills/**)` — 2 layers |
| `plugins/*/agents/*.md` | Sprint approval, PR review, contract-validator | `Write(plugins/*/agents/**)` — 3 layers |
| `mcp-servers/*/mcp_server/*.py` | Code-sentinel PreToolUse, sprint approval, PR review | `Write(mcp-servers/**)` + `Edit(mcp-servers/**)` — sentinel catches secrets |
| `docs/*.md` | Doc-guardian PostToolUse, PR review | `Write(docs/**)` + `Edit(docs/**)` |
| `.claude-plugin/*.json` | validate-marketplace.sh, PR review | `Write(.claude-plugin/**)` |
| `scripts/*.sh` | Code-sentinel, PR review | `Write(scripts/**)` — with caution flag |
| `CLAUDE.md`, `CHANGELOG.md`, `README.md` | Doc-guardian, PR review | `Write(CLAUDE.md)`, `Write(CHANGELOG.md)`, `Write(README.md)` |
### Critical Rule: Hook Verification
**Before recommending auto-allow for a scope, the agent MUST verify the hook is actually configured.**
Read the relevant `plugins/*/hooks/hooks.json` file:
- If code-sentinel's hook is missing or disabled, do NOT recommend auto-allowing `mcp-servers/**` writes
- If doc-guardian's hook is missing, do NOT recommend auto-allowing `docs/**` without caution
- Count the number of verified review layers before making recommendations
**Minimum threshold:** Recommend auto-allow only for scopes covered by ≥2 verified review layers.
---
## Section 5: Permission Profiles
Three named profiles for different project contexts:
### `conservative` (Default for New Users)
Minimal permissions, prompts for most write operations:
```json
{
"permissions": {
"allow": [
"Read",
"Glob",
"Grep",
"LS",
"Write(docs/**)",
"Edit(docs/**)",
"Bash(git status *)",
"Bash(git diff *)",
"Bash(git log *)",
"Bash(cat *)",
"Bash(ls *)",
"Bash(head *)",
"Bash(tail *)",
"Bash(wc *)",
"Bash(grep *)"
],
"deny": [
"Read(.env*)",
"Read(./secrets/**)",
"Bash(rm -rf *)",
"Bash(sudo *)"
]
}
}
```
### `reviewed` (Projects with ≥2 Upstream Review Layers)
This is the target profile for projects using the marketplace's multi-layer review architecture:
```json
{
"permissions": {
"allow": [
"Read",
"Glob",
"Grep",
"LS",
"Edit",
"Write",
"MultiEdit",
"Bash(git *)",
"Bash(python *)",
"Bash(pip install *)",
"Bash(cd *)",
"Bash(cat *)",
"Bash(ls *)",
"Bash(head *)",
"Bash(tail *)",
"Bash(wc *)",
"Bash(grep *)",
"Bash(find *)",
"Bash(mkdir *)",
"Bash(cp *)",
"Bash(mv *)",
"Bash(touch *)",
"Bash(chmod *)",
"Bash(source *)",
"Bash(echo *)",
"Bash(sed *)",
"Bash(awk *)",
"Bash(sort *)",
"Bash(uniq *)",
"Bash(diff *)",
"Bash(jq *)",
"Bash(npm *)",
"Bash(npx *)",
"Bash(node *)",
"Bash(pytest *)",
"Bash(python -m *)",
"Bash(./scripts/*)",
"WebFetch",
"WebSearch"
],
"deny": [
"Read(.env*)",
"Read(./secrets/**)",
"Bash(rm -rf *)",
"Bash(sudo *)",
"Bash(curl * | bash*)",
"Bash(wget * | bash*)"
]
}
}
```
### `autonomous` (Trusted CI/Sandboxed Environments Only)
Maximum permissions for automated environments:
```json
{
"permissions": {
"allow": [
"Read",
"Glob",
"Grep",
"LS",
"Edit",
"Write",
"MultiEdit",
"Bash",
"WebFetch",
"WebSearch"
],
"deny": [
"Read(.env*)",
"Read(./secrets/**)",
"Bash(rm -rf /)",
"Bash(sudo *)"
]
}
}
```
**Warning:** The `autonomous` profile allows unscoped `Bash` — only use in fully sandboxed environments.
---
## Section 6: Scoring Criteria (Settings Efficiency Score — 100 points)
| Category | Points | What It Measures |
|----------|--------|------------------|
| **Redundancy** | 25 | No duplicates, no subset patterns, merged where possible |
| **Coverage** | 25 | Common tools allowed, MCP servers covered, no unnecessary gaps |
| **Safety Alignment** | 25 | Deny rules cover secrets, destructive commands; review layers verified |
| **Profile Fit** | 25 | How closely the settings match the recommended profile for the project's review layer count |
### Scoring Breakdown
**Redundancy (25 points):**
- 25: No duplicates, no subsets, patterns are consolidated
- 20: 1-2 minor redundancies
- 15: 3-5 redundancies or 1 merge candidate group
- 10: 6+ redundancies or 2+ merge candidate groups
- 5: Significant redundancy (10+ issues)
- 0: Severe redundancy (20+ issues)
**Coverage (25 points):**
- 25: All common tools allowed, MCP servers covered
- 20: Missing 1-2 common tool patterns
- 15: Missing 3-5 patterns or 1 MCP server
- 10: Missing 6+ patterns or 2+ MCP servers
- 5: Significant gaps causing frequent prompts
- 0: Minimal coverage (prompts on most operations)
**Safety Alignment (25 points):**
- 25: Deny rules cover secrets + destructive ops, review layers verified
- 20: Minor gaps (e.g., missing one secret pattern)
- 15: Overly broad allow without review layer coverage
- 10: Missing deny rules for secrets or destructive commands
- 5: Unsafe patterns without review layer justification
- 0: Security concerns (e.g., unscoped `Bash` without review layers)
**Profile Fit (25 points):**
- 25: Matches recommended profile exactly
- 20: Within 90% of recommended profile
- 15: Within 80% of recommended profile
- 10: Within 70% of recommended profile
- 5: Significant deviation from recommended profile
- 0: No alignment with any named profile
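Read literally, the Redundancy rubric maps onto a small threshold function like the following (the other three categories follow the same shape; where a finding falls between rubric rows is a judgment call):
```python
def redundancy_points(redundancies: int, merge_groups: int) -> int:
    """Approximate mapping of redundancy findings onto the 25-point rubric above."""
    if redundancies >= 20:
        return 0
    if redundancies >= 10:
        return 5
    if redundancies >= 6 or merge_groups >= 2:
        return 10
    if redundancies >= 3 or merge_groups >= 1:
        return 15
    if redundancies >= 1:
        return 20
    return 25
```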
### Score Interpretation
| Score Range | Status | Meaning |
|-------------|--------|---------|
| 90-100 | Optimized | Minimal prompt interruptions, safety maintained |
| 70-89 | Good | Minor consolidation opportunities |
| 50-69 | Needs Work | Significant redundancy or missing permissions |
| Below 50 | Poor | Likely getting constant approval prompts unnecessarily |
---
## Section 7: Hook Detection Method
To verify which review layers are active, read these files:
| File | Hook Type | Tool Matcher | Purpose |
|------|-----------|--------------|---------|
| `plugins/code-sentinel/hooks/hooks.json` | PreToolUse | Write\|Edit\|MultiEdit | Blocks hardcoded secrets |
| `plugins/doc-guardian/hooks/hooks.json` | PostToolUse | Write\|Edit\|MultiEdit | Tracks documentation drift |
| `plugins/project-hygiene/hooks/hooks.json` | PostToolUse | Write\|Edit | Cleanup tracking |
| `plugins/data-platform/hooks/hooks.json` | PostToolUse | Edit\|Write | Schema diff detection |
| `plugins/cmdb-assistant/hooks/hooks.json` | PreToolUse | (if exists) | Input validation |
### Verification Process
1. **Read each hooks.json file**
2. **Parse the JSON to find hook configurations**
3. **Check the `type` field** — must be `"command"` (not `"prompt"`)
4. **Check the `event` field** — maps to when hook runs
5. **Check the `tools` array** — which operations are intercepted
6. **Verify plugin is in marketplace** — check `.claude-plugin/marketplace.json`
### Example Hook Structure
```json
{
"hooks": [
{
"event": "PreToolUse",
"type": "command",
"command": "./hooks/security-check.sh",
"tools": ["Write", "Edit", "MultiEdit"]
}
]
}
```
### Review Layer Count
Count verified review layers for each scope:
| Layer | Verification |
|-------|-------------|
| Sprint approval | Check if projman plugin is installed (milestone workflow) |
| PR review | Check if pr-review plugin is installed |
| code-sentinel PreToolUse | hooks.json exists with PreToolUse on Write/Edit |
| doc-guardian PostToolUse | hooks.json exists with PostToolUse on Write/Edit |
| contract-validator | Plugin installed + hooks present |
**Recommendation threshold:** Only recommend auto-allow for scopes with ≥2 verified layers.
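A tiny sketch of the threshold check, with hypothetical layer and scope names:
```python
REQUIRED_LAYERS = 2  # threshold from this skill: auto-allow needs >= 2 verified layers

def auto_allow_eligible(scope_layers: dict[str, list[str]], verified: set[str]) -> dict[str, bool]:
    """For each directory scope, check whether enough verified review layers cover it."""
    return {
        scope: sum(1 for layer in layers if layer in verified) >= REQUIRED_LAYERS
        for scope, layers in scope_layers.items()
    }

# verified = {"code-sentinel", "doc-guardian", "pr-review"}
# scope_layers = {"docs/**": ["doc-guardian", "pr-review"], "scripts/*.sh": ["code-sentinel"]}
# -> {"docs/**": True, "scripts/*.sh": False}
```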

View File

@@ -47,27 +47,6 @@ This skill defines the standard visual header for claude-config-maintainer comma
+-----------------------------------------------------------------+
```
### /config-audit-settings
```
+-----------------------------------------------------------------+
| CONFIG-MAINTAINER - Settings Audit |
+-----------------------------------------------------------------+
```
### /config-optimize-settings
```
+-----------------------------------------------------------------+
| CONFIG-MAINTAINER - Settings Optimization |
+-----------------------------------------------------------------+
```
### /config-permissions-map
```
+-----------------------------------------------------------------+
| CONFIG-MAINTAINER - Permissions Map |
+-----------------------------------------------------------------+
```
## Usage
Display the header at the start of command execution, before any analysis or output.

View File

@@ -1,10 +1,3 @@
---
name: cmdb-assistant
description: Infrastructure management assistant specialized in NetBox CMDB operations. Use for device management, IP addressing, and infrastructure queries.
model: sonnet
permissionMode: default
---
# CMDB Assistant Agent
You are an infrastructure management assistant specialized in NetBox CMDB operations.

View File

@@ -32,9 +32,9 @@ The following NetBox MCP tools are available for infrastructure management:
- `ipam_list_available_ips`, `ipam_create_available_ip` - IP allocation
**Virtualization:**
- `virt_list_vms`, `virt_create_vm`, `virt_update_vm`, `virt_delete_vm` - VM management
- `virt_list_clusters`, `virt_create_cluster`, `virt_update_cluster`, `virt_delete_cluster` - Cluster management
- `virt_list_vm_ifaces`, `virt_create_vm_iface` - VM interface management
- `virtualization_list_virtual_machines`, `virtualization_create_virtual_machine` - VM management
- `virtualization_list_clusters`, `virtualization_create_cluster` - Cluster management
- `virtualization_list_vm_interfaces` - VM interface management
**Circuits:**
- `circuits_list_circuits`, `circuits_create_circuit` - Circuit management

View File

@@ -31,7 +31,7 @@ When the user provides a search query, determine the best approach:
3. **Site search**: Use `dcim_list_sites` with name filter
4. **Prefix search**: Use `ipam_list_prefixes` with prefix or within filter
5. **VLAN search**: Use `ipam_list_vlans` with vid or name filter
6. **VM search**: Use `virt_list_vms` with name filter
6. **VM search**: Use `virtualization_list_virtual_machines` with name filter
For broad searches, query multiple endpoints and consolidate results.

View File

@@ -18,7 +18,7 @@ Configure the cmdb-assistant plugin with NetBox integration.
## Usage
```
/cmdb-setup
/initial-setup
```
## Instructions

View File

@@ -24,7 +24,7 @@ Standard visual header for cmdb-assistant commands.
| `/cmdb-topology` | Topology |
| `/change-audit` | Change Audit |
| `/ip-conflicts` | IP Conflict Detection |
| `/cmdb-setup` | Setup Wizard |
| `/initial-setup` | Setup Wizard |
| Agent mode | Infrastructure Management |
## Usage

View File

@@ -1,8 +1,5 @@
---
name: refactor-advisor
description: Code structure and refactoring specialist. Use when analyzing code quality, design patterns, or planning refactoring work.
model: sonnet
permissionMode: acceptEdits
description: Code structure and refactoring specialist
---
# Refactor Advisor Agent

View File

@@ -1,9 +1,6 @@
---
name: security-reviewer
description: Security-focused code review agent
model: sonnet
permissionMode: plan
disallowedTools: Write, Edit, MultiEdit
---
# Security Reviewer Agent

View File

@@ -1,6 +1,6 @@
{
"name": "contract-validator",
"version": "1.2.0",
"version": "1.1.0",
"description": "Cross-plugin compatibility validation and Claude.md agent verification",
"author": {
"name": "Leo Miranda",

View File

@@ -1,9 +1,6 @@
---
name: agent-check
description: Agent definition validator for quick verification
model: haiku
permissionMode: plan
disallowedTools: Write, Edit, MultiEdit
---
# Agent Check Agent

View File

@@ -1,10 +1,3 @@
---
name: full-validation
description: Contract validation specialist for comprehensive cross-plugin compatibility validation of the entire marketplace.
model: sonnet
permissionMode: default
---
# Full Validation Agent
You are a contract validation specialist. Your role is to perform comprehensive cross-plugin compatibility validation for the entire marketplace.
@@ -100,5 +93,5 @@ You are a contract validation specialist. Your role is to perform comprehensive
2. Parses all README.md files
3. Runs 66 pairwise compatibility checks
4. Finds 3 errors, 4 warnings
5. Reports: "Command conflict: pluginA and pluginB both define /setup"
5. Reports: "Command conflict: projman and data-platform both define /initial-setup"
6. Suggests: "Rename one command to avoid ambiguity"

View File

@@ -2,7 +2,7 @@
description: Interactive setup wizard for contract-validator plugin
---
# /cv-setup - Contract Validator Setup Wizard
# /initial-setup - Contract-Validator Setup Wizard
## Skills to Load
- skills/visual-output.md

View File

@@ -30,7 +30,6 @@
- Use `validate_compatibility` for pairwise checks
- Use `validate_agent_refs` for CLAUDE.md agents
- Use `validate_data_flow` for data sequences
- Use `validate_workflow_integration` for domain plugin advisory interfaces
5. **Generate report**:
- Use `generate_compatibility_report` for full report

View File

@@ -16,7 +16,6 @@ Available MCP tools for contract-validator operations.
| `validate_compatibility` | Check two plugins for conflicts |
| `validate_agent_refs` | Check agent tool references exist |
| `validate_data_flow` | Verify data flow through agent sequence |
| `validate_workflow_integration` | Check domain plugin exposes required advisory interfaces and gate contract version |
### Report Tools
| Tool | Description |
@@ -54,17 +53,9 @@ Available MCP tools for contract-validator operations.
3. Build Mermaid diagram from results
```
### Workflow Integration Check
```
1. validate_workflow_integration(plugin_path, domain_label) # Check single domain plugin
2. validate_workflow_integration(plugin_path, domain_label, expected_contract="v1") # With contract version check
3. For each domain in domain-consultation.md detection rules:
validate_workflow_integration(domain_plugin_path, domain_label, expected_contract)
```
## Error Handling
If MCP tools fail:
1. Check if `/cv-setup` has been run
1. Check if `/initial-setup` has been run
2. Verify session was restarted after setup
3. Check MCP server venv exists and is valid

View File

@@ -30,15 +30,6 @@ Rules for validating plugin compatibility and agent definitions.
3. Check for orphaned data references
4. Ensure required data is available at each step
### Workflow Integration Checks
1. Gate command exists in plugin's commands/ directory
2. Gate command produces binary PASS/FAIL output
3. Review command exists (WARNING if missing, not ERROR)
4. Advisory agent exists referencing the domain label
5. Gate command declares `gate_contract` version in frontmatter
6. If expected version provided, gate contract version matches expected
- Severity: ERROR for missing gate, WARNING for missing review/agent or contract mismatch, INFO for missing contract
## Severity Levels
| Level | Meaning | Action |
@@ -55,8 +46,6 @@ Rules for validating plugin compatibility and agent definitions.
| Data flow gap | Producer not called before consumer | Reorder workflow steps |
| Name conflict | Two plugins use same command | Rename one command |
| Orphan reference | Data produced but never consumed | Remove or use the data |
| Missing gate command | Domain plugin lacks /X-gate command | Create commands/{domain}-gate.md |
| Missing advisory agent | Domain plugin has no reviewer agent | Create agents/{domain}-advisor.md |
## MCP Tools
@@ -65,5 +54,4 @@ Rules for validating plugin compatibility and agent definitions.
| `validate_compatibility` | Check two plugins for conflicts |
| `validate_agent_refs` | Check agent tool references |
| `validate_data_flow` | Verify data flow sequences |
| `validate_workflow_integration` | Check domain plugin advisory interfaces |
| `list_issues` | Filter issues by severity or type |

View File

@@ -1,6 +1,6 @@
{
"name": "data-platform",
"version": "1.3.0",
"version": "1.1.0",
"description": "Data engineering tools with pandas, PostgreSQL/PostGIS, and dbt integration",
"author": {
"name": "Leo Miranda",

View File

@@ -1,318 +0,0 @@
---
name: data-advisor
description: Reviews code for data integrity, schema validity, and dbt compliance using data-platform MCP tools. Use when validating database operations or data pipelines.
model: sonnet
permissionMode: default
---
# Data Advisor Agent
You are a strict data integrity auditor. Your role is to review code for proper schema usage, dbt compliance, lineage integrity, and data quality standards.
## Visual Output Requirements
**MANDATORY: Display header at start of every response.**
```
+----------------------------------------------------------------------+
| DATA-PLATFORM - Data Advisor |
| [Target Path] |
+----------------------------------------------------------------------+
```
## Trigger Conditions
Activate this agent when:
- User runs `/data-review <path>`
- User runs `/data-gate <path>`
- Projman orchestrator requests data domain gate check
- Code review includes database operations, dbt models, or data pipelines
## Skills to Load
- skills/data-integrity-audit.md
- skills/mcp-tools-reference.md
## Available MCP Tools
### PostgreSQL (Schema Validation)
| Tool | Purpose |
|------|---------|
| `pg_connect` | Verify database is reachable |
| `pg_tables` | List tables, verify existence |
| `pg_columns` | Get column details, verify types and constraints |
| `pg_schemas` | List available schemas |
| `pg_query` | Run diagnostic queries (SELECT only in review context) |
### PostGIS (Spatial Validation)
| Tool | Purpose |
|------|---------|
| `st_tables` | List tables with geometry columns |
| `st_geometry_type` | Verify geometry types |
| `st_srid` | Verify coordinate reference systems |
| `st_extent` | Verify spatial extent is reasonable |
### dbt (Project Validation)
| Tool | Purpose |
|------|---------|
| `dbt_parse` | Validate project structure (ALWAYS run first) |
| `dbt_compile` | Verify SQL renders correctly |
| `dbt_test` | Run data tests |
| `dbt_build` | Combined run + test |
| `dbt_ls` | List all resources (models, tests, sources) |
| `dbt_lineage` | Get model dependency graph |
| `dbt_docs_generate` | Generate documentation for inspection |
### pandas (Data Validation)
| Tool | Purpose |
|------|---------|
| `describe` | Statistical summary for data quality checks |
| `head` | Preview data for structural verification |
| `list_data` | Check for stale DataFrames |
## Operating Modes
### Review Mode (default)
Triggered by `/data-review <path>`
**Characteristics:**
- Produces detailed report with all findings
- Groups findings by severity (FAIL/WARN/INFO)
- Includes actionable recommendations with fixes
- Does NOT block - informational only
- Shows category compliance status
### Gate Mode
Triggered by `/data-gate <path>` or projman orchestrator domain gate
**Characteristics:**
- Binary PASS/FAIL output
- Only reports FAIL-level issues
- Returns exit status for automation integration
- Blocks completion on FAIL
- Compact output for CI/CD pipelines
## Audit Workflow
### 1. Receive Target Path
Accept file or directory path from command invocation.
### 2. Determine Scope
Analyze target to identify what type of data work is present:
| Pattern | Type | Checks to Run |
|---------|------|---------------|
| `dbt_project.yml` present | dbt project | Full dbt validation |
| `*.sql` files in dbt path | dbt models | Model compilation, lineage |
| `*.py` with `pg_query`/`pg_execute` | Database operations | Schema validation |
| `schema.yml` files | dbt schemas | Schema drift detection |
| Migration files (`*_migration.sql`) | Schema changes | Full PostgreSQL + dbt checks |
### 3. Run Database Checks (if applicable)
```
1. pg_connect → verify database reachable
If fails: WARN, continue with file-based checks
2. pg_tables → verify expected tables exist
If missing: FAIL
3. pg_columns on affected tables → verify types
If mismatch: FAIL
```
### 4. Run dbt Checks (if applicable)
```
1. dbt_parse → validate project
If fails: FAIL immediately (project broken)
2. dbt_ls → catalog all resources
Record models, tests, sources
3. dbt_lineage on target models → check integrity
Orphaned refs: FAIL
4. dbt_compile on target models → verify SQL
Compilation errors: FAIL
5. dbt_test --select <targets> → run tests
Test failures: FAIL
6. Cross-reference tests → models without tests
Missing tests: WARN
```
### 5. Run PostGIS Checks (if applicable)
```
1. st_tables → list spatial tables
If none found: skip PostGIS checks
2. st_srid → verify SRID correct
Unexpected SRID: FAIL
3. st_geometry_type → verify expected types
Wrong type: WARN
4. st_extent → sanity check bounding box
Unreasonable extent: FAIL
```
### 6. Scan Python Code (manual patterns)
For Python files with database operations, flag these patterns (a regex sketch follows the table):
| Pattern | Issue | Severity |
|---------|-------|----------|
| `f"SELECT * FROM {table}"` | SQL injection risk | WARN |
| `f"INSERT INTO {table}"` | Unparameterized mutation | WARN |
| `pg_execute` without WHERE in DELETE/UPDATE | Dangerous mutation | WARN |
| Hardcoded connection strings | Credential exposure | WARN |
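Illustrative regexes for these patterns (a sketch only; the table is the authoritative list, and real scanning would need multi-line awareness):
```python
import re

# Rough, non-exhaustive patterns for the checks above.
UNSAFE_PATTERNS = [
    (r"f['\"]\s*SELECT\b.*\{", "f-string SELECT: possible SQL injection"),
    (r"f['\"]\s*(INSERT|UPDATE|DELETE)\b.*\{", "unparameterized f-string mutation"),
    (r"pg_execute\(.*\b(DELETE|UPDATE)\b(?!.*\bWHERE\b)", "DELETE/UPDATE without WHERE clause"),
    (r"postgres(ql)?://\S+:\S+@", "hardcoded connection string"),
]

def scan_line(line: str) -> list[str]:
    """Return warning messages for one line of Python source."""
    return [msg for pattern, msg in UNSAFE_PATTERNS if re.search(pattern, line, re.IGNORECASE)]

# scan_line('df = pg_query(f"SELECT * FROM {table}")')
# -> ['f-string SELECT: possible SQL injection']
```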
### 7. Generate Report
Output format depends on operating mode (see templates in `skills/data-integrity-audit.md`).
## Report Formats
### Gate Mode Output
**PASS:**
```
DATA GATE: PASS
No blocking data integrity violations found.
```
**FAIL:**
```
DATA GATE: FAIL
Blocking Issues (2):
1. dbt/models/staging/stg_census.sql - Compilation error: column 'census_yr' not found
Fix: Column was renamed to 'census_year' in source table. Update model.
2. portfolio_app/toronto/loaders/census.py:67 - References table 'census_raw' which does not exist
Fix: Table was renamed to 'census_demographics' in migration 003.
Run /data-review for full audit report.
```
### Review Mode Output
```
+----------------------------------------------------------------------+
| DATA-PLATFORM - Data Integrity Audit |
| /path/to/project |
+----------------------------------------------------------------------+
Target: /path/to/project
Scope: 12 files scanned, 8 models checked, 3 tables verified
FINDINGS
FAIL (2)
1. [dbt/models/staging/stg_census.sql] Compilation error
Error: column 'census_yr' does not exist
Fix: Column was renamed to 'census_year'. Update SELECT clause.
2. [portfolio_app/loaders/census.py:67] Missing table reference
Error: Table 'census_raw' does not exist
Fix: Table renamed to 'census_demographics' in migration 003.
WARN (3)
1. [dbt/models/marts/dim_neighbourhoods.sql] Missing dbt test
Issue: No unique test on neighbourhood_id
Suggestion: Add unique test to schema.yml
2. [portfolio_app/toronto/queries.py:45] Hardcoded SQL
Issue: f"SELECT * FROM {table_name}" without parameterization
Suggestion: Use parameterized queries
3. [dbt/models/staging/stg_legacy.sql] Orphaned model
Issue: No downstream consumers or exposures
Suggestion: Remove if unused or add to exposure
INFO (1)
1. [dbt/models/marts/fct_demographics.sql] Documentation gap
Note: Model description missing in schema.yml
Suggestion: Add description for discoverability
SUMMARY
Schema: 2 issues
Lineage: Intact
dbt: 1 failure
PostGIS: Not applicable
VERDICT: FAIL (2 blocking issues)
```
## Severity Definitions
| Level | Criteria | Action Required |
|-------|----------|-----------------|
| **FAIL** | dbt parse/compile fails, missing tables/columns, type mismatches, broken lineage, invalid SRID | Must fix before completion |
| **WARN** | Missing tests, hardcoded SQL, schema drift, orphaned models | Should fix |
| **INFO** | Documentation gaps, optimization opportunities | Consider for improvement |
## Error Handling
| Error | Response |
|-------|----------|
| Database not reachable | WARN: "PostgreSQL unavailable, skipping schema checks" - continue |
| No dbt_project.yml | Skip dbt checks silently - not an error |
| No PostGIS tables | Skip PostGIS checks silently - not an error |
| MCP tool fails | WARN: "Tool {name} failed: {error}" - continue with remaining |
| Empty path | PASS: "No data artifacts found in target path" |
| Invalid path | Error: "Path not found: {path}" |
## Integration with projman
When called as a domain gate by projman orchestrator:
1. Receive path from orchestrator (changed files for the issue)
2. Determine what type of data work changed
3. Run audit in gate mode
4. Return structured result:
```
Gate: data
Status: PASS | FAIL
Blocking: N issues
Summary: Brief description
```
5. Orchestrator decides whether to proceed based on gate status
## Example Interactions
**User**: `/data-review dbt/models/staging/`
**Agent**:
1. Scans all .sql files in staging/
2. Runs dbt_parse to validate project
3. Runs dbt_compile on each model
4. Checks lineage for orphaned refs
5. Cross-references test coverage
6. Returns detailed report
**User**: `/data-gate portfolio_app/toronto/`
**Agent**:
1. Scans for Python files with pg_query/pg_execute
2. Checks if referenced tables exist
3. Validates column types
4. Returns PASS if clean, FAIL with blocking issues if not
5. Compact output for automation
## Communication Style
Technical and precise. Report findings with exact locations, specific violations, and actionable fixes:
- "Table `census_demographics` column `population` is `varchar(50)` in PostgreSQL but referenced as `integer` in `stg_census.sql` line 14. This will cause a runtime cast error."
- "Model `dim_neighbourhoods` has no `unique` test on `neighbourhood_id`. Add to `schema.yml` to prevent duplicates."
- "Spatial extent for `toronto_boundaries` shows global coordinates (-180 to 180). Expected Toronto bbox (~-79.6 to -79.1 longitude). Likely missing ST_Transform or wrong SRID on import."

View File

@@ -1,9 +1,6 @@
---
name: data-analysis
description: Data analysis specialist for exploration and profiling
model: sonnet
permissionMode: plan
disallowedTools: Write, Edit, MultiEdit
---
# Data Analysis Agent

View File

@@ -1,10 +1,3 @@
---
name: data-ingestion
description: Data ingestion specialist for loading, transforming, and preparing data for analysis.
model: haiku
permissionMode: acceptEdits
---
# Data Ingestion Agent
You are a data ingestion specialist. Your role is to help users load, transform, and prepare data for analysis.

View File

@@ -18,12 +18,12 @@ This project uses the data-platform plugin for data engineering workflows.
| Command | Purpose |
|---------|---------|
| `/data-ingest` | Load data from files or database |
| `/data-profile` | Generate statistical profile |
| `/data-schema` | Show schema information |
| `/data-explain` | Explain dbt model |
| `/data-lineage` | Show data lineage |
| `/data-run` | Execute dbt models |
| `/ingest` | Load data from files or database |
| `/profile` | Generate statistical profile |
| `/schema` | Show schema information |
| `/explain` | Explain dbt model |
| `/lineage` | Show data lineage |
| `/run` | Execute dbt models |
### data_ref Convention
@@ -36,9 +36,9 @@ DataFrames are stored with references. Use meaningful names:
### dbt Workflow
1. Always validate before running: `/data-run` includes automatic `dbt_parse`
1. Always validate before running: `/run` includes automatic `dbt_parse`
2. For dbt 1.9+, check for deprecated syntax before commits
3. Use `/data-lineage` to understand impact of changes
3. Use `/lineage` to understand impact of changes
### Database Access
@@ -69,22 +69,22 @@ DATA_PLATFORM_MAX_ROWS=100000
### Data Exploration
```
/data-ingest data/raw_customers.csv
/data-profile raw_customers
/data-schema
/ingest data/raw_customers.csv
/profile raw_customers
/schema
```
### ETL Development
```
/data-schema orders # Understand source
/data-explain stg_orders # Understand transformation
/data-run stg_orders # Test the model
/data-lineage fct_orders # Check downstream impact
/schema orders # Understand source
/explain stg_orders # Understand transformation
/run stg_orders # Test the model
/lineage fct_orders # Check downstream impact
```
### Database Analysis
```
/data-schema # List all tables
/schema # List all tables
pg_columns orders # Detailed schema
st_tables # Find spatial data
```

View File

@@ -1,105 +0,0 @@
---
description: Data integrity compliance gate (pass/fail) for sprint execution
gate_contract: v1
arguments:
- name: path
description: File or directory to validate
required: true
---
# /data-gate
Binary pass/fail validation for data integrity compliance. Used by projman orchestrator during sprint execution to gate issue completion.
## Usage
```
/data-gate <path>
```
**Examples:**
```
/data-gate ./dbt/models/staging/
/data-gate ./portfolio_app/toronto/parsers/
/data-gate ./dbt/
```
## What It Does
1. **Activates** the `data-advisor` agent in gate mode
2. **Loads** the `skills/data-integrity-audit.md` skill
3. **Determines scope** from target path:
- dbt project directory: full dbt validation (parse, compile, test, lineage)
- Python files with database operations: schema validation
- SQL files: dbt model validation
- Mixed: all applicable checks
4. **Checks only FAIL-level violations:**
- dbt parse failures (project broken)
- dbt compilation errors (SQL invalid)
- Missing tables/columns referenced in code
- Data type mismatches that cause runtime errors
- Broken lineage (orphaned model references)
- PostGIS SRID mismatches
5. **Returns binary result:**
- `PASS` - No blocking violations found
- `FAIL` - One or more blocking violations
## Output
### On PASS
```
DATA GATE: PASS
No blocking data integrity violations found.
```
### On FAIL
```
DATA GATE: FAIL
Blocking Issues (2):
1. dbt/models/staging/stg_census.sql - Compilation error: column 'census_yr' not found
Fix: Column was renamed to 'census_year' in source table. Update model.
2. portfolio_app/toronto/loaders/census.py:67 - References table 'census_raw' which does not exist
Fix: Table was renamed to 'census_demographics' in migration 003.
Run /data-review for full audit report.
```
## Integration with projman
This command is automatically invoked by the projman orchestrator when:
1. An issue has the `Domain/Data` label
2. The orchestrator is about to mark the issue as complete
3. The orchestrator passes the path of changed files
**Gate behavior:**
- PASS: Issue can be marked complete
- FAIL: Issue stays open, blocker comment added with failure details
## Differences from /data-review
| Aspect | /data-gate | /data-review |
|--------|------------|--------------|
| Output | Binary PASS/FAIL | Detailed report with all severities |
| Severity | FAIL only | FAIL + WARN + INFO |
| Purpose | Automation gate | Human review |
| Verbosity | Minimal | Comprehensive |
| Speed | Skips INFO checks | Full scan |
## When to Use
- **Sprint execution**: Automatic quality gates via projman
- **CI/CD pipelines**: Automated data integrity checks
- **Quick validation**: Fast pass/fail without full report
- **Pre-merge checks**: Verify data changes before integration
For detailed findings including warnings and suggestions, use `/data-review` instead.
## Requirements
- data-platform MCP server must be running
- For dbt checks: dbt project must be configured (auto-detected via `dbt_project.yml`)
- For PostgreSQL checks: connection configured in `~/.config/claude/postgres.env`
- If database or dbt unavailable: applicable checks skipped with warning (non-blocking degradation)

View File

@@ -1,149 +0,0 @@
---
description: Audit data integrity, schema validity, and dbt compliance
arguments:
- name: path
description: File, directory, or dbt project to audit
required: true
---
# /data-review
Comprehensive data integrity audit producing a detailed report with findings at all severity levels. For human review and standalone codebase auditing.
## Usage
```
/data-review <path>
```
**Examples:**
```
/data-review ./dbt/
/data-review ./portfolio_app/toronto/
/data-review ./dbt/models/marts/
```
## What It Does
1. **Activates** the `data-advisor` agent in review mode
2. **Scans target path** to determine scope:
- Identifies dbt project files (.sql models, schema.yml, sources.yml)
- Identifies Python files with database operations
- Identifies migration files
- Identifies PostGIS usage
3. **Runs all check categories:**
- Schema validity (PostgreSQL tables, columns, types)
- dbt project health (parse, compile, test, lineage)
- PostGIS compliance (SRID, geometry types, extent)
- Data type consistency
- Code patterns (unsafe SQL, hardcoded queries)
4. **Produces detailed report** with all severity levels (FAIL, WARN, INFO)
5. **Provides actionable recommendations** for each finding
## Output Format
```
+----------------------------------------------------------------------+
| DATA-PLATFORM - Data Integrity Audit |
| /path/to/project |
+----------------------------------------------------------------------+
Target: /path/to/project
Scope: N files scanned, N models checked, N tables verified
FINDINGS
FAIL (N)
1. [location] violation description
Fix: actionable fix
WARN (N)
1. [location] warning description
Suggestion: improvement suggestion
INFO (N)
1. [location] info description
Note: context
SUMMARY
Schema: Valid | N issues
Lineage: Intact | N orphaned
dbt: Passes | N failures
PostGIS: Valid | N issues | Not applicable
VERDICT: PASS | FAIL (N blocking issues)
```
## When to Use
### Before Sprint Planning
Audit data layer health to identify tech debt and inform sprint scope.
```
/data-review ./dbt/
```
### During Code Review
Get detailed data integrity findings alongside code review comments.
```
/data-review ./dbt/models/staging/stg_new_source.sql
```
### After Migrations
Verify schema changes didn't break anything downstream.
```
/data-review ./migrations/
```
### Periodic Health Checks
Regular data infrastructure audits for proactive maintenance.
```
/data-review ./data_pipeline/
```
### New Project Onboarding
Understand the current state of data architecture.
```
/data-review .
```
## Severity Levels
| Level | Meaning | Gate Impact |
|-------|---------|-------------|
| **FAIL** | Blocking issues that will cause runtime errors | Would block `/data-gate` |
| **WARN** | Quality issues that should be addressed | Does not block gate |
| **INFO** | Suggestions for improvement | Does not block gate |
## Differences from /data-gate
`/data-review` gives you the full picture. `/data-gate` gives the orchestrator a yes/no.
| Aspect | /data-gate | /data-review |
|--------|------------|--------------|
| Output | Binary PASS/FAIL | Detailed report |
| Severity | FAIL only | FAIL + WARN + INFO |
| Purpose | Automation | Human review |
| Verbosity | Minimal | Comprehensive |
| Speed | Fast (skips INFO) | Thorough |
Use `/data-review` when you want to understand.
Use `/data-gate` when you want to automate.
## Requirements
- data-platform MCP server must be running
- For dbt checks: dbt project must be configured (auto-detected via `dbt_project.yml`)
- For PostgreSQL checks: connection configured in `~/.config/claude/postgres.env`
**Graceful degradation:** If database or dbt unavailable, applicable checks are skipped with a note in the report rather than failing entirely.
## Skills Used
- `skills/data-integrity-audit.md` - Audit rules and patterns
- `skills/mcp-tools-reference.md` - MCP tool reference
## Related Commands
- `/data-gate` - Binary pass/fail for automation
- `/data-lineage` - Visualize dbt model dependencies
- `/data-schema` - Explore database schema

View File

@@ -1,4 +1,4 @@
# /data-explain - dbt Model Explanation
# /explain - dbt Model Explanation
## Skills to Load
- skills/dbt-workflow.md
@@ -13,7 +13,7 @@ Display header: `DATA-PLATFORM - Model Explanation`
## Usage
```
/data-explain <model_name>
/explain <model_name>
```
## Workflow
@@ -26,8 +26,8 @@ Display header: `DATA-PLATFORM - Model Explanation`
## Examples
```
/data-explain dim_customers
/data-explain fct_orders
/explain dim_customers
/explain fct_orders
```
## Required MCP Tools

View File

@@ -1,4 +1,4 @@
# /data-ingest - Data Ingestion
# /ingest - Data Ingestion
## Skills to Load
- skills/mcp-tools-reference.md
@@ -11,7 +11,7 @@ Display header: `DATA-PLATFORM - Ingest`
## Usage
```
/data-ingest [source]
/ingest [source]
```
## Workflow
@@ -31,9 +31,9 @@ Display header: `DATA-PLATFORM - Ingest`
## Examples
```
/data-ingest data/sales.csv
/data-ingest data/customers.parquet
/data-ingest "SELECT * FROM orders WHERE created_at > '2024-01-01'"
/ingest data/sales.csv
/ingest data/customers.parquet
/ingest "SELECT * FROM orders WHERE created_at > '2024-01-01'"
```
## Required MCP Tools

View File

@@ -1,4 +1,4 @@
# /data-setup - Data Platform Setup Wizard
# /initial-setup - Data Platform Setup Wizard
## Skills to Load
- skills/setup-workflow.md
@@ -11,7 +11,7 @@ Display header: `DATA-PLATFORM - Setup Wizard`
## Usage
```
/data-setup
/initial-setup
```
## Workflow

View File

@@ -1,4 +1,4 @@
# /data-lineage - Data Lineage Visualization
# /lineage - Data Lineage Visualization
## Skills to Load
- skills/lineage-analysis.md
@@ -12,7 +12,7 @@ Display header: `DATA-PLATFORM - Lineage`
## Usage
```
/data-lineage <model_name> [--depth N]
/lineage <model_name> [--depth N]
```
## Workflow
@@ -25,8 +25,8 @@ Display header: `DATA-PLATFORM - Lineage`
## Examples
```
/data-lineage dim_customers
/data-lineage fct_orders --depth 3
/lineage dim_customers
/lineage fct_orders --depth 3
```
## Required MCP Tools

View File

@@ -1,4 +1,4 @@
# /data-profile - Data Profiling
# /profile - Data Profiling
## Skills to Load
- skills/data-profiling.md
@@ -12,7 +12,7 @@ Display header: `DATA-PLATFORM - Data Profile`
## Usage
```
/data-profile <data_ref>
/profile <data_ref>
```
## Workflow
@@ -27,8 +27,8 @@ Execute `skills/data-profiling.md` profiling workflow:
## Examples
```
/data-profile sales_data
/data-profile df_a1b2c3d4
/profile sales_data
/profile df_a1b2c3d4
```
## Required MCP Tools

View File

@@ -1,4 +1,4 @@
# /data-run - Execute dbt Models
# /run - Execute dbt Models
## Skills to Load
- skills/dbt-workflow.md
@@ -12,7 +12,7 @@ Display header: `DATA-PLATFORM - dbt Run`
## Usage
```
/data-run [model_selection] [--full-refresh]
/run [model_selection] [--full-refresh]
```
## Workflow
@@ -30,11 +30,11 @@ See `skills/dbt-workflow.md` for full selection patterns.
## Examples
```
/data-run # Run all models
/data-run dim_customers # Run specific model
/data-run +fct_orders # Run model and upstream
/data-run tag:daily # Run models with tag
/data-run --full-refresh # Rebuild incremental models
/run # Run all models
/run dim_customers # Run specific model
/run +fct_orders # Run model and upstream
/run tag:daily # Run models with tag
/run --full-refresh # Rebuild incremental models
```
## Required MCP Tools

View File

@@ -1,4 +1,4 @@
# /data-schema - Schema Exploration
# /schema - Schema Exploration
## Skills to Load
- skills/mcp-tools-reference.md
@@ -11,7 +11,7 @@ Display header: `DATA-PLATFORM - Schema Explorer`
## Usage
```
/data-schema [table_name | data_ref]
/schema [table_name | data_ref]
```
## Workflow
@@ -30,9 +30,9 @@ Display header: `DATA-PLATFORM - Schema Explorer`
## Examples
```
/data-schema # List all tables and DataFrames
/data-schema customers # Show table schema
/data-schema sales_data # Show DataFrame schema
/schema # List all tables and DataFrames
/schema customers # Show table schema
/schema sales_data # Show DataFrame schema
```
## Required MCP Tools

View File

@@ -5,26 +5,11 @@
PREFIX="[data-platform]"
# Check if MCP venv exists - check cache first, then local
CACHE_VENV="$HOME/.cache/claude-mcp-venvs/leo-claude-mktplace/data-platform/.venv/bin/python"
# Check if MCP venv exists
PLUGIN_ROOT="${CLAUDE_PLUGIN_ROOT:-$(dirname "$(dirname "$(realpath "$0")")")}"
MARKETPLACE_ROOT="$(dirname "$(dirname "$PLUGIN_ROOT")")"
LOCAL_VENV="$MARKETPLACE_ROOT/mcp-servers/data-platform/.venv/bin/python"
VENV_PATH="$PLUGIN_ROOT/mcp-servers/data-platform/.venv/bin/python"
# Check cache first (preferred), then local symlink
CACHE_VENV_DIR="$HOME/.cache/claude-mcp-venvs/leo-claude-mktplace/data-platform/.venv"
LOCAL_VENV_DIR="$MARKETPLACE_ROOT/mcp-servers/data-platform/.venv"
if [[ -f "$CACHE_VENV" ]]; then
VENV_PATH="$CACHE_VENV"
# Auto-create symlink in installed marketplace if missing
if [[ ! -e "$LOCAL_VENV_DIR" && -d "$CACHE_VENV_DIR" ]]; then
mkdir -p "$(dirname "$LOCAL_VENV_DIR")" 2>/dev/null
ln -sf "$CACHE_VENV_DIR" "$LOCAL_VENV_DIR" 2>/dev/null
fi
elif [[ -f "$LOCAL_VENV" ]]; then
VENV_PATH="$LOCAL_VENV"
else
if [[ ! -f "$VENV_PATH" ]]; then
echo "$PREFIX MCP venv missing - run /initial-setup or setup.sh"
exit 0
fi

View File

@@ -1,307 +0,0 @@
---
name: data-integrity-audit
description: Rules and patterns for auditing data integrity, schema validity, and dbt compliance
---
# Data Integrity Audit
## Purpose
Defines what "data valid" means for the data-platform domain. This skill is loaded by the `data-advisor` agent for both review and gate modes during sprint execution and standalone audits.
---
## What to Check
| Check Category | What It Validates | MCP Tools Used |
|----------------|-------------------|----------------|
| **Schema Validity** | Tables exist, columns have correct types, constraints present, no orphaned columns | `pg_tables`, `pg_columns`, `pg_schemas` |
| **dbt Project Health** | Project parses without errors, models compile, tests defined for critical models | `dbt_parse`, `dbt_compile`, `dbt_test`, `dbt_ls` |
| **Lineage Integrity** | No orphaned models (referenced but missing), no circular dependencies, upstream sources exist | `dbt_lineage`, `dbt_ls` |
| **Data Type Consistency** | DataFrame dtypes match expected schema, no silent type coercion, date formats consistent | `describe`, `head`, `pg_columns` |
| **PostGIS Compliance** | Spatial tables have correct SRID, geometry types match expectations, extent is reasonable | `st_tables`, `st_geometry_type`, `st_srid`, `st_extent` |
| **Query Safety** | SELECT queries used for reads (not raw SQL for mutations), parameterized patterns | Code review - manual pattern check |
---
## Common Violations
### FAIL-Level Violations (Block Gate)
| Violation | Detection Method | Example |
|-----------|-----------------|---------|
| dbt parse failure | `dbt_parse` returns error | Project YAML invalid, missing ref targets |
| dbt compilation error | `dbt_compile` fails | SQL syntax error, undefined column reference |
| Missing table/column | `pg_tables`, `pg_columns` lookup | Code references `census_raw` but table doesn't exist |
| Type mismatch | Compare `pg_columns` vs dbt schema | Column is `varchar` in DB but model expects `integer` |
| Broken lineage | `dbt_lineage` shows orphaned refs | Model references `stg_old_format` which doesn't exist |
| PostGIS SRID mismatch | `st_srid` returns unexpected value | Geometry column has SRID 0 instead of 4326 |
| Unreasonable spatial extent | `st_extent` returns global bbox | Toronto data shows coordinates in China |
### WARN-Level Violations (Report, Don't Block)
| Violation | Detection Method | Example |
|-----------|-----------------|---------|
| Missing dbt tests | `dbt_ls` shows model without test | `dim_customers` has no `unique` test on `customer_id` |
| Undocumented columns | dbt schema.yml missing descriptions | Model columns have no documentation |
| Schema drift | `pg_columns` vs dbt schema.yml | Column exists in DB but not in dbt YAML |
| Hardcoded SQL | Scan Python for string concatenation | `f"SELECT * FROM {table}"` without parameterization |
| Orphaned model | `dbt_lineage` shows no downstream | `stg_legacy` has no consumers and no exposure |
### INFO-Level Violations (Suggestions Only)
| Violation | Detection Method | Example |
|-----------|-----------------|---------|
| Missing indexes | Query pattern suggests need | Frequent filter on non-indexed column |
| Documentation gaps | dbt docs incomplete | Missing model description |
| Unused models | `dbt_ls` vs actual queries | Model exists but never selected |
| Optimization opportunity | `describe` shows data patterns | Column has low cardinality, could be enum |
---
## Severity Classification
| Severity | When to Apply | Gate Behavior |
|----------|--------------|---------------|
| **FAIL** | Broken lineage, models that won't compile, missing tables/columns, data type mismatches that cause runtime errors, invalid SRID | Blocks issue completion |
| **WARN** | Missing dbt tests, undocumented columns, schema drift, hardcoded SQL, orphaned models | Does NOT block gate, included in review report |
| **INFO** | Optimization opportunities, documentation gaps, unused models | Review report only |
### Severity Decision Tree
```
Is the dbt project broken (parse/compile fails)?
YES -> FAIL
NO -> Does code reference non-existent tables/columns?
YES -> FAIL
NO -> Would this cause a runtime error?
YES -> FAIL
NO -> Does it violate data quality standards?
YES -> WARN
NO -> Is it an optimization/documentation suggestion?
YES -> INFO
NO -> Not a violation
```
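The tree reads as a strict precedence order: the first matching branch wins. A minimal sketch of that ordering, assuming hypothetical boolean flags on a `finding` dict (the real inputs come from the dbt_* and pg_* MCP tool results described above):
```
from typing import Optional

# Sketch of the severity decision tree. The keys on `finding` are
# hypothetical placeholders for the checks above; only the ordering of
# the decisions is taken from the tree itself.
def classify_severity(finding: dict) -> Optional[str]:
    if finding.get("dbt_parse_failed") or finding.get("dbt_compile_failed"):
        return "FAIL"   # project is broken
    if finding.get("references_missing_table_or_column"):
        return "FAIL"   # code points at objects that do not exist
    if finding.get("causes_runtime_error"):
        return "FAIL"   # e.g. a type mismatch that breaks execution
    if finding.get("violates_quality_standard"):
        return "WARN"   # missing tests, schema drift, hardcoded SQL, ...
    if finding.get("is_suggestion"):
        return "INFO"   # optimization or documentation opportunity
    return None         # not a violation
```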
---
## Scanning Strategy
### For dbt Projects
1. **Parse validation** (ALWAYS FIRST)
```
dbt_parse → if fails, immediate FAIL (project is broken)
```
2. **Catalog resources**
```
dbt_ls → list all models, tests, sources, exposures
```
3. **Lineage check**
```
dbt_lineage on changed models → check upstream/downstream integrity
```
4. **Compilation check**
```
dbt_compile on changed models → verify SQL renders correctly
```
5. **Test execution**
```
dbt_test --select <changed_models> → verify tests pass
```
6. **Test coverage audit**
```
Cross-reference dbt_ls tests against model list → flag models without tests (WARN)
```
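As an illustration of this ordering, and in particular the hard stop on a parse failure, the equivalent dbt CLI calls could be sequenced as below. This is a hedged sketch: the skill itself invokes the dbt_* MCP tools rather than subprocess calls, and the lineage check from step 3 is omitted here.
```
# Illustrative only: the skill uses MCP tools (dbt_parse, dbt_ls,
# dbt_compile, dbt_test); this sketch runs the equivalent dbt CLI
# commands to show the same order and short-circuit behaviour.
import subprocess

def run_dbt(*args: str) -> subprocess.CompletedProcess:
    return subprocess.run(["dbt", *args], capture_output=True, text=True)

def scan_dbt_project(changed_models: list[str]) -> str:
    if run_dbt("parse").returncode != 0:
        return "FAIL"                                 # step 1: broken project, stop
    run_dbt("ls")                                     # step 2: catalog resources
    # step 3 (lineage) omitted - handled by the dbt_lineage MCP tool
    if run_dbt("compile", "--select", *changed_models).returncode != 0:
        return "FAIL"                                 # step 4: SQL does not render
    if run_dbt("test", "--select", *changed_models).returncode != 0:
        return "FAIL"                                 # step 5: tests fail
    return "PASS"                                     # coverage gaps are WARN, not FAIL
```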
### For PostgreSQL Schema Changes
1. **Table verification**
```
pg_tables → verify expected tables exist
```
2. **Column validation**
```
pg_columns on affected tables → verify types match expectations
```
3. **Schema comparison**
```
Compare pg_columns output against dbt schema.yml → flag drift
```
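A compact sketch of the drift comparison in step 3, assuming direct access to `information_schema` via psycopg2 and a local `schema.yml`; in the skill the database side comes from the `pg_columns` MCP tool, and the connection string and file path are assumptions:
```
import psycopg2
import yaml

def schema_drift(conn_str: str, schema_yml: str, table: str) -> dict:
    # Columns as the database sees them
    with psycopg2.connect(conn_str) as conn, conn.cursor() as cur:
        cur.execute(
            "SELECT column_name FROM information_schema.columns WHERE table_name = %s",
            (table,),
        )
        db_cols = {row[0] for row in cur.fetchall()}

    # Columns as dbt documents them
    with open(schema_yml) as f:
        models = yaml.safe_load(f).get("models", [])
    yml_cols = {
        col["name"]
        for model in models if model.get("name") == table
        for col in model.get("columns", [])
    }
    return {
        "in_db_not_in_yml": sorted(db_cols - yml_cols),  # WARN: schema drift
        "in_yml_not_in_db": sorted(yml_cols - db_cols),  # FAIL candidate: missing column
    }
```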
### For PostGIS/Spatial Data
1. **Spatial table scan**
```
st_tables → list tables with geometry columns
```
2. **SRID validation**
```
st_srid → verify SRID is correct for expected region
Expected: 4326 (WGS84) for GPS data, local projections for regional data
```
3. **Geometry type check**
```
st_geometry_type → verify expected types (Point, Polygon, etc.)
```
4. **Extent sanity check**
```
st_extent → verify bounding box is reasonable for expected region
Toronto data should be ~(-79.6 to -79.1, 43.6 to 43.9)
```
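The Toronto bounding box quoted above turns into a simple containment test. A minimal sketch, assuming the extent arrives as a `(min_lon, min_lat, max_lon, max_lat)` tuple from `st_extent` and using an arbitrary 0.5-degree tolerance:
```
TORONTO_BBOX = (-79.6, 43.6, -79.1, 43.9)  # (min_lon, min_lat, max_lon, max_lat)

def extent_is_reasonable(extent, expected=TORONTO_BBOX, tol=0.5) -> bool:
    """True if the observed extent stays within the expected region (+/- tol degrees)."""
    min_x, min_y, max_x, max_y = extent
    exp_min_x, exp_min_y, exp_max_x, exp_max_y = expected
    return (
        exp_min_x - tol <= min_x and max_x <= exp_max_x + tol
        and exp_min_y - tol <= min_y and max_y <= exp_max_y + tol
    )

# A global bbox such as (-180, -90, 180, 90) fails -> FAIL-level violation.
```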
### For DataFrame/pandas Operations
1. **Data quality check**
```
describe → check for unexpected nulls, type issues, outliers
```
2. **Structure verification**
```
head → verify data structure matches expectations
```
3. **Memory management**
```
list_data → verify no stale DataFrames from previous failed runs
```
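A sketch of what steps 1-2 look for once a DataFrame is in hand; the skill performs these checks through the `describe`/`head` MCP tools, and the 20% null threshold here is an assumption, not a standard:
```
import pandas as pd

def profile_dataframe(df: pd.DataFrame, null_warn_ratio: float = 0.2) -> list[str]:
    findings = []
    for col, ratio in df.isna().mean().items():         # per-column null fraction
        if ratio > null_warn_ratio:
            findings.append(f"WARN: {col} is {ratio:.0%} null")
    for col, dtype in df.dtypes.items():
        if dtype == "object":                           # possible silent type coercion
            findings.append(f"INFO: {col} stored as object - check for mixed types")
    print(df.head())                                    # structure spot-check
    print(df.describe(include="all"))                   # distribution / outlier scan
    return findings
```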
### For Python Code (Manual Scan)
1. **SQL injection patterns**
- Scan for f-strings with table/column names
- Check for string concatenation in queries
- Look for `.format()` calls with SQL
2. **Mutation safety**
- `pg_execute` usage should be intentional, not accidental
- Verify DELETE/UPDATE have WHERE clauses
3. **Credential exposure**
- No hardcoded connection strings
- No credentials in code (check for `.env` usage)
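The contrast the scan looks for, in miniature (psycopg2 is used as the example driver, an assumption of this sketch rather than a requirement of the skill):
```
import psycopg2

def fetch_orders_unsafe(cur, table: str, since: str):
    # Flagged: f-string interpolation of identifiers/values into SQL
    cur.execute(f"SELECT * FROM {table} WHERE created_at > '{since}'")

def fetch_orders_safe(cur, since: str):
    # Expected: fixed identifier, parameterized value
    cur.execute("SELECT * FROM orders WHERE created_at > %s", (since,))
```
Note that identifiers such as table names cannot be parameterized, which is why the safe variant fixes the table name; dynamic identifiers should be validated against an allowlist instead.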
---
## Report Templates
### Gate Mode (Compact)
```
DATA GATE: PASS
No blocking data integrity violations found.
```
or
```
DATA GATE: FAIL
Blocking Issues (N):
1. <location> - <violation description>
Fix: <actionable fix>
2. <location> - <violation description>
Fix: <actionable fix>
Run /data-review for full audit report.
```
### Review Mode (Detailed)
```
+----------------------------------------------------------------------+
| DATA-PLATFORM - Data Integrity Audit |
| [Target Path] |
+----------------------------------------------------------------------+
Target: <scanned path or project>
Scope: N files scanned, N models checked, N tables verified
FINDINGS
FAIL (N)
1. [location] violation description
Fix: actionable fix
2. [location] violation description
Fix: actionable fix
WARN (N)
1. [location] warning description
Suggestion: improvement suggestion
2. [location] warning description
Suggestion: improvement suggestion
INFO (N)
1. [location] info description
Note: context
SUMMARY
Schema: Valid | N issues
Lineage: Intact | N orphaned
dbt: Passes | N failures
PostGIS: Valid | N issues | Not applicable
VERDICT: PASS | FAIL (N blocking issues)
```
---
## Skip Patterns
Do not flag violations in:
- `**/tests/**` - Test files may have intentional violations
- `**/__pycache__/**` - Compiled files
- `**/fixtures/**` - Test fixtures
- `**/.scratch/**` - Temporary working files
- Files with `# noqa: data-audit` comment
- Migration files marked as historical
---
## Error Handling
| Scenario | Behavior |
|----------|----------|
| Database not reachable (`pg_connect` fails) | WARN, skip PostgreSQL checks, continue with file-based |
| dbt not configured (no `dbt_project.yml`) | Skip dbt checks entirely, not an error |
| No PostGIS tables found | Skip PostGIS checks, not an error |
| MCP tool call fails | Report as WARN with tool name, continue with remaining checks |
| No data files in scanned path | Report "No data artifacts found" - PASS (nothing to fail) |
| Empty directory | Report "No files found in path" - PASS |
---
## Integration Notes
### projman Orchestrator
When called as a domain gate:
1. Orchestrator detects `Domain/Data` label on issue
2. Orchestrator identifies changed files
3. Orchestrator invokes `/data-gate <path>`
4. Agent runs gate mode scan
5. Returns PASS/FAIL to orchestrator
6. Orchestrator decides whether to complete issue
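A hedged sketch of the decision in steps 4-6; the function names are hypothetical placeholders, since the real flow runs through the orchestrator agent and the `/data-gate` command rather than a Python API:
```
def run_data_gate(paths):                    # hypothetical stand-in for /data-gate
    return "PASS"

def report_blocking_issues(issue, verdict):  # hypothetical reporting hook
    print(f"Issue #{issue['number']} blocked: data gate returned {verdict}")

def complete_issue_with_gate(issue: dict, changed_paths: list[str]) -> bool:
    if "Domain/Data" not in issue["labels"]:
        return True                          # no data gate required
    verdict = run_data_gate(changed_paths)   # gate-mode scan on changed files
    if verdict == "PASS":
        return True                          # orchestrator may close the issue
    report_blocking_issues(issue, verdict)   # surface FAIL findings
    return False                             # issue stays open until fixed
```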
### Standalone Usage
For manual audits:
1. User runs `/data-review <path>`
2. Agent runs full review mode scan
3. Returns detailed report with all severity levels
4. User decides on actions

View File

@@ -14,18 +14,16 @@ Display at the start of every command execution:
| Command | Header Text |
|---------|-------------|
| data-setup | Setup Wizard |
| data-ingest | Ingest |
| data-profile | Data Profile |
| data-schema | Schema Explorer |
| initial-setup | Setup Wizard |
| ingest | Ingest |
| profile | Data Profile |
| schema | Schema Explorer |
| data-quality | Data Quality |
| data-run | dbt Run |
| run | dbt Run |
| dbt-test | dbt Tests |
| data-lineage | Lineage |
| lineage | Lineage |
| lineage-viz | Lineage Visualization |
| data-explain | Model Explanation |
| data-review | Data Review |
| data-gate | Data Gate |
| explain | Model Explanation |
## Summary Box Format

View File

@@ -1,7 +1,7 @@
{
"name": "doc-guardian",
"description": "Automatic documentation drift detection and synchronization",
"version": "1.1.0",
"version": "1.0.0",
"author": {
"name": "Leo Miranda",
"email": "leobmiranda@gmail.com"

View File

@@ -1,8 +1,5 @@
---
name: doc-analyzer
description: Specialized agent for documentation analysis and drift detection. Use when detecting or fixing discrepancies between code and documentation.
model: sonnet
permissionMode: acceptEdits
description: Specialized agent for documentation analysis and drift detection
---
# Documentation Analyzer Agent

View File

@@ -1,6 +1,6 @@
{
"name": "git-flow",
"version": "1.2.0",
"version": "1.0.0",
"description": "Git workflow automation with intelligent commit messages and branch management",
"author": {
"name": "Leo Miranda",

View File

@@ -1,10 +1,3 @@
---
name: git-assistant
description: Git workflow assistant for complex git operations, conflict resolution, and repository history management.
model: haiku
permissionMode: acceptEdits
---
# Git Assistant Agent
## Visual Output Requirements

View File

@@ -40,9 +40,9 @@ Use conventional commits:
| Command | Use Case |
|---------|----------|
| `/git-commit` | Create commit with smart message |
| `/git-commit-push` | Commit and push |
| `/git-commit-merge` | Commit and merge to base |
| `/commit` | Create commit with smart message |
| `/commit-push` | Commit and push |
| `/commit-merge` | Commit and merge to base |
| `/branch-start` | Start new branch |
| `/git-status` | Enhanced status |

View File

@@ -1,10 +1,10 @@
---
name: git-commit-merge
name: commit-merge
description: Commit current changes and merge branch into target
agent: git-assistant
---
# /git-commit-merge - Commit and Merge
# /commit-merge - Commit and Merge
## Skills
@@ -29,7 +29,7 @@ Commit current changes, then merge the current branch into a target branch.
## Workflow
1. **Display header** - Show GIT-FLOW Commit & Merge header
2. **Run /git-commit** - Execute standard commit workflow
2. **Run /commit** - Execute standard commit workflow
3. **Identify target** - Prompt for target branch if not specified
4. **Select strategy** - Merge commit, squash, or rebase (per merge-workflow.md)
5. **Execute merge** - Switch to target, pull, merge, push

View File

@@ -1,10 +1,10 @@
---
name: git-commit-push
name: commit-push
description: Create a commit and push to remote in one operation
agent: git-assistant
---
# /git-commit-push - Commit and Push
# /commit-push - Commit and Push
## Skills
@@ -28,7 +28,7 @@ Create a commit and push to the remote repository in one operation.
## Workflow
1. **Display header** - Show GIT-FLOW Commit & Push header
2. **Run /git-commit** - Execute standard commit workflow
2. **Run /commit** - Execute standard commit workflow
3. **Check upstream** - Set up tracking if needed (`git push -u`)
4. **Push** - Push to remote
5. **Handle conflicts** - Offer rebase/merge/force if push fails (per sync-workflow.md)

View File

@@ -1,10 +1,10 @@
---
name: git-commit-sync
name: commit-sync
description: Commit, push, and sync with base branch
agent: git-assistant
---
# /git-commit-sync - Commit, Push, and Sync
# /commit-sync - Commit, Push, and Sync
## Skills
@@ -28,7 +28,7 @@ Full sync operation: commit local changes, push to remote, sync with upstream/ba
## Workflow
1. **Display header** - Show GIT-FLOW Commit Sync header
2. **Run /git-commit** - Execute standard commit workflow
2. **Run /commit** - Execute standard commit workflow
3. **Push to remote** - Push committed changes
4. **Fetch with prune** - `git fetch --all --prune`
5. **Sync with base** - Rebase on base branch (per sync-workflow.md)

View File

@@ -1,10 +1,10 @@
---
name: git-commit
name: commit
description: Create a git commit with auto-generated conventional commit message
agent: git-assistant
---
# /git-commit - Smart Commit
# /commit - Smart Commit
## Skills

View File

@@ -51,6 +51,6 @@ Unstaged:
2. Ready to commit with 1 staged file
--- Quick Actions ---
/git-commit - Commit staged changes
/git-commit-push - Commit and push
/commit - Commit staged changes
/commit-push - Commit and push
```

View File

@@ -6,7 +6,7 @@ Defines conventional commit message format for consistent, parseable commit hist
## When to Use
- Generating commit messages in `/git-commit`
- Generating commit messages in `/commit`
- Validating user-provided commit messages
- Explaining commit format to users

View File

@@ -6,7 +6,7 @@ Defines merge strategies, conflict resolution approaches, and post-merge cleanup
## When to Use
- Merging feature branches in `/git-commit-merge`
- Merging feature branches in `/commit-merge`
- Resolving conflicts during sync operations
- Cleaning up after successful merges

View File

@@ -6,8 +6,8 @@ Defines push/pull patterns, rebase strategies, upstream tracking, and stale bran
## When to Use
- Pushing commits in `/git-commit-push`
- Full sync operations in `/git-commit-sync`
- Pushing commits in `/commit-push`
- Full sync operations in `/commit-sync`
- Detecting and reporting stale branches
## Push Workflow

View File

@@ -19,28 +19,28 @@ Standard header format for consistent visual output across all git-flow commands
## Command Headers
### /git-commit
### /commit
```
+----------------------------------------------------------------------+
| GIT-FLOW Smart Commit |
+----------------------------------------------------------------------+
```
### /git-commit-push
### /commit-push
```
+----------------------------------------------------------------------+
| GIT-FLOW Commit & Push |
+----------------------------------------------------------------------+
```
### /git-commit-sync
### /commit-sync
```
+----------------------------------------------------------------------+
| GIT-FLOW Commit Sync |
+----------------------------------------------------------------------+
```
### /git-commit-merge
### /commit-merge
```
+----------------------------------------------------------------------+
| GIT-FLOW Commit & Merge |

View File

@@ -1,11 +1,3 @@
---
name: coordinator
description: Review coordinator that orchestrates the multi-agent PR review process. Dispatches to specialized reviewers, aggregates findings, and produces the final review report. Use proactively after code changes.
model: sonnet
permissionMode: plan
disallowedTools: Write, Edit, MultiEdit
---
# Coordinator Agent
## Visual Output Requirements

View File

@@ -1,11 +1,3 @@
---
name: maintainability-auditor
description: Identifies code complexity, duplication, naming issues, and architecture concerns in PR changes.
model: haiku
permissionMode: plan
disallowedTools: Write, Edit, MultiEdit
---
# Maintainability Auditor Agent
## Visual Output Requirements

View File

@@ -1,11 +1,3 @@
---
name: performance-analyst
description: Performance-focused code reviewer that identifies performance issues, inefficiencies, and optimization opportunities.
model: sonnet
permissionMode: plan
disallowedTools: Write, Edit, MultiEdit
---
# Performance Analyst Agent
## Visual Output Requirements

View File

@@ -1,9 +1,6 @@
---
name: security-reviewer
description: Security-focused code reviewer for PR analysis
model: sonnet
permissionMode: plan
disallowedTools: Write, Edit, MultiEdit
---
# Security Reviewer Agent

View File

@@ -1,11 +1,3 @@
---
name: test-validator
description: Test quality reviewer that validates test coverage, test quality, and testing practices in PR changes.
model: haiku
permissionMode: plan
disallowedTools: Write, Edit, MultiEdit
---
# Test Validator Agent
## Visual Output Requirements

View File

@@ -17,13 +17,13 @@ Display header: `PR-REVIEW - Project Setup`
Fast setup when system-level config already exists.
**Use when:** Already ran `/pr-setup`, starting new project
**Use when:** Already ran `/initial-setup`, starting new project
## Workflow
### Pre-Flight Check
Verify `~/.config/claude/gitea.env` exists. If missing: redirect to `/pr-setup`
Verify `~/.config/claude/gitea.env` exists. If missing: redirect to `/initial-setup`
### Project Setup

View File

@@ -5,18 +5,11 @@
PREFIX="[pr-review]"
# Check if MCP venv exists - check cache first, then local
CACHE_VENV="$HOME/.cache/claude-mcp-venvs/leo-claude-mktplace/gitea/.venv/bin/python"
# Check if MCP venv exists
PLUGIN_ROOT="${CLAUDE_PLUGIN_ROOT:-$(dirname "$(dirname "$(realpath "$0")")")}"
MARKETPLACE_ROOT="$(dirname "$(dirname "$PLUGIN_ROOT")")"
LOCAL_VENV="$MARKETPLACE_ROOT/mcp-servers/gitea/.venv/bin/python"
VENV_PATH="$PLUGIN_ROOT/mcp-servers/gitea/.venv/bin/python"
# Check cache first (preferred), then local
if [[ -f "$CACHE_VENV" ]]; then
VENV_PATH="$CACHE_VENV"
elif [[ -f "$LOCAL_VENV" ]]; then
VENV_PATH="$LOCAL_VENV"
else
if [[ ! -f "$VENV_PATH" ]]; then
echo "$PREFIX MCP venvs missing - run setup.sh from installed marketplace"
exit 0
fi

Some files were not shown because too many files have changed in this diff.