Compare commits: v5.0.0...5c4e97a3f6 (64 commits)
| SHA1 |
|---|
| 5c4e97a3f6 |
| 351be5a40d |
| 67944a7e1c |
| e37653f956 |
| 235e72d3d7 |
| ba8e86e31c |
| 67f330be6c |
| 445b744196 |
| ad73c526b7 |
| 26310d05f0 |
| 459550e7d3 |
| a69a4d19d0 |
| f2a62627d0 |
| 0abf510ec0 |
| 008187a0a4 |
| 4bd15e5deb |
| 8234683bc3 |
| 5b3da8da85 |
| 894e015c01 |
| a66a2bc519 |
| b8851a0ae3 |
| aee199e6cf |
| 223a2d626a |
| b7fce0fafd |
| 551c60fb45 |
| af6a42b2ac |
| 7cae21f7c9 |
| 8048fba931 |
| 1b36ca77ab |
| eb85ea31bb |
| 8627d9e968 |
| 3da9adf44e |
| bcb24ae641 |
| c8ede3c30b |
| fb1c664309 |
| 90f19dfc0f |
| 75492b0d38 |
| 54bb347ee1 |
| 51bcc26ea9 |
| d813147ca7 |
| dbb6d46fa4 |
| e7050e2ad8 |
| 206f1c378e |
| 35380594b4 |
| 0055c9ecf2 |
| a74a048898 |
| 37676d4645 |
| 7492cfad66 |
| 59db9ea0b0 |
| 9234cf1add |
| a21199d3db |
| 1abda1ca0f |
| 0118bc7b9b |
| bbb822db16 |
| 08e1dcb1f5 |
| ec7141a5aa |
| 1b029d97b8 |
| 4ed3ed7e14 |
| c5232bd7bf |
| f9e23fd6eb |
| 457ed9c9ff |
| dadb4d3576 |
| ba771f100f |
| 2b9cb5defd |
@@ -6,7 +6,7 @@
   },
   "metadata": {
     "description": "Project management plugins with Gitea and NetBox integrations",
-    "version": "5.0.0"
+    "version": "5.1.0"
   },
   "plugins": [
     {
@@ -75,8 +75,8 @@
   },
   {
     "name": "cmdb-assistant",
-    "version": "1.0.0",
-    "description": "NetBox CMDB integration for infrastructure management",
+    "version": "1.1.0",
+    "description": "NetBox CMDB integration with data quality validation and machine registration",
     "source": "./plugins/cmdb-assistant",
     "author": {
       "name": "Leo Miranda",
@@ -86,7 +86,7 @@
     "repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
     "mcpServers": ["./.mcp.json"],
     "category": "infrastructure",
-    "tags": ["cmdb", "netbox", "dcim", "ipam"],
+    "tags": ["cmdb", "netbox", "dcim", "ipam", "data-quality", "validation"],
     "license": "MIT"
   },
   {
.gitignore (vendored): 2 lines changed
@@ -31,6 +31,8 @@ venv/
 ENV/
 env/
 .venv/
+.venv
+**/.venv
 
 # PyCharm
 .idea/
CHANGELOG.md: 73 lines changed
@@ -4,6 +4,79 @@ All notable changes to the Leo Claude Marketplace will be documented in this file.
 
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
 
+## [Unreleased]
+
+### Added
+
+#### Sprint 3: Hooks (V5.2.0 Plugin Enhancements)
+Implementation of 6 foundational hooks across 4 plugins.
+
+**git-flow v1.1.0:**
+- **Commit message enforcement hook** - PreToolUse hook validates conventional commit format on all `git commit` commands (not just `/commit`). Blocks invalid commits with format guidance.
+- **Branch name validation hook** - PreToolUse hook validates branch naming on `git checkout -b` and `git switch -c`. Enforces `type/description` format, lowercase, max 50 chars.
+
+**clarity-assist v1.1.0:**
+- **Vagueness detection hook** - UserPromptSubmit hook detects vague prompts and suggests `/clarify` when ambiguity, missing context, or unclear scope is detected.
+
+**data-platform v1.1.0:**
+- **Schema diff detection hook** - PostToolUse hook monitors edits to schema files (dbt models, SQL migrations). Warns on breaking changes (column removal, type narrowing, constraint addition).
+
+**contract-validator v1.1.0:**
+- **SessionStart auto-validate hook** - Smart validation that only runs when plugin files have changed since the last check. Detects interface compatibility issues at session start.
+- **Breaking change detection hook** - PostToolUse hook monitors plugin interface files (README.md, plugin.json). Warns when changes would break consumers.
+
+**Sprint Completed:**
+- Milestone: Sprint 3 - Hooks (closed 2026-01-28)
+- Issues: #225, #226, #227, #228, #229, #230
+- Wiki: [Change V5.2.0: Plugin Enhancements Proposal](https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/wiki/Change-V5.2.0:-Plugin-Enhancements-Proposal)
+- Lessons: Background agent permissions, agent runaway detection, MCP branch detection bug
+
+### Known Issues
+- **MCP Bug #231:** Branch detection in the Gitea MCP runs from the installed plugin directory, not the user's project directory. Workaround: close issues via the Gitea web UI.
+
+---
+
+#### Gitea MCP Server - create_pull_request Tool
+- **`create_pull_request`**: Create new pull requests via MCP
+  - Parameters: title, body, head (source branch), base (target branch), labels
+  - Branch-aware security: only allowed on development/feature branches
+  - Completes the PR lifecycle (previously missing - the server only had list/get/review/comment)
+
+#### cmdb-assistant v1.1.0 - Data Quality Validation
+- **SessionStart Hook**: Tests NetBox API connectivity at session start
+  - Warns if VMs exist without site assignment
+  - Warns if devices exist without platform
+  - Non-blocking: displays a warning, doesn't prevent work
+- **PreToolUse Hook**: Validates input parameters before VM/device operations
+  - Warns about missing site, tenant, platform
+  - Non-blocking: suggests best practices without blocking
+- **`/cmdb-audit` Command**: Comprehensive data quality analysis
+  - Scopes: all, vms, devices, naming, roles
+  - Identifies Critical/High/Medium/Low issues
+  - Provides prioritized remediation recommendations
+- **`/cmdb-register` Command**: Register current machine into NetBox
+  - Discovers system info: hostname, platform, hardware, network interfaces
+  - Discovers running apps: Docker containers, systemd services
+  - Creates device with interfaces, IPs, and sets primary IP
+  - Creates cluster and VMs for Docker containers
+- **`/cmdb-sync` Command**: Sync machine state with NetBox
+  - Compares current state with NetBox record
+  - Shows diff of changes (interfaces, IPs, containers)
+  - Updates with user confirmation
+  - Supports --full and --dry-run flags
+- **NetBox Best Practices Skill**: Reference documentation
+  - Dependency order for object creation
+  - Naming conventions (`{role}-{site}-{number}`, `{env}-{app}-{number}`)
+  - Role consolidation guidance
+  - Site/tenant/platform assignment requirements
+- **Agent Enhancement**: Updated cmdb-assistant agent with validation requirements
+  - Proactive suggestions for missing fields
+  - Naming convention checks
+  - Dependency order enforcement
+  - Duplicate prevention
+
+---
+
 ## [5.0.0] - 2026-01-26
 
 ### Added
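The commit-enforcement hook described in the changelog lends itself to a simple regex gate. A minimal sketch of that kind of check in Bash, assuming a conventional-commit pattern like the one the changelog describes (this is an illustration, not the shipped git-flow hook):

```bash
#!/bin/bash
# Sketch: validate a commit message against conventional-commit format.
# The type list and scope rules here are assumptions, not the plugin's exact rules.
msg="$1"
re='^(feat|fix|docs|chore|refactor|test|perf|ci|build|style)(\([a-z0-9-]+\))?: .+'
if [[ "$msg" =~ $re ]]; then
    exit 0  # valid: allow the git commit to proceed
else
    echo "Invalid commit message format. Expected: type(scope): description" >&2
    exit 1  # a blocking PreToolUse hook signals failure via non-zero exit
fi
```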
@@ -50,7 +50,7 @@ See `docs/DEBUGGING-CHECKLIST.md` for details on cache timing.
 ## Project Overview
 
 **Repository:** leo-claude-mktplace
-**Version:** 5.0.0
+**Version:** 5.1.0
 **Status:** Production Ready
 
 A plugin marketplace for Claude Code containing:
README.md: 44 lines changed
@@ -1,4 +1,4 @@
-# Leo Claude Marketplace - v5.0.0
+# Leo Claude Marketplace - v5.1.0
 
 A collection of Claude Code plugins for project management, infrastructure automation, and development workflows.
 
@@ -19,7 +19,7 @@ AI-guided sprint planning with full Gitea integration. Transforms a proven 15-sp
 - Branch-aware security (development/staging/production)
 - Pre-sprint-close code quality review and test verification
 
-**Commands:** `/sprint-plan`, `/sprint-start`, `/sprint-status`, `/sprint-close`, `/labels-sync`, `/initial-setup`, `/project-init`, `/project-sync`, `/review`, `/test-check`, `/test-gen`, `/debug-report`, `/debug-review`
+**Commands:** `/sprint-plan`, `/sprint-start`, `/sprint-status`, `/sprint-close`, `/labels-sync`, `/initial-setup`, `/project-init`, `/project-sync`, `/review`, `/test-check`, `/test-gen`, `/debug-report`, `/debug-review`, `/suggest-version`, `/proposal-status`
 
 #### [git-flow](./plugins/git-flow/README.md) *NEW in v3.0.0*
 **Git Workflow Automation**
@@ -53,6 +53,19 @@ Analyze, optimize, and create CLAUDE.md configuration files for Claude Code proj
 
 **Commands:** `/config-analyze`, `/config-optimize`, `/config-init`
 
+#### [contract-validator](./plugins/contract-validator/README.md) *NEW in v5.0.0*
+**Cross-Plugin Compatibility Validation**
+
+Validate plugin marketplaces for command conflicts, tool overlaps, and broken agent references.
+
+- Interface parsing from plugin README.md files
+- Agent extraction from CLAUDE.md definitions
+- Pairwise compatibility checks between all plugins
+- Data flow validation for agent sequences
+- Markdown or JSON reports with actionable suggestions
+
+**Commands:** `/validate-contracts`, `/check-agent`, `/list-interfaces`, `/initial-setup`
+
 ### Productivity
 
 #### [clarity-assist](./plugins/clarity-assist/README.md) *NEW in v3.0.0*
@@ -94,11 +107,11 @@ Security vulnerability detection and code refactoring tools.
 
 Full CRUD operations for network infrastructure management directly from Claude Code.
 
-**Commands:** `/initial-setup`, `/cmdb-search`, `/cmdb-device`, `/cmdb-ip`, `/cmdb-site`
+**Commands:** `/initial-setup`, `/cmdb-search`, `/cmdb-device`, `/cmdb-ip`, `/cmdb-site`, `/cmdb-audit`, `/cmdb-register`, `/cmdb-sync`
 
 ### Data Engineering
 
-#### [data-platform](./plugins/data-platform/README.md) *NEW*
+#### [data-platform](./plugins/data-platform/README.md) *NEW in v4.0.0*
 **pandas, PostgreSQL/PostGIS, and dbt Integration**
 
 Comprehensive data engineering toolkit with persistent DataFrame storage.
 
@@ -113,7 +126,7 @@ Comprehensive data engineering toolkit with persistent DataFrame storage.
 ### Visualization
 
-#### [viz-platform](./plugins/viz-platform/README.md) *NEW*
+#### [viz-platform](./plugins/viz-platform/README.md) *NEW in v4.0.0*
 **Dash Mantine Components Validation and Theming**
 
 Visualization toolkit with version-locked component validation and design token theming.
 
@@ -157,7 +170,7 @@ Comprehensive NetBox REST API integration for infrastructure management.
 | Virtualization | Clusters, VMs, Interfaces |
 | Extras | Tags, Custom Fields, Audit Log |
 
-### Data Platform MCP Server (shared) *NEW*
+### Data Platform MCP Server (shared) *NEW in v4.0.0*
 
 pandas, PostgreSQL/PostGIS, and dbt integration for data engineering.
 
@@ -168,7 +181,7 @@ pandas, PostgreSQL/PostGIS, and dbt integration for data engineering.
 | PostGIS | `st_tables`, `st_geometry_type`, `st_srid`, `st_extent` |
 | dbt | `dbt_parse`, `dbt_run`, `dbt_test`, `dbt_build`, `dbt_compile`, `dbt_ls`, `dbt_docs_generate`, `dbt_lineage` |
 
-### Viz Platform MCP Server (shared) *NEW*
+### Viz Platform MCP Server (shared) *NEW in v4.0.0*
 
 Dash Mantine Components validation and visualization tools.
 
@@ -180,6 +193,16 @@ Dash Mantine Components validation and visualization tools.
 | Theme | `theme_create`, `theme_extend`, `theme_validate`, `theme_export_css`, `theme_list`, `theme_activate` |
 | Page | `page_create`, `page_add_navbar`, `page_set_auth`, `page_list`, `page_get_app_config` |
 
+### Contract Validator MCP Server (shared) *NEW in v5.0.0*
+
+Cross-plugin compatibility validation tools.
+
+| Category | Tools |
+|----------|-------|
+| Parse | `parse_plugin_interface`, `parse_claude_md_agents` |
+| Validation | `validate_compatibility`, `validate_agent_refs`, `validate_data_flow` |
+| Report | `generate_compatibility_report`, `list_issues` |
+
 ## Installation
 
 ### Prerequisites
@@ -278,6 +301,7 @@ After installing plugins, the `/plugin` command may show `(no content)` - this i
 | cmdb-assistant | `/cmdb-assistant:cmdb-search` |
 | data-platform | `/data-platform:ingest` |
 | viz-platform | `/viz-platform:chart` |
+| contract-validator | `/contract-validator:validate-contracts` |
 
 ## Repository Structure
 
@@ -289,14 +313,16 @@ leo-claude-mktplace/
 │   ├── gitea/            # Gitea MCP (issues, PRs, wiki)
 │   ├── netbox/           # NetBox MCP (CMDB)
 │   ├── data-platform/    # Data engineering (pandas, PostgreSQL, dbt)
-│   └── viz-platform/     # Visualization (DMC, Plotly, theming)
+│   ├── viz-platform/     # Visualization (DMC, Plotly, theming)
+│   └── contract-validator/ # Cross-plugin validation (v5.0.0)
 ├── plugins/              # All plugins
 │   ├── projman/          # Sprint management
 │   ├── git-flow/         # Git workflow automation
 │   ├── pr-review/        # PR review
 │   ├── clarity-assist/   # Prompt optimization
 │   ├── data-platform/    # Data engineering
-│   ├── viz-platform/     # Visualization (NEW)
+│   ├── viz-platform/     # Visualization
+│   ├── contract-validator/ # Cross-plugin validation (NEW)
 │   ├── claude-config-maintainer/ # CLAUDE.md optimization
 │   ├── cmdb-assistant/   # NetBox CMDB integration
 │   ├── doc-guardian/     # Documentation drift detection
@@ -2,7 +2,7 @@
 
 **This file defines ALL valid paths in this repository. No exceptions. No inference. No assumptions.**
 
-Last Updated: 2026-01-26 (v5.0.0)
+Last Updated: 2026-01-27 (v5.1.0)
 
 ---
 
@@ -165,7 +165,11 @@ leo-claude-mktplace/
 │   ├── setup.sh          # Initial setup (create venvs, config templates)
 │   ├── post-update.sh    # Post-update (rebuild venvs, verify symlinks)
 │   ├── check-venv.sh     # Check if venvs exist (for hooks)
-│   └── validate-marketplace.sh # Marketplace compliance validation
+│   ├── validate-marketplace.sh # Marketplace compliance validation
+│   ├── verify-hooks.sh   # Verify all hooks use correct event types
+│   ├── setup-venvs.sh    # Setup/repair MCP server venvs
+│   ├── venv-repair.sh    # Repair broken venv symlinks
+│   └── release.sh        # Release automation with version bumping
 ├── CLAUDE.md
 ├── README.md
 ├── LICENSE
@@ -23,6 +23,7 @@ Quick reference for all commands in the Leo Claude Marketplace.
 | **projman** | `/debug-report` | | X | Run diagnostics and create structured issue in marketplace |
 | **projman** | `/debug-review` | | X | Investigate diagnostic issues and propose fixes with approval gates |
 | **projman** | `/suggest-version` | | X | Analyze CHANGELOG and recommend semantic version bump |
+| **projman** | `/proposal-status` | | X | View proposal and implementation hierarchy with status |
 | **git-flow** | `/commit` | | X | Create commit with auto-generated conventional message |
 | **git-flow** | `/commit-push` | | X | Commit and push to remote in one operation |
 | **git-flow** | `/commit-merge` | | X | Commit current changes, then merge into target branch |
@@ -55,6 +56,9 @@ Quick reference for all commands in the Leo Claude Marketplace.
 | **cmdb-assistant** | `/cmdb-device` | | X | Manage network devices (create, view, update, delete) |
 | **cmdb-assistant** | `/cmdb-ip` | | X | Manage IP addresses and prefixes |
 | **cmdb-assistant** | `/cmdb-site` | | X | Manage sites, locations, racks, and regions |
+| **cmdb-assistant** | `/cmdb-audit` | | X | Data quality analysis (VMs, devices, naming, roles) |
+| **cmdb-assistant** | `/cmdb-register` | | X | Register current machine into NetBox with running apps |
+| **cmdb-assistant** | `/cmdb-sync` | | X | Sync machine state with NetBox (detect drift, update) |
 | **project-hygiene** | *PostToolUse hook* | X | | Removes temp files, warns about unexpected root files |
 | **data-platform** | `/ingest` | | X | Load data from CSV, Parquet, JSON into DataFrame |
 | **data-platform** | `/profile` | | X | Generate data profiling report with statistics |
mcp-servers/contract-validator/run.sh (new executable file, 17 lines)

#!/bin/bash
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
CACHE_VENV="$HOME/.cache/claude-mcp-venvs/leo-claude-mktplace/contract-validator/.venv"
LOCAL_VENV="$SCRIPT_DIR/.venv"

if [[ -f "$CACHE_VENV/bin/python" ]]; then
    PYTHON="$CACHE_VENV/bin/python"
elif [[ -f "$LOCAL_VENV/bin/python" ]]; then
    PYTHON="$LOCAL_VENV/bin/python"
else
    echo "ERROR: No venv found. Run: ./scripts/setup-venvs.sh" >&2
    exit 1
fi

cd "$SCRIPT_DIR"
export PYTHONPATH="$SCRIPT_DIR"
exec "$PYTHON" -m mcp_server.server "$@"
@@ -330,7 +330,7 @@ class PandasTools:
             return {'error': f'DataFrame not found: {data_ref}'}
 
         try:
-            filtered = df.query(condition)
+            filtered = df.query(condition).reset_index(drop=True)
             result_name = name or f"{data_ref}_filtered"
             return self._check_and_store(
                 filtered,
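The one-line change above is easy to miss; a minimal standalone illustration of why `reset_index(drop=True)` matters after `query()` (the example data is invented):

```python
import pandas as pd

# query() keeps the original row labels, so positional assumptions break downstream.
df = pd.DataFrame({'x': [1, 5, 3]})
print(df.query('x > 2').index.tolist())                         # [1, 2] - gapped index
print(df.query('x > 2').reset_index(drop=True).index.tolist())  # [0, 1] - clean 0..n-1 index
```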
mcp-servers/data-platform/run.sh (new executable file, 17 lines)

#!/bin/bash
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
CACHE_VENV="$HOME/.cache/claude-mcp-venvs/leo-claude-mktplace/data-platform/.venv"
LOCAL_VENV="$SCRIPT_DIR/.venv"

if [[ -f "$CACHE_VENV/bin/python" ]]; then
    PYTHON="$CACHE_VENV/bin/python"
elif [[ -f "$LOCAL_VENV/bin/python" ]]; then
    PYTHON="$LOCAL_VENV/bin/python"
else
    echo "ERROR: No venv found. Run: ./scripts/setup-venvs.sh" >&2
    exit 1
fi

cd "$SCRIPT_DIR"
export PYTHONPATH="$SCRIPT_DIR"
exec "$PYTHON" -m mcp_server.server "$@"
@@ -787,3 +787,42 @@ class GiteaClient:
         response = self.session.post(url, json=data)
         response.raise_for_status()
         return response.json()
+
+    def create_pull_request(
+        self,
+        title: str,
+        body: str,
+        head: str,
+        base: str,
+        labels: Optional[List[str]] = None,
+        repo: Optional[str] = None
+    ) -> Dict:
+        """
+        Create a new pull request.
+
+        Args:
+            title: PR title
+            body: PR description/body
+            head: Source branch name (the branch with changes)
+            base: Target branch name (the branch to merge into)
+            labels: Optional list of label names
+            repo: Repository in 'owner/repo' format
+
+        Returns:
+            Created pull request dictionary
+        """
+        owner, target_repo = self._parse_repo(repo)
+        url = f"{self.base_url}/repos/{owner}/{target_repo}/pulls"
+        data = {
+            'title': title,
+            'body': body,
+            'head': head,
+            'base': base
+        }
+        if labels:
+            label_ids = self._resolve_label_ids(labels, owner, target_repo)
+            data['labels'] = label_ids
+        logger.info(f"Creating PR '{title}' in {owner}/{target_repo}: {head} -> {base}")
+        response = self.session.post(url, json=data)
+        response.raise_for_status()
+        return response.json()
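For orientation, a minimal usage sketch of the new client method. The construction of `GiteaClient` is not shown in this diff, so the wiring below is an assumption, not the plugin's actual setup:

```python
# Hypothetical wiring: get_configured_gitea_client() is an assumed helper, not a real API.
client = get_configured_gitea_client()
pr = client.create_pull_request(
    title="feat: add create_pull_request tool",
    body="Completes the PR lifecycle for the Gitea MCP server.",
    head="feat/create-pr-tool",   # source branch with the changes
    base="development",           # target branch to merge into
    labels=["enhancement"],       # resolved to label IDs via _resolve_label_ids
)
print(pr.get("number"))           # the Gitea API returns the created PR as JSON
```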
@@ -844,6 +844,41 @@ class GiteaMCPServer:
                 },
                 "required": ["pr_number", "body"]
             }
         ),
+        Tool(
+            name="create_pull_request",
+            description="Create a new pull request",
+            inputSchema={
+                "type": "object",
+                "properties": {
+                    "title": {
+                        "type": "string",
+                        "description": "PR title"
+                    },
+                    "body": {
+                        "type": "string",
+                        "description": "PR description/body"
+                    },
+                    "head": {
+                        "type": "string",
+                        "description": "Source branch name (the branch with changes)"
+                    },
+                    "base": {
+                        "type": "string",
+                        "description": "Target branch name (the branch to merge into)"
+                    },
+                    "labels": {
+                        "type": "array",
+                        "items": {"type": "string"},
+                        "description": "Optional list of label names"
+                    },
+                    "repo": {
+                        "type": "string",
+                        "description": "Repository name (owner/repo format)"
+                    }
+                },
+                "required": ["title", "body", "head", "base"]
+            }
+        )
     ]
@@ -959,6 +994,8 @@ class GiteaMCPServer:
             result = await self.pr_tools.create_pr_review(**arguments)
         elif name == "add_pr_comment":
             result = await self.pr_tools.add_pr_comment(**arguments)
+        elif name == "create_pull_request":
+            result = await self.pr_tools.create_pull_request(**arguments)
         else:
             raise ValueError(f"Unknown tool: {name}")
@@ -7,6 +7,7 @@ Provides async wrappers for issue CRUD operations with:
 - Comprehensive error handling
 """
 import asyncio
+import os
 import subprocess
 import logging
 from typing import List, Dict, Optional
@@ -27,19 +28,34 @@ class IssueTools:
         """
         self.gitea = gitea_client
 
+    def _get_project_directory(self) -> Optional[str]:
+        """
+        Get the user's project directory from environment.
+
+        Returns:
+            Project directory path or None if not set
+        """
+        return os.environ.get('CLAUDE_PROJECT_DIR')
+
     def _get_current_branch(self) -> str:
         """
-        Get current git branch.
+        Get current git branch from user's project directory.
+
+        Uses CLAUDE_PROJECT_DIR environment variable to determine the correct
+        directory for git operations, avoiding the bug where git runs from
+        the installed plugin directory instead of the user's project.
 
         Returns:
             Current branch name or 'unknown' if not in a git repo
         """
         try:
+            project_dir = self._get_project_directory()
             result = subprocess.run(
                 ['git', 'rev-parse', '--abbrev-ref', 'HEAD'],
                 capture_output=True,
                 text=True,
-                check=True
+                check=True,
+                cwd=project_dir  # Run git in project directory, not plugin directory
             )
             return result.stdout.strip()
         except subprocess.CalledProcessError:
@@ -66,7 +82,13 @@ class IssueTools:
             return operation in ['list_issues', 'get_issue', 'get_labels', 'create_issue']
 
         # Development branches (full access)
-        if branch in ['development', 'develop'] or branch.startswith(('feat/', 'feature/', 'dev/')):
+        # Include all common feature/fix branch patterns
+        dev_prefixes = (
+            'feat/', 'feature/', 'dev/',
+            'fix/', 'bugfix/', 'hotfix/',
+            'chore/', 'refactor/', 'docs/', 'test/'
+        )
+        if branch in ['development', 'develop'] or branch.startswith(dev_prefixes):
             return True
 
         # Unknown branch - be restrictive
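A quick standalone repro of what the widened prefix check accepts (same tuple as the diff; the sample branch names are invented):

```python
dev_prefixes = (
    'feat/', 'feature/', 'dev/',
    'fix/', 'bugfix/', 'hotfix/',
    'chore/', 'refactor/', 'docs/', 'test/'
)
for branch in ['fix/mcp-branch-detection', 'docs/update-readme', 'main', 'staging']:
    allowed = branch in ['development', 'develop'] or branch.startswith(dev_prefixes)
    print(f"{branch}: {'full access' if allowed else 'restricted'}")
# fix/... and docs/... now get full access; main and staging fall through
# to the earlier read-only / restrictive handling.
```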
@@ -7,6 +7,7 @@ Provides async wrappers for PR operations with:
 - Comprehensive error handling
 """
 import asyncio
+import os
 import subprocess
 import logging
 from typing import List, Dict, Optional
@@ -27,19 +28,34 @@ class PullRequestTools:
         """
         self.gitea = gitea_client
 
+    def _get_project_directory(self) -> Optional[str]:
+        """
+        Get the user's project directory from environment.
+
+        Returns:
+            Project directory path or None if not set
+        """
+        return os.environ.get('CLAUDE_PROJECT_DIR')
+
     def _get_current_branch(self) -> str:
         """
-        Get current git branch.
+        Get current git branch from user's project directory.
+
+        Uses CLAUDE_PROJECT_DIR environment variable to determine the correct
+        directory for git operations, avoiding the bug where git runs from
+        the installed plugin directory instead of the user's project.
 
         Returns:
             Current branch name or 'unknown' if not in a git repo
         """
         try:
+            project_dir = self._get_project_directory()
             result = subprocess.run(
                 ['git', 'rev-parse', '--abbrev-ref', 'HEAD'],
                 capture_output=True,
                 text=True,
-                check=True
+                check=True,
+                cwd=project_dir  # Run git in project directory, not plugin directory
             )
             return result.stdout.strip()
         except subprocess.CalledProcessError:
@@ -69,7 +85,13 @@ class PullRequestTools:
             return operation in read_ops + ['add_pr_comment']
 
         # Development branches (full access)
-        if branch in ['development', 'develop'] or branch.startswith(('feat/', 'feature/', 'dev/')):
+        # Include all common feature/fix branch patterns
+        dev_prefixes = (
+            'feat/', 'feature/', 'dev/',
+            'fix/', 'bugfix/', 'hotfix/',
+            'chore/', 'refactor/', 'docs/', 'test/'
+        )
+        if branch in ['development', 'develop'] or branch.startswith(dev_prefixes):
             return True
 
         # Unknown branch - be restrictive
@@ -272,3 +294,42 @@ class PullRequestTools:
             None,
             lambda: self.gitea.add_pr_comment(pr_number, body, repo)
         )
+
+    async def create_pull_request(
+        self,
+        title: str,
+        body: str,
+        head: str,
+        base: str,
+        labels: Optional[List[str]] = None,
+        repo: Optional[str] = None
+    ) -> Dict:
+        """
+        Create a new pull request (async wrapper with branch check).
+
+        Args:
+            title: PR title
+            body: PR description/body
+            head: Source branch name (the branch with changes)
+            base: Target branch name (the branch to merge into)
+            labels: Optional list of label names
+            repo: Override configured repo (for PMO multi-repo)
+
+        Returns:
+            Created pull request dictionary
+
+        Raises:
+            PermissionError: If operation not allowed on current branch
+        """
+        if not self._check_branch_permissions('create_pull_request'):
+            branch = self._get_current_branch()
+            raise PermissionError(
+                f"Cannot create PR on branch '{branch}'. "
+                f"Switch to a development or feature branch to create PRs."
+            )
+
+        loop = asyncio.get_event_loop()
+        return await loop.run_in_executor(
+            None,
+            lambda: self.gitea.create_pull_request(title, body, head, base, labels, repo)
+        )
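And a hedged sketch of driving the async wrapper end to end; only the method call mirrors the diff, while the object construction is assumed:

```python
import asyncio

async def open_pr(pr_tools):  # pr_tools: a PullRequestTools bound to a GiteaClient
    return await pr_tools.create_pull_request(
        title="fix: run git in the user's project directory",
        body="Fixes MCP branch detection (bug #231).",
        head="fix/mcp-branch-detection",
        base="development",
    )

# Raises PermissionError when the current branch is not a development/feature branch:
# pr = asyncio.run(open_pr(pr_tools))
```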
mcp-servers/gitea/run.sh (new executable file, 17 lines)

#!/bin/bash
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
CACHE_VENV="$HOME/.cache/claude-mcp-venvs/leo-claude-mktplace/gitea/.venv"
LOCAL_VENV="$SCRIPT_DIR/.venv"

if [[ -f "$CACHE_VENV/bin/python" ]]; then
    PYTHON="$CACHE_VENV/bin/python"
elif [[ -f "$LOCAL_VENV/bin/python" ]]; then
    PYTHON="$LOCAL_VENV/bin/python"
else
    echo "ERROR: No venv found. Run: ./scripts/setup-venvs.sh" >&2
    exit 1
fi

cd "$SCRIPT_DIR"
export PYTHONPATH="$SCRIPT_DIR"
exec "$PYTHON" -m mcp_server.server "$@"
mcp-servers/netbox/run.sh (new executable file, 17 lines)

#!/bin/bash
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
CACHE_VENV="$HOME/.cache/claude-mcp-venvs/leo-claude-mktplace/netbox/.venv"
LOCAL_VENV="$SCRIPT_DIR/.venv"

if [[ -f "$CACHE_VENV/bin/python" ]]; then
    PYTHON="$CACHE_VENV/bin/python"
elif [[ -f "$LOCAL_VENV/bin/python" ]]; then
    PYTHON="$LOCAL_VENV/bin/python"
else
    echo "ERROR: No venv found. Run: ./scripts/setup-venvs.sh" >&2
    exit 1
fi

cd "$SCRIPT_DIR"
export PYTHONPATH="$SCRIPT_DIR"
exec "$PYTHON" -m mcp_server.server "$@"
mcp-servers/viz-platform/run.sh (new executable file, 17 lines)

#!/bin/bash
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
CACHE_VENV="$HOME/.cache/claude-mcp-venvs/leo-claude-mktplace/viz-platform/.venv"
LOCAL_VENV="$SCRIPT_DIR/.venv"

if [[ -f "$CACHE_VENV/bin/python" ]]; then
    PYTHON="$CACHE_VENV/bin/python"
elif [[ -f "$LOCAL_VENV/bin/python" ]]; then
    PYTHON="$LOCAL_VENV/bin/python"
else
    echo "ERROR: No venv found. Run: ./scripts/setup-venvs.sh" >&2
    exit 1
fi

cd "$SCRIPT_DIR"
export PYTHONPATH="$SCRIPT_DIR"
exec "$PYTHON" -m mcp_server.server "$@"
plugins/clarity-assist/hooks/hooks.json (new file, 10 lines)

{
  "hooks": {
    "UserPromptSubmit": [
      {
        "type": "command",
        "command": "${CLAUDE_PLUGIN_ROOT}/hooks/vagueness-check.sh"
      }
    ]
  }
}
plugins/clarity-assist/hooks/vagueness-check.sh (new executable file, 216 lines)

#!/bin/bash
# clarity-assist vagueness detection hook
# Analyzes user prompts for vagueness and suggests /clarity-assist when beneficial
# All output MUST have [clarity-assist] prefix
# This is a NON-BLOCKING hook - always exits 0

PREFIX="[clarity-assist]"

# Check if auto-suggest is enabled (default: true)
AUTO_SUGGEST="${CLARITY_ASSIST_AUTO_SUGGEST:-true}"
if [[ "$AUTO_SUGGEST" != "true" ]]; then
    exit 0
fi

# Threshold for vagueness score (default: 0.6)
THRESHOLD="${CLARITY_ASSIST_VAGUENESS_THRESHOLD:-0.6}"

# Read user prompt from stdin
PROMPT=""
if [[ -t 0 ]]; then
    # No stdin available
    exit 0
else
    PROMPT=$(cat)
fi

# Skip empty prompts
if [[ -z "$PROMPT" ]]; then
    exit 0
fi

# Skip if prompt is a command (starts with /)
if [[ "$PROMPT" =~ ^[[:space:]]*/[a-zA-Z] ]]; then
    exit 0
fi

# Skip if prompt mentions specific files or paths
if [[ "$PROMPT" =~ \.(py|js|ts|sh|md|json|yaml|yml|txt|css|html|go|rs|java|c|cpp|h)([[:space:]]|$|[^a-zA-Z]) ]] || \
   [[ "$PROMPT" =~ [/\\][a-zA-Z0-9_-]+[/\\] ]] || \
   [[ "$PROMPT" =~ (src|lib|test|docs|plugins|hooks|commands)/ ]]; then
    exit 0
fi

# Initialize vagueness score
SCORE=0

# Count words in the prompt
WORD_COUNT=$(echo "$PROMPT" | wc -w | tr -d ' ')

# ============================================================================
# Vagueness Signal Detection
# ============================================================================

# Signal 1: Very short prompts (< 10 words) are often vague
if [[ "$WORD_COUNT" -lt 10 ]]; then
    # But very short specific commands are OK
    if [[ "$WORD_COUNT" -lt 3 ]]; then
        # Extremely short - probably intentional or a command
        :
    else
        SCORE=$(echo "$SCORE + 0.3" | bc)
    fi
fi

# Signal 2: Vague action phrases (no specific outcome)
VAGUE_ACTIONS=(
    "help me"
    "help with"
    "do something"
    "work on"
    "look at"
    "check this"
    "fix it"
    "fix this"
    "make it better"
    "make this better"
    "improve it"
    "improve this"
    "update this"
    "update it"
    "change it"
    "change this"
    "can you"
    "could you"
    "would you"
    "please help"
)

PROMPT_LOWER=$(echo "$PROMPT" | tr '[:upper:]' '[:lower:]')

for phrase in "${VAGUE_ACTIONS[@]}"; do
    if [[ "$PROMPT_LOWER" == *"$phrase"* ]]; then
        SCORE=$(echo "$SCORE + 0.2" | bc)
        break
    fi
done

# Signal 3: Ambiguous scope indicators
AMBIGUOUS_SCOPE=(
    "somehow"
    "something"
    "somewhere"
    "anything"
    "whatever"
    "stuff"
    "things"
    "etc"
    "and so on"
)

for word in "${AMBIGUOUS_SCOPE[@]}"; do
    if [[ "$PROMPT_LOWER" == *"$word"* ]]; then
        SCORE=$(echo "$SCORE + 0.15" | bc)
        break
    fi
done

# Signal 4: Missing context indicators (no reference to what/where)
# Check if prompt lacks specificity markers
HAS_SPECIFICS=false

# Specific technical terms suggest clarity
SPECIFIC_MARKERS=(
    "function"
    "class"
    "method"
    "variable"
    "error"
    "bug"
    "test"
    "api"
    "endpoint"
    "database"
    "query"
    "component"
    "module"
    "service"
    "config"
    "install"
    "deploy"
    "build"
    "run"
    "execute"
    "create"
    "delete"
    "add"
    "remove"
    "implement"
    "refactor"
    "migrate"
    "upgrade"
    "debug"
    "log"
    "exception"
    "stack"
    "memory"
    "performance"
    "security"
    "auth"
    "token"
    "session"
    "route"
    "controller"
    "model"
    "view"
    "template"
    "schema"
    "migration"
    "commit"
    "branch"
    "merge"
    "pull"
    "push"
)

for marker in "${SPECIFIC_MARKERS[@]}"; do
    if [[ "$PROMPT_LOWER" == *"$marker"* ]]; then
        HAS_SPECIFICS=true
        break
    fi
done

if [[ "$HAS_SPECIFICS" == false ]] && [[ "$WORD_COUNT" -gt 3 ]]; then
    SCORE=$(echo "$SCORE + 0.2" | bc)
fi

# Signal 5: Question without context
if [[ "$PROMPT" =~ \?$ ]] && [[ "$WORD_COUNT" -lt 8 ]]; then
    # Short questions without specifics are often vague
    if [[ "$HAS_SPECIFICS" == false ]]; then
        SCORE=$(echo "$SCORE + 0.15" | bc)
    fi
fi

# Cap score at 1.0
if (( $(echo "$SCORE > 1.0" | bc -l) )); then
    SCORE="1.0"
fi

# ============================================================================
# Output suggestion if score exceeds threshold
# ============================================================================

# Compare score to threshold using bc
if (( $(echo "$SCORE >= $THRESHOLD" | bc -l) )); then
    # Format score as percentage for display
    SCORE_PCT=$(echo "$SCORE * 100" | bc | cut -d'.' -f1)

    # Gentle, non-blocking suggestion
    echo "$PREFIX Your prompt could benefit from more clarity."
    echo "$PREFIX Consider running /clarity-assist to refine your request."
    echo "$PREFIX (Vagueness score: ${SCORE_PCT}% - this is a suggestion, not a block)"
fi

# Always exit 0 - this hook is non-blocking
exit 0
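To sanity-check the scoring by hand, the hook can be fed sample prompts on stdin (the invocations below are illustrative; in production Claude Code supplies the prompt):

```bash
# Vague: short, "can you", "stuff", no specific markers -> score ~0.85, prints the suggestion
echo "can you help me with stuff" | ./vagueness-check.sh

# Specific: names a .py file, so the file/path guard exits silently
echo "refactor the filter in pandas_tools.py to reset the index" | ./vagueness-check.sh

# Tune sensitivity via the threshold env var (0.3 instead of the 0.6 default);
# "update the config" scores 0.3, so it triggers here but not at the default
echo "update the config" | CLARITY_ASSIST_VAGUENESS_THRESHOLD=0.3 ./vagueness-check.sh
```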
@@ -1,7 +1,7 @@
 {
   "name": "cmdb-assistant",
-  "version": "1.0.0",
-  "description": "NetBox CMDB integration for infrastructure management - query, create, update, and manage network devices, IP addresses, sites, and more",
+  "version": "1.1.0",
+  "description": "NetBox CMDB integration with data quality validation - query, create, update, and manage network devices, IP addresses, sites, and more with best practices enforcement",
   "author": {
     "name": "Leo Miranda",
     "email": "leobmiranda@gmail.com"
@@ -15,7 +15,9 @@
     "infrastructure",
     "network",
     "ipam",
-    "dcim"
+    "dcim",
+    "data-quality",
+    "validation"
   ],
   "commands": ["./commands/"],
   "mcpServers": ["./.mcp.json"]
@@ -1,12 +1,8 @@
 {
   "mcpServers": {
     "netbox": {
-      "command": "${CLAUDE_PLUGIN_ROOT}/mcp-servers/netbox/.venv/bin/python",
-      "args": ["-m", "mcp_server.server"],
-      "cwd": "${CLAUDE_PLUGIN_ROOT}/mcp-servers/netbox",
-      "env": {
-        "PYTHONPATH": "${CLAUDE_PLUGIN_ROOT}/mcp-servers/netbox"
-      }
+      "command": "${CLAUDE_PLUGIN_ROOT}/mcp-servers/netbox/run.sh",
+      "args": []
     }
   }
 }
@@ -2,6 +2,14 @@
 
 A Claude Code plugin for NetBox CMDB integration - query, create, update, and manage your network infrastructure directly from Claude Code.
 
+## What's New in v1.1.0
+
+- **Data Quality Validation**: Hooks for SessionStart and PreToolUse that check data quality and warn about missing fields
+- **Best Practices Skill**: Reference documentation for NetBox patterns (naming conventions, dependency order, role management)
+- **`/cmdb-audit`**: Analyze data quality across VMs, devices, naming conventions, and roles
+- **`/cmdb-register`**: Register the current machine into NetBox with all running applications (Docker containers, systemd services)
+- **`/cmdb-sync`**: Synchronize existing machine state with NetBox (detect drift, update with confirmation)
+
 ## Features
 
 - **Full CRUD Operations**: Create, read, update, and delete across all NetBox modules
@@ -9,6 +17,9 @@ A Claude Code plugin for NetBox CMDB integration - query, create, update, and ma
 - **IP Management**: Allocate IPs, manage prefixes, track VLANs
 - **Infrastructure Documentation**: Document servers, network devices, and connections
 - **Audit Trail**: Review changes and maintain infrastructure history
+- **Data Quality Validation**: Proactive checks for missing site, tenant, platform assignments
+- **Machine Registration**: Auto-discover and register servers with running applications
+- **Drift Detection**: Sync machine state and detect changes over time
 
 ## Installation
 
@@ -40,10 +51,14 @@ Add to your Claude Code plugins or marketplace configuration.
 
 | Command | Description |
 |---------|-------------|
 | `/initial-setup` | Interactive setup wizard for NetBox MCP server |
 | `/cmdb-search <query>` | Search for devices, IPs, sites, or any CMDB object |
 | `/cmdb-device <action>` | Manage network devices (list, create, update, delete) |
 | `/cmdb-ip <action>` | Manage IP addresses and prefixes |
 | `/cmdb-site <action>` | Manage sites and locations |
+| `/cmdb-audit [scope]` | Data quality analysis (all, vms, devices, naming, roles) |
+| `/cmdb-register` | Register current machine into NetBox with running apps |
+| `/cmdb-sync` | Sync machine state with NetBox (detect drift, update) |
 
 ## Agent
 
@@ -103,6 +118,15 @@ This plugin provides access to the full NetBox API:
 - **Wireless**: WLANs, Wireless Links
 - **Extras**: Tags, Custom Fields, Journal Entries, Audit Log
 
+## Hooks
+
+| Event | Purpose |
+|-------|---------|
+| `SessionStart` | Test NetBox connectivity, report data quality issues |
+| `PreToolUse` | Validate VM/device parameters before create/update |
+
+Hooks are **non-blocking** - they emit warnings but never prevent operations.
+
 ## Architecture
 
 ```
@@ -115,13 +139,23 @@ cmdb-assistant/
 │   ├── cmdb-search.md    # Search command
 │   ├── cmdb-device.md    # Device management
 │   ├── cmdb-ip.md        # IP management
-│   └── cmdb-site.md      # Site management
+│   ├── cmdb-site.md      # Site management
+│   ├── cmdb-audit.md     # Data quality audit (NEW)
+│   ├── cmdb-register.md  # Machine registration (NEW)
+│   └── cmdb-sync.md      # Machine sync (NEW)
+├── hooks/
+│   ├── hooks.json        # Hook configuration
+│   ├── startup-check.sh  # SessionStart validation
+│   └── validate-input.sh # PreToolUse validation
+├── skills/
+│   └── netbox-patterns/
+│       └── SKILL.md      # NetBox best practices reference
 ├── agents/
 │   └── cmdb-assistant.md # Main assistant agent
 └── README.md
 ```
 
-The plugin uses the shared NetBox MCP server at `../mcp-servers/netbox/`.
+The plugin uses the shared NetBox MCP server at `mcp-servers/netbox/`.
 
 ## Configuration
@@ -76,3 +76,132 @@ When presenting data:
 - Suggest corrective actions
 - For permission errors, note what access is needed
 - For validation errors, explain required fields/formats
+
+## Data Quality Validation
+
+**IMPORTANT:** Load the `netbox-patterns` skill for best practice reference.
+
+Before ANY create or update operation, validate against NetBox best practices:
+
+### VM Operations
+
+**Required checks before `virt_create_vm` or `virt_update_vm`:**
+
+1. **Cluster/Site Assignment** - VMs must have either cluster or site
+2. **Tenant Assignment** - Recommend if not provided
+3. **Platform Assignment** - Recommend for OS tracking
+4. **Naming Convention** - Check against `{env}-{app}-{number}` pattern
+5. **Role Assignment** - Recommend appropriate role
+
+**If user provides no site/tenant, ASK:**
+
+> "This VM has no site or tenant assigned. NetBox best practices recommend:
+> - **Site**: For location-based queries and power budgeting
+> - **Tenant**: For resource isolation and ownership tracking
+>
+> Would you like me to:
+> 1. Assign to an existing site/tenant (list available)
+> 2. Create new site/tenant first
+> 3. Proceed without (not recommended for production use)"
+
+### Device Operations
+
+**Required checks before `dcim_create_device` or `dcim_update_device`:**
+
+1. **Site is REQUIRED** - Fail without it
+2. **Platform Assignment** - Recommend for OS tracking
+3. **Naming Convention** - Check against `{role}-{location}-{number}` pattern
+4. **Role Assignment** - Ensure appropriate role selected
+5. **After Creation** - Offer to set primary IP
+
+### Cluster Operations
+
+**Required checks before `virt_create_cluster`:**
+
+1. **Site Scope** - Recommend assigning to site
+2. **Cluster Type** - Ensure appropriate type selected
+3. **Device Association** - Recommend linking to host device
+
+### Role Management
+
+**Before creating a new device role:**
+
+1. List existing roles with `dcim_list_device_roles`
+2. Check if a more general role already exists
+3. Recommend role consolidation if >10 specific roles exist
+
+**Example guidance:**
+
+> "You're creating role 'nginx-web-server'. An existing 'web-server' role exists.
+> Consider using 'web-server' and tracking nginx via the platform field instead.
+> This reduces role fragmentation and improves maintainability."
+
+## Dependency Order Enforcement
+
+When creating multiple objects, follow this order:
+
+```
+1. Regions → Sites → Locations → Racks
+2. Tenant Groups → Tenants
+3. Manufacturers → Device Types
+4. Device Roles, Platforms
+5. Devices (with site, role, type)
+6. Clusters (with type, optional site)
+7. VMs (with cluster)
+8. Interfaces → IP Addresses → Primary IP assignment
+```
+
+**CRITICAL Rules:**
+- NEVER create a VM before its cluster exists
+- NEVER create a device before its site exists
+- NEVER create an interface before its device exists
+- NEVER create an IP before its interface exists (if assigning)
+
+## Naming Convention Enforcement
+
+When user provides a name, check against patterns:
+
+| Object Type | Pattern | Example |
+|-------------|---------|---------|
+| Device | `{role}-{site}-{number}` | `web-dc1-01` |
+| VM | `{env}-{app}-{number}` or `{prefix}_{service}` | `prod-api-01` |
+| Cluster | `{site}-{type}` | `dc1-vmware`, `home-docker` |
+| Prefix | Include purpose in description | "Production /24 for web tier" |
+
+**If name doesn't match patterns, warn:**
+
+> "The name 'HotServ' doesn't follow naming conventions.
+> Suggested: `prod-hotserv-01` or `hotserv-cloud-01`.
+> Consistent naming improves searchability and automation compatibility.
+> Proceed with original name? [Y/n]"
+
+## Duplicate Prevention
+
+Before creating objects, always check for existing duplicates:
+
+```
+# Before creating device
+dcim_list_devices name=<proposed-name>
+
+# Before creating VM
+virt_list_vms name=<proposed-name>
+
+# Before creating prefix
+ipam_list_prefixes prefix=<proposed-prefix>
+```
+
+If duplicate found, inform user and suggest update instead of create.
+
+## Available Commands
+
+Users can invoke these commands for structured workflows:
+
+| Command | Purpose |
+|---------|---------|
+| `/cmdb-search <query>` | Search across all CMDB objects |
+| `/cmdb-device <action>` | Device CRUD operations |
+| `/cmdb-ip <action>` | IP address and prefix management |
+| `/cmdb-site <action>` | Site and location management |
+| `/cmdb-audit [scope]` | Data quality analysis |
+| `/cmdb-register` | Register current machine |
+| `/cmdb-sync` | Sync machine state with NetBox |
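The dependency-order rules in the agent definition above imply a concrete call sequence. A hedged worked example in the same tool-call notation the agent file uses (object names and placeholder IDs are invented; only tool names that appear in this changeset are used, and the site is assumed to exist already):

```
# Register a Docker host and one containerized app, in dependency order
dcim_create_device name=web-home-01 site=<site_id> role=<role_id> device_type=<device_type_id>
virt_create_cluster name=home-docker type=<cluster_type_id> site=<site_id>
virt_create_vm name=prod-api-01 cluster=<cluster_id> status="active"
dcim_create_interface device=<device_id> name=eth0 type=1000base-t
ipam_create_ip_address address=192.168.1.10/24 assigned_object_type="dcim.interface" assigned_object_id=<interface_id>
dcim_update_device id=<device_id> primary_ip4=<primary_ip_id>
```

Each call only references objects created by an earlier call, which is exactly what the CRITICAL rules require.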
plugins/cmdb-assistant/commands/cmdb-audit.md (new file, 195 lines)
---
description: Audit NetBox data quality and identify consistency issues
---

# CMDB Data Quality Audit

Analyze NetBox data for quality issues and best practice violations.

## Usage

```
/cmdb-audit [scope]
```

**Scopes:**
- `all` (default) - Full audit across all categories
- `vms` - Virtual machines only
- `devices` - Physical devices only
- `naming` - Naming convention analysis
- `roles` - Role fragmentation analysis

## Instructions

You are a data quality auditor for NetBox. Your job is to identify consistency issues and best practice violations.

**IMPORTANT:** Load the `netbox-patterns` skill for best practice reference.

### Phase 1: Data Collection

Run these MCP tool calls to gather data for analysis:

```
1. virt_list_vms (no filters - get all)
2. dcim_list_devices (no filters - get all)
3. virt_list_clusters (no filters)
4. dcim_list_sites
5. tenancy_list_tenants
6. dcim_list_device_roles
7. dcim_list_platforms
```

Store the results for analysis.

### Phase 2: Quality Checks

Analyze collected data for these issues by severity:

#### CRITICAL Issues (must fix immediately)

| Check | Detection |
|-------|-----------|
| VMs without cluster | `cluster` field is null AND `site` field is null |
| Devices without site | `site` field is null |
| Active devices without primary IP | `status=active` AND `primary_ip4` is null AND `primary_ip6` is null |

#### HIGH Issues (should fix soon)

| Check | Detection |
|-------|-----------|
| VMs without site | VM has no site (neither direct nor via cluster.site) |
| VMs without tenant | `tenant` field is null |
| Devices without platform | `platform` field is null |
| Clusters not scoped to site | `site` field is null on cluster |
| VMs without role | `role` field is null |

#### MEDIUM Issues (plan to address)

| Check | Detection |
|-------|-----------|
| Inconsistent naming | Names don't match patterns: devices=`{role}-{site}-{num}`, VMs=`{env}-{app}-{num}` |
| Role fragmentation | More than 10 device roles with <3 assignments each |
| Missing tags on production | Active resources without any tags |
| Mixed naming separators | Some names use `_`, others use `-` |

#### LOW Issues (informational)

| Check | Detection |
|-------|-----------|
| Docker containers as VMs | Cluster type is "Docker Compose" - document this modeling choice |
| VMs without description | `description` field is empty |
| Sites without physical address | `physical_address` is empty |
| Devices without serial | `serial` field is empty |

### Phase 3: Naming Convention Analysis

For naming scope, analyze patterns:

1. **Extract naming patterns** from existing objects
2. **Identify dominant patterns** (most common conventions)
3. **Flag outliers** that don't match dominant patterns
4. **Suggest standardization** based on best practices

**Expected Patterns:**
- Devices: `{role}-{location}-{number}` (e.g., `web-dc1-01`)
- VMs: `{prefix}_{service}` or `{env}-{app}-{number}` (e.g., `prod-api-01`)
- Clusters: `{site}-{type}` (e.g., `home-docker`)

### Phase 4: Role Analysis

For roles scope, analyze fragmentation:

1. **List all device roles** with assignment counts
2. **Identify single-use roles** (only 1 device/VM)
3. **Identify similar roles** that could be consolidated
4. **Suggest consolidation** based on patterns

**Red Flags:**
- More than 15 highly specific roles
- Roles with technology in name (use platform instead)
- Roles that duplicate functionality

### Phase 5: Report Generation

Present findings in this structure:

```markdown
## CMDB Data Quality Audit Report

**Generated:** [timestamp]
**Scope:** [scope parameter]

### Summary

| Metric | Count |
|--------|-------|
| Total VMs | X |
| Total Devices | Y |
| Total Clusters | Z |
| **Total Issues** | **N** |

| Severity | Count |
|----------|-------|
| Critical | A |
| High | B |
| Medium | C |
| Low | D |

### Critical Issues

[List each with specific object names and IDs]

**Example:**
- VM `HotServ` (ID: 1) - No cluster or site assignment
- Device `server-01` (ID: 5) - No site assignment

### High Issues

[List each with specific object names]

### Medium Issues

[Grouped by category with counts]

### Recommendations

1. **[Most impactful fix]** - affects N objects
2. **[Second priority]** - affects M objects
...

### Quick Fixes

Commands to fix common issues:

```
# Assign site to VM
virt_update_vm id=X site=Y

# Assign platform to device
dcim_update_device id=X platform=Y
```

### Next Steps

- Run `/cmdb-register` to properly register new machines
- Use `/cmdb-sync` to update existing registrations
- Consider bulk updates via NetBox web UI for >10 items
```

## Scope-Specific Instructions

### For `vms` scope:
Focus only on Virtual Machine checks. Skip device and role analysis.

### For `devices` scope:
Focus only on Device checks. Skip VM and cluster analysis.

### For `naming` scope:
Focus on naming convention analysis across all objects. Generate detailed pattern report.

### For `roles` scope:
Focus on role fragmentation analysis. Generate consolidation recommendations.

## User Request

$ARGUMENTS
plugins/cmdb-assistant/commands/cmdb-register.md (new file, 322 lines)
@@ -0,0 +1,322 @@
|
||||
---
|
||||
description: Register the current machine into NetBox with all running applications
|
||||
---
|
||||
|
||||
# CMDB Machine Registration
|
||||
|
||||
Register the current machine into NetBox, including hardware info, network interfaces, and running applications (Docker containers, services).
|
||||
|
||||
## Usage
|
||||
|
||||
```
|
||||
/cmdb-register [--site <site-name>] [--tenant <tenant-name>] [--role <role-name>]
|
||||
```
|
||||
|
||||
**Options:**
|
||||
- `--site <name>`: Site to assign (will prompt if not provided)
|
||||
- `--tenant <name>`: Tenant for resource isolation (optional)
|
||||
- `--role <name>`: Device role (default: auto-detect based on services)
|
||||
|
||||
## Instructions
|
||||
|
||||
You are registering the current machine into NetBox. This is a multi-phase process that discovers local system information and creates corresponding NetBox objects.
|
||||
|
||||
**IMPORTANT:** Load the `netbox-patterns` skill for best practice reference.
|
||||
|
||||
### Phase 1: System Discovery (via Bash)
|
||||
|
||||
Gather system information using these commands:
|
||||
|
||||
#### 1.1 Basic Device Info
|
||||
|
||||
```bash
|
||||
# Hostname
|
||||
hostname
|
||||
|
||||
# OS/Platform info
|
||||
cat /etc/os-release 2>/dev/null || uname -a
|
||||
|
||||
# Hardware model (varies by system)
|
||||
# Raspberry Pi:
|
||||
cat /proc/device-tree/model 2>/dev/null || echo "Unknown"
|
||||
|
||||
# x86 systems:
|
||||
cat /sys/class/dmi/id/product_name 2>/dev/null || echo "Unknown"
|
||||
|
||||
# Serial number
|
||||
# Raspberry Pi:
|
||||
cat /proc/device-tree/serial-number 2>/dev/null || cat /proc/cpuinfo | grep Serial | cut -d: -f2 | tr -d ' ' 2>/dev/null
|
||||
|
||||
# x86 systems:
|
||||
cat /sys/class/dmi/id/product_serial 2>/dev/null || echo "Unknown"
|
||||
|
||||
# CPU info
|
||||
nproc
|
||||
|
||||
# Memory (MB)
|
||||
free -m | awk '/Mem:/ {print $2}'
|
||||
|
||||
# Disk (GB, root filesystem)
|
||||
df -BG / | awk 'NR==2 {print $2}' | tr -d 'G'
|
||||
```
|
||||
|
||||
#### 1.2 Network Interfaces
|
||||
|
||||
```bash
|
||||
# Get interfaces with IPs (JSON format)
|
||||
ip -j addr show 2>/dev/null || ip addr show
|
||||
|
||||
# Get default gateway interface
|
||||
ip route | grep default | awk '{print $5}' | head -1
|
||||
|
||||
# Get MAC addresses
|
||||
ip -j link show 2>/dev/null || ip link show
|
||||
```
|
||||
|
||||

#### 1.3 Running Applications

```bash
# Docker containers (if docker available)
docker ps --format '{"name":"{{.Names}}","image":"{{.Image}}","status":"{{.Status}}","ports":"{{.Ports}}"}' 2>/dev/null || echo "Docker not available"

# Docker Compose projects (check common locations)
find ~/apps /home/*/apps \( -name "docker-compose.yml" -o -name "docker-compose.yaml" \) 2>/dev/null | head -20

# Systemd services (running)
systemctl list-units --type=service --state=running --no-pager --plain 2>/dev/null | grep -v "^UNIT" | head -30
```

### Phase 2: Pre-Registration Checks (via MCP)

Before creating objects, verify prerequisites:

#### 2.1 Check if Device Already Exists

```
dcim_list_devices name=<hostname>
```

**If device exists:**
- Inform the user and suggest `/cmdb-sync` instead
- Ask whether to proceed with re-registration (updates the existing record)

#### 2.2 Verify/Create Site

If `--site` provided:
```
dcim_list_sites name=<site-name>
```

If the site doesn't exist, ask the user whether to create it.

If no site provided, list available sites and ask the user to choose:
```
dcim_list_sites
```

#### 2.3 Verify/Create Platform

Based on the detected OS, check whether the platform exists:
```
dcim_list_platforms name=<platform-name>
```

**Platform naming:**
- `Raspberry Pi OS (Bookworm)` for Raspberry Pi
- `Ubuntu 24.04 LTS` for Ubuntu
- `Debian 12` for Debian
- Use the format `{OS Name} {Version}` (see the derivation sketch below)
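
A minimal bash sketch for deriving that name and a matching slug from `/etc/os-release` (assumes the standard `NAME` and `VERSION` fields are present; the slug rule is illustrative):

```bash
# Build "{OS Name} {Version}" and a URL-safe slug (sketch)
. /etc/os-release 2>/dev/null
PLATFORM_NAME="${NAME:-Unknown}${VERSION:+ $VERSION}"
PLATFORM_SLUG=$(echo "$PLATFORM_NAME" | tr '[:upper:]' '[:lower:]' | sed -e 's/[^a-z0-9]\{1,\}/-/g' -e 's/^-//' -e 's/-$//')
echo "$PLATFORM_NAME -> $PLATFORM_SLUG"   # e.g. "Ubuntu 24.04.1 LTS -> ubuntu-24-04-1-lts"
```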

If the platform doesn't exist, create it:
```
dcim_create_platform name=<platform-name> slug=<slug>
```

#### 2.4 Verify/Create Device Role

Based on detected services:
- If Docker containers found → `Docker Host`
- If only basic services → `Server`
- If a specific role was passed via `--role` → use that

```
dcim_list_device_roles name=<role-name>
```

### Phase 3: Device Registration (via MCP)

#### 3.1 Get/Create Manufacturer and Device Type

For Raspberry Pi:
```
dcim_list_manufacturers name="Raspberry Pi Foundation"
dcim_list_device_types manufacturer_id=X model="Raspberry Pi 4 Model B"
```

Create if not exists.

For generic x86 (see the detection sketch below):
```
dcim_list_manufacturers name=<detected-manufacturer>
```
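
On x86 hardware the manufacturer can usually be read from DMI (a sketch; falls back to "Unknown"):

```bash
# Detected manufacturer for the lookup above (sketch)
cat /sys/class/dmi/id/sys_vendor 2>/dev/null || echo "Unknown"
```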

#### 3.2 Create Device

```
dcim_create_device
  name=<hostname>
  device_type=<device_type_id>
  role=<role_id>
  site=<site_id>
  platform=<platform_id>
  tenant=<tenant_id>      # if provided
  serial=<serial>
  description="Registered via cmdb-assistant"
```

#### 3.3 Create Interfaces

For each network interface discovered:
```
dcim_create_interface
  device=<device_id>
  name=<interface_name>   # eth0, wlan0, tailscale0, etc.
  type=<type>             # 1000base-t, virtual, other
  mac_address=<mac>
  enabled=true
```

**Interface type mapping** (a classification sketch follows below):
- `eth*`, `enp*` → `1000base-t`
- `wlan*` → `ieee802.11ax` (or the appropriate Wi-Fi type)
- `tailscale*`, `docker*`, `br-*` → `virtual`
- `lo` → skip (loopback)
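
A bash sketch of this mapping (same patterns as above, extended with the common `eno*`/`wlp*` predictable names; anything unmatched falls back to `other`):

```bash
# Map an interface name to a NetBox interface type (sketch)
iface_type() {
    case "$1" in
        lo)                      echo "skip" ;;
        eth*|enp*|eno*)          echo "1000base-t" ;;
        wlan*|wlp*)              echo "ieee802.11ax" ;;
        tailscale*|docker*|br-*) echo "virtual" ;;
        *)                       echo "other" ;;
    esac
}
```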

#### 3.4 Create IP Addresses

For each IP on each interface:
```
ipam_create_ip_address
  address=<ip/prefix>     # e.g., "192.168.1.100/24"
  assigned_object_type="dcim.interface"
  assigned_object_id=<interface_id>
  status="active"
  description="Discovered via cmdb-register"
```

#### 3.5 Set Primary IP

Identify the primary IP - the address on the interface holding the default route (a detection sketch follows below):
```
dcim_update_device
  id=<device_id>
  primary_ip4=<primary_ip_id>
```
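
A bash sketch for finding that address (assumes Linux iproute2; takes the first IPv4 on the default-route interface):

```bash
# Primary IPv4 = first address on the default-route interface (sketch)
DEFAULT_IF=$(ip route | grep default | awk '{print $5}' | head -1)
ip -4 -o addr show dev "$DEFAULT_IF" | awk '{print $4}' | head -1   # e.g. 192.168.1.100/24
```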

### Phase 4: Container Registration (via MCP)

If Docker containers were discovered:

#### 4.1 Create/Get Cluster Type

```
virt_list_cluster_types name="Docker Compose"
```

Create if not exists:
```
virt_create_cluster_type name="Docker Compose" slug="docker-compose"
```

#### 4.2 Create Cluster

For each Docker Compose project directory found:
```
virt_create_cluster
  name=<project-name>     # e.g., "apps-hotport"
  type=<cluster_type_id>
  site=<site_id>
  description="Docker Compose stack on <hostname>"
```

#### 4.3 Create VMs for Containers

For each running container:
```
virt_create_vm
  name=<container_name>
  cluster=<cluster_id>
  site=<site_id>
  role=<role_id>          # Map container function to role
  status="active"
  vcpus=<cpu_shares>      # Default 1.0 if unknown
  memory=<memory_mb>      # Default 256 if unknown
  disk=<disk_gb>          # Default 5 if unknown
  description=<container purpose>
  comments=<image, ports, volumes info>
```

**Container role mapping** (a sketch follows below):
- `*caddy*`, `*nginx*`, `*traefik*` → "Reverse Proxy"
- `*db*`, `*postgres*`, `*mysql*`, `*redis*` → "Database"
- `*webui*`, `*frontend*` → "Web Application"
- Others → infer from the image name or use a generic "Container"
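
A bash sketch of this mapping (patterns as above; unmatched names fall back to the generic role):

```bash
# Map a container or image name to a NetBox role (sketch)
container_role() {
    case "$1" in
        *caddy*|*nginx*|*traefik*)       echo "Reverse Proxy" ;;
        *db*|*postgres*|*mysql*|*redis*) echo "Database" ;;
        *webui*|*frontend*)              echo "Web Application" ;;
        *)                               echo "Container" ;;
    esac
}
```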

### Phase 5: Documentation

#### 5.1 Add Journal Entry

```
extras_create_journal_entry
  assigned_object_type="dcim.device"
  assigned_object_id=<device_id>
  comments="Device registered via /cmdb-register command\n\nDiscovered:\n- X network interfaces\n- Y IP addresses\n- Z Docker containers"
```

### Phase 6: Summary Report

Present the registration summary:

```markdown
## Machine Registration Complete

### Device Created
- **Name:** <hostname>
- **Site:** <site>
- **Platform:** <platform>
- **Role:** <role>
- **ID:** <device_id>
- **URL:** https://netbox.example.com/dcim/devices/<id>/

### Network Interfaces
| Interface | Type | MAC | IP Address |
|-----------|------|-----|------------|
| eth0 | 1000base-t | aa:bb:cc:dd:ee:ff | 192.168.1.100/24 |
| tailscale0 | virtual | - | 100.x.x.x/32 |

### Primary IP: 192.168.1.100

### Docker Containers Registered (if applicable)
**Cluster:** <cluster_name> (ID: <cluster_id>)

| Container | Role | vCPUs | Memory | Status |
|-----------|------|-------|--------|--------|
| media_jellyfin | Media Server | 2.0 | 2048MB | Active |
| media_sonarr | Media Management | 1.0 | 512MB | Active |

### Next Steps
- Run `/cmdb-sync` periodically to keep data current
- Run `/cmdb-audit` to check data quality
- Add tags for classification (env:*, team:*, etc.)
```

## Error Handling

- **Device already exists:** Suggest `/cmdb-sync` or ask whether to proceed
- **Site not found:** List available sites, offer to create a new one
- **Docker not available:** Skip container registration, note it in the summary
- **Permission denied:** Note which operations failed, suggest fixes

## User Request

$ARGUMENTS

336 plugins/cmdb-assistant/commands/cmdb-sync.md (new file)
@@ -0,0 +1,336 @@
---
description: Synchronize current machine state with existing NetBox record
---

# CMDB Machine Sync

Update an existing NetBox device record with the current machine state. Compares local system information with NetBox and applies the changes.

## Usage

```
/cmdb-sync [--full] [--dry-run]
```

**Options:**
- `--full`: Force-refresh all fields, even unchanged ones
- `--dry-run`: Show what would change without applying updates

## Instructions

You are synchronizing the current machine's state with its NetBox record. This involves comparing the current system state with the stored data and updating the differences.

**IMPORTANT:** Load the `netbox-patterns` skill for best-practice reference.

### Phase 1: Device Lookup (via MCP)

First, find the existing device record:

```bash
# Get current hostname
hostname
```

```
dcim_list_devices name=<hostname>
```

**If device not found:**
- Inform user: "Device '<hostname>' not found in NetBox"
- Suggest: "Run `/cmdb-register` to register this machine first"
- Exit sync

**If device found:**
- Store the device ID and all current field values
- Fetch interfaces: `dcim_list_interfaces device_id=<device_id>`
- Fetch IPs: `ipam_list_ip_addresses device_id=<device_id>`

Also check for associated clusters/VMs:
```
virt_list_clusters                  # Look for a cluster associated with this device
virt_list_vms cluster=<cluster_id>  # If cluster found
```

### Phase 2: Current State Discovery (via Bash)

Gather current system information (same as `/cmdb-register`):

```bash
# Device info
hostname
cat /etc/os-release 2>/dev/null || uname -a
nproc
free -m | awk '/Mem:/ {print $2}'
df -BG / | awk 'NR==2 {print $2}' | tr -d 'G'

# Network interfaces with IPs
ip -j addr show 2>/dev/null || ip addr show

# Docker containers
docker ps --format '{"name":"{{.Names}}","image":"{{.Image}}","status":"{{.Status}}"}' 2>/dev/null || echo "[]"
```

### Phase 3: Comparison

Compare the discovered state with the NetBox record; a comparison sketch follows the tables below.

#### 3.1 Device Attributes

| Field | Compare |
|-------|---------|
| Platform | OS version changed? |
| Status | Still active? |
| Serial | Match? |
| Description | Keep existing |

#### 3.2 Network Interfaces

| Change Type | Detection |
|-------------|-----------|
| New interface | Interface exists locally but not in NetBox |
| Removed interface | Interface in NetBox but not locally |
| Changed MAC | MAC address differs |
| Interface type | Type mismatch |

#### 3.3 IP Addresses

| Change Type | Detection |
|-------------|-----------|
| New IP | IP exists locally but not in NetBox |
| Removed IP | IP in NetBox but not locally (on this device) |
| Primary IP changed | Default-route interface changed |

#### 3.4 Docker Containers

| Change Type | Detection |
|-------------|-----------|
| New container | Container running locally but no VM in cluster |
| Stopped container | VM exists but container not running |
| Resource change | vCPUs/memory differ (if trackable) |
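
A minimal bash sketch of the interface comparison (assumes `jq` is available and that the NetBox interface names from Phase 1 were saved one per line to `netbox-ifaces.txt`, an illustrative filename):

```bash
# Interfaces present locally vs. in NetBox (sketch)
ip -j addr show | jq -r '.[].ifname' | grep -v '^lo$' | sort > /tmp/local-ifaces
sort netbox-ifaces.txt > /tmp/netbox-ifaces
comm -23 /tmp/local-ifaces /tmp/netbox-ifaces   # new: create in NetBox
comm -13 /tmp/local-ifaces /tmp/netbox-ifaces   # removed: mark offline
```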

### Phase 4: Diff Report

Present the changes to the user:

```markdown
## Sync Diff Report

**Device:** <hostname> (ID: <device_id>)
**NetBox URL:** https://netbox.example.com/dcim/devices/<id>/

### Device Attributes
| Field | NetBox Value | Current Value | Action |
|-------|--------------|---------------|--------|
| Platform | Ubuntu 22.04 | Ubuntu 24.04 | UPDATE |
| Status | active | active | - |

### Network Interfaces

#### New Interfaces (will create)
| Interface | Type | MAC | IPs |
|-----------|------|-----|-----|
| tailscale0 | virtual | - | 100.x.x.x/32 |

#### Removed Interfaces (will mark offline)
| Interface | Type | Reason |
|-----------|------|--------|
| eth1 | 1000base-t | Not found locally |

#### Changed Interfaces
| Interface | Field | Old | New |
|-----------|-------|-----|-----|
| eth0 | mac_address | aa:bb:cc:00:00:00 | aa:bb:cc:11:11:11 |

### IP Addresses

#### New IPs (will create)
- 192.168.1.150/24 on eth0

#### Removed IPs (will unassign)
- 192.168.1.100/24 from eth0

### Docker Containers

#### New Containers (will create VMs)
| Container | Image | Role |
|-----------|-------|------|
| media_lidarr | linuxserver/lidarr | Media Management |

#### Stopped Containers (will mark offline)
| Container | Last Status |
|-----------|-------------|
| media_bazarr | Exited |

### Summary
- **Updates:** X
- **Creates:** Y
- **Removals/Offline:** Z
```

### Phase 5: User Confirmation

If not `--dry-run`:

```
The following changes will be applied:
- Update device platform to "Ubuntu 24.04"
- Create interface "tailscale0"
- Create IP "100.x.x.x/32" on tailscale0
- Create VM "media_lidarr" in cluster
- Mark VM "media_bazarr" as offline

Proceed with sync? [Y/n]
```

**Use AskUserQuestion** to get confirmation.

### Phase 6: Apply Updates (via MCP)

Only if the user confirms (or `--full` was specified):

#### 6.1 Device Updates

```
dcim_update_device
  id=<device_id>
  platform=<new_platform_id>
  # ... other changed fields
```

#### 6.2 Interface Updates

**For new interfaces:**
```
dcim_create_interface
  device=<device_id>
  name=<interface_name>
  type=<type>
  mac_address=<mac>
  enabled=true
```

**For removed interfaces:**
```
dcim_update_interface
  id=<interface_id>
  enabled=false
  description="Marked offline by cmdb-sync - interface no longer present"
```

**For changed interfaces:**
```
dcim_update_interface
  id=<interface_id>
  mac_address=<new_mac>
```

#### 6.3 IP Address Updates

**For new IPs:**
```
ipam_create_ip_address
  address=<ip/prefix>
  assigned_object_type="dcim.interface"
  assigned_object_id=<interface_id>
  status="active"
```

**For removed IPs:**
```
ipam_update_ip_address
  id=<ip_id>
  assigned_object_type=null
  assigned_object_id=null
  description="Unassigned by cmdb-sync"
```

#### 6.4 Primary IP Update

If the primary IP changed:
```
dcim_update_device
  id=<device_id>
  primary_ip4=<new_primary_ip_id>
```

#### 6.5 Container/VM Updates

**For new containers:**
```
virt_create_vm
  name=<container_name>
  cluster=<cluster_id>
  status="active"
  # ... other fields
```

**For stopped containers:**
```
virt_update_vm
  id=<vm_id>
  status="offline"
  description="Container stopped - detected by cmdb-sync"
```

### Phase 7: Journal Entry

Document the sync:

```
extras_create_journal_entry
  assigned_object_type="dcim.device"
  assigned_object_id=<device_id>
  comments="Device synced via /cmdb-sync command\n\nChanges applied:\n- <list of changes>"
```

### Phase 8: Summary Report

```markdown
## Sync Complete

**Device:** <hostname>
**Sync Time:** <timestamp>

### Changes Applied
- Updated platform: Ubuntu 22.04 → Ubuntu 24.04
- Created interface: tailscale0 (ID: X)
- Created IP: 100.x.x.x/32 (ID: Y)
- Created VM: media_lidarr (ID: Z)
- Marked VM offline: media_bazarr (ID: W)

### Current State
- **Interfaces:** 4 (3 active, 1 offline)
- **IP Addresses:** 5
- **Containers/VMs:** 8 (7 active, 1 offline)

### Next Sync
Run `/cmdb-sync` again after:
- Adding/removing Docker containers
- Changing network configuration
- OS upgrades
```

## Dry Run Mode

If `--dry-run` is specified:
- Complete Phases 1-4 (lookup, discovery, compare, diff report)
- Skip Phases 5-8 (no confirmation, no updates, no journal)
- End with: "Dry run complete. No changes applied. Run without --dry-run to apply."

## Full Sync Mode

If `--full` is specified:
- Skip user confirmation
- Update all fields even if unchanged (force refresh)
- Useful for ensuring NetBox matches the current state exactly

## Error Handling

- **Device not found:** Suggest `/cmdb-register`
- **Permission denied on updates:** Note which failed, continue with the others
- **Cluster not found:** Offer to create it or skip container sync
- **API errors:** Log the error, continue with the remaining updates

## User Request

$ARGUMENTS

21 plugins/cmdb-assistant/hooks/hooks.json (new file)
@@ -0,0 +1,21 @@
{
  "hooks": {
    "SessionStart": [
      {
        "type": "command",
        "command": "${CLAUDE_PLUGIN_ROOT}/hooks/startup-check.sh"
      }
    ],
    "PreToolUse": [
      {
        "matcher": "mcp__plugin_cmdb-assistant_netbox__virt_create|mcp__plugin_cmdb-assistant_netbox__virt_update|mcp__plugin_cmdb-assistant_netbox__dcim_create|mcp__plugin_cmdb-assistant_netbox__dcim_update",
        "hooks": [
          {
            "type": "command",
            "command": "${CLAUDE_PLUGIN_ROOT}/hooks/validate-input.sh"
          }
        ]
      }
    ]
  }
}

66 plugins/cmdb-assistant/hooks/startup-check.sh (new executable file)
@@ -0,0 +1,66 @@
#!/bin/bash
# cmdb-assistant SessionStart hook
# Tests NetBox API connectivity and checks for data quality issues
# All output MUST have [cmdb-assistant] prefix
# Non-blocking: always exits 0

set -euo pipefail

PREFIX="[cmdb-assistant]"

# Load NetBox configuration
NETBOX_CONFIG="$HOME/.config/claude/netbox.env"
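# Expected file contents (illustrative values):
#   NETBOX_API_URL="https://netbox.example.com/api"
#   NETBOX_API_TOKEN="<your-api-token>"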

if [[ ! -f "$NETBOX_CONFIG" ]]; then
    echo "$PREFIX NetBox not configured - run /cmdb-assistant:initial-setup"
    exit 0
fi

# Source config
source "$NETBOX_CONFIG"

# Validate required variables
if [[ -z "${NETBOX_API_URL:-}" ]] || [[ -z "${NETBOX_API_TOKEN:-}" ]]; then
    echo "$PREFIX Missing NETBOX_API_URL or NETBOX_API_TOKEN in config"
    exit 0
fi

# Quick API connectivity test (5s timeout)
HTTP_CODE=$(curl -s -o /dev/null -w "%{http_code}" -m 5 \
    -H "Authorization: Token $NETBOX_API_TOKEN" \
    -H "Accept: application/json" \
    "${NETBOX_API_URL}/" 2>/dev/null || echo "000")

if [[ "$HTTP_CODE" == "000" ]]; then
    echo "$PREFIX NetBox API unreachable (timeout/connection error)"
    exit 0
elif [[ "$HTTP_CODE" != "200" ]]; then
    echo "$PREFIX NetBox API returned HTTP $HTTP_CODE - check credentials"
    exit 0
fi

# Check for VMs without site assignment (data quality)
VMS_RESPONSE=$(curl -s -m 5 \
    -H "Authorization: Token $NETBOX_API_TOKEN" \
    -H "Accept: application/json" \
    "${NETBOX_API_URL}/virtualization/virtual-machines/?site__isnull=true&limit=1" 2>/dev/null || echo '{"count":0}')

VMS_NO_SITE=$(echo "$VMS_RESPONSE" | grep -o '"count":[0-9]*' | cut -d: -f2 || echo "0")

if [[ "$VMS_NO_SITE" -gt 0 ]]; then
    echo "$PREFIX $VMS_NO_SITE VMs without site assignment - run /cmdb-audit for details"
fi

# Check for devices without platform
DEVICES_RESPONSE=$(curl -s -m 5 \
    -H "Authorization: Token $NETBOX_API_TOKEN" \
    -H "Accept: application/json" \
    "${NETBOX_API_URL}/dcim/devices/?platform__isnull=true&limit=1" 2>/dev/null || echo '{"count":0}')

DEVICES_NO_PLATFORM=$(echo "$DEVICES_RESPONSE" | grep -o '"count":[0-9]*' | cut -d: -f2 || echo "0")

if [[ "$DEVICES_NO_PLATFORM" -gt 0 ]]; then
    echo "$PREFIX $DEVICES_NO_PLATFORM devices without platform - consider updating"
fi

exit 0

79 plugins/cmdb-assistant/hooks/validate-input.sh (new executable file)
@@ -0,0 +1,79 @@
#!/bin/bash
# cmdb-assistant PreToolUse validation hook
# Validates input parameters for create/update operations
# NON-BLOCKING: warns but allows the operation to proceed (always exits 0)
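#
# Example invocation (illustrative payload; output assumes the checks below):
#   echo '{"tool_name":"virt_create_vm","tool_input":{"name":"web-01"}}' | ./validate-input.sh
#   [cmdb-assistant] VM best practice: no site assigned, no tenant assigned, no platform assigned - consider assigning for data quality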

set -euo pipefail

PREFIX="[cmdb-assistant]"

# Read tool input from stdin
INPUT=$(cat)

# Extract tool name from the input
# Format varies; try to find a tool_name or name field
TOOL_NAME=""
if echo "$INPUT" | grep -q '"tool_name"'; then
    TOOL_NAME=$(echo "$INPUT" | grep -o '"tool_name"[[:space:]]*:[[:space:]]*"[^"]*"' | head -1 | sed 's/.*"\([^"]*\)"$/\1/' || true)
elif echo "$INPUT" | grep -q '"name"'; then
    TOOL_NAME=$(echo "$INPUT" | grep -o '"name"[[:space:]]*:[[:space:]]*"[^"]*"' | head -1 | sed 's/.*"\([^"]*\)"$/\1/' || true)
fi

# If we can't determine the tool, exit silently
if [[ -z "$TOOL_NAME" ]]; then
    exit 0
fi

# Join warnings with ", " (bash joins "${arr[*]}" with only the first
# character of IFS, so build the separator explicitly)
join_warnings() {
    local joined
    joined=$(printf '%s, ' "$@")
    echo "${joined%, }"
}

# VM creation/update validation
if echo "$TOOL_NAME" | grep -qE "virt_create_vm|virt_create_virtual_machine|virt_update_vm|virt_update_virtual_machine"; then
    WARNINGS=()

    # Check for missing site
    if ! echo "$INPUT" | grep -qE '"site"[[:space:]]*:[[:space:]]*[0-9]'; then
        WARNINGS+=("no site assigned")
    fi

    # Check for missing tenant
    if ! echo "$INPUT" | grep -qE '"tenant"[[:space:]]*:[[:space:]]*[0-9]'; then
        WARNINGS+=("no tenant assigned")
    fi

    # Check for missing platform
    if ! echo "$INPUT" | grep -qE '"platform"[[:space:]]*:[[:space:]]*[0-9]'; then
        WARNINGS+=("no platform assigned")
    fi

    if [[ ${#WARNINGS[@]} -gt 0 ]]; then
        echo "$PREFIX VM best practice: $(join_warnings "${WARNINGS[@]}") - consider assigning for data quality"
    fi
fi

# Device creation/update validation
if echo "$TOOL_NAME" | grep -qE "dcim_create_device|dcim_update_device"; then
    WARNINGS=()

    # Check for missing platform
    if ! echo "$INPUT" | grep -qE '"platform"[[:space:]]*:[[:space:]]*[0-9]'; then
        WARNINGS+=("no platform assigned")
    fi

    # Check for missing tenant
    if ! echo "$INPUT" | grep -qE '"tenant"[[:space:]]*:[[:space:]]*[0-9]'; then
        WARNINGS+=("no tenant assigned")
    fi

    if [[ ${#WARNINGS[@]} -gt 0 ]]; then
        echo "$PREFIX Device best practice: $(join_warnings "${WARNINGS[@]}") - consider assigning"
    fi
fi

# Cluster creation validation
if echo "$TOOL_NAME" | grep -qE "virt_create_cluster"; then
    # Check for missing site scope
    if ! echo "$INPUT" | grep -qE '"site"[[:space:]]*:[[:space:]]*[0-9]'; then
        echo "$PREFIX Cluster best practice: no site scope - clusters should be scoped to a site"
    fi
fi

# Always allow the operation (non-blocking)
exit 0

249 plugins/cmdb-assistant/skills/netbox-patterns/SKILL.md (new file)
@@ -0,0 +1,249 @@
---
description: NetBox best practices for data quality and consistency based on official NetBox Labs guidelines
---

# NetBox Best Practices Skill

Reference documentation for proper NetBox data modeling, following official NetBox Labs guidelines.

## CRITICAL: Dependency Order

Objects must be created in this order due to foreign-key dependencies. Creating objects out of order results in validation errors.

```
1. ORGANIZATION (no dependencies)
   ├── Tenant Groups
   ├── Tenants (optional: Tenant Group)
   ├── Regions
   ├── Site Groups
   └── Tags

2. SITES AND LOCATIONS
   ├── Sites (optional: Region, Site Group, Tenant)
   └── Locations (requires: Site, optional: parent Location)

3. DCIM PREREQUISITES
   ├── Manufacturers
   ├── Device Types (requires: Manufacturer)
   ├── Platforms
   ├── Device Roles
   └── Rack Roles

4. RACKS
   └── Racks (requires: Site, optional: Location, Rack Role, Tenant)

5. DEVICES
   ├── Devices (requires: Device Type, Role, Site; optional: Rack, Location)
   └── Interfaces (requires: Device)

6. VIRTUALIZATION
   ├── Cluster Types
   ├── Cluster Groups
   ├── Clusters (requires: Cluster Type, optional: Site)
   ├── Virtual Machines (requires: Cluster OR Site)
   └── VM Interfaces (requires: Virtual Machine)

7. IPAM
   ├── VRFs (optional: Tenant)
   ├── Prefixes (optional: VRF, Site, Tenant)
   ├── IP Addresses (optional: VRF, Tenant, Interface)
   └── VLANs (optional: Site, Tenant)

8. CONNECTIONS (last)
   └── Cables (requires: endpoints)
```

**Key Rule:** NEVER create a VM before its cluster exists. NEVER create a device before its site exists.

## HIGH: Site Assignment

**All infrastructure objects should have a site:**

| Object Type | Site Requirement |
|-------------|------------------|
| Devices | **REQUIRED** |
| Racks | **REQUIRED** |
| VMs | RECOMMENDED (via cluster or direct) |
| Clusters | RECOMMENDED |
| Prefixes | RECOMMENDED |
| VLANs | RECOMMENDED |

**Why Sites Matter:**
- Location-based queries and filtering
- Power and capacity budgeting
- Physical inventory tracking
- Compliance and audit requirements

## HIGH: Tenant Usage

Use tenants for logical resource separation:

**When to Use Tenants:**
- Multi-team environments (assign resources to teams)
- Multi-customer scenarios (MSP, hosting)
- Cost allocation requirements
- Access control boundaries

**Apply Tenants To:**
- Sites (who owns the physical location)
- Devices (who operates the hardware)
- VMs (who owns the workload)
- Prefixes (who owns the IP space)
- VLANs (who owns the network segment)

## HIGH: Platform Tracking

Platforms track OS/runtime information for automation and lifecycle management.

**Platform Examples:**
| Device Type | Platform Examples |
|-------------|-------------------|
| Servers | Ubuntu 24.04, Windows Server 2022, RHEL 9 |
| Network | Cisco IOS 17.x, Junos 23.x, Arista EOS |
| Raspberry Pi | Raspberry Pi OS (Bookworm), Ubuntu Server ARM |
| Containers | Docker Container (as runtime indicator) |

**Benefits:**
- Vulnerability tracking (CVE correlation)
- Configuration management integration
- Lifecycle management (EOL tracking)
- Automation targeting

## MEDIUM: Tag Conventions

Use tags for cross-cutting classification that spans object types.

**Recommended Tag Patterns:**

| Pattern | Purpose | Examples |
|---------|---------|----------|
| `env:*` | Environment classification | `env:production`, `env:staging`, `env:development` |
| `app:*` | Application grouping | `app:web`, `app:database`, `app:monitoring` |
| `team:*` | Ownership | `team:platform`, `team:infra`, `team:devops` |
| `backup:*` | Backup policy | `backup:daily`, `backup:weekly`, `backup:none` |
| `monitoring:*` | Monitoring level | `monitoring:critical`, `monitoring:standard` |

**Tags vs Custom Fields:**
- Tags: cross-object classification, multiple values, filtering
- Custom fields: object-specific structured data, single values, reporting

## MEDIUM: Naming Conventions

Consistent naming improves searchability and automation compatibility.

**Recommended Patterns:**

| Object Type | Pattern | Examples |
|-------------|---------|----------|
| Devices | `{role}-{location}-{number}` | `web-dc1-01`, `db-cloud-02`, `fw-home-01` |
| VMs | `{env}-{app}-{number}` | `prod-api-01`, `dev-worker-03` |
| Clusters | `{site}-{type}` | `dc1-vmware`, `home-docker` |
| Prefixes | Include purpose in description | "Production web tier /24" |
| VLANs | `{site}-{function}` | `dc1-mgmt`, `home-iot` |

**Avoid:**
- Inconsistent casing (mixing `HotServ` and `hotserv`)
- Mixed separators (mixing `hhl_cluster` and `media-cluster`)
- Generic names without context (`server1`, `vm2`)
- Special characters other than the hyphen

## MEDIUM: Role Consolidation

Avoid role fragmentation - use general roles with platform/tags for specificity.

**Instead of:**
```
nginx-web-server
apache-web-server
web-server-frontend
web-server-api
```

**Use:**
```
web-server (role) + platform (nginx/apache) + tags (frontend, api)
```

**Recommended Role Categories:**

| Category | Roles |
|----------|-------|
| Infrastructure | `hypervisor`, `storage-server`, `network-device`, `firewall` |
| Compute | `application-server`, `database-server`, `web-server`, `api-server` |
| Services | `container-host`, `load-balancer`, `monitoring-server`, `backup-server` |
| Development | `development-workstation`, `ci-runner`, `build-server` |
| Containers | `reverse-proxy`, `database`, `cache`, `queue`, `worker` |

## Docker Containers as VMs

NetBox's Virtualization module can model Docker containers:

**Approach** (sketched as MCP calls below):
1. Create a device for the physical Docker host
2. Create a cluster (type: "Docker Compose" or "Docker Swarm")
3. Associate the cluster with the host device
4. Create VMs for each container in the cluster
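
Sketched as MCP calls (names and IDs illustrative):

```
virt_create_cluster_type name="Docker Compose" slug="docker-compose"
virt_create_cluster name="home-docker" type=<cluster_type_id> site=<site_id>
virt_create_vm name="media_jellyfin" cluster=<cluster_id> role=<role_id> status="active"
```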

**VM Fields for Containers:**
- `name`: Container name (e.g., `media_jellyfin`)
- `role`: Container function (e.g., `Media Server`)
- `vcpus`: CPU limit/shares
- `memory`: Memory limit (MB)
- `disk`: Volume size estimate
- `description`: Container purpose
- `comments`: Image, ports, volumes, dependencies

**This is a pragmatic modeling choice** - containers aren't VMs, but the Virtualization module is the closest fit for tracking container workloads.

## Primary IP Workflow

To set a device/VM's primary IP (sketched below):

1. Create an interface on the device/VM
2. Create an IP address assigned to that interface
3. Set the IP as `primary_ip4` or `primary_ip6` on the device/VM
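
As MCP calls (IDs illustrative):

```
dcim_create_interface device=<device_id> name="eth0" type="1000base-t"
ipam_create_ip_address address="192.168.1.100/24" assigned_object_type="dcim.interface" assigned_object_id=<interface_id>
dcim_update_device id=<device_id> primary_ip4=<ip_id>
```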

**Why Primary IP Matters:**
- Used for device connectivity checks
- Displayed in device list views
- Used by automation tools (NAPALM, Ansible)
- Required for many integrations

## Data Quality Checklist

Before closing a sprint or audit:

- [ ] All VMs have a site assignment (direct or via cluster)
- [ ] All VMs have a tenant assignment
- [ ] All active devices have a platform
- [ ] All active devices have a primary IP
- [ ] Naming follows conventions
- [ ] No orphaned prefixes (allocated but unused)
- [ ] Tags applied consistently
- [ ] Clusters scoped to sites
- [ ] Roles not overly fragmented

## MCP Tool Reference

**Dependency Order for Creation:**
```
1. dcim_create_site
2. dcim_create_manufacturer
3. dcim_create_device_type
4. dcim_create_device_role
5. dcim_create_platform
6. dcim_create_device
7. virt_create_cluster_type
8. virt_create_cluster
9. virt_create_vm
10. dcim_create_interface / virt_create_vm_interface
11. ipam_create_ip_address
12. dcim_update_device (set primary_ip4)
```

**Lookup Before Create:**
Always check whether an object exists before creating it, to avoid duplicates:
```
1. dcim_list_devices name=<hostname>
2. If exists, update; if not, create
```

@@ -2,9 +2,8 @@
   "mcpServers": {
     "contract-validator": {
       "type": "stdio",
-      "command": "${CLAUDE_PLUGIN_ROOT}/mcp-servers/contract-validator/.venv/bin/python",
-      "args": ["-m", "mcp_server.server"],
-      "cwd": "${CLAUDE_PLUGIN_ROOT}/mcp-servers/contract-validator"
+      "command": "${CLAUDE_PLUGIN_ROOT}/mcp-servers/contract-validator/run.sh",
+      "args": []
     }
   }
 }

@@ -40,6 +40,7 @@ pip install -r requirements.txt

 | Command | Description |
 |---------|-------------|
+| `/initial-setup` | Interactive setup wizard |
 | `/validate-contracts` | Full marketplace compatibility validation |
 | `/check-agent` | Validate single agent definition |
 | `/list-interfaces` | Show all plugin interfaces |

152 plugins/contract-validator/commands/initial-setup.md (new file)
@@ -0,0 +1,152 @@
---
description: Interactive setup wizard for contract-validator plugin - verifies MCP server and shows capabilities
---

# Contract-Validator Setup Wizard

This command sets up the contract-validator plugin for cross-plugin compatibility validation.

## Important Context

- **This command uses the Bash, Read, Write, and AskUserQuestion tools** - NOT MCP tools
- **MCP tools won't work until after setup + a session restart**
- **No external credentials required** - this plugin validates local files only

---

## Phase 1: Environment Validation

### Step 1.1: Check Python Version

```bash
python3 --version
```

Requires Python 3.10+. If below, stop setup and inform the user:
```
Python 3.10 or higher is required. Please install it and run /initial-setup again.
```

---

## Phase 2: MCP Server Setup

### Step 2.1: Locate Contract-Validator MCP Server

```bash
# If running from installed marketplace
ls -la ~/.claude/plugins/marketplaces/leo-claude-mktplace/mcp-servers/contract-validator/ 2>/dev/null || echo "NOT_FOUND_INSTALLED"

# If running from source
ls -la ~/claude-plugins-work/mcp-servers/contract-validator/ 2>/dev/null || echo "NOT_FOUND_SOURCE"
```

Determine which path exists and use that as the MCP server path.

### Step 2.2: Check Virtual Environment

```bash
ls -la /path/to/mcp-servers/contract-validator/.venv/bin/python 2>/dev/null && echo "VENV_EXISTS" || echo "VENV_MISSING"
```

### Step 2.3: Create Virtual Environment (if missing)

```bash
cd /path/to/mcp-servers/contract-validator && python3 -m venv .venv && source .venv/bin/activate && pip install --upgrade pip && pip install -r requirements.txt && deactivate
```

**If pip install fails:**
- Show the error to the user
- Suggest: "Check your internet connection and try again."

---

## Phase 3: Validation

### Step 3.1: Verify MCP Server

```bash
cd /path/to/mcp-servers/contract-validator && .venv/bin/python -c "from mcp_server.server import ContractValidatorMCPServer; print('MCP Server OK')"
```

If this fails, check the error and report it to the user.

### Step 3.2: Summary

Display:

```
╔════════════════════════════════════════════════════════════════╗
║                 CONTRACT-VALIDATOR SETUP COMPLETE               ║
╠════════════════════════════════════════════════════════════════╣
║ MCP Server:        ✓ Ready                                      ║
║ Parse Tools:       ✓ Available (2 tools)                        ║
║ Validation Tools:  ✓ Available (3 tools)                        ║
║ Report Tools:      ✓ Available (2 tools)                        ║
╚════════════════════════════════════════════════════════════════╝
```

### Step 3.3: Session Restart Notice

---

**Session Restart Required**

Restart your Claude Code session for MCP tools to become available.

**After restart, you can:**
- Run `/validate-contracts` to check all plugins for compatibility issues
- Run `/check-agent` to validate a single agent definition
- Run `/list-interfaces` to see all plugin commands and tools

---

## Available Tools

| Category | Tools | Description |
|----------|-------|-------------|
| Parse | `parse_plugin_interface`, `parse_claude_md_agents` | Extract interfaces from README.md and agents from CLAUDE.md |
| Validation | `validate_compatibility`, `validate_agent_refs`, `validate_data_flow` | Check conflicts, tool references, and data flows |
| Report | `generate_compatibility_report`, `list_issues` | Generate reports and filter issues |

---

## Available Commands

| Command | Description |
|---------|-------------|
| `/validate-contracts` | Full marketplace compatibility validation |
| `/check-agent` | Validate single agent definition |
| `/list-interfaces` | Show all plugin interfaces |

---

## Use Cases

### 1. Pre-Release Validation
Run `/validate-contracts` before releasing a new marketplace version to catch:
- Command name conflicts between plugins
- Missing tool references in agents
- Broken data flows

### 2. Agent Development
Run `/check-agent` when creating or modifying agents to verify:
- All referenced tools exist
- Data flows are valid
- No undeclared dependencies

### 3. Plugin Audit
Run `/list-interfaces` to get a complete view of:
- All commands across plugins
- All tools available
- Potential overlap areas

---

## No Configuration Required

This plugin doesn't require any configuration files. It reads plugin manifests and README files directly from the filesystem.

**Paths it scans:**
- Marketplace: `~/.claude/plugins/marketplaces/leo-claude-mktplace/plugins/`
- Source (if available): `~/claude-plugins-work/plugins/`

195 plugins/contract-validator/hooks/auto-validate.sh (new executable file)
@@ -0,0 +1,195 @@
#!/bin/bash
# contract-validator SessionStart auto-validate hook
# Validates plugin contracts only when plugin files have changed since the last check
# All output MUST have [contract-validator] prefix

PREFIX="[contract-validator]"

# ============================================================================
# Configuration
# ============================================================================

# Enable/disable auto-check (default: true)
AUTO_CHECK="${CONTRACT_VALIDATOR_AUTO_CHECK:-true}"

# Cache location for storing the last-check hash
CACHE_DIR="$HOME/.cache/claude-plugins/contract-validator"
HASH_FILE="$CACHE_DIR/last-check.hash"
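# To force a full re-validation, remove the cached hash:
#   rm -f ~/.cache/claude-plugins/contract-validator/last-check.hash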

# Marketplace location (installed plugins)
MARKETPLACE_PATH="$HOME/.claude/plugins/marketplaces/leo-claude-mktplace"

# ============================================================================
# Early exit if disabled
# ============================================================================

if [[ "$AUTO_CHECK" != "true" ]]; then
    exit 0
fi

# ============================================================================
# Smart mode: check whether plugin files have changed
# ============================================================================

# Compute a hash over all plugin manifest files
compute_plugin_hash() {
    local hash_input=""

    if [[ -d "$MARKETPLACE_PATH/plugins" ]]; then
        # Hash all plugin.json, hooks.json, and agent files
        while IFS= read -r -d '' file; do
            if [[ -f "$file" ]]; then
                hash_input+="$(md5sum "$file" 2>/dev/null | cut -d' ' -f1)"
            fi
        done < <(find "$MARKETPLACE_PATH/plugins" \
            \( -name "plugin.json" -o -name "hooks.json" -o -name "*.md" -path "*/agents/*" \) \
            -print0 2>/dev/null | sort -z)
    fi

    # Also include marketplace.json
    if [[ -f "$MARKETPLACE_PATH/.claude-plugin/marketplace.json" ]]; then
        hash_input+="$(md5sum "$MARKETPLACE_PATH/.claude-plugin/marketplace.json" 2>/dev/null | cut -d' ' -f1)"
    fi

    # Compute the final hash
    echo "$hash_input" | md5sum | cut -d' ' -f1
}

# Ensure the cache directory exists
mkdir -p "$CACHE_DIR" 2>/dev/null

# Compute the current hash
CURRENT_HASH=$(compute_plugin_hash)

# Check whether we have a previous hash
if [[ -f "$HASH_FILE" ]]; then
    PREVIOUS_HASH=$(cat "$HASH_FILE" 2>/dev/null)

    # If the hashes match, nothing changed - skip validation
    if [[ "$CURRENT_HASH" == "$PREVIOUS_HASH" ]]; then
        exit 0
    fi
fi

# ============================================================================
# Run validation (hashes differ or no cache)
# ============================================================================

ISSUES_FOUND=0
WARNINGS=""

# Append a warning line
add_warning() {
    WARNINGS+=" - $1"$'\n'
    ((ISSUES_FOUND++))
}

# 1. Check all installed plugins have a valid plugin.json
if [[ -d "$MARKETPLACE_PATH/plugins" ]]; then
    for plugin_dir in "$MARKETPLACE_PATH/plugins"/*/; do
        if [[ -d "$plugin_dir" ]]; then
            plugin_name=$(basename "$plugin_dir")
            plugin_json="$plugin_dir/.claude-plugin/plugin.json"

            if [[ ! -f "$plugin_json" ]]; then
                add_warning "$plugin_name: missing .claude-plugin/plugin.json"
                continue
            fi

            # Basic JSON validation
            if ! python3 -c "import json; json.load(open('$plugin_json'))" 2>/dev/null; then
                add_warning "$plugin_name: invalid JSON in plugin.json"
                continue
            fi

            # Check required fields
            if ! python3 -c "
import json
with open('$plugin_json') as f:
    data = json.load(f)
required = ['name', 'version', 'description']
missing = [r for r in required if r not in data]
if missing:
    exit(1)
" 2>/dev/null; then
                add_warning "$plugin_name: plugin.json missing required fields"
            fi
        fi
    done
fi

# 2. Check hooks.json files are properly formatted
if [[ -d "$MARKETPLACE_PATH/plugins" ]]; then
    while IFS= read -r -d '' hooks_file; do
        plugin_name=$(basename "$(dirname "$(dirname "$hooks_file")")")

        # Validate JSON
        if ! python3 -c "import json; json.load(open('$hooks_file'))" 2>/dev/null; then
            add_warning "$plugin_name: invalid JSON in hooks/hooks.json"
            continue
        fi

        # Validate hook structure
        if ! python3 -c "
import json
with open('$hooks_file') as f:
    data = json.load(f)
if 'hooks' not in data:
    exit(1)
valid_events = ['PreToolUse', 'PostToolUse', 'UserPromptSubmit', 'SessionStart', 'SessionEnd', 'Notification', 'Stop', 'SubagentStop', 'PreCompact']
for event in data['hooks']:
    if event not in valid_events:
        exit(1)
    for hook in data['hooks'][event]:
        # Support both the flat structure (type at top level) and the
        # nested structure (matcher + hooks array)
        if 'type' in hook:
            # Flat structure: {type: 'command', command: '...'}
            pass
        elif 'matcher' in hook and 'hooks' in hook:
            # Nested structure: {matcher: '...', hooks: [{type: 'command', ...}]}
            for nested_hook in hook['hooks']:
                if 'type' not in nested_hook:
                    exit(1)
        else:
            exit(1)
" 2>/dev/null; then
            add_warning "$plugin_name: hooks.json has invalid structure or events"
        fi
    done < <(find "$MARKETPLACE_PATH/plugins" -path "*/hooks/hooks.json" -print0 2>/dev/null)
fi

# 3. Check agent references are valid (agent files exist and are markdown)
if [[ -d "$MARKETPLACE_PATH/plugins" ]]; then
    while IFS= read -r -d '' agent_file; do
        plugin_name=$(basename "$(dirname "$(dirname "$agent_file")")")
        agent_name=$(basename "$agent_file")

        # Check the file is not empty
        if [[ ! -s "$agent_file" ]]; then
            add_warning "$plugin_name: empty agent file $agent_name"
            continue
        fi

        # Check the file has markdown content (at least a header)
        if ! grep -q '^#' "$agent_file" 2>/dev/null; then
            add_warning "$plugin_name: agent $agent_name missing markdown header"
        fi
    done < <(find "$MARKETPLACE_PATH/plugins" -path "*/agents/*.md" -print0 2>/dev/null)
fi

# ============================================================================
# Store new hash and report results
# ============================================================================

# Always store the new hash (even if issues were found - we don't want to recheck)
echo "$CURRENT_HASH" > "$HASH_FILE"

# Report any issues found (non-blocking warning)
if [[ $ISSUES_FOUND -gt 0 ]]; then
    echo "$PREFIX Plugin contract validation found $ISSUES_FOUND issue(s):"
    echo "$WARNINGS"
    echo "$PREFIX Run /validate-contracts for full details"
fi

# Always exit 0 (non-blocking)
exit 0

174 plugins/contract-validator/hooks/breaking-change-check.sh (new executable file)
@@ -0,0 +1,174 @@
#!/bin/bash
# contract-validator breaking change detection hook
# Warns when plugin interface changes might break consumers
# This is a PostToolUse hook - non-blocking, warnings only

PREFIX="[contract-validator]"

# Check if warnings are enabled (default: true)
if [[ "${CONTRACT_VALIDATOR_BREAKING_WARN:-true}" != "true" ]]; then
    exit 0
fi

# Read tool input from stdin
INPUT=$(cat)

# Extract file_path from the JSON input
FILE_PATH=$(echo "$INPUT" | grep -o '"file_path"[[:space:]]*:[[:space:]]*"[^"]*"' | head -1 | sed 's/.*"file_path"[[:space:]]*:[[:space:]]*"\([^"]*\)".*/\1/')

# If no file_path found, exit silently
if [ -z "$FILE_PATH" ]; then
    exit 0
fi

# Check if the file is a plugin interface file
is_interface_file() {
    local file="$1"

    case "$file" in
        */plugin.json) return 0 ;;
        */.claude-plugin/plugin.json) return 0 ;;
        */hooks.json) return 0 ;;
        */hooks/hooks.json) return 0 ;;
        */.mcp.json) return 0 ;;
        */agents/*.md) return 0 ;;
        */commands/*.md) return 0 ;;
        */skills/*.md) return 0 ;;
    esac

    return 1
}

# Exit if not an interface file
if ! is_interface_file "$FILE_PATH"; then
    exit 0
fi

# Check the file exists and is in a git repo
if [[ ! -f "$FILE_PATH" ]]; then
    exit 0
fi

# Get the directory containing the file
FILE_DIR=$(dirname "$FILE_PATH")
FILE_NAME=$(basename "$FILE_PATH")

# Try to get the previous version from git
cd "$FILE_DIR" 2>/dev/null || exit 0

# Check we're in a git repo
if ! git rev-parse --git-dir > /dev/null 2>&1; then
    exit 0
fi

# Get the previous version (HEAD version before the current changes).
# git show HEAD:<path> expects a repo-relative path, so use a path
# relative to the current directory rather than the absolute FILE_PATH.
PREV_CONTENT=$(git show HEAD:"./$FILE_NAME" 2>/dev/null || echo "")

# If there is no previous version, this is a new file - no breaking changes possible
if [ -z "$PREV_CONTENT" ]; then
    exit 0
fi

# Read the current content
CURR_CONTENT=$(cat "$FILE_PATH" 2>/dev/null || echo "")

if [ -z "$CURR_CONTENT" ]; then
    exit 0
fi

BREAKING_CHANGES=()

# Detect breaking changes based on the file type
case "$FILE_PATH" in
    */plugin.json|*/.claude-plugin/plugin.json)
        # Check for removed or renamed fields in plugin.json

        # Check if the name changed
        PREV_NAME=$(echo "$PREV_CONTENT" | grep -o '"name"[[:space:]]*:[[:space:]]*"[^"]*"' | head -1)
        CURR_NAME=$(echo "$CURR_CONTENT" | grep -o '"name"[[:space:]]*:[[:space:]]*"[^"]*"' | head -1)
        if [ -n "$PREV_NAME" ] && [ "$PREV_NAME" != "$CURR_NAME" ]; then
            BREAKING_CHANGES+=("Plugin name changed - consumers may need updates")
        fi

        # Check if the version had a major bump (semantic versioning)
        PREV_VER=$(echo "$PREV_CONTENT" | grep -o '"version"[[:space:]]*:[[:space:]]*"[^"]*"' | sed 's/.*"\([0-9]*\)\..*/\1/')
        CURR_VER=$(echo "$CURR_CONTENT" | grep -o '"version"[[:space:]]*:[[:space:]]*"[^"]*"' | sed 's/.*"\([0-9]*\)\..*/\1/')
        if [ -n "$PREV_VER" ] && [ -n "$CURR_VER" ] && [ "$CURR_VER" -gt "$PREV_VER" ] 2>/dev/null; then
            BREAKING_CHANGES+=("Major version bump detected - verify breaking changes documented")
        fi
        ;;

    */hooks.json|*/hooks/hooks.json)
        # Check for removed hook events
        PREV_EVENTS=$(echo "$PREV_CONTENT" | grep -oE '"(PreToolUse|PostToolUse|UserPromptSubmit|SessionStart|SessionEnd|Notification|Stop|SubagentStop|PreCompact)"' | sort -u)
        CURR_EVENTS=$(echo "$CURR_CONTENT" | grep -oE '"(PreToolUse|PostToolUse|UserPromptSubmit|SessionStart|SessionEnd|Notification|Stop|SubagentStop|PreCompact)"' | sort -u)

        # Find removed events
        REMOVED_EVENTS=$(comm -23 <(echo "$PREV_EVENTS") <(echo "$CURR_EVENTS") 2>/dev/null)
        if [ -n "$REMOVED_EVENTS" ]; then
            BREAKING_CHANGES+=("Hook events removed: $(echo $REMOVED_EVENTS | tr '\n' ' ')")
        fi

        # Check for changed matchers
        PREV_MATCHERS=$(echo "$PREV_CONTENT" | grep -o '"matcher"[[:space:]]*:[[:space:]]*"[^"]*"' | sort -u)
        CURR_MATCHERS=$(echo "$CURR_CONTENT" | grep -o '"matcher"[[:space:]]*:[[:space:]]*"[^"]*"' | sort -u)
        if [ "$PREV_MATCHERS" != "$CURR_MATCHERS" ]; then
            BREAKING_CHANGES+=("Hook matchers changed - verify tool coverage")
        fi
        ;;

    */.mcp.json)
        # Check for removed MCP servers
        PREV_SERVERS=$(echo "$PREV_CONTENT" | grep -o '"[^"]*"[[:space:]]*:' | grep -v "mcpServers" | sort -u)
        CURR_SERVERS=$(echo "$CURR_CONTENT" | grep -o '"[^"]*"[[:space:]]*:' | grep -v "mcpServers" | sort -u)

        REMOVED_SERVERS=$(comm -23 <(echo "$PREV_SERVERS") <(echo "$CURR_SERVERS") 2>/dev/null)
        if [ -n "$REMOVED_SERVERS" ]; then
            BREAKING_CHANGES+=("MCP servers removed - tools may be unavailable")
        fi
        ;;

    */agents/*.md)
        # Check if the agent file was significantly reduced (might indicate removed capabilities)
        PREV_LINES=$(echo "$PREV_CONTENT" | wc -l)
        CURR_LINES=$(echo "$CURR_CONTENT" | wc -l)

        # If more than a 50% reduction, warn
        if [ "$PREV_LINES" -gt 10 ] && [ "$CURR_LINES" -lt $((PREV_LINES / 2)) ]; then
            BREAKING_CHANGES+=("Agent definition significantly reduced - capabilities may be removed")
        fi

        # Check if the agent name/description changed in the frontmatter
        PREV_DESC=$(echo "$PREV_CONTENT" | head -20 | grep -i "description" | head -1)
        CURR_DESC=$(echo "$CURR_CONTENT" | head -20 | grep -i "description" | head -1)
        if [ -n "$PREV_DESC" ] && [ "$PREV_DESC" != "$CURR_DESC" ]; then
            BREAKING_CHANGES+=("Agent description changed - verify consumer expectations")
        fi
        ;;

    */commands/*.md|*/skills/*.md)
        # Check if the command/skill was significantly reduced
        PREV_LINES=$(echo "$PREV_CONTENT" | wc -l)
        CURR_LINES=$(echo "$CURR_CONTENT" | wc -l)

        if [ "$PREV_LINES" -gt 10 ] && [ "$CURR_LINES" -lt $((PREV_LINES / 2)) ]; then
            BREAKING_CHANGES+=("Command/skill significantly reduced - behavior may change")
        fi
        ;;
esac

# Output warnings if any breaking changes were detected
if [[ ${#BREAKING_CHANGES[@]} -gt 0 ]]; then
    echo ""
    echo "$PREFIX WARNING: Potential breaking changes in $(basename "$FILE_PATH")"
    echo "$PREFIX ============================================"
    for change in "${BREAKING_CHANGES[@]}"; do
        echo "$PREFIX - $change"
    done
    echo "$PREFIX ============================================"
    echo "$PREFIX Consider updating CHANGELOG and notifying consumers"
    echo ""
fi

# Always exit 0 - non-blocking
exit 0

21 plugins/contract-validator/hooks/hooks.json (new file)
@@ -0,0 +1,21 @@
{
  "hooks": {
    "SessionStart": [
      {
        "type": "command",
        "command": "${CLAUDE_PLUGIN_ROOT}/hooks/auto-validate.sh"
      }
    ],
    "PostToolUse": [
      {
        "matcher": "Edit|Write",
        "hooks": [
          {
            "type": "command",
            "command": "${CLAUDE_PLUGIN_ROOT}/hooks/breaking-change-check.sh"
          }
        ]
      }
    ]
  }
}

@@ -2,9 +2,8 @@
   "mcpServers": {
     "data-platform": {
       "type": "stdio",
-      "command": "${CLAUDE_PLUGIN_ROOT}/mcp-servers/data-platform/.venv/bin/python",
-      "args": ["-m", "mcp_server.server"],
-      "cwd": "${CLAUDE_PLUGIN_ROOT}/mcp-servers/data-platform"
+      "command": "${CLAUDE_PLUGIN_ROOT}/mcp-servers/data-platform/run.sh",
+      "args": []
     }
   }
 }

@@ -5,6 +5,17 @@
         "type": "command",
         "command": "${CLAUDE_PLUGIN_ROOT}/hooks/startup-check.sh"
       }
     ],
+    "PostToolUse": [
+      {
+        "matcher": "Edit|Write",
+        "hooks": [
+          {
+            "type": "command",
+            "command": "${CLAUDE_PLUGIN_ROOT}/hooks/schema-diff-check.sh"
+          }
+        ]
+      }
+    ]
   }
 }

138 plugins/data-platform/hooks/schema-diff-check.sh (new executable file)
@@ -0,0 +1,138 @@
#!/bin/bash
# data-platform schema diff detection hook
# Warns about potentially breaking schema changes
# This is a command hook - non-blocking, warnings only
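#
# Example (illustrative): editing a migration containing "DROP COLUMN legacy_id"
# produces a warning like:
#   [data-platform] WARNING: Potential breaking schema changes in 0042_drop_legacy.sql
#   [data-platform] - DROP COLUMN detected - may break existing queries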
|
||||
PREFIX="[data-platform]"
|
||||
|
||||
# Check if warnings are enabled (default: true)
|
||||
if [[ "${DATA_PLATFORM_SCHEMA_WARN:-true}" != "true" ]]; then
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Read tool input from stdin (JSON with file_path)
|
||||
INPUT=$(cat)
|
||||
|
||||
# Extract file_path from JSON input
|
||||
FILE_PATH=$(echo "$INPUT" | grep -o '"file_path"[[:space:]]*:[[:space:]]*"[^"]*"' | head -1 | sed 's/.*"file_path"[[:space:]]*:[[:space:]]*"\([^"]*\)".*/\1/')
|
||||
|
||||
# If no file_path found, exit silently
|
||||
if [ -z "$FILE_PATH" ]; then
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Check if file is a schema-related file
|
||||
is_schema_file() {
|
||||
local file="$1"
|
||||
|
||||
# Check file extension
|
||||
case "$file" in
|
||||
*.sql) return 0 ;;
|
||||
*/migrations/*.py) return 0 ;;
|
||||
*/migrations/*.sql) return 0 ;;
|
||||
*/models/*.py) return 0 ;;
|
||||
*/models/*.sql) return 0 ;;
|
||||
*schema.prisma) return 0 ;;
|
||||
*schema.graphql) return 0 ;;
|
||||
*/dbt/models/*.sql) return 0 ;;
|
||||
*/dbt/models/*.yml) return 0 ;;
|
||||
*/alembic/versions/*.py) return 0 ;;
|
||||
esac
|
||||
|
||||
# Check directory patterns
|
||||
if echo "$file" | grep -qE "(migrations?|schemas?|models)/"; then
|
||||
return 0
|
||||
fi
|
||||
|
||||
return 1
|
||||
}
|
||||
|
||||
# Exit if not a schema file
|
||||
if ! is_schema_file "$FILE_PATH"; then
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Read the file content (if it exists and is readable)
|
||||
if [[ ! -f "$FILE_PATH" ]]; then
|
||||
exit 0
|
||||
fi
|
||||
|
||||
FILE_CONTENT=$(cat "$FILE_PATH" 2>/dev/null || echo "")
|
||||
|
||||
if [[ -z "$FILE_CONTENT" ]]; then
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Detect breaking changes
|
||||
BREAKING_CHANGES=()
|
||||
|
||||
# Check for DROP COLUMN
|
||||
if echo "$FILE_CONTENT" | grep -qiE "DROP[[:space:]]+COLUMN"; then
|
||||
BREAKING_CHANGES+=("DROP COLUMN detected - may break existing queries")
|
||||
fi
|
||||
|
||||
# Check for DROP TABLE
|
||||
if echo "$FILE_CONTENT" | grep -qiE "DROP[[:space:]]+TABLE"; then
|
||||
BREAKING_CHANGES+=("DROP TABLE detected - data loss risk")
|
||||
fi
|
||||
|
||||
# Check for DROP INDEX
|
||||
if echo "$FILE_CONTENT" | grep -qiE "DROP[[:space:]]+INDEX"; then
|
||||
BREAKING_CHANGES+=("DROP INDEX detected - may impact query performance")
|
||||
fi
|
||||
|
||||
# Check for ALTER TYPE / MODIFY COLUMN type changes
|
||||
if echo "$FILE_CONTENT" | grep -qiE "ALTER[[:space:]]+.*(TYPE|COLUMN.*TYPE)"; then
|
||||
BREAKING_CHANGES+=("Column type change detected - may cause data truncation")
|
||||
fi
|
||||
|
||||
if echo "$FILE_CONTENT" | grep -qiE "MODIFY[[:space:]]+COLUMN"; then
|
||||
BREAKING_CHANGES+=("MODIFY COLUMN detected - verify data compatibility")
|
||||
fi
|
||||
|
||||
# Check for adding NOT NULL to existing column
|
||||
if echo "$FILE_CONTENT" | grep -qiE "ALTER[[:space:]]+.*SET[[:space:]]+NOT[[:space:]]+NULL"; then
|
||||
BREAKING_CHANGES+=("Adding NOT NULL constraint - existing NULL values will fail")
|
||||
fi
|
||||
|
||||
if echo "$FILE_CONTENT" | grep -qiE "ADD[[:space:]]+.*NOT[[:space:]]+NULL[^[:space:]]*[[:space:]]+DEFAULT"; then
|
||||
# Adding NOT NULL with DEFAULT is usually safe - don't warn
|
||||
:
|
||||
elif echo "$FILE_CONTENT" | grep -qiE "ADD[[:space:]]+.*NOT[[:space:]]+NULL"; then
|
||||
BREAKING_CHANGES+=("Adding NOT NULL column without DEFAULT - INSERT may fail")
|
||||
fi
|
||||
|
||||
# Check for RENAME TABLE/COLUMN
|
||||
if echo "$FILE_CONTENT" | grep -qiE "RENAME[[:space:]]+(TABLE|COLUMN|TO)"; then
|
||||
BREAKING_CHANGES+=("RENAME detected - update all references")
|
||||
fi
|
||||
|
||||
# Check for removing from Django/SQLAlchemy models (Python files)
|
||||
if [[ "$FILE_PATH" == *.py ]]; then
|
||||
if echo "$FILE_CONTENT" | grep -qE "^-[[:space:]]*[a-z_]+[[:space:]]*=.*Field\("; then
|
||||
BREAKING_CHANGES+=("Model field removal detected in Python ORM")
|
||||
fi
|
||||
fi
|
||||
|
||||
# Check for Prisma schema changes
|
||||
if [[ "$FILE_PATH" == *schema.prisma ]]; then
|
||||
if echo "$FILE_CONTENT" | grep -qE "@relation.*onDelete.*Cascade"; then
|
||||
BREAKING_CHANGES+=("Cascade delete detected - verify data safety")
|
||||
fi
|
||||
fi
|
||||
|
||||
# Output warnings if any breaking changes detected
|
||||
if [[ ${#BREAKING_CHANGES[@]} -gt 0 ]]; then
|
||||
echo ""
|
||||
echo "$PREFIX WARNING: Potential breaking schema changes in $(basename "$FILE_PATH")"
|
||||
echo "$PREFIX ============================================"
|
||||
for change in "${BREAKING_CHANGES[@]}"; do
|
||||
echo "$PREFIX - $change"
|
||||
done
|
||||
echo "$PREFIX ============================================"
|
||||
echo "$PREFIX Review before deploying to production"
|
||||
echo ""
|
||||
fi
|
||||
|
||||
# Always exit 0 - non-blocking
|
||||
exit 0
|
||||
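Because the hook just reads a `{"file_path": ...}` JSON object on stdin and inspects the file on disk, it can be smoke-tested outside Claude Code. A hypothetical invocation (the migration path and file are made up for illustration; the file must exist locally for the check to run):

```bash
# Create a throwaway migration containing a breaking change.
mkdir -p migrations
echo 'ALTER TABLE users DROP COLUMN legacy_flag;' > migrations/0007_drop_legacy.sql

# Expect the [data-platform] WARNING block on stdout, exit code 0.
echo '{"file_path": "migrations/0007_drop_legacy.sql"}' \
  | ./plugins/data-platform/hooks/schema-diff-check.sh

# Warnings can be silenced per session via the env toggle the script checks.
echo '{"file_path": "migrations/0007_drop_legacy.sql"}' \
  | DATA_PLATFORM_SCHEMA_WARN=false ./plugins/data-platform/hooks/schema-diff-check.sh
```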
102 plugins/git-flow/hooks/branch-check.sh Executable file
@@ -0,0 +1,102 @@
#!/bin/bash
# git-flow branch name validation hook
# Validates branch names follow the convention: <type>/<description>
# Command hook - guaranteed predictable behavior

# Read tool input from stdin (JSON format)
INPUT=$(cat)

# Extract command from JSON input
# The Bash tool sends {"command": "..."} format
COMMAND=$(echo "$INPUT" | grep -o '"command"[[:space:]]*:[[:space:]]*"[^"]*"' | head -1 | sed 's/.*"command"[[:space:]]*:[[:space:]]*"\([^"]*\)".*/\1/')

# If no command found, exit silently (allow)
if [ -z "$COMMAND" ]; then
    exit 0
fi

# Check if this is a branch creation command
# Patterns: git checkout -b, git branch (without -d/-D), git switch -c/-C
IS_BRANCH_CREATE=false
BRANCH_NAME=""

# git checkout -b <branch>
if echo "$COMMAND" | grep -qE 'git\s+checkout\s+(-b|--branch)\s+'; then
    IS_BRANCH_CREATE=true
    BRANCH_NAME=$(echo "$COMMAND" | sed -n 's/.*git\s\+checkout\s\+\(-b\|--branch\)\s\+\([^ ]*\).*/\2/p')
fi

# git switch -c/-C <branch>
if echo "$COMMAND" | grep -qE 'git\s+switch\s+(-c|-C|--create|--force-create)\s+'; then
    IS_BRANCH_CREATE=true
    BRANCH_NAME=$(echo "$COMMAND" | sed -n 's/.*git\s\+switch\s\+\(-c\|-C\|--create\|--force-create\)\s\+\([^ ]*\).*/\2/p')
fi

# git branch <name> (without -d/-D/-m/-M which are delete/rename)
if echo "$COMMAND" | grep -qE 'git\s+branch\s+[^-]' && ! echo "$COMMAND" | grep -qE 'git\s+branch\s+(-d|-D|-m|-M|--delete|--move|--list|--show-current)'; then
    IS_BRANCH_CREATE=true
    BRANCH_NAME=$(echo "$COMMAND" | sed -n 's/.*git\s\+branch\s\+\([^ -][^ ]*\).*/\1/p')
fi

# If not a branch creation command, exit silently (allow)
if [ "$IS_BRANCH_CREATE" = false ]; then
    exit 0
fi

# If we couldn't extract the branch name, exit silently (allow)
if [ -z "$BRANCH_NAME" ]; then
    exit 0
fi

# Remove any quotes from branch name
BRANCH_NAME=$(echo "$BRANCH_NAME" | tr -d '"' | tr -d "'")

# Skip validation for special branches
case "$BRANCH_NAME" in
    main|master|develop|development|staging|release|hotfix)
        exit 0
        ;;
esac

# Allowed branch types
VALID_TYPES="feat|fix|chore|docs|refactor|test|perf|debug"

# Validate branch name format: <type>/<description>
# Description: lowercase letters, numbers, hyphens only, max 50 chars total
if ! echo "$BRANCH_NAME" | grep -qE "^($VALID_TYPES)/[a-z0-9][a-z0-9-]*$"; then
    echo ""
    echo "[git-flow] Branch name validation failed"
    echo ""
    echo "Branch: $BRANCH_NAME"
    echo ""
    echo "Expected format: <type>/<description>"
    echo ""
    echo "Valid types: feat, fix, chore, docs, refactor, test, perf, debug"
    echo ""
    echo "Description rules:"
    echo "  - Lowercase letters, numbers, and hyphens only"
    echo "  - Must start with letter or number"
    echo "  - No spaces or special characters"
    echo ""
    echo "Examples:"
    echo "  feat/add-user-auth"
    echo "  fix/login-timeout"
    echo "  chore/update-deps"
    echo "  docs/api-reference"
    echo ""
    exit 1
fi

# Check total length (max 50 chars)
if [ ${#BRANCH_NAME} -gt 50 ]; then
    echo ""
    echo "[git-flow] Branch name too long"
    echo ""
    echo "Branch: $BRANCH_NAME (${#BRANCH_NAME} chars)"
    echo "Maximum: 50 characters"
    echo ""
    exit 1
fi

# Valid branch name
exit 0
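The hook can be exercised directly by feeding it the Bash tool's `{"command": ...}` payload on stdin (hypothetical invocations for illustration):

```bash
# Valid name: exits 0 with no output, so the git command is allowed.
echo '{"command": "git checkout -b feat/add-user-auth"}' \
  | ./plugins/git-flow/hooks/branch-check.sh

# Invalid name (uppercase, no type prefix): prints the format guidance
# and exits 1, which blocks the Bash call.
echo '{"command": "git checkout -b MyFeature"}' \
  | ./plugins/git-flow/hooks/branch-check.sh
echo "exit: $?"
```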
74 plugins/git-flow/hooks/commit-msg-check.sh Executable file
@@ -0,0 +1,74 @@
#!/bin/bash
# git-flow commit message validation hook
# Validates git commit messages follow conventional commit format
# PreToolUse hook for Bash commands - type: command

# Read tool input from stdin
INPUT=$(cat)

# Use Python to properly parse JSON and extract the command
COMMAND=$(echo "$INPUT" | python3 -c "import sys,json; d=json.load(sys.stdin); print(d.get('command',''))" 2>/dev/null)

# If no command or python failed, allow through
if [ -z "$COMMAND" ]; then
    exit 0
fi

# Check if it is a git commit command with -m flag
if ! echo "$COMMAND" | grep -qE 'git\s+commit.*-m'; then
    # Not a git commit with -m, allow through
    exit 0
fi

# Extract commit message - handle various quoting styles
# Try double quotes first
COMMIT_MSG=$(echo "$COMMAND" | sed -n 's/.*-m[[:space:]]*"\([^"]*\)".*/\1/p')
# If empty, try single quotes
if [ -z "$COMMIT_MSG" ]; then
    COMMIT_MSG=$(echo "$COMMAND" | sed -n "s/.*-m[[:space:]]*'\\([^']*\\)'.*/\\1/p")
fi
# If still empty, try HEREDOC pattern
if [ -z "$COMMIT_MSG" ]; then
    if echo "$COMMAND" | grep -qE -- '-m[[:space:]]+"\$\(cat <<'; then
        # HEREDOC pattern - too complex to parse, allow through
        exit 0
    fi
fi

# If no message extracted, allow through
if [ -z "$COMMIT_MSG" ]; then
    exit 0
fi

# Validate conventional commit format
# Format: <type>(<scope>): <description>
#     or: <type>: <description>
# Valid types: feat, fix, docs, style, refactor, perf, test, chore, build, ci

VALID_TYPES="feat|fix|docs|style|refactor|perf|test|chore|build|ci"

# Check if message matches conventional commit format
if echo "$COMMIT_MSG" | grep -qE "^($VALID_TYPES)(\([a-zA-Z0-9_-]+\))?:[[:space:]]+.+"; then
    # Valid format
    exit 0
fi

# Invalid format - output warning
echo "[git-flow] WARNING: Commit message does not follow conventional commit format"
echo ""
echo "Expected format: <type>(<scope>): <description>"
echo "            or: <type>: <description>"
echo ""
echo "Valid types: feat, fix, docs, style, refactor, perf, test, chore, build, ci"
echo ""
echo "Examples:"
echo "  feat(auth): add password reset functionality"
echo "  fix: resolve login timeout issue"
echo "  docs(readme): update installation instructions"
echo ""
echo "Your message: $COMMIT_MSG"
echo ""
echo "To proceed anyway, use /commit command which auto-generates valid messages."

# Exit with non-zero to block
exit 1
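Same stdin contract as the branch hook, so a quick check looks like this (hypothetical invocations; the hook needs python3 on PATH for the JSON parsing step):

```bash
# Conventional format: exits 0 silently, commit is allowed.
echo '{"command": "git commit -m \"feat(auth): add password reset\""}' \
  | ./plugins/git-flow/hooks/commit-msg-check.sh

# Non-conventional message: prints the warning and exits 1 (blocked).
echo '{"command": "git commit -m \"fixed stuff\""}' \
  | ./plugins/git-flow/hooks/commit-msg-check.sh
echo "exit: $?"
```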
19 plugins/git-flow/hooks/hooks.json Normal file
@@ -0,0 +1,19 @@
{
  "hooks": {
    "PreToolUse": [
      {
        "matcher": "Bash",
        "hooks": [
          {
            "type": "command",
            "command": "${CLAUDE_PLUGIN_ROOT}/hooks/branch-check.sh"
          },
          {
            "type": "command",
            "command": "${CLAUDE_PLUGIN_ROOT}/hooks/commit-msg-check.sh"
          }
        ]
      }
    ]
  }
}
@@ -1,12 +1,8 @@
{
  "mcpServers": {
    "gitea": {
-      "command": "${CLAUDE_PLUGIN_ROOT}/mcp-servers/gitea/.venv/bin/python",
-      "args": ["-m", "mcp_server.server"],
-      "cwd": "${CLAUDE_PLUGIN_ROOT}/mcp-servers/gitea",
-      "env": {
-        "PYTHONPATH": "${CLAUDE_PLUGIN_ROOT}/mcp-servers/gitea"
-      }
+      "command": "${CLAUDE_PLUGIN_ROOT}/mcp-servers/gitea/run.sh",
+      "args": []
    }
  }
}
@@ -1,12 +1,8 @@
{
  "mcpServers": {
    "gitea": {
-      "command": "${CLAUDE_PLUGIN_ROOT}/mcp-servers/gitea/.venv/bin/python",
-      "args": ["-m", "mcp_server.server"],
-      "cwd": "${CLAUDE_PLUGIN_ROOT}/mcp-servers/gitea",
-      "env": {
-        "PYTHONPATH": "${CLAUDE_PLUGIN_ROOT}/mcp-servers/gitea"
-      }
+      "command": "${CLAUDE_PLUGIN_ROOT}/mcp-servers/gitea/run.sh",
+      "args": []
    }
  }
}
@@ -278,6 +278,17 @@ Investigate diagnostic issues and propose fixes with human approval.

**When to use:** In the marketplace repo, to investigate and fix issues reported by `/debug-report`.

### `/suggest-version`
Analyze the CHANGELOG and recommend a semantic version bump.

**What it does:**
- Reads the CHANGELOG.md `[Unreleased]` section
- Analyzes changes to determine bump type (major/minor/patch)
- Applies SemVer rules: breaking changes → major, features → minor, fixes → patch
- Returns the recommended version with rationale

**When to use:** Before creating a release to determine the appropriate version number.
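The SemVer mapping is simple enough to approximate with a grep over the `[Unreleased]` section. A rough sketch of the rule only, not the command's actual implementation:

```bash
# Crude approximation of the bump decision: breaking -> major,
# an Added section -> minor, otherwise patch.
# Section extraction assumes Keep a Changelog headings.
UNRELEASED=$(sed -n '/## \[Unreleased\]/,/^## \[/p' CHANGELOG.md)
if echo "$UNRELEASED" | grep -qi "breaking"; then
    echo "major"
elif echo "$UNRELEASED" | grep -q "^### Added"; then
    echo "minor"
else
    echo "patch"
fi
```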
## Code Quality Commands

The `/review` and `/test-check` commands complement the Executor agent by catching issues before work is marked complete. Run both commands before `/sprint-close` for a complete quality check.
@@ -108,7 +108,127 @@ git branch --show-current

## Your Responsibilities

-### 1. Implement Features Following Specs
+### 1. Status Reporting (Structured Progress)

**CRITICAL: Post structured progress comments for visibility.**

**Standard Progress Comment Format:**
```markdown
## Progress Update
**Status:** In Progress | Blocked | Failed
**Phase:** [current phase name]
**Tool Calls:** X (budget: Y)

### Completed
- [x] Step 1
- [x] Step 2

### In Progress
- [ ] Current step (estimated: Z more calls)

### Blockers
- None | [blocker description]

### Next
- What happens after current step
```

**When to Post Progress Comments:**
- **Immediately on starting** - Post initial status
- **Every 20-30 tool calls** - Show progress
- **On phase transitions** - Moving from implementation to testing
- **When blocked or encountering errors**
- **Before budget limit** - If approaching turn limit

**Starting Work Example:**
```
add_comment(
    issue_number=45,
    body="""## Progress Update
**Status:** In Progress
**Phase:** Starting
**Tool Calls:** 5 (budget: 100)

### Completed
- [x] Read issue and acceptance criteria
- [x] Created feature branch feat/45-jwt-service

### In Progress
- [ ] Implementing JWT service

### Blockers
- None

### Next
- Create auth/jwt_service.py
- Implement core token functions
"""
)
```

**Blocked Example:**
```
add_comment(
    issue_number=45,
    body="""## Progress Update
**Status:** Blocked
**Phase:** Testing
**Tool Calls:** 67 (budget: 100)

### Completed
- [x] Implemented jwt_service.py
- [x] Wrote unit tests

### In Progress
- [ ] Running tests - BLOCKED

### Blockers
- Missing PyJWT dependency in requirements.txt
- Need orchestrator to add dependency

### Next
- Resume after blocker resolved
"""
)
```

**Failed Example:**
```
add_comment(
    issue_number=45,
    body="""## Progress Update
**Status:** Failed
**Phase:** Implementation
**Tool Calls:** 89 (budget: 100)

### Completed
- [x] Created jwt_service.py
- [x] Implemented generate_token()

### In Progress
- [ ] verify_token() - FAILED

### Blockers
- Critical: Cannot decode tokens - algorithm mismatch
- Attempted: HS256, HS384, RS256
- Error: InvalidSignatureError consistently

### Next
- Needs human investigation
- Possible issue with secret key encoding
"""
)
```

**NEVER report "completed" unless:**
- All acceptance criteria are met
- Tests pass
- Code is committed and pushed
- No unresolved errors

**If you cannot complete, report failure honestly.** The orchestrator needs accurate status to coordinate effectively.
### 2. Implement Features Following Specs

**You receive:**
- Issue number and description

@@ -122,7 +242,7 @@ git branch --show-current
- Proper error handling
- Edge case coverage

-### 2. Follow Best Practices
+### 3. Follow Best Practices

**Code Quality Standards:**

@@ -150,7 +270,7 @@ git branch --show-current
- Handle errors gracefully
- Follow OWASP guidelines

-### 3. Handle Edge Cases
+### 4. Handle Edge Cases

Always consider:
- What if input is None/null/undefined?

@@ -160,7 +280,7 @@ Always consider:
- What if user doesn't have permission?
- What if resource doesn't exist?

-### 4. Apply Lessons Learned
+### 5. Apply Lessons Learned

Reference relevant lessons in your implementation:

@@ -179,7 +299,7 @@ def test_verify_expired_token(jwt_service):
    ...
```

-### 5. Create Merge Requests (When Branch Protected)
+### 6. Create Merge Requests (When Branch Protected)

**MR Body Template - NO SUBTASKS:**

@@ -208,7 +328,7 @@ Closes #45

The issue already tracks subtasks. MR body should be summary only.

-### 6. Auto-Close Issues via Commit Messages
+### 7. Auto-Close Issues via Commit Messages

**Always include closing keywords in commits:**

@@ -229,7 +349,7 @@ Closes #45"

This ensures issues auto-close when MR is merged.

-### 7. Generate Completion Reports
+### 8. Generate Completion Reports

After implementation, provide a concise completion report:
@@ -304,18 +424,185 @@ As the executor, you interact with MCP tools for status updates:
- Apply best practices
- Deliver quality work

## Checkpointing (Save Progress for Resume)

**CRITICAL: Save checkpoints so work can be resumed if interrupted.**

**Checkpoint Comment Format:**
```markdown
## Checkpoint
**Branch:** feat/45-jwt-service
**Commit:** abc123 (or "uncommitted")
**Phase:** [current phase]
**Tool Calls:** 45

### Files Modified
- auth/jwt_service.py (created)
- tests/test_jwt.py (created)

### Completed Steps
- [x] Created jwt_service.py skeleton
- [x] Implemented generate_token()
- [x] Implemented verify_token()

### Pending Steps
- [ ] Write unit tests
- [ ] Add token refresh logic
- [ ] Commit and push

### State Notes
[Any important context for resumption]
```

**When to Save Checkpoints:**
- After completing each major step (every 20-30 tool calls)
- Before stopping due to budget limit
- When encountering a blocker
- After any commit

**Checkpoint Example:**
```
add_comment(
    issue_number=45,
    body="""## Checkpoint
**Branch:** feat/45-jwt-service
**Commit:** uncommitted (changes staged)
**Phase:** Testing
**Tool Calls:** 67

### Files Modified
- auth/jwt_service.py (created, 120 lines)
- auth/__init__.py (modified, added import)
- tests/test_jwt.py (created, 50 lines, incomplete)

### Completed Steps
- [x] Created auth/jwt_service.py
- [x] Implemented generate_token() with HS256
- [x] Implemented verify_token()
- [x] Updated auth/__init__.py exports

### Pending Steps
- [ ] Complete test_jwt.py (5 tests remaining)
- [ ] Add token refresh logic
- [ ] Commit changes
- [ ] Push to remote

### State Notes
- Using PyJWT 2.8.0
- Secret key from JWT_SECRET env var
- Tests use pytest fixtures in conftest.py
"""
)
```

**Checkpoint on Interruption:**

If you must stop (budget, failure, blocker), ALWAYS post a checkpoint FIRST.
## Runaway Detection (Self-Monitoring)

**CRITICAL: Monitor yourself to prevent infinite loops and wasted resources.**

**Self-Monitoring Checkpoints:**

| Trigger | Action |
|---------|--------|
| 10+ tool calls without progress | STOP - Post progress update, reassess approach |
| Same error 3+ times | CIRCUIT BREAKER - Stop, report failure with error pattern |
| 50+ tool calls total | POST progress update (mandatory) |
| 80+ tool calls total | WARN - Approaching budget, evaluate if completion is realistic |
| 100+ tool calls total | STOP - Save state, report incomplete with checkpoint |

**What Counts as "Progress":**
- File created or modified
- Test passing that wasn't before
- New functionality working
- Moving to next phase of work

**What Does NOT Count as Progress:**
- Reading more files
- Searching for something
- Retrying the same operation
- Adding logging/debugging

**Circuit Breaker Protocol:**

If you encounter the same error 3+ times:
```
add_comment(
    issue_number=45,
    body="""## Progress Update
**Status:** Failed (Circuit Breaker)
**Phase:** [phase when stopped]
**Tool Calls:** 67 (budget: 100)

### Circuit Breaker Triggered
Same error occurred 3+ times:
```
[error message]
```

### What Was Tried
1. [first attempt]
2. [second attempt]
3. [third attempt]

### Recommendation
[What human should investigate]

### Files Modified
- [list any files changed before failure]
"""
)
```

**Budget Approaching Protocol:**

At 80+ tool calls, post an update:
```
add_comment(
    issue_number=45,
    body="""## Progress Update
**Status:** In Progress (Budget Warning)
**Phase:** [current phase]
**Tool Calls:** 82 (budget: 100)

### Completed
- [x] [completed steps]

### Remaining
- [ ] [what's left]

### Assessment
[Realistic? Should I continue or stop and checkpoint?]
"""
)
```

**Hard Stop at 100 Calls:**

If you reach 100 tool calls:
1. STOP immediately
2. Save current state
3. Post checkpoint comment
4. Report as incomplete (not failed)
## Critical Reminders

1. **Never use CLI tools** - Use MCP tools exclusively for Gitea
-2. **Branch naming** - Always use `feat/`, `fix/`, or `debug/` prefix with issue number
-3. **Branch check FIRST** - Never implement on staging/production
-4. **Follow specs precisely** - Respect architectural decisions
-5. **Apply lessons learned** - Reference in code and tests
-6. **Write tests** - Cover edge cases, not just happy path
-7. **Clean code** - Readable, maintainable, documented
-8. **No MR subtasks** - MR body should NOT have checklists
-9. **Use closing keywords** - `Closes #XX` in commit messages
-10. **Report thoroughly** - Complete summary when done
+2. **Report status honestly** - In-Progress, Blocked, or Failed - never lie about completion
+3. **Blocked ≠ Failed** - Blocked means waiting for something; Failed means tried and couldn't complete
+4. **Self-monitor** - Watch for runaway patterns, trigger circuit breaker when stuck
+5. **Branch naming** - Always use `feat/`, `fix/`, or `debug/` prefix with issue number
+6. **Branch check FIRST** - Never implement on staging/production
+7. **Follow specs precisely** - Respect architectural decisions
+8. **Apply lessons learned** - Reference in code and tests
+9. **Write tests** - Cover edge cases, not just happy path
+10. **Clean code** - Readable, maintainable, documented
+11. **No MR subtasks** - MR body should NOT have checklists
+12. **Use closing keywords** - `Closes #XX` in commit messages
+13. **Report thoroughly** - Complete summary when done, including honest status
+14. **Hard stop at 100 calls** - Save checkpoint and report incomplete

## Your Mission
@@ -57,9 +57,42 @@ curl -X POST "https://gitea.../api/..."
- Coordinate Git operations (commit, merge, cleanup)
- Keep sprint moving forward

## Critical: Approval Verification

**BEFORE EXECUTING**, verify sprint approval exists:

```
get_milestone(milestone_id=current_sprint)
→ Check description for "## Sprint Approval" section
```

**If No Approval:**
```
⚠️ SPRINT NOT APPROVED

This sprint has not been approved for execution.
Please run /sprint-plan to approve the sprint first.
```

**If Approved:**
- Extract scope (branches, files) from approval record
- Enforce scope during execution
- Any operation outside scope requires stopping and re-approval

**Scope Enforcement Example:**
```
Approved scope:
  Branches: feat/45-*, feat/46-*
  Files: auth/*, tests/test_auth*

Task #48 wants to create: feat/48-api-docs
→ NOT in approved scope!
→ STOP and ask user to approve expanded scope
```

## Critical: Branch Detection

-**BEFORE DOING ANYTHING**, check the current git branch:
+**AFTER approval verification**, check the current git branch:

```bash
git branch --show-current

@@ -93,7 +126,44 @@ git branch --show-current

**Workflow:**

-**A. Fetch Sprint Issues**
+**A. Fetch Sprint Issues and Detect Checkpoints**
```
list_issues(state="open", labels=["sprint-current"])
```

**For each open issue, check for checkpoint comments:**
```
get_issue(issue_number=45)  # Comments included
→ Look for comments containing "## Checkpoint"
```

**If Checkpoint Found:**
```
Checkpoint Detected for #45

Found checkpoint from previous session:
  Branch: feat/45-jwt-service
  Phase: Testing
  Tool Calls: 67
  Files Modified: 3
  Completed: 4/7 steps

Options:
1. Resume from checkpoint (recommended)
2. Start fresh (discard previous work)
3. Review checkpoint details first

Would you like to resume?
```

**Resume Protocol:**
1. Verify branch exists: `git branch -a | grep feat/45-jwt-service`
2. Switch to branch: `git checkout feat/45-jwt-service`
3. Verify files match checkpoint
4. Dispatch executor with checkpoint context
5. Executor continues from pending steps

**B. Fetch Sprint Issues (Standard)**
```
list_issues(state="open", labels=["sprint-current"])
```

@@ -147,11 +217,56 @@ Relevant Lessons:
Ready to start? I can dispatch multiple tasks in parallel.
```
-### 2. Parallel Task Dispatch
+### 2. File Conflict Prevention (Pre-Dispatch)

-**When starting execution:**
+**BEFORE dispatching parallel agents, analyze file overlap.**

-For independent tasks (same batch), spawn multiple Executor agents in parallel:
+**Conflict Detection Workflow:**
+
+1. **Read each issue's checklist/body** to identify target files
+2. **Build file map** for all tasks in the batch
+3. **Check for overlap** - Same file in multiple tasks? (a set-intersection sketch follows the resolution rules below)
+4. **Sequentialize conflicts** - Don't parallelize if same file

**Example Analysis:**
```
Analyzing Batch 1 for conflicts:

#45 - Implement JWT service
  → auth/jwt_service.py, auth/__init__.py, tests/test_jwt.py

#48 - Update API documentation
  → docs/api.md, README.md

Overlap check: NONE
Decision: Safe to parallelize ✅
```

**If Conflict Detected:**
```
Analyzing Batch 2 for conflicts:

#46 - Build login endpoint
  → api/routes/auth.py, auth/__init__.py

#49 - Add auth tests
  → tests/test_auth.py, auth/__init__.py

Overlap: auth/__init__.py ⚠️
Decision: Sequentialize - run #46 first, then #49
```

**Conflict Resolution:**
- Same file → MUST sequentialize
- Same directory → Usually safe, review file names
- Shared config → Sequentialize
- Shared test fixture → Assign different fixture files or sequentialize
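For the overlap check itself, a set intersection over the per-task file lists is enough. A small sketch, assuming the issue checklists have already been reduced to one file path per line (the file names here are hypothetical):

```bash
# Any line printed by comm -12 is a file claimed by both tasks,
# so that pair must be sequentialized rather than run in parallel.
overlap=$(comm -12 <(sort -u task45_files.txt) <(sort -u task48_files.txt))
if [ -n "$overlap" ]; then
    echo "CONFLICT - sequentialize:"
    echo "$overlap"
else
    echo "No overlap - safe to parallelize"
fi
```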
### 3. Parallel Task Dispatch

**After conflict check passes, dispatch parallel agents:**

For independent tasks (same batch) WITH NO FILE CONFLICTS, spawn multiple Executor agents in parallel:

```
Dispatching Batch 1 (2 tasks in parallel):

@@ -167,6 +282,14 @@ Task 2: #48 - Update API documentation
Both tasks running in parallel. I'll monitor progress.
```

**Branch Isolation:** Each task MUST have its own branch. Never have two agents work on the same branch.

**Sequential Merge Protocol:**
1. Wait for task to complete
2. Merge its branch to development
3. Then merge next completed task
4. Never merge simultaneously

**Branch Naming Convention (MANDATORY):**
- Features: `feat/<issue-number>-<short-description>`
- Bug fixes: `fix/<issue-number>-<short-description>`

@@ -177,7 +300,7 @@ Both tasks running in parallel. I'll monitor progress.
- `fix/46-login-timeout`
- `debug/47-investigate-memory-leak`

-### 3. Generate Lean Execution Prompts
+### 4. Generate Lean Execution Prompts

**NOT THIS (too verbose):**
```

@@ -222,11 +345,127 @@ Dependencies: None (can start immediately)
Ready to start? Say "yes" and I'll monitor progress.
```

-### 4. Progress Tracking
+### 5. Status Label Management

-**Monitor and Update:**
+**CRITICAL: Use Status labels to communicate issue state accurately.**

-**Add Progress Comments:**
+**When dispatching a task:**
```
update_issue(
    issue_number=45,
    labels=["Status/In-Progress", ...existing_labels]
)
```

**When task is blocked:**
```
update_issue(
    issue_number=46,
    labels=["Status/Blocked", ...existing_labels_without_in_progress]
)
add_comment(
    issue_number=46,
    body="🚫 BLOCKED: Waiting for #45 to complete (dependency)"
)
```

**When task fails:**
```
update_issue(
    issue_number=47,
    labels=["Status/Failed", ...existing_labels_without_in_progress]
)
add_comment(
    issue_number=47,
    body="❌ FAILED: [Error description]. Needs investigation."
)
```

**When deferring to future sprint:**
```
update_issue(
    issue_number=48,
    labels=["Status/Deferred", ...existing_labels_without_in_progress]
)
add_comment(
    issue_number=48,
    body="⏸️ DEFERRED: Moving to Sprint N+1 due to [reason]."
)
```

**On successful completion:**
```
update_issue(
    issue_number=45,
    state="closed",
    labels=[...existing_labels_without_status]  # Remove all Status/* labels
)
```

**Status Label Rules:**
- Only ONE Status label at a time (In-Progress, Blocked, Failed, or Deferred)
- Remove Status labels when closing successfully
- Always add comment explaining status changes
### 6. Progress Tracking (Structured Comments)

**CRITICAL: Use structured progress comments for visibility.**

**Standard Progress Comment Format:**
```markdown
## Progress Update
**Status:** In Progress | Blocked | Failed
**Phase:** [current phase name]
**Tool Calls:** X (budget: Y)

### Completed
- [x] Step 1
- [x] Step 2

### In Progress
- [ ] Current step (estimated: Z more calls)

### Blockers
- None | [blocker description]

### Next
- What happens after current step
```

**Example Progress Comment:**
```
add_comment(
    issue_number=45,
    body="""## Progress Update
**Status:** In Progress
**Phase:** Implementation
**Tool Calls:** 45 (budget: 100)

### Completed
- [x] Created auth/jwt_service.py
- [x] Implemented generate_token()
- [x] Implemented verify_token()

### In Progress
- [ ] Writing unit tests (estimated: 20 more calls)

### Blockers
- None

### Next
- Run tests and fix any failures
- Commit and push
"""
)
```

**When to Post Progress Comments:**
- After completing each major phase (every 20-30 tool calls)
- When status changes (blocked, failed)
- When encountering unexpected issues
- Before approaching tool call budget limit

**Simple progress updates (for minor milestones):**
```
add_comment(
    issue_number=45,

@@ -264,7 +503,7 @@ add_comment(
- Notify that new tasks are ready for execution
- Update the execution queue

-### 5. Monitor Parallel Execution
+### 7. Monitor Parallel Execution

**Track multiple running tasks:**
```

@@ -282,7 +521,7 @@ Batch 2 (now unblocked):
Starting #46 while #48 continues...
```

-### 6. Branch Protection Detection
+### 8. Branch Protection Detection

Before merging, check if development branch is protected:

@@ -312,7 +551,7 @@ Closes #45

**NEVER include subtask checklists in MR body.** The issue already has them.

-### 7. Sprint Close - Capture Lessons Learned
+### 9. Sprint Close - Capture Lessons Learned

**Invoked by:** `/sprint-close`

@@ -564,6 +803,64 @@ Would you like me to handle git operations?
- Document blockers promptly
- Never let tasks slip through

## Runaway Detection (Monitoring Dispatched Agents)

**Monitor dispatched agents for runaway behavior:**

**Warning Signs:**
- Agent running 30+ minutes with no progress comment
- Progress comment shows "same phase" for 20+ tool calls
- Error patterns repeating in progress comments

**Intervention Protocol:**

When you detect an agent may be stuck:

1. **Read latest progress comment** - Check tool call count and phase
2. **If no progress in 20+ calls** - Consider stopping the agent
3. **If same error 3+ times** - Stop and mark issue as Status/Failed

**Agent Timeout Guidelines:**

| Task Size | Expected Duration | Intervention Point |
|-----------|-------------------|--------------------|
| XS | ~5-10 min | 15 min no progress |
| S | ~10-20 min | 30 min no progress |
| M | ~20-40 min | 45 min no progress |

**Recovery Actions:**

If agent appears stuck:
```
# Stop the agent
[Use TaskStop if available]

# Update issue status
update_issue(
    issue_number=45,
    labels=["Status/Failed", ...other_labels]
)

# Add explanation comment
add_comment(
    issue_number=45,
    body="""## Agent Intervention
**Reason:** No progress detected for [X] minutes / [Y] tool calls
**Last Status:** [from progress comment]
**Action:** Stopped agent, requires human review

### What Was Completed
[from progress comment]

### What Remains
[from progress comment]

### Recommendation
[Manual completion / Different approach / Break down further]
"""
)
```
## Critical Reminders

1. **Never use CLI tools** - Use MCP tools exclusively for Gitea

@@ -572,14 +869,18 @@ Would you like me to handle git operations?
4. **Parallel dispatch** - Run independent tasks simultaneously
5. **Lean prompts** - Brief, actionable, not verbose documents
6. **Branch naming** - `feat/`, `fix/`, `debug/` prefixes required
-7. **No MR subtasks** - MR body should NOT have checklists
-8. **Auto-check subtasks** - Mark issue subtasks complete on close
-9. **Track meticulously** - Update issues immediately, document blockers
-10. **Capture lessons** - At sprint close, interview thoroughly
-11. **Update wiki status** - At sprint close, update implementation and proposal pages
-12. **Link lessons to wiki** - Include lesson links in implementation completion summary
-13. **Update CHANGELOG** - MANDATORY at sprint close, never skip
-14. **Run suggest-version** - Check if release is needed after CHANGELOG update
+7. **Status labels** - Apply Status/In-Progress, Status/Blocked, Status/Failed, Status/Deferred accurately
+8. **One status at a time** - Remove old Status/* label before applying new one
+9. **Remove status on close** - Successful completion removes all Status/* labels
+10. **Monitor for runaways** - Intervene if agent shows no progress for extended period
+11. **No MR subtasks** - MR body should NOT have checklists
+12. **Auto-check subtasks** - Mark issue subtasks complete on close
+13. **Track meticulously** - Update issues immediately, document blockers
+14. **Capture lessons** - At sprint close, interview thoroughly
+15. **Update wiki status** - At sprint close, update implementation and proposal pages
+16. **Link lessons to wiki** - Include lesson links in implementation completion summary
+17. **Update CHANGELOG** - MANDATORY at sprint close, never skip
+18. **Run suggest-version** - Check if release is needed after CHANGELOG update

## Your Mission
@@ -310,14 +310,55 @@ Think through the technical approach:
- `[Sprint 17] fix: Resolve login timeout issue`
- `[Sprint 18] refactor: Extract authentication module`

-**Task Granularity Guidelines:**
-| Size | Scope | Example |
-|------|-------|---------|
-| **Small** | 1-2 hours, single file/component | Add validation to one field |
-| **Medium** | Half day, multiple files, one feature | Implement new API endpoint |
-| **Large** | Should be broken down | Full authentication system |
-
-**If a task is too large, break it down into smaller tasks.**
+**Task Sizing Rules (MANDATORY):**
+
+| Effort | Files | Checklist Items | Max Tool Calls | Agent Scope |
+|--------|-------|-----------------|----------------|-------------|
+| **XS** | 1 file | 0-2 items | ~30 | Single function/fix |
+| **S** | 1 file | 2-4 items | ~50 | Single file feature |
+| **M** | 2-3 files | 4-6 items | ~80 | Multi-file feature |
+| **L** | MUST BREAK DOWN | - | - | Too large for one agent |
+| **XL** | MUST BREAK DOWN | - | - | Way too large |
+
+**CRITICAL: L and XL tasks MUST be broken into subtasks.**
+
+**Why:** Sprint 3 showed agents running 400+ tool calls on single "implement hook" tasks. This causes:
+- Long wait times (1+ hour per task)
+- No progress visibility
+- Resource exhaustion
+- Difficult debugging
+
+**Task Scoping Checklist:**
+1. Can this be completed in one file? → XS or S
+2. Does it touch 2-3 files? → M (max)
+3. Does it touch 4+ files? → MUST break down
+4. Does it require complex decision-making? → MUST break down
+5. Would you estimate 50+ tool calls? → MUST break down
+
+**Breaking Down Large Tasks:**
+
+**BAD (L/XL - too broad):**
+```
+[Sprint 3] feat: Implement git-flow branch validation hook
+Labels: Efforts/L, ...
+```
+
+**GOOD (broken into S/M tasks):**
+```
+[Sprint 3] feat: Create branch validation hook skeleton
+Labels: Efforts/S, ...
+
+[Sprint 3] feat: Add prefix pattern validation (feat/, fix/, etc.)
+Labels: Efforts/S, ...
+
+[Sprint 3] feat: Add issue number extraction and validation
+Labels: Efforts/S, ...
+
+[Sprint 3] test: Add branch validation unit tests
+Labels: Efforts/S, ...
+```
+
+**If a task is estimated L or XL, STOP and break it down before creating.**

**IMPORTANT: Include wiki implementation reference in issue body:**

@@ -479,5 +520,9 @@ Sprint 17 - User Authentication (Due: 2025-02-01)
11. **Always use suggest_labels** - Don't guess labels
12. **Always think through architecture** - Consider edge cases
13. **Always cleanup local files** - Delete after migrating to wiki
+14. **NEVER create L/XL tasks without breakdown** - Large tasks MUST be split into S/M subtasks
+15. **Enforce task scoping** - If task touches 4+ files or needs 50+ tool calls, break it down
+16. **ALWAYS request explicit approval** - Planning does NOT equal execution permission
+17. **Record approval in milestone** - Sprint-start verifies approval before executing

You are the thoughtful planner who ensures sprints are well-prepared, architecturally sound, and learn from past experiences. Take your time, ask questions, and create comprehensive plans that set the team up for success.
@@ -136,6 +136,58 @@ The planner agent will:
- Document dependency graph
- Provide sprint overview with wiki links

11. **Request Sprint Approval**
    - Present approval request with scope summary
    - Capture explicit user approval
    - Record approval in milestone description
    - Approval scopes what sprint-start can execute

## Sprint Approval (MANDATORY)

**Planning DOES NOT equal execution permission.**

After creating issues, the planner MUST request explicit approval:

```
Sprint 17 Planning Complete
===========================

Created Issues:
- #45: [Sprint 17] feat: JWT token generation
- #46: [Sprint 17] feat: Login endpoint
- #47: [Sprint 17] test: Auth tests

Execution Scope:
- Branches: feat/45-*, feat/46-*, feat/47-*
- Files: auth/*, api/routes/auth.py, tests/test_auth*
- Dependencies: PyJWT, python-jose

⚠️ APPROVAL REQUIRED

Do you approve this sprint for execution?
This grants permission for agents to:
- Create and modify files in the listed scope
- Create branches with the listed prefixes
- Install listed dependencies

Type "approve sprint 17" to authorize execution.
```

**On Approval:**
1. Record approval in milestone description
2. Note timestamp and scope
3. Sprint-start will verify approval exists

**Approval Record Format:**
```markdown
## Sprint Approval
**Approved:** 2026-01-28 14:30
**Approver:** User
**Scope:**
- Branches: feat/45-*, feat/46-*, feat/47-*
- Files: auth/*, api/routes/auth.py, tests/test_auth*
```

## Issue Title Format (MANDATORY)

```

@@ -155,15 +207,70 @@ The planner agent will:
- `[Sprint 17] fix: Resolve login timeout issue`
- `[Sprint 18] refactor: Extract authentication module`

-## Task Granularity Guidelines
+## Task Sizing Rules (MANDATORY)

-| Size | Scope | Example |
-|------|-------|---------|
-| **Small** | 1-2 hours, single file/component | Add validation to one field |
-| **Medium** | Half day, multiple files, one feature | Implement new API endpoint |
-| **Large** | Should be broken down | Full authentication system |
-
-**If a task is too large, break it down into smaller tasks.**
+**CRITICAL: Tasks sized L or XL MUST be broken down into smaller tasks.**
+
+| Effort | Files | Checklist Items | Max Tool Calls | Agent Scope |
+|--------|-------|-----------------|----------------|-------------|
+| **XS** | 1 file | 0-2 items | ~30 | Single function/fix |
+| **S** | 1 file | 2-4 items | ~50 | Single file feature |
+| **M** | 2-3 files | 4-6 items | ~80 | Multi-file feature |
+| **L** | MUST BREAK DOWN | - | - | Too large for one agent |
+| **XL** | MUST BREAK DOWN | - | - | Way too large |
+
+**Why This Matters:**
+- Agents running 400+ tool calls take 1+ hour, with no visibility
+- Large tasks lack clear completion criteria
+- Debugging failures is extremely difficult
+- Small tasks enable parallel execution
+
+**Scoping Checklist:**
+1. Can this be completed in one file? → XS or S
+2. Does it touch 2-3 files? → M (maximum for single task)
+3. Does it touch 4+ files? → MUST break down
+4. Would you estimate 50+ tool calls? → MUST break down
+5. Does it require complex decision-making mid-task? → MUST break down
+
+**Example Breakdown:**
+
+**BAD (L - too broad):**
+```
+[Sprint 3] feat: Implement schema diff detection hook
+Labels: Efforts/L
+- Hook skeleton
+- Pattern detection for DROP/ALTER/RENAME
+- Warning output formatting
+- Integration with hooks.json
+```
+
+**GOOD (broken into S tasks):**
+```
+[Sprint 3] feat: Create schema-diff-check.sh hook skeleton
+Labels: Efforts/S
+- [ ] Create hook file with standard header
+- [ ] Add file type detection for SQL/migrations
+- [ ] Exit 0 (non-blocking)
+
+[Sprint 3] feat: Add DROP/ALTER pattern detection
+Labels: Efforts/S
+- [ ] Detect DROP COLUMN/TABLE/INDEX
+- [ ] Detect ALTER TYPE changes
+- [ ] Detect RENAME operations
+
+[Sprint 3] feat: Add warning output formatting
+Labels: Efforts/S
+- [ ] Format breaking change warnings
+- [ ] Add hook prefix to output
+- [ ] Test output visibility
+
+[Sprint 3] chore: Register hook in hooks.json
+Labels: Efforts/XS
+- [ ] Add PostToolUse:Edit hook entry
+- [ ] Test hook triggers on SQL edits
+```
+
+**The planner MUST refuse to create L/XL tasks without breakdown.**

## MCP Tools Available
@@ -6,6 +6,47 @@ description: Begin sprint execution with relevant lessons learned from previous

You are initiating sprint execution. The orchestrator agent will coordinate the work, analyze dependencies for parallel execution, search for relevant lessons learned, and guide you through the implementation process.

## Sprint Approval Verification

**CRITICAL: Sprint must be approved before execution.**

The orchestrator checks for approval in the milestone description:

```
get_milestone(milestone_id=17)
→ Check description for "## Sprint Approval" section
```

**If Approval Missing:**
```
⚠️ SPRINT NOT APPROVED

Sprint 17 has not been approved for execution.
The milestone description does not contain an approval record.

Please run /sprint-plan to:
1. Review the sprint scope
2. Approve the execution plan

Then run /sprint-start again.
```

**If Approval Found:**
```
✓ Sprint Approval Verified
  Approved: 2026-01-28 14:30
  Scope:
    Branches: feat/45-*, feat/46-*, feat/47-*
    Files: auth/*, api/routes/auth.py, tests/test_auth*

Proceeding with execution within approved scope...
```

**Scope Enforcement:**
- Agents can ONLY create branches matching approved patterns
- Agents can ONLY modify files within approved paths
- Operations outside scope require re-approval via `/sprint-plan` (a minimal glob-based gate is sketched below)
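Branch-pattern enforcement maps naturally onto shell globs. A small sketch of the gate an agent could apply before creating a branch, assuming the approved patterns from the example record above:

```bash
# Hypothetical scope gate: compare a requested branch name against the
# glob patterns recorded in the milestone's approval section.
BRANCH="feat/48-api-docs"
case "$BRANCH" in
    feat/45-*|feat/46-*|feat/47-*)
        echo "Within approved scope - proceeding" ;;
    *)
        echo "Outside approved scope - re-approval required via /sprint-plan"
        exit 1 ;;
esac
```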
## Branch Detection

**CRITICAL:** Before proceeding, check the current git branch:

@@ -25,7 +66,18 @@ If you are on a production or staging branch, you MUST stop and ask the user to

The orchestrator agent will:

-1. **Fetch Sprint Issues**
+1. **Verify Sprint Approval**
+   - Check milestone description for `## Sprint Approval` section
+   - If no approval found, STOP and direct user to `/sprint-plan`
+   - If approval found, extract scope (branches, files)
+   - Agents operate ONLY within approved scope
+
+2. **Detect Checkpoints (Resume Support)**
+   - Check each open issue for `## Checkpoint` comments
+   - If checkpoint found, offer to resume from that point
+   - Resume preserves: branch, completed work, pending steps
+
+3. **Fetch Sprint Issues**
   - Use `list_issues` to fetch open issues for the sprint
   - Identify priorities based on labels (Priority/Critical, Priority/High, etc.)

@@ -72,6 +124,67 @@ Parallel Execution Batches:

**Independent tasks in the same batch run in parallel.**

## File Conflict Prevention (MANDATORY)

**CRITICAL: Before dispatching parallel agents, check for file overlap.**

**Pre-Dispatch Conflict Check:**

1. **Identify target files** for each task in the batch
2. **Check for overlap** - Do any tasks modify the same file?
3. **If overlap detected** - Sequentialize those specific tasks

**Example Conflict Detection:**
```
Batch 1 Analysis:
  #45 - Implement JWT service
    Files: auth/jwt_service.py, auth/__init__.py, tests/test_jwt.py

  #48 - Update API documentation
    Files: docs/api.md, README.md

  Overlap: NONE → Safe to parallelize

Batch 2 Analysis:
  #46 - Build login endpoint
    Files: api/routes/auth.py, auth/__init__.py

  #49 - Add auth tests
    Files: tests/test_auth.py, auth/__init__.py

  Overlap: auth/__init__.py → CONFLICT!
  Action: Sequentialize #46 and #49 (run #46 first)
```

**Conflict Resolution Rules:**

| Conflict Type | Action |
|---------------|--------|
| Same file in checklist | Sequentialize tasks |
| Same directory | Review if safe, usually OK |
| Shared test file | Sequentialize or assign different test files |
| Shared config | Sequentialize |

**Branch Isolation Protocol:**

Even for parallel tasks, each MUST run on its own branch:
```
Task #45 → feat/45-jwt-service (isolated)
Task #48 → feat/48-api-docs (isolated)
```

**Sequential Merge After Completion:**
```
1. Task #45 completes → merge feat/45-jwt-service to development
2. Task #48 completes → merge feat/48-api-docs to development
3. Never merge simultaneously - always sequential to detect conflicts
```
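In plain git, that sequential discipline looks something like the following sketch, assuming merges happen locally rather than through protected-branch MRs:

```bash
# Merge completed task branches one at a time so each conflict surfaces
# against an up-to-date development branch.
git checkout development && git pull
git merge --no-ff feat/45-jwt-service   # task #45 first
git push
git merge --no-ff feat/48-api-docs      # only after #45 is in
git push
```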
**If Merge Conflict Occurs:**
1. Stop second task
2. Resolve conflict manually or assign to human
3. Resume/restart second task with updated base

## Branch Naming Convention (MANDATORY)

When creating branches for tasks:

@@ -239,6 +352,61 @@ Batch 2 (now unblocked):
Starting #46 while #48 continues...
```

## Checkpoint Resume Support

If a previous session was interrupted (agent stopped, failure, budget exhausted), checkpoints enable resumption.

**Checkpoint Detection:**
The orchestrator scans issue comments for `## Checkpoint` markers containing:
- Branch name
- Last commit hash
- Completed/pending steps
- Files modified

**Resume Flow:**
```
User: /sprint-start

Orchestrator: Checking for checkpoints...

Found checkpoint for #45 (JWT service):
  Branch: feat/45-jwt-service
  Last activity: 2 hours ago
  Progress: 4/7 steps completed
  Pending: Write tests, add refresh, commit

Options:
1. Resume from checkpoint (recommended)
2. Start fresh (lose previous work)
3. Review checkpoint details

User: 1

Orchestrator: Resuming #45 from checkpoint...
  ✓ Branch exists
  ✓ Files match checkpoint
  ✓ Dispatching executor with context

Executor continues from pending steps...
```

**Checkpoint Format:**
Executors save checkpoints after major steps:
```markdown
## Checkpoint
**Branch:** feat/45-jwt-service
**Commit:** abc123
**Phase:** Testing

### Completed Steps
- [x] Step 1
- [x] Step 2

### Pending Steps
- [ ] Step 3
- [ ] Step 4
```

## Getting Started

Simply invoke `/sprint-start` and the orchestrator will:
@@ -79,7 +79,12 @@ Completed Issues (3):

In Progress (2):
  #46: [Sprint 18] feat: Build login endpoint [Type/Feature, Priority/High]
+    Status: In Progress | Phase: Implementation | Tool Calls: 45/100
+    Progress: 3/5 steps | Current: Writing validation logic

  #49: [Sprint 18] test: Add auth tests [Type/Test, Priority/Medium]
+    Status: In Progress | Phase: Testing | Tool Calls: 30/100
+    Progress: 2/4 steps | Current: Testing edge cases

Ready to Start (2):
  #50: [Sprint 18] feat: Integrate OAuth providers [Type/Feature, Priority/Low]

@@ -137,12 +142,53 @@ Show only backend issues:
list_issues(labels=["Component/Backend"])
```

## Progress Comment Parsing

Agents post structured progress comments in this format:

```markdown
## Progress Update
**Status:** In Progress | Blocked | Failed
**Phase:** [current phase name]
**Tool Calls:** X (budget: Y)

### Completed
- [x] Step 1

### In Progress
- [ ] Current step

### Blockers
- None | [blocker description]
```

**To extract real-time progress:**
1. Fetch issue comments: `get_issue(number)` includes recent comments
2. Look for comments containing `## Progress Update`
3. Parse the **Status:** line for current state
4. Parse **Tool Calls:** for budget consumption
5. Extract blockers from the `### Blockers` section (see the sketch below)
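Since the format is fixed, the headline fields fall out of a couple of sed expressions. A rough sketch over a saved comment body (the file name is hypothetical):

```bash
# Pull the key fields from a structured progress comment.
COMMENT=comment.md
STATUS=$(sed -n 's/^\*\*Status:\*\* *//p' "$COMMENT" | head -1)
CALLS=$(sed -n 's/^\*\*Tool Calls:\*\* *//p' "$COMMENT" | head -1)
# Everything between "### Blockers" and the next "###" heading:
BLOCKERS=$(sed -n '/^### Blockers/,/^### /{/^###/d;p}' "$COMMENT")
echo "status=$STATUS calls=$CALLS"
echo "blockers: $BLOCKERS"
```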
**Progress Summary Display:**
```
In Progress Issues:
  #45: [Sprint 18] feat: JWT service
    Status: In Progress | Phase: Testing | Tool Calls: 67/100
    Completed: 4/6 steps | Current: Writing unit tests

  #46: [Sprint 18] feat: Login endpoint
    Status: Blocked | Phase: Implementation | Tool Calls: 23/100
    Blocker: Waiting for JWT service (#45)
```

## Blocker Detection

The command identifies blocked issues by:
-1. **Dependency Analysis** - Uses `list_issue_dependencies` to find unmet dependencies
-2. **Comment Keywords** - Checks for "blocked", "blocker", "waiting for"
-3. **Stale Issues** - Issues with no recent activity (>7 days)
+1. **Progress Comments** - Parse `### Blockers` section from structured comments
+2. **Status Labels** - Check for `Status/Blocked` label on issue
+3. **Dependency Analysis** - Uses `list_issue_dependencies` to find unmet dependencies
+4. **Comment Keywords** - Checks for "blocked", "blocker", "waiting for"
+5. **Stale Issues** - Issues with no recent activity (>7 days)

## When to Use
@@ -5,13 +5,43 @@

PREFIX="[projman]"

# Check if MCP venv exists
# Calculate paths
PLUGIN_ROOT="${CLAUDE_PLUGIN_ROOT:-$(dirname "$(dirname "$(realpath "$0")")")}"
VENV_PATH="$PLUGIN_ROOT/mcp-servers/gitea/.venv/bin/python"
# Marketplace root is 2 levels up from plugin root (plugins/projman -> .)
MARKETPLACE_ROOT="$(dirname "$(dirname "$PLUGIN_ROOT")")"
VENV_REPAIR_SCRIPT="$MARKETPLACE_ROOT/scripts/venv-repair.sh"
PLUGIN_CACHE="$HOME/.claude/plugins/cache/leo-claude-mktplace"

if [[ ! -f "$VENV_PATH" ]]; then
    echo "$PREFIX MCP venvs missing - run setup.sh from installed marketplace"
    exit 0
# ============================================================================
# Clear stale plugin cache (MUST run before MCP servers load)
# ============================================================================
# The cache at ~/.claude/plugins/cache/ holds versioned .mcp.json files.
# After marketplace updates, cached configs may point to old paths.
# Clearing forces Claude to read fresh configs from installed marketplace.

if [[ -d "$PLUGIN_CACHE" ]]; then
    rm -rf "$PLUGIN_CACHE"
    # Don't output anything - this should be silent and automatic
fi

# ============================================================================
# Auto-repair MCP venvs (runs before other checks)
# ============================================================================

if [[ -x "$VENV_REPAIR_SCRIPT" ]]; then
    # Run venv repair - this creates symlinks to cached venvs
    # Only outputs messages if something needed fixing
    "$VENV_REPAIR_SCRIPT" 2>/dev/null || {
        echo "$PREFIX MCP venv setup failed - run: cd $MARKETPLACE_ROOT && ./scripts/setup-venvs.sh"
        exit 0
    }
else
    # Fallback: just check if venv exists
    VENV_PATH="$PLUGIN_ROOT/mcp-servers/gitea/.venv/bin/python"
    if [[ ! -f "$VENV_PATH" ]]; then
        echo "$PREFIX MCP venvs missing - run setup.sh from installed marketplace"
        exit 0
    fi
fi

# Check git remote vs .env config (only if .env exists)

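To exercise this hook outside a live session, it can be invoked directly; the paths and script name below are assumptions for illustration:

PLUGIN="$HOME/.claude/plugins/marketplaces/leo-claude-mktplace/plugins/projman"
CLAUDE_PLUGIN_ROOT="$PLUGIN" bash "$PLUGIN/hooks/session-start.sh"   # hypothetical file name
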
@@ -13,9 +13,9 @@ description: Dynamic reference for Gitea label taxonomy (organization + reposito

This skill provides the current label taxonomy used for issue classification in Gitea. Labels are **fetched dynamically** from Gitea and should never be hardcoded.

**Current Taxonomy:** 43 labels (27 organization + 16 repository)
**Current Taxonomy:** 47 labels (31 organization + 16 repository)

## Organization Labels (27)
## Organization Labels (31)

Organization-level labels are shared across all repositories in your configured organization.

@@ -60,6 +60,12 @@ Organization-level labels are shared across all repositories in your configured
- `Type/Test` (#1d76db) - Testing-related work (unit, integration, e2e)
- `Type/Chore` (#fef2c0) - Maintenance, tooling, dependencies, build tasks

### Status (4)
- `Status/In-Progress` (#0052cc) - Work is actively being done on this issue
- `Status/Blocked` (#ff5630) - Blocked by external dependency or issue
- `Status/Failed` (#de350b) - Implementation attempted but failed, needs investigation
- `Status/Deferred` (#6554c0) - Moved to a future sprint or backlog

## Repository Labels (16)

Repository-level labels are specific to each project.
@@ -168,6 +174,28 @@ When suggesting labels for issues, consider the following patterns:
- Keywords: "deploy", "deployment", "docker", "infrastructure", "ci/cd", "production"
- Example: "Deploy authentication service to production"

### Status Detection

**Status/In-Progress:**
- Applied when: Agent starts working on an issue
- Remove when: Work completes, fails, or is blocked
- Example: Orchestrator applies when dispatching task to executor

**Status/Blocked:**
- Applied when: Issue cannot proceed due to external dependency
- Context: Waiting for another issue, external service, or decision
- Example: "Blocked by #45 - need JWT service first"

**Status/Failed:**
- Applied when: Implementation was attempted but failed
- Context: Errors, permission issues, technical blockers
- Example: Agent hit permission errors and couldn't complete

**Status/Deferred:**
- Applied when: Work is moved to a future sprint
- Context: Scope reduction, reprioritization
- Example: "Moving to Sprint 5 due to scope constraints"

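For reference, applying and removing these status labels maps onto ordinary Gitea REST calls; the sketch below is illustrative only (`$GITEA_URL`, `$GITEA_TOKEN`, the `org/repo` path, and label id `123` are assumptions, and the plugin normally performs this through the Gitea MCP server):

```bash
# Add Status/Blocked (label id 123 is illustrative) to issue #46
curl -s -X POST \
  -H "Authorization: token $GITEA_TOKEN" \
  -H "Content-Type: application/json" \
  -d '{"labels": [123]}' \
  "$GITEA_URL/api/v1/repos/org/repo/issues/46/labels"

# Remove it again once the issue is unblocked
curl -s -X DELETE \
  -H "Authorization: token $GITEA_TOKEN" \
  "$GITEA_URL/api/v1/repos/org/repo/issues/46/labels/123"
```
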
### Tech Detection

**Tech/Python:**

@@ -2,9 +2,8 @@
  "mcpServers": {
    "viz-platform": {
      "type": "stdio",
      "command": "${CLAUDE_PLUGIN_ROOT}/mcp-servers/viz-platform/.venv/bin/python",
      "args": ["-m", "mcp_server.server"],
      "cwd": "${CLAUDE_PLUGIN_ROOT}/mcp-servers/viz-platform"
      "command": "${CLAUDE_PLUGIN_ROOT}/mcp-servers/viz-platform/run.sh",
      "args": []
    }
  }
}

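The `run.sh` wrapper referenced above is not part of this hunk; a plausible sketch of such an entry point, under the conventions used here (hypothetical, not the actual file):

#!/usr/bin/env bash
# Hypothetical run.sh: cd into the server directory and exec the venv's
# Python with the MCP server module, so .mcp.json no longer needs "cwd".
set -euo pipefail
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
cd "$DIR"
exec "$DIR/.venv/bin/python" -m mcp_server.server "$@"
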
@@ -1,17 +1,20 @@
#!/usr/bin/env bash
#
# post-update.sh - Run after pulling updates
# post-update.sh - Run after pulling updates or marketplace sync
#
# Usage: ./scripts/post-update.sh
#
# This script:
# 1. Updates Python dependencies for MCP servers
# 2. Validates configuration still works
# 3. Reports any new manual steps from CHANGELOG
# 1. Clears Claude plugin cache (forces fresh .mcp.json reads)
# 2. Restores MCP venv symlinks (instant if cache exists)
# 3. Creates venvs in external cache if missing (first run only)
# 4. Shows recent changelog updates
#

set -euo pipefail

CLAUDE_PLUGIN_CACHE="$HOME/.claude/plugins/cache/leo-claude-mktplace"

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_ROOT="$(dirname "$SCRIPT_DIR")"

@@ -19,42 +22,25 @@ REPO_ROOT="$(dirname "$SCRIPT_DIR")"
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
RED='\033[0;31m'
NC='\033[0m'

log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[OK]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }

update_mcp_server() {
    local server_name="$1"
    local server_path="$REPO_ROOT/mcp-servers/$server_name"

    log_info "Updating $server_name dependencies..."

    if [[ -d "$server_path/.venv" ]] && [[ -f "$server_path/requirements.txt" ]]; then
        cd "$server_path"
        source .venv/bin/activate
        pip install -q --upgrade pip
        pip install -q -r requirements.txt
        deactivate
        cd "$REPO_ROOT"
        log_success "$server_name dependencies updated"
    else
        log_warn "$server_name not fully set up - run ./scripts/setup.sh first"
    fi
}
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }

check_changelog() {
    log_info "Checking CHANGELOG for recent updates..."

    if [[ -f "$REPO_ROOT/CHANGELOG.md" ]]; then
        # Show the Unreleased section
        echo ""
        echo "Recent changes (from CHANGELOG.md):"
        echo "-----------------------------------"
        sed -n '/## \[Unreleased\]/,/## \[/p' "$REPO_ROOT/CHANGELOG.md" | head -30
        echo "-----------------------------------"
        echo ""
        local unreleased
        unreleased=$(sed -n '/## \[Unreleased\]/,/## \[/p' "$REPO_ROOT/CHANGELOG.md" | grep -E '^### ' | head -1 || true)
        if [[ -n "$unreleased" ]]; then
            echo ""
            log_info "Recent changes (from CHANGELOG.md):"
            echo "-----------------------------------"
            sed -n '/## \[Unreleased\]/,/## \[/p' "$REPO_ROOT/CHANGELOG.md" | head -20
            echo "-----------------------------------"
        fi
    fi
}

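# For reference, the sed range above prints from the "## [Unreleased]" heading
# up to and including the next "## [" heading, e.g.:
#   ## [Unreleased]
#   ### Added
#   ...
#   ## [5.1.0]
# so grep -E '^### ' tells us whether the Unreleased section has any entries.
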
@@ -64,23 +50,37 @@ main() {
    echo "=============================================="
    echo ""

    # Shared MCP servers at repository root (v3.0.0+)
    update_mcp_server "gitea"
    update_mcp_server "netbox"
    update_mcp_server "data-platform"
    # Clear Claude plugin cache to force fresh .mcp.json reads
    # This cache holds versioned copies that become stale after updates
    if [[ -d "$CLAUDE_PLUGIN_CACHE" ]]; then
        log_info "Clearing Claude plugin cache..."
        rm -rf "$CLAUDE_PLUGIN_CACHE"
        log_success "Plugin cache cleared"
    fi

    # Run venv-repair.sh to restore symlinks to external cache
    # This is instant if cache exists, or does full setup on first run
    if [[ -x "$SCRIPT_DIR/venv-repair.sh" ]]; then
        log_info "Restoring MCP venv symlinks..."
        if "$SCRIPT_DIR/venv-repair.sh"; then
            log_success "MCP venvs ready"
        else
            log_error "MCP venv setup failed"
            log_warn "Run: $SCRIPT_DIR/setup-venvs.sh for full setup"
            exit 1
        fi
    else
        log_error "venv-repair.sh not found at $SCRIPT_DIR"
        exit 1
    fi

    check_changelog

    echo ""
    log_success "Post-update complete!"
    echo ""
    echo "If you see new features in the changelog that require"
    echo "configuration changes, update your ~/.config/claude/*.env files."
    echo "IMPORTANT: Restart Claude Code for changes to take effect."
    echo "MCP servers will work immediately on next session start."
}

main "$@"

# Clear plugin cache to ensure fresh hooks are loaded
echo "Clearing plugin cache..."
rm -rf ~/.claude/plugins/cache/leo-claude-mktplace/
echo "Cache cleared"

281
scripts/setup-venvs.sh
Executable file
@@ -0,0 +1,281 @@
#!/usr/bin/env bash
#
# setup-venvs.sh - Smart MCP server venv management with external cache
#
# This script manages Python virtual environments for MCP servers in a
# PERSISTENT location outside the marketplace directory, so they survive
# marketplace updates.
#
# Features:
#   - Stores venvs in ~/.cache/claude-mcp-venvs/ (survives updates)
#   - Incremental installs (only missing packages)
#   - Hash-based change detection (skip if requirements unchanged)
#   - Can be called from SessionStart hooks safely
#
# Usage:
#   ./scripts/setup-venvs.sh           # Full setup
#   ./scripts/setup-venvs.sh --check   # Check only, no install
#   ./scripts/setup-venvs.sh --quick   # Skip if hash unchanged
#   ./scripts/setup-venvs.sh gitea     # Setup specific server only
#

set -euo pipefail

# ============================================================================
# Configuration
# ============================================================================

# Persistent venv location (outside marketplace)
VENV_CACHE_DIR="${HOME}/.cache/claude-mcp-venvs/leo-claude-mktplace"

# Script and repo paths
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_ROOT="$(dirname "$SCRIPT_DIR")"

# MCP servers to manage
MCP_SERVERS=(gitea netbox data-platform viz-platform contract-validator)

# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'

# Flags
CHECK_ONLY=false
QUICK_MODE=false
SPECIFIC_SERVER=""

# ============================================================================
# Argument Parsing
# ============================================================================

while [[ $# -gt 0 ]]; do
    case $1 in
        --check)
            CHECK_ONLY=true
            shift
            ;;
        --quick)
            QUICK_MODE=true
            shift
            ;;
        -h|--help)
            echo "Usage: $0 [OPTIONS] [SERVER]"
            echo ""
            echo "Options:"
            echo "  --check    Check venv status without installing"
            echo "  --quick    Skip servers with unchanged requirements"
            echo "  -h,--help  Show this help"
            echo ""
            echo "Servers: ${MCP_SERVERS[*]}"
            exit 0
            ;;
        *)
            SPECIFIC_SERVER="$1"
            shift
            ;;
    esac
done

# ============================================================================
# Helper Functions
# ============================================================================

log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_ok() { echo -e "${GREEN}[OK]${NC} $1"; }
log_skip() { echo -e "${YELLOW}[SKIP]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1" >&2; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }

# Calculate hash of requirements file(s)
requirements_hash() {
    local server_path="$1"
    local hash_input=""

    if [[ -f "$server_path/requirements.txt" ]]; then
        hash_input+=$(cat "$server_path/requirements.txt")
    fi
    if [[ -f "$server_path/pyproject.toml" ]]; then
        hash_input+=$(cat "$server_path/pyproject.toml")
    fi

    echo "$hash_input" | sha256sum | cut -d' ' -f1
}

# Check if requirements changed since last install
requirements_changed() {
    local server_name="$1"
    local server_path="$2"
    local hash_file="$VENV_CACHE_DIR/$server_name/.requirements_hash"

    local current_hash
    current_hash=$(requirements_hash "$server_path")

    if [[ -f "$hash_file" ]]; then
        local stored_hash
        stored_hash=$(cat "$hash_file")
        if [[ "$current_hash" == "$stored_hash" ]]; then
            return 1  # Not changed
        fi
    fi
    return 0  # Changed or no hash file
}

# Save requirements hash after successful install
save_requirements_hash() {
    local server_name="$1"
    local server_path="$2"
    local hash_file="$VENV_CACHE_DIR/$server_name/.requirements_hash"

    requirements_hash "$server_path" > "$hash_file"
}

# ============================================================================
# Main Setup Function
# ============================================================================

setup_server() {
    local server_name="$1"
    local server_path="$REPO_ROOT/mcp-servers/$server_name"
    local venv_path="$VENV_CACHE_DIR/$server_name/.venv"

    # Verify server exists in repo
    if [[ ! -d "$server_path" ]]; then
        log_error "$server_name: source directory not found at $server_path"
        return 1
    fi

    # Check-only mode
    if [[ "$CHECK_ONLY" == true ]]; then
        if [[ -f "$venv_path/bin/python" ]]; then
            log_ok "$server_name: venv exists"
        else
            log_error "$server_name: venv MISSING"
            return 1
        fi
        return 0
    fi

    # Quick mode: skip if requirements unchanged
    if [[ "$QUICK_MODE" == true ]] && [[ -f "$venv_path/bin/python" ]]; then
        if ! requirements_changed "$server_name" "$server_path"; then
            log_skip "$server_name: requirements unchanged"
            return 0
        fi
    fi

    log_info "$server_name: setting up venv..."

    # Create cache directory
    mkdir -p "$VENV_CACHE_DIR/$server_name"

    # Create venv if missing
    if [[ ! -d "$venv_path" ]]; then
        python3 -m venv "$venv_path"
        log_ok "$server_name: venv created"
    fi

    # Activate and install
    # shellcheck disable=SC1091
    source "$venv_path/bin/activate"

    # Upgrade pip quietly
    pip install -q --upgrade pip

    # Install requirements (incremental - pip handles already-installed)
    if [[ -f "$server_path/requirements.txt" ]]; then
        pip install -q -r "$server_path/requirements.txt"
    fi

    # Install local package in editable mode if pyproject.toml exists
    if [[ -f "$server_path/pyproject.toml" ]]; then
        pip install -q -e "$server_path"
        log_ok "$server_name: package installed (editable)"
    fi

    deactivate

    # Save hash for quick mode
    save_requirements_hash "$server_name" "$server_path"

    log_ok "$server_name: ready"
}

# ============================================================================
# Create Symlinks (for backward compatibility)
# ============================================================================

create_symlinks() {
    log_info "Creating symlinks for backward compatibility..."

    for server_name in "${MCP_SERVERS[@]}"; do
        local server_path="$REPO_ROOT/mcp-servers/$server_name"
        local venv_path="$VENV_CACHE_DIR/$server_name/.venv"
        local link_path="$server_path/.venv"

        # Skip if source doesn't exist
        [[ ! -d "$server_path" ]] && continue

        # Skip if venv not in cache
        [[ ! -d "$venv_path" ]] && continue

        # Remove existing venv or symlink
        if [[ -L "$link_path" ]]; then
            rm "$link_path"
        elif [[ -d "$link_path" ]]; then
            log_warn "$server_name: removing old venv directory (now using cache)"
            rm -rf "$link_path"
        fi

        # Create symlink
        ln -s "$venv_path" "$link_path"
        log_ok "$server_name: symlink created"
    done
}

# ============================================================================
# Main
# ============================================================================

main() {
    echo "=============================================="
    echo " MCP Server Venv Manager"
    echo "=============================================="
    echo "Cache: $VENV_CACHE_DIR"
    echo ""

    local failed=0

    if [[ -n "$SPECIFIC_SERVER" ]]; then
        # Setup specific server
        if setup_server "$SPECIFIC_SERVER"; then
            :  # success
        else
            failed=1
        fi
    else
        # Setup all servers
        for server in "${MCP_SERVERS[@]}"; do
            if ! setup_server "$server"; then
                ((failed++)) || true
            fi
        done
    fi

    # Create symlinks for backward compatibility
    if [[ "$CHECK_ONLY" != true ]]; then
        create_symlinks
    fi

    echo ""
    if [[ $failed -eq 0 ]]; then
        log_ok "All MCP servers ready"
    else
        log_error "$failed server(s) failed"
        return 1
    fi
}

main "$@"

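# Illustrative result of a successful run (paths follow the configuration above):
#   ~/.cache/claude-mcp-venvs/leo-claude-mktplace/gitea/.venv   <- real venv
#   mcp-servers/gitea/.venv -> symlink into that cache
#
# Typical invocations:
#   ./scripts/setup-venvs.sh --quick   # near-instant when hashes match
#   ./scripts/setup-venvs.sh --check   # report missing venvs, install nothing
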
@@ -72,11 +72,16 @@ setup_shared_mcp() {
        log_success "$server_name venv created"
    fi

    # Install/update dependencies
    # Install/update dependencies and local package
    if [[ -f "requirements.txt" ]]; then
        source .venv/bin/activate
        pip install -q --upgrade pip
        pip install -q -r requirements.txt
        # Install local package in editable mode (required for MCP server to work)
        if [[ -f "pyproject.toml" ]]; then
            pip install -q -e .
            log_success "$server_name package installed (editable mode)"
        fi
        deactivate
        log_success "$server_name dependencies installed"
    else
@@ -125,6 +130,24 @@ verify_symlinks() {
        log_error "data-platform symlink missing"
        log_todo "Run: ln -s ../../../mcp-servers/data-platform plugins/data-platform/mcp-servers/data-platform"
    fi

    # Check viz-platform -> viz-platform symlink
    local vizplatform_link="$REPO_ROOT/plugins/viz-platform/mcp-servers/viz-platform"
    if [[ -L "$vizplatform_link" ]]; then
        log_success "viz-platform symlink exists"
    else
        log_error "viz-platform symlink missing"
        log_todo "Run: ln -s ../../../mcp-servers/viz-platform plugins/viz-platform/mcp-servers/viz-platform"
    fi

    # Check contract-validator -> contract-validator symlink
    local contractvalidator_link="$REPO_ROOT/plugins/contract-validator/mcp-servers/contract-validator"
    if [[ -L "$contractvalidator_link" ]]; then
        log_success "contract-validator symlink exists"
    else
        log_error "contract-validator symlink missing"
        log_todo "Run: ln -s ../../../mcp-servers/contract-validator plugins/contract-validator/mcp-servers/contract-validator"
    fi
}

# --- Section 3: Config File Templates ---

@@ -301,7 +324,7 @@ print_report() {
# --- Main ---
main() {
    echo "=============================================="
    echo " Leo Claude Marketplace Setup (v3.0.0)"
    echo " Leo Claude Marketplace Setup (v5.1.0)"
    echo "=============================================="
    echo ""

@@ -309,6 +332,8 @@ main() {
    setup_shared_mcp "gitea"
    setup_shared_mcp "netbox"
    setup_shared_mcp "data-platform"
    setup_shared_mcp "viz-platform"
    setup_shared_mcp "contract-validator"

    # Verify symlinks from plugins to shared MCP servers
    verify_symlinks

169
scripts/venv-repair.sh
Executable file
@@ -0,0 +1,169 @@
#!/usr/bin/env bash
#
# venv-repair.sh - Fast MCP venv auto-repair for SessionStart hooks
#
# This script is designed to run at session start. It:
#   1. Checks if venvs exist in external cache (~/.cache/claude-mcp-venvs/)
#   2. Creates symlinks from marketplace to cache (instant operation)
#   3. Only runs pip install if cache is missing (first install)
#
# Output format: All messages prefixed with [mcp-venv] for hook display
#
# Usage:
#   ./scripts/venv-repair.sh           # Auto-repair (default)
#   ./scripts/venv-repair.sh --silent  # Silent mode (no output unless error)
#

set -euo pipefail

# ============================================================================
# Configuration
# ============================================================================

PREFIX="[mcp-venv]"
VENV_CACHE_DIR="${HOME}/.cache/claude-mcp-venvs/leo-claude-mktplace"
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_ROOT="$(dirname "$SCRIPT_DIR")"

# MCP servers
MCP_SERVERS=(gitea netbox data-platform viz-platform contract-validator)

# Parse args
SILENT=false
[[ "${1:-}" == "--silent" ]] && SILENT=true

log() {
    [[ "$SILENT" == true ]] && return
    echo "$PREFIX $1"
}

log_error() {
    echo "$PREFIX ERROR: $1" >&2
}

# ============================================================================
# Check if all venvs exist in cache
# ============================================================================

cache_complete() {
    for server in "${MCP_SERVERS[@]}"; do
        local venv_python="$VENV_CACHE_DIR/$server/.venv/bin/python"
        [[ ! -f "$venv_python" ]] && return 1
    done
    return 0
}

# ============================================================================
# Create symlinks from marketplace to cache
# ============================================================================

create_symlink() {
    local server_name="$1"
    local server_path="$REPO_ROOT/mcp-servers/$server_name"
    local venv_cache="$VENV_CACHE_DIR/$server_name/.venv"
    local venv_link="$server_path/.venv"

    # Skip if server doesn't exist
    [[ ! -d "$server_path" ]] && return 0

    # Skip if cache doesn't exist
    [[ ! -d "$venv_cache" ]] && return 1

    # Already correct symlink?
    if [[ -L "$venv_link" ]]; then
        local target
        target=$(readlink "$venv_link")
        [[ "$target" == "$venv_cache" ]] && return 0
        rm "$venv_link"
    elif [[ -d "$venv_link" ]]; then
        # Old venv directory exists - remove it (the venv now lives in the cache)
        rm -rf "$venv_link"
    fi

    # Create symlink
    ln -s "$venv_cache" "$venv_link"
    return 0
}

create_all_symlinks() {
    local created=0
    for server in "${MCP_SERVERS[@]}"; do
        if create_symlink "$server"; then
            ((created++)) || true
        fi
    done
    [[ $created -gt 0 ]] && log "Restored $created venv symlinks"
}

# ============================================================================
# Full setup (only if cache missing)
# ============================================================================

setup_server() {
    local server_name="$1"
    local server_path="$REPO_ROOT/mcp-servers/$server_name"
    local venv_path="$VENV_CACHE_DIR/$server_name/.venv"

    [[ ! -d "$server_path" ]] && return 0

    mkdir -p "$VENV_CACHE_DIR/$server_name"

    # Create venv
    if [[ ! -d "$venv_path" ]]; then
        python3 -m venv "$venv_path"
    fi

    # Install dependencies
    # shellcheck disable=SC1091
    source "$venv_path/bin/activate"
    pip install -q --upgrade pip

    if [[ -f "$server_path/requirements.txt" ]]; then
        pip install -q -r "$server_path/requirements.txt"
    fi

    if [[ -f "$server_path/pyproject.toml" ]]; then
        pip install -q -e "$server_path"
    fi

    deactivate

    # Save hash for future quick checks
    local hash_file="$VENV_CACHE_DIR/$server_name/.requirements_hash"
    {
        if [[ -f "$server_path/requirements.txt" ]]; then
            cat "$server_path/requirements.txt"
        fi
        if [[ -f "$server_path/pyproject.toml" ]]; then
            cat "$server_path/pyproject.toml"
        fi
        echo ""  # Ensure non-empty input for sha256sum
    } | sha256sum | cut -d' ' -f1 > "$hash_file"
}

full_setup() {
    log "First run - setting up MCP venvs (this only happens once)..."
    for server in "${MCP_SERVERS[@]}"; do
        log "  Setting up $server..."
        setup_server "$server"
    done
    log "Setup complete. Future sessions will be instant."
}

# ============================================================================
# Main
# ============================================================================

main() {
    # Fast path: cache exists, just ensure symlinks
    if cache_complete; then
        create_all_symlinks
        exit 0
    fi

    # Slow path: need to create venvs (first install)
    full_setup
    create_all_symlinks
}

main "$@"

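# For reference, a SessionStart hook can call this script in silent mode so it
# never blocks startup; a minimal sketch (marketplace path as used in the
# verification snippet below):
#   M="$HOME/.claude/plugins/marketplaces/leo-claude-mktplace"
#   [[ -x "$M/scripts/venv-repair.sh" ]] && "$M/scripts/venv-repair.sh" --silent || true
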
@@ -23,7 +23,7 @@ if [ -d ~/.claude/plugins/cache/leo-claude-mktplace ]; then
fi

# Verify installed hooks are command type
for plugin in doc-guardian code-sentinel projman pr-review project-hygiene data-platform; do
for plugin in doc-guardian code-sentinel projman pr-review project-hygiene data-platform cmdb-assistant; do
  HOOK_FILE=~/.claude/plugins/marketplaces/leo-claude-mktplace/plugins/$plugin/hooks/hooks.json
  if [ -f "$HOOK_FILE" ]; then
    if grep -q '"type": "command"' "$HOOK_FILE" || grep -q '"type":"command"' "$HOOK_FILE"; then