Compare commits

1 Commit

Commit 2248184359
fix: protected branch detection and non-blocking hooks
- Add protected branch detection to /commit command (Step 1)
- Warn users before committing to protected branches
- Offer to create feature branch automatically
- Rewrite doc-guardian hook to be truly non-blocking
- Enforce strict [plugin-name] prefix in all hook outputs
- Add forbidden words list to prevent accidental blocking

Fixes #109, #110

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-23 11:05:12 -05:00
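
The two behaviors this commit describes (warn on protected branches, never block) reduce to a small shell pattern. A minimal sketch, assuming a bash hook and illustrative branch names; this is not the shipped plugin code:

```bash
#!/usr/bin/env bash
# Sketch only: warn-but-never-block, with every output line carrying
# the [plugin-name] prefix the commit message mandates.
PREFIX="[git-flow]"
branch=$(git rev-parse --abbrev-ref HEAD)

case "$branch" in
  main|master|development|staging)
    echo "$PREFIX '$branch' is a protected branch."
    echo "$PREFIX Consider: git checkout -b feature/my-change"
    ;;
esac

exit 0  # advisory only; a non-zero status could block the triggering action
```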
588 changed files with 20007 additions and 53007 deletions


@@ -1,205 +0,0 @@
{
"name": "leo-claude-mktplace",
"owner": {
"name": "Leo Miranda",
"email": "leobmiranda@gmail.com"
},
"metadata": {
"description": "Project management plugins with Gitea and NetBox integrations",
"version": "7.1.0"
},
"plugins": [
{
"name": "projman",
"version": "7.1.0",
"description": "Sprint planning and project management with Gitea integration",
"source": "./plugins/projman",
"author": {
"name": "Leo Miranda",
"email": "leobmiranda@gmail.com"
},
"homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/projman/README.md",
"repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
"hooks": ["./hooks/hooks.json"],
"category": "development",
"tags": ["sprint", "agile", "gitea", "project-management"],
"license": "MIT"
},
{
"name": "doc-guardian",
"version": "7.1.0",
"description": "Automatic documentation drift detection and synchronization",
"source": "./plugins/doc-guardian",
"author": {
"name": "Leo Miranda",
"email": "leobmiranda@gmail.com"
},
"homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/doc-guardian/README.md",
"repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
"hooks": ["./hooks/hooks.json"],
"category": "productivity",
"tags": ["documentation", "drift-detection", "sync"],
"license": "MIT"
},
{
"name": "code-sentinel",
"version": "7.1.0",
"description": "Security scanning and code refactoring tools",
"source": "./plugins/code-sentinel",
"author": {
"name": "Leo Miranda",
"email": "leobmiranda@gmail.com"
},
"homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/code-sentinel/README.md",
"repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
"hooks": ["./hooks/hooks.json"],
"category": "security",
"tags": ["security-scan", "refactoring", "vulnerabilities"],
"license": "MIT"
},
{
"name": "project-hygiene",
"version": "7.1.0",
"description": "Post-task cleanup hook that removes temp files and manages orphaned files",
"source": "./plugins/project-hygiene",
"author": {
"name": "Leo Miranda",
"email": "leobmiranda@gmail.com"
},
"homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/project-hygiene/README.md",
"repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
"hooks": ["./hooks/hooks.json"],
"category": "productivity",
"tags": ["cleanup", "automation", "hygiene"],
"license": "MIT"
},
{
"name": "cmdb-assistant",
"version": "7.1.0",
"description": "NetBox CMDB integration with data quality validation and machine registration",
"source": "./plugins/cmdb-assistant",
"author": {
"name": "Leo Miranda",
"email": "leobmiranda@gmail.com"
},
"homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/cmdb-assistant/README.md",
"repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
"hooks": ["./hooks/hooks.json"],
"category": "infrastructure",
"tags": ["cmdb", "netbox", "dcim", "ipam", "data-quality", "validation"],
"license": "MIT"
},
{
"name": "claude-config-maintainer",
"version": "7.1.0",
"description": "CLAUDE.md and settings.local.json optimization for Claude Code projects",
"source": "./plugins/claude-config-maintainer",
"author": {
"name": "Leo Miranda",
"email": "leobmiranda@gmail.com"
},
"homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/claude-config-maintainer/README.md",
"repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
"hooks": ["./hooks/hooks.json"],
"category": "development",
"tags": ["claude-md", "configuration", "optimization"],
"license": "MIT"
},
{
"name": "clarity-assist",
"version": "7.1.0",
"description": "Prompt optimization and requirement clarification with ND-friendly accommodations",
"source": "./plugins/clarity-assist",
"author": {
"name": "Leo Miranda",
"email": "leobmiranda@gmail.com"
},
"homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/clarity-assist/README.md",
"repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
"hooks": ["./hooks/hooks.json"],
"category": "productivity",
"tags": ["prompts", "requirements", "clarification", "nd-friendly"],
"license": "MIT"
},
{
"name": "git-flow",
"version": "7.1.0",
"description": "Git workflow automation with intelligent commit messages and branch management",
"source": "./plugins/git-flow",
"author": {
"name": "Leo Miranda",
"email": "leobmiranda@gmail.com"
},
"homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/git-flow/README.md",
"repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
"hooks": ["./hooks/hooks.json"],
"category": "development",
"tags": ["git", "workflow", "commits", "branching"],
"license": "MIT"
},
{
"name": "pr-review",
"version": "7.1.0",
"description": "Multi-agent pull request review with confidence scoring and actionable feedback",
"source": "./plugins/pr-review",
"author": {
"name": "Leo Miranda",
"email": "leobmiranda@gmail.com"
},
"homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/pr-review/README.md",
"repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
"hooks": ["./hooks/hooks.json"],
"category": "development",
"tags": ["code-review", "pull-requests", "security", "quality"],
"license": "MIT"
},
{
"name": "data-platform",
"version": "7.1.0",
"description": "Data engineering tools with pandas, PostgreSQL/PostGIS, and dbt integration",
"source": "./plugins/data-platform",
"author": {
"name": "Leo Miranda",
"email": "leobmiranda@gmail.com"
},
"homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/data-platform/README.md",
"repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
"hooks": ["./hooks/hooks.json"],
"category": "data",
"tags": ["pandas", "postgresql", "postgis", "dbt", "data-engineering", "etl"],
"license": "MIT"
},
{
"name": "viz-platform",
"version": "7.1.0",
"description": "Visualization tools with Dash Mantine Components validation, Plotly charts, and theming",
"source": "./plugins/viz-platform",
"author": {
"name": "Leo Miranda",
"email": "leobmiranda@gmail.com"
},
"homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/viz-platform/README.md",
"repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
"hooks": ["./hooks/hooks.json"],
"category": "visualization",
"tags": ["dash", "plotly", "mantine", "charts", "dashboards", "theming", "dmc"],
"license": "MIT"
},
{
"name": "contract-validator",
"version": "7.1.0",
"description": "Cross-plugin compatibility validation and Claude.md agent verification",
"source": "./plugins/contract-validator",
"author": {
"name": "Leo Miranda",
"email": "leobmiranda@gmail.com"
},
"homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/contract-validator/README.md",
"repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
"hooks": ["./hooks/hooks.json"],
"category": "development",
"tags": ["validation", "contracts", "compatibility", "agents", "interfaces", "cross-plugin"],
"license": "MIT"
}
]
}


@@ -1,109 +0,0 @@
{
"name": "leo-claude-mktplace",
"owner": {
"name": "Leo Miranda",
"email": "leobmiranda@gmail.com"
},
"metadata": {
"description": "Project management plugins with Gitea and NetBox integrations",
"version": "7.1.0"
},
"plugins": [
{
"name": "projman",
"version": "7.1.0",
"description": "Sprint planning and project management with Gitea integration",
"source": "./plugins/projman",
"author": {
"name": "Leo Miranda",
"email": "leobmiranda@gmail.com"
},
"homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/projman/README.md",
"repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
"hooks": ["./hooks/hooks.json"],
"category": "development",
"tags": ["sprint", "agile", "gitea", "project-management"],
"license": "MIT"
},
{
"name": "git-flow",
"version": "7.1.0",
"description": "Git workflow automation with intelligent commit messages and branch management",
"source": "./plugins/git-flow",
"author": {
"name": "Leo Miranda",
"email": "leobmiranda@gmail.com"
},
"homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/git-flow/README.md",
"repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
"hooks": ["./hooks/hooks.json"],
"category": "development",
"tags": ["git", "workflow", "commits", "branching"],
"license": "MIT"
},
{
"name": "pr-review",
"version": "7.1.0",
"description": "Multi-agent pull request review with confidence scoring and actionable feedback",
"source": "./plugins/pr-review",
"author": {
"name": "Leo Miranda",
"email": "leobmiranda@gmail.com"
},
"homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/pr-review/README.md",
"repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
"hooks": ["./hooks/hooks.json"],
"category": "development",
"tags": ["code-review", "pull-requests", "security", "quality"],
"license": "MIT"
},
{
"name": "clarity-assist",
"version": "7.1.0",
"description": "Prompt optimization and requirement clarification with ND-friendly accommodations",
"source": "./plugins/clarity-assist",
"author": {
"name": "Leo Miranda",
"email": "leobmiranda@gmail.com"
},
"homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/clarity-assist/README.md",
"repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
"hooks": ["./hooks/hooks.json"],
"category": "productivity",
"tags": ["prompts", "requirements", "clarification", "nd-friendly"],
"license": "MIT"
},
{
"name": "code-sentinel",
"version": "7.1.0",
"description": "Security scanning and code refactoring tools",
"source": "./plugins/code-sentinel",
"author": {
"name": "Leo Miranda",
"email": "leobmiranda@gmail.com"
},
"homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/code-sentinel/README.md",
"repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
"hooks": ["./hooks/hooks.json"],
"category": "security",
"tags": ["security-scan", "refactoring", "vulnerabilities"],
"license": "MIT"
},
{
"name": "doc-guardian",
"version": "7.1.0",
"description": "Automatic documentation drift detection and synchronization",
"source": "./plugins/doc-guardian",
"author": {
"name": "Leo Miranda",
"email": "leobmiranda@gmail.com"
},
"homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/doc-guardian/README.md",
"repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
"hooks": ["./hooks/hooks.json"],
"category": "productivity",
"tags": ["documentation", "drift-detection", "sync"],
"license": "MIT"
}
]
}


@@ -6,12 +6,12 @@
},
"metadata": {
"description": "Project management plugins with Gitea and NetBox integrations",
"version": "9.1.2"
"version": "3.1.0"
},
"plugins": [
{
"name": "projman",
"version": "9.0.1",
"version": "3.1.0",
"description": "Sprint planning and project management with Gitea integration",
"source": "./plugins/projman",
"author": {
@@ -20,18 +20,14 @@
},
"homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/projman/README.md",
"repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
"mcpServers": ["./.mcp.json"],
"category": "development",
"tags": [
"sprint",
"agile",
"gitea",
"project-management"
],
"tags": ["sprint", "agile", "gitea", "project-management"],
"license": "MIT"
},
{
"name": "doc-guardian",
"version": "9.0.1",
"version": "1.0.0",
"description": "Automatic documentation drift detection and synchronization",
"source": "./plugins/doc-guardian",
"author": {
@@ -40,17 +36,14 @@
},
"homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/doc-guardian/README.md",
"repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
"hooks": ["./hooks/hooks.json"],
"category": "productivity",
"tags": [
"documentation",
"drift-detection",
"sync"
],
"tags": ["documentation", "drift-detection", "sync"],
"license": "MIT"
},
{
"name": "code-sentinel",
"version": "9.0.1",
"version": "1.0.0",
"description": "Security scanning and code refactoring tools",
"source": "./plugins/code-sentinel",
"author": {
@@ -59,21 +52,15 @@
},
"homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/code-sentinel/README.md",
"repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
"hooks": [
"./hooks/hooks.json"
],
"hooks": ["./hooks/hooks.json"],
"category": "security",
"tags": [
"security-scan",
"refactoring",
"vulnerabilities"
],
"tags": ["security-scan", "refactoring", "vulnerabilities"],
"license": "MIT"
},
{
"name": "project-hygiene",
"version": "9.0.1",
"description": "Manual project hygiene checks — temp files, misplaced files, empty dirs, debug artifacts",
"version": "0.1.0",
"description": "Post-task cleanup hook that removes temp files and manages orphaned files",
"source": "./plugins/project-hygiene",
"author": {
"name": "Leo Miranda",
@@ -81,19 +68,15 @@
},
"homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/project-hygiene/README.md",
"repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
"hooks": ["./hooks/hooks.json"],
"category": "productivity",
"tags": [
"cleanup",
"hygiene",
"maintenance",
"manual-check"
],
"tags": ["cleanup", "automation", "hygiene"],
"license": "MIT"
},
{
"name": "cmdb-assistant",
"version": "9.0.1",
"description": "NetBox CMDB integration with data quality validation and machine registration",
"version": "1.0.0",
"description": "NetBox CMDB integration for infrastructure management",
"source": "./plugins/cmdb-assistant",
"author": {
"name": "Leo Miranda",
@@ -101,24 +84,15 @@
},
"homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/cmdb-assistant/README.md",
"repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
"hooks": [
"./hooks/hooks.json"
],
"mcpServers": ["./.mcp.json"],
"category": "infrastructure",
"tags": [
"cmdb",
"netbox",
"dcim",
"ipam",
"data-quality",
"validation"
],
"tags": ["cmdb", "netbox", "dcim", "ipam"],
"license": "MIT"
},
{
"name": "claude-config-maintainer",
"version": "9.0.1",
"description": "CLAUDE.md and settings.local.json optimization for Claude Code projects",
"version": "1.0.0",
"description": "CLAUDE.md optimization and maintenance for Claude Code projects",
"source": "./plugins/claude-config-maintainer",
"author": {
"name": "Leo Miranda",
@@ -127,16 +101,12 @@
"homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/claude-config-maintainer/README.md",
"repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
"category": "development",
"tags": [
"claude-md",
"configuration",
"optimization"
],
"tags": ["claude-md", "configuration", "optimization"],
"license": "MIT"
},
{
"name": "clarity-assist",
"version": "9.0.1",
"version": "1.0.0",
"description": "Prompt optimization and requirement clarification with ND-friendly accommodations",
"source": "./plugins/clarity-assist",
"author": {
@@ -145,21 +115,13 @@
},
"homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/clarity-assist/README.md",
"repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
"hooks": [
"./hooks/hooks.json"
],
"category": "productivity",
"tags": [
"prompts",
"requirements",
"clarification",
"nd-friendly"
],
"tags": ["prompts", "requirements", "clarification", "nd-friendly"],
"license": "MIT"
},
{
"name": "git-flow",
"version": "9.0.1",
"version": "1.0.0",
"description": "Git workflow automation with intelligent commit messages and branch management",
"source": "./plugins/git-flow",
"author": {
@@ -168,21 +130,13 @@
},
"homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/git-flow/README.md",
"repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
"hooks": [
"./hooks/hooks.json"
],
"category": "development",
"tags": [
"git",
"workflow",
"commits",
"branching"
],
"tags": ["git", "workflow", "commits", "branching"],
"license": "MIT"
},
{
"name": "pr-review",
"version": "9.0.1",
"version": "1.0.0",
"description": "Multi-agent pull request review with confidence scoring and actionable feedback",
"source": "./plugins/pr-review",
"author": {
@@ -191,252 +145,9 @@
},
"homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/pr-review/README.md",
"repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
"mcpServers": ["./.mcp.json"],
"category": "development",
"tags": [
"code-review",
"pull-requests",
"security",
"quality"
],
"license": "MIT"
},
{
"name": "data-platform",
"version": "9.0.1",
"description": "Data engineering tools with pandas, PostgreSQL/PostGIS, and dbt integration",
"source": "./plugins/data-platform",
"author": {
"name": "Leo Miranda",
"email": "leobmiranda@gmail.com"
},
"homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/data-platform/README.md",
"repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
"category": "data",
"tags": [
"pandas",
"postgresql",
"postgis",
"dbt",
"data-engineering",
"etl"
],
"license": "MIT"
},
{
"name": "viz-platform",
"version": "9.0.1",
"description": "Visualization tools with Dash Mantine Components validation, Plotly charts, and theming",
"source": "./plugins/viz-platform",
"author": {
"name": "Leo Miranda",
"email": "leobmiranda@gmail.com"
},
"homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/viz-platform/README.md",
"repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
"category": "visualization",
"tags": [
"dash",
"plotly",
"mantine",
"charts",
"dashboards",
"theming",
"dmc"
],
"license": "MIT"
},
{
"name": "contract-validator",
"version": "9.0.1",
"description": "Cross-plugin compatibility validation and Claude.md agent verification",
"source": "./plugins/contract-validator",
"author": {
"name": "Leo Miranda",
"email": "leobmiranda@gmail.com"
},
"homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/contract-validator/README.md",
"repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
"category": "development",
"tags": [
"validation",
"contracts",
"compatibility",
"agents",
"interfaces",
"cross-plugin"
],
"license": "MIT"
},
{
"name": "saas-api-platform",
"version": "0.1.0",
"description": "REST and GraphQL API scaffolding for FastAPI and Express projects",
"source": "./plugins/saas-api-platform",
"author": {
"name": "Leo Miranda",
"email": "leobmiranda@gmail.com"
},
"homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/saas-api-platform/README.md",
"repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
"category": "development",
"tags": [
"api",
"rest",
"graphql",
"fastapi",
"express",
"openapi"
],
"license": "MIT"
},
{
"name": "saas-db-migrate",
"version": "0.1.0",
"description": "Database migration management for Alembic, Prisma, and raw SQL",
"source": "./plugins/saas-db-migrate",
"author": {
"name": "Leo Miranda",
"email": "leobmiranda@gmail.com"
},
"homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/saas-db-migrate/README.md",
"repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
"category": "development",
"tags": [
"database",
"migrations",
"alembic",
"prisma",
"sql",
"schema"
],
"license": "MIT"
},
{
"name": "saas-react-platform",
"version": "0.1.0",
"description": "React frontend development toolkit for Next.js and Vite projects",
"source": "./plugins/saas-react-platform",
"author": {
"name": "Leo Miranda",
"email": "leobmiranda@gmail.com"
},
"homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/saas-react-platform/README.md",
"repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
"category": "development",
"tags": [
"react",
"nextjs",
"vite",
"typescript",
"frontend",
"components"
],
"license": "MIT"
},
{
"name": "saas-test-pilot",
"version": "0.1.0",
"description": "Test automation toolkit for pytest, Jest, Vitest, and Playwright",
"source": "./plugins/saas-test-pilot",
"author": {
"name": "Leo Miranda",
"email": "leobmiranda@gmail.com"
},
"homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/saas-test-pilot/README.md",
"repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
"category": "development",
"tags": [
"testing",
"pytest",
"jest",
"vitest",
"playwright",
"coverage"
],
"license": "MIT"
},
{
"name": "data-seed",
"version": "0.1.0",
"description": "Test data generation and database seeding with relationship-aware profiles",
"source": "./plugins/data-seed",
"author": {
"name": "Leo Miranda",
"email": "leobmiranda@gmail.com"
},
"homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/data-seed/README.md",
"repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
"category": "data",
"tags": [
"seed-data",
"test-data",
"faker",
"fixtures",
"database"
],
"license": "MIT"
},
{
"name": "ops-release-manager",
"version": "0.1.0",
"description": "Release management with semantic versioning, changelogs, and tag automation",
"source": "./plugins/ops-release-manager",
"author": {
"name": "Leo Miranda",
"email": "leobmiranda@gmail.com"
},
"homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/ops-release-manager/README.md",
"repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
"category": "development",
"tags": [
"release",
"semver",
"changelog",
"versioning",
"tags"
],
"license": "MIT"
},
{
"name": "ops-deploy-pipeline",
"version": "0.1.0",
"description": "CI/CD deployment pipeline management for Docker Compose and systemd services",
"source": "./plugins/ops-deploy-pipeline",
"author": {
"name": "Leo Miranda",
"email": "leobmiranda@gmail.com"
},
"homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/ops-deploy-pipeline/README.md",
"repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
"category": "infrastructure",
"tags": [
"deploy",
"docker-compose",
"systemd",
"caddy",
"cicd"
],
"license": "MIT"
},
{
"name": "debug-mcp",
"version": "0.1.0",
"description": "MCP server debugging, inspection, and development toolkit",
"source": "./plugins/debug-mcp",
"author": {
"name": "Leo Miranda",
"email": "leobmiranda@gmail.com"
},
"homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/debug-mcp/README.md",
"repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
"category": "development",
"tags": [
"mcp",
"debugging",
"diagnostics",
"server",
"development"
],
"tags": ["code-review", "pull-requests", "security", "quality"],
"license": "MIT"
}
]


@@ -1,3 +0,0 @@
{
"model": "opusplan"
}

.env (1 line changed)

@@ -1 +0,0 @@
GITEA_REPO=personal-projects/leo-claude-mktplace

.gitignore (vendored, 10 lines changed)

@@ -31,8 +31,6 @@ venv/
ENV/
env/
.venv/
.venv
**/.venv
# PyCharm
.idea/
@@ -84,13 +82,6 @@ Thumbs.db
# Claude Code
.claude/settings.local.json
.claude/history/
.claude/backups/
# Doc Guardian transient files
.doc-guardian-queue
# Development convenience links
.marketplaces-link
# Logs
logs/
@@ -132,5 +123,4 @@ site/
*credentials*
*secret*
*token*
!**/token-budget-report.md
!.gitkeep


@@ -1,24 +0,0 @@
{
"mcpServers": {
"gitea": {
"command": "./mcp-servers/gitea/run.sh",
"args": []
},
"netbox": {
"command": "./mcp-servers/netbox/run.sh",
"args": []
},
"viz-platform": {
"command": "./mcp-servers/viz-platform/run.sh",
"args": []
},
"data-platform": {
"command": "./mcp-servers/data-platform/run.sh",
"args": []
},
"contract-validator": {
"command": "./mcp-servers/contract-validator/run.sh",
"args": []
}
}
}


@@ -1,8 +0,0 @@
{
"mcpServers": {
"gitea": {
"command": "./mcp-servers/gitea/run.sh",
"args": []
}
}
}


@@ -1,24 +0,0 @@
{
"mcpServers": {
"gitea": {
"command": "./mcp-servers/gitea/run.sh",
"args": []
},
"netbox": {
"command": "./mcp-servers/netbox/run.sh",
"args": []
},
"viz-platform": {
"command": "./mcp-servers/viz-platform/run.sh",
"args": []
},
"data-platform": {
"command": "./mcp-servers/data-platform/run.sh",
"args": []
},
"contract-validator": {
"command": "./mcp-servers/contract-validator/run.sh",
"args": []
}
}
}

File diff suppressed because it is too large.

CLAUDE.md (534 lines changed)

@@ -1,186 +1,26 @@
# CLAUDE.md
## ⛔ MANDATORY BEHAVIOR RULES - READ FIRST
**These rules are NON-NEGOTIABLE. Violating them wastes the user's time and money.**
### 1. WHEN USER ASKS YOU TO CHECK SOMETHING - CHECK EVERYTHING
- Search ALL locations, not just where you think it is
- Check cache directories: `~/.claude/plugins/cache/`
- Check installed: `~/.claude/plugins/marketplaces/`
- Check source directories
- **NEVER say "no" or "that's not the issue" without exhaustive verification**
### 2. WHEN USER SAYS SOMETHING IS WRONG - BELIEVE THEM
- The user knows their system better than you
- Investigate thoroughly before disagreeing
- **Your confidence is often wrong. User's instincts are often right.**
### 3. NEVER SAY "DONE" WITHOUT VERIFICATION
- Run the actual command/script to verify
- Show the output to the user
- **"Done" means VERIFIED WORKING, not "I made changes"**
### 4. SHOW EXACTLY WHAT USER ASKS FOR
- If user asks for messages, show the MESSAGES
- If user asks for code, show the CODE
- **Do not interpret or summarize unless asked**
**FAILURE TO FOLLOW THESE RULES = WASTED USER TIME = UNACCEPTABLE**
---
This file provides guidance to Claude Code when working with code in this repository.
## ⛔ RULES - READ FIRST
### Behavioral Rules
| Rule | Summary |
|------|---------|
| **Check everything** | Search cache (`~/.claude/plugins/cache/`), installed (`~/.claude/plugins/marketplaces/`), and source (`~/claude-plugins-work/`) |
| **Believe the user** | User knows their system. Investigate before disagreeing. |
| **Verify before "done"** | Run commands, show output, check all locations. "Done" = verified working. |
| **Show what's asked** | Don't interpret or summarize unless asked. |
### After Plugin Updates
Run `./scripts/verify-hooks.sh`. If changes affect MCP servers or hooks, inform user to restart session.
**DO NOT clear cache mid-session** - breaks loaded MCP tools.
### NEVER USE CLI TOOLS FOR EXTERNAL SERVICES
- **FORBIDDEN:** `gh`, `tea`, `curl` to APIs, any CLI that talks to Gitea/GitHub/external services
- **REQUIRED:** Use MCP tools exclusively (`mcp__plugin_projman_gitea__*`, `mcp__plugin_pr-review_gitea__*`)
- **NO EXCEPTIONS.** Don't try CLI first. Don't fall back to CLI. MCP ONLY.
### NEVER PUSH DIRECTLY TO PROTECTED BRANCHES
- **FORBIDDEN:** `git push origin development`, `git push origin main`, `git push origin master`
- **REQUIRED:** Create feature branch → push feature branch → create PR via MCP
- If you accidentally commit to a protected branch locally: `git checkout -b fix/branch-name` then reset the protected branch
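
Expanded into a sequence, the recovery in the last bullet might look like this (a sketch; `development` and `fix/branch-name` are placeholders):

```bash
git checkout -b fix/branch-name       # carry the stray commit to a new branch
git checkout development              # go back to the protected branch
git reset --hard origin/development   # drop the local commit from it
git checkout fix/branch-name          # continue; push this branch and open a PR
```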
### Repository Rules
| Rule | Details |
|------|---------|
| **File creation** | Only in allowed paths. Use `.scratch/` for temp work. Verify against `docs/CANONICAL-PATHS.md` |
| **plugin.json location** | Must be in `.claude-plugin/` directory |
| **Hooks** | Use `hooks/hooks.json` (auto-discovered). Never inline in plugin.json |
| **MCP servers** | Defined in root `.mcp.json`. Use MCP tools, never CLI (`tea`, `gh`) |
| **Allowed root files** | `CLAUDE.md`, `README.md`, `LICENSE`, `CHANGELOG.md`, `.gitignore`, `.env.example` |
**Valid hook events:** `PreToolUse`, `PostToolUse`, `UserPromptSubmit`, `SessionStart`, `SessionEnd`, `Notification`, `Stop`, `SubagentStop`, `PreCompact`
### ⛔ MANDATORY: Before Any Code Change
**Claude MUST show this checklist BEFORE editing any file:**
#### 1. Impact Search Results
Run and show output of:
```bash
grep -rn "PATTERN" --include="*.sh" --include="*.md" --include="*.json" --include="*.py" | grep -v ".git"
```
#### 2. Files That Will Be Affected
Numbered list of every file to be modified, with the specific change for each.
#### 3. Files Searched But Not Changed (and why)
Proof that related files were checked and determined unchanged.
#### 4. Documentation That References This
List of docs that mention this feature/script/function.
**User verifies this list before Claude proceeds. If Claude skips this, STOP IMMEDIATELY.**
#### After Changes
Run the same grep and show results proving no references remain unaddressed.
---
## ⚠️ Development Context: We Build AND Use These Plugins
**This is a self-referential project.** We are:
1. **BUILDING** a plugin marketplace (source code in `plugins/`)
2. **USING** the installed marketplace to build it (dogfooding)
### Plugins ACTIVELY USED in This Project
These plugins are installed and should be used during development:
| Plugin | Used For |
|--------|----------|
| **projman** | Sprint planning, issue management, lessons learned |
| **git-flow** | Commits, branch management |
| **pr-review** | Pull request reviews |
| **doc-guardian** | Documentation drift detection |
| **code-sentinel** | Security scanning, refactoring |
| **clarity-assist** | Prompt clarification |
| **claude-config-maintainer** | CLAUDE.md optimization |
| **contract-validator** | Cross-plugin compatibility |
### Plugins NOT Used Here (Development Only)
These plugins exist in source but are **NOT relevant** to this project's workflow:
| Plugin | Why Not Used |
|--------|--------------|
| **data-platform** | For data engineering projects (pandas, PostgreSQL, dbt) |
| **viz-platform** | For dashboard projects (Dash, Plotly) |
| **cmdb-assistant** | For infrastructure projects (NetBox) |
| **saas-api-platform** | For REST/GraphQL API projects (FastAPI, Express) |
| **saas-db-migrate** | For database migration projects (Alembic, Prisma) |
| **saas-react-platform** | For React frontend projects (Next.js, Vite) |
| **saas-test-pilot** | For test automation projects (pytest, Jest, Playwright) |
| **data-seed** | For test data generation and seeding |
| **ops-release-manager** | For release management workflows |
| **ops-deploy-pipeline** | For deployment pipeline management |
| **debug-mcp** | For MCP server debugging and development |
**Do NOT suggest** `/data ingest`, `/data profile`, `/viz chart`, `/cmdb *`, `/api *`, `/db-migrate *`, `/react *`, `/test *`, `/seed *`, `/release *`, `/deploy *`, `/debug-mcp *` commands - they don't apply here.
### Key Distinction
| Context | Path | What To Do |
|---------|------|------------|
| **Editing plugin source** | `~/claude-plugins-work/plugins/` | Modify code, add features |
| **Using installed plugins** | `~/.claude/plugins/marketplaces/` | Run commands like `/sprint plan` |
When user says "run /sprint plan", use the INSTALLED plugin.
When user says "fix the sprint plan command", edit the SOURCE code.
---
## Project Overview
**Repository:** leo-claude-mktplace
**Version:** 9.1.2
**Version:** 3.0.1
**Status:** Production Ready
A plugin marketplace for Claude Code containing:
| Plugin | Description | Version |
|--------|-------------|---------|
| `projman` | Sprint planning and project management with Gitea integration | 9.0.1 |
| `git-flow` | Git workflow automation with smart commits and branch management | 9.0.1 |
| `pr-review` | Multi-agent PR review with confidence scoring | 9.0.1 |
| `clarity-assist` | Prompt optimization with ND-friendly accommodations | 9.0.1 |
| `doc-guardian` | Automatic documentation drift detection and synchronization | 9.0.1 |
| `code-sentinel` | Security scanning and code refactoring tools | 9.0.1 |
| `claude-config-maintainer` | CLAUDE.md optimization and maintenance | 9.0.1 |
| `cmdb-assistant` | NetBox CMDB integration for infrastructure management | 9.0.1 |
| `data-platform` | pandas, PostgreSQL, and dbt integration for data engineering | 9.0.1 |
| `viz-platform` | DMC validation, Plotly charts, and theming for dashboards | 9.0.1 |
| `contract-validator` | Cross-plugin compatibility validation and agent verification | 9.0.1 |
| `project-hygiene` | Manual project hygiene checks | 9.0.1 |
| `saas-api-platform` | REST/GraphQL API scaffolding for FastAPI and Express | 0.1.0 |
| `saas-db-migrate` | Database migration management for Alembic, Prisma, raw SQL | 0.1.0 |
| `saas-react-platform` | React frontend toolkit for Next.js and Vite | 0.1.0 |
| `saas-test-pilot` | Test automation for pytest, Jest, Vitest, Playwright | 0.1.0 |
| `data-seed` | Test data generation and database seeding | 0.1.0 |
| `ops-release-manager` | Release management with SemVer and changelog automation | 0.1.0 |
| `ops-deploy-pipeline` | Deployment pipeline for Docker Compose and systemd | 0.1.0 |
| `debug-mcp` | MCP server debugging and development toolkit | 0.1.0 |
| `projman` | Sprint planning and project management with Gitea integration | 3.0.0 |
| `git-flow` | Git workflow automation with smart commits and branch management | 1.0.0 |
| `pr-review` | Multi-agent PR review with confidence scoring | 1.0.0 |
| `clarity-assist` | Prompt optimization with ND-friendly accommodations | 1.0.0 |
| `doc-guardian` | Automatic documentation drift detection and synchronization | 1.0.0 |
| `code-sentinel` | Security scanning and code refactoring tools | 1.0.0 |
| `claude-config-maintainer` | CLAUDE.md optimization and maintenance | 1.0.0 |
| `cmdb-assistant` | NetBox CMDB integration for infrastructure management | 1.0.0 |
| `project-hygiene` | Post-task cleanup automation via hooks | 0.1.0 |
## Quick Start
@@ -189,110 +29,94 @@ A plugin marketplace for Claude Code containing:
./scripts/validate-marketplace.sh
# After updates
./scripts/post-update.sh # Rebuild venvs
./scripts/post-update.sh # Rebuild venvs, verify symlinks
```
### Plugin Commands - USE THESE in This Project
### Plugin Commands by Category
| Category | Commands |
|----------|----------|
| **Setup** | `/projman setup` (modes: `--full`, `--quick`, `--sync`) |
| **Sprint** | `/sprint plan`, `/sprint start`, `/sprint status` (with `--diagram`), `/sprint close` |
| **Quality** | `/sprint review`, `/sprint test` (modes: `run`, `gen`) |
| **Project** | `/project initiation`, `/project plan`, `/project status`, `/project close` |
| **ADR** | `/adr create`, `/adr list`, `/adr update`, `/adr supersede` |
| **RFC** | `/rfc create`, `/rfc list`, `/rfc review`, `/rfc approve`, `/rfc reject` |
| **PR Review** | `/pr review`, `/pr summary`, `/pr findings`, `/pr diff` |
| **Docs** | `/doc audit`, `/doc sync`, `/doc changelog-gen`, `/doc coverage`, `/doc stale-docs` |
| **Security** | `/sentinel scan`, `/sentinel refactor`, `/sentinel refactor-dry` |
| **Config** | `/claude-config analyze`, `/claude-config optimize`, `/claude-config diff`, `/claude-config lint` |
| **Validation** | `/cv validate`, `/cv check-agent`, `/cv list-interfaces`, `/cv dependency-graph`, `/cv status` |
| **Maintenance** | `/hygiene check` |
### Plugin Commands - NOT RELEVANT to This Project
These commands are being developed but don't apply to this project's workflow:
| Category | Commands | For Projects Using |
|----------|----------|-------------------|
| **Data** | `/data ingest`, `/data profile`, `/data schema`, `/data lineage`, `/data dbt-test` | pandas, PostgreSQL, dbt |
| **Visualization** | `/viz component`, `/viz chart`, `/viz dashboard`, `/viz theme` | Dash, Plotly dashboards |
| **CMDB** | `/cmdb search`, `/cmdb device`, `/cmdb sync` | NetBox infrastructure |
| **API** | `/api scaffold`, `/api validate`, `/api docs`, `/api middleware` | FastAPI, Express |
| **DB Migrate** | `/db-migrate generate`, `/db-migrate validate`, `/db-migrate plan` | Alembic, Prisma |
| **React** | `/react component`, `/react route`, `/react state`, `/react hook` | Next.js, Vite |
| **Testing** | `/test generate`, `/test coverage`, `/test fixtures`, `/test e2e` | pytest, Jest, Playwright |
| **Seeding** | `/seed generate`, `/seed profile`, `/seed apply` | Faker, test data |
| **Release** | `/release prepare`, `/release validate`, `/release tag` | SemVer releases |
| **Deploy** | `/deploy generate`, `/deploy validate`, `/deploy check` | Docker Compose, systemd |
| **Debug MCP** | `/debug-mcp status`, `/debug-mcp test`, `/debug-mcp logs` | MCP server development |
| **Setup** | `/initial-setup`, `/project-init`, `/project-sync` |
| **Sprint** | `/sprint-plan`, `/sprint-start`, `/sprint-status`, `/sprint-close` |
| **Quality** | `/review`, `/test-check`, `/test-gen` |
| **PR Review** | `/pr-review:initial-setup`, `/pr-review:project-init` |
| **Docs** | `/doc-audit`, `/doc-sync` |
| **Security** | `/security-scan`, `/refactor`, `/refactor-dry` |
| **Config** | `/config-analyze`, `/config-optimize` |
| **Debug** | `/debug-report`, `/debug-review` |
## Repository Structure
```
leo-claude-mktplace/
├── .claude-plugin/ # Marketplace manifest
│ └── marketplace.json
├── marketplace-lean.json # Lean profile (6 core plugins)
├── marketplace-full.json # Full profile (all plugins)
├── .mcp.json # MCP server configuration (all servers)
├── mcp-servers/ # SHARED MCP servers
│ ├── gitea/ # Gitea (issues, PRs, wiki)
│ ├── netbox/ # NetBox (DCIM, IPAM)
│ ├── data-platform/ # pandas, PostgreSQL, dbt
│ ├── viz-platform/ # DMC, Plotly, theming
│ └── contract-validator/ # Plugin compatibility validation
├── plugins/ # All plugins (20 total)
│ ├── projman/ # [core] Sprint management
├── .claude-plugin/
│ └── marketplace.json # Marketplace manifest
├── mcp-servers/ # SHARED MCP servers (v3.0.0+)
│ ├── gitea/ # Gitea MCP (issues, PRs, wiki)
│ └── netbox/ # NetBox MCP (CMDB)
├── plugins/
│ ├── projman/ # Sprint management
│ │ ├── .claude-plugin/plugin.json
│ │ ├── commands/ # 19 commands
│ │ ├── .mcp.json
│ │ ├── mcp-servers/gitea -> ../../../mcp-servers/gitea # SYMLINK
│ │ ├── commands/ # 12 commands (incl. setup)
│ │ ├── hooks/ # SessionStart mismatch detection
│ │ ├── agents/ # 4 agents
│ │ └── skills/ # 23 reusable skill files
│ ├── git-flow/ # [core] Git workflow automation
│ ├── pr-review/ # [core] PR review
│ ├── clarity-assist/ # [core] Prompt optimization
│ ├── doc-guardian/ # [core] Documentation drift detection
│ ├── code-sentinel/ # [core] Security scanning
│ ├── claude-config-maintainer/ # [core] CLAUDE.md optimization
│ ├── contract-validator/ # [core] Cross-plugin validation
│ ├── project-hygiene/ # [core] Manual cleanup checks
│ ├── cmdb-assistant/ # [ops] NetBox CMDB integration
│ ├── data-platform/ # [data] Data engineering
│ ├── viz-platform/ # [data] Visualization
│ ├── data-seed/ # [data] Test data generation (scaffold)
│ ├── saas-api-platform/ # [saas] API scaffolding (scaffold)
│ ├── saas-db-migrate/ # [saas] DB migrations (scaffold)
│ ├── saas-react-platform/ # [saas] React toolkit (scaffold)
│ ├── saas-test-pilot/ # [saas] Test automation (scaffold)
│ ├── ops-release-manager/ # [ops] Release management (scaffold)
│ ├── ops-deploy-pipeline/ # [ops] Deployment pipeline (scaffold)
│ └── debug-mcp/ # [debug] MCP debugging (scaffold)
├── scripts/ # Setup and maintenance
│ ├── setup.sh # Initial setup (create venvs, config)
│ ├── post-update.sh # Post-update (clear cache, changelog)
│ ├── setup-venvs.sh # MCP server venv management (cache-based)
│ ├── validate-marketplace.sh # Marketplace compliance validation
│ ├── verify-hooks.sh # Hook inventory verification
│ ├── release.sh # Release automation with version bumping
│ ├── claude-launch.sh # Profile-based launcher
│ ├── install-plugin.sh # Install plugin to consumer project
│ ├── list-installed.sh # Show installed plugins in a project
│ └── uninstall-plugin.sh # Remove plugin from consumer project
├── docs/ # Documentation
│ ├── ARCHITECTURE.md # System architecture & plugin reference
│ ├── CANONICAL-PATHS.md # Authoritative path reference
│ ├── COMMANDS-CHEATSHEET.md # All commands quick reference
│ ├── CONFIGURATION.md # Centralized setup guide
│ ├── DEBUGGING-CHECKLIST.md # Systematic troubleshooting guide
│ ├── MIGRATION-v9.md # v8.x to v9.0.0 migration guide
│ └── UPDATING.md # Update guide
├── CLAUDE.md # Project instructions for Claude Code
├── README.md
├── CHANGELOG.md
├── LICENSE
└── .gitignore
│ │ └── skills/label-taxonomy/
│ ├── git-flow/ # Git workflow automation
│ │ ├── .claude-plugin/plugin.json
│ │ ├── commands/ # 8 commands
│ │ └── agents/
│ ├── pr-review/ # Multi-agent PR review
│ │ ├── .claude-plugin/plugin.json
│ │ ├── .mcp.json
│ │ ├── mcp-servers/gitea -> ../../../mcp-servers/gitea # SYMLINK
│ │ ├── commands/ # 6 commands (incl. setup)
│ │ ├── hooks/ # SessionStart mismatch detection
│ │ └── agents/ # 5 agents
│ ├── clarity-assist/ # Prompt optimization (NEW v3.0.0)
│ │ ├── .claude-plugin/plugin.json
│ │ ├── commands/ # 2 commands
│ │ └── agents/
│ ├── doc-guardian/ # Documentation drift detection
│ ├── code-sentinel/ # Security scanning & refactoring
│ ├── claude-config-maintainer/
│ ├── cmdb-assistant/
│ └── project-hygiene/
├── scripts/
│ ├── setup.sh, post-update.sh
│ └── validate-marketplace.sh # Marketplace compliance validation
└── docs/
    ├── CANONICAL-PATHS.md # Single source of truth for paths
    └── CONFIGURATION.md # Centralized configuration guide
```
## CRITICAL: Rules You MUST Follow
### File Operations
- **NEVER** create files in repository root unless listed in "Allowed Root Files"
- **NEVER** modify `.gitignore` without explicit permission
- **ALWAYS** use `.scratch/` for temporary/exploratory work
- **ALWAYS** verify paths against `docs/CANONICAL-PATHS.md` before creating files
### Plugin Development
- **plugin.json MUST be in `.claude-plugin/` directory** (not plugin root)
- **Every plugin MUST be listed in marketplace.json**
- **MCP servers are SHARED at root** with symlinks from plugins
- **MCP server venv path**: `${CLAUDE_PLUGIN_ROOT}/mcp-servers/{name}/.venv/bin/python`
- **CLI tools forbidden** - Use MCP tools exclusively (never `tea`, `gh`, etc.)
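
For illustration, a root `.mcp.json` entry using that venv path could look like the following; the `-m server` module name is a placeholder, not the actual entry point:

```json
{
  "mcpServers": {
    "gitea": {
      "command": "${CLAUDE_PLUGIN_ROOT}/mcp-servers/gitea/.venv/bin/python",
      "args": ["-m", "server"]
    }
  }
}
```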
### Hooks (Valid Events Only)
`PreToolUse`, `PostToolUse`, `UserPromptSubmit`, `SessionStart`, `SessionEnd`, `Notification`, `Stop`, `SubagentStop`, `PreCompact`
**INVALID:** `task-completed`, `file-changed`, `git-commit-msg-needed`
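
A `hooks/hooks.json` wired to one of the valid events might look like this sketch, assuming the event/matcher/command shape used by Claude Code hook configuration; `check-docs.sh` is a placeholder script:

```json
{
  "hooks": {
    "PostToolUse": [
      {
        "matcher": "Write|Edit",
        "hooks": [
          {
            "type": "command",
            "command": "${CLAUDE_PLUGIN_ROOT}/hooks/check-docs.sh"
          }
        ]
      }
    ]
  }
}
```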
### Allowed Root Files
`CLAUDE.md`, `README.md`, `LICENSE`, `CHANGELOG.md`, `.gitignore`, `.env.example`
### Allowed Root Directories
`.claude/`, `.claude-plugin/`, `.claude-plugins/`, `.scratch/`, `docs/`, `hooks/`, `mcp-servers/`, `plugins/`, `scripts/`
## Architecture
### Four-Agent Model (projman)
@@ -304,71 +128,16 @@ leo-claude-mktplace/
| **Executor** | Implementation-focused | Code implementation, branch management, MR creation |
| **Code Reviewer** | Thorough, practical | Pre-close quality review, security scan, test verification |
### Agent Frontmatter Configuration
Agents specify their configuration in frontmatter using Claude Code's supported fields. Reference: https://code.claude.com/docs/en/sub-agents
**Supported frontmatter fields:**
| Field | Required | Default | Description |
|-------|----------|---------|-------------|
| `name` | Yes | — | Unique identifier, lowercase + hyphens |
| `description` | Yes | — | When Claude should delegate to this subagent |
| `model` | No | `inherit` | `sonnet`, `opus`, `haiku`, or `inherit` |
| `permissionMode` | No | `default` | Controls permission prompts: `default`, `acceptEdits`, `dontAsk`, `bypassPermissions`, `plan` |
| `disallowedTools` | No | none | Comma-separated tools to remove from agent's toolset |
| `skills` | No | none | Comma-separated skills auto-injected into context at startup |
| `hooks` | No | none | Lifecycle hooks scoped to this subagent |
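
Put together, a hypothetical agent file header using those fields might read (values illustrative, not taken from any shipped agent):

```yaml
---
name: example-reviewer
description: Delegate read-only review tasks that must never modify files
model: haiku
permissionMode: plan
disallowedTools: Write, Edit, MultiEdit
skills: skill-a, skill-b
---
```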
**Complete agent matrix:**
| Plugin | Agent | `model` | `permissionMode` | `disallowedTools` | `skills` |
|--------|-------|---------|-------------------|--------------------|----------|
| projman | planner | opus | default | — | frontmatter (2) + body text (12) |
| projman | orchestrator | sonnet | acceptEdits | — | frontmatter (2) + body text (10) |
| projman | executor | sonnet | bypassPermissions | — | frontmatter (7) |
| projman | code-reviewer | opus | default | Write, Edit, MultiEdit | frontmatter (4) |
| pr-review | coordinator | sonnet | plan | Write, Edit, MultiEdit | — |
| pr-review | security-reviewer | sonnet | plan | Write, Edit, MultiEdit | — |
| pr-review | performance-analyst | sonnet | plan | Write, Edit, MultiEdit | — |
| pr-review | maintainability-auditor | haiku | plan | Write, Edit, MultiEdit | — |
| pr-review | test-validator | haiku | plan | Write, Edit, MultiEdit | — |
| data-platform | data-advisor | sonnet | default | — | — |
| data-platform | data-analysis | sonnet | plan | Write, Edit, MultiEdit | — |
| data-platform | data-ingestion | haiku | acceptEdits | — | — |
| viz-platform | design-reviewer | sonnet | plan | Write, Edit, MultiEdit | — |
| viz-platform | layout-builder | sonnet | default | — | — |
| viz-platform | component-check | haiku | plan | Write, Edit, MultiEdit | — |
| viz-platform | theme-setup | haiku | acceptEdits | — | — |
| contract-validator | full-validation | sonnet | default | — | — |
| contract-validator | agent-check | haiku | plan | Write, Edit, MultiEdit | — |
| code-sentinel | security-reviewer | sonnet | plan | Write, Edit, MultiEdit | — |
| code-sentinel | refactor-advisor | sonnet | acceptEdits | — | — |
| doc-guardian | doc-analyzer | sonnet | acceptEdits | — | — |
| clarity-assist | clarity-coach | sonnet | default | Write, Edit, MultiEdit | — |
| git-flow | git-assistant | haiku | acceptEdits | — | — |
| claude-config-maintainer | maintainer | sonnet | acceptEdits | — | frontmatter (2) |
| cmdb-assistant | cmdb-assistant | sonnet | default | — | — |
**Design principles:**
- `bypassPermissions` is granted to exactly ONE agent (Executor) which has code-sentinel PreToolUse hook + Code Reviewer downstream as safety nets.
- `plan` mode is assigned to all pure analysis agents (pr-review, read-only validators).
- `disallowedTools: Write, Edit, MultiEdit` provides defense-in-depth on agents that should never write files.
- `skills` frontmatter is used for agents with ≤7 skills where guaranteed loading is safety-critical. Agents with 8+ skills use body text `## Skills to Load` for selective loading.
- `hooks` (agent-scoped) is reserved for future use (v6.0+).
Override any field by editing the agent's `.md` file in `plugins/{plugin}/agents/`.
### MCP Server Tools (Gitea)
| Category | Tools |
|----------|-------|
| Issues | `list_issues`, `get_issue`, `create_issue`, `update_issue`, `add_comment`, `aggregate_issues` |
| Labels | `get_labels`, `suggest_labels`, `create_label`, `create_label_smart` |
| Milestones | `list_milestones`, `get_milestone`, `create_milestone`, `update_milestone`, `delete_milestone` |
| Dependencies | `list_issue_dependencies`, `create_issue_dependency`, `remove_issue_dependency`, `get_execution_order` |
| Wiki | `list_wiki_pages`, `get_wiki_page`, `create_wiki_page`, `update_wiki_page`, `create_lesson`, `search_lessons`, `allocate_rfc_number` |
| **Pull Requests** | `list_pull_requests`, `get_pull_request`, `get_pr_diff`, `get_pr_comments`, `create_pr_review`, `add_pr_comment` |
| Issues | `list_issues`, `get_issue`, `create_issue`, `update_issue`, `add_comment` |
| Labels | `get_labels`, `suggest_labels`, `create_label` |
| Milestones | `list_milestones`, `get_milestone`, `create_milestone`, `update_milestone` |
| Dependencies | `list_issue_dependencies`, `create_issue_dependency`, `get_execution_order` |
| Wiki | `list_wiki_pages`, `get_wiki_page`, `create_wiki_page`, `create_lesson`, `search_lessons` |
| **Pull Requests** | `list_pull_requests`, `get_pull_request`, `get_pr_diff`, `get_pr_comments`, `create_pr_review`, `add_pr_comment` *(NEW v3.0.0)* |
| Validation | `validate_repo_org`, `get_branch_protection` |
### Hybrid Configuration
@@ -388,28 +157,14 @@ Override any field by editing the agent's `.md` file in `plugins/{plugin}/agents
| `staging` | Staging | Read-only code, can create issues |
| `main`, `master` | Production | Read-only, emergency only |
### RFC System
Wiki-based Request for Comments system for tracking feature ideas from proposal through implementation.
**RFC Wiki Naming:**
- RFC pages: `RFC-NNNN: Short Title` (4-digit zero-padded)
- Index page: `RFC-Index` (auto-maintained)
**Lifecycle:** Draft → Review → Approved → Implementing → Implemented
**Integration with Sprint Planning:**
- `/sprint plan` detects approved RFCs and offers selection
- `/sprint close` updates RFC status on completion
## Label Taxonomy
58 labels total: 31 organization + 27 repository
43 labels total: 27 organization + 16 repository
**Organization:** Agent/2, Complexity/3, Efforts/5, Priority/4, Risk/3, Source/4, Status/4, Type/6
**Repository:** Component/9, Tech/7, Domain/2, Epic/5, RnD/4
**Organization:** Agent/2, Complexity/3, Efforts/5, Priority/4, Risk/3, Source/4, Type/6
**Repository:** Component/9, Tech/7
Sync with `/labels sync` command.
Sync with `/labels-sync` command.
## Lessons Learned System
@@ -424,37 +179,18 @@ Stored in Gitea Wiki under `lessons-learned/sprints/`.
### Adding a New Plugin
1. Create `plugins/{name}/.claude-plugin/plugin.json` (standard schema fields only — no custom fields)
2. Create `plugins/{name}/.claude-plugin/metadata.json` — must include `"domain"` field (`core`, `data`, `saas`, `ops`, or `debug`)
3. Add entry to `.claude-plugin/marketplace.json` with category, tags, license (no custom fields — Claude Code schema is strict)
4. Create `claude-md-integration.md`
5. If using new MCP server, add to root `mcp-servers/` and update `.mcp.json`
6. Run `./scripts/validate-marketplace.sh` — rejects plugins without valid `domain` field
7. Update `CHANGELOG.md`
**Domain field is required in metadata.json (v8.0.0+, moved from plugin.json in v9.1.2):**
```json
{
"domain": "core"
}
```
**Naming convention:** New plugins use domain prefix (`saas-*`, `ops-*`, `data-*`, `debug-*`). Core plugins have no prefix.
### Domain Assignments
| Domain | Plugins |
|--------|---------|
| `core` | projman, git-flow, pr-review, code-sentinel, doc-guardian, clarity-assist, contract-validator, claude-config-maintainer, project-hygiene |
| `data` | data-platform, viz-platform, data-seed |
| `saas` | saas-api-platform, saas-db-migrate, saas-react-platform, saas-test-pilot |
| `ops` | cmdb-assistant, ops-release-manager, ops-deploy-pipeline |
| `debug` | debug-mcp |
1. Create `plugins/{name}/.claude-plugin/plugin.json`
2. Add entry to `.claude-plugin/marketplace.json` with category, tags, license
3. Create `README.md` and `claude-md-integration.md`
4. If using MCP server, create symlink: `ln -s ../../../mcp-servers/{server} plugins/{name}/mcp-servers/{server}`
5. Run `./scripts/validate-marketplace.sh`
6. Update `CHANGELOG.md`
### Adding a Command to projman
1. Create `plugins/projman/commands/{name}.md`
2. Update marketplace description if significant
2. Update `plugins/projman/README.md`
3. Update marketplace description if significant
### Validation
@@ -475,14 +211,13 @@ Stored in Gitea Wiki under `lessons-learned/sprints/`.
| Document | Purpose |
|----------|---------|
| `docs/ARCHITECTURE.md` | System architecture and plugin reference |
| `docs/CANONICAL-PATHS.md` | **Single source of truth** for paths |
| `docs/COMMANDS-CHEATSHEET.md` | All commands quick reference |
| `docs/CONFIGURATION.md` | Centralized setup guide |
| `docs/DEBUGGING-CHECKLIST.md` | Systematic troubleshooting guide |
| `docs/MIGRATION-v9.md` | v8.x to v9.0.0 migration guide |
| `docs/UPDATING.md` | Update guide for the marketplace |
| `plugins/projman/CONFIGURATION.md` | Projman quick reference (links to central) |
| `plugins/projman/README.md` | Projman full documentation |
## Installation Paths
@@ -504,63 +239,20 @@ See `docs/DEBUGGING-CHECKLIST.md` for systematic troubleshooting.
| Symptom | Likely Cause | Fix |
|---------|--------------|-----|
| "X MCP servers failed" | Missing venv in installed path | `cd ~/.claude/plugins/marketplaces/leo-claude-mktplace && ./scripts/setup.sh` |
| MCP tools not available | Venv missing or .mcp.json misconfigured | Run `/cv status` to diagnose |
| MCP tools not available | Symlink broken or venv missing | Run `/debug-report` to diagnose |
| Changes not taking effect | Editing source, not installed | Reinstall plugin or edit installed path |
**Diagnostic Commands:**
- `/cv status` - Marketplace-wide health check (installation, MCP, configuration)
- `/hygiene check` - Project file organization and cleanup check
**Debug Commands:**
- `/debug-report` - Run full diagnostics, create issue if needed
- `/debug-review` - Investigate and propose fixes
## Versioning Workflow
## Versioning Rules
This project follows [SemVer](https://semver.org/) and [Keep a Changelog](https://keepachangelog.com).
### Version Locations (must stay in sync)
| Location | Format | Example |
|----------|--------|---------|
| Git tags | `vX.Y.Z` | `v3.2.0` |
| README.md title | `# Leo Claude Marketplace - vX.Y.Z` | `v3.2.0` |
| marketplace.json | `"version": "X.Y.Z"` | `3.2.0` |
| CHANGELOG.md | `## [X.Y.Z] - YYYY-MM-DD` | `[3.2.0] - 2026-01-24` |
### During Development
**All changes go under `[Unreleased]` in CHANGELOG.md.** Never create a versioned section until release time.
```markdown
## [Unreleased]
### Added
- New feature description
### Fixed
- Bug fix description
```
### Creating a Release
Use the release script to ensure consistency:
```bash
./scripts/release.sh 3.2.0
```
The script will:
1. Validate `[Unreleased]` section has content
2. Replace `[Unreleased]` with `[3.2.0] - YYYY-MM-DD`
3. Update README.md title
4. Update marketplace.json version
5. Commit and create git tag
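
Condensed into shell, those five steps amount to roughly the following; this is a hypothetical sketch, not the contents of `./scripts/release.sh`:

```bash
#!/usr/bin/env bash
# Hypothetical condensation of the release steps above (GNU sed syntax).
set -euo pipefail
VERSION="$1"                      # e.g. 3.2.0
DATE=$(date +%F)

# 1. The [Unreleased] section must exist (content check simplified here).
grep -q '^## \[Unreleased\]' CHANGELOG.md

# 2. Stamp the release section, keeping a fresh [Unreleased] header above it.
sed -i "s/^## \[Unreleased\]/## [Unreleased]\n\n## [$VERSION] - $DATE/" CHANGELOG.md

# 3-4. Sync the README title and the top-level marketplace.json version.
sed -i "s/^# Leo Claude Marketplace.*/# Leo Claude Marketplace - v$VERSION/" README.md
sed -i "0,/\"version\"/s/\"version\": \"[0-9.]*\"/\"version\": \"$VERSION\"/" .claude-plugin/marketplace.json

# 5. Commit and tag.
git commit -am "chore(release): v$VERSION"
git tag "v$VERSION"
```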
### SemVer Guidelines
| Change Type | Version Bump | Example |
|-------------|--------------|---------|
| Bug fixes only | PATCH (x.y.**Z**) | 3.1.1 → 3.1.2 |
| New features (backwards compatible) | MINOR (x.**Y**.0) | 3.1.2 → 3.2.0 |
| Breaking changes | MAJOR (**X**.0.0) | 3.2.0 → 4.0.0 |
- Version displayed ONLY in main `README.md` title: `# Leo Claude Marketplace - vX.Y.Z`
- `CHANGELOG.md` is authoritative for version history
- Follow [SemVer](https://semver.org/): MAJOR.MINOR.PATCH
- On release: Update README title → CHANGELOG → marketplace.json → plugin.json files
---
**Last Updated:** 2026-02-07
**Last Updated:** 2026-01-22

README.md (352 lines changed)

@@ -1,160 +1,130 @@
# Leo Claude Marketplace v9.1.2
# Leo Claude Marketplace - v3.1.1
A plugin marketplace for Claude Code providing sprint management, code review, security scanning, infrastructure automation, and development workflow tools. 20 plugins across 5 domains, backed by 5 shared MCP servers.
A collection of Claude Code plugins for project management, infrastructure automation, and development workflows.
## Plugins
### Core (9 plugins — v9.0.1)
### Development & Project Management
| Plugin | Description |
|--------|-------------|
| `projman` | Sprint planning and project management with Gitea integration |
| `git-flow` | Git workflow automation with intelligent commit messages and branch management |
| `pr-review` | Multi-agent pull request review with confidence scoring |
| `code-sentinel` | Security scanning and code refactoring tools |
| `doc-guardian` | Documentation drift detection and synchronization |
| `clarity-assist` | Prompt optimization with ND-friendly accommodations |
| `contract-validator` | Cross-plugin compatibility validation and agent verification |
| `claude-config-maintainer` | CLAUDE.md and settings.local.json optimization |
| `project-hygiene` | Manual project file cleanup checks |
#### [projman](./plugins/projman/README.md)
**Sprint Planning and Project Management**
### Data (3 plugins)
AI-guided sprint planning with full Gitea integration. Transforms a proven 15-sprint workflow into a distributable plugin.
| Plugin | Version | Description |
|--------|---------|-------------|
| `data-platform` | 9.0.1 | pandas, PostgreSQL/PostGIS, and dbt integration |
| `viz-platform` | 9.0.1 | Dash Mantine Components validation, Plotly charts, and theming |
| `data-seed` | 0.1.0 — scaffold | Test data generation and database seeding |
- Four-agent model: Planner, Orchestrator, Executor, Code Reviewer
- Intelligent label suggestions from 43-label taxonomy
- Lessons learned capture via Gitea Wiki
- Native issue dependencies with parallel execution
- Milestone management for sprint organization
- Branch-aware security (development/staging/production)
- Pre-sprint-close code quality review and test verification
### Ops (3 plugins)
**Commands:** `/sprint-plan`, `/sprint-start`, `/sprint-status`, `/sprint-close`, `/labels-sync`, `/initial-setup`, `/project-init`, `/project-sync`, `/review`, `/test-check`, `/test-gen`
| Plugin | Version | Description |
|--------|---------|-------------|
| `cmdb-assistant` | 9.0.1 | NetBox CMDB integration with data quality validation |
| `ops-release-manager` | 0.1.0 — scaffold | Release management with SemVer and changelog automation |
| `ops-deploy-pipeline` | 0.1.0 — scaffold | Deployment pipeline for Docker Compose and systemd |
#### [git-flow](./plugins/git-flow/README.md) *NEW in v3.0.0*
**Git Workflow Automation**
### SaaS (4 plugins — v0.1.0 scaffolds)
Smart git operations with intelligent commit messages and branch management.
| Plugin | Description |
|--------|-------------|
| `saas-api-platform` | REST/GraphQL API scaffolding for FastAPI and Express |
| `saas-db-migrate` | Database migration management for Alembic, Prisma, raw SQL |
| `saas-react-platform` | React frontend toolkit for Next.js and Vite |
| `saas-test-pilot` | Test automation for pytest, Jest, Vitest, Playwright |
- Auto-generated conventional commit messages
- Multiple workflow styles (simple, feature-branch, pr-required, trunk-based)
- Branch naming enforcement
- Merge and cleanup automation
- Protected branch awareness
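For reference, the Conventional Commits format these generated messages follow looks like this (hypothetical message, not actual plugin output):

```
feat(auth): add session token refresh

- retry failed requests once after refreshing the token
- warn when a refresh attempt fails
```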
### Debug (1 plugin — v0.1.0 scaffold)
**Commands:** `/commit`, `/commit-push`, `/commit-merge`, `/commit-sync`, `/branch-start`, `/branch-cleanup`, `/git-status`, `/git-config`
| Plugin | Description |
|--------|-------------|
| `debug-mcp` | MCP server debugging, inspection, and development toolkit |
#### [pr-review](./plugins/pr-review/README.md) *NEW in v3.0.0*
**Multi-Agent PR Review**
## Quick Start
Comprehensive pull request review using specialized agents.
### Launch with profiles
- Multi-agent review: Security, Performance, Maintainability, Tests
- Confidence scoring (only reports HIGH/MEDIUM confidence findings)
- Actionable feedback with suggested fixes
- Gitea integration for automated review submission
```bash
./scripts/claude-launch.sh [profile] [extra-args...]
```
**Commands:** `/pr-review`, `/pr-summary`, `/pr-findings`, `/initial-setup`, `/project-init`, `/project-sync`
| Profile | Plugins Loaded | Use Case |
|---------|----------------|----------|
| `sprint` | projman, git-flow, pr-review, code-sentinel, doc-guardian, clarity-assist | Default. Sprint planning and development |
| `review` | pr-review, code-sentinel | Lightweight code review |
| `data` | data-platform, viz-platform | Data engineering and visualization |
| `infra` | cmdb-assistant | Infrastructure/CMDB management |
| `full` | All 20 plugins | When you need everything |
#### [claude-config-maintainer](./plugins/claude-config-maintainer/README.md)
**CLAUDE.md Optimization and Maintenance**
```bash
./scripts/claude-launch.sh # Default sprint profile
./scripts/claude-launch.sh data --model opus # Data profile with Opus
./scripts/claude-launch.sh full # Load all plugins
```
Analyze, optimize, and create CLAUDE.md configuration files for Claude Code projects.
### Common commands
**Commands:** `/config-analyze`, `/config-optimize`, `/config-init`
```bash
/sprint plan # Plan a sprint with architecture analysis
/sprint start # Begin sprint execution
/gitflow commit --push # Commit with auto-generated message and push
/pr review # Full multi-agent PR review
/sentinel scan # Security audit
/doc audit # Check for documentation drift
/cv status # Marketplace health check
```
### Productivity
## Repository Structure
#### [clarity-assist](./plugins/clarity-assist/README.md) *NEW in v3.0.0*
**Prompt Optimization with ND Accommodations**
```
leo-claude-mktplace/
├── .claude-plugin/ # Marketplace manifest
│ ├── marketplace.json
│ ├── marketplace-lean.json # Lean profile (6 core plugins)
│ └── marketplace-full.json # Full profile (all plugins)
├── mcp-servers/ # Shared MCP servers
│ ├── gitea/ # Gitea (issues, PRs, wiki)
│ ├── netbox/ # NetBox (DCIM, IPAM)
│ ├── data-platform/ # pandas, PostgreSQL, dbt
│ ├── viz-platform/ # DMC, Plotly, theming
│ └── contract-validator/ # Plugin compatibility validation
├── plugins/ # All plugins (20 total)
│ ├── projman/ # [core] Sprint management
│ ├── git-flow/ # [core] Git workflow automation
│ ├── pr-review/ # [core] PR review
│ ├── clarity-assist/ # [core] Prompt optimization
│ ├── doc-guardian/ # [core] Documentation drift detection
│ ├── code-sentinel/ # [core] Security scanning
│ ├── claude-config-maintainer/ # [core] CLAUDE.md optimization
│ ├── contract-validator/ # [core] Cross-plugin validation
│ ├── project-hygiene/ # [core] Manual cleanup checks
│ ├── cmdb-assistant/ # [ops] NetBox CMDB integration
│ ├── data-platform/ # [data] Data engineering
│ ├── viz-platform/ # [data] Visualization
│ ├── data-seed/ # [data] Test data generation (scaffold)
│ ├── saas-api-platform/ # [saas] API scaffolding (scaffold)
│ ├── saas-db-migrate/ # [saas] DB migrations (scaffold)
│ ├── saas-react-platform/ # [saas] React toolkit (scaffold)
│ ├── saas-test-pilot/ # [saas] Test automation (scaffold)
│ ├── ops-release-manager/ # [ops] Release management (scaffold)
│ ├── ops-deploy-pipeline/ # [ops] Deployment pipeline (scaffold)
│ └── debug-mcp/ # [debug] MCP debugging (scaffold)
├── scripts/ # Setup and maintenance
│ ├── setup.sh # Initial setup (create venvs, config)
│ ├── post-update.sh # Post-update (clear cache, changelog)
│ ├── setup-venvs.sh # MCP server venv management (cache-based)
│ ├── validate-marketplace.sh # Marketplace compliance validation
│ ├── verify-hooks.sh # Hook inventory verification
│ ├── release.sh # Release automation with version bumping
│ ├── claude-launch.sh # Profile-based launcher
│ ├── install-plugin.sh # Install plugin to consumer project
│ ├── list-installed.sh # Show installed plugins in a project
│ └── uninstall-plugin.sh # Remove plugin from consumer project
├── docs/ # Documentation
│ ├── ARCHITECTURE.md # System architecture & plugin reference
│ ├── CANONICAL-PATHS.md # Authoritative path reference
│ ├── COMMANDS-CHEATSHEET.md # All commands quick reference
│ ├── CONFIGURATION.md # Centralized setup guide
│ ├── DEBUGGING-CHECKLIST.md # Systematic troubleshooting guide
│ ├── MIGRATION-v9.md # v8.x to v9.0.0 migration guide
│ └── UPDATING.md # Update guide
├── CLAUDE.md # Project instructions for Claude Code
├── README.md
├── CHANGELOG.md
├── LICENSE
└── .gitignore
```
Transform vague requests into clear specifications using structured methodology.
- 4-D methodology: Deconstruct, Diagnose, Develop, Deliver
- ND-friendly question patterns (option-based, chunked)
- Conflict detection and escalation protocols
**Commands:** `/clarify`, `/quick-clarify`
#### [doc-guardian](./plugins/doc-guardian/README.md)
**Documentation Lifecycle Management**
Automatic documentation drift detection and synchronization.
**Commands:** `/doc-audit`, `/doc-sync`
#### [project-hygiene](./plugins/project-hygiene/README.md)
**Post-Task Cleanup Automation**
Hook-based cleanup that runs after Claude completes work.
### Security
#### [code-sentinel](./plugins/code-sentinel/README.md)
**Security Scanning & Refactoring**
Security vulnerability detection and code refactoring tools.
**Commands:** `/security-scan`, `/refactor`, `/refactor-dry`
### Infrastructure
#### [cmdb-assistant](./plugins/cmdb-assistant/README.md)
**NetBox CMDB Integration**
Full CRUD operations for network infrastructure management directly from Claude Code.
**Commands:** `/initial-setup`, `/cmdb-search`, `/cmdb-device`, `/cmdb-ip`, `/cmdb-site`
## MCP Servers
All MCP servers are shared at repository root and configured in `.mcp.json`.
MCP servers are **shared at repository root** with **symlinks** from plugins that use them.
| Server | Used By | External System |
|--------|---------|-----------------|
| gitea | projman, pr-review | Gitea (issues, PRs, wiki, milestones) |
| netbox | cmdb-assistant | NetBox (DCIM, IPAM) |
| data-platform | data-platform | PostgreSQL, dbt |
| viz-platform | viz-platform | DMC component registry |
| contract-validator | contract-validator | Internal validation |
### Gitea MCP Server (shared)
Full Gitea API integration for project management.
| Category | Tools |
|----------|-------|
| Issues | `list_issues`, `get_issue`, `create_issue`, `update_issue`, `add_comment` |
| Labels | `get_labels`, `suggest_labels`, `create_label` |
| Wiki | `list_wiki_pages`, `get_wiki_page`, `create_wiki_page`, `create_lesson`, `search_lessons` |
| Milestones | `list_milestones`, `get_milestone`, `create_milestone`, `update_milestone` |
| Dependencies | `list_issue_dependencies`, `create_issue_dependency`, `get_execution_order` |
| **Pull Requests** | `list_pull_requests`, `get_pull_request`, `get_pr_diff`, `get_pr_comments`, `create_pr_review`, `add_pr_comment` *(NEW in v3.0.0)* |
| Validation | `validate_repo_org`, `get_branch_protection` |
### NetBox MCP Server (shared)
Comprehensive NetBox REST API integration for infrastructure management.
| Module | Coverage |
|--------|----------|
| DCIM | Sites, Racks, Devices, Interfaces, Cables |
| IPAM | Prefixes, IPs, VLANs, VRFs |
| Circuits | Providers, Circuits, Terminations |
| Virtualization | Clusters, VMs, Interfaces |
| Extras | Tags, Custom Fields, Audit Log |
## Installation
@@ -164,14 +134,16 @@ All MCP servers are shared at repository root and configured in `.mcp.json`.
- Python 3.10+
- Access to target services (Gitea, NetBox as needed)
### Add marketplace to Claude Code
### Add Marketplace to Claude Code
**Option 1 - CLI command (recommended):**
```bash
/plugin marketplace add https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git
```
Or add to `.claude/settings.json`:
**Option 2 - Settings file (for team distribution):**
Add to `.claude/settings.json` in your target project:
```json
{
"extraKnownMarketplaces": {
@@ -185,55 +157,113 @@ Or add to `.claude/settings.json`:
}
```
### Setup MCP servers
### Run Interactive Setup
After installing, create Python venvs for MCP servers:
```bash
cd ~/.claude/plugins/marketplaces/leo-claude-mktplace && ./scripts/setup.sh
```
Then restart Claude Code and run the interactive setup:
After installing plugins, run the setup wizard:
```
/projman setup
/initial-setup
```
See [CONFIGURATION.md](./docs/CONFIGURATION.md) for manual setup and advanced options.
The wizard handles everything:
- Sets up MCP server (Python venv + dependencies)
- Creates system config (`~/.config/claude/gitea.env`)
- Guides you through adding your API token
- Detects and validates your repository via API
- Creates project config (`.env`)
### Install to consumer projects
**For new projects** (when system is already configured):
```
/project-init
```
```bash
./scripts/install-plugin.sh <plugin-name> /path/to/project
./scripts/list-installed.sh /path/to/project
./scripts/uninstall-plugin.sh <plugin-name> /path/to/project
```
**After moving a repository:**
```
/project-sync
```
See [docs/CONFIGURATION.md](./docs/CONFIGURATION.md) for manual setup and advanced options.
## Verifying Plugin Installation
After installing plugins, the `/plugin` command may show `(no content)` - this is normal Claude Code behavior and doesn't indicate an error.
**To verify a plugin is installed correctly:**
1. **Check installed plugins list:**
```
/plugin list
```
Look for `✔ plugin-name · Installed`
2. **Test a plugin command directly:**
```
/git-flow:git-status
/projman:sprint-status
/clarity-assist:clarify
```
If the command executes and shows output, the plugin is working.
3. **Check for loading errors:**
```
/plugin list
```
Look for any `Plugin Loading Errors` section - this indicates manifest issues.
**Command format:** All plugin commands use the format `/plugin-name:command-name`
| Plugin | Test Command |
|--------|--------------|
| git-flow | `/git-flow:git-status` |
| projman | `/projman:sprint-status` |
| pr-review | `/pr-review:pr-summary` |
| clarity-assist | `/clarity-assist:clarify` |
| doc-guardian | `/doc-guardian:doc-audit` |
| code-sentinel | `/code-sentinel:security-scan` |
| claude-config-maintainer | `/claude-config-maintainer:config-analyze` |
| cmdb-assistant | `/cmdb-assistant:cmdb-search` |
## Repository Structure
```
leo-claude-mktplace/
├── .claude-plugin/ # Marketplace manifest
│ └── marketplace.json
├── mcp-servers/ # SHARED MCP servers (v3.0.0+)
│ ├── gitea/ # Gitea MCP (issues, PRs, wiki)
│ └── netbox/ # NetBox MCP (CMDB)
├── plugins/ # All plugins
│ ├── projman/ # Sprint management
│ ├── git-flow/ # Git workflow automation (NEW)
│ ├── pr-review/ # PR review (NEW)
│ ├── clarity-assist/ # Prompt optimization (NEW)
│ ├── claude-config-maintainer/ # CLAUDE.md optimization
│ ├── cmdb-assistant/ # NetBox CMDB integration
│ ├── doc-guardian/ # Documentation drift detection
│ ├── code-sentinel/ # Security scanning
│ └── project-hygiene/ # Cleanup automation
├── docs/ # Documentation
│ ├── CANONICAL-PATHS.md # Path reference
│ └── CONFIGURATION.md # Setup guide
└── scripts/ # Setup scripts
```
## Documentation
| Document | Description |
|----------|-------------|
| [CLAUDE.md](./CLAUDE.md) | Project instructions for Claude Code |
| [ARCHITECTURE.md](./docs/ARCHITECTURE.md) | System architecture and plugin reference |
| [COMMANDS-CHEATSHEET.md](./docs/COMMANDS-CHEATSHEET.md) | All commands quick reference |
| [CLAUDE.md](./CLAUDE.md) | Main project instructions |
| [CONFIGURATION.md](./docs/CONFIGURATION.md) | Centralized setup guide |
| [DEBUGGING-CHECKLIST.md](./docs/DEBUGGING-CHECKLIST.md) | Systematic troubleshooting guide |
| [COMMANDS-CHEATSHEET.md](./docs/COMMANDS-CHEATSHEET.md) | All commands quick reference |
| [UPDATING.md](./docs/UPDATING.md) | Update guide for the marketplace |
| [MIGRATION-v9.md](./docs/MIGRATION-v9.md) | v8.x to v9.0.0 migration guide |
| [CANONICAL-PATHS.md](./docs/CANONICAL-PATHS.md) | Authoritative path reference |
| [CHANGELOG.md](./CHANGELOG.md) | Version history |
## Validation
```bash
./scripts/validate-marketplace.sh # Marketplace compliance (manifests, domains, paths)
./scripts/verify-hooks.sh # Hook inventory (4 PreToolUse + 1 UserPromptSubmit)
```
## License
MIT License
## Support
- **Repository**: https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git
- **Issues**: Contact repository maintainer
- **Repository**: `https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git`

View File

@@ -1,184 +0,0 @@
# Architecture — Leo Claude Marketplace v9.1.0
## Overview
Plugin marketplace for Claude Code. 20 plugins across 5 domains, 5 shared MCP servers,
4 PreToolUse safety hooks + 1 UserPromptSubmit quality hook.
## System Architecture
### Plugin Domains
| Domain | Purpose | Plugins |
|--------|---------|---------|
| core | Development workflow | projman, git-flow, pr-review, code-sentinel, doc-guardian, clarity-assist, contract-validator, claude-config-maintainer, project-hygiene |
| data | Data engineering | data-platform, viz-platform, data-seed |
| saas | SaaS development | saas-api-platform, saas-db-migrate, saas-react-platform, saas-test-pilot |
| ops | Operations | cmdb-assistant, ops-release-manager, ops-deploy-pipeline |
| debug | Diagnostics | debug-mcp |
### MCP Servers (Shared at Root)
| Server | Plugins Using It | External System |
|--------|-------------------|-----------------|
| gitea | projman, pr-review | Gitea (issues, PRs, wiki) — uses published `gitea-mcp` package |
| netbox | cmdb-assistant | NetBox (DCIM, IPAM) |
| data-platform | data-platform | PostgreSQL, dbt |
| viz-platform | viz-platform | DMC registry |
| contract-validator | contract-validator | (internal validation) |
### Hook Architecture
| Plugin | Event | Trigger | Script |
|--------|-------|---------|--------|
| code-sentinel | PreToolUse | Write\|Edit\|MultiEdit | security-check.sh |
| git-flow | PreToolUse | Bash (branch naming) | branch-check.sh |
| git-flow | PreToolUse | Bash (git commit) | commit-msg-check.sh |
| cmdb-assistant | PreToolUse | MCP create/update | validate-input.sh |
| clarity-assist | UserPromptSubmit | All prompts | vagueness-check.sh |
No other hook types permitted. All workflow automation is via explicit commands.
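For illustration, the wiring for one of these hooks in a plugin's `hooks/hooks.json` might look like the sketch below; the event/matcher layout follows Claude Code's hook configuration, but treat the exact schema and script path as assumptions:

```json
{
  "hooks": {
    "PreToolUse": [
      {
        "matcher": "Write|Edit|MultiEdit",
        "hooks": [
          {
            "type": "command",
            "command": "${CLAUDE_PLUGIN_ROOT}/hooks/security-check.sh"
          }
        ]
      }
    ]
  }
}
```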
### Agent Model (projman)
| Agent | Model | Permission Mode | Role |
|-------|-------|-----------------|------|
| Planner | opus | default | Sprint planning, architecture analysis, issue creation |
| Orchestrator | sonnet | acceptEdits | Sprint execution, parallel batching, lesson capture |
| Executor | sonnet | bypassPermissions | Code implementation, branch management |
| Code Reviewer | opus | default | Pre-close quality review, security, tests |
### Config Hierarchy
| Level | Location | Contains |
|-------|----------|----------|
| System | ~/.config/claude/{service}.env | Credentials |
| Project | .env in project root | Repo-specific config |
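For example, a system-level credentials file might contain the following (variable names are illustrative, not the authoritative set):

```bash
# ~/.config/claude/gitea.env — credentials only, never committed
GITEA_URL=https://gitea.example.com
GITEA_API_TOKEN=<your-token>
```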
### Branch Security
| Pattern | Access |
|---------|--------|
| development, feat/*, fix/* | Full |
| staging, stage/* | Read-only code, can create issues |
| main, master, prod/* | READ-ONLY. Emergency only. |
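A hook enforcing this policy could classify the current branch with a simple pattern match; a sketch, not the shipped hook code:

```bash
branch=$(git rev-parse --abbrev-ref HEAD)
case "$branch" in
  development|feat/*|fix/*) echo "full access" ;;
  staging|stage/*)          echo "read-only code; issue creation allowed" ;;
  main|master|prod/*)       echo "READ-ONLY: emergency only" ;;
  *)                        echo "unlisted branch: defaulting to full access" ;;  # assumption
esac
```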
### Launch Profiles
| Profile | Plugins |
|---------|---------|
| sprint | projman, git-flow, pr-review, code-sentinel, doc-guardian, clarity-assist |
| data | data-platform, viz-platform, data-seed |
| saas | saas-api-platform, saas-react-platform, saas-db-migrate, saas-test-pilot |
| ops | cmdb-assistant, ops-release-manager, ops-deploy-pipeline |
| review | pr-review, code-sentinel |
| debug | debug-mcp |
| full | all plugins |
---
## Plugin Reference
### Core Domain
#### projman (v9.0.1)
Sprint planning and project management with Gitea integration.
- **Commands:** /sprint (plan|start|status|close|review|test), /project (initiation|plan|status|close), /adr (create|list|update|supersede), /rfc (create|list|review|approve|reject), /labels sync, /projman setup
- **Agents:** planner, orchestrator, executor, code-reviewer
- **MCP:** gitea
#### git-flow (v9.0.1)
Git workflow automation with smart commits and branch management.
- **Commands:** /gitflow (commit|branch-start|branch-cleanup|status|config)
- **Commit flags:** --push, --merge, --sync
- **Agents:** git-assistant
- **Hooks:** PreToolUse (branch-check.sh, commit-msg-check.sh)
#### pr-review (v9.0.1)
Multi-agent PR review with confidence scoring.
- **Commands:** /pr (review|summary|findings|diff|setup|init|sync)
- **Agents:** coordinator, security-reviewer, performance-analyst, maintainability-auditor, test-validator
- **MCP:** gitea
#### code-sentinel (v9.0.1)
Security scanning and code refactoring.
- **Commands:** /sentinel (scan|refactor|refactor-dry)
- **Agents:** security-reviewer, refactor-advisor
- **Hooks:** PreToolUse (security-check.sh)
#### doc-guardian (v9.0.1)
Documentation drift detection and synchronization.
- **Commands:** /doc (audit|sync|changelog-gen|coverage|stale-docs)
- **Agents:** doc-analyzer
#### clarity-assist (v9.0.1)
Prompt optimization with ND-friendly accommodations.
- **Commands:** /clarity (clarify|quick-clarify)
- **Agents:** clarity-coach
- **Hooks:** UserPromptSubmit (vagueness-check.sh)
#### contract-validator (v9.0.1)
Cross-plugin compatibility validation.
- **Commands:** /cv (validate|check-agent|list-interfaces|dependency-graph|setup|status)
- **Agents:** full-validation, agent-check
- **MCP:** contract-validator
#### claude-config-maintainer (v9.0.1)
CLAUDE.md and settings optimization.
- **Commands:** /claude-config (analyze|optimize|init|diff|lint|audit-settings|optimize-settings|permissions-map)
- **Agents:** maintainer
#### project-hygiene (v9.0.1)
Manual project file cleanup checks.
- **Commands:** /hygiene check (--fix flag for auto-fix)
### Data Domain
#### data-platform (v9.0.1)
pandas, PostgreSQL, and dbt integration.
- **Commands:** /data (ingest|profile|schema|explain|lineage|lineage-viz|run|dbt-test|quality|review|gate|setup)
- **Agents:** data-advisor, data-analysis, data-ingestion
- **MCP:** data-platform
#### viz-platform (v9.0.1)
DMC validation, Plotly charts, and theming.
- **Commands:** /viz (setup|chart|chart-export|dashboard|theme|theme-new|theme-css|component|accessibility-check|breakpoints|design-review|design-gate)
- **Agents:** design-reviewer, layout-builder, component-check, theme-setup
- **MCP:** viz-platform
#### data-seed (v0.1.0)
Test data generation and database seeding. *Scaffold — not yet implemented.*
### SaaS Domain
#### saas-api-platform (v0.1.0)
REST/GraphQL API scaffolding for FastAPI and Express. *Scaffold.*
#### saas-db-migrate (v0.1.0)
Database migration management for Alembic, Prisma, raw SQL. *Scaffold.*
#### saas-react-platform (v0.1.0)
React frontend toolkit for Next.js and Vite. *Scaffold.*
#### saas-test-pilot (v0.1.0)
Test automation for pytest, Jest, Vitest, Playwright. *Scaffold.*
### Ops Domain
#### cmdb-assistant (v9.0.1)
NetBox CMDB integration for infrastructure management.
- **Commands:** /cmdb (search|device|ip|site|audit|register|sync|topology|change-audit|ip-conflicts|setup)
- **Agents:** cmdb-assistant
- **MCP:** netbox
- **Hooks:** PreToolUse (validate-input.sh)
#### ops-release-manager (v0.1.0)
Release management with SemVer and changelog automation. *Scaffold.*
#### ops-deploy-pipeline (v0.1.0)
Deployment pipeline for Docker Compose and systemd. *Scaffold.*
### Debug Domain
#### debug-mcp (v0.1.0)
MCP server debugging and diagnostics. *Scaffold.*

View File

@@ -2,7 +2,7 @@
**This file defines ALL valid paths in this repository. No exceptions. No inference. No assumptions.**
Last Updated: 2026-02-07 (v9.1.0)
Last Updated: 2026-01-20 (v3.0.0)
---
@@ -12,20 +12,16 @@ Last Updated: 2026-02-07 (v9.1.0)
leo-claude-mktplace/
├── .claude/ # Claude Code local settings
├── .claude-plugin/ # Marketplace manifest
│   ├── marketplace.json
│ ├── marketplace-lean.json # Lean profile (6 core plugins)
│ └── marketplace-full.json # Full profile (all plugins)
├── .mcp-lean.json # Lean profile MCP config (gitea only)
├── .mcp-full.json # Full profile MCP config (all servers)
│   └── marketplace.json
├── .scratch/ # Transient work (auto-cleaned)
├── docs/ # All documentation
│ ├── ARCHITECTURE.md # System architecture and plugin reference
│ ├── architecture/ # Draw.io diagrams and specs
│ ├── CANONICAL-PATHS.md # This file - single source of truth
│ ├── COMMANDS-CHEATSHEET.md # All commands quick reference
│ ├── CONFIGURATION.md # Centralized configuration guide
│ ├── DEBUGGING-CHECKLIST.md # Systematic troubleshooting guide
│ ├── MIGRATION-v9.md # v8.x → v9.0.0 migration guide
│ └── UPDATING.md # Update guide
│ ├── UPDATING.md # Update guide
│ └── workflows/ # Workflow documentation
├── hooks/ # Shared hooks (if any)
├── mcp-servers/ # SHARED MCP servers (v3.0.0+)
│ ├── gitea/ # Gitea MCP server
│ │ ├── mcp_server/
@@ -41,51 +37,23 @@ leo-claude-mktplace/
│ │ │ └── pull_requests.py # NEW in v3.0.0
│ │ ├── requirements.txt
│ │ └── .venv/
│   ├── netbox/                    # NetBox MCP server
│ │ ├── mcp_server/
│ │ ├── requirements.txt
│ │ └── .venv/
│ ├── data-platform/ # Data engineering MCP (NEW v4.0.0)
│ │ ├── mcp_server/
│ │ │ ├── server.py
│ │ │ ├── pandas_tools.py
│ │ │ ├── postgres_tools.py
│ │ │ └── dbt_tools.py
│ │ ├── requirements.txt
│ │ └── .venv/
│ ├── contract-validator/ # Contract validation MCP (NEW v5.0.0)
│ │ ├── mcp_server/
│ │ │ ├── server.py
│ │ │ ├── parse_tools.py
│ │ │ ├── validation_tools.py
│ │ │ └── report_tools.py
│ │ ├── tests/
│ │ ├── requirements.txt
│ │ └── .venv/
│ └── viz-platform/ # Visualization MCP (NEW v4.1.0)
│   └── netbox/                    # NetBox MCP server
│ ├── mcp_server/
│ │ ├── server.py
│ │ ├── config.py
│ │ ├── component_registry.py
│ │ ├── dmc_tools.py
│ │ ├── chart_tools.py
│ │ ├── layout_tools.py
│ │ ├── theme_tools.py
│ │ ├── theme_store.py
│ │ └── page_tools.py
│ ├── registry/ # DMC component JSON registries
│ ├── tests/ # 94 tests
│ ├── requirements.txt
│ └── .venv/
├── plugins/ # ALL plugins
│ ├── projman/ # Sprint management
│ │ ├── .claude-plugin/
│ │ ├── .mcp.json
│ │ ├── mcp-servers/
│ │ │ └── gitea -> ../../../mcp-servers/gitea # SYMLINK
│ │ ├── commands/
│ │ ├── agents/
│ │ ├── skills/
│ │ └── claude-md-integration.md
│ ├── doc-guardian/ # Documentation drift detection
│ │ ├── .claude-plugin/
│ │ ├── hooks/
│ │ ├── commands/
│ │ ├── agents/
│ │ ├── skills/
@@ -99,6 +67,9 @@ leo-claude-mktplace/
│ │ └── claude-md-integration.md
│ ├── cmdb-assistant/ # NetBox CMDB integration
│ │ ├── .claude-plugin/
│ │ ├── .mcp.json
│ │ ├── mcp-servers/
│ │ │ └── netbox -> ../../../mcp-servers/netbox # SYMLINK
│ │ ├── commands/
│ │ ├── agents/
│ │ └── claude-md-integration.md
@@ -109,100 +80,34 @@ leo-claude-mktplace/
│ │ └── claude-md-integration.md
│ ├── project-hygiene/
│ │ ├── .claude-plugin/
│ │ ├── commands/
│ │ ├── hooks/
│ │ └── claude-md-integration.md
│ ├── clarity-assist/
│ ├── clarity-assist/ # NEW in v3.0.0
│ │ ├── .claude-plugin/
│ │ ├── commands/
│ │ ├── agents/
│ │ ├── skills/
│ │ └── claude-md-integration.md
│ ├── git-flow/
│ ├── git-flow/ # NEW in v3.0.0
│ │ ├── .claude-plugin/
│ │ ├── commands/
│ │ ├── agents/
│ │ ├── skills/
│ │ └── claude-md-integration.md
│   ├── pr-review/
│ │ ├── .claude-plugin/
│ │ ├── commands/
│ │ ├── agents/
│ │ ├── skills/
│ │ └── claude-md-integration.md
│ ├── data-platform/
│ │ ├── .claude-plugin/
│ │ ├── commands/
│ │ ├── agents/
│ │ └── claude-md-integration.md
│ ├── contract-validator/
│ │ ├── .claude-plugin/
│ │ ├── commands/
│ │ ├── agents/
│ │ └── claude-md-integration.md
│ ├── viz-platform/
│ │ ├── .claude-plugin/
│ │ ├── commands/
│ │ ├── agents/
│ │ └── claude-md-integration.md
│ ├── saas-api-platform/ # REST/GraphQL API scaffolding (scaffold)
│ │ ├── .claude-plugin/
│ │ ├── commands/
│ │ ├── agents/
│ │ ├── skills/
│ │ └── claude-md-integration.md
│ ├── saas-db-migrate/ # Database migration management (scaffold)
│ │ ├── .claude-plugin/
│ │ ├── commands/
│ │ ├── agents/
│ │ ├── skills/
│ │ └── claude-md-integration.md
│ ├── saas-react-platform/ # React frontend toolkit (scaffold)
│ │ ├── .claude-plugin/
│ │ ├── commands/
│ │ ├── agents/
│ │ ├── skills/
│ │ └── claude-md-integration.md
│ ├── saas-test-pilot/ # Test automation (scaffold)
│ │ ├── .claude-plugin/
│ │ ├── commands/
│ │ ├── agents/
│ │ ├── skills/
│ │ └── claude-md-integration.md
│ ├── data-seed/ # Test data generation (scaffold)
│ │ ├── .claude-plugin/
│ │ ├── commands/
│ │ ├── agents/
│ │ ├── skills/
│ │ └── claude-md-integration.md
│ ├── ops-release-manager/ # Release management (scaffold)
│ │ ├── .claude-plugin/
│ │ ├── commands/
│ │ ├── agents/
│ │ ├── skills/
│ │ └── claude-md-integration.md
│ ├── ops-deploy-pipeline/ # Deployment pipeline (scaffold)
│ │ ├── .claude-plugin/
│ │ ├── commands/
│ │ ├── agents/
│ │ ├── skills/
│ │ └── claude-md-integration.md
│ └── debug-mcp/ # MCP debugging toolkit (scaffold)
│   └── pr-review/                 # NEW in v3.0.0
│ ├── .claude-plugin/
│ ├── .mcp.json
│ ├── mcp-servers/
│ │ └── gitea -> ../../../mcp-servers/gitea # SYMLINK
│ ├── commands/
│ ├── agents/
│ ├── skills/
│ └── claude-md-integration.md
├── scripts/ # Setup and maintenance scripts
│ ├── setup.sh # Initial setup (create venvs, config templates)
│ ├── post-update.sh # Post-update (clear cache, show changelog)
│ ├── setup-venvs.sh # Setup MCP server venvs (create only, never delete)
│   ├── validate-marketplace.sh   # Marketplace compliance validation
│ ├── verify-hooks.sh # Verify all hooks use correct event types
│ ├── release.sh # Release automation with version bumping
│ ├── claude-launch.sh # Task-specific launcher with profile selection
│ ├── install-plugin.sh # Install plugin to consumer project
│ ├── list-installed.sh # Show installed plugins in a project
│ └── uninstall-plugin.sh # Remove plugin from consumer project
│ ├── post-update.sh # Post-update (rebuild venvs, verify symlinks)
│ ├── check-venv.sh # Check if venvs exist (for hooks)
│   └── validate-marketplace.sh   # Marketplace compliance validation
├── CLAUDE.md
├── README.md
├── LICENSE
@@ -214,103 +119,49 @@ leo-claude-mktplace/
## Path Patterns (MANDATORY)
### Phase 1a Paths (v8.1.0)
New files added in v8.1.0:
```
plugins/projman/commands/project.md
plugins/projman/commands/project-initiation.md
plugins/projman/commands/project-plan.md
plugins/projman/commands/project-status.md
plugins/projman/commands/project-close.md
plugins/projman/commands/adr.md
plugins/projman/commands/adr-create.md
plugins/projman/commands/adr-list.md
plugins/projman/commands/adr-update.md
plugins/projman/commands/adr-supersede.md
plugins/projman/skills/source-analysis.md
plugins/projman/skills/project-charter.md
plugins/projman/skills/adr-conventions.md
plugins/projman/skills/epic-conventions.md
plugins/projman/skills/wbs.md
plugins/projman/skills/risk-register.md
plugins/projman/skills/sprint-roadmap.md
plugins/projman/skills/wiki-conventions.md
plugins/project-hygiene/commands/hygiene-check.md
plugins/contract-validator/commands/cv-status.md
```
### Plugin Paths
| Context | Pattern | Example |
|---------|---------|---------|
| Plugin location | `plugins/{plugin-name}/` | `plugins/projman/` |
| Plugin manifest | `plugins/{plugin-name}/.claude-plugin/plugin.json` | `plugins/projman/.claude-plugin/plugin.json` |
| Plugin MCP mapping (optional) | `plugins/{plugin-name}/.claude-plugin/metadata.json` | `plugins/projman/.claude-plugin/metadata.json` |
| Plugin commands | `plugins/{plugin-name}/commands/` | `plugins/projman/commands/` |
| Plugin agents | `plugins/{plugin-name}/agents/` | `plugins/projman/agents/` |
| Plugin skills | `plugins/{plugin-name}/skills/` | `plugins/projman/skills/` |
| Plugin .mcp.json | `plugins/{plugin-name}/.mcp.json` | `plugins/projman/.mcp.json` |
| Plugin integration snippet | `plugins/{plugin-name}/claude-md-integration.md` | `plugins/projman/claude-md-integration.md` |
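Putting the manifest pattern in context, a minimal `plugin.json` might look like the following; the field set shown mirrors the marketplace manifest and is an assumption, not the full schema:

```json
{
  "name": "projman",
  "version": "9.0.1",
  "description": "Sprint planning and project management with Gitea integration"
}
```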
### MCP Server Paths
### MCP Server Paths (v3.0.0 Architecture)
MCP servers are **shared at repository root** and configured in `.mcp.json`.
MCP servers are **shared at repository root** with **symlinks** from plugins.
| Context | Pattern | Example |
|---------|---------|---------|
| MCP configuration | `.mcp.json` | `.mcp.json` (at repo root) |
| Shared MCP server | `mcp-servers/{server}/` | `mcp-servers/gitea/` |
| MCP server code | `mcp-servers/{server}/mcp_server/` | `mcp-servers/netbox/mcp_server/` |
| MCP venv (local) | `mcp-servers/{server}/.venv/` | `mcp-servers/gitea/.venv/` |
| MCP server code | `mcp-servers/{server}/mcp_server/` | `mcp-servers/gitea/mcp_server/` |
| MCP venv | `mcp-servers/{server}/.venv/` | `mcp-servers/gitea/.venv/` |
| Plugin symlink | `plugins/{plugin}/mcp-servers/{server}` | `plugins/projman/mcp-servers/gitea` |
**Note:** `mcp-servers/gitea/` is a thin wrapper — source code is in the published `gitea-mcp` package (Gitea PyPI). Other MCP servers still have local source code.
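The `run.sh` entry points referenced from `.mcp.json` are thin launchers. A sketch of one, assuming the venv cache layout described later in this file and a `mcp_server` package as shown in the tree (the invocation details are assumptions):

```bash
#!/usr/bin/env bash
# e.g. mcp-servers/netbox/run.sh — pick whichever venv exists, then launch
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
SERVER="$(basename "$DIR")"
CACHE_VENV="$HOME/.cache/claude-mcp-venvs/leo-claude-mktplace/$SERVER/.venv"

PY="$DIR/.venv/bin/python"                                        # local fallback
[[ -x "$CACHE_VENV/bin/python" ]] && PY="$CACHE_VENV/bin/python"  # cache first

cd "$DIR"
exec "$PY" -m mcp_server.server
```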
### Symlink Pattern
**Note:** Plugins do NOT have their own `mcp-servers/` directories. All MCP servers are shared at root and configured via `.mcp.json`.
### MCP Venv Paths - CRITICAL
**Venvs live in a CACHE directory that SURVIVES marketplace updates.**
When checking for venvs, ALWAYS check in this order:
| Priority | Path | Survives Updates? |
|----------|------|-------------------|
| 1 (CHECK FIRST) | `~/.cache/claude-mcp-venvs/leo-claude-mktplace/{server}/.venv/` | YES |
| 2 (fallback) | `{marketplace}/mcp-servers/{server}/.venv/` | NO |
**Why cache first?**
- Marketplace directory gets WIPED on every update/reinstall
- Cache directory SURVIVES updates
- False "venv missing" errors waste hours of debugging
**Pattern for hooks checking venvs:**
```bash
CACHE_VENV="$HOME/.cache/claude-mcp-venvs/leo-claude-mktplace/{server}/.venv/bin/python"
LOCAL_VENV="$MARKETPLACE_ROOT/mcp-servers/{server}/.venv/bin/python"
if [[ -f "$CACHE_VENV" ]]; then
  VENV_PATH="$CACHE_VENV"
elif [[ -f "$LOCAL_VENV" ]]; then
  VENV_PATH="$LOCAL_VENV"
else
  echo "venv missing"
fi
```
Plugins that use MCP servers create symlinks:
```bash
# From the repository root (the target resolves relative to the link's location)
ln -s ../../../mcp-servers/gitea plugins/projman/mcp-servers/gitea
```
**See lesson learned:** [Startup Hooks Must Check Venv Cache Path First](https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/wiki/lessons/patterns/startup-hooks-must-check-venv-cache-path-first)
The symlink target is relative: `../../../mcp-servers/{server}`
### Documentation Paths
| Type | Location |
|------|----------|
| Architecture & plugin reference | `docs/ARCHITECTURE.md` |
| Architecture diagrams | `docs/architecture/` |
| Workflow docs | `docs/workflows/` |
| This file | `docs/CANONICAL-PATHS.md` |
| Update guide | `docs/UPDATING.md` |
| Configuration guide | `docs/CONFIGURATION.md` |
| Commands cheat sheet | `docs/COMMANDS-CHEATSHEET.md` |
| Debugging checklist | `docs/DEBUGGING-CHECKLIST.md` |
| Migration guide (v8→v9) | `docs/MIGRATION-v9.md` |
---
@@ -328,12 +179,15 @@ fi
2. Verify each path against patterns in this file
3. Show verification to user before proceeding
### Relative Path Calculation
### Relative Path Calculation (v3.0.0)
From `.mcp.json` (at root) to `mcp-servers/gitea/`:
From `plugins/projman/.mcp.json` to shared `mcp-servers/gitea/`:
```
.mcp.json (at repository root)
→ Uses absolute installed path: ~/.claude/plugins/marketplaces/.../mcp-servers/gitea/run.sh
plugins/projman/.mcp.json
→ Uses ${CLAUDE_PLUGIN_ROOT}/mcp-servers/gitea/
→ Symlink at plugins/projman/mcp-servers/gitea points to ../../../mcp-servers/gitea
Result in .mcp.json: ${CLAUDE_PLUGIN_ROOT}/mcp-servers/gitea/.venv/bin/python
```
From `.claude-plugin/marketplace.json` to `plugins/projman/`:
@@ -352,77 +206,27 @@ Result: ./plugins/projman
| Wrong | Why | Correct |
|-------|-----|---------|
| `projman/` at root | Plugins go in `plugins/` | `plugins/projman/` |
| `mcp-servers/` inside plugins | MCP servers are shared at root | Use root `mcp-servers/` |
| Plugin-level `.mcp.json` | MCP config is at root | Use root `.mcp.json` |
| Hardcoding absolute paths in source | Breaks portability | Use relative paths or `${CLAUDE_PLUGIN_ROOT}` |
| Direct path in .mcp.json to root mcp-servers | Use symlink | Symlink at `plugins/{plugin}/mcp-servers/` |
| Creating new mcp-servers inside plugins | Use shared + symlink | Symlink to `mcp-servers/` |
| Hardcoding absolute paths | Breaks portability | Use `${CLAUDE_PLUGIN_ROOT}` |
---
## Architecture Note
## Architecture Note (v3.0.0)
MCP servers are **shared at repository root** and configured in a single `.mcp.json` file.
MCP servers are now **shared at repository root** with **symlinks** from plugins:
**Benefits:**
- Single source of truth for each MCP server
- Updates apply to all plugins automatically
- No duplication - clean plugin structure
- Simple configuration in one place
- Reduced duplication
- Symlinks work with Claude Code caching
**Configuration:**
All MCP servers are defined in `.mcp.json` at repository root:
```json
{
"mcpServers": {
"gitea": { "command": ".../mcp-servers/gitea/run.sh" },
"netbox": { "command": ".../mcp-servers/netbox/run.sh" },
"data-platform": { "command": ".../mcp-servers/data-platform/run.sh" },
"viz-platform": { "command": ".../mcp-servers/viz-platform/run.sh" },
"contract-validator": { "command": ".../mcp-servers/contract-validator/run.sh" }
}
}
```
**Symlink Pattern:**
---
## Domain Metadata
### Domain Field Location
Domain metadata is stored in `metadata.json` (v9.1.2+, moved from plugin.json/marketplace.json for Claude Code schema compliance):
| Location | Field | Example |
|----------|-------|---------|
| `plugins/{name}/.claude-plugin/metadata.json` | `"domain": "core"` | `plugins/projman/.claude-plugin/metadata.json` |
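Concretely, `plugins/projman/.claude-plugin/metadata.json` carries at least the domain field (other fields may exist alongside it):

```json
{
  "domain": "core"
}
```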
### Allowed Domain Values
| Domain | Purpose | Existing Plugins |
|--------|---------|-----------------|
| `core` | Development workflow plugins | projman, git-flow, pr-review, code-sentinel, doc-guardian, clarity-assist, contract-validator, claude-config-maintainer, project-hygiene |
| `data` | Data engineering and visualization | data-platform, viz-platform, data-seed |
| `ops` | Operations and infrastructure | cmdb-assistant, ops-release-manager, ops-deploy-pipeline |
| `saas` | SaaS application development | saas-api-platform, saas-db-migrate, saas-react-platform, saas-test-pilot |
| `debug` | Debugging and diagnostics | debug-mcp |
### Plugin Naming Convention
- **Core plugins:** No prefix (existing names never change)
- **New plugins:** Domain prefix: `saas-*`, `ops-*`, `data-*`, `debug-*`
- Domain is always in metadata — prefix is a naming convention, not a requirement
### Domain Query Examples
```bash
# List all plugins in a domain
for p in plugins/*; do
d=$(jq -r '.domain // empty' "$p/.claude-plugin/metadata.json" 2>/dev/null)
[[ "$d" == "saas" ]] && basename "$p"
done
# Count plugins per domain
for p in plugins/*; do
jq -r '.domain // empty' "$p/.claude-plugin/metadata.json" 2>/dev/null
done | sort | uniq -c | sort -rn
```

```
plugins/projman/mcp-servers/gitea -> ../../../mcp-servers/gitea
plugins/cmdb-assistant/mcp-servers/netbox -> ../../../mcp-servers/netbox
plugins/pr-review/mcp-servers/gitea -> ../../../mcp-servers/gitea
```
---
@@ -431,14 +235,6 @@ done | sort | uniq -c | sort -rn
| Date | Change | By |
|------|--------|-----|
| 2026-02-07 | v9.1.2: Moved domain field from plugin.json/marketplace.json to metadata.json for Claude Code schema compliance | Claude Code |
| 2026-02-07 | v9.1.0: Removed deleted dirs (architecture/, prompts/, project-lessons-learned/), added Phase 3 plugins, added ARCHITECTURE.md, MIGRATION-v9.md, updated Domain table, removed stale hooks/ dirs | Claude Code |
| 2026-02-06 | v8.0.0: Added domain metadata section, Phase 1a paths, future plugin paths | Claude Code |
| 2026-02-04 | v7.1.0: Added profile configs, prompts/, project-lessons-learned/, metadata.json, deprecated switch-profile.sh | Claude Code |
| 2026-01-30 | v5.5.0: Removed plugin-level mcp-servers symlinks - all MCP config now in root .mcp.json | Claude Code |
| 2026-01-26 | v5.0.0: Added contract-validator plugin and MCP server | Claude Code |
| 2026-01-26 | v4.1.0: Added viz-platform plugin and MCP server | Claude Code |
| 2026-01-25 | v4.0.0: Added data-platform plugin and MCP server | Claude Code |
| 2026-01-20 | v3.0.0: MCP servers moved to root with symlinks | Claude Code |
| 2026-01-20 | v3.0.0: Added clarity-assist, git-flow, pr-review plugins | Claude Code |
| 2026-01-20 | v3.0.0: Added docs/CONFIGURATION.md | Claude Code |

View File

@@ -1,24 +1,6 @@
# Plugin Commands Cheat Sheet
Quick reference for all commands in the Leo Claude Marketplace (v9.0.0+).
All commands follow the `/<noun> <action>` sub-command pattern.
## Invocation
Commands can be invoked in two ways:
1. **Via dispatch file:** `/doc audit` (routes through dispatch file to invoke `/doc-guardian:doc-audit`)
2. **Direct plugin-prefixed:** `/doc-guardian:doc-audit` (invokes command directly)
Both methods work identically. The dispatch file provides a user-friendly interface with `$ARGUMENTS` parsing, while the direct format bypasses the dispatcher.
If dispatch routing fails, use the direct plugin-prefixed format: `/<plugin-name>:<command-name>`.
**Examples:**
- `/sprint plan` → routes to `/projman:sprint-plan`
- `/doc audit` → routes to `/doc-guardian:doc-audit`
- `/pr review` → routes to `/pr-review:pr-review`
Quick reference for all commands in the Leo Claude Marketplace.
---
@@ -26,106 +8,53 @@ If dispatch routing fails, use the direct plugin-prefixed format: `/<plugin-name
| Plugin | Command | Auto | Manual | Description |
|--------|---------|:----:|:------:|-------------|
| **projman** | `/sprint plan` | | X | Start sprint planning with AI-guided architecture analysis and issue creation |
| **projman** | `/sprint start` | | X | Begin sprint execution with dependency analysis and parallel task coordination (requires approval or `--force`) |
| **projman** | `/sprint status` | | X | Check current sprint progress (add `--diagram` for Mermaid visualization) |
| **projman** | `/sprint review` | | X | Pre-sprint-close code quality review (debug artifacts, security, error handling) |
| **projman** | `/sprint test` | | X | Run tests (`/sprint test run`) or generate tests (`/sprint test gen <target>`) |
| **projman** | `/sprint close` | | X | Complete sprint and capture lessons learned to Gitea Wiki |
| **projman** | `/labels sync` | | X | Synchronize label taxonomy from Gitea |
| **projman** | `/projman setup` | | X | Auto-detect mode or use `--full`, `--quick`, `--sync`, `--clear-cache` |
| **projman** | `/rfc create` | | X | Create new RFC from conversation or spec |
| **projman** | `/rfc list` | | X | List all RFCs grouped by status |
| **projman** | `/rfc review` | | X | Submit RFC for maintainer review |
| **projman** | `/rfc approve` | | X | Approve RFC for sprint planning |
| **projman** | `/rfc reject` | | X | Reject RFC with documented reason |
| **projman** | `/project initiation` | | X | Discovery, source analysis, project charter |
| **projman** | `/project plan` | | X | WBS, risk register, sprint roadmap |
| **projman** | `/project status` | | X | Project health check across all sprints |
| **projman** | `/project close` | | X | Final retrospective and archival |
| **projman** | `/adr create` | | X | Create new Architecture Decision Record |
| **projman** | `/adr list` | | X | List all ADRs with status |
| **projman** | `/adr update` | | X | Update existing ADR |
| **projman** | `/adr supersede` | | X | Supersede ADR with new decision |
| **git-flow** | `/gitflow commit` | | X | Create commit with auto-generated conventional message. Flags: `--push`, `--merge`, `--sync` |
| **git-flow** | `/gitflow branch-start` | | X | Create new feature/fix/chore branch with naming conventions |
| **git-flow** | `/gitflow branch-cleanup` | | X | Remove merged branches locally and optionally on remote |
| **git-flow** | `/gitflow status` | | X | Enhanced git status with recommendations |
| **git-flow** | `/gitflow config` | | X | Configure git-flow settings for the project |
| **pr-review** | `/pr setup` | | X | Setup wizard for pr-review (shares Gitea MCP with projman) |
| **pr-review** | `/pr init` | | X | Quick project setup for PR reviews |
| **pr-review** | `/pr sync` | | X | Sync config with git remote after repo move/rename |
| **pr-review** | `/pr review` | | X | Full multi-agent PR review with confidence scoring |
| **pr-review** | `/pr summary` | | X | Quick summary of PR changes |
| **pr-review** | `/pr findings` | | X | List and filter review findings by category/severity |
| **pr-review** | `/pr diff` | | X | Formatted diff with inline review comments and annotations |
| **clarity-assist** | `/clarity clarify` | | X | Full 4-D prompt optimization with ND accommodations |
| **clarity-assist** | `/clarity quick-clarify` | | X | Rapid single-pass clarification for simple requests |
| **doc-guardian** | `/doc audit` | | X | Full documentation audit - scans for doc drift |
| **doc-guardian** | `/doc sync` | | X | Synchronize pending documentation updates |
| **doc-guardian** | `/doc changelog-gen` | | X | Generate changelog from conventional commits |
| **doc-guardian** | `/doc coverage` | | X | Documentation coverage metrics by function/class |
| **doc-guardian** | `/doc stale-docs` | | X | Flag documentation behind code changes |
| **code-sentinel** | `/sentinel scan` | | X | Full security audit (SQL injection, XSS, secrets, etc.) |
| **code-sentinel** | `/sentinel refactor` | | X | Apply refactoring patterns to improve code |
| **code-sentinel** | `/sentinel refactor-dry` | | X | Preview refactoring without applying changes |
| **projman** | `/sprint-plan` | | X | Start sprint planning with AI-guided architecture analysis and issue creation |
| **projman** | `/sprint-start` | | X | Begin sprint execution with dependency analysis and parallel task coordination |
| **projman** | `/sprint-status` | | X | Check current sprint progress and identify blockers |
| **projman** | `/review` | | X | Pre-sprint-close code quality review (debug artifacts, security, error handling) |
| **projman** | `/test-check` | | X | Run tests and verify coverage before sprint close |
| **projman** | `/sprint-close` | | X | Complete sprint and capture lessons learned to Gitea Wiki |
| **projman** | `/labels-sync` | | X | Synchronize label taxonomy from Gitea |
| **projman** | `/initial-setup` | | X | Full setup wizard: MCP server + system config + project config |
| **projman** | `/project-init` | | X | Quick project setup (assumes system config exists) |
| **projman** | `/project-sync` | | X | Sync config with git remote after repo move/rename |
| **projman** | *SessionStart hook* | X | | Detects git remote vs .env mismatch, warns to run /project-sync |
| **projman** | `/test-gen` | | X | Generate comprehensive tests for specified code |
| **projman** | `/debug-report` | | X | Run diagnostics and create structured issue in marketplace |
| **projman** | `/debug-review` | | X | Investigate diagnostic issues and propose fixes with approval gates |
| **git-flow** | `/commit` | | X | Create commit with auto-generated conventional message |
| **git-flow** | `/commit-push` | | X | Commit and push to remote in one operation |
| **git-flow** | `/commit-merge` | | X | Commit current changes, then merge into target branch |
| **git-flow** | `/commit-sync` | | X | Full sync: commit, push, and sync with upstream/base branch |
| **git-flow** | `/branch-start` | | X | Create new feature/fix/chore branch with naming conventions |
| **git-flow** | `/branch-cleanup` | | X | Remove merged branches locally and optionally on remote |
| **git-flow** | `/git-status` | | X | Enhanced git status with recommendations |
| **git-flow** | `/git-config` | | X | Configure git-flow settings for the project |
| **pr-review** | `/initial-setup` | | X | Setup wizard for pr-review (shares Gitea MCP with projman) |
| **pr-review** | `/project-init` | | X | Quick project setup for PR reviews |
| **pr-review** | `/project-sync` | | X | Sync config with git remote after repo move/rename |
| **pr-review** | *SessionStart hook* | X | | Detects git remote vs .env mismatch |
| **pr-review** | `/pr-review` | | X | Full multi-agent PR review with confidence scoring |
| **pr-review** | `/pr-summary` | | X | Quick summary of PR changes |
| **pr-review** | `/pr-findings` | | X | List and filter review findings by category/severity |
| **clarity-assist** | `/clarify` | | X | Full 4-D prompt optimization with ND accommodations |
| **clarity-assist** | `/quick-clarify` | | X | Rapid single-pass clarification for simple requests |
| **doc-guardian** | `/doc-audit` | | X | Full documentation audit - scans for doc drift |
| **doc-guardian** | `/doc-sync` | | X | Synchronize pending documentation updates |
| **doc-guardian** | *PostToolUse hook* | X | | Silently detects doc drift on Write/Edit |
| **code-sentinel** | `/security-scan` | | X | Full security audit (SQL injection, XSS, secrets, etc.) |
| **code-sentinel** | `/refactor` | | X | Apply refactoring patterns to improve code |
| **code-sentinel** | `/refactor-dry` | | X | Preview refactoring without applying changes |
| **code-sentinel** | *PreToolUse hook* | X | | Scans code before writing; blocks critical issues |
| **claude-config-maintainer** | `/claude-config analyze` | | X | Analyze CLAUDE.md for optimization opportunities |
| **claude-config-maintainer** | `/claude-config optimize` | | X | Optimize CLAUDE.md structure with preview/backup |
| **claude-config-maintainer** | `/claude-config init` | | X | Initialize new CLAUDE.md for a project |
| **claude-config-maintainer** | `/claude-config diff` | | X | Track CLAUDE.md changes over time with behavioral impact |
| **claude-config-maintainer** | `/claude-config lint` | | X | Lint CLAUDE.md for anti-patterns and best practices |
| **claude-config-maintainer** | `/claude-config audit-settings` | | X | Audit settings.local.json permissions (100-point score) |
| **claude-config-maintainer** | `/claude-config optimize-settings` | | X | Optimize permissions (profiles, consolidation, dry-run) |
| **claude-config-maintainer** | `/claude-config permissions-map` | | X | Visual review layer + permission coverage map |
| **cmdb-assistant** | `/cmdb setup` | | X | Setup wizard for NetBox MCP server |
| **cmdb-assistant** | `/cmdb search` | | X | Search NetBox for devices, IPs, sites |
| **cmdb-assistant** | `/cmdb device` | | X | Manage network devices (create, view, update, delete) |
| **cmdb-assistant** | `/cmdb ip` | | X | Manage IP addresses and prefixes |
| **cmdb-assistant** | `/cmdb site` | | X | Manage sites, locations, racks, and regions |
| **cmdb-assistant** | `/cmdb audit` | | X | Data quality analysis (VMs, devices, naming, roles) |
| **cmdb-assistant** | `/cmdb register` | | X | Register current machine into NetBox with running apps |
| **cmdb-assistant** | `/cmdb sync` | | X | Sync machine state with NetBox (detect drift, update) |
| **cmdb-assistant** | `/cmdb topology` | | X | Infrastructure topology diagrams (rack, network, site views) |
| **cmdb-assistant** | `/cmdb change-audit` | | X | NetBox audit trail queries with filtering |
| **cmdb-assistant** | `/cmdb ip-conflicts` | | X | Detect IP conflicts and overlapping prefixes |
| **project-hygiene** | `/hygiene check` | | X | Project file organization and cleanup check |
| **data-platform** | `/data ingest` | | X | Load data from CSV, Parquet, JSON into DataFrame |
| **data-platform** | `/data profile` | | X | Generate data profiling report with statistics |
| **data-platform** | `/data schema` | | X | Explore database schemas, tables, columns |
| **data-platform** | `/data explain` | | X | Explain query execution plan |
| **data-platform** | `/data lineage` | | X | Show dbt model lineage and dependencies |
| **data-platform** | `/data run` | | X | Run dbt models with validation |
| **data-platform** | `/data lineage-viz` | | X | dbt lineage visualization as Mermaid diagrams |
| **data-platform** | `/data dbt-test` | | X | Formatted dbt test runner with summary and failure details |
| **data-platform** | `/data quality` | | X | DataFrame quality checks (nulls, duplicates, types, outliers) |
| **data-platform** | `/data review` | | X | Comprehensive data integrity audits |
| **data-platform** | `/data gate` | | X | Binary pass/fail data integrity gates |
| **data-platform** | `/data setup` | | X | Setup wizard for data-platform MCP servers |
| **viz-platform** | `/viz setup` | | X | Setup wizard for viz-platform MCP server |
| **viz-platform** | `/viz chart` | | X | Create Plotly charts with theme integration |
| **viz-platform** | `/viz chart-export` | | X | Export charts to PNG, SVG, PDF via kaleido |
| **viz-platform** | `/viz dashboard` | | X | Create dashboard layouts with filters and grids |
| **viz-platform** | `/viz theme` | | X | Apply existing theme to visualizations |
| **viz-platform** | `/viz theme-new` | | X | Create new custom theme with design tokens |
| **viz-platform** | `/viz theme-css` | | X | Export theme as CSS custom properties |
| **viz-platform** | `/viz component` | | X | Inspect DMC component props and validation |
| **viz-platform** | `/viz accessibility-check` | | X | Color blind validation (WCAG contrast ratios) |
| **viz-platform** | `/viz breakpoints` | | X | Configure responsive layout breakpoints |
| **viz-platform** | `/viz design-review` | | X | Detailed design system audits |
| **viz-platform** | `/viz design-gate` | | X | Binary pass/fail design system validation gates |
| **contract-validator** | `/cv validate` | | X | Full marketplace compatibility validation |
| **contract-validator** | `/cv check-agent` | | X | Validate single agent definition |
| **contract-validator** | `/cv list-interfaces` | | X | Show all plugin interfaces |
| **contract-validator** | `/cv dependency-graph` | | X | Mermaid visualization of plugin dependencies |
| **contract-validator** | `/cv setup` | | X | Setup wizard for contract-validator MCP |
| **contract-validator** | `/cv status` | | X | Marketplace-wide health check (installation, MCP, configuration) |
---
## Migration from v8.x
All commands were renamed in v9.0.0 to follow `/<noun> <action>` pattern. See [MIGRATION-v9.md](./MIGRATION-v9.md) for the complete old-to-new mapping.
| **claude-config-maintainer** | `/config-analyze` | | X | Analyze CLAUDE.md for optimization opportunities |
| **claude-config-maintainer** | `/config-optimize` | | X | Optimize CLAUDE.md structure with preview/backup |
| **claude-config-maintainer** | `/config-init` | | X | Initialize new CLAUDE.md for a project |
| **cmdb-assistant** | `/initial-setup` | | X | Setup wizard for NetBox MCP server |
| **cmdb-assistant** | `/cmdb-search` | | X | Search NetBox for devices, IPs, sites |
| **cmdb-assistant** | `/cmdb-device` | | X | Manage network devices (create, view, update, delete) |
| **cmdb-assistant** | `/cmdb-ip` | | X | Manage IP addresses and prefixes |
| **cmdb-assistant** | `/cmdb-site` | | X | Manage sites, locations, racks, and regions |
| **project-hygiene** | *PostToolUse hook* | X | | Removes temp files, warns about unexpected root files |
---
@@ -133,16 +62,13 @@ All commands were renamed in v9.0.0 to follow `/<noun> <action>` pattern. See [M
| Category | Plugins | Primary Use |
|----------|---------|-------------|
| **Setup** | projman, pr-review, cmdb-assistant, data-platform, viz-platform, contract-validator | `/projman setup`, `/pr setup`, `/cmdb setup`, `/data setup`, `/viz setup`, `/cv setup` |
| **Setup** | projman, pr-review, cmdb-assistant | `/initial-setup`, `/project-init` |
| **Task Planning** | projman, clarity-assist | Sprint management, requirement clarification |
| **Code Quality** | code-sentinel, pr-review | Security scanning, PR reviews |
| **Documentation** | doc-guardian, claude-config-maintainer | Doc sync, CLAUDE.md maintenance |
| **Git Operations** | git-flow | Commits, branches, workflow automation |
| **Infrastructure** | cmdb-assistant | NetBox CMDB management |
| **Data Engineering** | data-platform | pandas, PostgreSQL, dbt operations |
| **Visualization** | viz-platform | DMC validation, Plotly charts, theming |
| **Validation** | contract-validator | Cross-plugin compatibility checks |
| **Maintenance** | project-hygiene | Manual cleanup via `/hygiene check` |
| **Maintenance** | project-hygiene | Automatic cleanup |
---
@@ -150,47 +76,32 @@ All commands were renamed in v9.0.0 to follow `/<noun> <action>` pattern. See [M
| Plugin | Hook Event | Behavior |
|--------|------------|----------|
| **code-sentinel** | PreToolUse (Write/Edit/MultiEdit) | Scans code before writing; blocks critical security issues |
| **git-flow** | PreToolUse (Bash) | Validates branch naming and commit message conventions |
| **cmdb-assistant** | PreToolUse (MCP create/update) | Validates input data before NetBox writes |
| **clarity-assist** | UserPromptSubmit | Detects vague prompts and suggests clarification |
| **projman** | SessionStart | Checks git remote vs .env; warns if mismatch detected |
| **pr-review** | SessionStart | Checks git remote vs .env; warns if mismatch detected |
| **doc-guardian** | PostToolUse (Write/Edit) | Silently tracks documentation drift |
| **code-sentinel** | PreToolUse (Write/Edit) | Scans for security issues; blocks critical vulnerabilities |
| **project-hygiene** | PostToolUse (Write/Edit) | Cleans temp files, warns about misplaced files |
---
## Dev Workflow Examples
### Example 0: RFC-Driven Feature Development
Full workflow from idea to implementation using RFCs:
```
1. /clarity clarify # Clarify the feature idea
2. /rfc create # Create RFC from clarified spec
... refine RFC content ...
3. /rfc review 0001 # Submit RFC for review
... review discussion ...
4. /rfc approve 0001 # Approve RFC for implementation
5. /sprint plan # Select approved RFC for sprint
... implement feature ...
6. /sprint close # Complete sprint, RFC marked Implemented
```
### Example 1: Starting a New Feature Sprint
A typical workflow for planning and executing a feature sprint:
```
1. /clarity clarify # Clarify requirements if vague
2. /sprint plan # Plan the sprint with architecture analysis
3. /labels sync # Ensure labels are up-to-date
4. /sprint start # Begin execution with dependency ordering
5. /gitflow branch-start feat/... # Create feature branch
1. /clarify # Clarify requirements if vague
2. /sprint-plan # Plan the sprint with architecture analysis
3. /labels-sync # Ensure labels are up-to-date
4. /sprint-start # Begin execution with dependency ordering
5. /branch-start feat/... # Create feature branch
... implement features ...
6. /gitflow commit # Commit with conventional message
7. /sprint status --diagram # Check progress with visualization
8. /sprint review # Pre-close quality review
9. /sprint test run # Verify test coverage
10. /sprint close # Capture lessons learned
6. /commit # Commit with conventional message
7. /sprint-status # Check progress mid-sprint
8. /review # Pre-close quality review
9. /test-check # Verify test coverage
10. /sprint-close # Capture lessons learned
```
### Example 2: Daily Development Cycle
@@ -198,12 +109,12 @@ A typical workflow for planning and executing a feature sprint:
Quick daily workflow with git-flow:
```
1. /gitflow status # Check current state
2. /gitflow branch-start fix/... # Start bugfix branch
1. /git-status # Check current state
2. /branch-start fix/... # Start bugfix branch
... make changes ...
3. /gitflow commit # Auto-generate commit message
4. /gitflow commit --push # Commit and push to remote
5. /gitflow branch-cleanup # Clean merged branches
3. /commit # Auto-generate commit message
4. /commit-push # Push to remote
5. /branch-cleanup # Clean merged branches
```
### Example 3: Pull Request Review Workflow
@@ -211,10 +122,10 @@ Quick daily workflow with git-flow:
Reviewing a PR before merge:
```
1. /pr summary # Quick overview of changes
2. /pr review # Full multi-agent review
3. /pr findings # Filter findings by severity
4. /sentinel scan # Deep security audit if needed
1. /pr-summary # Quick overview of changes
2. /pr-review # Full multi-agent review
3. /pr-findings # Filter findings by severity
4. /security-scan # Deep security audit if needed
```
### Example 4: Documentation Maintenance
@@ -222,10 +133,10 @@ Reviewing a PR before merge:
Keeping docs in sync:
```
1. /doc audit # Scan for documentation drift
2. /doc sync # Apply pending updates
3. /claude-config analyze # Check CLAUDE.md health
4. /claude-config optimize # Optimize if needed
1. /doc-audit # Scan for documentation drift
2. /doc-sync # Apply pending updates
3. /config-analyze # Check CLAUDE.md health
4. /config-optimize # Optimize if needed
```
### Example 5: Code Refactoring Session
@@ -233,11 +144,11 @@ Keeping docs in sync:
Safe refactoring with preview:
```
1. /sentinel refactor-dry # Preview opportunities
2. /sentinel scan # Baseline security check
3. /sentinel refactor # Apply improvements
4. /sprint test run # Verify nothing broke
5. /gitflow commit # Commit with descriptive message
1. /refactor-dry # Preview opportunities
2. /security-scan # Baseline security check
3. /refactor # Apply improvements
4. /test-check # Verify nothing broke
5. /commit # Commit with descriptive message
```
### Example 6: Infrastructure Documentation
@@ -245,23 +156,10 @@ Safe refactoring with preview:
Managing infrastructure with CMDB:
```
1. /cmdb search "server" # Find existing devices
2. /cmdb device view X # Check device details
3. /cmdb ip list # List available IPs
4. /cmdb site view Y # Check site info
```
### Example 6b: Data Engineering Workflow
Working with data pipelines:
```
1. /data ingest file.csv # Load data into DataFrame
2. /data profile # Generate data profiling report
3. /data schema # Explore database schemas
4. /data lineage model_name # View dbt model dependencies
5. /data run model_name # Execute dbt models
6. /data explain "SELECT ..." # Analyze query execution plan
1. /cmdb-search "server" # Find existing devices
2. /cmdb-device view X # Check device details
3. /cmdb-ip list # List available IPs
4. /cmdb-site view Y # Check site info
```
### Example 7: First-Time Setup (New Machine)
@@ -269,13 +167,13 @@ Working with data pipelines:
Setting up the marketplace for the first time:
```
1. /projman setup --full # Full setup: MCP + system config + project
1. /initial-setup # Full setup: MCP + system config + project
# → Follow prompts for Gitea URL, org
# → Add token manually when prompted
# → Confirm repository name
2. # Restart Claude Code session
3. /labels sync # Sync Gitea labels
4. /sprint plan # Plan first sprint
3. /labels-sync # Sync Gitea labels
4. /sprint-plan # Plan first sprint
```
### Example 8: New Project Setup (System Already Configured)
@@ -283,23 +181,22 @@ Setting up the marketplace for the first time:
Adding a new project when system config exists:
```
1. /projman setup --quick # Quick project setup (auto-detected)
1. /project-init # Quick project setup
# → Confirms detected repo name
# → Creates .env
2. /labels sync # Sync Gitea labels
3. /sprint plan # Plan first sprint
2. /labels-sync # Sync Gitea labels
3. /sprint-plan # Plan first sprint
```
---
## Quick Tips
- **Hooks run automatically** - code-sentinel and git-flow protect you without manual invocation
- **Use `/gitflow commit` over `git commit`** - generates better commit messages following conventions
- **Run `/sprint review` before `/sprint close`** - catches issues before closing the sprint
- **Use `/clarity clarify` for vague requests** - especially helpful for complex requirements
- **`/sentinel refactor-dry` is safe** - always preview before applying refactoring changes
- **`/gitflow commit --push`** replaces the old `/git-commit-push` - fewer commands to remember
- **Hooks run automatically** - doc-guardian and code-sentinel protect you without manual invocation
- **Use `/commit` over `git commit`** - generates better commit messages following conventions
- **Run `/review` before `/sprint-close`** - catches issues before closing the sprint
- **Use `/clarify` for vague requests** - especially helpful for complex requirements
- **`/refactor-dry` is safe** - always preview before applying refactoring changes
---
@@ -312,12 +209,9 @@ Some plugins require MCP server connectivity:
| projman | Gitea | Issues, PRs, wiki, labels, milestones |
| pr-review | Gitea | PR operations and reviews |
| cmdb-assistant | NetBox | Infrastructure CMDB |
| data-platform | pandas, PostgreSQL, dbt | DataFrames, database queries, dbt builds |
| viz-platform | viz-platform | DMC validation, charts, layouts, themes, pages |
| contract-validator | contract-validator | Plugin interface parsing, compatibility validation |
Ensure credentials are configured in `~/.config/claude/gitea.env`, `~/.config/claude/netbox.env`, or `~/.config/claude/postgres.env`.
Ensure credentials are configured in `~/.config/claude/gitea.env` or `~/.config/claude/netbox.env`.
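If you prefer to create the credential file by hand, a minimal sketch (placeholder values, variable names from the configuration reference below) looks like:

```bash
# Minimal system-level Gitea credentials (placeholder values; keep out of git)
mkdir -p ~/.config/claude
cat > ~/.config/claude/gitea.env << 'EOF'
GITEA_API_URL=https://gitea.example.com/api/v1
GITEA_API_TOKEN=your_gitea_token_here
EOF
chmod 600 ~/.config/claude/gitea.env   # tokens are secrets; restrict permissions
```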
---
*Last Updated: 2026-02-06*
*Last Updated: 2026-01-22*
View File
@@ -9,10 +9,10 @@ Centralized configuration documentation for all plugins and MCP servers in the L
**After installing the marketplace and plugins via Claude Code:**
```
/projman setup
/initial-setup
```
The interactive wizard auto-detects what's needed and handles everything except manually adding your API tokens.
The interactive wizard handles everything except manually adding your API tokens.
---
@@ -25,8 +25,7 @@ The interactive wizard auto-detects what's needed and handles everything except
└─────────────────────────────────────────────────────────────────────────────┘
/projman setup --full
(or /projman setup auto-detects)
/initial-setup
┌──────────────────────────────┼──────────────────────────────┐
▼ ▼ ▼
@@ -79,8 +78,8 @@ The interactive wizard auto-detects what's needed and handles everything except
┌───────────────┴───────────────┐
▼ ▼
/projman setup --quick /projman setup
(explicit mode) (auto-detects mode)
/project-init /initial-setup
(direct path) (smart detection)
│ │
│ ┌──────────┴──────────┐
│ ▼ ▼
@@ -109,7 +108,7 @@ The interactive wizard auto-detects what's needed and handles everything except
## What Runs Automatically vs User Interaction
### `/projman setup --full` - Full Setup
### `/initial-setup` - Full Setup
| Phase | Type | What Happens |
|-------|------|--------------|
@@ -121,7 +120,7 @@ The interactive wizard auto-detects what's needed and handles everything except
| **6. Project Config** | Automated | Creates `.env` file, checks `.gitignore` |
| **7. Validation** | Automated | Tests API connectivity, shows summary |
### `/projman setup --quick` - Quick Project Setup
### `/project-init` - Quick Project Setup
| Phase | Type | What Happens |
|-------|------|--------------|
@@ -132,25 +131,23 @@ The interactive wizard auto-detects what's needed and handles everything except
---
## One Command, Three Modes
## Three Commands for Different Scenarios
| Mode | When to Use | What It Does |
|------|-------------|--------------|
| `/projman setup` | Any time | Auto-detects: runs full, quick, or sync as needed |
| `/projman setup --full` | First time on a machine | Full setup: MCP server + system config + project config |
| `/projman setup --quick` | Starting a new project | Quick setup: project config only (assumes system is ready) |
| `/projman setup --sync` | After repo move/rename | Updates .env to match current git remote |
**Auto-detection logic:**
1. No system config → **full** mode
2. System config exists, no project config → **quick** mode
3. Both exist, git remote differs → **sync** mode
4. Both exist, match → already configured, offer to reconfigure
| Command | When to Use | What It Does |
|---------|-------------|--------------|
| `/initial-setup` | First time on a machine | Full setup: MCP server + system config + project config |
| `/project-init` | Starting a new project | Quick setup: project config only (assumes system is ready) |
| `/project-sync` | After repo move/rename | Updates .env to match current git remote |
**Typical workflow:**
1. Install plugin → run `/projman setup` (auto-runs full mode)
2. Start new project → run `/projman setup` (auto-runs quick mode)
3. Repository moved? → run `/projman setup` (auto-runs sync mode)
1. Install plugin → run `/initial-setup` (once per machine)
2. Start new project → run `/project-init` (once per project)
3. Repository moved? → run `/project-sync` (updates config)
**Smart features:**
- `/initial-setup` detects existing system config and offers quick project setup
- All commands validate org/repo via Gitea API before saving (auto-fills if verified)
- SessionStart hook automatically detects git remote vs .env mismatches (see the sketch below)
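A minimal sketch of that mismatch check, assuming the hook reads the project `.env` and shells out to git (the real hook's logic may differ):

```bash
# Hedged sketch of the SessionStart mismatch check (simplified)
set -a; source .env 2>/dev/null; set +a
remote=$(git remote get-url origin 2>/dev/null)
if [ -n "$GITEA_ORG" ] && [ -n "$GITEA_REPO" ]; then
  case "$remote" in
    *"$GITEA_ORG/$GITEA_REPO"*) ;;  # remote matches .env: stay silent
    *) echo "Repository location mismatch. Run /project-sync to update." ;;
  esac
fi
```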
---
@@ -174,7 +171,8 @@ This marketplace uses a **hybrid configuration** approach:
│ PROJECT-LEVEL (once per project) │
│ <project-root>/.env │
├─────────────────────────────────────────────────────────────────┤
│ GITEA_REPO │ Repository as owner/repo format │
│ GITEA_ORG │ Organization for this project │
│ GITEA_REPO │ Repository name for this project │
│ GIT_WORKFLOW_STYLE │ (optional) Override system default │
│ PR_REVIEW_* │ (optional) PR review settings │
└─────────────────────────────────────────────────────────────────┘
@@ -182,7 +180,7 @@ This marketplace uses a **hybrid configuration** approach:
**Benefits:**
- Single token per service (update once, use everywhere)
- Easy multi-project setup (just run `/projman setup` in each project)
- Easy multi-project setup (just run `/project-init` in each project)
- Security (tokens never committed to git, never typed into AI chat)
- Project isolation (each project can override defaults)
@@ -190,7 +188,7 @@ This marketplace uses a **hybrid configuration** approach:
## Prerequisites
Before running `/projman setup`:
Before running `/initial-setup`:
1. **Python 3.10+** installed
```bash
@@ -213,10 +211,10 @@ Before running `/projman setup`:
Run the setup wizard in Claude Code:
```
/projman setup
/initial-setup
```
The wizard will guide you through each step interactively and auto-detect the appropriate mode.
The wizard will guide you through each step interactively.
**Note:** After first-time setup, you'll need to restart your Claude Code session for MCP tools to become available.
@@ -264,7 +262,8 @@ In each project root:
```bash
cat > .env << 'EOF'
GITEA_REPO=your-organization/your-repo-name
GITEA_ORG=your-organization
GITEA_REPO=your-repo-name
EOF
```
@@ -308,7 +307,7 @@ GITEA_API_TOKEN=your_gitea_token_here
| `GITEA_API_URL` | Gitea API endpoint (with `/api/v1`) | `https://gitea.example.com/api/v1` |
| `GITEA_API_TOKEN` | Personal access token | `abc123...` |
**Note:** `GITEA_REPO` is configured at the project level in `owner/repo` format since different projects may belong to different organizations.
**Note:** `GITEA_ORG` is configured at the project level (see below) since different projects may belong to different organizations.
**Generating a Gitea Token:**
1. Log into Gitea → **User Icon** → **Settings**
@@ -363,8 +362,9 @@ GIT_CO_AUTHOR=true
Create `.env` in each project root:
```bash
# Required for projman, pr-review (use owner/repo format)
GITEA_REPO=your-organization/your-repo-name
# Required for projman, pr-review
GITEA_ORG=your-organization
GITEA_REPO=your-repo-name
# Optional: Override git-flow defaults
GIT_WORKFLOW_STYLE=pr-required
@@ -377,7 +377,8 @@ PR_REVIEW_AUTO_SUBMIT=false
| Variable | Required | Description |
|----------|----------|-------------|
| `GITEA_REPO` | Yes | Repository in `owner/repo` format (e.g., `my-org/my-repo`) |
| `GITEA_ORG` | Yes | Gitea organization for this project |
| `GITEA_REPO` | Yes | Repository name (must match Gitea exactly) |
| `GIT_WORKFLOW_STYLE` | No | Override system default |
| `PR_REVIEW_*` | No | PR review settings |
@@ -385,20 +386,17 @@ PR_REVIEW_AUTO_SUBMIT=false
## Plugin Configuration Summary
| Plugin | System Config | Project Config | Setup Command |
|--------|---------------|----------------|---------------|
| **projman** | gitea.env | .env (GITEA_REPO=owner/repo) | `/projman setup` |
| **pr-review** | gitea.env | .env (GITEA_REPO=owner/repo) | `/pr setup` |
| Plugin | System Config | Project Config | Setup Commands |
|--------|---------------|----------------|----------------|
| **projman** | gitea.env | .env (GITEA_ORG, GITEA_REPO) | `/initial-setup`, `/project-init`, `/project-sync` |
| **pr-review** | gitea.env | .env (GITEA_ORG, GITEA_REPO) | `/initial-setup`, `/project-init`, `/project-sync` |
| **git-flow** | git-flow.env (optional) | .env (optional) | None needed |
| **clarity-assist** | None | None | None needed |
| **cmdb-assistant** | netbox.env | None | `/cmdb setup` |
| **data-platform** | postgres.env | .env (optional) | `/data setup` |
| **viz-platform** | None | .env (optional DMC_VERSION) | `/viz setup` |
| **cmdb-assistant** | netbox.env | None | `/initial-setup` |
| **doc-guardian** | None | None | None needed |
| **code-sentinel** | None | None | None needed |
| **project-hygiene** | None | None | None needed |
| **claude-config-maintainer** | None | None | None needed |
| **contract-validator** | None | None | `/cv setup` |
---
@@ -406,224 +404,21 @@ PR_REVIEW_AUTO_SUBMIT=false
Once system-level config is set up, adding new projects is simple:
**Option 1: Use `/project-init` (faster)**
```
cd ~/projects/new-project
/projman setup
/project-init
```
The command auto-detects that system config exists and runs quick project setup.
---
## Installing Plugins to Consumer Projects
The marketplace provides scripts to install plugins into consumer projects. This sets up the MCP server connections and adds CLAUDE.md integration snippets.
### Install a Plugin
```bash
cd /path/to/leo-claude-mktplace
./scripts/install-plugin.sh <plugin-name> <target-project-path>
**Option 2: Use `/initial-setup` (auto-detects)**
```
cd ~/projects/new-project
/initial-setup
# → Detects system config exists
# → Offers "Quick project setup" option
```
**Examples:**
```bash
# Install data-platform to a portfolio project
./scripts/install-plugin.sh data-platform ~/projects/personal-portfolio
# Install multiple plugins
./scripts/install-plugin.sh viz-platform ~/projects/personal-portfolio
./scripts/install-plugin.sh projman ~/projects/personal-portfolio
```
**What it does:**
1. Validates the plugin exists in the marketplace
2. Adds MCP server entry to target's `.mcp.json` (if plugin has MCP server)
3. Appends integration snippet to target's `CLAUDE.md`
4. Reports changes and lists available commands
**After installation:** Restart your Claude Code session for MCP tools to become available.
### Uninstall a Plugin
```bash
./scripts/uninstall-plugin.sh <plugin-name> <target-project-path>
```
Removes the MCP server entry and CLAUDE.md integration section.
### List Installed Plugins
```bash
./scripts/list-installed.sh <target-project-path>
```
Shows which marketplace plugins are installed, partially installed, or available.
**Output example:**
```
✓ Fully Installed:
PLUGIN VERSION DESCRIPTION
------ ------- -----------
data-platform 1.3.0 pandas, PostgreSQL, and dbt integration...
viz-platform 1.1.0 DMC validation, Plotly charts, and theming...
○ Available (not installed):
projman 3.4.0 Sprint planning and project management...
```
### Plugins with MCP Servers
Not all plugins have MCP servers. The install script handles this automatically:
| Plugin | Has MCP Server | Notes |
|--------|---------------|-------|
| data-platform | ✓ | pandas, PostgreSQL, dbt tools |
| viz-platform | ✓ | DMC validation, chart, theme tools |
| contract-validator | ✓ | Plugin compatibility validation |
| cmdb-assistant | ✓ (via netbox) | NetBox CMDB tools |
| projman | ✓ (via gitea) | Issue, wiki, PR tools |
| pr-review | ✓ (via gitea) | PR review tools |
| git-flow | ✗ | Commands only |
| doc-guardian | ✗ | Commands only |
| code-sentinel | ✗ | Commands and hooks only |
| clarity-assist | ✗ | Commands only |
### Script Requirements
- **jq** must be installed (`sudo apt install jq`)
- Scripts are idempotent (safe to run multiple times)
---
## Agent Frontmatter Configuration
Agents specify their configuration in frontmatter using Claude Code's supported fields. Reference: https://code.claude.com/docs/en/sub-agents
### Supported Frontmatter Fields
| Field | Required | Default | Description |
|-------|----------|---------|-------------|
| `name` | Yes | — | Unique identifier, lowercase + hyphens |
| `description` | Yes | — | When Claude should delegate to this subagent |
| `model` | No | `inherit` | `sonnet`, `opus`, `haiku`, or `inherit` |
| `permissionMode` | No | `default` | Controls permission prompts: `default`, `acceptEdits`, `dontAsk`, `bypassPermissions`, `plan` |
| `disallowedTools` | No | none | Comma-separated tools to remove from agent's toolset |
| `skills` | No | none | Comma-separated skills auto-injected into context at startup |
| `hooks` | No | none | Lifecycle hooks scoped to this subagent |
### Complete Agent Matrix
| Plugin | Agent | `model` | `permissionMode` | `disallowedTools` | `skills` |
|--------|-------|---------|-------------------|--------------------|----------|
| projman | planner | opus | default | — | frontmatter (2) + body text (12) |
| projman | orchestrator | sonnet | acceptEdits | — | frontmatter (2) + body text (10) |
| projman | executor | sonnet | bypassPermissions | — | frontmatter (7) |
| projman | code-reviewer | opus | default | Write, Edit, MultiEdit | frontmatter (4) |
| pr-review | coordinator | sonnet | plan | Write, Edit, MultiEdit | — |
| pr-review | security-reviewer | sonnet | plan | Write, Edit, MultiEdit | — |
| pr-review | performance-analyst | sonnet | plan | Write, Edit, MultiEdit | — |
| pr-review | maintainability-auditor | haiku | plan | Write, Edit, MultiEdit | — |
| pr-review | test-validator | haiku | plan | Write, Edit, MultiEdit | — |
| data-platform | data-advisor | sonnet | default | — | — |
| data-platform | data-analysis | sonnet | plan | Write, Edit, MultiEdit | — |
| data-platform | data-ingestion | haiku | acceptEdits | — | — |
| viz-platform | design-reviewer | sonnet | plan | Write, Edit, MultiEdit | — |
| viz-platform | layout-builder | sonnet | default | — | — |
| viz-platform | component-check | haiku | plan | Write, Edit, MultiEdit | — |
| viz-platform | theme-setup | haiku | acceptEdits | — | — |
| contract-validator | full-validation | sonnet | default | — | — |
| contract-validator | agent-check | haiku | plan | Write, Edit, MultiEdit | — |
| code-sentinel | security-reviewer | sonnet | plan | Write, Edit, MultiEdit | — |
| code-sentinel | refactor-advisor | sonnet | acceptEdits | — | — |
| doc-guardian | doc-analyzer | sonnet | acceptEdits | — | — |
| clarity-assist | clarity-coach | sonnet | default | Write, Edit, MultiEdit | — |
| git-flow | git-assistant | haiku | acceptEdits | — | — |
| claude-config-maintainer | maintainer | sonnet | acceptEdits | — | frontmatter (2) |
| cmdb-assistant | cmdb-assistant | sonnet | default | — | — |
### Design Principles
- `bypassPermissions` is granted to exactly ONE agent (Executor) which has code-sentinel PreToolUse hook + Code Reviewer downstream as safety nets.
- `plan` mode is assigned to all pure analysis agents (pr-review, read-only validators).
- `disallowedTools: Write, Edit, MultiEdit` provides defense-in-depth on agents that should never write files.
- `skills` frontmatter is used for agents with ≤7 skills where guaranteed loading is safety-critical. Agents with 8+ skills use body text `## Skills to Load` for selective loading.
- `hooks` (agent-scoped) is reserved for future use (v6.0+).
Override any field by editing the agent's `.md` file in `plugins/{plugin}/agents/`.
### permissionMode Guide
| Value | Prompts for file ops? | Prompts for Bash? | Prompts for MCP? | Use when |
|-------|-----------------------|-------------------|-------------------|----------|
| `default` | Yes | Yes | No (MCP bypasses permissions) | You want full visibility |
| `acceptEdits` | No | Yes | No | Core job is file read/write, Bash visibility useful |
| `dontAsk` | No | No (most) | No | Even Bash prompts are friction |
| `bypassPermissions` | No | No | No | Agent has downstream safety layers |
| `plan` | N/A (read-only) | N/A (read-only) | No | Pure analysis, no modifications |
### disallowedTools Guide
Use `disallowedTools` to remove specific tools from an agent's toolset. This is a blacklist — the agent inherits all tools from the main thread, then the listed tools are removed.
Prefer `disallowedTools` over `tools` (whitelist) because:
- New MCP servers are automatically available without updating every agent.
- Less configuration to maintain.
- Easier to audit — you only list what's blocked.
Common patterns (a concrete sketch follows the list):
- `disallowedTools: Write, Edit, MultiEdit` — read-only agent, cannot modify files.
- `disallowedTools: Bash` — no shell access (rare, most agents need at least read-only Bash).
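As a concrete sketch, a read-only analysis agent combining `plan` mode with the blacklist might be defined as follows. The agent name and plugin path are hypothetical; the frontmatter fields come from the tables above.

```bash
# Hedged sketch: a read-only analysis agent (hypothetical name and path)
cat > plugins/my-plugin/agents/auditor.md << 'EOF'
---
name: auditor
description: Read-only audit agent; delegate for pure analysis tasks
model: haiku
permissionMode: plan
disallowedTools: Write, Edit, MultiEdit
---
Analyze the requested files and report findings. Do not modify anything.
EOF
```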
### skills Frontmatter Guide
The `skills` field auto-injects skill file contents into the agent's context window at startup. The agent does NOT need to read the files — they are already present.
**When to use frontmatter `skills`:**
- Agent has ≤7 skills.
- Skills are safety-critical (e.g., `branch-security`, `runaway-detection`).
- You need guaranteed loading — no risk of the agent skipping a skill.
**When to keep body text `## Skills to Load`:**
- Agent has 8+ skills (context window cost too high for full injection).
- Skills are situational — not all needed for every invocation.
- Agent benefits from selective loading based on the specific task.
Skill names in frontmatter are resolved relative to the plugin's `skills/` directory. Use the filename without the `.md` extension.
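For example, a sketch of guaranteed loading for two safety-critical skills (the agent name is hypothetical; the skill names are taken from the examples above):

```bash
# Hedged sketch: frontmatter skills resolve against the plugin's skills/ directory
cat > plugins/projman/agents/example-guard.md << 'EOF'
---
name: example-guard
description: Demonstrates guaranteed skill injection at startup
model: sonnet
skills: branch-security, runaway-detection
---
EOF
# "branch-security" in frontmatter maps to this file (no .md in frontmatter):
ls plugins/projman/skills/branch-security.md
```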
### Phase-Based Skill Loading (Body Text)
For agents with 8+ skills, use **phase-based loading** in the agent body text. This structures skill reads into logical phases, with explicit instructions to read each skill exactly once.
**Pattern:**
```markdown
## Skill Loading Protocol
**Frontmatter skills (auto-injected, always available — DO NOT re-read these):**
- `skill-a` — description
- `skill-b` — description
**Phase 1 skills — read ONCE at session start:**
- skills/validation-skill.md
- skills/safety-skill.md
**Phase 2 skills — read ONCE when entering main work:**
- skills/workflow-skill.md
- skills/domain-skill.md
**CRITICAL: Read each skill file exactly ONCE. Do NOT re-read skill files between MCP API calls.**
```
**Benefits:**
- Frontmatter skills (always needed) are auto-injected — zero file read cost
- Phase skills are read once at the appropriate time — not re-read per API call
- `batch-execution` skill provides protocol for API-heavy phases
- ~76-83% reduction in skill-related token consumption for typical sprints
**Currently applied to:**
- Planner agent: 2 frontmatter + 12 body text (3 phases)
- Orchestrator agent: 2 frontmatter + 10 body text (2 phases)
Both approaches work. Use `/project-init` when you know the system is already configured.
---
@@ -631,12 +426,12 @@ For agents with 8+ skills, use **phase-based loading** in the agent body text. T
### API Validation
When running `/projman setup`, the command:
When running `/initial-setup`, `/project-init`, or `/project-sync`, the commands:
1. **Detects** organization and repository from git remote URL
2. **Validates** via Gitea API: `GET /api/v1/repos/{org}/{repo}`
3. **Auto-fills** if repository exists and is accessible (no confirmation needed)
4. **Asks for confirmation** only if validation fails (404 or permission error)
1. **Detect** organization and repository from git remote URL
2. **Validate** via Gitea API: `GET /api/v1/repos/{org}/{repo}`
3. **Auto-fill** if repository exists and is accessible (no confirmation needed)
4. **Ask for confirmation** only if validation fails (404 or permission error)
This catches typos and permission issues before saving configuration.
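You can reproduce the validation step by hand; a sketch using the system credentials (org/repo values are placeholders):

```bash
# Hedged sketch: the same validation call the setup commands make
source ~/.config/claude/gitea.env
code=$(curl -s -o /dev/null -w "%{http_code}" \
  -H "Authorization: token $GITEA_API_TOKEN" \
  "$GITEA_API_URL/repos/my-org/my-repo")
echo "$code"   # 200 → auto-fill; 404/403 → setup asks for confirmation
```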
@@ -644,9 +439,9 @@ This catches typos and permission issues before saving configuration.
When you start a Claude Code session, a hook automatically:
1. Reads `GITEA_REPO` (in `owner/repo` format) from `.env`
1. Reads `GITEA_ORG` and `GITEA_REPO` from `.env`
2. Compares with current `git remote get-url origin`
3. **Warns** if mismatch detected: "Repository location mismatch. Run `/projman setup --sync` to update."
3. **Warns** if mismatch detected: "Repository location mismatch. Run `/project-sync` to update."
This helps when you:
- Move a repository to a different organization
@@ -668,7 +463,7 @@ curl -H "Authorization: token $GITEA_API_TOKEN" "$GITEA_API_URL/user"
In Claude Code, after restarting your session:
```
/labels sync
/labels-sync
```
If this works, your setup is complete.
@@ -708,8 +503,9 @@ If you get 401, regenerate your token in Gitea.
# Check venv exists
ls /path/to/mcp-servers/gitea/.venv
# If missing, create venv (do NOT delete existing venvs)
# Reinstall if missing
cd /path/to/mcp-servers/gitea
rm -rf .venv
python3 -m venv .venv
source .venv/bin/activate
pip install -r requirements.txt
@@ -722,8 +518,7 @@ deactivate
# Check project .env
cat .env
# Verify GITEA_REPO is in owner/repo format and matches Gitea exactly
# Example: GITEA_REPO=my-org/my-repo
# Verify GITEA_REPO matches the Gitea repository name exactly
```
---
@@ -741,7 +536,7 @@ cat .env
3. **Never type tokens into AI chat**
- Always edit config files directly in your editor
- The `/projman setup` wizard respects this
- The `/initial-setup` wizard respects this
4. **Rotate tokens periodically**
- Every 6-12 months
View File
@@ -2,7 +2,7 @@
**Purpose:** Systematic approach to diagnose and fix plugin loading issues.
Last Updated: 2026-02-08
Last Updated: 2026-01-22
---
@@ -73,19 +73,25 @@ cd $RUNTIME && ./scripts/setup.sh
---
## Step 4: Verify MCP Configuration
## Step 4: Verify Symlink Resolution
Check `.mcp.json` at marketplace root is correctly configured:
Plugins use symlinks to shared MCP servers. Verify they resolve correctly:
```bash
RUNTIME=~/.claude/plugins/marketplaces/leo-claude-mktplace
# Check .mcp.json exists and has valid content
cat $RUNTIME/.mcp.json | jq '.mcpServers | keys'
# Check symlinks exist and resolve
readlink -f $RUNTIME/plugins/projman/mcp-servers/gitea
readlink -f $RUNTIME/plugins/pr-review/mcp-servers/gitea
readlink -f $RUNTIME/plugins/cmdb-assistant/mcp-servers/netbox
# Should list: gitea, netbox, data-platform, viz-platform, contract-validator
# Should resolve to:
# $RUNTIME/mcp-servers/gitea
# $RUNTIME/mcp-servers/netbox
```
**If broken:** Symlinks are relative, so they break whenever the installed directory structure differs from the expected layout.
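Recreating a broken symlink by hand is a reasonable fallback; a sketch assuming the layout shown above:

```bash
# Hedged sketch: recreate a broken relative symlink (layout assumed from the checks above)
RUNTIME=~/.claude/plugins/marketplaces/leo-claude-mktplace
cd $RUNTIME/plugins/projman/mcp-servers
ln -sfn ../../../mcp-servers/gitea gitea
readlink -f gitea   # should resolve to $RUNTIME/mcp-servers/gitea
```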
---
## Step 5: Test MCP Server Startup
@@ -95,9 +101,9 @@ Manually test if the MCP server can start:
```bash
RUNTIME=~/.claude/plugins/marketplaces/leo-claude-mktplace
# Test Gitea MCP (uses gitea-mcp package from registry)
# Test Gitea MCP
cd $RUNTIME/mcp-servers/gitea
.venv/bin/python -c "from gitea_mcp.server import main; print('OK')"
PYTHONPATH=. .venv/bin/python -c "from mcp_server.server import main; print('OK')"
# Test NetBox MCP
cd $RUNTIME/mcp-servers/netbox
@@ -122,7 +128,7 @@ cat ~/.config/claude/netbox.env
# Project-level config (in target project)
cat /path/to/project/.env
# Should contain: GITEA_REPO=owner/repo (e.g., my-org/my-repo)
# Should contain: GITEA_ORG, GITEA_REPO
```
---
@@ -159,8 +165,10 @@ echo -e "\n=== Virtual Environments ==="
[ -f "$RUNTIME/mcp-servers/gitea/.venv/bin/python" ] && echo "Gitea venv: OK" || echo "Gitea venv: MISSING"
[ -f "$RUNTIME/mcp-servers/netbox/.venv/bin/python" ] && echo "NetBox venv: OK" || echo "NetBox venv: MISSING"
echo -e "\n=== MCP Configuration ==="
[ -f "$RUNTIME/.mcp.json" ] && echo ".mcp.json: OK" || echo ".mcp.json: MISSING"
echo -e "\n=== Symlinks ==="
[ -L "$RUNTIME/plugins/projman/mcp-servers/gitea" ] && echo "projman->gitea: OK" || echo "projman->gitea: MISSING"
[ -L "$RUNTIME/plugins/pr-review/mcp-servers/gitea" ] && echo "pr-review->gitea: OK" || echo "pr-review->gitea: MISSING"
[ -L "$RUNTIME/plugins/cmdb-assistant/mcp-servers/netbox" ] && echo "cmdb-assistant->netbox: OK" || echo "cmdb-assistant->netbox: MISSING"
echo -e "\n=== Config Files ==="
[ -f ~/.config/claude/gitea.env ] && echo "gitea.env: OK" || echo "gitea.env: MISSING"
@@ -174,51 +182,10 @@ echo -e "\n=== Config Files ==="
| Issue | Symptom | Fix |
|-------|---------|-----|
| Missing venvs | "X MCP servers failed" | `cd ~/.claude/plugins/marketplaces/leo-claude-mktplace && ./scripts/setup.sh` |
| Missing .mcp.json | MCP tools not available | Check `.mcp.json` exists at marketplace root |
| Broken symlinks | MCP tools not available | Reinstall marketplace or manually recreate symlinks |
| Wrong path edits | Changes don't take effect | Edit installed path or reinstall after source changes |
| Missing credentials | MCP connection errors | Create `~/.config/claude/gitea.env` with API credentials |
| Invalid hook events | Hooks don't fire | Use only valid event names (see Step 7) |
| Gitea issues not closing | Merged to non-default branch | Manually close issues (see below) |
| MCP changes not taking effect | Session caching | Restart Claude Code session (see below) |
### Gitea Auto-Close Behavior
**Issue:** Using `Closes #XX` or `Fixes #XX` in commit/PR messages does NOT auto-close issues when merging to `development`.
**Root Cause:** Gitea only auto-closes issues when merging to the **default branch** (typically `main` or `master`). Merging to `development`, `staging`, or any other branch will NOT trigger auto-close.
**Workaround:**
1. Use the Gitea MCP tool to manually close issues after merging to development:
```
mcp__plugin_projman_gitea__update_issue(issue_number=XX, state="closed")
```
2. Or close issues via the Gitea web UI
3. The auto-close keywords will still work when the changes are eventually merged to `main`
**Recommendation:** Include the `Closes #XX` keywords in commits anyway - they'll work when the final merge to `main` happens.
### MCP Session Restart Requirement
**Issue:** Changes to MCP servers, hooks, or plugin configuration don't take effect immediately.
**Root Cause:** Claude Code loads MCP tools and plugin configuration at session start. These are cached in session memory and not reloaded dynamically.
**What requires a session restart:**
- MCP server code changes (Python files in `mcp-servers/`)
- Changes to `.mcp.json` files
- Changes to `hooks/hooks.json`
- Changes to `plugin.json`
- Adding new MCP tools or modifying tool signatures
**What does NOT require a restart:**
- Command/skill markdown files (`.md`) - these are read on invocation
- Agent markdown files - read when agent is invoked
**Correct workflow after plugin changes:**
1. Make changes to source files
2. Run `./scripts/verify-hooks.sh` to validate
3. Inform user: "Please restart Claude Code for changes to take effect"
4. **Do NOT clear cache mid-session** - see "Cache Clearing" section
---
@@ -230,57 +197,12 @@ echo -e "\n=== Config Files ==="
---
## Cache Clearing: When It's Safe vs Destructive
**⚠️ CRITICAL: Never clear plugin cache mid-session.**
### Why Cache Clearing Breaks MCP Tools
When Claude Code starts a session:
1. MCP tools are loaded from the cache directory
2. Tool definitions include **absolute paths** to the venv (e.g., `~/.claude/plugins/cache/.../venv/`)
3. These paths are cached in the session memory
4. Deleting the cache removes the venv, but the session still references the old paths
5. Any MCP tool making HTTP requests fails with TLS certificate errors
### When Cache Clearing is SAFE
| Scenario | Safe? | Action |
|----------|-------|--------|
| Before starting Claude Code | ✅ Yes | Clear cache, then start session |
| Between sessions | ✅ Yes | Clear cache after `/exit`, before next session |
| During a session | ❌ NO | Never - will break MCP tools |
| After plugin source edits | ❌ NO | Restart session instead |
### Recovery: MCP Tools Broken Mid-Session
If you accidentally cleared cache during a session and MCP tools fail:
```
Error: Could not find a suitable TLS CA certificate bundle, invalid path:
/home/.../.claude/plugins/cache/.../certifi/cacert.pem
```
**Fix:**
1. Exit the current session (`/exit` or Ctrl+C)
2. Start a new Claude Code session
3. MCP tools will reload from the reinstalled cache
### Correct Workflow for Plugin Development
1. Make changes to plugin source files
2. Run `./scripts/verify-hooks.sh` (verifies hook types)
3. Tell user: "Please restart Claude Code for changes to take effect"
4. **Do NOT clear cache** - session restart handles reloading
---
## Automated Diagnostics
Use these commands for automated checking:
- `/cv status` - Marketplace-wide health check (installation, MCP, configuration)
- `/hygiene check` - Project file organization and cleanup check
- `/debug-report` - Run full diagnostics, create issue if problems found
- `/debug-review` - Investigate existing diagnostic issues and propose fixes
---
View File
@@ -1,249 +0,0 @@
# Migration Guide: v8.x → v9.0.0
## Overview
v9.0.0 standardizes all commands to the `/<noun> <action>` sub-command pattern. Every command in the marketplace now follows this convention.
**Breaking change:** All old command names are removed. Update your workflows, scripts, and CLAUDE.md references.
---
## Command Invocation
The `/<noun> <action>` pattern is a **display convention** for user-friendly command invocation. Under the hood, Claude Code resolves commands by filename using hyphens.
### How It Works
| You Type | What Happens | Actual Command Loaded |
|----------|--------------|----------------------|
| `/doc audit` | Dispatch file `doc.md` receives `$ARGUMENTS="audit"` | Routes to `/doc-guardian:doc-audit` (file: `doc-audit.md`) |
| `/sprint plan` | Dispatch file `sprint.md` receives `$ARGUMENTS="plan"` | Routes to `/projman:sprint-plan` (file: `sprint-plan.md`) |
| `/doc-guardian:doc-audit` | Direct invocation (bypasses dispatch) | Loads `doc-audit.md` directly |
### Two Invocation Methods
1. **User-friendly (via dispatch):** `/doc audit` — space-separated, routes through dispatch file
2. **Direct (plugin-prefixed):** `/doc-guardian:doc-audit` — bypasses dispatch, invokes command directly
Both methods work identically. The dispatch file provides `$ARGUMENTS` parsing and a menu interface when invoked without arguments.
### Command Name Mapping
**Pattern:** Spaces in display names become hyphens in filenames.
| Display Name | Filename | Plugin-Prefixed |
|--------------|----------|-----------------|
| `/doc audit` | `doc-audit.md` | `/doc-guardian:doc-audit` |
| `/sprint plan` | `sprint-plan.md` | `/projman:sprint-plan` |
| `/pr review` | `pr-review.md` | `/pr-review:pr-review` |
| `/gitflow commit` | `gitflow-commit.md` | `/git-flow:gitflow-commit` |
If dispatch routing fails, use the direct plugin-prefixed format.
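A sketch of what a dispatch file might contain (structure assumed; this is not the shipped `doc.md`):

```bash
# Hedged sketch of a dispatch file (contents assumed; the real doc.md may differ)
cat > plugins/doc-guardian/commands/doc.md << 'EOF'
---
description: Dispatch for /doc sub-commands
---
Given $ARGUMENTS, route the request:
- "audit" → invoke /doc-guardian:doc-audit
- "sync"  → invoke /doc-guardian:doc-sync
- empty   → list available sub-commands as a menu
EOF
```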
---
## Complete Command Mapping
### projman
| Old (v8.x) | New (v9.0.0) | Notes |
|-------------|--------------|-------|
| `/sprint-plan` | `/sprint plan` | |
| `/sprint-start` | `/sprint start` | |
| `/sprint-status` | `/sprint status` | |
| `/sprint-close` | `/sprint close` | |
| `/pm-review` | `/sprint review` | Moved under `/sprint` |
| `/pm-test` | `/sprint test` | Moved under `/sprint` |
| `/pm-setup` | `/projman setup` | Moved under `/projman` |
| `/pm-debug` | **Removed** | Deleted in v8.1.0 — migrated to `debug-mcp` plugin (Decision #11) |
| `/labels-sync` | `/labels sync` | |
| `/suggest-version` | **Removed** | Deleted in v8.1.0 — migrated to `ops-release-manager` plugin (Decision #18) |
| `/proposal-status` | **Removed** | Deleted in v8.1.0 — absorbed into `/project status` (Decision #19) |
| `/rfc <sub>` | `/rfc <sub>` | Unchanged |
| `/project <sub>` | `/project <sub>` | Unchanged |
| `/adr <sub>` | `/adr <sub>` | Unchanged |
### git-flow
| Old (v8.x) | New (v9.0.0) | Notes |
|-------------|--------------|-------|
| `/git-commit` | `/gitflow commit` | |
| `/git-commit-push` | `/gitflow commit --push` | **Consolidated** into flag |
| `/git-commit-merge` | `/gitflow commit --merge` | **Consolidated** into flag |
| `/git-commit-sync` | `/gitflow commit --sync` | **Consolidated** into flag |
| `/branch-start` | `/gitflow branch-start` | |
| `/branch-cleanup` | `/gitflow branch-cleanup` | |
| `/git-status` | `/gitflow status` | |
| `/git-config` | `/gitflow config` | |
**Note:** The three commit variants (`-push`, `-merge`, `-sync`) are now flags on `/gitflow commit`. This reduces 8 commands to 5.
### pr-review
| Old (v8.x) | New (v9.0.0) | Notes |
|-------------|--------------|-------|
| `/pr-review` | `/pr review` | |
| `/pr-summary` | `/pr summary` | |
| `/pr-findings` | `/pr findings` | |
| `/pr-diff` | `/pr diff` | |
| `/pr-setup` | `/pr setup` | |
| `/project-init` | `/pr init` | Renamed |
| `/project-sync` | `/pr sync` | Renamed |
### clarity-assist
| Old (v8.x) | New (v9.0.0) | Notes |
|-------------|--------------|-------|
| `/clarify` | `/clarity clarify` | |
| `/quick-clarify` | `/clarity quick-clarify` | |
### doc-guardian
| Old (v8.x) | New (v9.0.0) | Notes |
|-------------|--------------|-------|
| `/doc-audit` | `/doc audit` | |
| `/doc-sync` | `/doc sync` | |
| `/changelog-gen` | `/doc changelog-gen` | Moved under `/doc` |
| `/doc-coverage` | `/doc coverage` | |
| `/stale-docs` | `/doc stale-docs` | Moved under `/doc` |
### code-sentinel
| Old (v8.x) | New (v9.0.0) | Notes |
|-------------|--------------|-------|
| `/security-scan` | `/sentinel scan` | |
| `/refactor` | `/sentinel refactor` | |
| `/refactor-dry` | `/sentinel refactor-dry` | |
### claude-config-maintainer
| Old (v8.x) | New (v9.0.0) | Notes |
|-------------|--------------|-------|
| `/config-analyze` (or `/analyze`) | `/claude-config analyze` | |
| `/config-optimize` (or `/optimize`) | `/claude-config optimize` | |
| `/config-init` (or `/init`) | `/claude-config init` | |
| `/config-diff` | `/claude-config diff` | |
| `/config-lint` | `/claude-config lint` | |
| `/config-audit-settings` | `/claude-config audit-settings` | |
| `/config-optimize-settings` | `/claude-config optimize-settings` | |
| `/config-permissions-map` | `/claude-config permissions-map` | |
### contract-validator
| Old (v8.x) | New (v9.0.0) | Notes |
|-------------|--------------|-------|
| `/validate-contracts` | `/cv validate` | |
| `/check-agent` | `/cv check-agent` | |
| `/list-interfaces` | `/cv list-interfaces` | |
| `/dependency-graph` | `/cv dependency-graph` | |
| `/cv-setup` | `/cv setup` | |
| `/cv status` | `/cv status` | Unchanged |
### cmdb-assistant
| Old (v8.x) | New (v9.0.0) | Notes |
|-------------|--------------|-------|
| `/cmdb-setup` | `/cmdb setup` | |
| `/cmdb-search` | `/cmdb search` | |
| `/cmdb-device` | `/cmdb device` | |
| `/cmdb-ip` | `/cmdb ip` | |
| `/cmdb-site` | `/cmdb site` | |
| `/cmdb-audit` | `/cmdb audit` | |
| `/cmdb-register` | `/cmdb register` | |
| `/cmdb-sync` | `/cmdb sync` | |
| `/cmdb-topology` | `/cmdb topology` | |
| `/change-audit` | `/cmdb change-audit` | Moved under `/cmdb` |
| `/ip-conflicts` | `/cmdb ip-conflicts` | Moved under `/cmdb` |
### data-platform
| Old (v8.x) | New (v9.0.0) | Notes |
|-------------|--------------|-------|
| `/data-ingest` | `/data ingest` | |
| `/data-profile` | `/data profile` | |
| `/data-schema` | `/data schema` | |
| `/data-explain` | `/data explain` | |
| `/data-lineage` | `/data lineage` | |
| `/data-run` | `/data run` | |
| `/lineage-viz` | `/data lineage-viz` | Moved under `/data` |
| `/dbt-test` | `/data dbt-test` | Moved under `/data` |
| `/data-quality` | `/data quality` | |
| `/data-review` | `/data review` | |
| `/data-gate` | `/data gate` | |
| `/data-setup` | `/data setup` | |
### viz-platform
| Old (v8.x) | New (v9.0.0) | Notes |
|-------------|--------------|-------|
| `/viz-setup` | `/viz setup` | |
| `/viz-chart` | `/viz chart` | |
| `/viz-chart-export` | `/viz chart-export` | |
| `/viz-dashboard` | `/viz dashboard` | |
| `/viz-theme` | `/viz theme` | |
| `/viz-theme-new` | `/viz theme-new` | |
| `/viz-theme-css` | `/viz theme-css` | |
| `/viz-component` | `/viz component` | |
| `/accessibility-check` | `/viz accessibility-check` | Moved under `/viz` |
| `/viz-breakpoints` | `/viz breakpoints` | |
| `/design-review` | `/viz design-review` | Moved under `/viz` |
| `/design-gate` | `/viz design-gate` | Moved under `/viz` |
### project-hygiene
No changes — already used `/<noun> <action>` pattern.
| Command | Status |
|---------|--------|
| `/hygiene check` | Unchanged |
---
## Verifying Plugin Installation (v9.0.0)
Test commands use the new format:
| Plugin | Test Command |
|--------|--------------|
| git-flow | `/git-flow:gitflow-status` |
| projman | `/projman:sprint-status` |
| pr-review | `/pr-review:pr-summary` |
| clarity-assist | `/clarity-assist:clarity-clarify` |
| doc-guardian | `/doc-guardian:doc-audit` |
| code-sentinel | `/code-sentinel:sentinel-scan` |
| claude-config-maintainer | `/claude-config-maintainer:claude-config-analyze` |
| cmdb-assistant | `/cmdb-assistant:cmdb-search` |
| data-platform | `/data-platform:data-ingest` |
| viz-platform | `/viz-platform:viz-chart` |
| contract-validator | `/contract-validator:cv-validate` |
---
## CLAUDE.md Updates
If your project's CLAUDE.md references old command names, update them:
**Find old references:**
```bash
grep -rn '/sprint-plan\|/pm-setup\|/git-commit\|/pr-review\|/security-scan\|/config-analyze\|/validate-contracts\|/cmdb-search\|/data-ingest\|/viz-chart\b\|/clarify\b\|/doc-audit' CLAUDE.md
```
**Key patterns to search and replace:**
- `/sprint-plan` → `/sprint plan`
- `/pm-setup` → `/projman setup`
- `/pm-review` → `/sprint review`
- `/git-commit` → `/gitflow commit`
- `/pr-review` → `/pr review`
- `/security-scan` → `/sentinel scan`
- `/refactor` → `/sentinel refactor`
- `/config-analyze` → `/claude-config analyze`
- `/validate-contracts` → `/cv validate`
- `/clarify` → `/clarity clarify`
- `/doc-audit` → `/doc audit`
- `/cmdb-search` → `/cmdb search`
- `/data-ingest` → `/data ingest`
- `/viz-chart` → `/viz chart`
---
*Last Updated: 2026-02-06*
View File
@@ -38,7 +38,7 @@ cd ~/.claude/plugins/marketplaces/leo-claude-mktplace && ./scripts/setup.sh
## What the Post-Update Script Does
1. **Updates Python dependencies** for all 5 MCP servers (gitea, netbox, data-platform, viz-platform, contract-validator)
1. **Updates Python dependencies** for MCP servers (gitea, netbox); see the sketch below
2. **Shows recent changelog entries** so you know what changed
3. **Validates your configuration** is still compatible
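A sketch of the dependency-refresh step, using the installed paths from the update instructions above (the actual script may do more):

```bash
# Hedged sketch of the per-server dependency update the post-update script performs
for server in gitea netbox; do
  ( cd ~/.claude/plugins/marketplaces/leo-claude-mktplace/mcp-servers/$server \
    && .venv/bin/pip install -r requirements.txt )
done
```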
@@ -46,9 +46,9 @@ cd ~/.claude/plugins/marketplaces/leo-claude-mktplace && ./scripts/setup.sh
## After Updating: Re-run Setup if Needed
### When to Re-run Setup
### When to Re-run `/initial-setup`
You typically **don't need** to re-run setup after updates. However, re-run your plugin's setup command (e.g., `/projman setup`, `/pr setup`, `/cmdb setup`) if:
You typically **don't need** to re-run setup after updates. However, re-run if:
- Changelog mentions **new required environment variables**
- Changelog mentions **breaking changes** to configuration
@@ -59,7 +59,7 @@ You typically **don't need** to re-run setup after updates. However, re-run your
If an update requires new project-level configuration:
```
/pr init
/project-init
```
This will detect existing settings and only add what's missing.
@@ -97,9 +97,9 @@ When updating, review if changes affect the setup workflow:
1. **Check for setup command changes:**
```bash
git diff HEAD~1 plugins/*/commands/*-setup.md
git diff HEAD~1 plugins/*/commands/pr-init.md
git diff HEAD~1 plugins/*/commands/pr-sync.md
git diff HEAD~1 plugins/*/commands/initial-setup.md
git diff HEAD~1 plugins/*/commands/project-init.md
git diff HEAD~1 plugins/*/commands/project-sync.md
```
2. **Check for hook changes:**
@@ -114,7 +114,7 @@ When updating, review if changes affect the setup workflow:
**If setup commands changed:**
- Review what's new (new validation steps, new prompts, etc.)
- Consider re-running your plugin's setup command or `/pr init` to benefit from improvements
- Consider re-running `/initial-setup` or `/project-init` to benefit from improvements
- Existing configurations remain valid unless changelog notes breaking changes
**If hooks changed:**
@@ -123,7 +123,7 @@ When updating, review if changes affect the setup workflow:
**If configuration structure changed:**
- Check if new variables are required
- Run `/pr sync` if repository detection logic improved
- Run `/project-sync` if repository detection logic improved
---
@@ -132,8 +132,10 @@ When updating, review if changes affect the setup workflow:
### Dependencies fail to install
```bash
# Install missing dependencies (do NOT delete .venv)
# Rebuild virtual environment
cd mcp-servers/gitea
rm -rf .venv
python3 -m venv .venv
source .venv/bin/activate
pip install -r requirements.txt
deactivate
@@ -142,7 +144,7 @@ deactivate
### Configuration no longer works
1. Check CHANGELOG.md for breaking changes
2. Run your plugin's setup command (e.g., `/projman setup`) to re-validate and fix configuration
2. Run `/initial-setup` to re-validate and fix configuration
3. Compare your config files with documentation in `docs/CONFIGURATION.md`
### MCP server won't start after update
@@ -157,13 +159,12 @@ cd ~/.claude/plugins/marketplaces/leo-claude-mktplace && ./scripts/setup.sh
If that doesn't work:
1. Check Python version: `python3 --version` (requires 3.10+)
2. Verify venvs exist in INSTALLED location:
2. Verify venv exists in INSTALLED location:
```bash
for server in gitea netbox data-platform viz-platform contract-validator; do
ls ~/.claude/plugins/marketplaces/leo-claude-mktplace/mcp-servers/$server/.venv && echo "$server: OK" || echo "$server: MISSING"
done
ls ~/.claude/plugins/marketplaces/leo-claude-mktplace/mcp-servers/gitea/.venv
ls ~/.claude/plugins/marketplaces/leo-claude-mktplace/mcp-servers/netbox/.venv
```
3. If missing, run setup.sh as shown above.
3. If missing, the symlinks won't resolve. Run setup.sh as shown above.
4. Restart Claude Code session
5. Check logs for specific errors
View File
@@ -0,0 +1,271 @@
# Agent Workflow - Draw.io Specification
**Target File:** `docs/architecture/agent-workflow.drawio`
**Purpose:** Shows when Planner, Orchestrator, Executor, and Code Reviewer agents trigger during sprint lifecycle.
**Diagram Type:** Swimlane / Sequence Diagram
---
## SWIMLANES
| ID | Label | Color | Position |
|----|-------|-------|----------|
| user-lane | User | #E3F2FD | 1 (leftmost) |
| planner-lane | Planner Agent | #4A90D9 | 2 |
| orchestrator-lane | Orchestrator Agent | #7CB342 | 3 |
| executor-lane | Executor Agent | #FF9800 | 4 |
| reviewer-lane | Code Reviewer Agent | #9C27B0 | 5 |
| gitea-lane | Gitea (Issues + Wiki) | #9E9E9E | 6 (rightmost) |
---
## PHASE 1: SPRINT PLANNING
### Nodes
| ID | Label | Type | Lane | Sequence |
|----|-------|------|------|----------|
| p1-start | /sprint-plan | rounded-rect | user-lane | 1 |
| p1-activate | Planner Activates | rectangle | planner-lane | 2 |
| p1-search-lessons | Search Lessons Learned | rectangle | planner-lane | 3 |
| p1-gitea-wiki-query | Query Past Lessons (Wiki) | rectangle | gitea-lane | 4 |
| p1-return-lessons | Return Relevant Lessons | rectangle | planner-lane | 5 |
| p1-clarify | Ask Clarifying Questions | diamond | planner-lane | 6 |
| p1-user-answers | Provide Answers | rectangle | user-lane | 7 |
| p1-create-issues | Create Issues with Labels | rectangle | planner-lane | 8 |
| p1-gitea-create | Store Issues | rectangle | gitea-lane | 9 |
| p1-plan-complete | Planning Complete | rounded-rect | planner-lane | 10 |
### Edges
| From | To | Label | Style |
|------|----|-------|-------|
| p1-start | p1-activate | invokes | solid |
| p1-activate | p1-search-lessons | | solid |
| p1-search-lessons | p1-gitea-wiki-query | REST API (search_lessons) | solid |
| p1-gitea-wiki-query | p1-return-lessons | lessons data | dashed |
| p1-return-lessons | p1-clarify | | solid |
| p1-clarify | p1-user-answers | questions | solid |
| p1-user-answers | p1-clarify | answers | dashed |
| p1-clarify | p1-create-issues | | solid |
| p1-create-issues | p1-gitea-create | REST API | solid |
| p1-gitea-create | p1-plan-complete | confirm | dashed |
---
## PHASE 2: SPRINT EXECUTION
### Nodes
| ID | Label | Type | Lane | Sequence |
|----|-------|------|------|----------|
| p2-start | /sprint-start | rounded-rect | user-lane | 11 |
| p2-orch-activate | Orchestrator Activates | rectangle | orchestrator-lane | 12 |
| p2-fetch-issues | Fetch Sprint Issues | rectangle | orchestrator-lane | 13 |
| p2-gitea-list | List Open Issues | rectangle | gitea-lane | 14 |
| p2-sequence | Sequence Work (Dependencies) | rectangle | orchestrator-lane | 15 |
| p2-dispatch | Dispatch Task | rectangle | orchestrator-lane | 16 |
| p2-exec-activate | Executor Activates | rectangle | executor-lane | 17 |
| p2-implement | Implement Task | rectangle | executor-lane | 18 |
| p2-update-status | Update Issue Status | rectangle | executor-lane | 19 |
| p2-gitea-update | Update Issue | rectangle | gitea-lane | 20 |
| p2-report | Report Completion | rectangle | executor-lane | 21 |
| p2-loop | More Tasks? | diamond | orchestrator-lane | 22 |
| p2-exec-complete | Execution Complete | rounded-rect | orchestrator-lane | 23 |
### Edges
| From | To | Label | Style |
|------|----|-------|-------|
| p2-start | p2-orch-activate | invokes | solid |
| p2-orch-activate | p2-fetch-issues | | solid |
| p2-fetch-issues | p2-gitea-list | REST API | solid |
| p2-gitea-list | p2-sequence | issues data | dashed |
| p2-sequence | p2-dispatch | parallel batching | solid |
| p2-dispatch | p2-exec-activate | execution prompt | solid |
| p2-exec-activate | p2-implement | | solid |
| p2-implement | p2-update-status | | solid |
| p2-update-status | p2-gitea-update | REST API | solid |
| p2-gitea-update | p2-report | confirm | dashed |
| p2-report | p2-loop | | solid |
| p2-loop | p2-dispatch | yes | solid |
| p2-loop | p2-exec-complete | no | solid |
---
## PHASE 2.5: CODE REVIEW (Pre-Close)
### Nodes
| ID | Label | Type | Lane | Sequence |
|----|-------|------|------|----------|
| p25-start | /review | rounded-rect | user-lane | 24 |
| p25-reviewer-activate | Code Reviewer Activates | rectangle | reviewer-lane | 25 |
| p25-scan-changes | Scan Recent Changes | rectangle | reviewer-lane | 26 |
| p25-check-quality | Check Code Quality | rectangle | reviewer-lane | 27 |
| p25-security-scan | Security Scan | rectangle | reviewer-lane | 28 |
| p25-report | Generate Review Report | rectangle | reviewer-lane | 29 |
| p25-complete | Review Complete | rounded-rect | reviewer-lane | 30 |
### Edges
| From | To | Label | Style |
|------|----|-------|-------|
| p25-start | p25-reviewer-activate | invokes | solid |
| p25-reviewer-activate | p25-scan-changes | | solid |
| p25-scan-changes | p25-check-quality | | solid |
| p25-check-quality | p25-security-scan | | solid |
| p25-security-scan | p25-report | | solid |
| p25-report | p25-complete | | solid |
---
## PHASE 3: SPRINT CLOSE
### Nodes
| ID | Label | Type | Lane | Sequence |
|----|-------|------|------|----------|
| p3-start | /sprint-close | rounded-rect | user-lane | 31 |
| p3-orch-activate | Orchestrator Activates | rectangle | orchestrator-lane | 32 |
| p3-review | Review Sprint | rectangle | orchestrator-lane | 33 |
| p3-gitea-status | Get Final Status | rectangle | gitea-lane | 34 |
| p3-capture | Capture Lessons Learned | rectangle | orchestrator-lane | 35 |
| p3-user-input | Confirm Lessons | diamond | user-lane | 36 |
| p3-create-wiki | Create Wiki Pages | rectangle | orchestrator-lane | 37 |
| p3-gitea-wiki-create | Store Lessons (Wiki) | rectangle | gitea-lane | 38 |
| p3-close-issues | Close Issues | rectangle | orchestrator-lane | 39 |
| p3-gitea-close | Mark Closed | rectangle | gitea-lane | 40 |
| p3-complete | Sprint Closed | rounded-rect | orchestrator-lane | 41 |
### Edges
| From | To | Label | Style |
|------|----|-------|-------|
| p3-start | p3-orch-activate | invokes | solid |
| p3-orch-activate | p3-review | | solid |
| p3-review | p3-gitea-status | REST API | solid |
| p3-gitea-status | p3-capture | status data | dashed |
| p3-capture | p3-user-input | proposed lessons | solid |
| p3-user-input | p3-create-wiki | confirmed | solid |
| p3-create-wiki | p3-gitea-wiki-create | REST API (create_lesson) | solid |
| p3-gitea-wiki-create | p3-close-issues | confirm | dashed |
| p3-close-issues | p3-gitea-close | REST API | solid |
| p3-gitea-close | p3-complete | confirm | dashed |
---
## LAYOUT NOTES
```
+--------+------------+---------------+------------+----------+------------------+
| User | Planner | Orchestrator | Executor | Reviewer | Gitea |
| | | | | | (Issues + Wiki) |
+--------+------------+---------------+------------+----------+------------------+
| | | | | | |
| PHASE 1: SPRINT PLANNING |
|-------------------------------------------------------------------------------|
| O | | | | | |
| | | | | | | |
| +---->| O | | | | |
| | | | | | | |
| | +----------|---------------|------------|--------->| O (Wiki Query) |
| | |<---------|---------------|------------|----------+ | |
| | | | | | | |
| | O<> | | | | |
| O<--->+ | | | | | |
| | | | | | | |
| | +----------|---------------|------------|--------->| O (Issues) |
| | O | | | | |
| | | | | | |
|-------------------------------------------------------------------------------|
| PHASE 2: SPRINT EXECUTION |
|-------------------------------------------------------------------------------|
| O | | | | | |
| | | | | | | |
| +-----|----------->| O | | | |
| | | | | | | |
| | | +-------------|------------|--------->| O (Issues) |
| | | |<------------|------------|----------+ | |
| | | | | | | |
| | | +------------>| O | | |
| | | | | | | |
| | | | +----------|--------->| O (Issues) |
| | | | |<---------|----------+ | |
| | | O<------------+ | | | |
| | | | | | | |
| | | O (loop) | | | |
| | | | | | |
|-------------------------------------------------------------------------------|
| PHASE 2.5: CODE REVIEW |
|-------------------------------------------------------------------------------|
| O | | | | | |
| | | | | | | |
| +-----|------------|---------------|----------->| O | |
| | | | | | | |
| | | | | O->O->O | |
| | | | | | | |
| | | | | O | |
| | | | | | |
|-------------------------------------------------------------------------------|
| PHASE 3: SPRINT CLOSE |
|-------------------------------------------------------------------------------|
| O | | | | | |
| | | | | | | |
| +-----|----------->| O | | | |
| | | +-------------|------------|--------->| O (Issues) |
| | | |<------------|------------|----------+ | |
| | | | | | | |
| O<----|-----------<+ | | | | |
| +-----|----------->| | | | | |
| | | +-------------|------------|--------->| O (Wiki Create) |
| | | |<------------|------------|----------+ | |
| | | +-------------|------------|--------->| O (Issues Close) |
| | | O | | | |
+--------+------------+---------------+------------+----------+------------------+
```
---
## COLOR LEGEND
| Color | Hex | Meaning |
|-------|-----|---------|
| Light Blue | #E3F2FD | User actions |
| Blue | #4A90D9 | Planner Agent |
| Green | #7CB342 | Orchestrator Agent |
| Orange | #FF9800 | Executor Agent |
| Purple | #9C27B0 | Code Reviewer Agent |
| Gray | #9E9E9E | External Services (Gitea) |
---
## SHAPE LEGEND
| Shape | Meaning |
|-------|---------|
| Rounded Rectangle | Start/End points (commands) |
| Rectangle | Process/Action |
| Diamond | Decision point |
| Cylinder | Data store (in component map) |
---
## ARROW LEGEND
| Style | Meaning |
|-------|---------|
| Solid | Action/Request |
| Dashed | Response/Data return |
---
## ARCHITECTURE NOTES
- **Gitea provides BOTH issue tracking AND wiki** (no separate wiki service)
- All wiki operations use Gitea REST API via MCP tools
- Lessons learned stored in Gitea Wiki under `lessons-learned/sprints/`
- MCP tools: `search_lessons`, `create_lesson`, `list_wiki_pages`, `get_wiki_page`
- Four-agent model: Planner, Orchestrator, Executor, Code Reviewer
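For concreteness, a minimal sketch of the wiki path convention used in Phase 3. Only the `lessons-learned/sprints/` prefix comes from the notes above; the sprint slug format is an illustrative assumption:

```python
# Build the Gitea Wiki page path for a sprint's lessons-learned entry.
# Only the lessons-learned/sprints/ prefix is documented above; the
# slug format ("sprint-12") is an assumption for illustration.
def lesson_page_path(sprint_slug: str) -> str:
    return f"lessons-learned/sprints/{sprint_slug}"


print(lesson_page_path("sprint-12"))  # -> lessons-learned/sprints/sprint-12
```

The Orchestrator would pass a path like this to `create_lesson` and could confirm the page afterwards with `get_wiki_page`.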

View File

@@ -0,0 +1,139 @@
# Component Map - Draw.io Specification
**Target File:** `docs/architecture/component-map.drawio`
**Purpose:** Shows all plugins, MCP servers, hooks, and their relationships.
---
## NODES
### Plugins (Blue - #4A90D9)
| ID | Label | Type | Color | Position |
|----|-------|------|-------|----------|
| projman | projman | rectangle | #4A90D9 | top-center |
| projman-pmo | projman-pmo (planned) | rectangle | #4A90D9 | top-right |
| project-hygiene | project-hygiene | rectangle | #4A90D9 | top-left |
| claude-config | claude-config-maintainer | rectangle | #4A90D9 | bottom-left |
| cmdb-assistant | cmdb-assistant | rectangle | #4A90D9 | bottom-right |
### MCP Servers (Green - #7CB342)
MCP servers are **bundled inside each plugin** that needs them.
| ID | Label | Type | Color | Position | Bundled In |
|----|-------|------|-------|----------|------------|
| gitea-mcp | Gitea MCP Server | rectangle | #7CB342 | middle-left | projman |
| netbox-mcp | NetBox MCP Server | rectangle | #7CB342 | middle-right | cmdb-assistant |
### External Systems (Gray - #9E9E9E)
| ID | Label | Type | Color | Position |
|----|-------|------|-------|----------|
| gitea-instance | Gitea\n(Issues + Wiki) | cylinder | #9E9E9E | bottom-left |
| netbox-instance | NetBox | cylinder | #9E9E9E | bottom-right |
### Configuration (Orange - #FF9800)
| ID | Label | Type | Color | Position |
|----|-------|------|-------|----------|
| system-config | System Config\n~/.config/claude/ | rectangle | #FF9800 | far-left |
| project-config | Project Config\n.env | rectangle | #FF9800 | far-right |
---
## EDGES
### Plugin to MCP Server Connections
| From | To | Label | Style | Arrow |
|------|----|-------|-------|-------|
| projman | gitea-mcp | bundled | solid | bidirectional |
| cmdb-assistant | netbox-mcp | bundled | solid | bidirectional |
### Plugin Dependencies
| From | To | Label | Style | Arrow |
|------|----|-------|-------|-------|
| projman-pmo | projman | depends on | dashed | forward |
### MCP Server to External System Connections
| From | To | Label | Style | Arrow |
|------|----|-------|-------|-------|
| gitea-mcp | gitea-instance | REST API | solid | forward |
| netbox-mcp | netbox-instance | REST API | solid | forward |
### Configuration Connections
| From | To | Label | Style | Arrow |
|------|----|-------|-------|-------|
| system-config | gitea-mcp | credentials | dashed | forward |
| system-config | netbox-mcp | credentials | dashed | forward |
| project-config | gitea-mcp | repo context | dashed | forward |
| project-config | netbox-mcp | site context | dashed | forward |
---
## GROUPS
| ID | Label | Contains | Style |
|----|-------|----------|-------|
| plugins-group | Plugins | projman, projman-pmo, project-hygiene, claude-config, cmdb-assistant | light blue border |
| external-group | External Services | gitea-instance, netbox-instance | light gray border |
| config-group | Configuration | system-config, project-config | light orange border |
---
## LAYOUT NOTES
```
+------------------------------------------------------------------+
| PLUGINS GROUP |
| +----------------+ +----------------+ +-------------------+ |
| | project- | | projman | | projman-pmo | |
| | hygiene | | [gitea-mcp] | | (planned) | |
| +----------------+ +-------+--------+ +-------------------+ |
| | |
| +----------------+ +-------------------+ |
| | claude-config | | cmdb-assistant | |
| | -maintainer | | [netbox-mcp] | |
| +----------------+ +--------+----------+ |
+------------------------------------------------------------------+
|
v
+------------------------------------------------------------------+
| EXTERNAL SERVICES GROUP |
| +-------------------+ +-------------------+ |
| | Gitea | | NetBox | |
| | (Issues + Wiki) | | | |
| +-------------------+ +-------------------+ |
+------------------------------------------------------------------+
CONFIG GROUP (left side): CONFIG GROUP (right side):
+-------------------+ +-------------------+
| System Config | | Project Config |
| ~/.config/claude/ | | .env |
+-------------------+ +-------------------+
```
---
## COLOR LEGEND
| Color | Hex | Meaning |
|-------|-----|---------|
| Blue | #4A90D9 | Plugins |
| Green | #7CB342 | MCP Servers (bundled in plugins) |
| Gray | #9E9E9E | External Systems |
| Orange | #FF9800 | Configuration |
---
## ARCHITECTURE NOTES
- MCP servers are **bundled inside plugins** (not shared at root)
- Gitea provides both issue tracking AND wiki (lessons learned)
- No separate Wiki.js - all wiki functionality uses Gitea Wiki
- Each plugin is self-contained for Claude Code caching
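As a hedged illustration of the two config sources in the diagram, the sketch below reads credentials from `~/.config/claude/` and repo/site context from the project `.env`. The file name `gitea.env` and the simple KEY=value format are assumptions, not documented conventions:

```python
from pathlib import Path


def load_env(path: Path) -> dict[str, str]:
    """Parse simple KEY=value lines, skipping comments and blanks."""
    pairs: dict[str, str] = {}
    if not path.exists():
        return pairs
    for line in path.read_text().splitlines():
        if "=" in line and not line.lstrip().startswith("#"):
            key, _, value = line.partition("=")
            pairs[key.strip()] = value.strip()
    return pairs


system_cfg = load_env(Path.home() / ".config" / "claude" / "gitea.env")  # credentials (assumed file name)
project_cfg = load_env(Path(".env"))  # per-project repo/site context
```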

View File

@@ -1,20 +0,0 @@
2026-01-26T14:36:42 | mcp-servers | /home/lmiranda/claude-plugins-work/mcp-servers/contract-validator/mcp_server/parse_tools.py | docs/COMMANDS-CHEATSHEET.md CLAUDE.md
2026-01-26T14:37:38 | mcp-servers | /home/lmiranda/claude-plugins-work/mcp-servers/contract-validator/mcp_server/parse_tools.py | docs/COMMANDS-CHEATSHEET.md CLAUDE.md
2026-01-26T14:37:48 | mcp-servers | /home/lmiranda/claude-plugins-work/mcp-servers/contract-validator/mcp_server/parse_tools.py | docs/COMMANDS-CHEATSHEET.md CLAUDE.md
2026-01-26T14:38:05 | mcp-servers | /home/lmiranda/claude-plugins-work/mcp-servers/contract-validator/mcp_server/parse_tools.py | docs/COMMANDS-CHEATSHEET.md CLAUDE.md
2026-01-26T14:38:55 | mcp-servers | /home/lmiranda/claude-plugins-work/mcp-servers/contract-validator/mcp_server/parse_tools.py | docs/COMMANDS-CHEATSHEET.md CLAUDE.md
2026-01-26T14:39:35 | mcp-servers | /home/lmiranda/claude-plugins-work/mcp-servers/contract-validator/mcp_server/parse_tools.py | docs/COMMANDS-CHEATSHEET.md CLAUDE.md
2026-01-26T14:40:19 | mcp-servers | /home/lmiranda/claude-plugins-work/mcp-servers/contract-validator/mcp_server/parse_tools.py | docs/COMMANDS-CHEATSHEET.md CLAUDE.md
2026-01-26T15:02:30 | mcp-servers | /home/lmiranda/claude-plugins-work/mcp-servers/contract-validator/tests/test_parse_tools.py | docs/COMMANDS-CHEATSHEET.md CLAUDE.md
2026-01-26T15:02:37 | mcp-servers | /home/lmiranda/claude-plugins-work/mcp-servers/contract-validator/tests/test_parse_tools.py | docs/COMMANDS-CHEATSHEET.md CLAUDE.md
2026-01-26T15:03:41 | mcp-servers | /home/lmiranda/claude-plugins-work/mcp-servers/contract-validator/tests/test_report_tools.py | docs/COMMANDS-CHEATSHEET.md CLAUDE.md
2026-02-02T10:56:19 | mcp-servers | /home/lmiranda/claude-plugins-work/mcp-servers/contract-validator/mcp_server/validation_tools.py | docs/COMMANDS-CHEATSHEET.md CLAUDE.md
2026-02-02T10:57:49 | mcp-servers | /home/lmiranda/claude-plugins-work/mcp-servers/contract-validator/tests/test_validation_tools.py | docs/COMMANDS-CHEATSHEET.md CLAUDE.md
2026-02-02T10:58:22 | skills | /home/lmiranda/claude-plugins-work/plugins/contract-validator/skills/mcp-tools-reference.md | README.md
2026-02-02T10:58:38 | skills | /home/lmiranda/claude-plugins-work/plugins/contract-validator/skills/validation-rules.md | README.md
2026-02-02T10:59:13 | .claude-plugin | /home/lmiranda/claude-plugins-work/.claude-plugin/marketplace.json | CLAUDE.md .claude-plugin/marketplace.json
2026-02-02T13:55:33 | skills | /home/lmiranda/claude-plugins-work/plugins/projman/skills/visual-output.md | README.md
2026-02-02T13:55:41 | agents | /home/lmiranda/claude-plugins-work/plugins/projman/agents/planner.md | README.md CLAUDE.md
2026-02-02T13:55:55 | agents | /home/lmiranda/claude-plugins-work/plugins/projman/agents/orchestrator.md | README.md CLAUDE.md
2026-02-02T13:56:14 | agents | /home/lmiranda/claude-plugins-work/plugins/projman/agents/executor.md | README.md CLAUDE.md
2026-02-02T13:56:34 | agents | /home/lmiranda/claude-plugins-work/plugins/projman/agents/code-reviewer.md | README.md CLAUDE.md

View File

@@ -1,3 +0,0 @@
"""Contract Validator MCP Server - Cross-plugin compatibility validation."""
__version__ = "1.0.0"

View File

@@ -1,415 +0,0 @@
"""
Parse tools for extracting interfaces from plugin documentation.
Provides structured extraction of:
- Plugin interfaces from README.md (commands, agents, tools)
- Agent definitions from CLAUDE.md (tool sequences, workflows)
"""
import re
import os
from pathlib import Path
from typing import Optional
from pydantic import BaseModel
class ToolInfo(BaseModel):
"""Information about a single tool"""
name: str
category: Optional[str] = None
description: Optional[str] = None
class CommandInfo(BaseModel):
"""Information about a plugin command"""
name: str
description: Optional[str] = None
class AgentInfo(BaseModel):
"""Information about a plugin agent"""
name: str
description: Optional[str] = None
tools: list[str] = []
class PluginInterface(BaseModel):
"""Structured plugin interface extracted from README"""
plugin_name: str
description: Optional[str] = None
commands: list[CommandInfo] = []
agents: list[AgentInfo] = []
tools: list[ToolInfo] = []
tool_categories: dict[str, list[str]] = {}
features: list[str] = []
class ClaudeMdAgent(BaseModel):
"""Agent definition extracted from CLAUDE.md"""
name: str
personality: Optional[str] = None
responsibilities: list[str] = []
tool_refs: list[str] = []
workflow_steps: list[str] = []
class ParseTools:
"""Tools for parsing plugin documentation"""
async def parse_plugin_interface(self, plugin_path: str) -> dict:
"""
Parse plugin README.md to extract interface declarations.
Args:
plugin_path: Path to plugin directory or README.md file
Returns:
Structured interface with commands, agents, tools, etc.
"""
# Resolve path to README
path = Path(plugin_path)
if path.is_dir():
readme_path = path / "README.md"
else:
readme_path = path
if not readme_path.exists():
return {
"error": f"README.md not found at {readme_path}",
"plugin_path": plugin_path
}
content = readme_path.read_text()
plugin_name = self._extract_plugin_name(content, path)
interface = PluginInterface(
plugin_name=plugin_name,
description=self._extract_description(content),
commands=self._extract_commands(content),
agents=self._extract_agents_from_readme(content),
tools=self._extract_tools(content),
tool_categories=self._extract_tool_categories(content),
features=self._extract_features(content)
)
return interface.model_dump()
async def parse_claude_md_agents(self, claude_md_path: str) -> dict:
"""
Parse CLAUDE.md to extract agent definitions and tool sequences.
Args:
claude_md_path: Path to CLAUDE.md file
Returns:
List of agents with their tool sequences
"""
path = Path(claude_md_path)
if not path.exists():
return {
"error": f"CLAUDE.md not found at {path}",
"claude_md_path": claude_md_path
}
content = path.read_text()
agents = self._extract_agents_from_claude_md(content)
return {
"file": str(path),
"agents": [a.model_dump() for a in agents],
"agent_count": len(agents)
}
def _extract_plugin_name(self, content: str, path: Path) -> str:
"""Extract plugin name from content or path"""
# Try to get from H1 header
match = re.search(r'^#\s+(.+?)(?:\s+Plugin|\s*$)', content, re.MULTILINE)
if match:
name = match.group(1).strip()
# Handle cases like "# data-platform Plugin"
name = re.sub(r'\s*Plugin\s*$', '', name, flags=re.IGNORECASE)
return name
# Fall back to directory name
if path.is_dir():
return path.name
return path.parent.name
def _extract_description(self, content: str) -> Optional[str]:
"""Extract plugin description from first paragraph after title"""
# Get content after H1, before first H2
match = re.search(r'^#\s+.+?\n\n(.+?)(?=\n##|\n\n##|\Z)', content, re.MULTILINE | re.DOTALL)
if match:
desc = match.group(1).strip()
# Take first paragraph only
desc = desc.split('\n\n')[0].strip()
return desc
return None
def _extract_commands(self, content: str) -> list[CommandInfo]:
"""Extract commands from Commands section"""
commands = []
# Find Commands section
commands_section = self._extract_section(content, "Commands")
if not commands_section:
return commands
# Parse table format: | Command | Description |
# Only match actual command names (must start with /)
table_pattern = r'\|\s*`?(/[a-z][-a-z0-9]*)`?\s*\|\s*([^|]+)\s*\|'
for match in re.finditer(table_pattern, commands_section):
cmd_name = match.group(1).strip()
desc = match.group(2).strip()
# Skip header row and separators
if cmd_name.lower() in ('command', 'commands') or cmd_name.startswith('-'):
continue
commands.append(CommandInfo(
name=cmd_name,
description=desc
))
# Also look for ### `/command-name` format (with backticks)
cmd_header_pattern = r'^###\s+`(/[a-z][-a-z0-9]*)`\s*\n(.+?)(?=\n###|\n##|\Z)'
for match in re.finditer(cmd_header_pattern, commands_section, re.MULTILINE | re.DOTALL):
cmd_name = match.group(1).strip()
desc_block = match.group(2).strip()
# Get first line or paragraph as description
desc = desc_block.split('\n')[0].strip()
# Don't duplicate if already found in table
if not any(c.name == cmd_name for c in commands):
commands.append(CommandInfo(name=cmd_name, description=desc))
# Also look for ### /command-name format (without backticks)
cmd_header_pattern2 = r'^###\s+(/[a-z][-a-z0-9]*)\s*\n(.+?)(?=\n###|\n##|\Z)'
for match in re.finditer(cmd_header_pattern2, commands_section, re.MULTILINE | re.DOTALL):
cmd_name = match.group(1).strip()
desc_block = match.group(2).strip()
# Get first line or paragraph as description
desc = desc_block.split('\n')[0].strip()
# Don't duplicate if already found in table
if not any(c.name == cmd_name for c in commands):
commands.append(CommandInfo(name=cmd_name, description=desc))
return commands
def _extract_agents_from_readme(self, content: str) -> list[AgentInfo]:
"""Extract agents from Agents section in README"""
agents = []
# Find Agents section
agents_section = self._extract_section(content, "Agents")
if not agents_section:
return agents
# Parse table format: | Agent | Description |
# Only match actual agent names (alphanumeric with dashes/underscores)
table_pattern = r'\|\s*`?([a-z][-a-z0-9_]*)`?\s*\|\s*([^|]+)\s*\|'
for match in re.finditer(table_pattern, agents_section):
agent_name = match.group(1).strip()
desc = match.group(2).strip()
# Skip header row and separators
if agent_name.lower() in ('agent', 'agents') or agent_name.startswith('-'):
continue
agents.append(AgentInfo(name=agent_name, description=desc))
return agents
def _extract_tools(self, content: str) -> list[ToolInfo]:
"""Extract tool list from Tools Summary or similar section"""
tools = []
# Find Tools Summary section
tools_section = self._extract_section(content, "Tools Summary")
if not tools_section:
tools_section = self._extract_section(content, "Tools")
if not tools_section:
tools_section = self._extract_section(content, "MCP Server Tools")
if not tools_section:
return tools
# Parse category headers: ### category (N tools)
category_pattern = r'###\s*(.+?)\s*(?:\((\d+)\s*tools?\))?\s*\n([^#]+)'
for match in re.finditer(category_pattern, tools_section):
category = match.group(1).strip()
tool_list_text = match.group(3).strip()
# Extract tool names from backtick lists
tool_names = re.findall(r'`([a-z_]+)`', tool_list_text)
for name in tool_names:
tools.append(ToolInfo(name=name, category=category))
# Also look for inline tool lists without categories
inline_pattern = r'`([a-z_]+)`'
all_tool_names = set(t.name for t in tools)
for match in re.finditer(inline_pattern, tools_section):
name = match.group(1)
if name not in all_tool_names:
tools.append(ToolInfo(name=name))
all_tool_names.add(name)
return tools
def _extract_tool_categories(self, content: str) -> dict[str, list[str]]:
"""Extract tool categories with their tool lists"""
categories = {}
tools_section = self._extract_section(content, "Tools Summary")
if not tools_section:
tools_section = self._extract_section(content, "Tools")
if not tools_section:
return categories
# Parse category headers: ### category (N tools)
category_pattern = r'###\s*(.+?)\s*(?:\((\d+)\s*tools?\))?\s*\n([^#]+)'
for match in re.finditer(category_pattern, tools_section):
category = match.group(1).strip()
tool_list_text = match.group(3).strip()
# Extract tool names from backtick lists
tool_names = re.findall(r'`([a-z_]+)`', tool_list_text)
if tool_names:
categories[category] = tool_names
return categories
def _extract_features(self, content: str) -> list[str]:
"""Extract features from Features section"""
features = []
features_section = self._extract_section(content, "Features")
if not features_section:
return features
# Parse bullet points
bullet_pattern = r'^[-*]\s+\*\*(.+?)\*\*'
for match in re.finditer(bullet_pattern, features_section, re.MULTILINE):
features.append(match.group(1).strip())
return features
def _extract_section(self, content: str, section_name: str) -> Optional[str]:
"""Extract content of a markdown section by header name"""
# Match ## Section Name - include all content until next ## (same level or higher)
pattern = rf'^##\s+{re.escape(section_name)}(?:\s*\([^)]*\))?\s*\n(.*?)(?=\n##[^#]|\Z)'
match = re.search(pattern, content, re.MULTILINE | re.DOTALL | re.IGNORECASE)
if match:
return match.group(1).strip()
# Try ### level - include content until next ## or ###
pattern = rf'^###\s+{re.escape(section_name)}(?:\s*\([^)]*\))?\s*\n(.*?)(?=\n##|\n###[^#]|\Z)'
match = re.search(pattern, content, re.MULTILINE | re.DOTALL | re.IGNORECASE)
if match:
return match.group(1).strip()
return None
def _extract_agents_from_claude_md(self, content: str) -> list[ClaudeMdAgent]:
"""Extract agent definitions from CLAUDE.md"""
agents = []
# Look for Four-Agent Model section specifically
# Match section headers like "### Four-Agent Model (projman)" or "## Four-Agent Model"
agent_model_match = re.search(
r'^##[#]?\s+Four-Agent Model.*?\n(.*?)(?=\n##[^#]|\Z)',
content, re.MULTILINE | re.DOTALL
)
agent_model_section = agent_model_match.group(1) if agent_model_match else None
if agent_model_section:
# Parse agent table within this section
# | **Planner** | Thoughtful, methodical | Sprint planning, ... |
# Match rows where first cell starts with ** (bold) and contains a capitalized word
agent_table_pattern = r'\|\s*\*\*([A-Z][a-zA-Z\s]+?)\*\*\s*\|\s*([^|]+)\s*\|\s*([^|]+)\s*\|'
for match in re.finditer(agent_table_pattern, agent_model_section):
agent_name = match.group(1).strip()
personality = match.group(2).strip()
responsibilities = match.group(3).strip()
# Skip header rows and separator rows
if agent_name.lower() in ('agent', 'agents', '---', '-', ''):
continue
if 'personality' in personality.lower() or '---' in personality:
continue
# Skip if personality looks like tool names (contains backticks)
if '`' in personality:
continue
# Extract tool references from responsibilities
tool_refs = re.findall(r'`([a-z_]+)`', responsibilities)
# Split responsibilities by comma
resp_list = [r.strip() for r in responsibilities.split(',')]
agents.append(ClaudeMdAgent(
name=agent_name,
personality=personality,
responsibilities=resp_list,
tool_refs=tool_refs
))
# Also look for agents table in ## Agents section
agents_section = self._extract_section(content, "Agents")
if agents_section:
# Parse table: | Agent | Description |
table_pattern = r'\|\s*`?([a-z][-a-z0-9_]+)`?\s*\|\s*([^|]+)\s*\|'
for match in re.finditer(table_pattern, agents_section):
agent_name = match.group(1).strip()
desc = match.group(2).strip()
# Skip header rows
if agent_name.lower() in ('agent', 'agents', '---', '-'):
continue
# Check if agent already exists
if not any(a.name.lower() == agent_name.lower() for a in agents):
agents.append(ClaudeMdAgent(
name=agent_name,
responsibilities=[desc] if desc else []
))
# Look for workflow sections to enrich agent data
workflow_section = self._extract_section(content, "Workflow")
if workflow_section:
# Parse numbered steps
step_pattern = r'^\d+\.\s+(.+?)$'
workflow_steps = re.findall(step_pattern, workflow_section, re.MULTILINE)
# Associate workflow steps with agents mentioned
for agent in agents:
for step in workflow_steps:
if agent.name.lower() in step.lower():
agent.workflow_steps.append(step)
# Extract any tool references in the step
step_tools = re.findall(r'`([a-z_]+)`', step)
agent.tool_refs.extend(t for t in step_tools if t not in agent.tool_refs)
# Look for agent-specific sections (### Planner Agent)
agent_section_pattern = r'^###?\s+([A-Z][a-z]+(?:\s+[A-Z][a-z]+)?)\s+Agent\s*\n(.*?)(?=\n##|\n###|\Z)'
for match in re.finditer(agent_section_pattern, content, re.MULTILINE | re.DOTALL):
agent_name = match.group(1).strip()
section_content = match.group(2).strip()
# Check if agent already exists
existing = next((a for a in agents if a.name.lower() == agent_name.lower()), None)
if existing:
# Add tool refs from this section
tool_refs = re.findall(r'`([a-z_]+)`', section_content)
existing.tool_refs.extend(t for t in tool_refs if t not in existing.tool_refs)
else:
tool_refs = re.findall(r'`([a-z_]+)`', section_content)
agents.append(ClaudeMdAgent(
name=agent_name,
tool_refs=tool_refs
))
return agents
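A minimal usage sketch for the parsers above; the plugin and CLAUDE.md paths are placeholders, and it assumes it is run from the contract-validator directory so `mcp_server` is importable:

```python
import asyncio

from mcp_server.parse_tools import ParseTools


async def main() -> None:
    tools = ParseTools()
    # Accepts a plugin directory or a direct README.md path.
    interface = await tools.parse_plugin_interface("plugins/projman")
    agents = await tools.parse_claude_md_agents("CLAUDE.md")
    print(interface.get("plugin_name"), len(interface.get("commands", [])), "commands")
    print(agents.get("agent_count", 0), "agents")


asyncio.run(main())
```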

View File

@@ -1,337 +0,0 @@
"""
Report tools for generating compatibility reports and listing issues.
Provides:
- generate_compatibility_report: Full marketplace validation report
- list_issues: Filtered issue listing
"""
import os
from pathlib import Path
from datetime import datetime
from typing import Optional
from pydantic import BaseModel
from .parse_tools import ParseTools
from .validation_tools import ValidationTools, IssueSeverity, IssueType, ValidationIssue
class ReportSummary(BaseModel):
"""Summary statistics for a report"""
total_plugins: int = 0
total_commands: int = 0
total_agents: int = 0
total_tools: int = 0
total_issues: int = 0
errors: int = 0
warnings: int = 0
info: int = 0
class ReportTools:
"""Tools for generating reports and listing issues"""
def __init__(self):
self.parse_tools = ParseTools()
self.validation_tools = ValidationTools()
async def generate_compatibility_report(
self,
marketplace_path: str,
format: str = "markdown"
) -> dict:
"""
Generate a comprehensive compatibility report for all plugins.
Args:
marketplace_path: Path to marketplace root directory
format: Output format ("markdown" or "json")
Returns:
Full compatibility report with all findings
"""
marketplace = Path(marketplace_path)
plugins_dir = marketplace / "plugins"
if not plugins_dir.exists():
return {
"error": f"Plugins directory not found at {plugins_dir}",
"marketplace_path": marketplace_path
}
# Discover all plugins
plugins = []
for item in plugins_dir.iterdir():
if item.is_dir() and (item / ".claude-plugin").exists():
plugins.append(item)
if not plugins:
return {
"error": "No plugins found in marketplace",
"marketplace_path": marketplace_path
}
# Parse all plugin interfaces
interfaces = {}
all_issues = []
summary = ReportSummary(total_plugins=len(plugins))
for plugin_path in plugins:
interface = await self.parse_tools.parse_plugin_interface(str(plugin_path))
if "error" not in interface:
interfaces[interface["plugin_name"]] = interface
summary.total_commands += len(interface.get("commands", []))
summary.total_agents += len(interface.get("agents", []))
summary.total_tools += len(interface.get("tools", []))
# Run pairwise compatibility checks
plugin_names = list(interfaces.keys())
compatibility_results = []
for i, name_a in enumerate(plugin_names):
for name_b in plugin_names[i+1:]:
path_a = plugins_dir / self._find_plugin_dir(plugins_dir, name_a)
path_b = plugins_dir / self._find_plugin_dir(plugins_dir, name_b)
result = await self.validation_tools.validate_compatibility(
str(path_a), str(path_b)
)
if "error" not in result:
compatibility_results.append(result)
all_issues.extend(result.get("issues", []))
# Parse CLAUDE.md if exists
claude_md = marketplace / "CLAUDE.md"
agents_from_claude = []
if claude_md.exists():
agents_result = await self.parse_tools.parse_claude_md_agents(str(claude_md))
if "error" not in agents_result:
agents_from_claude = agents_result.get("agents", [])
# Validate each agent
for agent in agents_from_claude:
agent_result = await self.validation_tools.validate_agent_refs(
agent["name"],
str(claude_md),
[str(p) for p in plugins]
)
if "error" not in agent_result:
all_issues.extend(agent_result.get("issues", []))
# Count issues by severity
for issue in all_issues:
severity = issue.get("severity", "info")
if isinstance(severity, str):
severity_str = severity.lower()
else:
severity_str = severity.value if hasattr(severity, 'value') else str(severity).lower()
if "error" in severity_str:
summary.errors += 1
elif "warning" in severity_str:
summary.warnings += 1
else:
summary.info += 1
summary.total_issues = len(all_issues)
# Generate report
if format == "json":
return {
"generated_at": datetime.now().isoformat(),
"marketplace_path": marketplace_path,
"summary": summary.model_dump(),
"plugins": interfaces,
"compatibility_checks": compatibility_results,
"claude_md_agents": agents_from_claude,
"all_issues": all_issues
}
else:
# Generate markdown report
report = self._generate_markdown_report(
marketplace_path,
summary,
interfaces,
compatibility_results,
agents_from_claude,
all_issues
)
return {
"generated_at": datetime.now().isoformat(),
"marketplace_path": marketplace_path,
"summary": summary.model_dump(),
"report": report
}
def _find_plugin_dir(self, plugins_dir: Path, plugin_name: str) -> str:
"""Find plugin directory by name (handles naming variations)"""
# Try exact match first
for item in plugins_dir.iterdir():
if item.is_dir():
if item.name.lower() == plugin_name.lower():
return item.name
# Check plugin.json for name
plugin_json = item / ".claude-plugin" / "plugin.json"
if plugin_json.exists():
import json
try:
data = json.loads(plugin_json.read_text())
if data.get("name", "").lower() == plugin_name.lower():
return item.name
except (json.JSONDecodeError, OSError):
pass  # unreadable or malformed plugin.json; fall back to the given name
return plugin_name
def _generate_markdown_report(
self,
marketplace_path: str,
summary: ReportSummary,
interfaces: dict,
compatibility_results: list,
agents: list,
issues: list
) -> str:
"""Generate markdown formatted report"""
lines = [
"# Contract Validation Report",
"",
f"**Generated:** {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}",
f"**Marketplace:** `{marketplace_path}`",
"",
"## Summary",
"",
f"| Metric | Count |",
f"|--------|-------|",
f"| Plugins | {summary.total_plugins} |",
f"| Commands | {summary.total_commands} |",
f"| Agents | {summary.total_agents} |",
f"| Tools | {summary.total_tools} |",
f"| **Issues** | **{summary.total_issues}** |",
f"| - Errors | {summary.errors} |",
f"| - Warnings | {summary.warnings} |",
f"| - Info | {summary.info} |",
"",
]
# Plugin details
lines.extend([
"## Plugins",
"",
])
for name, interface in interfaces.items():
cmds = len(interface.get("commands", []))
agents_count = len(interface.get("agents", []))
tools = len(interface.get("tools", []))
lines.append(f"### {name}")
lines.append("")
lines.append(f"- Commands: {cmds}")
lines.append(f"- Agents: {agents_count}")
lines.append(f"- Tools: {tools}")
lines.append("")
# Compatibility results
if compatibility_results:
lines.extend([
"## Compatibility Checks",
"",
])
for result in compatibility_results:
status = "" if result.get("compatible", True) else ""
lines.append(f"### {result['plugin_a']}{result['plugin_b']} {status}")
lines.append("")
if result.get("shared_tools"):
lines.append(f"- Shared tools: `{', '.join(result['shared_tools'])}`")
if result.get("issues"):
for issue in result["issues"]:
sev = issue.get("severity", "info")
if hasattr(sev, 'value'):
sev = sev.value
lines.append(f"- [{sev.upper()}] {issue['message']}")
lines.append("")
# Issues section
if issues:
lines.extend([
"## All Issues",
"",
"| Severity | Type | Message |",
"|----------|------|---------|",
])
for issue in issues:
sev = issue.get("severity", "info")
itype = issue.get("issue_type", "unknown")
msg = issue.get("message", "")
if hasattr(sev, 'value'):
sev = sev.value
if hasattr(itype, 'value'):
itype = itype.value
# Truncate message for table
msg_short = msg[:60] + "..." if len(msg) > 60 else msg
lines.append(f"| {sev} | {itype} | {msg_short} |")
lines.append("")
return "\n".join(lines)
async def list_issues(
self,
marketplace_path: str,
severity: str = "all",
issue_type: str = "all"
) -> dict:
"""
List validation issues with optional filtering.
Args:
marketplace_path: Path to marketplace root directory
severity: Filter by severity ("error", "warning", "info", "all")
issue_type: Filter by type ("missing_tool", "interface_mismatch", etc., "all")
Returns:
Filtered list of issues
"""
# Generate full report first
report = await self.generate_compatibility_report(marketplace_path, format="json")
if "error" in report:
return report
all_issues = report.get("all_issues", [])
# Filter by severity
if severity != "all":
filtered = []
for issue in all_issues:
issue_sev = issue.get("severity", "info")
if hasattr(issue_sev, 'value'):
issue_sev = issue_sev.value
if isinstance(issue_sev, str) and severity.lower() in issue_sev.lower():
filtered.append(issue)
all_issues = filtered
# Filter by type
if issue_type != "all":
filtered = []
for issue in all_issues:
itype = issue.get("issue_type", "unknown")
if hasattr(itype, 'value'):
itype = itype.value
if isinstance(itype, str) and issue_type.lower() in itype.lower():
filtered.append(issue)
all_issues = filtered
return {
"marketplace_path": marketplace_path,
"filters": {
"severity": severity,
"issue_type": issue_type
},
"total_issues": len(all_issues),
"issues": all_issues
}
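A short usage sketch for the report tools above; the marketplace path is a placeholder:

```python
import asyncio

from mcp_server.report_tools import ReportTools


async def main() -> None:
    tools = ReportTools()
    report = await tools.generate_compatibility_report(
        "/path/to/marketplace", format="markdown"
    )
    print(report.get("report") or report.get("error"))
    # Narrow the same findings down to errors only.
    errors = await tools.list_issues("/path/to/marketplace", severity="error")
    print(errors.get("total_issues", 0), "errors")


asyncio.run(main())
```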

View File

@@ -1,309 +0,0 @@
"""
MCP Server entry point for Contract Validator.
Provides cross-plugin compatibility validation and Claude.md agent verification
tools to Claude Code via JSON-RPC 2.0 over stdio.
"""
import asyncio
import logging
import json
from mcp.server import Server
from mcp.server.stdio import stdio_server
from mcp.types import Tool, TextContent
from .parse_tools import ParseTools
from .validation_tools import ValidationTools
from .report_tools import ReportTools
# Suppress noisy MCP validation warnings on stderr
logging.basicConfig(level=logging.INFO)
logging.getLogger().setLevel(logging.ERROR)  # the real root logger; getLogger("root") names a different, unused logger
logging.getLogger("mcp").setLevel(logging.ERROR)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)  # keep this server's own startup message visible
class ContractValidatorMCPServer:
"""MCP Server for cross-plugin compatibility validation"""
def __init__(self):
self.server = Server("contract-validator-mcp")
self.parse_tools = ParseTools()
self.validation_tools = ValidationTools()
self.report_tools = ReportTools()
async def initialize(self):
"""Initialize server."""
logger.info("Contract Validator MCP Server initialized")
def setup_tools(self):
"""Register all available tools with the MCP server"""
@self.server.list_tools()
async def list_tools() -> list[Tool]:
"""Return list of available tools"""
tools = [
# Parse tools (to be implemented in #186)
Tool(
name="parse_plugin_interface",
description="Parse plugin README.md to extract interface declarations (inputs, outputs, tools)",
inputSchema={
"type": "object",
"properties": {
"plugin_path": {
"type": "string",
"description": "Path to plugin directory or README.md"
}
},
"required": ["plugin_path"]
}
),
Tool(
name="parse_claude_md_agents",
description="Parse Claude.md to extract agent definitions and their tool sequences",
inputSchema={
"type": "object",
"properties": {
"claude_md_path": {
"type": "string",
"description": "Path to CLAUDE.md file"
}
},
"required": ["claude_md_path"]
}
),
# Validation tools (to be implemented in #187)
Tool(
name="validate_compatibility",
description="Validate compatibility between two plugin interfaces",
inputSchema={
"type": "object",
"properties": {
"plugin_a": {
"type": "string",
"description": "Path to first plugin"
},
"plugin_b": {
"type": "string",
"description": "Path to second plugin"
}
},
"required": ["plugin_a", "plugin_b"]
}
),
Tool(
name="validate_agent_refs",
description="Validate that all tool references in an agent definition exist",
inputSchema={
"type": "object",
"properties": {
"agent_name": {
"type": "string",
"description": "Name of agent to validate"
},
"claude_md_path": {
"type": "string",
"description": "Path to CLAUDE.md containing agent"
},
"plugin_paths": {
"type": "array",
"items": {"type": "string"},
"description": "Paths to available plugins"
}
},
"required": ["agent_name", "claude_md_path"]
}
),
Tool(
name="validate_data_flow",
description="Validate data flow through an agent's tool sequence",
inputSchema={
"type": "object",
"properties": {
"agent_name": {
"type": "string",
"description": "Name of agent to validate"
},
"claude_md_path": {
"type": "string",
"description": "Path to CLAUDE.md containing agent"
}
},
"required": ["agent_name", "claude_md_path"]
}
),
Tool(
name="validate_workflow_integration",
description="Validate that a domain plugin exposes the required advisory interfaces (gate command, review command, advisory agent) expected by projman's domain-consultation skill. Also checks gate contract version compatibility.",
inputSchema={
"type": "object",
"properties": {
"plugin_path": {
"type": "string",
"description": "Path to the domain plugin directory"
},
"domain_label": {
"type": "string",
"description": "The Domain/* label it claims to handle, e.g. Domain/Viz"
},
"expected_contract": {
"type": "string",
"description": "Expected contract version (e.g., 'v1'). If provided, validates the gate command's contract matches."
}
},
"required": ["plugin_path", "domain_label"]
}
),
# Report tools (to be implemented in #188)
Tool(
name="generate_compatibility_report",
description="Generate a comprehensive compatibility report for all plugins",
inputSchema={
"type": "object",
"properties": {
"marketplace_path": {
"type": "string",
"description": "Path to marketplace root directory"
},
"format": {
"type": "string",
"enum": ["markdown", "json"],
"default": "markdown",
"description": "Output format"
}
},
"required": ["marketplace_path"]
}
),
Tool(
name="list_issues",
description="List validation issues with optional filtering",
inputSchema={
"type": "object",
"properties": {
"marketplace_path": {
"type": "string",
"description": "Path to marketplace root directory"
},
"severity": {
"type": "string",
"enum": ["error", "warning", "info", "all"],
"default": "all",
"description": "Filter by severity"
},
"issue_type": {
"type": "string",
"enum": ["missing_tool", "interface_mismatch", "optional_dependency", "undeclared_output", "all"],
"default": "all",
"description": "Filter by issue type"
}
},
"required": ["marketplace_path"]
}
)
]
return tools
@self.server.call_tool()
async def call_tool(name: str, arguments: dict) -> list[TextContent]:
"""Handle tool invocation."""
try:
# All tools return placeholder responses for now
# Actual implementation will be added in issues #186, #187, #188
if name == "parse_plugin_interface":
result = await self._parse_plugin_interface(**arguments)
elif name == "parse_claude_md_agents":
result = await self._parse_claude_md_agents(**arguments)
elif name == "validate_compatibility":
result = await self._validate_compatibility(**arguments)
elif name == "validate_agent_refs":
result = await self._validate_agent_refs(**arguments)
elif name == "validate_data_flow":
result = await self._validate_data_flow(**arguments)
elif name == "validate_workflow_integration":
result = await self._validate_workflow_integration(**arguments)
elif name == "generate_compatibility_report":
result = await self._generate_compatibility_report(**arguments)
elif name == "list_issues":
result = await self._list_issues(**arguments)
else:
raise ValueError(f"Unknown tool: {name}")
return [TextContent(
type="text",
text=json.dumps(result, indent=2, default=str)
)]
except Exception as e:
logger.error(f"Tool {name} failed: {e}")
return [TextContent(
type="text",
text=json.dumps({"error": str(e)}, indent=2)
)]
# Parse tool implementations (Issue #186)
async def _parse_plugin_interface(self, plugin_path: str) -> dict:
"""Parse plugin interface from README.md"""
return await self.parse_tools.parse_plugin_interface(plugin_path)
async def _parse_claude_md_agents(self, claude_md_path: str) -> dict:
"""Parse agents from CLAUDE.md"""
return await self.parse_tools.parse_claude_md_agents(claude_md_path)
# Validation tool implementations (Issue #187)
async def _validate_compatibility(self, plugin_a: str, plugin_b: str) -> dict:
"""Validate compatibility between plugins"""
return await self.validation_tools.validate_compatibility(plugin_a, plugin_b)
async def _validate_agent_refs(self, agent_name: str, claude_md_path: str, plugin_paths: list[str] | None = None) -> dict:
"""Validate agent tool references"""
return await self.validation_tools.validate_agent_refs(agent_name, claude_md_path, plugin_paths)
async def _validate_data_flow(self, agent_name: str, claude_md_path: str) -> dict:
"""Validate agent data flow"""
return await self.validation_tools.validate_data_flow(agent_name, claude_md_path)
async def _validate_workflow_integration(
self,
plugin_path: str,
domain_label: str,
expected_contract: str | None = None
) -> dict:
"""Validate domain plugin exposes required advisory interfaces"""
return await self.validation_tools.validate_workflow_integration(
plugin_path, domain_label, expected_contract
)
# Report tool implementations (Issue #188)
async def _generate_compatibility_report(self, marketplace_path: str, format: str = "markdown") -> dict:
"""Generate comprehensive compatibility report"""
return await self.report_tools.generate_compatibility_report(marketplace_path, format)
async def _list_issues(self, marketplace_path: str, severity: str = "all", issue_type: str = "all") -> dict:
"""List validation issues with filtering"""
return await self.report_tools.list_issues(marketplace_path, severity, issue_type)
async def run(self):
"""Run the MCP server"""
await self.initialize()
self.setup_tools()
async with stdio_server() as (read_stream, write_stream):
await self.server.run(
read_stream,
write_stream,
self.server.create_initialization_options()
)
async def main():
"""Main entry point"""
server = ContractValidatorMCPServer()
await server.run()
if __name__ == "__main__":
asyncio.run(main())
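Since the server speaks JSON-RPC 2.0 over stdio, a client invokes a tool by writing a `tools/call` request to the server's stdin. A sketch of the envelope; the id and plugin paths are arbitrary placeholders:

```python
import json

request = {
    "jsonrpc": "2.0",
    "id": 1,
    "method": "tools/call",  # standard MCP method for tool invocation
    "params": {
        "name": "validate_compatibility",
        "arguments": {
            "plugin_a": "plugins/projman",
            "plugin_b": "plugins/cmdb-assistant",
        },
    },
}
print(json.dumps(request))
```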

View File

@@ -1,493 +0,0 @@
"""
Validation tools for checking cross-plugin compatibility and agent references.
Provides:
- validate_compatibility: Compare two plugin interfaces
- validate_agent_refs: Check agent tool references exist
- validate_data_flow: Verify data flow through agent sequences
"""
from pathlib import Path
from typing import Optional
from pydantic import BaseModel
from enum import Enum
from .parse_tools import ParseTools, PluginInterface, ClaudeMdAgent
class IssueSeverity(str, Enum):
ERROR = "error"
WARNING = "warning"
INFO = "info"
class IssueType(str, Enum):
MISSING_TOOL = "missing_tool"
INTERFACE_MISMATCH = "interface_mismatch"
OPTIONAL_DEPENDENCY = "optional_dependency"
UNDECLARED_OUTPUT = "undeclared_output"
INVALID_SEQUENCE = "invalid_sequence"
MISSING_INTEGRATION = "missing_integration"
class ValidationIssue(BaseModel):
"""A single validation issue"""
severity: IssueSeverity
issue_type: IssueType
message: str
location: Optional[str] = None
suggestion: Optional[str] = None
class CompatibilityResult(BaseModel):
"""Result of compatibility check between two plugins"""
plugin_a: str
plugin_b: str
compatible: bool
shared_tools: list[str] = []
a_only_tools: list[str] = []
b_only_tools: list[str] = []
issues: list[ValidationIssue] = []
class AgentValidationResult(BaseModel):
"""Result of agent reference validation"""
agent_name: str
valid: bool
tool_refs_found: list[str] = []
tool_refs_missing: list[str] = []
issues: list[ValidationIssue] = []
class DataFlowResult(BaseModel):
"""Result of data flow validation"""
agent_name: str
valid: bool
flow_steps: list[str] = []
issues: list[ValidationIssue] = []
class WorkflowIntegrationResult(BaseModel):
"""Result of workflow integration validation for domain plugins"""
plugin_name: str
domain_label: str
valid: bool
gate_command_found: bool
gate_contract: Optional[str] = None # Contract version declared by gate command
review_command_found: bool
advisory_agent_found: bool
issues: list[ValidationIssue] = []
class ValidationTools:
"""Tools for validating plugin compatibility and agent references"""
def __init__(self):
self.parse_tools = ParseTools()
async def validate_compatibility(self, plugin_a: str, plugin_b: str) -> dict:
"""
Validate compatibility between two plugin interfaces.
Compares tools, commands, and agents to identify overlaps and gaps.
Args:
plugin_a: Path to first plugin directory
plugin_b: Path to second plugin directory
Returns:
Compatibility report with shared tools, unique tools, and issues
"""
# Parse both plugins
interface_a = await self.parse_tools.parse_plugin_interface(plugin_a)
interface_b = await self.parse_tools.parse_plugin_interface(plugin_b)
# Check for parse errors
if "error" in interface_a:
return {
"error": f"Failed to parse plugin A: {interface_a['error']}",
"plugin_a": plugin_a,
"plugin_b": plugin_b
}
if "error" in interface_b:
return {
"error": f"Failed to parse plugin B: {interface_b['error']}",
"plugin_a": plugin_a,
"plugin_b": plugin_b
}
# Extract tool names
tools_a = set(t["name"] for t in interface_a.get("tools", []))
tools_b = set(t["name"] for t in interface_b.get("tools", []))
# Find overlaps and differences
shared = tools_a & tools_b
a_only = tools_a - tools_b
b_only = tools_b - tools_a
issues = []
# Check for potential naming conflicts
if shared:
issues.append(ValidationIssue(
severity=IssueSeverity.WARNING,
issue_type=IssueType.INTERFACE_MISMATCH,
message=f"Both plugins define tools with same names: {list(shared)}",
location=f"{interface_a['plugin_name']} and {interface_b['plugin_name']}",
suggestion="Ensure tools with same names have compatible interfaces"
))
# Check command overlaps
cmds_a = set(c["name"] for c in interface_a.get("commands", []))
cmds_b = set(c["name"] for c in interface_b.get("commands", []))
shared_cmds = cmds_a & cmds_b
if shared_cmds:
issues.append(ValidationIssue(
severity=IssueSeverity.ERROR,
issue_type=IssueType.INTERFACE_MISMATCH,
message=f"Command name conflict: {list(shared_cmds)}",
location=f"{interface_a['plugin_name']} and {interface_b['plugin_name']}",
suggestion="Rename conflicting commands to avoid ambiguity"
))
result = CompatibilityResult(
plugin_a=interface_a["plugin_name"],
plugin_b=interface_b["plugin_name"],
compatible=len([i for i in issues if i.severity == IssueSeverity.ERROR]) == 0,
shared_tools=list(shared),
a_only_tools=list(a_only),
b_only_tools=list(b_only),
issues=issues
)
return result.model_dump()
async def validate_agent_refs(
self,
agent_name: str,
claude_md_path: str,
plugin_paths: Optional[list[str]] = None
) -> dict:
"""
Validate that all tool references in an agent definition exist.
Args:
agent_name: Name of the agent to validate
claude_md_path: Path to CLAUDE.md containing the agent
plugin_paths: Optional list of plugin paths to check for tools
Returns:
Validation result with found/missing tools and issues
"""
# Parse CLAUDE.md for agents
agents_result = await self.parse_tools.parse_claude_md_agents(claude_md_path)
if "error" in agents_result:
return {
"error": agents_result["error"],
"agent_name": agent_name
}
# Find the specific agent
agent = None
for a in agents_result.get("agents", []):
if a["name"].lower() == agent_name.lower():
agent = a
break
if not agent:
return {
"error": f"Agent '{agent_name}' not found in {claude_md_path}",
"agent_name": agent_name,
"available_agents": [a["name"] for a in agents_result.get("agents", [])]
}
# Collect all available tools from plugins
available_tools = set()
if plugin_paths:
for plugin_path in plugin_paths:
interface = await self.parse_tools.parse_plugin_interface(plugin_path)
if "error" not in interface:
for tool in interface.get("tools", []):
available_tools.add(tool["name"])
# Check agent tool references
tool_refs = set(agent.get("tool_refs", []))
found = tool_refs & available_tools if available_tools else tool_refs
missing = tool_refs - available_tools if available_tools else set()
issues = []
# Report missing tools
for tool in missing:
issues.append(ValidationIssue(
severity=IssueSeverity.ERROR,
issue_type=IssueType.MISSING_TOOL,
message=f"Agent '{agent_name}' references tool '{tool}' which is not found",
location=claude_md_path,
suggestion=f"Check if tool '{tool}' exists or fix the reference"
))
# Check if agent has no tool refs (might be incomplete)
if not tool_refs:
issues.append(ValidationIssue(
severity=IssueSeverity.INFO,
issue_type=IssueType.UNDECLARED_OUTPUT,
message=f"Agent '{agent_name}' has no documented tool references",
location=claude_md_path,
suggestion="Consider documenting which tools this agent uses"
))
result = AgentValidationResult(
agent_name=agent_name,
valid=len([i for i in issues if i.severity == IssueSeverity.ERROR]) == 0,
tool_refs_found=list(found),
tool_refs_missing=list(missing),
issues=issues
)
return result.model_dump()
async def validate_data_flow(self, agent_name: str, claude_md_path: str) -> dict:
"""
Validate data flow through an agent's tool sequence.
Checks that each step's expected output can be used by the next step.
Args:
agent_name: Name of the agent to validate
claude_md_path: Path to CLAUDE.md containing the agent
Returns:
Data flow validation result with steps and issues
"""
# Parse CLAUDE.md for agents
agents_result = await self.parse_tools.parse_claude_md_agents(claude_md_path)
if "error" in agents_result:
return {
"error": agents_result["error"],
"agent_name": agent_name
}
# Find the specific agent
agent = None
for a in agents_result.get("agents", []):
if a["name"].lower() == agent_name.lower():
agent = a
break
if not agent:
return {
"error": f"Agent '{agent_name}' not found in {claude_md_path}",
"agent_name": agent_name,
"available_agents": [a["name"] for a in agents_result.get("agents", [])]
}
issues = []
flow_steps = []
# Extract workflow steps
workflow_steps = agent.get("workflow_steps", [])
responsibilities = agent.get("responsibilities", [])
# Build flow from workflow steps or responsibilities
steps = workflow_steps if workflow_steps else responsibilities
for i, step in enumerate(steps):
flow_steps.append(f"Step {i+1}: {step}")
# Check for data flow patterns
tool_refs = agent.get("tool_refs", [])
# Known data flow patterns
# e.g., data-platform produces data_ref, viz-platform consumes it
known_producers = {
"read_csv": "data_ref",
"read_parquet": "data_ref",
"pg_query": "data_ref",
"filter": "data_ref",
"groupby": "data_ref",
}
known_consumers = {
"describe": "data_ref",
"head": "data_ref",
"tail": "data_ref",
"to_csv": "data_ref",
"to_parquet": "data_ref",
}
# Check if agent uses tools that require data_ref
has_producer = any(t in known_producers for t in tool_refs)
has_consumer = any(t in known_consumers for t in tool_refs)
if has_consumer and not has_producer:
issues.append(ValidationIssue(
severity=IssueSeverity.WARNING,
issue_type=IssueType.INTERFACE_MISMATCH,
message=f"Agent '{agent_name}' uses tools that consume data_ref but no producer found",
location=claude_md_path,
suggestion="Ensure a data loading tool (read_csv, pg_query, etc.) is used before data consumers"
))
# Check for empty workflow
if not steps and not tool_refs:
issues.append(ValidationIssue(
severity=IssueSeverity.INFO,
issue_type=IssueType.UNDECLARED_OUTPUT,
message=f"Agent '{agent_name}' has no documented workflow or tool sequence",
location=claude_md_path,
suggestion="Consider documenting the agent's workflow steps"
))
result = DataFlowResult(
agent_name=agent_name,
valid=len([i for i in issues if i.severity == IssueSeverity.ERROR]) == 0,
flow_steps=flow_steps,
issues=issues
)
return result.model_dump()
async def validate_workflow_integration(
self,
plugin_path: str,
domain_label: str,
expected_contract: Optional[str] = None
) -> dict:
"""
Validate that a domain plugin exposes required advisory interfaces.
Checks for:
- Gate command (e.g., /design-gate, /data-gate) - REQUIRED
- Gate contract version (gate_contract in frontmatter) - INFO if missing
- Review command (e.g., /design-review, /data-review) - recommended
- Advisory agent referencing the domain label - recommended
Args:
plugin_path: Path to the domain plugin directory
domain_label: The Domain/* label it claims to handle (e.g., Domain/Viz)
expected_contract: Expected contract version (e.g., 'v1'). If provided,
validates the gate command's contract matches.
Returns:
Validation result with found interfaces and issues
"""
import re
plugin_path_obj = Path(plugin_path)
issues = []
# Extract plugin name from path
plugin_name = plugin_path_obj.name
if not plugin_path_obj.exists():
return {
"error": f"Plugin directory not found: {plugin_path}",
"plugin_path": plugin_path,
"domain_label": domain_label
}
# Extract domain short name from label (e.g., "Domain/Viz" -> "viz", "Domain/Data" -> "data")
domain_short = domain_label.split("/")[-1].lower() if "/" in domain_label else domain_label.lower()
# Check for gate command
commands_dir = plugin_path_obj / "commands"
gate_command_found = False
gate_contract = None
gate_patterns = ["pass", "fail", "PASS", "FAIL", "Binary pass/fail", "gate"]
if commands_dir.exists():
for cmd_file in commands_dir.glob("*.md"):
if "gate" in cmd_file.name.lower():
# Verify it's actually a gate command by checking content
content = cmd_file.read_text()
if any(pattern in content for pattern in gate_patterns):
gate_command_found = True
# Parse frontmatter for gate_contract
frontmatter_match = re.match(r'^---\n(.*?)\n---', content, re.DOTALL)
if frontmatter_match:
frontmatter = frontmatter_match.group(1)
contract_match = re.search(r'gate_contract:\s*(\S+)', frontmatter)
if contract_match:
gate_contract = contract_match.group(1)
break
if not gate_command_found:
issues.append(ValidationIssue(
severity=IssueSeverity.ERROR,
issue_type=IssueType.MISSING_INTEGRATION,
message=f"Plugin '{plugin_name}' lacks a gate command for domain '{domain_label}'",
location=str(commands_dir),
suggestion=f"Create commands/{domain_short}-gate.md with binary PASS/FAIL output"
))
# Check for review command
review_command_found = False
if commands_dir.exists():
for cmd_file in commands_dir.glob("*.md"):
if "review" in cmd_file.name.lower() and "gate" not in cmd_file.name.lower():
review_command_found = True
break
if not review_command_found:
issues.append(ValidationIssue(
severity=IssueSeverity.WARNING,
issue_type=IssueType.MISSING_INTEGRATION,
message=f"Plugin '{plugin_name}' lacks a review command for domain '{domain_label}'",
location=str(commands_dir),
suggestion=f"Create commands/{domain_short}-review.md for detailed audits"
))
# Check for advisory agent
agents_dir = plugin_path_obj / "agents"
advisory_agent_found = False
if agents_dir.exists():
for agent_file in agents_dir.glob("*.md"):
content = agent_file.read_text()
# Check if agent references the domain label or gate command
if domain_label in content or f"{domain_short}-gate" in content.lower() or "advisor" in agent_file.name.lower() or "reviewer" in agent_file.name.lower():
advisory_agent_found = True
break
if not advisory_agent_found:
issues.append(ValidationIssue(
severity=IssueSeverity.WARNING,
issue_type=IssueType.MISSING_INTEGRATION,
message=f"Plugin '{plugin_name}' lacks an advisory agent for domain '{domain_label}'",
location=str(agents_dir) if agents_dir.exists() else str(plugin_path_obj),
suggestion=f"Create agents/{domain_short}-advisor.md referencing '{domain_label}'"
))
# Check gate contract version
if gate_command_found:
if not gate_contract:
issues.append(ValidationIssue(
severity=IssueSeverity.INFO,
issue_type=IssueType.MISSING_INTEGRATION,
message=f"Gate command does not declare a contract version",
location=str(commands_dir),
suggestion="Consider adding `gate_contract: v1` to frontmatter for version tracking"
))
elif expected_contract and gate_contract != expected_contract:
issues.append(ValidationIssue(
severity=IssueSeverity.WARNING,
issue_type=IssueType.INTERFACE_MISMATCH,
message=f"Contract version mismatch: gate declares {gate_contract}, projman expects {expected_contract}",
location=str(commands_dir),
suggestion=f"Update domain-consultation.md Gate Command Reference table to {gate_contract}, or update gate command to {expected_contract}"
))
result = WorkflowIntegrationResult(
plugin_name=plugin_name,
domain_label=domain_label,
valid=gate_command_found, # Only gate is required for validity
gate_command_found=gate_command_found,
gate_contract=gate_contract,
review_command_found=review_command_found,
advisory_agent_found=advisory_agent_found,
issues=issues
)
return result.model_dump()
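A usage sketch for the workflow-integration check above; the plugin directory is a placeholder, while `Domain/Viz` and the `v1` contract version come from the docstrings:

```python
import asyncio

from mcp_server.validation_tools import ValidationTools


async def main() -> None:
    result = await ValidationTools().validate_workflow_integration(
        plugin_path="plugins/viz-platform",  # placeholder domain plugin
        domain_label="Domain/Viz",
        expected_contract="v1",
    )
    print("valid:", result.get("valid"), "contract:", result.get("gate_contract"))
    for issue in result.get("issues", []):
        print(f"[{issue['severity']}] {issue['message']}")


asyncio.run(main())
```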

View File

@@ -1,41 +0,0 @@
[build-system]
requires = ["setuptools>=61.0", "wheel"]
build-backend = "setuptools.build_meta"
[project]
name = "contract-validator-mcp"
version = "1.0.0"
description = "MCP Server for cross-plugin compatibility validation and agent verification"
readme = "README.md"
license = {text = "MIT"}
requires-python = ">=3.10"
authors = [
{name = "Leo Miranda"}
]
classifiers = [
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
]
dependencies = [
"mcp>=0.9.0",
"pydantic>=2.5.0",
]
[project.optional-dependencies]
dev = [
"pytest>=7.4.3",
"pytest-asyncio>=0.23.0",
]
[tool.setuptools.packages.find]
where = ["."]
include = ["mcp_server*"]
[tool.pytest.ini_options]
asyncio_mode = "auto"
testpaths = ["tests"]

View File

@@ -1,9 +0,0 @@
# MCP SDK
mcp>=0.9.0
# Utilities
pydantic>=2.5.0
# Testing
pytest>=7.4.3
pytest-asyncio>=0.23.0

View File

@@ -1,21 +0,0 @@
#!/bin/bash
# Capture original working directory before any cd operations
# This should be the user's project directory when launched by Claude Code
export CLAUDE_PROJECT_DIR="${CLAUDE_PROJECT_DIR:-$PWD}"
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
CACHE_VENV="$HOME/.cache/claude-mcp-venvs/leo-claude-mktplace/contract-validator/.venv"
LOCAL_VENV="$SCRIPT_DIR/.venv"
if [[ -f "$CACHE_VENV/bin/python" ]]; then
PYTHON="$CACHE_VENV/bin/python"
elif [[ -f "$LOCAL_VENV/bin/python" ]]; then
PYTHON="$LOCAL_VENV/bin/python"
else
echo "ERROR: No venv found. Run: ./scripts/setup-venvs.sh" >&2
exit 1
fi
cd "$SCRIPT_DIR"
export PYTHONPATH="$SCRIPT_DIR"
exec "$PYTHON" -m mcp_server.server "$@"

View File

@@ -1 +0,0 @@
# Tests for contract-validator MCP server

View File

@@ -1,193 +0,0 @@
"""
Unit tests for parse tools.
"""
import pytest
from pathlib import Path
@pytest.fixture
def parse_tools():
"""Create ParseTools instance"""
from mcp_server.parse_tools import ParseTools
return ParseTools()
@pytest.fixture
def sample_readme(tmp_path):
"""Create a sample README.md for testing"""
readme = tmp_path / "README.md"
readme.write_text("""# Test Plugin
A test plugin for validation.
## Features
- **Feature One**: Does something
- **Feature Two**: Does something else
## Commands
| Command | Description |
|---------|-------------|
| `/test-cmd` | Test command |
| `/another-cmd` | Another test command |
## Agents
| Agent | Description |
|-------|-------------|
| `test-agent` | A test agent |
## Tools Summary
### Category A (3 tools)
`tool_a`, `tool_b`, `tool_c`
### Category B (2 tools)
`tool_d`, `tool_e`
""")
return str(tmp_path)
@pytest.fixture
def sample_claude_md(tmp_path):
"""Create a sample CLAUDE.md for testing"""
claude_md = tmp_path / "CLAUDE.md"
claude_md.write_text("""# CLAUDE.md
## Project Overview
### Four-Agent Model (test)
| Agent | Personality | Responsibilities |
|-------|-------------|------------------|
| **Planner** | Thoughtful | Planning via `create_issue`, `search_lessons` |
| **Executor** | Focused | Implementation via `write`, `edit` |
## Workflow
1. Planner creates issues
2. Executor implements code
""")
return str(claude_md)
@pytest.mark.asyncio
async def test_parse_plugin_interface_basic(parse_tools, sample_readme):
"""Test basic plugin interface parsing"""
result = await parse_tools.parse_plugin_interface(sample_readme)
assert "error" not in result
# Plugin name extraction strips "Plugin" suffix
assert result["plugin_name"] == "Test"
assert "A test plugin" in result["description"]
@pytest.mark.asyncio
async def test_parse_plugin_interface_commands(parse_tools, sample_readme):
"""Test command extraction from README"""
result = await parse_tools.parse_plugin_interface(sample_readme)
commands = result["commands"]
assert len(commands) == 2
assert commands[0]["name"] == "/test-cmd"
assert commands[1]["name"] == "/another-cmd"
@pytest.mark.asyncio
async def test_parse_plugin_interface_agents(parse_tools, sample_readme):
"""Test agent extraction from README"""
result = await parse_tools.parse_plugin_interface(sample_readme)
agents = result["agents"]
assert len(agents) == 1
assert agents[0]["name"] == "test-agent"
@pytest.mark.asyncio
async def test_parse_plugin_interface_tools(parse_tools, sample_readme):
"""Test tool extraction from README"""
result = await parse_tools.parse_plugin_interface(sample_readme)
tools = result["tools"]
tool_names = [t["name"] for t in tools]
assert "tool_a" in tool_names
assert "tool_b" in tool_names
assert "tool_e" in tool_names
assert len(tools) >= 5
@pytest.mark.asyncio
async def test_parse_plugin_interface_categories(parse_tools, sample_readme):
"""Test tool category extraction"""
result = await parse_tools.parse_plugin_interface(sample_readme)
categories = result["tool_categories"]
assert "Category A" in categories
assert "Category B" in categories
assert "tool_a" in categories["Category A"]
@pytest.mark.asyncio
async def test_parse_plugin_interface_features(parse_tools, sample_readme):
"""Test feature extraction"""
result = await parse_tools.parse_plugin_interface(sample_readme)
features = result["features"]
assert "Feature One" in features
assert "Feature Two" in features
@pytest.mark.asyncio
async def test_parse_plugin_interface_not_found(parse_tools, tmp_path):
"""Test error when README not found"""
result = await parse_tools.parse_plugin_interface(str(tmp_path / "nonexistent"))
assert "error" in result
assert "not found" in result["error"].lower()
@pytest.mark.asyncio
async def test_parse_claude_md_agents(parse_tools, sample_claude_md):
"""Test agent extraction from CLAUDE.md"""
result = await parse_tools.parse_claude_md_agents(sample_claude_md)
assert "error" not in result
assert result["agent_count"] == 2
agents = result["agents"]
agent_names = [a["name"] for a in agents]
assert "Planner" in agent_names
assert "Executor" in agent_names
@pytest.mark.asyncio
async def test_parse_claude_md_tool_refs(parse_tools, sample_claude_md):
"""Test tool reference extraction from agents"""
result = await parse_tools.parse_claude_md_agents(sample_claude_md)
agents = {a["name"]: a for a in result["agents"]}
planner = agents["Planner"]
assert "create_issue" in planner["tool_refs"]
assert "search_lessons" in planner["tool_refs"]
@pytest.mark.asyncio
async def test_parse_claude_md_not_found(parse_tools, tmp_path):
"""Test error when CLAUDE.md not found"""
result = await parse_tools.parse_claude_md_agents(str(tmp_path / "CLAUDE.md"))
assert "error" in result
assert "not found" in result["error"].lower()
@pytest.mark.asyncio
async def test_parse_plugin_with_direct_file(parse_tools, sample_readme):
"""Test parsing with direct file path instead of directory"""
readme_path = Path(sample_readme) / "README.md"
result = await parse_tools.parse_plugin_interface(str(readme_path))
assert "error" not in result
# Plugin name extraction strips "Plugin" suffix
assert result["plugin_name"] == "Test"

View File

@@ -1,261 +0,0 @@
"""
Unit tests for report tools.
"""
import pytest
from pathlib import Path
@pytest.fixture
def report_tools():
"""Create ReportTools instance"""
from mcp_server.report_tools import ReportTools
return ReportTools()
@pytest.fixture
def sample_marketplace(tmp_path):
"""Create a sample marketplace structure"""
import json
plugins_dir = tmp_path / "plugins"
plugins_dir.mkdir()
# Plugin 1
plugin1 = plugins_dir / "plugin-one"
plugin1.mkdir()
plugin1_meta = plugin1 / ".claude-plugin"
plugin1_meta.mkdir()
(plugin1_meta / "plugin.json").write_text(json.dumps({"name": "plugin-one"}))
(plugin1 / "README.md").write_text("""# plugin-one
First test plugin.
## Commands
| Command | Description |
|---------|-------------|
| `/cmd-one` | Command one |
## Tools Summary
### Tools (2 tools)
`tool_a`, `tool_b`
""")
# Plugin 2
plugin2 = plugins_dir / "plugin-two"
plugin2.mkdir()
plugin2_meta = plugin2 / ".claude-plugin"
plugin2_meta.mkdir()
(plugin2_meta / "plugin.json").write_text(json.dumps({"name": "plugin-two"}))
(plugin2 / "README.md").write_text("""# plugin-two
Second test plugin.
## Commands
| Command | Description |
|---------|-------------|
| `/cmd-two` | Command two |
## Tools Summary
### Tools (2 tools)
`tool_c`, `tool_d`
""")
# Plugin 3 (with conflict)
plugin3 = plugins_dir / "plugin-three"
plugin3.mkdir()
plugin3_meta = plugin3 / ".claude-plugin"
plugin3_meta.mkdir()
(plugin3_meta / "plugin.json").write_text(json.dumps({"name": "plugin-three"}))
(plugin3 / "README.md").write_text("""# plugin-three
Third test plugin with conflict.
## Commands
| Command | Description |
|---------|-------------|
| `/cmd-one` | Conflicting command |
## Tools Summary
### Tools (1 tool)
`tool_e`
""")
return str(tmp_path)
@pytest.fixture
def marketplace_no_plugins(tmp_path):
"""Create marketplace with no plugins"""
plugins_dir = tmp_path / "plugins"
plugins_dir.mkdir()
return str(tmp_path)
@pytest.fixture
def marketplace_no_dir(tmp_path):
"""Create path without plugins directory"""
return str(tmp_path)
@pytest.mark.asyncio
async def test_generate_report_json_format(report_tools, sample_marketplace):
"""Test JSON format report generation"""
result = await report_tools.generate_compatibility_report(
sample_marketplace, "json"
)
assert "error" not in result
assert "generated_at" in result
assert "summary" in result
assert "plugins" in result
assert result["summary"]["total_plugins"] == 3
@pytest.mark.asyncio
async def test_generate_report_markdown_format(report_tools, sample_marketplace):
"""Test markdown format report generation"""
result = await report_tools.generate_compatibility_report(
sample_marketplace, "markdown"
)
assert "error" not in result
assert "report" in result
assert "# Contract Validation Report" in result["report"]
assert "## Summary" in result["report"]
@pytest.mark.asyncio
async def test_generate_report_finds_conflicts(report_tools, sample_marketplace):
"""Test that report finds command conflicts"""
result = await report_tools.generate_compatibility_report(
sample_marketplace, "json"
)
assert "error" not in result
assert result["summary"]["errors"] > 0
assert result["summary"]["total_issues"] > 0
@pytest.mark.asyncio
async def test_generate_report_counts_correctly(report_tools, sample_marketplace):
"""Test summary counts are correct"""
result = await report_tools.generate_compatibility_report(
sample_marketplace, "json"
)
summary = result["summary"]
assert summary["total_plugins"] == 3
assert summary["total_commands"] == 3 # 3 commands total
assert summary["total_tools"] == 5 # a, b, c, d, e
@pytest.mark.asyncio
async def test_generate_report_no_plugins(report_tools, marketplace_no_plugins):
"""Test error when no plugins found"""
result = await report_tools.generate_compatibility_report(
marketplace_no_plugins, "json"
)
assert "error" in result
assert "no plugins" in result["error"].lower()
@pytest.mark.asyncio
async def test_generate_report_no_plugins_dir(report_tools, marketplace_no_dir):
"""Test error when plugins directory doesn't exist"""
result = await report_tools.generate_compatibility_report(
marketplace_no_dir, "json"
)
assert "error" in result
assert "not found" in result["error"].lower()
@pytest.mark.asyncio
async def test_list_issues_all(report_tools, sample_marketplace):
"""Test listing all issues"""
result = await report_tools.list_issues(sample_marketplace, "all", "all")
assert "error" not in result
assert "issues" in result
assert result["total_issues"] > 0
@pytest.mark.asyncio
async def test_list_issues_filter_by_severity(report_tools, sample_marketplace):
"""Test filtering issues by severity"""
all_result = await report_tools.list_issues(sample_marketplace, "all", "all")
error_result = await report_tools.list_issues(sample_marketplace, "error", "all")
# Error count should be less than or equal to all
assert error_result["total_issues"] <= all_result["total_issues"]
# All issues should have error severity
for issue in error_result["issues"]:
sev = issue.get("severity", "")
if hasattr(sev, 'value'):
sev = sev.value
assert "error" in str(sev).lower()
@pytest.mark.asyncio
async def test_list_issues_filter_by_type(report_tools, sample_marketplace):
"""Test filtering issues by type"""
result = await report_tools.list_issues(
sample_marketplace, "all", "interface_mismatch"
)
# All issues should have matching type
for issue in result["issues"]:
itype = issue.get("issue_type", "")
if hasattr(itype, 'value'):
itype = itype.value
assert "interface_mismatch" in str(itype).lower()
@pytest.mark.asyncio
async def test_list_issues_combined_filters(report_tools, sample_marketplace):
"""Test combined severity and type filters"""
result = await report_tools.list_issues(
sample_marketplace, "error", "interface_mismatch"
)
assert "error" not in result
# Should have command conflict errors
assert result["total_issues"] > 0
@pytest.mark.asyncio
async def test_report_markdown_has_all_sections(report_tools, sample_marketplace):
"""Test markdown report contains all expected sections"""
result = await report_tools.generate_compatibility_report(
sample_marketplace, "markdown"
)
report = result["report"]
assert "## Summary" in report
assert "## Plugins" in report
# Compatibility section only if there are checks
assert "Plugin One" in report or "plugin-one" in report.lower()
@pytest.mark.asyncio
async def test_report_includes_suggestions(report_tools, sample_marketplace):
"""Test that issues include suggestions"""
result = await report_tools.generate_compatibility_report(
sample_marketplace, "json"
)
issues = result.get("all_issues", [])
# Find an issue with a suggestion
issues_with_suggestions = [
i for i in issues
if i.get("suggestion")
]
assert len(issues_with_suggestions) > 0

View File

@@ -1,514 +0,0 @@
"""
Unit tests for validation tools.
"""
import pytest
from pathlib import Path
@pytest.fixture
def validation_tools():
"""Create ValidationTools instance"""
from mcp_server.validation_tools import ValidationTools
return ValidationTools()
@pytest.fixture
def plugin_a(tmp_path):
"""Create first test plugin"""
plugin_dir = tmp_path / "plugin-a"
plugin_dir.mkdir()
(plugin_dir / ".claude-plugin").mkdir()
readme = plugin_dir / "README.md"
readme.write_text("""# Plugin A
Test plugin A.
## Commands
| Command | Description |
|---------|-------------|
| `/setup-a` | Setup A |
| `/shared-cmd` | Shared command |
## Tools Summary
### Core (2 tools)
`tool_one`, `tool_two`
""")
return str(plugin_dir)
@pytest.fixture
def plugin_b(tmp_path):
"""Create second test plugin"""
plugin_dir = tmp_path / "plugin-b"
plugin_dir.mkdir()
(plugin_dir / ".claude-plugin").mkdir()
readme = plugin_dir / "README.md"
readme.write_text("""# Plugin B
Test plugin B.
## Commands
| Command | Description |
|---------|-------------|
| `/setup-b` | Setup B |
| `/shared-cmd` | Shared command (conflict!) |
## Tools Summary
### Core (2 tools)
`tool_two`, `tool_three`
""")
return str(plugin_dir)
@pytest.fixture
def plugin_no_conflict(tmp_path):
"""Create plugin with no conflicts"""
plugin_dir = tmp_path / "plugin-c"
plugin_dir.mkdir()
(plugin_dir / ".claude-plugin").mkdir()
readme = plugin_dir / "README.md"
readme.write_text("""# Plugin C
Test plugin C.
## Commands
| Command | Description |
|---------|-------------|
| `/unique-cmd` | Unique command |
## Tools Summary
### Core (1 tool)
`unique_tool`
""")
return str(plugin_dir)
@pytest.fixture
def claude_md_with_agents(tmp_path):
"""Create CLAUDE.md with agent definitions"""
claude_md = tmp_path / "CLAUDE.md"
claude_md.write_text("""# CLAUDE.md
### Four-Agent Model
| Agent | Personality | Responsibilities |
|-------|-------------|------------------|
| **TestAgent** | Careful | Uses `tool_one`, `tool_two`, `missing_tool` |
| **ValidAgent** | Thorough | Uses `tool_one` only |
| **EmptyAgent** | Unknown | General tasks |
""")
return str(claude_md)
@pytest.mark.asyncio
async def test_validate_compatibility_command_conflict(validation_tools, plugin_a, plugin_b):
"""Test detection of command name conflicts"""
result = await validation_tools.validate_compatibility(plugin_a, plugin_b)
assert "error" not in result
assert result["compatible"] is False
# Find the command conflict issue
error_issues = [i for i in result["issues"] if i["severity"].value == "error"]
assert len(error_issues) > 0
assert any("/shared-cmd" in str(i["message"]) for i in error_issues)
@pytest.mark.asyncio
async def test_validate_compatibility_tool_overlap(validation_tools, plugin_a, plugin_b):
"""Test detection of tool name overlaps"""
result = await validation_tools.validate_compatibility(plugin_a, plugin_b)
assert "tool_two" in result["shared_tools"]
@pytest.mark.asyncio
async def test_validate_compatibility_unique_tools(validation_tools, plugin_a, plugin_b):
"""Test identification of unique tools per plugin"""
result = await validation_tools.validate_compatibility(plugin_a, plugin_b)
assert "tool_one" in result["a_only_tools"]
assert "tool_three" in result["b_only_tools"]
@pytest.mark.asyncio
async def test_validate_compatibility_no_conflict(validation_tools, plugin_a, plugin_no_conflict):
"""Test compatible plugins"""
result = await validation_tools.validate_compatibility(plugin_a, plugin_no_conflict)
assert "error" not in result
assert result["compatible"] is True
@pytest.mark.asyncio
async def test_validate_compatibility_missing_plugin(validation_tools, plugin_a, tmp_path):
"""Test error when plugin not found"""
result = await validation_tools.validate_compatibility(
plugin_a,
str(tmp_path / "nonexistent")
)
assert "error" in result
@pytest.mark.asyncio
async def test_validate_agent_refs_with_missing_tools(validation_tools, claude_md_with_agents, plugin_a):
"""Test detection of missing tool references"""
result = await validation_tools.validate_agent_refs(
"TestAgent",
claude_md_with_agents,
[plugin_a]
)
assert "error" not in result
assert result["valid"] is False
assert "missing_tool" in result["tool_refs_missing"]
@pytest.mark.asyncio
async def test_validate_agent_refs_valid_agent(validation_tools, claude_md_with_agents, plugin_a):
"""Test valid agent with all tools found"""
result = await validation_tools.validate_agent_refs(
"ValidAgent",
claude_md_with_agents,
[plugin_a]
)
assert "error" not in result
assert result["valid"] is True
assert "tool_one" in result["tool_refs_found"]
@pytest.mark.asyncio
async def test_validate_agent_refs_empty_agent(validation_tools, claude_md_with_agents, plugin_a):
"""Test agent with no tool references"""
result = await validation_tools.validate_agent_refs(
"EmptyAgent",
claude_md_with_agents,
[plugin_a]
)
assert "error" not in result
# Should have info issue about undocumented references
info_issues = [i for i in result["issues"] if i["severity"].value == "info"]
assert len(info_issues) > 0
@pytest.mark.asyncio
async def test_validate_agent_refs_agent_not_found(validation_tools, claude_md_with_agents, plugin_a):
"""Test error when agent not found"""
result = await validation_tools.validate_agent_refs(
"NonexistentAgent",
claude_md_with_agents,
[plugin_a]
)
assert "error" in result
assert "not found" in result["error"].lower()
@pytest.mark.asyncio
async def test_validate_data_flow_valid(validation_tools, tmp_path):
"""Test data flow validation with valid flow"""
claude_md = tmp_path / "CLAUDE.md"
claude_md.write_text("""# CLAUDE.md
### Four-Agent Model
| Agent | Personality | Responsibilities |
|-------|-------------|------------------|
| **DataAgent** | Analytical | Load with `read_csv`, analyze with `describe`, export with `to_csv` |
""")
result = await validation_tools.validate_data_flow("DataAgent", str(claude_md))
assert "error" not in result
assert result["valid"] is True
@pytest.mark.asyncio
async def test_validate_data_flow_missing_producer(validation_tools, tmp_path):
"""Test data flow with consumer but no producer"""
claude_md = tmp_path / "CLAUDE.md"
claude_md.write_text("""# CLAUDE.md
### Four-Agent Model
| Agent | Personality | Responsibilities |
|-------|-------------|------------------|
| **BadAgent** | Careless | Just runs `describe`, `head`, `tail` without loading |
""")
result = await validation_tools.validate_data_flow("BadAgent", str(claude_md))
assert "error" not in result
# Should have warning about missing producer
warning_issues = [i for i in result["issues"] if i["severity"].value == "warning"]
assert len(warning_issues) > 0
# --- Workflow Integration Tests ---
@pytest.fixture
def domain_plugin_complete(tmp_path):
"""Create a complete domain plugin with gate, review, and advisory agent"""
plugin_dir = tmp_path / "viz-platform"
plugin_dir.mkdir()
(plugin_dir / ".claude-plugin").mkdir()
(plugin_dir / "commands").mkdir()
(plugin_dir / "agents").mkdir()
# Gate command with PASS/FAIL pattern
gate_cmd = plugin_dir / "commands" / "design-gate.md"
gate_cmd.write_text("""# /design-gate
Binary pass/fail validation gate for design system compliance.
## Output
- **PASS**: All design system checks passed
- **FAIL**: Design system violations detected
""")
# Review command
review_cmd = plugin_dir / "commands" / "design-review.md"
review_cmd.write_text("""# /design-review
Comprehensive design system audit.
""")
# Advisory agent
agent = plugin_dir / "agents" / "design-reviewer.md"
agent.write_text("""# design-reviewer
Design system compliance auditor.
Handles issues with `Domain/Viz` label.
""")
return str(plugin_dir)
@pytest.fixture
def domain_plugin_missing_gate(tmp_path):
"""Create domain plugin with review and agent but no gate command"""
plugin_dir = tmp_path / "data-platform"
plugin_dir.mkdir()
(plugin_dir / ".claude-plugin").mkdir()
(plugin_dir / "commands").mkdir()
(plugin_dir / "agents").mkdir()
# Review command (but no gate)
review_cmd = plugin_dir / "commands" / "data-review.md"
review_cmd.write_text("""# /data-review
Data integrity audit.
""")
# Advisory agent
agent = plugin_dir / "agents" / "data-advisor.md"
agent.write_text("""# data-advisor
Data integrity advisor for Domain/Data issues.
""")
return str(plugin_dir)
@pytest.fixture
def domain_plugin_minimal(tmp_path):
"""Create minimal plugin with no commands or agents"""
plugin_dir = tmp_path / "minimal-plugin"
plugin_dir.mkdir()
(plugin_dir / ".claude-plugin").mkdir()
readme = plugin_dir / "README.md"
readme.write_text("# Minimal Plugin\n\nNo commands or agents.")
return str(plugin_dir)
@pytest.mark.asyncio
async def test_validate_workflow_integration_complete(validation_tools, domain_plugin_complete):
"""Test complete domain plugin returns valid with all interfaces found"""
result = await validation_tools.validate_workflow_integration(
domain_plugin_complete,
"Domain/Viz"
)
assert "error" not in result
assert result["valid"] is True
assert result["gate_command_found"] is True
assert result["review_command_found"] is True
assert result["advisory_agent_found"] is True
# May have INFO issue about missing contract version (not an error/warning)
error_or_warning = [i for i in result["issues"]
if i["severity"].value in ("error", "warning")]
assert len(error_or_warning) == 0
@pytest.mark.asyncio
async def test_validate_workflow_integration_missing_gate(validation_tools, domain_plugin_missing_gate):
"""Test plugin missing gate command returns invalid with ERROR"""
result = await validation_tools.validate_workflow_integration(
domain_plugin_missing_gate,
"Domain/Data"
)
assert "error" not in result
assert result["valid"] is False
assert result["gate_command_found"] is False
assert result["review_command_found"] is True
assert result["advisory_agent_found"] is True
# Should have one ERROR for missing gate
error_issues = [i for i in result["issues"] if i["severity"].value == "error"]
assert len(error_issues) == 1
assert "gate" in error_issues[0]["message"].lower()
@pytest.mark.asyncio
async def test_validate_workflow_integration_minimal(validation_tools, domain_plugin_minimal):
"""Test minimal plugin returns invalid with multiple issues"""
result = await validation_tools.validate_workflow_integration(
domain_plugin_minimal,
"Domain/Test"
)
assert "error" not in result
assert result["valid"] is False
assert result["gate_command_found"] is False
assert result["review_command_found"] is False
assert result["advisory_agent_found"] is False
# Should have one ERROR (gate) and two WARNINGs (review, agent)
error_issues = [i for i in result["issues"] if i["severity"].value == "error"]
warning_issues = [i for i in result["issues"] if i["severity"].value == "warning"]
assert len(error_issues) == 1
assert len(warning_issues) == 2
@pytest.mark.asyncio
async def test_validate_workflow_integration_nonexistent_plugin(validation_tools, tmp_path):
"""Test error when plugin directory doesn't exist"""
result = await validation_tools.validate_workflow_integration(
str(tmp_path / "nonexistent"),
"Domain/Test"
)
assert "error" in result
assert "not found" in result["error"].lower()
# --- Gate Contract Version Tests ---
@pytest.fixture
def domain_plugin_with_contract(tmp_path):
"""Create domain plugin with gate_contract: v1 in frontmatter"""
plugin_dir = tmp_path / "viz-platform-versioned"
plugin_dir.mkdir()
(plugin_dir / ".claude-plugin").mkdir()
(plugin_dir / "commands").mkdir()
(plugin_dir / "agents").mkdir()
# Gate command with gate_contract in frontmatter
gate_cmd = plugin_dir / "commands" / "design-gate.md"
gate_cmd.write_text("""---
description: Design system compliance gate (pass/fail)
gate_contract: v1
---
# /design-gate
Binary pass/fail validation gate for design system compliance.
## Output
- **PASS**: All design system checks passed
- **FAIL**: Design system violations detected
""")
# Review command
review_cmd = plugin_dir / "commands" / "design-review.md"
review_cmd.write_text("""# /design-review
Comprehensive design system audit.
""")
# Advisory agent
agent = plugin_dir / "agents" / "design-reviewer.md"
agent.write_text("""# design-reviewer
Design system compliance auditor for Domain/Viz issues.
""")
return str(plugin_dir)
@pytest.mark.asyncio
async def test_validate_workflow_contract_match(validation_tools, domain_plugin_with_contract):
"""Test that matching expected_contract produces no warning"""
result = await validation_tools.validate_workflow_integration(
domain_plugin_with_contract,
"Domain/Viz",
expected_contract="v1"
)
assert "error" not in result
assert result["valid"] is True
assert result["gate_contract"] == "v1"
# Should have no warnings about contract mismatch
warning_issues = [i for i in result["issues"] if i["severity"].value == "warning"]
contract_warnings = [i for i in warning_issues if "contract" in i["message"].lower()]
assert len(contract_warnings) == 0
@pytest.mark.asyncio
async def test_validate_workflow_contract_mismatch(validation_tools, domain_plugin_with_contract):
"""Test that mismatched expected_contract produces WARNING"""
result = await validation_tools.validate_workflow_integration(
domain_plugin_with_contract,
"Domain/Viz",
expected_contract="v2" # Gate has v1
)
assert "error" not in result
assert result["valid"] is True # Contract mismatch doesn't affect validity
assert result["gate_contract"] == "v1"
# Should have warning about contract mismatch
warning_issues = [i for i in result["issues"] if i["severity"].value == "warning"]
contract_warnings = [i for i in warning_issues if "contract" in i["message"].lower()]
assert len(contract_warnings) == 1
assert "mismatch" in contract_warnings[0]["message"].lower()
assert "v1" in contract_warnings[0]["message"]
assert "v2" in contract_warnings[0]["message"]
@pytest.mark.asyncio
async def test_validate_workflow_no_contract(validation_tools, domain_plugin_complete):
"""Test that missing gate_contract produces INFO suggestion"""
result = await validation_tools.validate_workflow_integration(
domain_plugin_complete,
"Domain/Viz"
)
assert "error" not in result
assert result["valid"] is True
assert result["gate_contract"] is None
# Should have info issue about missing contract
info_issues = [i for i in result["issues"] if i["severity"].value == "info"]
contract_info = [i for i in info_issues if "contract" in i["message"].lower()]
assert len(contract_info) == 1
assert "does not declare" in contract_info[0]["message"].lower()

View File

@@ -1,131 +0,0 @@
# Data Platform MCP Server
MCP Server providing pandas, PostgreSQL/PostGIS, and dbt tools for Claude Code.
## Features
- **pandas Tools**: DataFrame operations with Arrow IPC data_ref persistence
- **PostgreSQL Tools**: Database queries with asyncpg connection pooling
- **PostGIS Tools**: Spatial data operations
- **dbt Tools**: Build tool wrapper with pre-execution validation
## Installation
```bash
cd mcp-servers/data-platform
python -m venv .venv
source .venv/bin/activate # On Windows: .venv\Scripts\activate
pip install -r requirements.txt
```
## Configuration
### System-Level (PostgreSQL credentials)
Create `~/.config/claude/postgres.env`:
```env
POSTGRES_URL=postgresql://user:password@host:5432/database
```
### Project-Level (dbt paths)
Create `.env` in your project root:
```env
DBT_PROJECT_DIR=/path/to/dbt/project
DBT_PROFILES_DIR=/path/to/.dbt
DATA_PLATFORM_MAX_ROWS=100000
```
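Project-level values override system-level ones: the loader applies the project `.env` with `override=True`. For example, a project can point at a different database than the system default (values below are illustrative):
```env
# Project .env - overrides POSTGRES_URL from ~/.config/claude/postgres.env
POSTGRES_URL=postgresql://user:password@localhost:5432/dev_db
```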
## Tools
### pandas Tools (14 tools)
| Tool | Description |
|------|-------------|
| `read_csv` | Load CSV file into DataFrame |
| `read_parquet` | Load Parquet file into DataFrame |
| `read_json` | Load JSON/JSONL file into DataFrame |
| `to_csv` | Export DataFrame to CSV file |
| `to_parquet` | Export DataFrame to Parquet file |
| `describe` | Get statistical summary of DataFrame |
| `head` | Get first N rows of DataFrame |
| `tail` | Get last N rows of DataFrame |
| `filter` | Filter DataFrame rows by condition |
| `select` | Select specific columns from DataFrame |
| `groupby` | Group DataFrame and aggregate |
| `join` | Join two DataFrames |
| `list_data` | List all stored DataFrames |
| `drop_data` | Remove a DataFrame from storage |
### PostgreSQL Tools (6 tools)
| Tool | Description |
|------|-------------|
| `pg_connect` | Test connection and return status |
| `pg_query` | Execute SELECT, return as data_ref |
| `pg_execute` | Execute INSERT/UPDATE/DELETE |
| `pg_tables` | List all tables in schema |
| `pg_columns` | Get column info for table |
| `pg_schemas` | List all schemas |
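A hypothetical query flow (table name and values are illustrative; query results follow the `data_ref` convention described below):
```
pg_connect() → {"connected": true, "database": "mydb", "postgis_available": true}
pg_query("SELECT * FROM orders WHERE amount > $1", [100], name="big_orders")
    → {"data_ref": "big_orders", "rows": 245}
```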
### PostGIS Tools (4 tools)
| Tool | Description |
|------|-------------|
| `st_tables` | List PostGIS-enabled tables |
| `st_geometry_type` | Get geometry type of column |
| `st_srid` | Get SRID of geometry column |
| `st_extent` | Get bounding box of geometries |
### dbt Tools (8 tools)
| Tool | Description |
|------|-------------|
| `dbt_parse` | Validate project (pre-execution) |
| `dbt_run` | Run models with selection |
| `dbt_test` | Run tests |
| `dbt_build` | Run + test |
| `dbt_compile` | Compile SQL without executing |
| `dbt_ls` | List resources |
| `dbt_docs_generate` | Generate documentation |
| `dbt_lineage` | Get model dependencies |
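The run/build wrappers validate before executing: `dbt_run` and `dbt_build` call `dbt_parse` first and abort if validation fails. A hypothetical session (model names are illustrative):
```
dbt_parse() → {"valid": true, "message": "dbt project validation passed"}
dbt_run(select="+orders") → dbt run --select +orders (re-parses first, aborts on failure)
dbt_lineage("orders") → {"upstream": [...], "downstream": [...], "materialization": "table"}
```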
## data_ref System
All DataFrame operations use a `data_ref` system to persist data across tool calls:
1. **Load data**: Returns a `data_ref` string (e.g., `"df_a1b2c3d4"`)
2. **Use data_ref**: Pass to other tools (filter, join, export)
3. **List data**: Use `list_data` to see all stored DataFrames
4. **Clean up**: Use `drop_data` when done
### Example Flow
```
read_csv("data.csv") → {"data_ref": "sales_data", "rows": 1000}
filter("sales_data", "amount > 100") → {"data_ref": "sales_data_filtered"}
describe("sales_data_filtered") → {statistics}
to_parquet("sales_data_filtered", "output.parquet") → {success}
```
## Memory Management
- Default row limit: 100,000 rows per DataFrame
- Configure via `DATA_PLATFORM_MAX_ROWS` environment variable
- Use chunked processing for large files (`chunk_size` parameter)
- Monitor with the `list_data` tool (shows memory usage), as sketched below
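A sketch of the chunked path and memory monitoring (row counts and memory values are illustrative):
```
read_csv("huge.csv", chunk_size=50000)
    → {"chunked": true, "chunks": [{"ref": "chunk_0", "rows": 50000}, ...], "total_chunks": 4}
list_data() → {"count": 4, "total_memory_mb": 38.2, "max_rows_limit": 100000, "dataframes": [...]}
```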
## Running
```bash
python -m mcp_server.server
```
## Development
```bash
pip install -e ".[dev]"
pytest
```

View File

@@ -1,7 +0,0 @@
"""
Data Platform MCP Server.
Provides pandas, PostgreSQL/PostGIS, and dbt tools to Claude Code via MCP.
"""
__version__ = "1.0.0"

View File

@@ -1,195 +0,0 @@
"""
Configuration loader for Data Platform MCP Server.
Implements hybrid configuration system:
- System-level: ~/.config/claude/postgres.env (credentials)
- Project-level: .env (dbt project paths, overrides)
- Auto-detection: dbt_project.yml discovery
"""
from pathlib import Path
from dotenv import load_dotenv
import os
import logging
from typing import Any, Dict, Optional
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class DataPlatformConfig:
"""Hybrid configuration loader for data platform tools"""
def __init__(self):
self.postgres_url: Optional[str] = None
self.dbt_project_dir: Optional[str] = None
self.dbt_profiles_dir: Optional[str] = None
self.max_rows: int = 100_000
def load(self) -> Dict[str, Optional[str]]:
"""
Load configuration from system and project levels.
Returns:
Dict containing postgres_url, dbt_project_dir, dbt_profiles_dir, max_rows
Note:
PostgreSQL credentials are optional - server can run in pandas-only mode.
"""
# Load system config (PostgreSQL credentials)
system_config = Path.home() / '.config' / 'claude' / 'postgres.env'
if system_config.exists():
load_dotenv(system_config)
logger.info(f"Loaded system configuration from {system_config}")
else:
logger.info(
f"System config not found: {system_config} - "
"PostgreSQL tools will be unavailable"
)
# Find project directory
project_dir = self._find_project_directory()
# Load project config (overrides system)
if project_dir:
project_config = project_dir / '.env'
if project_config.exists():
load_dotenv(project_config, override=True)
logger.info(f"Loaded project configuration from {project_config}")
# Extract values
self.postgres_url = os.getenv('POSTGRES_URL')
self.dbt_project_dir = os.getenv('DBT_PROJECT_DIR')
self.dbt_profiles_dir = os.getenv('DBT_PROFILES_DIR')
self.max_rows = int(os.getenv('DATA_PLATFORM_MAX_ROWS', '100000'))
# Auto-detect dbt project if not specified
if not self.dbt_project_dir and project_dir:
self.dbt_project_dir = self._find_dbt_project(project_dir)
if self.dbt_project_dir:
logger.info(f"Auto-detected dbt project: {self.dbt_project_dir}")
# Default dbt profiles dir to ~/.dbt
if not self.dbt_profiles_dir:
default_profiles = Path.home() / '.dbt'
if default_profiles.exists():
self.dbt_profiles_dir = str(default_profiles)
return {
'postgres_url': self.postgres_url,
'dbt_project_dir': self.dbt_project_dir,
'dbt_profiles_dir': self.dbt_profiles_dir,
'max_rows': self.max_rows,
'postgres_available': self.postgres_url is not None,
'dbt_available': self.dbt_project_dir is not None
}
def _find_project_directory(self) -> Optional[Path]:
"""
Find the user's project directory.
Returns:
Path to project directory, or None if not found
"""
# Strategy 1: Check CLAUDE_PROJECT_DIR environment variable
project_dir = os.getenv('CLAUDE_PROJECT_DIR')
if project_dir:
path = Path(project_dir)
if path.exists():
logger.info(f"Found project directory from CLAUDE_PROJECT_DIR: {path}")
return path
# Strategy 2: Check PWD
pwd = os.getenv('PWD')
if pwd:
path = Path(pwd)
if path.exists() and (
(path / '.git').exists() or
(path / '.env').exists() or
(path / 'dbt_project.yml').exists()
):
logger.info(f"Found project directory from PWD: {path}")
return path
# Strategy 3: Check current working directory
cwd = Path.cwd()
if (cwd / '.git').exists() or (cwd / '.env').exists() or (cwd / 'dbt_project.yml').exists():
logger.info(f"Found project directory from cwd: {cwd}")
return cwd
logger.debug("Could not determine project directory")
return None
def _find_dbt_project(self, start_dir: Path) -> Optional[str]:
"""
Find dbt_project.yml in the project or its subdirectories.
Args:
start_dir: Directory to start searching from
Returns:
Path to dbt project directory, or None if not found
"""
# Check root
if (start_dir / 'dbt_project.yml').exists():
return str(start_dir)
# Check common subdirectories
for subdir in ['dbt', 'transform', 'analytics', 'models']:
candidate = start_dir / subdir
if (candidate / 'dbt_project.yml').exists():
return str(candidate)
# Search one level deep
for item in start_dir.iterdir():
if item.is_dir() and not item.name.startswith('.'):
if (item / 'dbt_project.yml').exists():
return str(item)
return None
def load_config() -> Dict[str, Optional[str]]:
"""
Convenience function to load configuration.
Returns:
Configuration dictionary
"""
config = DataPlatformConfig()
return config.load()
def check_postgres_connection() -> Dict[str, Any]:
"""
Check PostgreSQL connection status for SessionStart hook.
Returns:
Dict with connection status and message
"""
import asyncio
config = load_config()
if not config.get('postgres_url'):
return {
'connected': False,
'message': 'PostgreSQL not configured (POSTGRES_URL not set)'
}
async def test_connection():
try:
import asyncpg
conn = await asyncpg.connect(config['postgres_url'], timeout=5)
version = await conn.fetchval('SELECT version()')
await conn.close()
return {
'connected': True,
'message': 'Connected to PostgreSQL',
'version': version.split(',')[0] if version else 'Unknown'
}
except Exception as e:
return {
'connected': False,
'message': f'PostgreSQL connection failed: {str(e)}'
}
return asyncio.run(test_connection())

View File

@@ -1,219 +0,0 @@
"""
Arrow IPC DataFrame Registry.
Provides persistent storage for DataFrames across tool calls using Apache Arrow
for efficient memory management and serialization.
"""
import pyarrow as pa
import pandas as pd
import uuid
import logging
from typing import Dict, Optional, List, Union
from dataclasses import dataclass
from datetime import datetime
logger = logging.getLogger(__name__)
@dataclass
class DataFrameInfo:
"""Metadata about a stored DataFrame"""
ref: str
rows: int
columns: int
column_names: List[str]
dtypes: Dict[str, str]
memory_bytes: int
created_at: datetime
source: Optional[str] = None
class DataStore:
"""
Singleton registry for Arrow Tables (DataFrames).
Uses Arrow IPC format for efficient memory usage and supports
data_ref based retrieval across multiple tool calls.
"""
_instance = None
_dataframes: Dict[str, pa.Table] = {}
_metadata: Dict[str, DataFrameInfo] = {}
_max_rows: int = 100_000
def __new__(cls):
if cls._instance is None:
cls._instance = super().__new__(cls)
cls._dataframes = {}
cls._metadata = {}
return cls._instance
@classmethod
def get_instance(cls) -> 'DataStore':
"""Get the singleton instance"""
if cls._instance is None:
cls._instance = cls()
return cls._instance
@classmethod
def set_max_rows(cls, max_rows: int):
"""Set the maximum rows limit"""
cls._max_rows = max_rows
def store(
self,
data: Union[pa.Table, pd.DataFrame],
name: Optional[str] = None,
source: Optional[str] = None
) -> str:
"""
Store a DataFrame and return its reference.
Args:
data: Arrow Table or pandas DataFrame
name: Optional name for the reference (auto-generated if not provided)
source: Optional source description (e.g., file path, query)
Returns:
data_ref string to retrieve the DataFrame later
"""
# Convert pandas to Arrow if needed
if isinstance(data, pd.DataFrame):
table = pa.Table.from_pandas(data)
else:
table = data
# Generate reference
data_ref = name or f"df_{uuid.uuid4().hex[:8]}"
# Ensure unique reference
if data_ref in self._dataframes and name is None:
data_ref = f"{data_ref}_{uuid.uuid4().hex[:4]}"
# Store table
self._dataframes[data_ref] = table
# Store metadata
schema = table.schema
self._metadata[data_ref] = DataFrameInfo(
ref=data_ref,
rows=table.num_rows,
columns=table.num_columns,
column_names=[f.name for f in schema],
dtypes={f.name: str(f.type) for f in schema},
memory_bytes=table.nbytes,
created_at=datetime.now(),
source=source
)
logger.info(f"Stored DataFrame '{data_ref}': {table.num_rows} rows, {table.num_columns} cols")
return data_ref
def get(self, data_ref: str) -> Optional[pa.Table]:
"""
Retrieve an Arrow Table by reference.
Args:
data_ref: Reference string from store()
Returns:
Arrow Table or None if not found
"""
return self._dataframes.get(data_ref)
def get_pandas(self, data_ref: str) -> Optional[pd.DataFrame]:
"""
Retrieve a DataFrame as pandas.
Args:
data_ref: Reference string from store()
Returns:
pandas DataFrame or None if not found
"""
table = self.get(data_ref)
if table is not None:
return table.to_pandas()
return None
def get_info(self, data_ref: str) -> Optional[DataFrameInfo]:
"""
Get metadata about a stored DataFrame.
Args:
data_ref: Reference string
Returns:
DataFrameInfo or None if not found
"""
return self._metadata.get(data_ref)
def list_refs(self) -> List[Dict]:
"""
List all stored DataFrame references with metadata.
Returns:
List of dicts with ref, rows, columns, memory info
"""
result = []
for ref, info in self._metadata.items():
result.append({
'ref': ref,
'rows': info.rows,
'columns': info.columns,
'column_names': info.column_names,
'memory_mb': round(info.memory_bytes / (1024 * 1024), 2),
'source': info.source,
'created_at': info.created_at.isoformat()
})
return result
def drop(self, data_ref: str) -> bool:
"""
Remove a DataFrame from the store.
Args:
data_ref: Reference string
Returns:
True if removed, False if not found
"""
if data_ref in self._dataframes:
del self._dataframes[data_ref]
del self._metadata[data_ref]
logger.info(f"Dropped DataFrame '{data_ref}'")
return True
return False
def clear(self):
"""Remove all stored DataFrames"""
count = len(self._dataframes)
self._dataframes.clear()
self._metadata.clear()
logger.info(f"Cleared {count} DataFrames from store")
def total_memory_bytes(self) -> int:
"""Get total memory used by all stored DataFrames"""
return sum(info.memory_bytes for info in self._metadata.values())
def total_memory_mb(self) -> float:
"""Get total memory in MB"""
return round(self.total_memory_bytes() / (1024 * 1024), 2)
def check_row_limit(self, row_count: int) -> Dict:
"""
Check if row count exceeds limit.
Args:
row_count: Number of rows
Returns:
Dict with 'exceeded' bool and 'message' if exceeded
"""
if row_count > self._max_rows:
return {
'exceeded': True,
'message': f"Row count ({row_count:,}) exceeds limit ({self._max_rows:,})",
'suggestion': f"Use chunked processing or filter data first",
'limit': self._max_rows
}
return {'exceeded': False}

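As a reference for the registry API above, a minimal standalone round-trip (a sketch, not part of the deleted file; assumes the package is importable as `mcp_server` with pandas and pyarrow installed):
```python
import pandas as pd
from mcp_server.data_store import DataStore

store = DataStore.get_instance()
ref = store.store(pd.DataFrame({"x": [1, 2, 3]}), name="demo", source="inline")
info = store.get_info(ref)                   # DataFrameInfo(ref='demo', rows=3, ...)
print(info.rows, store.total_memory_mb())    # 3, plus total registry memory in MB
store.drop(ref)                              # True; table and metadata removed
```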
View File

@@ -1,387 +0,0 @@
"""
dbt MCP Tools.
Provides dbt CLI wrapper with pre-execution validation.
"""
import subprocess
import json
import logging
import os
from pathlib import Path
from typing import Dict, List, Optional, Any
from .config import load_config
logger = logging.getLogger(__name__)
class DbtTools:
"""dbt CLI wrapper tools with pre-validation"""
def __init__(self):
self.config = load_config()
self.project_dir = self.config.get('dbt_project_dir')
self.profiles_dir = self.config.get('dbt_profiles_dir')
def _get_dbt_command(self, cmd: List[str]) -> List[str]:
"""Build dbt command with project and profiles directories"""
base = ['dbt']
if self.project_dir:
base.extend(['--project-dir', self.project_dir])
if self.profiles_dir:
base.extend(['--profiles-dir', self.profiles_dir])
base.extend(cmd)
return base
def _run_dbt(
self,
cmd: List[str],
timeout: int = 300,
capture_json: bool = False
) -> Dict:
"""
Run dbt command and return result.
Args:
cmd: dbt subcommand and arguments
timeout: Command timeout in seconds
capture_json: If True, parse JSON output
Returns:
Dict with command result
"""
if not self.project_dir:
return {
'error': 'dbt project not found',
'suggestion': 'Set DBT_PROJECT_DIR in project .env or ensure dbt_project.yml exists'
}
full_cmd = self._get_dbt_command(cmd)
logger.info(f"Running: {' '.join(full_cmd)}")
try:
env = os.environ.copy()
# Disable dbt analytics/tracking
env['DBT_SEND_ANONYMOUS_USAGE_STATS'] = 'false'
result = subprocess.run(
full_cmd,
capture_output=True,
text=True,
timeout=timeout,
cwd=self.project_dir,
env=env
)
output = {
'success': result.returncode == 0,
'command': ' '.join(cmd),
'stdout': result.stdout,
'stderr': result.stderr if result.returncode != 0 else None
}
if capture_json and result.returncode == 0:
try:
output['data'] = json.loads(result.stdout)
except json.JSONDecodeError:
pass
return output
except subprocess.TimeoutExpired:
return {
'error': f'Command timed out after {timeout}s',
'command': ' '.join(cmd)
}
except FileNotFoundError:
return {
'error': 'dbt not found in PATH',
'suggestion': 'Install dbt: pip install dbt-core dbt-postgres'
}
except Exception as e:
logger.error(f"dbt command failed: {e}")
return {'error': str(e)}
async def dbt_parse(self) -> Dict:
"""
Validate dbt project without executing (pre-flight check).
Returns:
Dict with validation result and any errors
"""
result = self._run_dbt(['parse'])
# Check if _run_dbt returned an error (e.g., project not found, timeout, dbt not installed)
if 'error' in result:
return result
if not result.get('success'):
# Extract useful error info from stderr
stderr = result.get('stderr', '') or result.get('stdout', '')
errors = []
# Look for common dbt 1.9+ deprecation warnings
if 'deprecated' in stderr.lower():
errors.append({
'type': 'deprecation',
'message': 'Deprecated syntax found - check dbt 1.9+ migration guide'
})
# Look for compilation errors
if 'compilation error' in stderr.lower():
errors.append({
'type': 'compilation',
'message': 'SQL compilation error - check model syntax'
})
return {
'valid': False,
'errors': errors,
'details': stderr[:2000] if stderr else None,
'suggestion': 'Fix issues before running dbt models'
}
return {
'valid': True,
'message': 'dbt project validation passed'
}
async def dbt_run(
self,
select: Optional[str] = None,
exclude: Optional[str] = None,
full_refresh: bool = False
) -> Dict:
"""
Run dbt models with pre-validation.
Args:
select: Model selection (e.g., "model_name", "+model_name", "tag:daily")
exclude: Models to exclude
full_refresh: If True, rebuild incremental models
Returns:
Dict with run result
"""
# ALWAYS validate first
parse_result = await self.dbt_parse()
if not parse_result.get('valid'):
return {
'error': 'Pre-validation failed',
**parse_result
}
cmd = ['run']
if select:
cmd.extend(['--select', select])
if exclude:
cmd.extend(['--exclude', exclude])
if full_refresh:
cmd.append('--full-refresh')
return self._run_dbt(cmd)
async def dbt_test(
self,
select: Optional[str] = None,
exclude: Optional[str] = None
) -> Dict:
"""
Run dbt tests.
Args:
select: Test selection
exclude: Tests to exclude
Returns:
Dict with test results
"""
cmd = ['test']
if select:
cmd.extend(['--select', select])
if exclude:
cmd.extend(['--exclude', exclude])
return self._run_dbt(cmd)
async def dbt_build(
self,
select: Optional[str] = None,
exclude: Optional[str] = None,
full_refresh: bool = False
) -> Dict:
"""
Run dbt build (run + test) with pre-validation.
Args:
select: Model/test selection
exclude: Resources to exclude
full_refresh: If True, rebuild incremental models
Returns:
Dict with build result
"""
# ALWAYS validate first
parse_result = await self.dbt_parse()
if not parse_result.get('valid'):
return {
'error': 'Pre-validation failed',
**parse_result
}
cmd = ['build']
if select:
cmd.extend(['--select', select])
if exclude:
cmd.extend(['--exclude', exclude])
if full_refresh:
cmd.append('--full-refresh')
return self._run_dbt(cmd)
async def dbt_compile(
self,
select: Optional[str] = None
) -> Dict:
"""
Compile dbt models to SQL without executing.
Args:
select: Model selection
Returns:
Dict with compiled SQL info
"""
cmd = ['compile']
if select:
cmd.extend(['--select', select])
return self._run_dbt(cmd)
async def dbt_ls(
self,
select: Optional[str] = None,
resource_type: Optional[str] = None,
output: str = 'name'
) -> Dict:
"""
List dbt resources.
Args:
select: Resource selection
resource_type: Filter by type (model, test, seed, snapshot, source)
output: Output format ('name', 'path', 'json')
Returns:
Dict with list of resources
"""
cmd = ['ls', '--output', output]
if select:
cmd.extend(['--select', select])
if resource_type:
cmd.extend(['--resource-type', resource_type])
result = self._run_dbt(cmd)
if result.get('success') and result.get('stdout'):
lines = [line.strip() for line in result['stdout'].split('\n') if line.strip()]
result['resources'] = lines
result['count'] = len(lines)
return result
async def dbt_docs_generate(self) -> Dict:
"""
Generate dbt documentation.
Returns:
Dict with generation result
"""
result = self._run_dbt(['docs', 'generate'])
if result.get('success') and self.project_dir:
# Check for generated catalog
catalog_path = Path(self.project_dir) / 'target' / 'catalog.json'
manifest_path = Path(self.project_dir) / 'target' / 'manifest.json'
result['catalog_generated'] = catalog_path.exists()
result['manifest_generated'] = manifest_path.exists()
return result
async def dbt_lineage(self, model: str) -> Dict:
"""
Get model dependencies and lineage.
Args:
model: Model name to analyze
Returns:
Dict with upstream and downstream dependencies
"""
if not self.project_dir:
return {'error': 'dbt project not found'}
manifest_path = Path(self.project_dir) / 'target' / 'manifest.json'
# Generate manifest if not exists
if not manifest_path.exists():
compile_result = await self.dbt_compile(select=model)
if not compile_result.get('success'):
return {
'error': 'Failed to compile manifest',
'details': compile_result
}
if not manifest_path.exists():
return {
'error': 'Manifest not found',
'suggestion': 'Run dbt compile first'
}
try:
with open(manifest_path) as f:
manifest = json.load(f)
# Find the model node
model_key = None
for key in manifest.get('nodes', {}):
if key.endswith(f'.{model}') or manifest['nodes'][key].get('name') == model:
model_key = key
break
if not model_key:
return {
'error': f'Model not found: {model}',
'available_models': [
n.get('name') for n in manifest.get('nodes', {}).values()
if n.get('resource_type') == 'model'
][:20]
}
node = manifest['nodes'][model_key]
# Get upstream (depends_on)
upstream = node.get('depends_on', {}).get('nodes', [])
# Get downstream (find nodes that depend on this one)
downstream = []
for key, other_node in manifest.get('nodes', {}).items():
deps = other_node.get('depends_on', {}).get('nodes', [])
if model_key in deps:
downstream.append(key)
return {
'model': model,
'unique_id': model_key,
'materialization': node.get('config', {}).get('materialized'),
'schema': node.get('schema'),
'database': node.get('database'),
'upstream': upstream,
'downstream': downstream,
'description': node.get('description'),
'tags': node.get('tags', [])
}
except Exception as e:
logger.error(f"dbt_lineage failed: {e}")
return {'error': str(e)}

View File

@@ -1,500 +0,0 @@
"""
pandas MCP Tools.
Provides DataFrame operations with Arrow IPC data_ref persistence.
"""
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
import json
import logging
from pathlib import Path
from typing import Dict, List, Optional, Any, Union
from .data_store import DataStore
from .config import load_config
logger = logging.getLogger(__name__)
class PandasTools:
"""pandas data manipulation tools with data_ref persistence"""
def __init__(self):
self.store = DataStore.get_instance()
config = load_config()
self.max_rows = config.get('max_rows', 100_000)
self.store.set_max_rows(self.max_rows)
def _check_and_store(
self,
df: pd.DataFrame,
name: Optional[str] = None,
source: Optional[str] = None
) -> Dict:
"""Check row limit and store DataFrame if within limits"""
check = self.store.check_row_limit(len(df))
if check['exceeded']:
return {
'error': 'row_limit_exceeded',
**check,
'preview': df.head(100).to_dict(orient='records')
}
data_ref = self.store.store(df, name=name, source=source)
return {
'data_ref': data_ref,
'rows': len(df),
'columns': list(df.columns),
'dtypes': {col: str(dtype) for col, dtype in df.dtypes.items()}
}
async def read_csv(
self,
file_path: str,
name: Optional[str] = None,
chunk_size: Optional[int] = None,
**kwargs
) -> Dict:
"""
Load CSV file into DataFrame.
Args:
file_path: Path to CSV file
name: Optional name for data_ref
chunk_size: If provided, process in chunks
**kwargs: Additional pandas read_csv arguments
Returns:
Dict with data_ref or error info
"""
path = Path(file_path)
if not path.exists():
return {'error': f'File not found: {file_path}'}
try:
if chunk_size:
# Chunked processing - return iterator info
chunks = []
for i, chunk in enumerate(pd.read_csv(path, chunksize=chunk_size, **kwargs)):
chunk_ref = self.store.store(chunk, name=f"{name or 'chunk'}_{i}", source=file_path)
chunks.append({'ref': chunk_ref, 'rows': len(chunk)})
return {
'chunked': True,
'chunks': chunks,
'total_chunks': len(chunks)
}
df = pd.read_csv(path, **kwargs)
return self._check_and_store(df, name=name, source=file_path)
except Exception as e:
logger.error(f"read_csv failed: {e}")
return {'error': str(e)}
async def read_parquet(
self,
file_path: str,
name: Optional[str] = None,
columns: Optional[List[str]] = None
) -> Dict:
"""
Load Parquet file into DataFrame.
Args:
file_path: Path to Parquet file
name: Optional name for data_ref
columns: Optional list of columns to load
Returns:
Dict with data_ref or error info
"""
path = Path(file_path)
if not path.exists():
return {'error': f'File not found: {file_path}'}
try:
table = pq.read_table(path, columns=columns)
df = table.to_pandas()
return self._check_and_store(df, name=name, source=file_path)
except Exception as e:
logger.error(f"read_parquet failed: {e}")
return {'error': str(e)}
async def read_json(
self,
file_path: str,
name: Optional[str] = None,
lines: bool = False,
**kwargs
) -> Dict:
"""
Load JSON/JSONL file into DataFrame.
Args:
file_path: Path to JSON file
name: Optional name for data_ref
lines: If True, read as JSON Lines format
**kwargs: Additional pandas read_json arguments
Returns:
Dict with data_ref or error info
"""
path = Path(file_path)
if not path.exists():
return {'error': f'File not found: {file_path}'}
try:
df = pd.read_json(path, lines=lines, **kwargs)
return self._check_and_store(df, name=name, source=file_path)
except Exception as e:
logger.error(f"read_json failed: {e}")
return {'error': str(e)}
async def to_csv(
self,
data_ref: str,
file_path: str,
index: bool = False,
**kwargs
) -> Dict:
"""
Export DataFrame to CSV file.
Args:
data_ref: Reference to stored DataFrame
file_path: Output file path
index: Whether to include index
**kwargs: Additional pandas to_csv arguments
Returns:
Dict with success status
"""
df = self.store.get_pandas(data_ref)
if df is None:
return {'error': f'DataFrame not found: {data_ref}'}
try:
df.to_csv(file_path, index=index, **kwargs)
return {
'success': True,
'file_path': file_path,
'rows': len(df),
'size_bytes': Path(file_path).stat().st_size
}
except Exception as e:
logger.error(f"to_csv failed: {e}")
return {'error': str(e)}
async def to_parquet(
self,
data_ref: str,
file_path: str,
compression: str = 'snappy'
) -> Dict:
"""
Export DataFrame to Parquet file.
Args:
data_ref: Reference to stored DataFrame
file_path: Output file path
compression: Compression codec
Returns:
Dict with success status
"""
table = self.store.get(data_ref)
if table is None:
return {'error': f'DataFrame not found: {data_ref}'}
try:
pq.write_table(table, file_path, compression=compression)
return {
'success': True,
'file_path': file_path,
'rows': table.num_rows,
'size_bytes': Path(file_path).stat().st_size
}
except Exception as e:
logger.error(f"to_parquet failed: {e}")
return {'error': str(e)}
async def describe(self, data_ref: str) -> Dict:
"""
Get statistical summary of DataFrame.
Args:
data_ref: Reference to stored DataFrame
Returns:
Dict with statistical summary
"""
df = self.store.get_pandas(data_ref)
if df is None:
return {'error': f'DataFrame not found: {data_ref}'}
try:
desc = df.describe(include='all')
info = self.store.get_info(data_ref)
return {
'data_ref': data_ref,
'shape': {'rows': len(df), 'columns': len(df.columns)},
'columns': list(df.columns),
'dtypes': {col: str(dtype) for col, dtype in df.dtypes.items()},
'memory_mb': info.memory_bytes / (1024 * 1024) if info else None,
'null_counts': df.isnull().sum().to_dict(),
'statistics': desc.to_dict()
}
except Exception as e:
logger.error(f"describe failed: {e}")
return {'error': str(e)}
async def head(self, data_ref: str, n: int = 10) -> Dict:
"""
Get first N rows of DataFrame.
Args:
data_ref: Reference to stored DataFrame
n: Number of rows
Returns:
Dict with rows as records
"""
df = self.store.get_pandas(data_ref)
if df is None:
return {'error': f'DataFrame not found: {data_ref}'}
try:
head_df = df.head(n)
return {
'data_ref': data_ref,
'total_rows': len(df),
'returned_rows': len(head_df),
'columns': list(df.columns),
'data': head_df.to_dict(orient='records')
}
except Exception as e:
logger.error(f"head failed: {e}")
return {'error': str(e)}
async def tail(self, data_ref: str, n: int = 10) -> Dict:
"""
Get last N rows of DataFrame.
Args:
data_ref: Reference to stored DataFrame
n: Number of rows
Returns:
Dict with rows as records
"""
df = self.store.get_pandas(data_ref)
if df is None:
return {'error': f'DataFrame not found: {data_ref}'}
try:
tail_df = df.tail(n)
return {
'data_ref': data_ref,
'total_rows': len(df),
'returned_rows': len(tail_df),
'columns': list(df.columns),
'data': tail_df.to_dict(orient='records')
}
except Exception as e:
logger.error(f"tail failed: {e}")
return {'error': str(e)}
async def filter(
self,
data_ref: str,
condition: str,
name: Optional[str] = None
) -> Dict:
"""
Filter DataFrame rows by condition.
Args:
data_ref: Reference to stored DataFrame
condition: pandas query string (e.g., "age > 30 and city == 'NYC'")
name: Optional name for result data_ref
Returns:
Dict with new data_ref for filtered result
"""
df = self.store.get_pandas(data_ref)
if df is None:
return {'error': f'DataFrame not found: {data_ref}'}
try:
filtered = df.query(condition).reset_index(drop=True)
result_name = name or f"{data_ref}_filtered"
return self._check_and_store(
filtered,
name=result_name,
source=f"filter({data_ref}, '{condition}')"
)
except Exception as e:
logger.error(f"filter failed: {e}")
return {'error': str(e)}
async def select(
self,
data_ref: str,
columns: List[str],
name: Optional[str] = None
) -> Dict:
"""
Select specific columns from DataFrame.
Args:
data_ref: Reference to stored DataFrame
columns: List of column names to select
name: Optional name for result data_ref
Returns:
Dict with new data_ref for selected columns
"""
df = self.store.get_pandas(data_ref)
if df is None:
return {'error': f'DataFrame not found: {data_ref}'}
try:
# Validate columns exist
missing = [c for c in columns if c not in df.columns]
if missing:
return {
'error': f'Columns not found: {missing}',
'available_columns': list(df.columns)
}
selected = df[columns]
result_name = name or f"{data_ref}_select"
return self._check_and_store(
selected,
name=result_name,
source=f"select({data_ref}, {columns})"
)
except Exception as e:
logger.error(f"select failed: {e}")
return {'error': str(e)}
async def groupby(
self,
data_ref: str,
by: Union[str, List[str]],
agg: Dict[str, Union[str, List[str]]],
name: Optional[str] = None
) -> Dict:
"""
Group DataFrame and aggregate.
Args:
data_ref: Reference to stored DataFrame
by: Column(s) to group by
agg: Aggregation dict (e.g., {"sales": "sum", "count": "mean"})
name: Optional name for result data_ref
Returns:
Dict with new data_ref for aggregated result
"""
df = self.store.get_pandas(data_ref)
if df is None:
return {'error': f'DataFrame not found: {data_ref}'}
try:
grouped = df.groupby(by).agg(agg).reset_index()
# Flatten column names if multi-level
if isinstance(grouped.columns, pd.MultiIndex):
grouped.columns = ['_'.join(col).strip('_') for col in grouped.columns]
result_name = name or f"{data_ref}_grouped"
return self._check_and_store(
grouped,
name=result_name,
source=f"groupby({data_ref}, by={by})"
)
except Exception as e:
logger.error(f"groupby failed: {e}")
return {'error': str(e)}
async def join(
self,
left_ref: str,
right_ref: str,
on: Optional[Union[str, List[str]]] = None,
left_on: Optional[Union[str, List[str]]] = None,
right_on: Optional[Union[str, List[str]]] = None,
how: str = 'inner',
name: Optional[str] = None
) -> Dict:
"""
Join two DataFrames.
Args:
left_ref: Reference to left DataFrame
right_ref: Reference to right DataFrame
on: Column(s) to join on (if same name in both)
left_on: Left join column(s)
right_on: Right join column(s)
how: Join type ('inner', 'left', 'right', 'outer')
name: Optional name for result data_ref
Returns:
Dict with new data_ref for joined result
"""
left_df = self.store.get_pandas(left_ref)
right_df = self.store.get_pandas(right_ref)
if left_df is None:
return {'error': f'DataFrame not found: {left_ref}'}
if right_df is None:
return {'error': f'DataFrame not found: {right_ref}'}
try:
joined = pd.merge(
left_df, right_df,
on=on, left_on=left_on, right_on=right_on,
how=how
)
result_name = name or f"{left_ref}_{right_ref}_joined"
return self._check_and_store(
joined,
name=result_name,
source=f"join({left_ref}, {right_ref}, how={how})"
)
except Exception as e:
logger.error(f"join failed: {e}")
return {'error': str(e)}
async def list_data(self) -> Dict:
"""
List all stored DataFrames.
Returns:
Dict with list of stored DataFrames and their info
"""
refs = self.store.list_refs()
return {
'count': len(refs),
'total_memory_mb': self.store.total_memory_mb(),
'max_rows_limit': self.max_rows,
'dataframes': refs
}
async def drop_data(self, data_ref: str) -> Dict:
"""
Remove a DataFrame from storage.
Args:
data_ref: Reference to drop
Returns:
Dict with success status
"""
if self.store.drop(data_ref):
return {'success': True, 'dropped': data_ref}
return {'error': f'DataFrame not found: {data_ref}'}
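# Typical lifecycle sketch (hypothetical file and ref): load -> transform -> inspect -> drop.
#   ref = (await tools.read_csv('data.csv'))['data_ref']
#   await tools.list_data()     # shows row counts and memory per stored DataFrame
#   await tools.drop_data(ref)  # free memory when done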

View File

@@ -1,538 +0,0 @@
"""
PostgreSQL/PostGIS MCP Tools.
Provides database operations with connection pooling and PostGIS support.
"""
import asyncio
import logging
from typing import Dict, List, Optional, Any
import json
from .data_store import DataStore
from .config import load_config
logger = logging.getLogger(__name__)
# Optional imports - gracefully handle missing dependencies
try:
import asyncpg
ASYNCPG_AVAILABLE = True
except ImportError:
ASYNCPG_AVAILABLE = False
logger.warning("asyncpg not available - PostgreSQL tools will be disabled")
try:
import pandas as pd
PANDAS_AVAILABLE = True
except ImportError:
PANDAS_AVAILABLE = False
class PostgresTools:
"""PostgreSQL/PostGIS database tools"""
def __init__(self):
self.store = DataStore.get_instance()
self.config = load_config()
self.pool: Optional[Any] = None
self.max_rows = self.config.get('max_rows', 100_000)
async def _get_pool(self):
"""Get or create connection pool"""
if not ASYNCPG_AVAILABLE:
raise RuntimeError("asyncpg not installed - run: pip install asyncpg")
if self.pool is None:
postgres_url = self.config.get('postgres_url')
if not postgres_url:
raise RuntimeError(
"PostgreSQL not configured. Set POSTGRES_URL in "
"~/.config/claude/postgres.env"
)
self.pool = await asyncpg.create_pool(postgres_url, min_size=1, max_size=5)
return self.pool
async def pg_connect(self) -> Dict:
"""
Test PostgreSQL connection and return status.
Returns:
Dict with connection status, version, and database info
"""
if not ASYNCPG_AVAILABLE:
return {
'connected': False,
'error': 'asyncpg not installed',
'suggestion': 'pip install asyncpg'
}
postgres_url = self.config.get('postgres_url')
if not postgres_url:
return {
'connected': False,
'error': 'POSTGRES_URL not configured',
'suggestion': 'Create ~/.config/claude/postgres.env with POSTGRES_URL=postgresql://...'
}
try:
pool = await self._get_pool()
async with pool.acquire() as conn:
version = await conn.fetchval('SELECT version()')
db_name = await conn.fetchval('SELECT current_database()')
user = await conn.fetchval('SELECT current_user')
# Check for PostGIS
postgis_version = None
try:
postgis_version = await conn.fetchval('SELECT PostGIS_Version()')
except Exception:
pass
return {
'connected': True,
'database': db_name,
'user': user,
'version': version.split(',')[0] if version else 'Unknown',
'postgis_version': postgis_version,
'postgis_available': postgis_version is not None
}
except Exception as e:
logger.error(f"pg_connect failed: {e}")
return {
'connected': False,
'error': str(e)
}
async def pg_query(
self,
query: str,
params: Optional[List] = None,
name: Optional[str] = None
) -> Dict:
"""
Execute SELECT query and return results as data_ref.
Args:
query: SQL SELECT query
params: Query parameters (positional, use $1, $2, etc.)
name: Optional name for result data_ref
Returns:
Dict with data_ref for results or error
"""
if not PANDAS_AVAILABLE:
return {'error': 'pandas not available'}
try:
pool = await self._get_pool()
async with pool.acquire() as conn:
if params:
rows = await conn.fetch(query, *params)
else:
rows = await conn.fetch(query)
if not rows:
return {
'data_ref': None,
'rows': 0,
'message': 'Query returned no results'
}
# Convert to DataFrame
df = pd.DataFrame([dict(r) for r in rows])
# Check row limit
check = self.store.check_row_limit(len(df))
if check['exceeded']:
return {
'error': 'row_limit_exceeded',
**check,
'preview': df.head(100).to_dict(orient='records')
}
# Store result
data_ref = self.store.store(df, name=name, source=f"pg_query: {query[:100]}{'...' if len(query) > 100 else ''}")
return {
'data_ref': data_ref,
'rows': len(df),
'columns': list(df.columns)
}
except Exception as e:
logger.error(f"pg_query failed: {e}")
return {'error': str(e)}
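# Usage sketch (asyncpg positional placeholders):
#   await tools.pg_query('SELECT * FROM users WHERE age > $1', params=[30], name='adults')
# Results land in the DataStore and come back as a data_ref usable by the pandas tools.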
async def pg_execute(
self,
query: str,
params: Optional[List] = None
) -> Dict:
"""
Execute INSERT/UPDATE/DELETE query.
Args:
query: SQL DML query
params: Query parameters
Returns:
Dict with affected rows count
"""
try:
pool = await self._get_pool()
async with pool.acquire() as conn:
if params:
result = await conn.execute(query, *params)
else:
result = await conn.execute(query)
# Parse the command tag (e.g., "INSERT 0 1" or "UPDATE 5"); tags for
# non-DML commands like "CREATE TABLE" carry no trailing count.
parts = result.split()
affected = int(parts[-1]) if parts and parts[-1].isdigit() else 0
return {
'success': True,
'command': parts[0] if parts else 'UNKNOWN',
'affected_rows': affected
}
except Exception as e:
logger.error(f"pg_execute failed: {e}")
return {'error': str(e)}
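# Usage sketch:
#   await tools.pg_execute('UPDATE users SET active = $1 WHERE id = $2', params=[True, 42])
# asyncpg returns a command tag string such as 'UPDATE 1', parsed above into affected_rows.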
async def pg_tables(self, schema: str = 'public') -> Dict:
"""
List all tables in schema.
Args:
schema: Schema name (default: public)
Returns:
Dict with list of tables
"""
query = """
SELECT
table_name,
table_type,
(SELECT count(*) FROM information_schema.columns c
WHERE c.table_schema = t.table_schema
AND c.table_name = t.table_name) as column_count
FROM information_schema.tables t
WHERE table_schema = $1
ORDER BY table_name
"""
try:
pool = await self._get_pool()
async with pool.acquire() as conn:
rows = await conn.fetch(query, schema)
tables = [
{
'name': r['table_name'],
'type': r['table_type'],
'columns': r['column_count']
}
for r in rows
]
return {
'schema': schema,
'count': len(tables),
'tables': tables
}
except Exception as e:
logger.error(f"pg_tables failed: {e}")
return {'error': str(e)}
async def pg_columns(self, table: str, schema: str = 'public') -> Dict:
"""
Get column information for a table.
Args:
table: Table name
schema: Schema name (default: public)
Returns:
Dict with column details
"""
query = """
SELECT
column_name,
data_type,
udt_name,
is_nullable,
column_default,
character_maximum_length,
numeric_precision
FROM information_schema.columns
WHERE table_schema = $1 AND table_name = $2
ORDER BY ordinal_position
"""
try:
pool = await self._get_pool()
async with pool.acquire() as conn:
rows = await conn.fetch(query, schema, table)
columns = [
{
'name': r['column_name'],
'type': r['data_type'],
'udt': r['udt_name'],
'nullable': r['is_nullable'] == 'YES',
'default': r['column_default'],
'max_length': r['character_maximum_length'],
'precision': r['numeric_precision']
}
for r in rows
]
return {
'table': f'{schema}.{table}',
'column_count': len(columns),
'columns': columns
}
except Exception as e:
logger.error(f"pg_columns failed: {e}")
return {'error': str(e)}
async def pg_schemas(self) -> Dict:
"""
List all schemas in database.
Returns:
Dict with list of schemas
"""
query = """
SELECT schema_name
FROM information_schema.schemata
WHERE schema_name NOT IN ('pg_catalog', 'information_schema', 'pg_toast')
ORDER BY schema_name
"""
try:
pool = await self._get_pool()
async with pool.acquire() as conn:
rows = await conn.fetch(query)
schemas = [r['schema_name'] for r in rows]
return {
'count': len(schemas),
'schemas': schemas
}
except Exception as e:
logger.error(f"pg_schemas failed: {e}")
return {'error': str(e)}
async def st_tables(self, schema: str = 'public') -> Dict:
"""
List PostGIS-enabled tables.
Args:
schema: Schema name (default: public)
Returns:
Dict with list of tables with geometry columns
"""
query = """
SELECT
f_table_name as table_name,
f_geometry_column as geometry_column,
type as geometry_type,
srid,
coord_dimension
FROM geometry_columns
WHERE f_table_schema = $1
ORDER BY f_table_name
"""
try:
pool = await self._get_pool()
async with pool.acquire() as conn:
rows = await conn.fetch(query, schema)
tables = [
{
'table': r['table_name'],
'geometry_column': r['geometry_column'],
'geometry_type': r['geometry_type'],
'srid': r['srid'],
'dimensions': r['coord_dimension']
}
for r in rows
]
return {
'schema': schema,
'count': len(tables),
'postgis_tables': tables
}
except Exception as e:
if 'geometry_columns' in str(e):
return {
'error': 'PostGIS not installed or extension not enabled',
'suggestion': 'Run: CREATE EXTENSION IF NOT EXISTS postgis;'
}
logger.error(f"st_tables failed: {e}")
return {'error': str(e)}
async def st_geometry_type(self, table: str, column: str, schema: str = 'public') -> Dict:
"""
Get geometry type of a column.
Args:
table: Table name
column: Geometry column name
schema: Schema name
Returns:
Dict with geometry type information
"""
query = f"""
SELECT DISTINCT ST_GeometryType({column}) as geom_type
FROM {schema}.{table}
WHERE {column} IS NOT NULL
LIMIT 10
"""
try:
pool = await self._get_pool()
async with pool.acquire() as conn:
rows = await conn.fetch(query)
types = [r['geom_type'] for r in rows]
return {
'table': f'{schema}.{table}',
'column': column,
'geometry_types': types
}
except Exception as e:
logger.error(f"st_geometry_type failed: {e}")
return {'error': str(e)}
async def st_srid(self, table: str, column: str, schema: str = 'public') -> Dict:
"""
Get SRID of geometry column.
Args:
table: Table name
column: Geometry column name
schema: Schema name
Returns:
Dict with SRID information
"""
query = f"""
SELECT DISTINCT ST_SRID({column}) as srid
FROM {schema}.{table}
WHERE {column} IS NOT NULL
LIMIT 1
"""
try:
pool = await self._get_pool()
async with pool.acquire() as conn:
row = await conn.fetchrow(query)
srid = row['srid'] if row else None
# Get SRID description
srid_info = None
if srid:
srid_query = """
SELECT srtext, proj4text
FROM spatial_ref_sys
WHERE srid = $1
"""
srid_row = await conn.fetchrow(srid_query, srid)
if srid_row:
srid_info = {
'description': srid_row['srtext'][:200] if srid_row['srtext'] else None,
'proj4': srid_row['proj4text']
}
return {
'table': f'{schema}.{table}',
'column': column,
'srid': srid,
'info': srid_info
}
except Exception as e:
logger.error(f"st_srid failed: {e}")
return {'error': str(e)}
async def st_extent(self, table: str, column: str, schema: str = 'public') -> Dict:
"""
Get bounding box of all geometries.
Args:
table: Table name
column: Geometry column name
schema: Schema name
Returns:
Dict with bounding box coordinates
"""
query = f"""
SELECT
ST_XMin(extent) as xmin,
ST_YMin(extent) as ymin,
ST_XMax(extent) as xmax,
ST_YMax(extent) as ymax
FROM (
SELECT ST_Extent({column}) as extent
FROM {schema}.{table}
) sub
"""
try:
pool = await self._get_pool()
async with pool.acquire() as conn:
row = await conn.fetchrow(query)
if row and row['xmin'] is not None:
return {
'table': f'{schema}.{table}',
'column': column,
'bbox': {
'xmin': float(row['xmin']),
'ymin': float(row['ymin']),
'xmax': float(row['xmax']),
'ymax': float(row['ymax'])
}
}
return {
'table': f'{schema}.{table}',
'column': column,
'bbox': None,
'message': 'No geometries found or all NULL'
}
except Exception as e:
logger.error(f"st_extent failed: {e}")
return {'error': str(e)}
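# Usage sketch (hypothetical PostGIS table):
#   await tools.st_extent('parcels', 'geom')
#   -> {'table': 'public.parcels', 'column': 'geom',
#       'bbox': {'xmin': ..., 'ymin': ..., 'xmax': ..., 'ymax': ...}}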
async def close(self):
"""Close connection pool"""
if self.pool:
await self.pool.close()
self.pool = None
def check_connection() -> None:
"""
Check PostgreSQL connection for SessionStart hook.
Prints warning to stderr if connection fails.
"""
import sys
config = load_config()
if not config.get('postgres_url'):
print(
"[data-platform] PostgreSQL not configured (POSTGRES_URL not set)",
file=sys.stderr
)
return
async def test():
try:
if not ASYNCPG_AVAILABLE:
print(
"[data-platform] asyncpg not installed - PostgreSQL tools unavailable",
file=sys.stderr
)
return
conn = await asyncpg.connect(config['postgres_url'], timeout=5)
await conn.close()
print("[data-platform] PostgreSQL connection OK", file=sys.stderr)
except Exception as e:
print(
f"[data-platform] PostgreSQL connection failed: {e}",
file=sys.stderr
)
asyncio.run(test())
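# Hook usage sketch (assumed SessionStart wiring):
#   python -c "from mcp_server.postgres_tools import check_connection; check_connection()"
# Output goes to stderr so it never pollutes the MCP stdio channel.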

View File

@@ -1,795 +0,0 @@
"""
MCP Server entry point for Data Platform integration.
Provides pandas, PostgreSQL/PostGIS, and dbt tools to Claude Code via JSON-RPC 2.0 over stdio.
"""
import asyncio
import logging
import json
from mcp.server import Server
from mcp.server.stdio import stdio_server
from mcp.types import Tool, TextContent
from .config import DataPlatformConfig
from .data_store import DataStore
from .pandas_tools import PandasTools
from .postgres_tools import PostgresTools
from .dbt_tools import DbtTools
# Suppress noisy MCP validation warnings on stderr
logging.basicConfig(level=logging.INFO)
logging.getLogger("root").setLevel(logging.ERROR)
logging.getLogger("mcp").setLevel(logging.ERROR)
logger = logging.getLogger(__name__)
class DataPlatformMCPServer:
"""MCP Server for data platform integration"""
def __init__(self):
self.server = Server("data-platform-mcp")
self.config = None
self.pandas_tools = None
self.postgres_tools = None
self.dbt_tools = None
async def initialize(self):
"""Initialize server and load configuration."""
try:
config_loader = DataPlatformConfig()
self.config = config_loader.load()
self.pandas_tools = PandasTools()
self.postgres_tools = PostgresTools()
self.dbt_tools = DbtTools()
# Log available capabilities
caps = []
caps.append("pandas")
if self.config.get('postgres_available'):
caps.append("PostgreSQL")
if self.config.get('dbt_available'):
caps.append("dbt")
logger.info(f"Data Platform MCP Server initialized with: {', '.join(caps)}")
except Exception as e:
logger.error(f"Failed to initialize: {e}")
raise
def setup_tools(self):
"""Register all available tools with the MCP server"""
@self.server.list_tools()
async def list_tools() -> list[Tool]:
"""Return list of available tools"""
tools = [
# pandas tools - always available
Tool(
name="read_csv",
description="Load CSV file into DataFrame",
inputSchema={
"type": "object",
"properties": {
"file_path": {
"type": "string",
"description": "Path to CSV file"
},
"name": {
"type": "string",
"description": "Optional name for data_ref"
},
"chunk_size": {
"type": "integer",
"description": "Process in chunks of this size"
}
},
"required": ["file_path"]
}
),
Tool(
name="read_parquet",
description="Load Parquet file into DataFrame",
inputSchema={
"type": "object",
"properties": {
"file_path": {
"type": "string",
"description": "Path to Parquet file"
},
"name": {
"type": "string",
"description": "Optional name for data_ref"
},
"columns": {
"type": "array",
"items": {"type": "string"},
"description": "Optional list of columns to load"
}
},
"required": ["file_path"]
}
),
Tool(
name="read_json",
description="Load JSON/JSONL file into DataFrame",
inputSchema={
"type": "object",
"properties": {
"file_path": {
"type": "string",
"description": "Path to JSON file"
},
"name": {
"type": "string",
"description": "Optional name for data_ref"
},
"lines": {
"type": "boolean",
"default": False,
"description": "Read as JSON Lines format"
}
},
"required": ["file_path"]
}
),
Tool(
name="to_csv",
description="Export DataFrame to CSV file",
inputSchema={
"type": "object",
"properties": {
"data_ref": {
"type": "string",
"description": "Reference to stored DataFrame"
},
"file_path": {
"type": "string",
"description": "Output file path"
},
"index": {
"type": "boolean",
"default": False,
"description": "Include index column"
}
},
"required": ["data_ref", "file_path"]
}
),
Tool(
name="to_parquet",
description="Export DataFrame to Parquet file",
inputSchema={
"type": "object",
"properties": {
"data_ref": {
"type": "string",
"description": "Reference to stored DataFrame"
},
"file_path": {
"type": "string",
"description": "Output file path"
},
"compression": {
"type": "string",
"default": "snappy",
"description": "Compression codec"
}
},
"required": ["data_ref", "file_path"]
}
),
Tool(
name="describe",
description="Get statistical summary of DataFrame",
inputSchema={
"type": "object",
"properties": {
"data_ref": {
"type": "string",
"description": "Reference to stored DataFrame"
}
},
"required": ["data_ref"]
}
),
Tool(
name="head",
description="Get first N rows of DataFrame",
inputSchema={
"type": "object",
"properties": {
"data_ref": {
"type": "string",
"description": "Reference to stored DataFrame"
},
"n": {
"type": "integer",
"default": 10,
"description": "Number of rows"
}
},
"required": ["data_ref"]
}
),
Tool(
name="tail",
description="Get last N rows of DataFrame",
inputSchema={
"type": "object",
"properties": {
"data_ref": {
"type": "string",
"description": "Reference to stored DataFrame"
},
"n": {
"type": "integer",
"default": 10,
"description": "Number of rows"
}
},
"required": ["data_ref"]
}
),
Tool(
name="filter",
description="Filter DataFrame rows by condition",
inputSchema={
"type": "object",
"properties": {
"data_ref": {
"type": "string",
"description": "Reference to stored DataFrame"
},
"condition": {
"type": "string",
"description": "pandas query string (e.g., 'age > 30 and city == \"NYC\"')"
},
"name": {
"type": "string",
"description": "Optional name for result data_ref"
}
},
"required": ["data_ref", "condition"]
}
),
Tool(
name="select",
description="Select specific columns from DataFrame",
inputSchema={
"type": "object",
"properties": {
"data_ref": {
"type": "string",
"description": "Reference to stored DataFrame"
},
"columns": {
"type": "array",
"items": {"type": "string"},
"description": "List of column names to select"
},
"name": {
"type": "string",
"description": "Optional name for result data_ref"
}
},
"required": ["data_ref", "columns"]
}
),
Tool(
name="groupby",
description="Group DataFrame and aggregate",
inputSchema={
"type": "object",
"properties": {
"data_ref": {
"type": "string",
"description": "Reference to stored DataFrame"
},
"by": {
"oneOf": [
{"type": "string"},
{"type": "array", "items": {"type": "string"}}
],
"description": "Column(s) to group by"
},
"agg": {
"type": "object",
"description": "Aggregation dict (e.g., {\"sales\": \"sum\", \"count\": \"mean\"})"
},
"name": {
"type": "string",
"description": "Optional name for result data_ref"
}
},
"required": ["data_ref", "by", "agg"]
}
),
Tool(
name="join",
description="Join two DataFrames",
inputSchema={
"type": "object",
"properties": {
"left_ref": {
"type": "string",
"description": "Reference to left DataFrame"
},
"right_ref": {
"type": "string",
"description": "Reference to right DataFrame"
},
"on": {
"oneOf": [
{"type": "string"},
{"type": "array", "items": {"type": "string"}}
],
"description": "Column(s) to join on (if same name in both)"
},
"left_on": {
"oneOf": [
{"type": "string"},
{"type": "array", "items": {"type": "string"}}
],
"description": "Left join column(s)"
},
"right_on": {
"oneOf": [
{"type": "string"},
{"type": "array", "items": {"type": "string"}}
],
"description": "Right join column(s)"
},
"how": {
"type": "string",
"enum": ["inner", "left", "right", "outer"],
"default": "inner",
"description": "Join type"
},
"name": {
"type": "string",
"description": "Optional name for result data_ref"
}
},
"required": ["left_ref", "right_ref"]
}
),
Tool(
name="list_data",
description="List all stored DataFrames",
inputSchema={
"type": "object",
"properties": {}
}
),
Tool(
name="drop_data",
description="Remove a DataFrame from storage",
inputSchema={
"type": "object",
"properties": {
"data_ref": {
"type": "string",
"description": "Reference to drop"
}
},
"required": ["data_ref"]
}
),
# PostgreSQL tools
Tool(
name="pg_connect",
description="Test PostgreSQL connection and return status",
inputSchema={
"type": "object",
"properties": {}
}
),
Tool(
name="pg_query",
description="Execute SELECT query and return results as data_ref",
inputSchema={
"type": "object",
"properties": {
"query": {
"type": "string",
"description": "SQL SELECT query"
},
"params": {
"type": "array",
"items": {},
"description": "Query parameters (use $1, $2, etc.)"
},
"name": {
"type": "string",
"description": "Optional name for result data_ref"
}
},
"required": ["query"]
}
),
Tool(
name="pg_execute",
description="Execute INSERT/UPDATE/DELETE query",
inputSchema={
"type": "object",
"properties": {
"query": {
"type": "string",
"description": "SQL DML query"
},
"params": {
"type": "array",
"items": {},
"description": "Query parameters"
}
},
"required": ["query"]
}
),
Tool(
name="pg_tables",
description="List all tables in schema",
inputSchema={
"type": "object",
"properties": {
"schema": {
"type": "string",
"default": "public",
"description": "Schema name"
}
}
}
),
Tool(
name="pg_columns",
description="Get column information for a table",
inputSchema={
"type": "object",
"properties": {
"table": {
"type": "string",
"description": "Table name"
},
"schema": {
"type": "string",
"default": "public",
"description": "Schema name"
}
},
"required": ["table"]
}
),
Tool(
name="pg_schemas",
description="List all schemas in database",
inputSchema={
"type": "object",
"properties": {}
}
),
# PostGIS tools
Tool(
name="st_tables",
description="List PostGIS-enabled tables",
inputSchema={
"type": "object",
"properties": {
"schema": {
"type": "string",
"default": "public",
"description": "Schema name"
}
}
}
),
Tool(
name="st_geometry_type",
description="Get geometry type of a column",
inputSchema={
"type": "object",
"properties": {
"table": {
"type": "string",
"description": "Table name"
},
"column": {
"type": "string",
"description": "Geometry column name"
},
"schema": {
"type": "string",
"default": "public",
"description": "Schema name"
}
},
"required": ["table", "column"]
}
),
Tool(
name="st_srid",
description="Get SRID of geometry column",
inputSchema={
"type": "object",
"properties": {
"table": {
"type": "string",
"description": "Table name"
},
"column": {
"type": "string",
"description": "Geometry column name"
},
"schema": {
"type": "string",
"default": "public",
"description": "Schema name"
}
},
"required": ["table", "column"]
}
),
Tool(
name="st_extent",
description="Get bounding box of all geometries",
inputSchema={
"type": "object",
"properties": {
"table": {
"type": "string",
"description": "Table name"
},
"column": {
"type": "string",
"description": "Geometry column name"
},
"schema": {
"type": "string",
"default": "public",
"description": "Schema name"
}
},
"required": ["table", "column"]
}
),
# dbt tools
Tool(
name="dbt_parse",
description="Validate dbt project (pre-flight check)",
inputSchema={
"type": "object",
"properties": {}
}
),
Tool(
name="dbt_run",
description="Run dbt models with pre-validation",
inputSchema={
"type": "object",
"properties": {
"select": {
"type": "string",
"description": "Model selection (e.g., 'model_name', '+model_name', 'tag:daily')"
},
"exclude": {
"type": "string",
"description": "Models to exclude"
},
"full_refresh": {
"type": "boolean",
"default": False,
"description": "Rebuild incremental models"
}
}
}
),
Tool(
name="dbt_test",
description="Run dbt tests",
inputSchema={
"type": "object",
"properties": {
"select": {
"type": "string",
"description": "Test selection"
},
"exclude": {
"type": "string",
"description": "Tests to exclude"
}
}
}
),
Tool(
name="dbt_build",
description="Run dbt build (run + test) with pre-validation",
inputSchema={
"type": "object",
"properties": {
"select": {
"type": "string",
"description": "Model/test selection"
},
"exclude": {
"type": "string",
"description": "Resources to exclude"
},
"full_refresh": {
"type": "boolean",
"default": False,
"description": "Rebuild incremental models"
}
}
}
),
Tool(
name="dbt_compile",
description="Compile dbt models to SQL without executing",
inputSchema={
"type": "object",
"properties": {
"select": {
"type": "string",
"description": "Model selection"
}
}
}
),
Tool(
name="dbt_ls",
description="List dbt resources",
inputSchema={
"type": "object",
"properties": {
"select": {
"type": "string",
"description": "Resource selection"
},
"resource_type": {
"type": "string",
"enum": ["model", "test", "seed", "snapshot", "source"],
"description": "Filter by type"
},
"output": {
"type": "string",
"enum": ["name", "path", "json"],
"default": "name",
"description": "Output format"
}
}
}
),
Tool(
name="dbt_docs_generate",
description="Generate dbt documentation",
inputSchema={
"type": "object",
"properties": {}
}
),
Tool(
name="dbt_lineage",
description="Get model dependencies and lineage",
inputSchema={
"type": "object",
"properties": {
"model": {
"type": "string",
"description": "Model name to analyze"
}
},
"required": ["model"]
}
)
]
return tools
@self.server.call_tool()
async def call_tool(name: str, arguments: dict) -> list[TextContent]:
"""Handle tool invocation."""
try:
# Route to appropriate tool handler
# pandas tools
if name == "read_csv":
result = await self.pandas_tools.read_csv(**arguments)
elif name == "read_parquet":
result = await self.pandas_tools.read_parquet(**arguments)
elif name == "read_json":
result = await self.pandas_tools.read_json(**arguments)
elif name == "to_csv":
result = await self.pandas_tools.to_csv(**arguments)
elif name == "to_parquet":
result = await self.pandas_tools.to_parquet(**arguments)
elif name == "describe":
result = await self.pandas_tools.describe(**arguments)
elif name == "head":
result = await self.pandas_tools.head(**arguments)
elif name == "tail":
result = await self.pandas_tools.tail(**arguments)
elif name == "filter":
result = await self.pandas_tools.filter(**arguments)
elif name == "select":
result = await self.pandas_tools.select(**arguments)
elif name == "groupby":
result = await self.pandas_tools.groupby(**arguments)
elif name == "join":
result = await self.pandas_tools.join(**arguments)
elif name == "list_data":
result = await self.pandas_tools.list_data()
elif name == "drop_data":
result = await self.pandas_tools.drop_data(**arguments)
# PostgreSQL tools
elif name == "pg_connect":
result = await self.postgres_tools.pg_connect()
elif name == "pg_query":
result = await self.postgres_tools.pg_query(**arguments)
elif name == "pg_execute":
result = await self.postgres_tools.pg_execute(**arguments)
elif name == "pg_tables":
result = await self.postgres_tools.pg_tables(**arguments)
elif name == "pg_columns":
result = await self.postgres_tools.pg_columns(**arguments)
elif name == "pg_schemas":
result = await self.postgres_tools.pg_schemas()
# PostGIS tools
elif name == "st_tables":
result = await self.postgres_tools.st_tables(**arguments)
elif name == "st_geometry_type":
result = await self.postgres_tools.st_geometry_type(**arguments)
elif name == "st_srid":
result = await self.postgres_tools.st_srid(**arguments)
elif name == "st_extent":
result = await self.postgres_tools.st_extent(**arguments)
# dbt tools
elif name == "dbt_parse":
result = await self.dbt_tools.dbt_parse()
elif name == "dbt_run":
result = await self.dbt_tools.dbt_run(**arguments)
elif name == "dbt_test":
result = await self.dbt_tools.dbt_test(**arguments)
elif name == "dbt_build":
result = await self.dbt_tools.dbt_build(**arguments)
elif name == "dbt_compile":
result = await self.dbt_tools.dbt_compile(**arguments)
elif name == "dbt_ls":
result = await self.dbt_tools.dbt_ls(**arguments)
elif name == "dbt_docs_generate":
result = await self.dbt_tools.dbt_docs_generate()
elif name == "dbt_lineage":
result = await self.dbt_tools.dbt_lineage(**arguments)
else:
raise ValueError(f"Unknown tool: {name}")
return [TextContent(
type="text",
text=json.dumps(result, indent=2, default=str)
)]
except Exception as e:
logger.error(f"Tool {name} failed: {e}")
return [TextContent(
type="text",
text=json.dumps({"error": str(e)}, indent=2)
)]
async def run(self):
"""Run the MCP server"""
await self.initialize()
self.setup_tools()
async with stdio_server() as (read_stream, write_stream):
await self.server.run(
read_stream,
write_stream,
self.server.create_initialization_options()
)
async def main():
"""Main entry point"""
server = DataPlatformMCPServer()
await server.run()
if __name__ == "__main__":
asyncio.run(main())
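# Protocol sketch: Claude Code speaks JSON-RPC 2.0 over stdio, e.g. a request like
#   {"jsonrpc": "2.0", "id": 1, "method": "tools/call",
#    "params": {"name": "head", "arguments": {"data_ref": "people", "n": 5}}}
# is routed through call_tool() above and answered as TextContent-wrapped JSON.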

View File

@@ -1,49 +0,0 @@
[build-system]
requires = ["setuptools>=61.0", "wheel"]
build-backend = "setuptools.build_meta"
[project]
name = "data-platform-mcp"
version = "1.0.0"
description = "MCP Server for data engineering with pandas, PostgreSQL/PostGIS, and dbt"
readme = "README.md"
license = {text = "MIT"}
requires-python = ">=3.10"
authors = [
{name = "Leo Miranda"}
]
classifiers = [
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
]
dependencies = [
"mcp>=0.9.0",
"pandas>=2.0.0",
"pyarrow>=14.0.0",
"asyncpg>=0.29.0",
"geoalchemy2>=0.14.0",
"shapely>=2.0.0",
"dbt-core>=1.9.0",
"dbt-postgres>=1.9.0",
"python-dotenv>=1.0.0",
"pydantic>=2.5.0",
]
[project.optional-dependencies]
dev = [
"pytest>=7.4.3",
"pytest-asyncio>=0.23.0",
]
[tool.setuptools.packages.find]
where = ["."]
include = ["mcp_server*"]
[tool.pytest.ini_options]
asyncio_mode = "auto"
testpaths = ["tests"]

View File

@@ -1,23 +0,0 @@
# MCP SDK
mcp>=0.9.0
# Data Processing
pandas>=2.0.0
pyarrow>=14.0.0
# PostgreSQL/PostGIS
asyncpg>=0.29.0
geoalchemy2>=0.14.0
shapely>=2.0.0
# dbt
dbt-core>=1.9.0
dbt-postgres>=1.9.0
# Utilities
python-dotenv>=1.0.0
pydantic>=2.5.0
# Testing
pytest>=7.4.3
pytest-asyncio>=0.23.0

View File

@@ -1,21 +0,0 @@
#!/bin/bash
# Capture original working directory before any cd operations
# This should be the user's project directory when launched by Claude Code
export CLAUDE_PROJECT_DIR="${CLAUDE_PROJECT_DIR:-$PWD}"
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
CACHE_VENV="$HOME/.cache/claude-mcp-venvs/leo-claude-mktplace/data-platform/.venv"
LOCAL_VENV="$SCRIPT_DIR/.venv"
if [[ -f "$CACHE_VENV/bin/python" ]]; then
PYTHON="$CACHE_VENV/bin/python"
elif [[ -f "$LOCAL_VENV/bin/python" ]]; then
PYTHON="$LOCAL_VENV/bin/python"
else
echo "ERROR: No venv found. Run: ./scripts/setup-venvs.sh" >&2
exit 1
fi
cd "$SCRIPT_DIR"
export PYTHONPATH="$SCRIPT_DIR"
exec "$PYTHON" -m mcp_server.server "$@"

View File

@@ -1,3 +0,0 @@
"""
Tests for Data Platform MCP Server.
"""

View File

@@ -1,239 +0,0 @@
"""
Unit tests for configuration loader.
"""
import pytest
from pathlib import Path
import os
def test_load_system_config(tmp_path, monkeypatch):
"""Test loading system-level PostgreSQL configuration"""
# Import here to avoid import errors before setup
from mcp_server.config import DataPlatformConfig
# Mock home directory
config_dir = tmp_path / '.config' / 'claude'
config_dir.mkdir(parents=True)
config_file = config_dir / 'postgres.env'
config_file.write_text(
"POSTGRES_URL=postgresql://user:pass@localhost:5432/testdb\n"
)
monkeypatch.setenv('HOME', str(tmp_path))
monkeypatch.chdir(tmp_path)
config = DataPlatformConfig()
result = config.load()
assert result['postgres_url'] == 'postgresql://user:pass@localhost:5432/testdb'
assert result['postgres_available'] is True
def test_postgres_optional(tmp_path, monkeypatch):
"""Test that PostgreSQL configuration is optional"""
from mcp_server.config import DataPlatformConfig
# No postgres.env file
monkeypatch.setenv('HOME', str(tmp_path))
monkeypatch.chdir(tmp_path)
# Clear any existing env vars
monkeypatch.delenv('POSTGRES_URL', raising=False)
config = DataPlatformConfig()
result = config.load()
assert result['postgres_url'] is None
assert result['postgres_available'] is False
def test_project_config_override(tmp_path, monkeypatch):
"""Test that project config overrides system config"""
from mcp_server.config import DataPlatformConfig
# Set up system config
system_config_dir = tmp_path / '.config' / 'claude'
system_config_dir.mkdir(parents=True)
system_config = system_config_dir / 'postgres.env'
system_config.write_text(
"POSTGRES_URL=postgresql://system:pass@localhost:5432/systemdb\n"
)
# Set up project config
project_dir = tmp_path / 'project'
project_dir.mkdir()
project_config = project_dir / '.env'
project_config.write_text(
"POSTGRES_URL=postgresql://project:pass@localhost:5432/projectdb\n"
"DBT_PROJECT_DIR=/path/to/dbt\n"
)
monkeypatch.setenv('HOME', str(tmp_path))
monkeypatch.chdir(project_dir)
config = DataPlatformConfig()
result = config.load()
# Project config should override
assert result['postgres_url'] == 'postgresql://project:pass@localhost:5432/projectdb'
assert result['dbt_project_dir'] == '/path/to/dbt'
def test_max_rows_config(tmp_path, monkeypatch):
"""Test max rows configuration"""
from mcp_server.config import DataPlatformConfig
project_dir = tmp_path / 'project'
project_dir.mkdir()
project_config = project_dir / '.env'
project_config.write_text("DATA_PLATFORM_MAX_ROWS=50000\n")
monkeypatch.setenv('HOME', str(tmp_path))
monkeypatch.chdir(project_dir)
config = DataPlatformConfig()
result = config.load()
assert result['max_rows'] == 50000
def test_default_max_rows(tmp_path, monkeypatch):
"""Test default max rows value"""
from mcp_server.config import DataPlatformConfig
monkeypatch.setenv('HOME', str(tmp_path))
monkeypatch.chdir(tmp_path)
# Clear any existing env vars
monkeypatch.delenv('DATA_PLATFORM_MAX_ROWS', raising=False)
config = DataPlatformConfig()
result = config.load()
assert result['max_rows'] == 100_000 # Default value
def test_dbt_auto_detection(tmp_path, monkeypatch):
"""Test automatic dbt project detection"""
from mcp_server.config import DataPlatformConfig
# Create project with dbt_project.yml
project_dir = tmp_path / 'project'
project_dir.mkdir()
(project_dir / 'dbt_project.yml').write_text("name: test_project\n")
monkeypatch.setenv('HOME', str(tmp_path))
monkeypatch.chdir(project_dir)
# Clear PWD and DBT_PROJECT_DIR to ensure auto-detection
monkeypatch.delenv('PWD', raising=False)
monkeypatch.delenv('DBT_PROJECT_DIR', raising=False)
monkeypatch.delenv('CLAUDE_PROJECT_DIR', raising=False)
config = DataPlatformConfig()
result = config.load()
assert result['dbt_project_dir'] == str(project_dir)
assert result['dbt_available'] is True
def test_dbt_subdirectory_detection(tmp_path, monkeypatch):
"""Test dbt project detection in subdirectory"""
from mcp_server.config import DataPlatformConfig
# Create project with dbt in subdirectory
project_dir = tmp_path / 'project'
project_dir.mkdir()
# Need a marker file for _find_project_directory to find the project
(project_dir / '.git').mkdir()
dbt_dir = project_dir / 'transform'
dbt_dir.mkdir()
(dbt_dir / 'dbt_project.yml').write_text("name: test_project\n")
monkeypatch.setenv('HOME', str(tmp_path))
monkeypatch.chdir(project_dir)
# Clear env vars to ensure auto-detection
monkeypatch.delenv('PWD', raising=False)
monkeypatch.delenv('DBT_PROJECT_DIR', raising=False)
monkeypatch.delenv('CLAUDE_PROJECT_DIR', raising=False)
config = DataPlatformConfig()
result = config.load()
assert result['dbt_project_dir'] == str(dbt_dir)
assert result['dbt_available'] is True
def test_no_dbt_project(tmp_path, monkeypatch):
"""Test when no dbt project exists"""
from mcp_server.config import DataPlatformConfig
project_dir = tmp_path / 'project'
project_dir.mkdir()
monkeypatch.setenv('HOME', str(tmp_path))
monkeypatch.chdir(project_dir)
# Clear any existing env vars
monkeypatch.delenv('DBT_PROJECT_DIR', raising=False)
config = DataPlatformConfig()
result = config.load()
assert result['dbt_project_dir'] is None
assert result['dbt_available'] is False
def test_find_project_directory_from_env(tmp_path, monkeypatch):
"""Test finding project directory from CLAUDE_PROJECT_DIR env var"""
from mcp_server.config import DataPlatformConfig
project_dir = tmp_path / 'my-project'
project_dir.mkdir()
(project_dir / '.git').mkdir()
monkeypatch.setenv('CLAUDE_PROJECT_DIR', str(project_dir))
config = DataPlatformConfig()
result = config._find_project_directory()
assert result == project_dir
def test_find_project_directory_from_cwd(tmp_path, monkeypatch):
"""Test finding project directory from cwd with .env file"""
from mcp_server.config import DataPlatformConfig
project_dir = tmp_path / 'project'
project_dir.mkdir()
(project_dir / '.env').write_text("TEST=value")
monkeypatch.chdir(project_dir)
monkeypatch.delenv('CLAUDE_PROJECT_DIR', raising=False)
monkeypatch.delenv('PWD', raising=False)
config = DataPlatformConfig()
result = config._find_project_directory()
assert result == project_dir
def test_find_project_directory_none_when_no_markers(tmp_path, monkeypatch):
"""Test returns None when no project markers found"""
from mcp_server.config import DataPlatformConfig
empty_dir = tmp_path / 'empty'
empty_dir.mkdir()
monkeypatch.chdir(empty_dir)
monkeypatch.delenv('CLAUDE_PROJECT_DIR', raising=False)
monkeypatch.delenv('PWD', raising=False)
monkeypatch.delenv('DBT_PROJECT_DIR', raising=False)
config = DataPlatformConfig()
result = config._find_project_directory()
assert result is None

View File

@@ -1,240 +0,0 @@
"""
Unit tests for Arrow IPC DataFrame registry.
"""
import pytest
import pandas as pd
import pyarrow as pa
def test_store_pandas_dataframe():
"""Test storing pandas DataFrame"""
from mcp_server.data_store import DataStore
# Create fresh instance for test
store = DataStore()
store._dataframes = {}
store._metadata = {}
df = pd.DataFrame({'a': [1, 2, 3], 'b': ['x', 'y', 'z']})
data_ref = store.store(df, name='test_df')
assert data_ref == 'test_df'
assert 'test_df' in store._dataframes
assert store._metadata['test_df'].rows == 3
assert store._metadata['test_df'].columns == 2
def test_store_arrow_table():
"""Test storing Arrow Table directly"""
from mcp_server.data_store import DataStore
store = DataStore()
store._dataframes = {}
store._metadata = {}
table = pa.table({'x': [1, 2, 3], 'y': [4, 5, 6]})
data_ref = store.store(table, name='arrow_test')
assert data_ref == 'arrow_test'
assert store._dataframes['arrow_test'].num_rows == 3
def test_store_auto_name():
"""Test auto-generated data_ref names"""
from mcp_server.data_store import DataStore
store = DataStore()
store._dataframes = {}
store._metadata = {}
df = pd.DataFrame({'a': [1, 2]})
data_ref = store.store(df)
assert data_ref.startswith('df_')
assert len(data_ref) == 11 # df_ + 8 hex chars
def test_get_dataframe():
"""Test retrieving stored DataFrame"""
from mcp_server.data_store import DataStore
store = DataStore()
store._dataframes = {}
store._metadata = {}
df = pd.DataFrame({'a': [1, 2, 3]})
store.store(df, name='get_test')
result = store.get('get_test')
assert result is not None
assert result.num_rows == 3
def test_get_pandas():
"""Test retrieving as pandas DataFrame"""
from mcp_server.data_store import DataStore
store = DataStore()
store._dataframes = {}
store._metadata = {}
df = pd.DataFrame({'a': [1, 2, 3], 'b': ['x', 'y', 'z']})
store.store(df, name='pandas_test')
result = store.get_pandas('pandas_test')
assert isinstance(result, pd.DataFrame)
assert list(result.columns) == ['a', 'b']
assert len(result) == 3
def test_get_nonexistent():
"""Test getting nonexistent data_ref returns None"""
from mcp_server.data_store import DataStore
store = DataStore()
store._dataframes = {}
store._metadata = {}
assert store.get('nonexistent') is None
assert store.get_pandas('nonexistent') is None
def test_list_refs():
"""Test listing all stored DataFrames"""
from mcp_server.data_store import DataStore
store = DataStore()
store._dataframes = {}
store._metadata = {}
store.store(pd.DataFrame({'a': [1, 2]}), name='df1')
store.store(pd.DataFrame({'b': [3, 4, 5]}), name='df2')
refs = store.list_refs()
assert len(refs) == 2
ref_names = [r['ref'] for r in refs]
assert 'df1' in ref_names
assert 'df2' in ref_names
def test_drop_dataframe():
"""Test dropping a DataFrame"""
from mcp_server.data_store import DataStore
store = DataStore()
store._dataframes = {}
store._metadata = {}
store.store(pd.DataFrame({'a': [1]}), name='drop_test')
assert store.get('drop_test') is not None
result = store.drop('drop_test')
assert result is True
assert store.get('drop_test') is None
def test_drop_nonexistent():
"""Test dropping nonexistent data_ref"""
from mcp_server.data_store import DataStore
store = DataStore()
store._dataframes = {}
store._metadata = {}
result = store.drop('nonexistent')
assert result is False
def test_clear():
"""Test clearing all DataFrames"""
from mcp_server.data_store import DataStore
store = DataStore()
store._dataframes = {}
store._metadata = {}
store.store(pd.DataFrame({'a': [1]}), name='df1')
store.store(pd.DataFrame({'b': [2]}), name='df2')
store.clear()
assert len(store.list_refs()) == 0
def test_get_info():
"""Test getting DataFrame metadata"""
from mcp_server.data_store import DataStore
store = DataStore()
store._dataframes = {}
store._metadata = {}
df = pd.DataFrame({'a': [1, 2, 3], 'b': ['x', 'y', 'z']})
store.store(df, name='info_test', source='test source')
info = store.get_info('info_test')
assert info.ref == 'info_test'
assert info.rows == 3
assert info.columns == 2
assert info.column_names == ['a', 'b']
assert info.source == 'test source'
assert info.memory_bytes > 0
def test_total_memory():
"""Test total memory calculation"""
from mcp_server.data_store import DataStore
store = DataStore()
store._dataframes = {}
store._metadata = {}
store.store(pd.DataFrame({'a': range(100)}), name='df1')
store.store(pd.DataFrame({'b': range(200)}), name='df2')
total = store.total_memory_bytes()
assert total > 0
total_mb = store.total_memory_mb()
assert total_mb >= 0
def test_check_row_limit():
"""Test row limit checking"""
from mcp_server.data_store import DataStore
store = DataStore()
store._max_rows = 100
# Under limit
result = store.check_row_limit(50)
assert result['exceeded'] is False
# Over limit
result = store.check_row_limit(150)
assert result['exceeded'] is True
assert 'suggestion' in result
def test_metadata_dtypes():
"""Test that dtypes are correctly recorded"""
from mcp_server.data_store import DataStore
store = DataStore()
store._dataframes = {}
store._metadata = {}
df = pd.DataFrame({
'int_col': [1, 2, 3],
'float_col': [1.1, 2.2, 3.3],
'str_col': ['a', 'b', 'c']
})
store.store(df, name='dtype_test')
info = store.get_info('dtype_test')
assert 'int_col' in info.dtypes
assert 'float_col' in info.dtypes
assert 'str_col' in info.dtypes

View File

@@ -1,318 +0,0 @@
"""
Unit tests for dbt MCP tools.
"""
import pytest
from unittest.mock import Mock, patch, MagicMock
import subprocess
import json
import tempfile
import os
@pytest.fixture
def mock_config(tmp_path):
"""Mock configuration with dbt project"""
dbt_dir = tmp_path / 'dbt_project'
dbt_dir.mkdir()
(dbt_dir / 'dbt_project.yml').write_text('name: test_project\n')
return {
'dbt_project_dir': str(dbt_dir),
'dbt_profiles_dir': str(tmp_path / '.dbt')
}
@pytest.fixture
def dbt_tools(mock_config):
"""Create DbtTools instance with mocked config"""
with patch('mcp_server.dbt_tools.load_config', return_value=mock_config):
from mcp_server.dbt_tools import DbtTools
tools = DbtTools()
tools.project_dir = mock_config['dbt_project_dir']
tools.profiles_dir = mock_config['dbt_profiles_dir']
return tools
@pytest.mark.asyncio
async def test_dbt_parse_success(dbt_tools):
"""Test successful dbt parse"""
mock_result = MagicMock()
mock_result.returncode = 0
mock_result.stdout = 'Parsed successfully'
mock_result.stderr = ''
with patch('subprocess.run', return_value=mock_result):
result = await dbt_tools.dbt_parse()
assert result['valid'] is True
@pytest.mark.asyncio
async def test_dbt_parse_failure(dbt_tools):
"""Test dbt parse with errors"""
mock_result = MagicMock()
mock_result.returncode = 1
mock_result.stdout = ''
mock_result.stderr = 'Compilation error: deprecated syntax'
with patch('subprocess.run', return_value=mock_result):
result = await dbt_tools.dbt_parse()
assert result['valid'] is False
assert 'deprecated' in str(result.get('details', '')).lower() or len(result.get('errors', [])) > 0
@pytest.mark.asyncio
async def test_dbt_run_with_prevalidation(dbt_tools):
"""Test dbt run includes pre-validation"""
# First call is parse, second is run
mock_parse = MagicMock()
mock_parse.returncode = 0
mock_parse.stdout = 'OK'
mock_parse.stderr = ''
mock_run = MagicMock()
mock_run.returncode = 0
mock_run.stdout = 'Completed successfully'
mock_run.stderr = ''
with patch('subprocess.run', side_effect=[mock_parse, mock_run]):
result = await dbt_tools.dbt_run()
assert result['success'] is True
@pytest.mark.asyncio
async def test_dbt_run_fails_validation(dbt_tools):
"""Test dbt run fails if validation fails"""
mock_parse = MagicMock()
mock_parse.returncode = 1
mock_parse.stdout = ''
mock_parse.stderr = 'Parse error'
with patch('subprocess.run', return_value=mock_parse):
result = await dbt_tools.dbt_run()
assert 'error' in result
assert 'Pre-validation failed' in result['error']
@pytest.mark.asyncio
async def test_dbt_run_with_selection(dbt_tools):
"""Test dbt run with model selection"""
mock_parse = MagicMock()
mock_parse.returncode = 0
mock_parse.stdout = 'OK'
mock_parse.stderr = ''
mock_run = MagicMock()
mock_run.returncode = 0
mock_run.stdout = 'Completed'
mock_run.stderr = ''
calls = []
def track_calls(*args, **kwargs):
calls.append(args[0] if args else kwargs.get('args', []))
if len(calls) == 1:
return mock_parse
return mock_run
with patch('subprocess.run', side_effect=track_calls):
result = await dbt_tools.dbt_run(select='dim_customers')
# Verify --select was passed
assert any('--select' in str(call) for call in calls)
@pytest.mark.asyncio
async def test_dbt_test(dbt_tools):
"""Test dbt test"""
mock_result = MagicMock()
mock_result.returncode = 0
mock_result.stdout = 'All tests passed'
mock_result.stderr = ''
with patch('subprocess.run', return_value=mock_result):
result = await dbt_tools.dbt_test()
assert result['success'] is True
@pytest.mark.asyncio
async def test_dbt_build(dbt_tools):
"""Test dbt build with pre-validation"""
mock_parse = MagicMock()
mock_parse.returncode = 0
mock_parse.stdout = 'OK'
mock_parse.stderr = ''
mock_build = MagicMock()
mock_build.returncode = 0
mock_build.stdout = 'Build complete'
mock_build.stderr = ''
with patch('subprocess.run', side_effect=[mock_parse, mock_build]):
result = await dbt_tools.dbt_build()
assert result['success'] is True
@pytest.mark.asyncio
async def test_dbt_compile(dbt_tools):
"""Test dbt compile"""
mock_result = MagicMock()
mock_result.returncode = 0
mock_result.stdout = 'Compiled'
mock_result.stderr = ''
with patch('subprocess.run', return_value=mock_result):
result = await dbt_tools.dbt_compile()
assert result['success'] is True
@pytest.mark.asyncio
async def test_dbt_ls(dbt_tools):
"""Test dbt ls"""
mock_result = MagicMock()
mock_result.returncode = 0
mock_result.stdout = 'dim_customers\ndim_products\nfct_orders\n'
mock_result.stderr = ''
with patch('subprocess.run', return_value=mock_result):
result = await dbt_tools.dbt_ls()
assert result['success'] is True
assert result['count'] == 3
assert 'dim_customers' in result['resources']
@pytest.mark.asyncio
async def test_dbt_docs_generate(dbt_tools, tmp_path):
"""Test dbt docs generate"""
mock_result = MagicMock()
mock_result.returncode = 0
mock_result.stdout = 'Done'
mock_result.stderr = ''
# Create fake target directory
target_dir = tmp_path / 'dbt_project' / 'target'
target_dir.mkdir(parents=True)
(target_dir / 'catalog.json').write_text('{}')
(target_dir / 'manifest.json').write_text('{}')
dbt_tools.project_dir = str(tmp_path / 'dbt_project')
with patch('subprocess.run', return_value=mock_result):
result = await dbt_tools.dbt_docs_generate()
assert result['success'] is True
assert result['catalog_generated'] is True
assert result['manifest_generated'] is True
@pytest.mark.asyncio
async def test_dbt_lineage(dbt_tools, tmp_path):
"""Test dbt lineage"""
# Create manifest
target_dir = tmp_path / 'dbt_project' / 'target'
target_dir.mkdir(parents=True)
manifest = {
'nodes': {
'model.test.dim_customers': {
'name': 'dim_customers',
'resource_type': 'model',
'schema': 'public',
'database': 'testdb',
'description': 'Customer dimension',
'tags': ['daily'],
'config': {'materialized': 'table'},
'depends_on': {
'nodes': ['model.test.stg_customers']
}
},
'model.test.stg_customers': {
'name': 'stg_customers',
'resource_type': 'model',
'depends_on': {'nodes': []}
},
'model.test.fct_orders': {
'name': 'fct_orders',
'resource_type': 'model',
'depends_on': {
'nodes': ['model.test.dim_customers']
}
}
}
}
(target_dir / 'manifest.json').write_text(json.dumps(manifest))
dbt_tools.project_dir = str(tmp_path / 'dbt_project')
result = await dbt_tools.dbt_lineage('dim_customers')
assert result['model'] == 'dim_customers'
assert 'model.test.stg_customers' in result['upstream']
assert 'model.test.fct_orders' in result['downstream']
@pytest.mark.asyncio
async def test_dbt_lineage_model_not_found(dbt_tools, tmp_path):
"""Test dbt lineage with nonexistent model"""
target_dir = tmp_path / 'dbt_project' / 'target'
target_dir.mkdir(parents=True)
manifest = {
'nodes': {
'model.test.dim_customers': {
'name': 'dim_customers',
'resource_type': 'model'
}
}
}
(target_dir / 'manifest.json').write_text(json.dumps(manifest))
dbt_tools.project_dir = str(tmp_path / 'dbt_project')
result = await dbt_tools.dbt_lineage('nonexistent_model')
assert 'error' in result
assert 'not found' in result['error'].lower()
@pytest.mark.asyncio
async def test_dbt_no_project():
"""Test dbt tools when no project configured"""
with patch('mcp_server.dbt_tools.load_config', return_value={'dbt_project_dir': None}):
from mcp_server.dbt_tools import DbtTools
tools = DbtTools()
tools.project_dir = None
result = await tools.dbt_run()
assert 'error' in result
assert 'not found' in result['error'].lower()
@pytest.mark.asyncio
async def test_dbt_timeout(dbt_tools):
"""Test dbt command timeout handling"""
with patch('subprocess.run', side_effect=subprocess.TimeoutExpired('dbt', 300)):
result = await dbt_tools.dbt_parse()
assert 'error' in result
assert 'timed out' in result['error'].lower()
@pytest.mark.asyncio
async def test_dbt_not_installed(dbt_tools):
"""Test handling when dbt is not installed"""
with patch('subprocess.run', side_effect=FileNotFoundError()):
result = await dbt_tools.dbt_parse()
assert 'error' in result
assert 'not found' in result['error'].lower()

View File

@@ -1,301 +0,0 @@
"""
Unit tests for pandas MCP tools.
"""
import pytest
import pandas as pd
import tempfile
import os
from pathlib import Path
@pytest.fixture
def temp_csv(tmp_path):
"""Create a temporary CSV file for testing"""
csv_path = tmp_path / 'test.csv'
df = pd.DataFrame({
'id': [1, 2, 3, 4, 5],
'name': ['Alice', 'Bob', 'Charlie', 'Diana', 'Eve'],
'value': [10.5, 20.0, 30.5, 40.0, 50.5]
})
df.to_csv(csv_path, index=False)
return str(csv_path)
@pytest.fixture
def temp_parquet(tmp_path):
"""Create a temporary Parquet file for testing"""
parquet_path = tmp_path / 'test.parquet'
df = pd.DataFrame({
'id': [1, 2, 3],
'data': ['a', 'b', 'c']
})
df.to_parquet(parquet_path)
return str(parquet_path)
@pytest.fixture
def temp_json(tmp_path):
"""Create a temporary JSON file for testing"""
json_path = tmp_path / 'test.json'
df = pd.DataFrame({
'x': [1, 2],
'y': [3, 4]
})
df.to_json(json_path, orient='records')
return str(json_path)
@pytest.fixture
def pandas_tools():
"""Create PandasTools instance with fresh store"""
from mcp_server.pandas_tools import PandasTools
from mcp_server.data_store import DataStore
# Reset store for test isolation
store = DataStore.get_instance()
store._dataframes = {}
store._metadata = {}
return PandasTools()
@pytest.mark.asyncio
async def test_read_csv(pandas_tools, temp_csv):
"""Test reading CSV file"""
result = await pandas_tools.read_csv(temp_csv, name='csv_test')
assert 'data_ref' in result
assert result['data_ref'] == 'csv_test'
assert result['rows'] == 5
assert 'id' in result['columns']
assert 'name' in result['columns']
@pytest.mark.asyncio
async def test_read_csv_nonexistent(pandas_tools):
"""Test reading nonexistent CSV file"""
result = await pandas_tools.read_csv('/nonexistent/path.csv')
assert 'error' in result
assert 'not found' in result['error'].lower()
@pytest.mark.asyncio
async def test_read_parquet(pandas_tools, temp_parquet):
"""Test reading Parquet file"""
result = await pandas_tools.read_parquet(temp_parquet, name='parquet_test')
assert 'data_ref' in result
assert result['rows'] == 3
@pytest.mark.asyncio
async def test_read_json(pandas_tools, temp_json):
"""Test reading JSON file"""
result = await pandas_tools.read_json(temp_json, name='json_test')
assert 'data_ref' in result
assert result['rows'] == 2
@pytest.mark.asyncio
async def test_to_csv(pandas_tools, temp_csv, tmp_path):
"""Test exporting to CSV"""
# First load some data
await pandas_tools.read_csv(temp_csv, name='export_test')
# Export to new file
output_path = str(tmp_path / 'output.csv')
result = await pandas_tools.to_csv('export_test', output_path)
assert result['success'] is True
assert os.path.exists(output_path)
@pytest.mark.asyncio
async def test_to_parquet(pandas_tools, temp_csv, tmp_path):
"""Test exporting to Parquet"""
await pandas_tools.read_csv(temp_csv, name='parquet_export')
output_path = str(tmp_path / 'output.parquet')
result = await pandas_tools.to_parquet('parquet_export', output_path)
assert result['success'] is True
assert os.path.exists(output_path)
@pytest.mark.asyncio
async def test_describe(pandas_tools, temp_csv):
"""Test describe statistics"""
await pandas_tools.read_csv(temp_csv, name='describe_test')
result = await pandas_tools.describe('describe_test')
assert 'data_ref' in result
assert 'shape' in result
assert result['shape']['rows'] == 5
assert 'statistics' in result
assert 'null_counts' in result
@pytest.mark.asyncio
async def test_head(pandas_tools, temp_csv):
"""Test getting first N rows"""
await pandas_tools.read_csv(temp_csv, name='head_test')
result = await pandas_tools.head('head_test', n=3)
assert result['returned_rows'] == 3
assert len(result['data']) == 3
@pytest.mark.asyncio
async def test_tail(pandas_tools, temp_csv):
"""Test getting last N rows"""
await pandas_tools.read_csv(temp_csv, name='tail_test')
result = await pandas_tools.tail('tail_test', n=2)
assert result['returned_rows'] == 2
@pytest.mark.asyncio
async def test_filter(pandas_tools, temp_csv):
"""Test filtering rows"""
await pandas_tools.read_csv(temp_csv, name='filter_test')
result = await pandas_tools.filter('filter_test', 'value > 25')
assert 'data_ref' in result
assert result['rows'] == 3 # 30.5, 40.0, 50.5
@pytest.mark.asyncio
async def test_filter_invalid_condition(pandas_tools, temp_csv):
"""Test filter with invalid condition"""
await pandas_tools.read_csv(temp_csv, name='filter_error')
result = await pandas_tools.filter('filter_error', 'invalid_column > 0')
assert 'error' in result
@pytest.mark.asyncio
async def test_select(pandas_tools, temp_csv):
"""Test selecting columns"""
await pandas_tools.read_csv(temp_csv, name='select_test')
result = await pandas_tools.select('select_test', ['id', 'name'])
assert 'data_ref' in result
assert result['columns'] == ['id', 'name']
@pytest.mark.asyncio
async def test_select_invalid_column(pandas_tools, temp_csv):
"""Test select with invalid column"""
await pandas_tools.read_csv(temp_csv, name='select_error')
result = await pandas_tools.select('select_error', ['id', 'nonexistent'])
assert 'error' in result
assert 'available_columns' in result
@pytest.mark.asyncio
async def test_groupby(pandas_tools, tmp_path):
"""Test groupby aggregation"""
# Create test data with groups
csv_path = tmp_path / 'groupby.csv'
df = pd.DataFrame({
'category': ['A', 'A', 'B', 'B'],
'value': [10, 20, 30, 40]
})
df.to_csv(csv_path, index=False)
await pandas_tools.read_csv(str(csv_path), name='groupby_test')
result = await pandas_tools.groupby(
'groupby_test',
by='category',
agg={'value': 'sum'}
)
assert 'data_ref' in result
assert result['rows'] == 2 # Two groups: A, B
@pytest.mark.asyncio
async def test_join(pandas_tools, tmp_path):
"""Test joining DataFrames"""
# Create left table
left_path = tmp_path / 'left.csv'
pd.DataFrame({
'id': [1, 2, 3],
'name': ['A', 'B', 'C']
}).to_csv(left_path, index=False)
# Create right table
right_path = tmp_path / 'right.csv'
pd.DataFrame({
'id': [1, 2, 4],
'value': [100, 200, 400]
}).to_csv(right_path, index=False)
await pandas_tools.read_csv(str(left_path), name='left')
await pandas_tools.read_csv(str(right_path), name='right')
result = await pandas_tools.join('left', 'right', on='id', how='inner')
assert 'data_ref' in result
assert result['rows'] == 2 # Only id 1 and 2 match
@pytest.mark.asyncio
async def test_list_data(pandas_tools, temp_csv):
"""Test listing all DataFrames"""
await pandas_tools.read_csv(temp_csv, name='list_test1')
await pandas_tools.read_csv(temp_csv, name='list_test2')
result = await pandas_tools.list_data()
assert result['count'] == 2
refs = [df['ref'] for df in result['dataframes']]
assert 'list_test1' in refs
assert 'list_test2' in refs
@pytest.mark.asyncio
async def test_drop_data(pandas_tools, temp_csv):
"""Test dropping DataFrame"""
await pandas_tools.read_csv(temp_csv, name='drop_test')
result = await pandas_tools.drop_data('drop_test')
assert result['success'] is True
# Verify it's gone
list_result = await pandas_tools.list_data()
refs = [df['ref'] for df in list_result['dataframes']]
assert 'drop_test' not in refs
@pytest.mark.asyncio
async def test_drop_nonexistent(pandas_tools):
"""Test dropping nonexistent DataFrame"""
result = await pandas_tools.drop_data('nonexistent')
assert 'error' in result
@pytest.mark.asyncio
async def test_operations_on_nonexistent(pandas_tools):
"""Test operations on nonexistent data_ref"""
result = await pandas_tools.describe('nonexistent')
assert 'error' in result
result = await pandas_tools.head('nonexistent')
assert 'error' in result
result = await pandas_tools.filter('nonexistent', 'x > 0')
assert 'error' in result

View File

@@ -1,338 +0,0 @@
"""
Unit tests for PostgreSQL MCP tools.
"""
import pytest
from unittest.mock import Mock, AsyncMock, patch, MagicMock
import pandas as pd
@pytest.fixture
def mock_config():
"""Mock configuration"""
return {
'postgres_url': 'postgresql://test:test@localhost:5432/testdb',
'max_rows': 100000
}
@pytest.fixture
def postgres_tools(mock_config):
"""Create PostgresTools instance with mocked config"""
with patch('mcp_server.postgres_tools.load_config', return_value=mock_config):
from mcp_server.postgres_tools import PostgresTools
from mcp_server.data_store import DataStore
# Reset store
store = DataStore.get_instance()
store._dataframes = {}
store._metadata = {}
tools = PostgresTools()
tools.config = mock_config
return tools
@pytest.mark.asyncio
async def test_pg_connect_no_config():
"""Test pg_connect when no PostgreSQL configured"""
with patch('mcp_server.postgres_tools.load_config', return_value={'postgres_url': None}):
from mcp_server.postgres_tools import PostgresTools
tools = PostgresTools()
tools.config = {'postgres_url': None}
result = await tools.pg_connect()
assert result['connected'] is False
assert 'not configured' in result['error'].lower()
@pytest.mark.asyncio
async def test_pg_connect_success(postgres_tools):
"""Test successful pg_connect"""
mock_conn = AsyncMock()
mock_conn.fetchval = AsyncMock(side_effect=[
'PostgreSQL 15.1', # version
'testdb', # database name
'testuser', # user
None # PostGIS check fails
])
mock_conn.close = AsyncMock()
# Create proper async context manager
mock_cm = AsyncMock()
mock_cm.__aenter__ = AsyncMock(return_value=mock_conn)
mock_cm.__aexit__ = AsyncMock(return_value=None)
mock_pool = MagicMock()
mock_pool.acquire = MagicMock(return_value=mock_cm)
# Use AsyncMock for create_pool since it's awaited
with patch('asyncpg.create_pool', new=AsyncMock(return_value=mock_pool)):
postgres_tools.pool = None
result = await postgres_tools.pg_connect()
assert result['connected'] is True
assert result['database'] == 'testdb'
@pytest.mark.asyncio
async def test_pg_query_success(postgres_tools):
"""Test successful pg_query"""
mock_rows = [
{'id': 1, 'name': 'Alice'},
{'id': 2, 'name': 'Bob'}
]
mock_conn = AsyncMock()
mock_conn.fetch = AsyncMock(return_value=mock_rows)
mock_pool = AsyncMock()
mock_pool.acquire = MagicMock(return_value=AsyncMock(
__aenter__=AsyncMock(return_value=mock_conn),
__aexit__=AsyncMock()
))
postgres_tools.pool = mock_pool
result = await postgres_tools.pg_query('SELECT * FROM users', name='users_data')
assert 'data_ref' in result
assert result['rows'] == 2
@pytest.mark.asyncio
async def test_pg_query_empty_result(postgres_tools):
"""Test pg_query with no results"""
mock_conn = AsyncMock()
mock_conn.fetch = AsyncMock(return_value=[])
mock_pool = AsyncMock()
mock_pool.acquire = MagicMock(return_value=AsyncMock(
__aenter__=AsyncMock(return_value=mock_conn),
__aexit__=AsyncMock()
))
postgres_tools.pool = mock_pool
result = await postgres_tools.pg_query('SELECT * FROM empty_table')
assert result['data_ref'] is None
assert result['rows'] == 0
@pytest.mark.asyncio
async def test_pg_execute_success(postgres_tools):
"""Test successful pg_execute"""
mock_conn = AsyncMock()
mock_conn.execute = AsyncMock(return_value='INSERT 0 3')
mock_pool = AsyncMock()
mock_pool.acquire = MagicMock(return_value=AsyncMock(
__aenter__=AsyncMock(return_value=mock_conn),
__aexit__=AsyncMock()
))
postgres_tools.pool = mock_pool
result = await postgres_tools.pg_execute('INSERT INTO users VALUES (1, 2, 3)')
assert result['success'] is True
assert result['affected_rows'] == 3
assert result['command'] == 'INSERT'
@pytest.mark.asyncio
async def test_pg_tables(postgres_tools):
"""Test listing tables"""
mock_rows = [
{'table_name': 'users', 'table_type': 'BASE TABLE', 'column_count': 5},
{'table_name': 'orders', 'table_type': 'BASE TABLE', 'column_count': 8}
]
mock_conn = AsyncMock()
mock_conn.fetch = AsyncMock(return_value=mock_rows)
mock_pool = AsyncMock()
mock_pool.acquire = MagicMock(return_value=AsyncMock(
__aenter__=AsyncMock(return_value=mock_conn),
__aexit__=AsyncMock()
))
postgres_tools.pool = mock_pool
result = await postgres_tools.pg_tables(schema='public')
assert result['schema'] == 'public'
assert result['count'] == 2
assert len(result['tables']) == 2
@pytest.mark.asyncio
async def test_pg_columns(postgres_tools):
"""Test getting column info"""
mock_rows = [
{
'column_name': 'id',
'data_type': 'integer',
'udt_name': 'int4',
'is_nullable': 'NO',
'column_default': "nextval('users_id_seq'::regclass)",
'character_maximum_length': None,
'numeric_precision': 32
},
{
'column_name': 'name',
'data_type': 'character varying',
'udt_name': 'varchar',
'is_nullable': 'YES',
'column_default': None,
'character_maximum_length': 255,
'numeric_precision': None
}
]
mock_conn = AsyncMock()
mock_conn.fetch = AsyncMock(return_value=mock_rows)
mock_pool = AsyncMock()
mock_pool.acquire = MagicMock(return_value=AsyncMock(
__aenter__=AsyncMock(return_value=mock_conn),
__aexit__=AsyncMock()
))
postgres_tools.pool = mock_pool
result = await postgres_tools.pg_columns(table='users')
assert result['table'] == 'public.users'
assert result['column_count'] == 2
assert result['columns'][0]['name'] == 'id'
assert result['columns'][0]['nullable'] is False
@pytest.mark.asyncio
async def test_pg_schemas(postgres_tools):
"""Test listing schemas"""
mock_rows = [
{'schema_name': 'public'},
{'schema_name': 'app'}
]
mock_conn = AsyncMock()
mock_conn.fetch = AsyncMock(return_value=mock_rows)
mock_pool = AsyncMock()
mock_pool.acquire = MagicMock(return_value=AsyncMock(
__aenter__=AsyncMock(return_value=mock_conn),
__aexit__=AsyncMock()
))
postgres_tools.pool = mock_pool
result = await postgres_tools.pg_schemas()
assert result['count'] == 2
assert 'public' in result['schemas']
@pytest.mark.asyncio
async def test_st_tables(postgres_tools):
"""Test listing PostGIS tables"""
mock_rows = [
{
'table_name': 'locations',
'geometry_column': 'geom',
'geometry_type': 'POINT',
'srid': 4326,
'coord_dimension': 2
}
]
mock_conn = AsyncMock()
mock_conn.fetch = AsyncMock(return_value=mock_rows)
mock_pool = AsyncMock()
mock_pool.acquire = MagicMock(return_value=AsyncMock(
__aenter__=AsyncMock(return_value=mock_conn),
__aexit__=AsyncMock()
))
postgres_tools.pool = mock_pool
result = await postgres_tools.st_tables()
assert result['count'] == 1
assert result['postgis_tables'][0]['table'] == 'locations'
assert result['postgis_tables'][0]['srid'] == 4326
@pytest.mark.asyncio
async def test_st_tables_no_postgis(postgres_tools):
"""Test st_tables when PostGIS not installed"""
mock_conn = AsyncMock()
mock_conn.fetch = AsyncMock(side_effect=Exception("relation \"geometry_columns\" does not exist"))
# Create proper async context manager
mock_cm = AsyncMock()
mock_cm.__aenter__ = AsyncMock(return_value=mock_conn)
mock_cm.__aexit__ = AsyncMock(return_value=None)
mock_pool = MagicMock()
mock_pool.acquire = MagicMock(return_value=mock_cm)
postgres_tools.pool = mock_pool
result = await postgres_tools.st_tables()
assert 'error' in result
assert 'PostGIS' in result['error']
@pytest.mark.asyncio
async def test_st_extent(postgres_tools):
"""Test getting geometry bounding box"""
mock_row = {
'xmin': -122.5,
'ymin': 37.5,
'xmax': -122.0,
'ymax': 38.0
}
mock_conn = AsyncMock()
mock_conn.fetchrow = AsyncMock(return_value=mock_row)
mock_pool = AsyncMock()
mock_pool.acquire = MagicMock(return_value=AsyncMock(
__aenter__=AsyncMock(return_value=mock_conn),
__aexit__=AsyncMock()
))
postgres_tools.pool = mock_pool
result = await postgres_tools.st_extent(table='locations', column='geom')
assert result['bbox']['xmin'] == -122.5
assert result['bbox']['ymax'] == 38.0
@pytest.mark.asyncio
async def test_error_handling(postgres_tools):
"""Test error handling for database errors"""
mock_conn = AsyncMock()
mock_conn.fetch = AsyncMock(side_effect=Exception("Connection refused"))
# Create proper async context manager
mock_cm = AsyncMock()
mock_cm.__aenter__ = AsyncMock(return_value=mock_conn)
mock_cm.__aexit__ = AsyncMock(return_value=None)
mock_pool = MagicMock()
mock_pool.acquire = MagicMock(return_value=mock_cm)
postgres_tools.pool = mock_pool
result = await postgres_tools.pg_query('SELECT 1')
assert 'error' in result
assert 'Connection refused' in result['error']

View File

@@ -1,47 +1,412 @@
# Gitea MCP Server
Model Context Protocol (MCP) server for Gitea integration with Claude Code.
## Overview
The Gitea MCP Server provides Claude Code with direct access to Gitea for issue management, label operations, and repository tracking. It supports both single-repository (project mode) and multi-repository (company/PMO mode) operations.
**Status**: ✅ Phase 1 Complete - Fully functional and tested
## Features
- **Issue Management**: CRUD operations for Gitea issues
- **Label Taxonomy**: Dynamic 44-label system with intelligent suggestions
- **Mode Detection**: Automatic project vs company-wide mode detection
- **Branch-Aware Security**: Prevents accidental changes on production branches
- **Hybrid Configuration**: System-level credentials + project-level paths
- **PMO Support**: Multi-repository aggregation for organization-wide views
### Tools Provided
| Tool | Description | Mode |
|------|-------------|------|
| `list_issues` | List issues from repository | Both |
| `get_issue` | Get specific issue details | Both |
| `create_issue` | Create new issue with labels | Both |
| `update_issue` | Update existing issue | Both |
| `add_comment` | Add comment to issue | Both |
| `get_labels` | Get all labels (org + repo) | Both |
| `suggest_labels` | Intelligent label suggestion | Both |
| `aggregate_issues` | Cross-repository issue aggregation | PMO Only |
## Architecture
### Directory Structure
```
mcp-servers/gitea/
├── .venv/ # Python virtual environment
├── requirements.txt # Python dependencies
├── mcp_server/
│ ├── __init__.py
│ ├── server.py # MCP server entry point
│ ├── config.py # Configuration loader
│ ├── gitea_client.py # Gitea API client
│ └── tools/
│ ├── __init__.py
│ ├── issues.py # Issue tools
│ └── labels.py # Label tools
├── tests/
│ ├── __init__.py
│ ├── test_config.py
│ ├── test_gitea_client.py
│ ├── test_issues.py
│ └── test_labels.py
├── README.md # This file
└── TESTING.md # Testing instructions
```
### Mode Detection
The server operates in two modes based on environment variables:
**Project Mode** (Single Repository):
- When `GITEA_REPO` is set
- Operates on single repository
- Used by `projman` plugin
**Company Mode** (Multi-Repository / PMO):
- When `GITEA_REPO` is NOT set and no repository can be auto-detected from the git remote
- Operates on all repositories in organization
- Used by `projman-pmo` plugin
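The detection rule itself is a one-liner. A minimal sketch, assuming only the `GITEA_REPO` variable described above (the real loader in `config.py` also falls back to parsing the git remote):
```python
import os

def detect_mode() -> str:
    """Return 'project' when GITEA_REPO is set, otherwise 'company' (PMO)."""
    return 'project' if os.getenv('GITEA_REPO') else 'company'
```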
### Branch-Aware Security
Operations are restricted based on the current Git branch:
| Branch | Read | Create Issue | Update/Comment |
|--------|------|--------------|----------------|
| `main`, `master`, `prod/*` | ✅ | ❌ | ❌ |
| `staging`, `stage/*` | ✅ | ✅ | ❌ |
| `development`, `develop`, `feat/*`, `dev/*` | ✅ | ✅ | ✅ |
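A minimal sketch of how this policy can be expressed as data; the patterns come from the table above, while treating unrecognized branches as read-only is an assumption, not documented behavior:
```python
import fnmatch

# (read, create_issue, update_or_comment) per branch pattern, per the table above
BRANCH_RULES = {
    'main': (True, False, False),
    'master': (True, False, False),
    'prod/*': (True, False, False),
    'staging': (True, True, False),
    'stage/*': (True, True, False),
    'development': (True, True, True),
    'develop': (True, True, True),
    'feat/*': (True, True, True),
    'dev/*': (True, True, True),
}

def branch_allows(branch: str, action: str) -> bool:
    """Check whether an action is permitted on the given branch."""
    index = {'read': 0, 'create_issue': 1, 'update': 2}[action]
    for pattern, perms in BRANCH_RULES.items():
        if fnmatch.fnmatch(branch, pattern):
            return perms[index]
    return action == 'read'  # assumption: unknown branches are read-only
```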
## Installation
### Prerequisites
- Python 3.10 or higher
- Git repository (for branch detection)
- Access to Gitea instance with API token
### Step 1: Install Dependencies
```bash
cd mcp-servers/gitea
python3 -m venv .venv
source .venv/bin/activate # Linux/Mac
# or .venv\Scripts\activate # Windows
pip install -r requirements.txt
```
### Step 2: Configure System-Level Settings
Create `~/.config/claude/gitea.env`:
```bash
mkdir -p ~/.config/claude
cat > ~/.config/claude/gitea.env << EOF
GITEA_API_URL=https://gitea.example.com/api/v1
GITEA_API_TOKEN=your_gitea_token_here
GITEA_OWNER=bandit
EOF
chmod 600 ~/.config/claude/gitea.env
```
### Step 3: Configure Project-Level Settings (Optional)
For project mode, create `.env` in your project root:
```bash
echo "GITEA_REPO=your-repo-name" > .env
echo ".env" >> .gitignore
```
For company/PMO mode, omit the `.env` file or don't set `GITEA_REPO`.
## Configuration
### System-Level Configuration
**File**: `~/.config/claude/gitea.env`
**Required Variables**:
- `GITEA_API_URL` - Gitea API endpoint (e.g., `https://gitea.example.com/api/v1`)
- `GITEA_API_TOKEN` - Personal access token with repo permissions
- `GITEA_OWNER` - Organization or user name (e.g., `bandit`)
### Project-Level Configuration
**File**: `<project-root>/.env`
**Optional Variables**:
- `GITEA_REPO` - Repository name (enables project mode)
### Generating Gitea API Token
1. Log into Gitea: https://gitea.example.com
2. Navigate to: **Settings** → **Applications** → **Manage Access Tokens**
3. Click **Generate New Token**
4. Configure token:
- **Token Name**: `claude-code-mcp`
- **Permissions**:
  - ✅ `repo` (all) - Read/write repositories, issues, labels
  - ✅ `read:org` - Read organization information and labels
  - ✅ `read:user` - Read user information
5. Click **Generate Token**
6. Copy token immediately (shown only once)
7. Add to `~/.config/claude/gitea.env`
## Usage
### Running the MCP Server
```bash
cd mcp-servers/gitea
source .venv/bin/activate
python -m mcp_server.server
```
The server communicates via JSON-RPC 2.0 over stdio.
### Integration with Claude Code Plugins
The MCP server is designed to be used by Claude Code plugins via `.mcp.json` configuration:
```json
{
"mcpServers": {
"gitea": {
"command": "python",
"args": ["-m", "mcp_server.server"],
"cwd": "${CLAUDE_PLUGIN_ROOT}/../mcp-servers/gitea",
"env": {
"PYTHONPATH": "${CLAUDE_PLUGIN_ROOT}/../mcp-servers/gitea"
}
}
}
}
```
### Example Tool Calls
**List Issues**:
```python
from mcp_server.tools.issues import IssueTools
from mcp_server.gitea_client import GiteaClient
client = GiteaClient()
issue_tools = IssueTools(client)
issues = await issue_tools.list_issues(state='open', labels=['Type/Bug'])
```
**Suggest Labels**:
```python
from mcp_server.tools.labels import LabelTools
label_tools = LabelTools(client)
context = "Fix critical authentication bug in production API"
suggestions = await label_tools.suggest_labels(context)
# Returns: ['Type/Bug', 'Priority/Critical', 'Component/Auth', 'Component/API', ...]
```
## Testing
### Unit Tests
Run all 42 unit tests with mocks:
```bash
pytest tests/ -v
```
Expected: `42 passed in 0.57s`
### Integration Tests
Test with real Gitea instance:
```bash
python -c "
from mcp_server.gitea_client import GiteaClient
client = GiteaClient()
issues = client.list_issues(state='open')
print(f'Found {len(issues)} open issues')
"
```
### Full Testing Guide
See [TESTING.md](./TESTING.md) for comprehensive testing instructions.
## Label Taxonomy System
The system supports a dynamic 44-label taxonomy (28 org + 16 repo):
**Organization Labels (28)**:
- `Agent/*` (2) - Agent/Human, Agent/Claude
- `Complexity/*` (3) - Simple, Medium, Complex
- `Efforts/*` (5) - XS, S, M, L, XL
- `Priority/*` (4) - Low, Medium, High, Critical
- `Risk/*` (3) - Low, Medium, High
- `Source/*` (4) - Development, Staging, Production, Customer
- `Type/*` (6) - Bug, Feature, Refactor, Documentation, Test, Chore
**Repository Labels (16)**:
- `Component/*` (9) - Backend, Frontend, API, Database, Auth, Deploy, Testing, Docs, Infra
- `Tech/*` (7) - Python, JavaScript, Docker, PostgreSQL, Redis, Vue, FastAPI
Labels are fetched dynamically from Gitea and suggestions adapt to the current taxonomy.
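To illustrate how suggestions can adapt, here is a minimal sketch of keyword-based matching; `KEYWORD_HINTS` is illustrative, not the plugin's actual mapping:
```python
KEYWORD_HINTS = {
    'bug': 'Type/Bug',
    'critical': 'Priority/Critical',
    'auth': 'Component/Auth',
    'api': 'Component/API',
}

def suggest_labels(context: str, available: set[str]) -> list[str]:
    """Suggest only labels that exist in the currently fetched taxonomy."""
    text = context.lower()
    hits = {label for word, label in KEYWORD_HINTS.items() if word in text}
    return sorted(hits & available)  # intersect with live labels from Gitea
```
The intersection with `available` is what keeps suggestions in sync with whatever taxonomy the Gitea instance actually defines.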
## Security
### Token Storage
- Store tokens in `~/.config/claude/gitea.env`
- Set file permissions to `600` (read/write owner only)
- Never commit tokens to Git
- Use separate tokens for development and production
### Branch Detection
The MCP server implements defense-in-depth branch detection:
1. **MCP Tools**: Check branch before operations
2. **Agent Prompts**: Warn users about branch restrictions
3. **CLAUDE.md**: Provides additional context
### Input Validation
- All user input is validated before API calls
- Issue titles and descriptions are sanitized
- Label names are checked against taxonomy
- Repository names are validated
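As a concrete illustration of the repository-name check, a minimal sketch (the helper name and regex are assumptions, matching the `owner/repo` format the client requires):
```python
import re

REPO_RE = re.compile(r'^[\w.-]+/[\w.-]+$')  # 'owner/repo'

def validate_repo(repo: str) -> str:
    """Reject anything that is not a plain 'owner/repo' name."""
    if not REPO_RE.match(repo):
        raise ValueError(f"Invalid repository name: {repo!r}")
    return repo
```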
## Troubleshooting
### Common Issues
**Module not found**:
```bash
cd mcp-servers/gitea
source .venv/bin/activate
```
**Configuration not found**:
```bash
ls -la ~/.config/claude/gitea.env
# If missing, create it following installation steps
```
**Authentication failed**:
```bash
# Test token manually
curl -H "Authorization: token YOUR_TOKEN" \
https://gitea.example.com/api/v1/user
```
**Permission denied on branch**:
```bash
# Check current branch
git branch --show-current
# Switch to development branch
git checkout development
```
See [TESTING.md](./TESTING.md#troubleshooting) for more details.
## Development
### Project Structure
- `config.py` - Hybrid configuration loader with mode detection
- `gitea_client.py` - Synchronous Gitea API client using requests
- `tools/issues.py` - Async wrappers with branch detection
- `tools/labels.py` - Label management and suggestion
- `server.py` - MCP server with JSON-RPC 2.0 over stdio
### Adding New Tools
1. Add method to `GiteaClient` (sync)
2. Add async wrapper to appropriate tool class
3. Register tool in `server.py` `setup_tools()`
4. Add unit tests
5. Update documentation
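To make the pattern concrete, here is a minimal sketch of steps 1 and 2 for a hypothetical `get_issue_timeline` tool; the endpoint and names are illustrative, not part of the current tool set, and the typing imports are assumed to already exist in those modules:
```python
# Step 1: sync method on GiteaClient (gitea_client.py) - illustrative only
def get_issue_timeline(self, issue_number: int, repo: Optional[str] = None) -> List[Dict]:
    """Get timeline events for an issue."""
    owner, target_repo = self._parse_repo(repo)
    url = f"{self.base_url}/repos/{owner}/{target_repo}/issues/{issue_number}/timeline"
    response = self.session.get(url)
    response.raise_for_status()
    return response.json()

# Step 2: async wrapper in the tool class (tools/issues.py) - illustrative only
async def get_issue_timeline(self, issue_number: int, repo: Optional[str] = None) -> List[Dict]:
    return self.client.get_issue_timeline(issue_number, repo)
```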
### Testing Philosophy
- **Unit tests**: Use mocks for fast feedback
- **Integration tests**: Use real Gitea API for validation
- **Branch detection**: Test all branch types
- **Mode detection**: Test both project and company modes
## Performance
### Caching
Labels are cached to reduce API calls:
```python
from functools import lru_cache
@lru_cache(maxsize=128)
def get_labels_cached(self, repo: str):
return self.get_labels(repo)
```
### Retry Logic
API calls include automatic retry with exponential backoff:
```python
@retry_on_failure(max_retries=3, delay=1)
def list_issues(self, state='open', labels=None, repo=None):
# Implementation
```
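`retry_on_failure` itself is not shown in this file; a minimal sketch of a compatible decorator, assuming the shipped one behaves the same way:
```python
import functools
import time

def retry_on_failure(max_retries=3, delay=1):
    """Retry the wrapped call, backing off exponentially (delay, 2*delay, ...)."""
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            for attempt in range(max_retries):
                try:
                    return func(*args, **kwargs)
                except Exception:
                    if attempt == max_retries - 1:
                        raise
                    time.sleep(delay * (2 ** attempt))
        return wrapper
    return decorator
```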
## Changelog
### v1.0.0 (2025-01-06) - Phase 1 Complete
✅ Initial implementation:
- Configuration management (hybrid system + project)
- Gitea API client with all CRUD operations
- MCP server with 8 tools
- Issue tools with branch detection
- Label tools with intelligent suggestions
- Mode detection (project vs company)
- Branch-aware security model
- 42 unit tests (100% passing)
- Comprehensive documentation
## License
MIT License - Part of the Leo Claude Marketplace project.
## Related Documentation
- **Projman Documentation**: `plugins/projman/README.md`
- **Configuration Guide**: `plugins/projman/CONFIGURATION.md`
- **Testing Guide**: `TESTING.md`
## Support
For issues or questions:
1. Check [TESTING.md](./TESTING.md) troubleshooting section
2. Review [plugins/projman/README.md](../../plugins/projman/README.md) for plugin documentation
3. Create an issue in the project repository
---
**Built for**: Leo Claude Marketplace - Project Management Plugins
**Phase**: 1 (Complete)
**Status**: ✅ Production Ready
**Last Updated**: 2025-01-06

View File

@@ -0,0 +1,582 @@
# Gitea MCP Server - Testing Guide
This document provides comprehensive testing instructions for the Gitea MCP Server implementation.
## Table of Contents
1. [Unit Tests](#unit-tests)
2. [Manual MCP Server Testing](#manual-mcp-server-testing)
3. [Integration Testing](#integration-testing)
4. [Configuration Setup for Testing](#configuration-setup-for-testing)
5. [Troubleshooting](#troubleshooting)
---
## Unit Tests
Unit tests use mocks to test all modules without requiring a real Gitea instance.
### Prerequisites
Ensure the virtual environment is activated and dependencies are installed:
```bash
cd mcp-servers/gitea
source .venv/bin/activate # Linux/Mac
# or .venv\Scripts\activate # Windows
```
### Running All Tests
Run all 42 unit tests:
```bash
pytest tests/ -v
```
Expected output:
```
============================== 42 passed in 0.57s ==============================
```
### Running Specific Test Files
Run tests for a specific module:
```bash
# Configuration tests
pytest tests/test_config.py -v
# Gitea client tests
pytest tests/test_gitea_client.py -v
# Issue tools tests
pytest tests/test_issues.py -v
# Label tools tests
pytest tests/test_labels.py -v
```
### Running Specific Tests
Run a single test:
```bash
pytest tests/test_config.py::test_load_system_config -v
```
### Test Coverage
Generate coverage report:
```bash
pytest --cov=mcp_server --cov-report=html tests/
# View coverage report
# Open htmlcov/index.html in your browser
```
Expected coverage: >80% for all modules
### Test Organization
**Configuration Tests** (`test_config.py`):
- System-level configuration loading
- Project-level configuration override
- Mode detection (project vs company)
- Missing configuration handling
**Gitea Client Tests** (`test_gitea_client.py`):
- API client initialization
- Issue CRUD operations
- Label retrieval
- PMO multi-repo operations
**Issue Tools Tests** (`test_issues.py`):
- Branch-aware security checks
- Async wrappers for sync client
- Permission enforcement
- PMO aggregation mode
**Label Tools Tests** (`test_labels.py`):
- Label retrieval (org + repo)
- Intelligent label suggestion
- Multi-category detection
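As a shape reference, a branch-security test might look like the following sketch; the fixture name, patch target, and exception type are assumptions based on the descriptions above, not the actual test code:
```python
import pytest
from unittest.mock import patch

@pytest.mark.asyncio
async def test_create_issue_blocked_on_main(issue_tools):
    """Hypothetical sketch: issue creation must be rejected on 'main'."""
    # 'get_current_branch' is an assumed helper name for branch detection
    with patch('mcp_server.tools.issues.get_current_branch', return_value='main'):
        with pytest.raises(PermissionError):
            await issue_tools.create_issue(title='Test', body='Body')
```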
---
## Manual MCP Server Testing
Test the MCP server manually using stdio communication.
### Step 1: Start the MCP Server
```bash
cd mcp-servers/gitea
source .venv/bin/activate
python -m mcp_server.server
```
The server will start and wait for JSON-RPC 2.0 messages on stdin.
### Step 2: Test Tool Listing
From another shell, pipe a tool listing request into the server (this starts a fresh instance and exits after the response):
```bash
echo '{"jsonrpc": "2.0", "id": 1, "method": "tools/list"}' | python -m mcp_server.server
```
Expected response:
```json
{
"jsonrpc": "2.0",
"id": 1,
"result": {
"tools": [
{"name": "list_issues", "description": "List issues from Gitea repository", ...},
{"name": "get_issue", "description": "Get specific issue details", ...},
{"name": "create_issue", "description": "Create a new issue in Gitea", ...},
...
]
}
}
```
### Step 3: Test Tool Invocation
**Note:** Manual tool invocation requires proper configuration. See [Configuration Setup](#configuration-setup-for-testing).
Example: List issues
```bash
echo '{
"jsonrpc": "2.0",
"id": 2,
"method": "tools/call",
"params": {
"name": "list_issues",
"arguments": {
"state": "open"
}
}
}' | python -m mcp_server.server
```
---
## Integration Testing
Test the MCP server with a real Gitea instance.
### Prerequisites
1. **Gitea Instance**: Access to https://gitea.example.com (or your Gitea instance)
2. **API Token**: Personal access token with required permissions
3. **Configuration**: Properly configured system and project configs
### Step 1: Configuration Setup
Create system-level configuration:
```bash
mkdir -p ~/.config/claude
cat > ~/.config/claude/gitea.env << EOF
GITEA_API_URL=https://gitea.example.com/api/v1
GITEA_API_TOKEN=your_gitea_token_here
GITEA_OWNER=bandit
EOF
chmod 600 ~/.config/claude/gitea.env
```
Create project-level configuration (for project mode testing):
```bash
cd /path/to/test/project
cat > .env << EOF
GITEA_REPO=test-repo
EOF
# Add to .gitignore
echo ".env" >> .gitignore
```
### Step 2: Generate Gitea API Token
1. Log into Gitea: https://gitea.example.com
2. Navigate to: **Settings** → **Applications** → **Manage Access Tokens**
3. Click **Generate New Token**
4. Token configuration:
- **Token Name:** `mcp-integration-test`
- **Required Permissions:**
  - ✅ `repo` (all) - Read/write access to repositories, issues, labels
  - ✅ `read:org` - Read organization information and labels
  - ✅ `read:user` - Read user information
5. Click **Generate Token**
6. Copy the token immediately (shown only once)
7. Add to `~/.config/claude/gitea.env`
### Step 3: Verify Configuration
Test configuration loading:
```bash
cd mcp-servers/gitea
source .venv/bin/activate
python -c "
from mcp_server.config import GiteaConfig
config = GiteaConfig()
result = config.load()
print(f'API URL: {result[\"api_url\"]}')
print(f'Repo: {result[\"repo\"]}')
print(f'Mode: {result[\"mode\"]}')
"
```
Expected output:
```
API URL: https://gitea.example.com/api/v1
Repo: test-repo (or None for company mode)
Mode: project (or company)
```
### Step 4: Test Gitea Client
Test basic Gitea API operations:
```bash
python -c "
from mcp_server.gitea_client import GiteaClient
client = GiteaClient()
# Test listing issues
print('Testing list_issues...')
issues = client.list_issues(state='open')
print(f'Found {len(issues)} open issues')
# Test getting labels
print('\\nTesting get_labels...')
labels = client.get_labels()
print(f'Found {len(labels)} repository labels')
# Test getting org labels
print('\\nTesting get_org_labels...')
org_labels = client.get_org_labels('bandit')
print(f'Found {len(org_labels)} organization labels')
print('\\n✅ All integration tests passed!')
"
```
### Step 5: Test Issue Creation (Optional)
**Warning:** This creates a real issue in Gitea. Use a test repository.
```bash
python -c "
from mcp_server.gitea_client import GiteaClient
client = GiteaClient()
# Create test issue
print('Creating test issue...')
issue = client.create_issue(
title='[TEST] MCP Server Integration Test',
body='This is a test issue created by the Gitea MCP Server integration tests.',
labels=['Type/Test']
)
print(f'Created issue #{issue[\"number\"]}: {issue[\"title\"]}')
# Clean up: Close the issue
print('\\nClosing test issue...')
client.update_issue(issue['number'], state='closed')
print('✅ Test issue closed')
"
```
### Step 6: Test MCP Server with Real API
Start the MCP server and test with real Gitea API:
```bash
cd mcp-servers/gitea
source .venv/bin/activate
# Run server with test script
python << 'EOF'
import asyncio
import json
from mcp_server.server import GiteaMCPServer
async def test_server():
server = GiteaMCPServer()
await server.initialize()
# Test list_issues
result = await server.issue_tools.list_issues(state='open')
print(f'Found {len(result)} open issues')
# Test get_labels
labels = await server.label_tools.get_labels()
print(f'Found {labels["total_count"]} total labels')
# Test suggest_labels
suggestions = await server.label_tools.suggest_labels(
"Fix critical bug in authentication"
)
print(f'Suggested labels: {", ".join(suggestions)}')
print('✅ All MCP server integration tests passed!')
asyncio.run(test_server())
EOF
```
### Step 7: Test PMO Mode (Optional)
Test company-wide mode (no GITEA_REPO):
```bash
# Temporarily remove GITEA_REPO
unset GITEA_REPO
python -c "
from mcp_server.gitea_client import GiteaClient
client = GiteaClient()
print(f'Running in {client.mode} mode')
# Test list_repos
print('\\nTesting list_repos...')
repos = client.list_repos('bandit')  # organization name (GITEA_OWNER)
print(f'Found {len(repos)} repositories')
# Test aggregate_issues
print('\\nTesting aggregate_issues...')
aggregated = client.aggregate_issues('bandit', state='open')
for repo_name, issues in aggregated.items():
print(f' {repo_name}: {len(issues)} open issues')
print('\\n✅ PMO mode tests passed!')
"
```
---
## Configuration Setup for Testing
### Minimal Configuration
**System-level** (`~/.config/claude/gitea.env`):
```bash
GITEA_API_URL=https://gitea.example.com/api/v1
GITEA_API_TOKEN=your_token_here
GITEA_OWNER=bandit
```
**Project-level** (`.env` in project root):
```bash
# For project mode
GITEA_REPO=test-repo
# For company mode (PMO), omit GITEA_REPO
```
### Verification
Verify configuration is correct:
```bash
# Check system config exists
ls -la ~/.config/claude/gitea.env
# Check permissions (should be 600)
stat -c "%a %n" ~/.config/claude/gitea.env
# Check content (without exposing token)
grep -v TOKEN ~/.config/claude/gitea.env
# Check project config (if using project mode)
cat .env
```
---
## Troubleshooting
### Common Issues
#### 1. Import Errors
**Error:**
```
ModuleNotFoundError: No module named 'mcp_server'
```
**Solution:**
```bash
# Ensure you're in the correct directory
cd mcp-servers/gitea
# Activate virtual environment
source .venv/bin/activate
# Verify installation
pip list | grep mcp
```
#### 2. Configuration Not Found
**Error:**
```
FileNotFoundError: System config not found: /home/user/.config/claude/gitea.env
```
**Solution:**
```bash
# Create system config
mkdir -p ~/.config/claude
cat > ~/.config/claude/gitea.env << EOF
GITEA_API_URL=https://gitea.example.com/api/v1
GITEA_API_TOKEN=your_token_here
GITEA_OWNER=bandit
EOF
chmod 600 ~/.config/claude/gitea.env
```
#### 3. Missing Required Configuration
**Error:**
```
ValueError: Missing required configuration: GITEA_API_TOKEN, GITEA_OWNER
```
**Solution:**
```bash
# Check configuration file
cat ~/.config/claude/gitea.env
# Ensure all required variables are present:
# - GITEA_API_URL
# - GITEA_API_TOKEN
# - GITEA_OWNER
```
#### 4. API Authentication Failed
**Error:**
```
requests.exceptions.HTTPError: 401 Client Error: Unauthorized
```
**Solution:**
```bash
# Test token manually
curl -H "Authorization: token YOUR_TOKEN" \
https://gitea.example.com/api/v1/user
# If fails, regenerate token in Gitea settings
```
#### 5. Permission Errors (Branch Detection)
**Error:**
```
PermissionError: Cannot create issues on branch 'main'
```
**Solution:**
```bash
# Check current branch
git branch --show-current
# Switch to development branch
git checkout development
# or
git checkout -b feat/test-feature
```
#### 6. Repository Not Specified
**Error:**
```
ValueError: Repository not specified
```
**Solution:**
```bash
# Add GITEA_REPO to project config
echo "GITEA_REPO=your-repo-name" >> .env
# Or specify repo in tool call
# (for PMO mode multi-repo operations)
```
### Debug Mode
Enable debug logging:
```bash
export LOG_LEVEL=DEBUG
python -m mcp_server.server
```
### Test Summary
After completing all tests, verify:
- ✅ All 42 unit tests pass
- ✅ MCP server starts without errors
- ✅ Configuration loads correctly
- ✅ Gitea API client connects successfully
- ✅ Issues can be listed from Gitea
- ✅ Labels can be retrieved
- ✅ Label suggestions work correctly
- ✅ Branch detection blocks writes on main/staging
- ✅ Mode detection works (project vs company)
---
## Success Criteria
Phase 1 is complete when:
1. **All unit tests pass** (42/42)
2. **MCP server starts without errors**
3. **Can list issues from Gitea**
4. **Can create issues with labels** (in development mode)
5. **Mode detection works** (project vs company)
6. **Branch detection prevents writes on main/staging**
7. **Configuration properly merges** system + project levels
---
## Next Steps
After completing testing:
1. **Document any issues** found during testing
2. **Create integration with projman plugin** (Phase 2)
3. **Test in real project workflow** (Phase 5)
4. **Performance optimization** (if needed)
5. **Production hardening** (Phase 8)
---
## Additional Resources
- **MCP Documentation**: https://docs.anthropic.com/claude/docs/mcp
- **Gitea API Documentation**: https://docs.gitea.io/en-us/api-usage/
- **Projman Documentation**: `plugins/projman/README.md`
- **Configuration Guide**: `plugins/projman/CONFIGURATION.md`
---
**Last Updated**: 2025-01-06 (Phase 1 Implementation)

View File

@@ -0,0 +1,227 @@
"""
Configuration loader for Gitea MCP Server.
Implements hybrid configuration system:
- System-level: ~/.config/claude/gitea.env (credentials)
- Project-level: .env (repository specification)
- Auto-detection: Falls back to git remote URL parsing
"""
from pathlib import Path
from dotenv import load_dotenv
import os
import re
import subprocess
import logging
from typing import Dict, Optional
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class GiteaConfig:
"""Hybrid configuration loader with mode detection"""
def __init__(self):
self.api_url: Optional[str] = None
self.api_token: Optional[str] = None
self.repo: Optional[str] = None
self.mode: str = 'project'
def load(self) -> Dict[str, Optional[str]]:
"""
Load configuration from system and project levels.
Project-level configuration overrides system-level.
Returns:
Dict containing api_url, api_token, repo, mode
Raises:
FileNotFoundError: If system config is missing
ValueError: If required configuration is missing
"""
# Load system config
system_config = Path.home() / '.config' / 'claude' / 'gitea.env'
if system_config.exists():
load_dotenv(system_config)
logger.info(f"Loaded system configuration from {system_config}")
else:
raise FileNotFoundError(
f"System config not found: {system_config}\n"
"Create it with: mkdir -p ~/.config/claude && "
"cat > ~/.config/claude/gitea.env"
)
# Find project directory (MCP server cwd is plugin dir, not project dir)
project_dir = self._find_project_directory()
# Load project config (overrides system)
if project_dir:
project_config = project_dir / '.env'
if project_config.exists():
load_dotenv(project_config, override=True)
logger.info(f"Loaded project configuration from {project_config}")
# Extract values
self.api_url = os.getenv('GITEA_API_URL')
self.api_token = os.getenv('GITEA_API_TOKEN')
self.repo = os.getenv('GITEA_REPO') # Optional, must be owner/repo format
# Auto-detect repo from git remote if not specified
if not self.repo and project_dir:
self.repo = self._detect_repo_from_git(project_dir)
if self.repo:
logger.info(f"Auto-detected repository from git remote: {self.repo}")
# Detect mode
if self.repo:
self.mode = 'project'
logger.info(f"Running in project mode: {self.repo}")
else:
self.mode = 'company'
logger.info("Running in company-wide mode (PMO)")
# Validate required variables
self._validate()
return {
'api_url': self.api_url,
'api_token': self.api_token,
'repo': self.repo,
'mode': self.mode
}
def _validate(self) -> None:
"""
Validate that required configuration is present.
Raises:
ValueError: If required configuration is missing
"""
required = {
'GITEA_API_URL': self.api_url,
'GITEA_API_TOKEN': self.api_token
}
missing = [key for key, value in required.items() if not value]
if missing:
raise ValueError(
f"Missing required configuration: {', '.join(missing)}\n"
"Check your ~/.config/claude/gitea.env file"
)
def _find_project_directory(self) -> Optional[Path]:
"""
Find the user's project directory.
The MCP server runs with cwd set to the plugin directory, not the
user's project. We need to find the actual project directory using
various heuristics.
Returns:
Path to project directory, or None if not found
"""
# Strategy 1: Check CLAUDE_PROJECT_DIR environment variable
project_dir = os.getenv('CLAUDE_PROJECT_DIR')
if project_dir:
path = Path(project_dir)
if path.exists():
logger.info(f"Found project directory from CLAUDE_PROJECT_DIR: {path}")
return path
# Strategy 2: Check PWD (original working directory before cwd override)
pwd = os.getenv('PWD')
if pwd:
path = Path(pwd)
# Verify it has .git or .env (indicates a project)
if path.exists() and ((path / '.git').exists() or (path / '.env').exists()):
logger.info(f"Found project directory from PWD: {path}")
return path
# Strategy 3: Check current working directory
# This handles test scenarios and cases where cwd is actually the project
cwd = Path.cwd()
if (cwd / '.git').exists() or (cwd / '.env').exists():
logger.info(f"Found project directory from cwd: {cwd}")
return cwd
# Strategy 4: Check if GITEA_REPO is already set (user configured it)
# If so, we don't need to find the project directory for git detection
if os.getenv('GITEA_REPO'):
logger.debug("GITEA_REPO already set, skipping project directory detection")
return None
logger.debug("Could not determine project directory")
return None
def _detect_repo_from_git(self, project_dir: Optional[Path] = None) -> Optional[str]:
"""
Auto-detect repository from git remote origin URL.
Args:
project_dir: Directory to run git command from (defaults to cwd)
Supports URL formats:
- SSH: ssh://git@host:port/owner/repo.git
- SSH short: git@host:owner/repo.git
- HTTPS: https://host/owner/repo.git
- HTTP: http://host/owner/repo.git
Returns:
Repository in 'owner/repo' format, or None if detection fails
"""
try:
result = subprocess.run(
['git', 'remote', 'get-url', 'origin'],
capture_output=True,
text=True,
timeout=5,
cwd=str(project_dir) if project_dir else None
)
if result.returncode != 0:
logger.debug("No git remote 'origin' found")
return None
url = result.stdout.strip()
return self._parse_git_url(url)
except subprocess.TimeoutExpired:
logger.warning("Git command timed out")
return None
except FileNotFoundError:
logger.debug("Git not available")
return None
except Exception as e:
logger.debug(f"Failed to detect repo from git: {e}")
return None
def _parse_git_url(self, url: str) -> Optional[str]:
"""
Parse git URL to extract owner/repo.
Args:
url: Git remote URL
Returns:
Repository in 'owner/repo' format, or None if parsing fails
"""
# Remove .git suffix if present
url = re.sub(r'\.git$', '', url)
# SSH format: ssh://git@host:port/owner/repo
ssh_match = re.match(r'ssh://[^/]+/(.+/.+)$', url)
if ssh_match:
return ssh_match.group(1)
# SSH short format: git@host:owner/repo
ssh_short_match = re.match(r'git@[^:]+:(.+/.+)$', url)
if ssh_short_match:
return ssh_short_match.group(1)
# HTTPS/HTTP format: https://host/owner/repo
http_match = re.match(r'https?://[^/]+/(.+/.+)$', url)
if http_match:
return http_match.group(1)
logger.warning(f"Could not parse git URL: {url}")
return None
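# Illustrative usage of the URL parser (editorial sketch, not original code):
#   GiteaConfig()._parse_git_url('git@gitea.example.com:org/repo.git')     -> 'org/repo'
#   GiteaConfig()._parse_git_url('https://gitea.example.com/org/repo.git') -> 'org/repo'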

View File

@@ -0,0 +1,745 @@
"""
Gitea API client for interacting with Gitea API.
Provides synchronous methods for:
- Issue CRUD operations
- Label management
- Repository operations
- PMO multi-repo aggregation
- Wiki operations (lessons learned)
- Milestone management
- Issue dependencies
"""
import requests
import logging
import re
from typing import List, Dict, Optional
from .config import GiteaConfig
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class GiteaClient:
"""Client for interacting with Gitea API"""
def __init__(self):
"""Initialize Gitea client with configuration"""
config = GiteaConfig()
config_dict = config.load()
self.base_url = config_dict['api_url']
self.token = config_dict['api_token']
self.repo = config_dict.get('repo') # Optional default repo in owner/repo format
self.mode = config_dict['mode']
self.session = requests.Session()
self.session.headers.update({
'Authorization': f'token {self.token}',
'Content-Type': 'application/json'
})
logger.info(f"Gitea client initialized in {self.mode} mode")
def _parse_repo(self, repo: Optional[str] = None) -> tuple:
"""Parse owner/repo from input. Always requires 'owner/repo' format."""
target = repo or self.repo
if not target or '/' not in target:
raise ValueError("Use 'owner/repo' format (e.g. 'org/repo-name')")
parts = target.split('/', 1)
return parts[0], parts[1]
def list_issues(
self,
state: str = 'open',
labels: Optional[List[str]] = None,
repo: Optional[str] = None
) -> List[Dict]:
"""
List issues from Gitea repository.
Args:
state: Issue state (open, closed, all)
labels: Filter by labels
repo: Repository in 'owner/repo' format
Returns:
List of issue dictionaries
"""
owner, target_repo = self._parse_repo(repo)
url = f"{self.base_url}/repos/{owner}/{target_repo}/issues"
params = {'state': state}
if labels:
params['labels'] = ','.join(labels)
logger.info(f"Listing issues from {owner}/{target_repo} with state={state}")
response = self.session.get(url, params=params)
response.raise_for_status()
return response.json()
def get_issue(
self,
issue_number: int,
repo: Optional[str] = None
) -> Dict:
"""Get specific issue details."""
owner, target_repo = self._parse_repo(repo)
url = f"{self.base_url}/repos/{owner}/{target_repo}/issues/{issue_number}"
logger.info(f"Getting issue #{issue_number} from {owner}/{target_repo}")
response = self.session.get(url)
response.raise_for_status()
return response.json()
def create_issue(
self,
title: str,
body: str,
labels: Optional[List[str]] = None,
repo: Optional[str] = None
) -> Dict:
"""Create a new issue in Gitea."""
owner, target_repo = self._parse_repo(repo)
url = f"{self.base_url}/repos/{owner}/{target_repo}/issues"
data = {'title': title, 'body': body}
if labels:
label_ids = self._resolve_label_ids(labels, owner, target_repo)
data['labels'] = label_ids
logger.info(f"Creating issue in {owner}/{target_repo}: {title}")
response = self.session.post(url, json=data)
response.raise_for_status()
return response.json()
def _resolve_label_ids(self, label_names: List[str], owner: str, repo: str) -> List[int]:
"""Convert label names to label IDs."""
full_repo = f"{owner}/{repo}"
# Only fetch org labels if repo belongs to an organization
org_labels = []
if self.is_org_repo(full_repo):
org_labels = self.get_org_labels(owner)
repo_labels = self.get_labels(full_repo)
all_labels = org_labels + repo_labels
label_map = {label['name']: label['id'] for label in all_labels}
label_ids = []
for name in label_names:
if name in label_map:
label_ids.append(label_map[name])
else:
logger.warning(f"Label '{name}' not found, skipping")
return label_ids
def update_issue(
self,
issue_number: int,
title: Optional[str] = None,
body: Optional[str] = None,
state: Optional[str] = None,
labels: Optional[List[str]] = None,
repo: Optional[str] = None
) -> Dict:
"""Update existing issue. Repo must be 'owner/repo' format."""
owner, target_repo = self._parse_repo(repo)
url = f"{self.base_url}/repos/{owner}/{target_repo}/issues/{issue_number}"
data = {}
if title is not None:
data['title'] = title
if body is not None:
data['body'] = body
if state is not None:
data['state'] = state
if labels is not None:
data['labels'] = labels
logger.info(f"Updating issue #{issue_number} in {owner}/{target_repo}")
response = self.session.patch(url, json=data)
response.raise_for_status()
return response.json()
def add_comment(
self,
issue_number: int,
comment: str,
repo: Optional[str] = None
) -> Dict:
"""Add comment to issue. Repo must be 'owner/repo' format."""
owner, target_repo = self._parse_repo(repo)
url = f"{self.base_url}/repos/{owner}/{target_repo}/issues/{issue_number}/comments"
data = {'body': comment}
logger.info(f"Adding comment to issue #{issue_number} in {owner}/{target_repo}")
response = self.session.post(url, json=data)
response.raise_for_status()
return response.json()
def get_labels(self, repo: Optional[str] = None) -> List[Dict]:
"""Get all labels from repository. Repo must be 'owner/repo' format."""
owner, target_repo = self._parse_repo(repo)
url = f"{self.base_url}/repos/{owner}/{target_repo}/labels"
logger.info(f"Getting labels from {owner}/{target_repo}")
response = self.session.get(url)
response.raise_for_status()
return response.json()
def get_org_labels(self, org: str) -> List[Dict]:
"""Get organization-level labels. Org is the organization name."""
url = f"{self.base_url}/orgs/{org}/labels"
logger.info(f"Getting organization labels for {org}")
response = self.session.get(url)
response.raise_for_status()
return response.json()
def list_repos(self, org: str) -> List[Dict]:
"""List all repositories in organization. Org is the organization name."""
url = f"{self.base_url}/orgs/{org}/repos"
logger.info(f"Listing all repositories for organization {org}")
response = self.session.get(url)
response.raise_for_status()
return response.json()
def aggregate_issues(
self,
org: str,
state: str = 'open',
labels: Optional[List[str]] = None
) -> Dict[str, List[Dict]]:
"""Fetch issues across all repositories in org."""
repos = self.list_repos(org)
aggregated = {}
logger.info(f"Aggregating issues across {len(repos)} repositories")
for repo in repos:
repo_name = repo['name']
try:
issues = self.list_issues(
state=state,
labels=labels,
repo=f"{org}/{repo_name}"
)
if issues:
aggregated[repo_name] = issues
logger.info(f"Found {len(issues)} issues in {repo_name}")
except Exception as e:
logger.error(f"Error fetching issues from {repo_name}: {e}")
return aggregated
# ========================================
# WIKI OPERATIONS (Lessons Learned)
# ========================================
def list_wiki_pages(self, repo: Optional[str] = None) -> List[Dict]:
"""List all wiki pages in repository."""
owner, target_repo = self._parse_repo(repo)
url = f"{self.base_url}/repos/{owner}/{target_repo}/wiki/pages"
logger.info(f"Listing wiki pages from {owner}/{target_repo}")
response = self.session.get(url)
response.raise_for_status()
return response.json()
def get_wiki_page(
self,
page_name: str,
repo: Optional[str] = None
) -> Dict:
"""Get a specific wiki page by name."""
owner, target_repo = self._parse_repo(repo)
url = f"{self.base_url}/repos/{owner}/{target_repo}/wiki/page/{page_name}"
logger.info(f"Getting wiki page '{page_name}' from {owner}/{target_repo}")
response = self.session.get(url)
response.raise_for_status()
return response.json()
def create_wiki_page(
self,
title: str,
content: str,
repo: Optional[str] = None
) -> Dict:
"""Create a new wiki page."""
owner, target_repo = self._parse_repo(repo)
url = f"{self.base_url}/repos/{owner}/{target_repo}/wiki/new"
data = {
'title': title,
'content_base64': self._encode_base64(content)
}
logger.info(f"Creating wiki page '{title}' in {owner}/{target_repo}")
response = self.session.post(url, json=data)
response.raise_for_status()
return response.json()
def update_wiki_page(
self,
page_name: str,
content: str,
repo: Optional[str] = None
) -> Dict:
"""Update an existing wiki page."""
owner, target_repo = self._parse_repo(repo)
url = f"{self.base_url}/repos/{owner}/{target_repo}/wiki/page/{page_name}"
data = {
'content_base64': self._encode_base64(content)
}
logger.info(f"Updating wiki page '{page_name}' in {owner}/{target_repo}")
response = self.session.patch(url, json=data)
response.raise_for_status()
return response.json()
def delete_wiki_page(
self,
page_name: str,
repo: Optional[str] = None
) -> bool:
"""Delete a wiki page."""
owner, target_repo = self._parse_repo(repo)
url = f"{self.base_url}/repos/{owner}/{target_repo}/wiki/page/{page_name}"
logger.info(f"Deleting wiki page '{page_name}' from {owner}/{target_repo}")
response = self.session.delete(url)
response.raise_for_status()
return True
def _encode_base64(self, content: str) -> str:
"""Encode content to base64 for wiki API."""
import base64
return base64.b64encode(content.encode('utf-8')).decode('utf-8')
def _decode_base64(self, content: str) -> str:
"""Decode base64 content from wiki API."""
import base64
return base64.b64decode(content.encode('utf-8')).decode('utf-8')
def search_wiki_pages(
self,
query: str,
repo: Optional[str] = None
) -> List[Dict]:
"""Search wiki pages by content (client-side filtering)."""
pages = self.list_wiki_pages(repo)
results = []
query_lower = query.lower()
for page in pages:
if query_lower in page.get('title', '').lower():
results.append(page)
return results
def create_lesson(
self,
title: str,
content: str,
tags: List[str],
category: str = "sprints",
repo: Optional[str] = None
) -> Dict:
"""Create a lessons learned entry in the wiki."""
# Sanitize title for wiki page name
page_name = f"lessons/{category}/{self._sanitize_page_name(title)}"
# Add tags as metadata at the end of content
full_content = f"{content}\n\n---\n**Tags:** {', '.join(tags)}"
return self.create_wiki_page(page_name, full_content, repo)
def search_lessons(
self,
query: Optional[str] = None,
tags: Optional[List[str]] = None,
repo: Optional[str] = None
) -> List[Dict]:
"""Search lessons learned by query and/or tags."""
pages = self.list_wiki_pages(repo)
results = []
for page in pages:
title = page.get('title', '')
# Filter to only lessons (pages starting with lessons/)
if not title.startswith('lessons/'):
continue
# If query provided, check if it matches title
if query:
if query.lower() not in title.lower():
continue
# Get full page content for tag matching if tags provided
if tags:
try:
full_page = self.get_wiki_page(title, repo)
content = self._decode_base64(full_page.get('content_base64', ''))
# Check if any tag is in the content
if not any(tag.lower() in content.lower() for tag in tags):
continue
except Exception:
continue
results.append(page)
return results
def _sanitize_page_name(self, title: str) -> str:
"""Convert title to valid wiki page name."""
# Replace spaces with hyphens, remove special chars
name = re.sub(r'[^\w\s-]', '', title)
name = re.sub(r'[\s]+', '-', name)
return name.lower()
# ========================================
# MILESTONE OPERATIONS
# ========================================
def list_milestones(
self,
state: str = 'open',
repo: Optional[str] = None
) -> List[Dict]:
"""List all milestones in repository."""
owner, target_repo = self._parse_repo(repo)
url = f"{self.base_url}/repos/{owner}/{target_repo}/milestones"
params = {'state': state}
logger.info(f"Listing milestones from {owner}/{target_repo}")
response = self.session.get(url, params=params)
response.raise_for_status()
return response.json()
def get_milestone(
self,
milestone_id: int,
repo: Optional[str] = None
) -> Dict:
"""Get a specific milestone by ID."""
owner, target_repo = self._parse_repo(repo)
url = f"{self.base_url}/repos/{owner}/{target_repo}/milestones/{milestone_id}"
logger.info(f"Getting milestone #{milestone_id} from {owner}/{target_repo}")
response = self.session.get(url)
response.raise_for_status()
return response.json()
def create_milestone(
self,
title: str,
description: Optional[str] = None,
due_on: Optional[str] = None,
repo: Optional[str] = None
) -> Dict:
"""Create a new milestone."""
owner, target_repo = self._parse_repo(repo)
url = f"{self.base_url}/repos/{owner}/{target_repo}/milestones"
data = {'title': title}
if description:
data['description'] = description
if due_on:
data['due_on'] = due_on
logger.info(f"Creating milestone '{title}' in {owner}/{target_repo}")
response = self.session.post(url, json=data)
response.raise_for_status()
return response.json()
def update_milestone(
self,
milestone_id: int,
title: Optional[str] = None,
description: Optional[str] = None,
state: Optional[str] = None,
due_on: Optional[str] = None,
repo: Optional[str] = None
) -> Dict:
"""Update an existing milestone."""
owner, target_repo = self._parse_repo(repo)
url = f"{self.base_url}/repos/{owner}/{target_repo}/milestones/{milestone_id}"
data = {}
if title is not None:
data['title'] = title
if description is not None:
data['description'] = description
if state is not None:
data['state'] = state
if due_on is not None:
data['due_on'] = due_on
logger.info(f"Updating milestone #{milestone_id} in {owner}/{target_repo}")
response = self.session.patch(url, json=data)
response.raise_for_status()
return response.json()
def delete_milestone(
self,
milestone_id: int,
repo: Optional[str] = None
) -> bool:
"""Delete a milestone."""
owner, target_repo = self._parse_repo(repo)
url = f"{self.base_url}/repos/{owner}/{target_repo}/milestones/{milestone_id}"
logger.info(f"Deleting milestone #{milestone_id} from {owner}/{target_repo}")
response = self.session.delete(url)
response.raise_for_status()
return True
# ========================================
# ISSUE DEPENDENCY OPERATIONS
# ========================================
def list_issue_dependencies(
self,
issue_number: int,
repo: Optional[str] = None
) -> List[Dict]:
"""List all dependencies for an issue (issues that block this one)."""
owner, target_repo = self._parse_repo(repo)
url = f"{self.base_url}/repos/{owner}/{target_repo}/issues/{issue_number}/dependencies"
logger.info(f"Listing dependencies for issue #{issue_number} in {owner}/{target_repo}")
response = self.session.get(url)
response.raise_for_status()
return response.json()
def create_issue_dependency(
self,
issue_number: int,
depends_on: int,
repo: Optional[str] = None
) -> Dict:
"""Create a dependency (issue_number depends on depends_on)."""
owner, target_repo = self._parse_repo(repo)
url = f"{self.base_url}/repos/{owner}/{target_repo}/issues/{issue_number}/dependencies"
data = {
'dependentIssue': {
'owner': owner,
'repo': target_repo,
'index': depends_on
}
}
logger.info(f"Creating dependency: #{issue_number} depends on #{depends_on} in {owner}/{target_repo}")
response = self.session.post(url, json=data)
response.raise_for_status()
return response.json()
def remove_issue_dependency(
self,
issue_number: int,
depends_on: int,
repo: Optional[str] = None
) -> bool:
"""Remove a dependency between issues."""
owner, target_repo = self._parse_repo(repo)
url = f"{self.base_url}/repos/{owner}/{target_repo}/issues/{issue_number}/dependencies"
data = {
'dependentIssue': {
'owner': owner,
'repo': target_repo,
'index': depends_on
}
}
logger.info(f"Removing dependency: #{issue_number} no longer depends on #{depends_on}")
response = self.session.delete(url, json=data)
response.raise_for_status()
return True
def list_issue_blocks(
self,
issue_number: int,
repo: Optional[str] = None
) -> List[Dict]:
"""List all issues that this issue blocks."""
owner, target_repo = self._parse_repo(repo)
url = f"{self.base_url}/repos/{owner}/{target_repo}/issues/{issue_number}/blocks"
logger.info(f"Listing issues blocked by #{issue_number} in {owner}/{target_repo}")
response = self.session.get(url)
response.raise_for_status()
return response.json()
# ========================================
# REPOSITORY VALIDATION
# ========================================
def get_repo_info(self, repo: Optional[str] = None) -> Dict:
"""Get repository information including owner type."""
owner, target_repo = self._parse_repo(repo)
url = f"{self.base_url}/repos/{owner}/{target_repo}"
logger.info(f"Getting repo info for {owner}/{target_repo}")
response = self.session.get(url)
response.raise_for_status()
return response.json()
def is_org_repo(self, repo: Optional[str] = None) -> bool:
"""
Check if repository belongs to an organization (not a user).
Uses the /orgs/{owner} endpoint to reliably detect organizations,
as the owner.type field in repo info may be null in some Gitea versions.
"""
owner, _ = self._parse_repo(repo)
return self._is_organization(owner)
def _is_organization(self, owner: str) -> bool:
"""
Check if an owner is an organization by querying the orgs endpoint.
Args:
owner: The owner name to check
Returns:
True if owner is an organization, False if user or unknown
"""
url = f"{self.base_url}/orgs/{owner}"
try:
response = self.session.get(url)
# 200 = organization exists, 404 = not an organization (user account)
return response.status_code == 200
except Exception as e:
logger.warning(f"Failed to check if {owner} is organization: {e}")
return False
def get_branch_protection(
self,
branch: str,
repo: Optional[str] = None
) -> Optional[Dict]:
"""Get branch protection rules for a branch."""
owner, target_repo = self._parse_repo(repo)
url = f"{self.base_url}/repos/{owner}/{target_repo}/branch_protections/{branch}"
logger.info(f"Getting branch protection for {branch} in {owner}/{target_repo}")
try:
response = self.session.get(url)
response.raise_for_status()
return response.json()
except requests.exceptions.HTTPError as e:
if e.response.status_code == 404:
return None # No protection rules
raise
def create_label(
self,
name: str,
color: str,
description: Optional[str] = None,
repo: Optional[str] = None
) -> Dict:
"""Create a new label in the repository."""
owner, target_repo = self._parse_repo(repo)
url = f"{self.base_url}/repos/{owner}/{target_repo}/labels"
data = {
'name': name,
'color': color.lstrip('#') # Remove # if present
}
if description:
data['description'] = description
logger.info(f"Creating label '{name}' in {owner}/{target_repo}")
response = self.session.post(url, json=data)
response.raise_for_status()
return response.json()
# ========================================
# PULL REQUEST OPERATIONS
# ========================================
def list_pull_requests(
self,
state: str = 'open',
sort: str = 'recentupdate',
labels: Optional[List[str]] = None,
repo: Optional[str] = None
) -> List[Dict]:
"""
List pull requests from Gitea repository.
Args:
state: PR state (open, closed, all)
sort: Sort order (oldest, recentupdate, leastupdate, mostcomment, leastcomment, priority)
labels: Filter by labels
repo: Repository in 'owner/repo' format
Returns:
List of pull request dictionaries
"""
owner, target_repo = self._parse_repo(repo)
url = f"{self.base_url}/repos/{owner}/{target_repo}/pulls"
params = {'state': state, 'sort': sort}
if labels:
params['labels'] = ','.join(labels)
logger.info(f"Listing PRs from {owner}/{target_repo} with state={state}")
response = self.session.get(url, params=params)
response.raise_for_status()
return response.json()
def get_pull_request(
self,
pr_number: int,
repo: Optional[str] = None
) -> Dict:
"""Get specific pull request details."""
owner, target_repo = self._parse_repo(repo)
url = f"{self.base_url}/repos/{owner}/{target_repo}/pulls/{pr_number}"
logger.info(f"Getting PR #{pr_number} from {owner}/{target_repo}")
response = self.session.get(url)
response.raise_for_status()
return response.json()
def get_pr_diff(
self,
pr_number: int,
repo: Optional[str] = None
) -> str:
"""Get the diff for a pull request."""
owner, target_repo = self._parse_repo(repo)
url = f"{self.base_url}/repos/{owner}/{target_repo}/pulls/{pr_number}.diff"
logger.info(f"Getting diff for PR #{pr_number} from {owner}/{target_repo}")
response = self.session.get(url)
response.raise_for_status()
return response.text
def get_pr_comments(
self,
pr_number: int,
repo: Optional[str] = None
) -> List[Dict]:
"""Get comments on a pull request (uses issue comments endpoint)."""
owner, target_repo = self._parse_repo(repo)
# PRs share comment endpoint with issues in Gitea
url = f"{self.base_url}/repos/{owner}/{target_repo}/issues/{pr_number}/comments"
logger.info(f"Getting comments for PR #{pr_number} from {owner}/{target_repo}")
response = self.session.get(url)
response.raise_for_status()
return response.json()
def create_pr_review(
self,
pr_number: int,
body: str,
event: str = 'COMMENT',
comments: Optional[List[Dict]] = None,
repo: Optional[str] = None
) -> Dict:
"""
Create a review on a pull request.
Args:
pr_number: Pull request number
body: Review body/summary
event: Review action (APPROVE, REQUEST_CHANGES, COMMENT)
comments: Optional list of inline comments with path, position, body
repo: Repository in 'owner/repo' format
Returns:
Created review dictionary
"""
owner, target_repo = self._parse_repo(repo)
url = f"{self.base_url}/repos/{owner}/{target_repo}/pulls/{pr_number}/reviews"
data = {
'body': body,
'event': event
}
if comments:
data['comments'] = comments
logger.info(f"Creating review on PR #{pr_number} in {owner}/{target_repo}")
response = self.session.post(url, json=data)
response.raise_for_status()
return response.json()
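# A hedged usage sketch (path, position, and numbers are hypothetical):
# client.create_pr_review(
#     42, "Looks good overall", event="REQUEST_CHANGES",
#     comments=[{"path": "app/main.py", "position": 3, "body": "typo here"}]
# )
# Each inline comment carries the path, diff position, and body fields
# described in the docstring above.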
def add_pr_comment(
self,
pr_number: int,
body: str,
repo: Optional[str] = None
) -> Dict:
"""Add a general comment to a pull request (uses issue comment endpoint)."""
owner, target_repo = self._parse_repo(repo)
# PRs share comment endpoint with issues in Gitea
url = f"{self.base_url}/repos/{owner}/{target_repo}/issues/{pr_number}/comments"
data = {'body': body}
logger.info(f"Adding comment to PR #{pr_number} in {owner}/{target_repo}")
response = self.session.post(url, json=data)
response.raise_for_status()
return response.json()

View File

@@ -0,0 +1,931 @@
"""
MCP Server entry point for Gitea integration.
Provides Gitea tools to Claude Code via JSON-RPC 2.0 over stdio.
"""
import asyncio
import logging
import json
from mcp.server import Server
from mcp.server.stdio import stdio_server
from mcp.types import Tool, TextContent
from .config import GiteaConfig
from .gitea_client import GiteaClient
from .tools.issues import IssueTools
from .tools.labels import LabelTools
from .tools.wiki import WikiTools
from .tools.milestones import MilestoneTools
from .tools.dependencies import DependencyTools
from .tools.pull_requests import PullRequestTools
# Suppress noisy MCP validation warnings on stderr
logging.basicConfig(level=logging.INFO)
logging.getLogger("root").setLevel(logging.ERROR)
logging.getLogger("mcp").setLevel(logging.ERROR)
logger = logging.getLogger(__name__)
class GiteaMCPServer:
"""MCP Server for Gitea integration"""
def __init__(self):
self.server = Server("gitea-mcp")
self.config = None
self.client = None
self.issue_tools = None
self.label_tools = None
self.wiki_tools = None
self.milestone_tools = None
self.dependency_tools = None
self.pr_tools = None
async def initialize(self):
"""
Initialize server and load configuration.
Raises:
Exception: If initialization fails
"""
try:
config_loader = GiteaConfig()
self.config = config_loader.load()
self.client = GiteaClient()
self.issue_tools = IssueTools(self.client)
self.label_tools = LabelTools(self.client)
self.wiki_tools = WikiTools(self.client)
self.milestone_tools = MilestoneTools(self.client)
self.dependency_tools = DependencyTools(self.client)
self.pr_tools = PullRequestTools(self.client)
logger.info(f"Gitea MCP Server initialized in {self.config['mode']} mode")
except Exception as e:
logger.error(f"Failed to initialize: {e}")
raise
def setup_tools(self):
"""Register all available tools with the MCP server"""
@self.server.list_tools()
async def list_tools() -> list[Tool]:
"""Return list of available tools"""
return [
Tool(
name="list_issues",
description="List issues from Gitea repository",
inputSchema={
"type": "object",
"properties": {
"state": {
"type": "string",
"enum": ["open", "closed", "all"],
"default": "open",
"description": "Issue state filter"
},
"labels": {
"type": "array",
"items": {"type": "string"},
"description": "Filter by labels"
},
"repo": {
"type": "string",
"description": "Repository name (for PMO mode)"
}
}
}
),
Tool(
name="get_issue",
description="Get specific issue details",
inputSchema={
"type": "object",
"properties": {
"issue_number": {
"type": "integer",
"description": "Issue number"
},
"repo": {
"type": "string",
"description": "Repository name (for PMO mode)"
}
},
"required": ["issue_number"]
}
),
Tool(
name="create_issue",
description="Create a new issue in Gitea",
inputSchema={
"type": "object",
"properties": {
"title": {
"type": "string",
"description": "Issue title"
},
"body": {
"type": "string",
"description": "Issue description"
},
"labels": {
"type": "array",
"items": {"type": "string"},
"description": "List of label names"
},
"repo": {
"type": "string",
"description": "Repository name (for PMO mode)"
}
},
"required": ["title", "body"]
}
),
Tool(
name="update_issue",
description="Update existing issue",
inputSchema={
"type": "object",
"properties": {
"issue_number": {
"type": "integer",
"description": "Issue number"
},
"title": {
"type": "string",
"description": "New title"
},
"body": {
"type": "string",
"description": "New body"
},
"state": {
"type": "string",
"enum": ["open", "closed"],
"description": "New state"
},
"labels": {
"type": "array",
"items": {"type": "string"},
"description": "New labels"
},
"repo": {
"type": "string",
"description": "Repository name (for PMO mode)"
}
},
"required": ["issue_number"]
}
),
Tool(
name="add_comment",
description="Add comment to issue",
inputSchema={
"type": "object",
"properties": {
"issue_number": {
"type": "integer",
"description": "Issue number"
},
"comment": {
"type": "string",
"description": "Comment text"
},
"repo": {
"type": "string",
"description": "Repository name (for PMO mode)"
}
},
"required": ["issue_number", "comment"]
}
),
Tool(
name="get_labels",
description="Get all available labels (org + repo)",
inputSchema={
"type": "object",
"properties": {
"repo": {
"type": "string",
"description": "Repository name (for PMO mode)"
}
}
}
),
Tool(
name="suggest_labels",
description="Analyze context and suggest appropriate labels",
inputSchema={
"type": "object",
"properties": {
"context": {
"type": "string",
"description": "Issue title + description or sprint context"
},
"repo": {
"type": "string",
"description": "Repository name (owner/repo format)"
}
},
"required": ["context"]
}
),
Tool(
name="aggregate_issues",
description="Fetch issues across all repositories (PMO mode)",
inputSchema={
"type": "object",
"properties": {
"org": {
"type": "string",
"description": "Organization name (e.g. 'bandit')"
},
"state": {
"type": "string",
"enum": ["open", "closed", "all"],
"default": "open",
"description": "Issue state filter"
},
"labels": {
"type": "array",
"items": {"type": "string"},
"description": "Filter by labels"
}
},
"required": ["org"]
}
),
# Wiki Tools (Lessons Learned)
Tool(
name="list_wiki_pages",
description="List all wiki pages in repository",
inputSchema={
"type": "object",
"properties": {
"repo": {
"type": "string",
"description": "Repository name (owner/repo format)"
}
}
}
),
Tool(
name="get_wiki_page",
description="Get a specific wiki page by name",
inputSchema={
"type": "object",
"properties": {
"page_name": {
"type": "string",
"description": "Wiki page name/path"
},
"repo": {
"type": "string",
"description": "Repository name (owner/repo format)"
}
},
"required": ["page_name"]
}
),
Tool(
name="create_wiki_page",
description="Create a new wiki page",
inputSchema={
"type": "object",
"properties": {
"title": {
"type": "string",
"description": "Page title/name"
},
"content": {
"type": "string",
"description": "Page content (markdown)"
},
"repo": {
"type": "string",
"description": "Repository name (owner/repo format)"
}
},
"required": ["title", "content"]
}
),
Tool(
name="update_wiki_page",
description="Update an existing wiki page",
inputSchema={
"type": "object",
"properties": {
"page_name": {
"type": "string",
"description": "Wiki page name/path"
},
"content": {
"type": "string",
"description": "New page content (markdown)"
},
"repo": {
"type": "string",
"description": "Repository name (owner/repo format)"
}
},
"required": ["page_name", "content"]
}
),
Tool(
name="create_lesson",
description="Create a lessons learned entry in the wiki",
inputSchema={
"type": "object",
"properties": {
"title": {
"type": "string",
"description": "Lesson title (e.g., 'Sprint 16 - Prevent Infinite Loops')"
},
"content": {
"type": "string",
"description": "Lesson content (markdown with context, problem, solution, prevention)"
},
"tags": {
"type": "array",
"items": {"type": "string"},
"description": "Tags for categorization"
},
"category": {
"type": "string",
"default": "sprints",
"description": "Category (sprints, patterns, architecture, etc.)"
},
"repo": {
"type": "string",
"description": "Repository name (owner/repo format)"
}
},
"required": ["title", "content", "tags"]
}
),
Tool(
name="search_lessons",
description="Search lessons learned from previous sprints",
inputSchema={
"type": "object",
"properties": {
"query": {
"type": "string",
"description": "Search query (optional)"
},
"tags": {
"type": "array",
"items": {"type": "string"},
"description": "Tags to filter by (optional)"
},
"limit": {
"type": "integer",
"default": 20,
"description": "Maximum results"
},
"repo": {
"type": "string",
"description": "Repository name (owner/repo format)"
}
}
}
),
# Milestone Tools
Tool(
name="list_milestones",
description="List all milestones in repository",
inputSchema={
"type": "object",
"properties": {
"state": {
"type": "string",
"enum": ["open", "closed", "all"],
"default": "open",
"description": "Milestone state filter"
},
"repo": {
"type": "string",
"description": "Repository name (owner/repo format)"
}
}
}
),
Tool(
name="get_milestone",
description="Get a specific milestone by ID",
inputSchema={
"type": "object",
"properties": {
"milestone_id": {
"type": "integer",
"description": "Milestone ID"
},
"repo": {
"type": "string",
"description": "Repository name (owner/repo format)"
}
},
"required": ["milestone_id"]
}
),
Tool(
name="create_milestone",
description="Create a new milestone",
inputSchema={
"type": "object",
"properties": {
"title": {
"type": "string",
"description": "Milestone title"
},
"description": {
"type": "string",
"description": "Milestone description"
},
"due_on": {
"type": "string",
"description": "Due date (ISO 8601 format)"
},
"repo": {
"type": "string",
"description": "Repository name (owner/repo format)"
}
},
"required": ["title"]
}
),
Tool(
name="update_milestone",
description="Update an existing milestone",
inputSchema={
"type": "object",
"properties": {
"milestone_id": {
"type": "integer",
"description": "Milestone ID"
},
"title": {
"type": "string",
"description": "New title"
},
"description": {
"type": "string",
"description": "New description"
},
"state": {
"type": "string",
"enum": ["open", "closed"],
"description": "New state"
},
"due_on": {
"type": "string",
"description": "New due date"
},
"repo": {
"type": "string",
"description": "Repository name (owner/repo format)"
}
},
"required": ["milestone_id"]
}
),
Tool(
name="delete_milestone",
description="Delete a milestone",
inputSchema={
"type": "object",
"properties": {
"milestone_id": {
"type": "integer",
"description": "Milestone ID"
},
"repo": {
"type": "string",
"description": "Repository name (owner/repo format)"
}
},
"required": ["milestone_id"]
}
),
# Dependency Tools
Tool(
name="list_issue_dependencies",
description="List all dependencies for an issue (issues that block this one)",
inputSchema={
"type": "object",
"properties": {
"issue_number": {
"type": "integer",
"description": "Issue number"
},
"repo": {
"type": "string",
"description": "Repository name (owner/repo format)"
}
},
"required": ["issue_number"]
}
),
Tool(
name="create_issue_dependency",
description="Create a dependency (issue depends on another issue)",
inputSchema={
"type": "object",
"properties": {
"issue_number": {
"type": "integer",
"description": "Issue that will depend on another"
},
"depends_on": {
"type": "integer",
"description": "Issue that blocks issue_number"
},
"repo": {
"type": "string",
"description": "Repository name (owner/repo format)"
}
},
"required": ["issue_number", "depends_on"]
}
),
Tool(
name="remove_issue_dependency",
description="Remove a dependency between issues",
inputSchema={
"type": "object",
"properties": {
"issue_number": {
"type": "integer",
"description": "Issue that depends on another"
},
"depends_on": {
"type": "integer",
"description": "Issue being depended on"
},
"repo": {
"type": "string",
"description": "Repository name (owner/repo format)"
}
},
"required": ["issue_number", "depends_on"]
}
),
Tool(
name="get_execution_order",
description="Get parallelizable execution order for issues based on dependencies",
inputSchema={
"type": "object",
"properties": {
"issue_numbers": {
"type": "array",
"items": {"type": "integer"},
"description": "List of issue numbers to analyze"
},
"repo": {
"type": "string",
"description": "Repository name (owner/repo format)"
}
},
"required": ["issue_numbers"]
}
),
# Validation Tools
Tool(
name="validate_repo_org",
description="Check if repository belongs to an organization",
inputSchema={
"type": "object",
"properties": {
"repo": {
"type": "string",
"description": "Repository name (owner/repo format)"
}
}
}
),
Tool(
name="get_branch_protection",
description="Get branch protection rules",
inputSchema={
"type": "object",
"properties": {
"branch": {
"type": "string",
"description": "Branch name"
},
"repo": {
"type": "string",
"description": "Repository name (owner/repo format)"
}
},
"required": ["branch"]
}
),
Tool(
name="create_label",
description="Create a new label in the repository",
inputSchema={
"type": "object",
"properties": {
"name": {
"type": "string",
"description": "Label name"
},
"color": {
"type": "string",
"description": "Label color (hex code)"
},
"description": {
"type": "string",
"description": "Label description"
},
"repo": {
"type": "string",
"description": "Repository name (owner/repo format)"
}
},
"required": ["name", "color"]
}
),
# Pull Request Tools
Tool(
name="list_pull_requests",
description="List pull requests from repository",
inputSchema={
"type": "object",
"properties": {
"state": {
"type": "string",
"enum": ["open", "closed", "all"],
"default": "open",
"description": "PR state filter"
},
"sort": {
"type": "string",
"enum": ["oldest", "recentupdate", "leastupdate", "mostcomment", "leastcomment", "priority"],
"default": "recentupdate",
"description": "Sort order"
},
"labels": {
"type": "array",
"items": {"type": "string"},
"description": "Filter by labels"
},
"repo": {
"type": "string",
"description": "Repository name (owner/repo format)"
}
}
}
),
Tool(
name="get_pull_request",
description="Get specific pull request details",
inputSchema={
"type": "object",
"properties": {
"pr_number": {
"type": "integer",
"description": "Pull request number"
},
"repo": {
"type": "string",
"description": "Repository name (owner/repo format)"
}
},
"required": ["pr_number"]
}
),
Tool(
name="get_pr_diff",
description="Get the diff for a pull request",
inputSchema={
"type": "object",
"properties": {
"pr_number": {
"type": "integer",
"description": "Pull request number"
},
"repo": {
"type": "string",
"description": "Repository name (owner/repo format)"
}
},
"required": ["pr_number"]
}
),
Tool(
name="get_pr_comments",
description="Get comments on a pull request",
inputSchema={
"type": "object",
"properties": {
"pr_number": {
"type": "integer",
"description": "Pull request number"
},
"repo": {
"type": "string",
"description": "Repository name (owner/repo format)"
}
},
"required": ["pr_number"]
}
),
Tool(
name="create_pr_review",
description="Create a review on a pull request (approve, request changes, or comment)",
inputSchema={
"type": "object",
"properties": {
"pr_number": {
"type": "integer",
"description": "Pull request number"
},
"body": {
"type": "string",
"description": "Review body/summary"
},
"event": {
"type": "string",
"enum": ["APPROVE", "REQUEST_CHANGES", "COMMENT"],
"default": "COMMENT",
"description": "Review action"
},
"comments": {
"type": "array",
"items": {
"type": "object",
"properties": {
"path": {"type": "string"},
"position": {"type": "integer"},
"body": {"type": "string"}
}
},
"description": "Optional inline comments"
},
"repo": {
"type": "string",
"description": "Repository name (owner/repo format)"
}
},
"required": ["pr_number", "body"]
}
),
Tool(
name="add_pr_comment",
description="Add a general comment to a pull request",
inputSchema={
"type": "object",
"properties": {
"pr_number": {
"type": "integer",
"description": "Pull request number"
},
"body": {
"type": "string",
"description": "Comment text"
},
"repo": {
"type": "string",
"description": "Repository name (owner/repo format)"
}
},
"required": ["pr_number", "body"]
}
)
]
@self.server.call_tool()
async def call_tool(name: str, arguments: dict) -> list[TextContent]:
"""
Handle tool invocation.
Args:
name: Tool name
arguments: Tool arguments
Returns:
List of TextContent with results
"""
try:
# Route to appropriate tool handler
if name == "list_issues":
result = await self.issue_tools.list_issues(**arguments)
elif name == "get_issue":
result = await self.issue_tools.get_issue(**arguments)
elif name == "create_issue":
result = await self.issue_tools.create_issue(**arguments)
elif name == "update_issue":
result = await self.issue_tools.update_issue(**arguments)
elif name == "add_comment":
result = await self.issue_tools.add_comment(**arguments)
elif name == "get_labels":
result = await self.label_tools.get_labels(**arguments)
elif name == "suggest_labels":
result = await self.label_tools.suggest_labels(**arguments)
elif name == "aggregate_issues":
result = await self.issue_tools.aggregate_issues(**arguments)
# Wiki tools
elif name == "list_wiki_pages":
result = await self.wiki_tools.list_wiki_pages(**arguments)
elif name == "get_wiki_page":
result = await self.wiki_tools.get_wiki_page(**arguments)
elif name == "create_wiki_page":
result = await self.wiki_tools.create_wiki_page(**arguments)
elif name == "update_wiki_page":
result = await self.wiki_tools.update_wiki_page(**arguments)
elif name == "create_lesson":
result = await self.wiki_tools.create_lesson(**arguments)
elif name == "search_lessons":
tags = arguments.get('tags')
result = await self.wiki_tools.search_lessons(
query=arguments.get('query'),
tags=tags,
limit=arguments.get('limit', 20),
repo=arguments.get('repo')
)
# Milestone tools
elif name == "list_milestones":
result = await self.milestone_tools.list_milestones(**arguments)
elif name == "get_milestone":
result = await self.milestone_tools.get_milestone(**arguments)
elif name == "create_milestone":
result = await self.milestone_tools.create_milestone(**arguments)
elif name == "update_milestone":
result = await self.milestone_tools.update_milestone(**arguments)
elif name == "delete_milestone":
result = await self.milestone_tools.delete_milestone(**arguments)
# Dependency tools
elif name == "list_issue_dependencies":
result = await self.dependency_tools.list_issue_dependencies(**arguments)
elif name == "create_issue_dependency":
result = await self.dependency_tools.create_issue_dependency(**arguments)
elif name == "remove_issue_dependency":
result = await self.dependency_tools.remove_issue_dependency(**arguments)
elif name == "get_execution_order":
result = await self.dependency_tools.get_execution_order(**arguments)
# Validation tools
elif name == "validate_repo_org":
is_org = self.client.is_org_repo(arguments.get('repo'))
result = {'is_organization': is_org}
elif name == "get_branch_protection":
result = self.client.get_branch_protection(
arguments['branch'],
arguments.get('repo')
)
elif name == "create_label":
result = self.client.create_label(
arguments['name'],
arguments['color'],
arguments.get('description'),
arguments.get('repo')
)
# Pull Request tools
elif name == "list_pull_requests":
result = await self.pr_tools.list_pull_requests(**arguments)
elif name == "get_pull_request":
result = await self.pr_tools.get_pull_request(**arguments)
elif name == "get_pr_diff":
result = await self.pr_tools.get_pr_diff(**arguments)
elif name == "get_pr_comments":
result = await self.pr_tools.get_pr_comments(**arguments)
elif name == "create_pr_review":
result = await self.pr_tools.create_pr_review(**arguments)
elif name == "add_pr_comment":
result = await self.pr_tools.add_pr_comment(**arguments)
else:
raise ValueError(f"Unknown tool: {name}")
return [TextContent(
type="text",
text=json.dumps(result, indent=2)
)]
except Exception as e:
logger.error(f"Tool {name} failed: {e}")
return [TextContent(
type="text",
text=f"Error: {str(e)}"
)]
async def run(self):
"""Run the MCP server"""
await self.initialize()
self.setup_tools()
async with stdio_server() as (read_stream, write_stream):
await self.server.run(
read_stream,
write_stream,
self.server.create_initialization_options()
)
async def main():
"""Main entry point"""
server = GiteaMCPServer()
await server.run()
if __name__ == "__main__":
asyncio.run(main())
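# For orientation, a sketch of the framing the mcp library handles on stdio
# (field names follow the MCP spec, not anything defined in this codebase):
# {"jsonrpc": "2.0", "id": 1, "method": "tools/call",
#  "params": {"name": "list_issues", "arguments": {"state": "open"}}}
# The response wraps the JSON produced by call_tool() as TextContent.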

View File

@@ -0,0 +1,11 @@
"""
MCP tools for Gitea integration.
This package provides MCP tool implementations for:
- Issue operations (issues.py)
- Label management (labels.py)
- Wiki operations (wiki.py)
- Milestone management (milestones.py)
- Issue dependencies (dependencies.py)
- Pull request operations (pull_requests.py)
"""

View File

@@ -0,0 +1,216 @@
"""
Issue dependency management tools for MCP server.
Provides async wrappers for issue dependency operations:
- List/create/remove dependencies
- Build dependency graphs for parallel execution
"""
import asyncio
import logging
from typing import List, Dict, Optional, Set
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class DependencyTools:
"""Async wrappers for Gitea issue dependency operations"""
def __init__(self, gitea_client):
"""
Initialize dependency tools.
Args:
gitea_client: GiteaClient instance
"""
self.gitea = gitea_client
async def list_issue_dependencies(
self,
issue_number: int,
repo: Optional[str] = None
) -> List[Dict]:
"""
List all dependencies for an issue (issues that block this one).
Args:
issue_number: Issue number
repo: Repository in owner/repo format
Returns:
List of issues that this issue depends on
"""
loop = asyncio.get_event_loop()
return await loop.run_in_executor(
None,
lambda: self.gitea.list_issue_dependencies(issue_number, repo)
)
async def create_issue_dependency(
self,
issue_number: int,
depends_on: int,
repo: Optional[str] = None
) -> Dict:
"""
Create a dependency between issues.
Args:
issue_number: The issue that will depend on another
depends_on: The issue that blocks issue_number
repo: Repository in owner/repo format
Returns:
Created dependency information
"""
loop = asyncio.get_event_loop()
return await loop.run_in_executor(
None,
lambda: self.gitea.create_issue_dependency(issue_number, depends_on, repo)
)
async def remove_issue_dependency(
self,
issue_number: int,
depends_on: int,
repo: Optional[str] = None
) -> bool:
"""
Remove a dependency between issues.
Args:
issue_number: The issue that currently depends on another
depends_on: The issue being depended on
repo: Repository in owner/repo format
Returns:
True if removed successfully
"""
loop = asyncio.get_event_loop()
return await loop.run_in_executor(
None,
lambda: self.gitea.remove_issue_dependency(issue_number, depends_on, repo)
)
async def list_issue_blocks(
self,
issue_number: int,
repo: Optional[str] = None
) -> List[Dict]:
"""
List all issues that this issue blocks.
Args:
issue_number: Issue number
repo: Repository in owner/repo format
Returns:
List of issues blocked by this issue
"""
loop = asyncio.get_event_loop()
return await loop.run_in_executor(
None,
lambda: self.gitea.list_issue_blocks(issue_number, repo)
)
async def build_dependency_graph(
self,
issue_numbers: List[int],
repo: Optional[str] = None
) -> Dict[int, List[int]]:
"""
Build a dependency graph for a list of issues.
Args:
issue_numbers: List of issue numbers to analyze
repo: Repository in owner/repo format
Returns:
Dictionary mapping issue_number -> list of issues it depends on
"""
graph = {}
for issue_num in issue_numbers:
try:
deps = await self.list_issue_dependencies(issue_num, repo)
graph[issue_num] = [
d.get('number') or d.get('index')
for d in deps
if (d.get('number') or d.get('index')) in issue_numbers
]
except Exception as e:
logger.warning(f"Could not fetch dependencies for #{issue_num}: {e}")
graph[issue_num] = []
return graph
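# Example with hypothetical issue numbers: if #12 depends on #10 and #11,
# build_dependency_graph([10, 11, 12]) yields {10: [], 11: [], 12: [10, 11]};
# dependencies pointing outside the given list are dropped by the
# membership filter above.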
async def get_ready_tasks(
self,
issue_numbers: List[int],
completed: Set[int],
repo: Optional[str] = None
) -> List[int]:
"""
Get tasks that are ready to execute (no unresolved dependencies).
Args:
issue_numbers: List of all issue numbers in sprint
completed: Set of already completed issue numbers
repo: Repository in owner/repo format
Returns:
List of issue numbers that can be executed now
"""
graph = await self.build_dependency_graph(issue_numbers, repo)
ready = []
for issue_num in issue_numbers:
if issue_num in completed:
continue
deps = graph.get(issue_num, [])
# Task is ready if all its dependencies are completed
if all(dep in completed for dep in deps):
ready.append(issue_num)
return ready
async def get_execution_order(
self,
issue_numbers: List[int],
repo: Optional[str] = None
) -> List[List[int]]:
"""
Get a parallelizable execution order for issues.
Returns batches of issues that can be executed in parallel.
Each batch contains issues with no unresolved dependencies.
Args:
issue_numbers: List of all issue numbers
repo: Repository in owner/repo format
Returns:
List of batches, where each batch can be executed in parallel
"""
graph = await self.build_dependency_graph(issue_numbers, repo)
completed: Set[int] = set()
remaining = set(issue_numbers)
batches = []
while remaining:
# Find all tasks with no unresolved dependencies
batch = []
for issue_num in remaining:
deps = graph.get(issue_num, [])
if all(dep in completed for dep in deps):
batch.append(issue_num)
if not batch:
# Circular dependency detected
logger.error(f"Circular dependency detected! Remaining: {remaining}")
batch = list(remaining) # Force include remaining to avoid infinite loop
batches.append(batch)
completed.update(batch)
remaining -= set(batch)
return batches
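# Continuing the sketch above: with #12 depending on #10 and #11, and #13
# independent, get_execution_order([10, 11, 12, 13]) would return
# [[10, 11, 13], [12]] - order within a batch is not guaranteed, but every
# issue in a batch has all its dependencies satisfied by earlier batches.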

View File

@@ -0,0 +1,261 @@
"""
Issue management tools for MCP server.
Provides async wrappers for issue CRUD operations with:
- Branch-aware security
- PMO multi-repo support
- Comprehensive error handling
"""
import asyncio
import subprocess
import logging
from typing import List, Dict, Optional
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class IssueTools:
"""Async wrappers for Gitea issue operations with branch detection"""
def __init__(self, gitea_client):
"""
Initialize issue tools.
Args:
gitea_client: GiteaClient instance
"""
self.gitea = gitea_client
def _get_current_branch(self) -> str:
"""
Get current git branch.
Returns:
Current branch name or 'unknown' if not in a git repo
"""
try:
result = subprocess.run(
['git', 'rev-parse', '--abbrev-ref', 'HEAD'],
capture_output=True,
text=True,
check=True
)
return result.stdout.strip()
except (subprocess.CalledProcessError, FileNotFoundError):
# Not a git repo, or git itself is unavailable
return "unknown"
def _check_branch_permissions(self, operation: str) -> bool:
"""
Check if operation is allowed on current branch.
Args:
operation: Operation name (list_issues, create_issue, etc.)
Returns:
True if operation is allowed, False otherwise
"""
branch = self._get_current_branch()
# Production branches (read-only except incidents)
if branch in ['main', 'master'] or branch.startswith('prod/'):
return operation in ['list_issues', 'get_issue', 'get_labels']
# Staging branches (read-only for code)
if branch == 'staging' or branch.startswith('stage/'):
return operation in ['list_issues', 'get_issue', 'get_labels', 'create_issue']
# Development branches (full access)
if branch in ['development', 'develop'] or branch.startswith(('feat/', 'feature/', 'dev/')):
return True
# Unknown branch - be restrictive
return False
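# Sketch of the resulting policy: on 'main' only the read operations pass,
# e.g. _check_branch_permissions('create_issue') is False, while on a
# 'feat/login-fix' branch every issue operation is allowed.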
async def list_issues(
self,
state: str = 'open',
labels: Optional[List[str]] = None,
repo: Optional[str] = None
) -> List[Dict]:
"""
List issues from repository (async wrapper).
Args:
state: Issue state (open, closed, all)
labels: Filter by labels
repo: Override configured repo (for PMO multi-repo)
Returns:
List of issue dictionaries
Raises:
PermissionError: If operation not allowed on current branch
"""
if not self._check_branch_permissions('list_issues'):
branch = self._get_current_branch()
raise PermissionError(
f"Cannot list issues on branch '{branch}'. "
f"Switch to a development branch."
)
loop = asyncio.get_event_loop()
return await loop.run_in_executor(
None,
lambda: self.gitea.list_issues(state, labels, repo)
)
async def get_issue(
self,
issue_number: int,
repo: Optional[str] = None
) -> Dict:
"""
Get specific issue details (async wrapper).
Args:
issue_number: Issue number
repo: Override configured repo (for PMO multi-repo)
Returns:
Issue dictionary
Raises:
PermissionError: If operation not allowed on current branch
"""
if not self._check_branch_permissions('get_issue'):
branch = self._get_current_branch()
raise PermissionError(
f"Cannot get issue on branch '{branch}'. "
f"Switch to a development branch."
)
loop = asyncio.get_event_loop()
return await loop.run_in_executor(
None,
lambda: self.gitea.get_issue(issue_number, repo)
)
async def create_issue(
self,
title: str,
body: str,
labels: Optional[List[str]] = None,
repo: Optional[str] = None
) -> Dict:
"""
Create new issue (async wrapper with branch check).
Args:
title: Issue title
body: Issue description
labels: List of label names
repo: Override configured repo (for PMO multi-repo)
Returns:
Created issue dictionary
Raises:
PermissionError: If operation not allowed on current branch
"""
if not self._check_branch_permissions('create_issue'):
branch = self._get_current_branch()
raise PermissionError(
f"Cannot create issues on branch '{branch}'. "
f"Switch to a development branch to create issues."
)
loop = asyncio.get_event_loop()
return await loop.run_in_executor(
None,
lambda: self.gitea.create_issue(title, body, labels, repo)
)
async def update_issue(
self,
issue_number: int,
title: Optional[str] = None,
body: Optional[str] = None,
state: Optional[str] = None,
labels: Optional[List[str]] = None,
repo: Optional[str] = None
) -> Dict:
"""
Update existing issue (async wrapper with branch check).
Args:
issue_number: Issue number
title: New title (optional)
body: New body (optional)
state: New state - 'open' or 'closed' (optional)
labels: New labels (optional)
repo: Override configured repo (for PMO multi-repo)
Returns:
Updated issue dictionary
Raises:
PermissionError: If operation not allowed on current branch
"""
if not self._check_branch_permissions('update_issue'):
branch = self._get_current_branch()
raise PermissionError(
f"Cannot update issues on branch '{branch}'. "
f"Switch to a development branch to update issues."
)
loop = asyncio.get_event_loop()
return await loop.run_in_executor(
None,
lambda: self.gitea.update_issue(issue_number, title, body, state, labels, repo)
)
async def add_comment(
self,
issue_number: int,
comment: str,
repo: Optional[str] = None
) -> Dict:
"""
Add comment to issue (async wrapper with branch check).
Args:
issue_number: Issue number
comment: Comment text
repo: Override configured repo (for PMO multi-repo)
Returns:
Created comment dictionary
Raises:
PermissionError: If operation not allowed on current branch
"""
if not self._check_branch_permissions('add_comment'):
branch = self._get_current_branch()
raise PermissionError(
f"Cannot add comments on branch '{branch}'. "
f"Switch to a development branch to add comments."
)
loop = asyncio.get_event_loop()
return await loop.run_in_executor(
None,
lambda: self.gitea.add_comment(issue_number, comment, repo)
)
async def aggregate_issues(
self,
org: str,
state: str = 'open',
labels: Optional[List[str]] = None
) -> Dict[str, List[Dict]]:
"""Aggregate issues across all repositories in org."""
if not self._check_branch_permissions('aggregate_issues'):
branch = self._get_current_branch()
raise PermissionError(f"Cannot aggregate issues on branch '{branch}'.")
loop = asyncio.get_event_loop()
return await loop.run_in_executor(
None,
lambda: self.gitea.aggregate_issues(org, state, labels)
)
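# A hedged sketch of the return shape (repo keys are illustrative):
# await issue_tools.aggregate_issues("bandit") ->
# {"bandit/api": [ ...issue dicts... ], "bandit/web": []}
# one list per repository in the organization.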

View File

@@ -0,0 +1,261 @@
"""
Label management tools for MCP server.
Provides async wrappers for label operations with:
- Label taxonomy retrieval
- Intelligent label suggestion
- Dynamic label detection
"""
import asyncio
import logging
import re
from typing import List, Dict, Optional
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class LabelTools:
"""Async wrappers for Gitea label operations"""
def __init__(self, gitea_client):
"""
Initialize label tools.
Args:
gitea_client: GiteaClient instance
"""
self.gitea = gitea_client
async def get_labels(self, repo: Optional[str] = None) -> Dict[str, List[Dict]]:
"""Get all labels (org + repo if org-owned, repo-only if user-owned)."""
loop = asyncio.get_event_loop()
target_repo = repo or self.gitea.repo
if not target_repo or '/' not in target_repo:
raise ValueError("Use 'owner/repo' format (e.g. 'org/repo-name')")
# Check if repo belongs to an organization or user
is_org = await loop.run_in_executor(
None,
lambda: self.gitea.is_org_repo(target_repo)
)
org_labels = []
if is_org:
org = target_repo.split('/')[0]
org_labels = await loop.run_in_executor(
None,
lambda: self.gitea.get_org_labels(org)
)
repo_labels = await loop.run_in_executor(
None,
lambda: self.gitea.get_labels(target_repo)
)
return {
'organization': org_labels,
'repository': repo_labels,
'total_count': len(org_labels) + len(repo_labels)
}
async def suggest_labels(self, context: str, repo: Optional[str] = None) -> List[str]:
"""
Analyze context and suggest appropriate labels from repository's actual labels.
This method fetches actual labels from the repository and matches them
dynamically, supporting any label naming convention (slash, colon-space, etc.).
Args:
context: Issue title + description or sprint context
repo: Repository in 'owner/repo' format (optional, uses default if not provided)
Returns:
List of suggested label names that exist in the repository
"""
# Fetch actual labels from repository
target_repo = repo or self.gitea.repo
if not target_repo:
logger.warning("No repository specified, returning empty suggestions")
return []
try:
labels_data = await self.get_labels(target_repo)
all_labels = labels_data.get('organization', []) + labels_data.get('repository', [])
label_names = [label['name'] for label in all_labels]
except Exception as e:
logger.warning(f"Failed to fetch labels: {e}. Using fallback suggestions.")
label_names = []
# Build label lookup for dynamic matching
label_lookup = self._build_label_lookup(label_names)
suggested = []
context_lower = context.lower()
# Type detection (exclusive - only one)
type_label = None
if any(word in context_lower for word in ['bug', 'error', 'fix', 'broken', 'crash', 'fail']):
type_label = self._find_label(label_lookup, 'type', 'bug')
elif any(word in context_lower for word in ['refactor', 'extract', 'restructure', 'architecture', 'service extraction']):
type_label = self._find_label(label_lookup, 'type', 'refactor')
elif any(word in context_lower for word in ['feature', 'add', 'implement', 'new', 'create']):
type_label = self._find_label(label_lookup, 'type', 'feature')
elif any(word in context_lower for word in ['docs', 'documentation', 'readme', 'guide']):
type_label = self._find_label(label_lookup, 'type', 'documentation')
elif any(word in context_lower for word in ['test', 'testing', 'spec', 'coverage']):
type_label = self._find_label(label_lookup, 'type', 'test')
elif any(word in context_lower for word in ['chore', 'maintenance', 'update', 'upgrade']):
type_label = self._find_label(label_lookup, 'type', 'chore')
if type_label:
suggested.append(type_label)
# Priority detection
priority_label = None
if any(word in context_lower for word in ['critical', 'urgent', 'blocker', 'blocking', 'emergency']):
priority_label = self._find_label(label_lookup, 'priority', 'critical')
elif any(word in context_lower for word in ['high', 'important', 'asap', 'soon']):
priority_label = self._find_label(label_lookup, 'priority', 'high')
elif any(word in context_lower for word in ['low', 'nice-to-have', 'optional', 'later']):
priority_label = self._find_label(label_lookup, 'priority', 'low')
else:
priority_label = self._find_label(label_lookup, 'priority', 'medium')
if priority_label:
suggested.append(priority_label)
# Complexity detection
complexity_label = None
if any(word in context_lower for word in ['simple', 'trivial', 'easy', 'quick']):
complexity_label = self._find_label(label_lookup, 'complexity', 'simple')
elif any(word in context_lower for word in ['complex', 'difficult', 'challenging', 'intricate']):
complexity_label = self._find_label(label_lookup, 'complexity', 'complex')
else:
complexity_label = self._find_label(label_lookup, 'complexity', 'medium')
if complexity_label:
suggested.append(complexity_label)
# Effort detection (supports both "Effort" and "Efforts" naming)
effort_label = None
if any(word in context_lower for word in ['xs', 'tiny', '1 hour', '2 hours']):
effort_label = self._find_label(label_lookup, 'effort', 'xs')
elif any(word in context_lower for word in ['small', 's ', '1 day', 'half day']):
effort_label = self._find_label(label_lookup, 'effort', 's')
elif any(word in context_lower for word in ['medium', 'm ', '2 days', '3 days']):
effort_label = self._find_label(label_lookup, 'effort', 'm')
elif any(word in context_lower for word in ['large', 'l ', '1 week', '5 days']):
effort_label = self._find_label(label_lookup, 'effort', 'l')
elif any(word in context_lower for word in ['xl', 'extra large', '2 weeks', 'sprint']):
effort_label = self._find_label(label_lookup, 'effort', 'xl')
if effort_label:
suggested.append(effort_label)
# Component detection (based on keywords)
component_mappings = {
'backend': ['backend', 'server', 'api', 'database', 'service'],
'frontend': ['frontend', 'ui', 'interface', 'react', 'vue', 'component'],
'api': ['api', 'endpoint', 'rest', 'graphql', 'route'],
'database': ['database', 'db', 'sql', 'migration', 'schema', 'postgres'],
'auth': ['auth', 'authentication', 'login', 'oauth', 'token', 'session'],
'deploy': ['deploy', 'deployment', 'docker', 'kubernetes', 'ci/cd'],
'testing': ['test', 'testing', 'spec', 'jest', 'pytest', 'coverage'],
'docs': ['docs', 'documentation', 'readme', 'guide', 'wiki']
}
for component, keywords in component_mappings.items():
if any(keyword in context_lower for keyword in keywords):
label = self._find_label(label_lookup, 'component', component)
if label and label not in suggested:
suggested.append(label)
# Tech stack detection
tech_mappings = {
'python': ['python', 'fastapi', 'django', 'flask', 'pytest'],
'javascript': ['javascript', 'js', 'node', 'npm', 'yarn'],
'docker': ['docker', 'dockerfile', 'container', 'compose'],
'postgresql': ['postgres', 'postgresql', 'psql', 'sql'],
'redis': ['redis', 'cache', 'session store'],
'vue': ['vue', 'vuejs', 'nuxt'],
'fastapi': ['fastapi', 'pydantic', 'starlette']
}
for tech, keywords in tech_mappings.items():
if any(keyword in context_lower for keyword in keywords):
label = self._find_label(label_lookup, 'tech', tech)
if label and label not in suggested:
suggested.append(label)
# Source detection (based on git branch or context)
source_label = None
if 'development' in context_lower or 'dev/' in context_lower:
source_label = self._find_label(label_lookup, 'source', 'development')
elif 'staging' in context_lower or 'stage/' in context_lower:
source_label = self._find_label(label_lookup, 'source', 'staging')
elif 'production' in context_lower or 'prod' in context_lower:
source_label = self._find_label(label_lookup, 'source', 'production')
if source_label:
suggested.append(source_label)
# Risk detection
risk_label = None
if any(word in context_lower for word in ['breaking', 'breaking change', 'major', 'risky']):
risk_label = self._find_label(label_lookup, 'risk', 'high')
elif any(word in context_lower for word in ['safe', 'low risk', 'minor']):
risk_label = self._find_label(label_lookup, 'risk', 'low')
if risk_label:
suggested.append(risk_label)
logger.info(f"Suggested {len(suggested)} labels based on context and {len(label_names)} available labels")
return suggested
def _build_label_lookup(self, label_names: List[str]) -> Dict[str, Dict[str, str]]:
"""
Build a lookup dictionary for label matching.
Supports various label formats:
- Slash format: Type/Bug, Priority/High
- Colon-space format: Type: Bug, Priority: High
- Colon format: Type:Bug
Args:
label_names: List of actual label names from repository
Returns:
Nested dict: {category: {value: actual_label_name}}
"""
lookup: Dict[str, Dict[str, str]] = {}
for label in label_names:
# Try different separator patterns
# Pattern: Category<separator>Value
# Separators: "/" (slash), ": " (colon-space), ":" (bare colon)
match = re.match(r'^([^/:]+)(?:/|:\s*|:)(.+)$', label)
if match:
category = match.group(1).lower().rstrip('s') # Normalize: "Efforts" -> "effort"
value = match.group(2).lower()
if category not in lookup:
lookup[category] = {}
lookup[category][value] = label
return lookup
def _find_label(self, lookup: Dict[str, Dict[str, str]], category: str, value: str) -> Optional[str]:
"""
Find actual label name from lookup.
Args:
lookup: Label lookup dictionary
category: Category to search (e.g., 'type', 'priority')
value: Value to find (e.g., 'bug', 'high')
Returns:
Actual label name if found, None otherwise
"""
category_lower = category.lower().rstrip('s') # Normalize
value_lower = value.lower()
if category_lower in lookup and value_lower in lookup[category_lower]:
return lookup[category_lower][value_lower]
return None
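# Worked example with assumed label names:
# lookup = self._build_label_lookup(["Type/Bug", "Priority: High", "Efforts/M"])
# -> {'type': {'bug': 'Type/Bug'}, 'priority': {'high': 'Priority: High'},
#    'effort': {'m': 'Efforts/M'}}
# so _find_label(lookup, 'effort', 'm') returns 'Efforts/M' despite the
# plural "Efforts" prefix, thanks to the rstrip('s') normalization.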

View File

@@ -0,0 +1,145 @@
"""
Milestone management tools for MCP server.
Provides async wrappers for milestone operations:
- CRUD operations for milestones
- Milestone-sprint relationship tracking
"""
import asyncio
import logging
from typing import List, Dict, Optional
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class MilestoneTools:
"""Async wrappers for Gitea milestone operations"""
def __init__(self, gitea_client):
"""
Initialize milestone tools.
Args:
gitea_client: GiteaClient instance
"""
self.gitea = gitea_client
async def list_milestones(
self,
state: str = 'open',
repo: Optional[str] = None
) -> List[Dict]:
"""
List all milestones in repository.
Args:
state: Milestone state (open, closed, all)
repo: Repository in owner/repo format
Returns:
List of milestone dictionaries
"""
loop = asyncio.get_event_loop()
return await loop.run_in_executor(
None,
lambda: self.gitea.list_milestones(state, repo)
)
async def get_milestone(
self,
milestone_id: int,
repo: Optional[str] = None
) -> Dict:
"""
Get a specific milestone by ID.
Args:
milestone_id: Milestone ID
repo: Repository in owner/repo format
Returns:
Milestone dictionary
"""
loop = asyncio.get_event_loop()
return await loop.run_in_executor(
None,
lambda: self.gitea.get_milestone(milestone_id, repo)
)
async def create_milestone(
self,
title: str,
description: Optional[str] = None,
due_on: Optional[str] = None,
repo: Optional[str] = None
) -> Dict:
"""
Create a new milestone.
Args:
title: Milestone title (e.g., "v2.0 Release", "Sprint 17")
description: Milestone description
due_on: Due date in ISO 8601 format (e.g., "2025-02-01T00:00:00Z")
repo: Repository in owner/repo format
Returns:
Created milestone dictionary
"""
loop = asyncio.get_event_loop()
return await loop.run_in_executor(
None,
lambda: self.gitea.create_milestone(title, description, due_on, repo)
)
async def update_milestone(
self,
milestone_id: int,
title: Optional[str] = None,
description: Optional[str] = None,
state: Optional[str] = None,
due_on: Optional[str] = None,
repo: Optional[str] = None
) -> Dict:
"""
Update an existing milestone.
Args:
milestone_id: Milestone ID
title: New title (optional)
description: New description (optional)
state: New state - 'open' or 'closed' (optional)
due_on: New due date (optional)
repo: Repository in owner/repo format
Returns:
Updated milestone dictionary
"""
loop = asyncio.get_event_loop()
return await loop.run_in_executor(
None,
lambda: self.gitea.update_milestone(
milestone_id, title, description, state, due_on, repo
)
)
async def delete_milestone(
self,
milestone_id: int,
repo: Optional[str] = None
) -> bool:
"""
Delete a milestone.
Args:
milestone_id: Milestone ID
repo: Repository in owner/repo format
Returns:
True if deleted successfully
"""
loop = asyncio.get_event_loop()
return await loop.run_in_executor(
None,
lambda: self.gitea.delete_milestone(milestone_id, repo)
)

View File

@@ -0,0 +1,274 @@
"""
Pull request management tools for MCP server.
Provides async wrappers for PR operations with:
- Branch-aware security
- PMO multi-repo support
- Comprehensive error handling
"""
import asyncio
import subprocess
import logging
from typing import List, Dict, Optional
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class PullRequestTools:
"""Async wrappers for Gitea pull request operations with branch detection"""
def __init__(self, gitea_client):
"""
Initialize pull request tools.
Args:
gitea_client: GiteaClient instance
"""
self.gitea = gitea_client
def _get_current_branch(self) -> str:
"""
Get current git branch.
Returns:
Current branch name or 'unknown' if not in a git repo
"""
try:
result = subprocess.run(
['git', 'rev-parse', '--abbrev-ref', 'HEAD'],
capture_output=True,
text=True,
check=True
)
return result.stdout.strip()
except (subprocess.CalledProcessError, FileNotFoundError):
# Not a git repo, or git itself is unavailable
return "unknown"
def _check_branch_permissions(self, operation: str) -> bool:
"""
Check if operation is allowed on current branch.
Args:
operation: Operation name (list_prs, create_review, etc.)
Returns:
True if operation is allowed, False otherwise
"""
branch = self._get_current_branch()
# Read-only operations allowed everywhere
read_ops = ['list_pull_requests', 'get_pull_request', 'get_pr_diff', 'get_pr_comments']
# Production branches (read-only)
if branch in ['main', 'master'] or branch.startswith('prod/'):
return operation in read_ops
# Staging branches (read-only for PRs, can comment)
if branch == 'staging' or branch.startswith('stage/'):
return operation in read_ops + ['add_pr_comment']
# Development branches (full access)
if branch in ['development', 'develop'] or branch.startswith(('feat/', 'feature/', 'dev/')):
return True
# Unknown branch - be restrictive
return operation in read_ops
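# Sketch: unlike IssueTools, an unrecognized branch still permits reads here,
# e.g. on 'hotfix/x' _check_branch_permissions('get_pr_diff') is True while
# 'create_pr_review' is False.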
async def list_pull_requests(
self,
state: str = 'open',
sort: str = 'recentupdate',
labels: Optional[List[str]] = None,
repo: Optional[str] = None
) -> List[Dict]:
"""
List pull requests from repository (async wrapper).
Args:
state: PR state (open, closed, all)
sort: Sort order
labels: Filter by labels
repo: Override configured repo (for PMO multi-repo)
Returns:
List of pull request dictionaries
Raises:
PermissionError: If operation not allowed on current branch
"""
if not self._check_branch_permissions('list_pull_requests'):
branch = self._get_current_branch()
raise PermissionError(
f"Cannot list PRs on branch '{branch}'. "
f"Switch to a development branch."
)
loop = asyncio.get_event_loop()
return await loop.run_in_executor(
None,
lambda: self.gitea.list_pull_requests(state, sort, labels, repo)
)
async def get_pull_request(
self,
pr_number: int,
repo: Optional[str] = None
) -> Dict:
"""
Get specific pull request details (async wrapper).
Args:
pr_number: Pull request number
repo: Override configured repo (for PMO multi-repo)
Returns:
Pull request dictionary
Raises:
PermissionError: If operation not allowed on current branch
"""
if not self._check_branch_permissions('get_pull_request'):
branch = self._get_current_branch()
raise PermissionError(
f"Cannot get PR on branch '{branch}'. "
f"Switch to a development branch."
)
loop = asyncio.get_event_loop()
return await loop.run_in_executor(
None,
lambda: self.gitea.get_pull_request(pr_number, repo)
)
async def get_pr_diff(
self,
pr_number: int,
repo: Optional[str] = None
) -> str:
"""
Get pull request diff (async wrapper).
Args:
pr_number: Pull request number
repo: Override configured repo (for PMO multi-repo)
Returns:
Diff as string
Raises:
PermissionError: If operation not allowed on current branch
"""
if not self._check_branch_permissions('get_pr_diff'):
branch = self._get_current_branch()
raise PermissionError(
f"Cannot get PR diff on branch '{branch}'. "
f"Switch to a development branch."
)
loop = asyncio.get_event_loop()
return await loop.run_in_executor(
None,
lambda: self.gitea.get_pr_diff(pr_number, repo)
)
async def get_pr_comments(
self,
pr_number: int,
repo: Optional[str] = None
) -> List[Dict]:
"""
Get comments on a pull request (async wrapper).
Args:
pr_number: Pull request number
repo: Override configured repo (for PMO multi-repo)
Returns:
List of comment dictionaries
Raises:
PermissionError: If operation not allowed on current branch
"""
if not self._check_branch_permissions('get_pr_comments'):
branch = self._get_current_branch()
raise PermissionError(
f"Cannot get PR comments on branch '{branch}'. "
f"Switch to a development branch."
)
loop = asyncio.get_event_loop()
return await loop.run_in_executor(
None,
lambda: self.gitea.get_pr_comments(pr_number, repo)
)
async def create_pr_review(
self,
pr_number: int,
body: str,
event: str = 'COMMENT',
comments: Optional[List[Dict]] = None,
repo: Optional[str] = None
) -> Dict:
"""
Create a review on a pull request (async wrapper with branch check).
Args:
pr_number: Pull request number
body: Review body/summary
event: Review action (APPROVE, REQUEST_CHANGES, COMMENT)
comments: Optional list of inline comments
repo: Override configured repo (for PMO multi-repo)
Returns:
Created review dictionary
Raises:
PermissionError: If operation not allowed on current branch
"""
if not self._check_branch_permissions('create_pr_review'):
branch = self._get_current_branch()
raise PermissionError(
f"Cannot create PR review on branch '{branch}'. "
f"Switch to a development branch to review PRs."
)
loop = asyncio.get_event_loop()
return await loop.run_in_executor(
None,
lambda: self.gitea.create_pr_review(pr_number, body, event, comments, repo)
)
async def add_pr_comment(
self,
pr_number: int,
body: str,
repo: Optional[str] = None
) -> Dict:
"""
Add a general comment to a pull request (async wrapper with branch check).
Args:
pr_number: Pull request number
body: Comment text
repo: Override configured repo (for PMO multi-repo)
Returns:
Created comment dictionary
Raises:
PermissionError: If operation not allowed on current branch
"""
if not self._check_branch_permissions('add_pr_comment'):
branch = self._get_current_branch()
raise PermissionError(
f"Cannot add PR comment on branch '{branch}'. "
f"Switch to a development or staging branch to comment on PRs."
)
loop = asyncio.get_event_loop()
return await loop.run_in_executor(
None,
lambda: self.gitea.add_pr_comment(pr_number, body, repo)
)

View File

@@ -0,0 +1,149 @@
"""
Wiki management tools for MCP server.
Provides async wrappers for wiki operations to support lessons learned:
- Page CRUD operations
- Lessons learned creation and search
"""
import asyncio
import logging
from typing import List, Dict, Optional
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class WikiTools:
"""Async wrappers for Gitea wiki operations"""
def __init__(self, gitea_client):
"""
Initialize wiki tools.
Args:
gitea_client: GiteaClient instance
"""
self.gitea = gitea_client
async def list_wiki_pages(self, repo: Optional[str] = None) -> List[Dict]:
"""List all wiki pages in repository."""
loop = asyncio.get_event_loop()
return await loop.run_in_executor(
None,
lambda: self.gitea.list_wiki_pages(repo)
)
async def get_wiki_page(
self,
page_name: str,
repo: Optional[str] = None
) -> Dict:
"""Get a specific wiki page by name."""
loop = asyncio.get_event_loop()
return await loop.run_in_executor(
None,
lambda: self.gitea.get_wiki_page(page_name, repo)
)
async def create_wiki_page(
self,
title: str,
content: str,
repo: Optional[str] = None
) -> Dict:
"""Create a new wiki page."""
loop = asyncio.get_event_loop()
return await loop.run_in_executor(
None,
lambda: self.gitea.create_wiki_page(title, content, repo)
)
async def update_wiki_page(
self,
page_name: str,
content: str,
repo: Optional[str] = None
) -> Dict:
"""Update an existing wiki page."""
loop = asyncio.get_event_loop()
return await loop.run_in_executor(
None,
lambda: self.gitea.update_wiki_page(page_name, content, repo)
)
async def delete_wiki_page(
self,
page_name: str,
repo: Optional[str] = None
) -> bool:
"""Delete a wiki page."""
loop = asyncio.get_event_loop()
return await loop.run_in_executor(
None,
lambda: self.gitea.delete_wiki_page(page_name, repo)
)
async def search_wiki_pages(
self,
query: str,
repo: Optional[str] = None
) -> List[Dict]:
"""Search wiki pages by title."""
loop = asyncio.get_event_loop()
return await loop.run_in_executor(
None,
lambda: self.gitea.search_wiki_pages(query, repo)
)
async def create_lesson(
self,
title: str,
content: str,
tags: List[str],
category: str = "sprints",
repo: Optional[str] = None
) -> Dict:
"""
Create a lessons learned entry in the wiki.
Args:
title: Lesson title (e.g., "Sprint 16 - Prevent Infinite Loops")
content: Lesson content in markdown
tags: List of tags for categorization
category: Category (sprints, patterns, architecture, etc.)
repo: Repository in owner/repo format
Returns:
Created wiki page
"""
loop = asyncio.get_event_loop()
return await loop.run_in_executor(
None,
lambda: self.gitea.create_lesson(title, content, tags, category, repo)
)
async def search_lessons(
self,
query: Optional[str] = None,
tags: Optional[List[str]] = None,
limit: int = 20,
repo: Optional[str] = None
) -> List[Dict]:
"""
Search lessons learned from previous sprints.
Args:
query: Search query (optional)
tags: Tags to filter by (optional)
limit: Maximum results (default 20)
repo: Repository in owner/repo format
Returns:
List of matching lessons
"""
loop = asyncio.get_event_loop()
results = await loop.run_in_executor(
None,
lambda: self.gitea.search_lessons(query, tags, repo)
)
return results[:limit]
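# A hedged usage sketch (title, tags, and content_md are illustrative):
# await wiki_tools.create_lesson("Sprint 16 - Prevent Infinite Loops",
#     content_md, tags=["loops", "sprint-16"])
# await wiki_tools.search_lessons(tags=["loops"], limit=5)  # at most 5 matches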

View File

@@ -1,2 +1,6 @@
--extra-index-url https://gitea.hotserv.cloud/api/packages/personal-projects/pypi/simple
gitea-mcp>=1.0.0
mcp>=0.9.0 # MCP SDK from Anthropic
python-dotenv>=1.0.0 # Environment variable loading
requests>=2.31.0 # HTTP client for Gitea API
pydantic>=2.5.0 # Data validation
pytest>=7.4.3 # Testing framework
pytest-asyncio>=0.23.0 # Async testing support

View File

@@ -1,20 +0,0 @@
#!/bin/bash
# Capture original working directory before any cd operations
# This should be the user's project directory when launched by Claude Code
export CLAUDE_PROJECT_DIR="${CLAUDE_PROJECT_DIR:-$PWD}"
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
CACHE_VENV="$HOME/.cache/claude-mcp-venvs/leo-claude-mktplace/gitea/.venv"
LOCAL_VENV="$SCRIPT_DIR/.venv"
if [[ -f "$CACHE_VENV/bin/python" ]]; then
PYTHON="$CACHE_VENV/bin/python"
elif [[ -f "$LOCAL_VENV/bin/python" ]]; then
PYTHON="$LOCAL_VENV/bin/python"
else
echo "ERROR: No venv found. Run: ./scripts/setup-venvs.sh" >&2
exit 1
fi
cd "$SCRIPT_DIR"
exec "$PYTHON" -m gitea_mcp.server "$@"

View File

View File

@@ -0,0 +1,260 @@
"""
Unit tests for configuration loader.
"""
import pytest
from pathlib import Path
import os
from mcp_server.config import GiteaConfig
def test_load_system_config(tmp_path, monkeypatch):
"""Test loading system-level configuration"""
# Mock home directory
config_dir = tmp_path / '.config' / 'claude'
config_dir.mkdir(parents=True)
config_file = config_dir / 'gitea.env'
config_file.write_text(
"GITEA_API_URL=https://test.com/api/v1\n"
"GITEA_API_TOKEN=test_token\n"
"GITEA_OWNER=test_owner\n"
)
monkeypatch.setenv('HOME', str(tmp_path))
monkeypatch.chdir(tmp_path)
config = GiteaConfig()
result = config.load()
assert result['api_url'] == 'https://test.com/api/v1'
assert result['api_token'] == 'test_token'
assert result['owner'] == 'test_owner'
assert result['mode'] == 'company' # No repo specified
assert result['repo'] is None
def test_project_config_override(tmp_path, monkeypatch):
"""Test that project config overrides system config"""
# Set up system config
system_config_dir = tmp_path / '.config' / 'claude'
system_config_dir.mkdir(parents=True)
system_config = system_config_dir / 'gitea.env'
system_config.write_text(
"GITEA_API_URL=https://test.com/api/v1\n"
"GITEA_API_TOKEN=test_token\n"
"GITEA_OWNER=test_owner\n"
)
# Set up project config
project_dir = tmp_path / 'project'
project_dir.mkdir()
project_config = project_dir / '.env'
project_config.write_text("GITEA_REPO=test_repo\n")
monkeypatch.setenv('HOME', str(tmp_path))
monkeypatch.chdir(project_dir)
config = GiteaConfig()
result = config.load()
assert result['repo'] == 'test_repo'
assert result['mode'] == 'project'
def test_missing_system_config(tmp_path, monkeypatch):
"""Test error handling for missing system configuration"""
monkeypatch.setenv('HOME', str(tmp_path))
monkeypatch.chdir(tmp_path)
with pytest.raises(FileNotFoundError) as exc_info:
config = GiteaConfig()
config.load()
assert "System config not found" in str(exc_info.value)
def test_missing_required_config(tmp_path, monkeypatch):
"""Test error handling for missing required variables"""
# Clear environment variables
for var in ['GITEA_API_URL', 'GITEA_API_TOKEN', 'GITEA_OWNER', 'GITEA_REPO']:
monkeypatch.delenv(var, raising=False)
# Create incomplete config
config_dir = tmp_path / '.config' / 'claude'
config_dir.mkdir(parents=True)
config_file = config_dir / 'gitea.env'
config_file.write_text(
"GITEA_API_URL=https://test.com/api/v1\n"
# Missing GITEA_API_TOKEN and GITEA_OWNER
)
monkeypatch.setenv('HOME', str(tmp_path))
monkeypatch.chdir(tmp_path)
with pytest.raises(ValueError) as exc_info:
config = GiteaConfig()
config.load()
assert "Missing required configuration" in str(exc_info.value)
def test_mode_detection_project(tmp_path, monkeypatch):
"""Test mode detection for project mode"""
config_dir = tmp_path / '.config' / 'claude'
config_dir.mkdir(parents=True)
config_file = config_dir / 'gitea.env'
config_file.write_text(
"GITEA_API_URL=https://test.com/api/v1\n"
"GITEA_API_TOKEN=test_token\n"
"GITEA_OWNER=test_owner\n"
"GITEA_REPO=test_repo\n"
)
monkeypatch.setenv('HOME', str(tmp_path))
monkeypatch.chdir(tmp_path)
config = GiteaConfig()
result = config.load()
assert result['mode'] == 'project'
assert result['repo'] == 'test_repo'
def test_mode_detection_company(tmp_path, monkeypatch):
"""Test mode detection for company mode (PMO)"""
# Clear environment variables, especially GITEA_REPO
for var in ['GITEA_API_URL', 'GITEA_API_TOKEN', 'GITEA_OWNER', 'GITEA_REPO']:
monkeypatch.delenv(var, raising=False)
config_dir = tmp_path / '.config' / 'claude'
config_dir.mkdir(parents=True)
config_file = config_dir / 'gitea.env'
config_file.write_text(
"GITEA_API_URL=https://test.com/api/v1\n"
"GITEA_API_TOKEN=test_token\n"
"GITEA_OWNER=test_owner\n"
# No GITEA_REPO
)
monkeypatch.setenv('HOME', str(tmp_path))
monkeypatch.chdir(tmp_path)
config = GiteaConfig()
result = config.load()
assert result['mode'] == 'company'
assert result['repo'] is None
# ========================================
# GIT URL PARSING TESTS
# ========================================
def test_parse_git_url_ssh_format():
"""Test parsing SSH format git URL"""
config = GiteaConfig()
# SSH with port: ssh://git@host:port/owner/repo.git
url = "ssh://git@hotserv.tailc9b278.ts.net:2222/personal-projects/personal-portfolio.git"
result = config._parse_git_url(url)
assert result == "personal-projects/personal-portfolio"
def test_parse_git_url_ssh_short_format():
"""Test parsing SSH short format git URL"""
config = GiteaConfig()
# SSH short: git@host:owner/repo.git
url = "git@github.com:owner/repo.git"
result = config._parse_git_url(url)
assert result == "owner/repo"
def test_parse_git_url_https_format():
"""Test parsing HTTPS format git URL"""
config = GiteaConfig()
# HTTPS: https://host/owner/repo.git
url = "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git"
result = config._parse_git_url(url)
assert result == "personal-projects/leo-claude-mktplace"
def test_parse_git_url_http_format():
"""Test parsing HTTP format git URL"""
config = GiteaConfig()
# HTTP: http://host/owner/repo.git
url = "http://gitea.hotserv.cloud/personal-projects/repo.git"
result = config._parse_git_url(url)
assert result == "personal-projects/repo"
def test_parse_git_url_without_git_suffix():
"""Test parsing git URL without .git suffix"""
config = GiteaConfig()
url = "https://github.com/owner/repo"
result = config._parse_git_url(url)
assert result == "owner/repo"
def test_parse_git_url_invalid_format():
"""Test parsing invalid git URL returns None"""
config = GiteaConfig()
url = "not-a-valid-url"
result = config._parse_git_url(url)
assert result is None
def test_find_project_directory_from_env(tmp_path, monkeypatch):
"""Test finding project directory from CLAUDE_PROJECT_DIR env var"""
project_dir = tmp_path / 'my-project'
project_dir.mkdir()
(project_dir / '.git').mkdir()
monkeypatch.setenv('CLAUDE_PROJECT_DIR', str(project_dir))
config = GiteaConfig()
result = config._find_project_directory()
assert result == project_dir
def test_find_project_directory_from_cwd(tmp_path, monkeypatch):
"""Test finding project directory from cwd with .env file"""
project_dir = tmp_path / 'project'
project_dir.mkdir()
(project_dir / '.env').write_text("GITEA_REPO=test/repo")
monkeypatch.chdir(project_dir)
# Clear env vars that might interfere
monkeypatch.delenv('CLAUDE_PROJECT_DIR', raising=False)
monkeypatch.delenv('PWD', raising=False)
config = GiteaConfig()
result = config._find_project_directory()
assert result == project_dir
def test_find_project_directory_none_when_no_markers(tmp_path, monkeypatch):
"""Test returns None when no project markers found"""
empty_dir = tmp_path / 'empty'
empty_dir.mkdir()
monkeypatch.chdir(empty_dir)
monkeypatch.delenv('CLAUDE_PROJECT_DIR', raising=False)
monkeypatch.delenv('PWD', raising=False)
monkeypatch.delenv('GITEA_REPO', raising=False)
config = GiteaConfig()
result = config._find_project_directory()
assert result is None

View File

@@ -0,0 +1,268 @@
"""
Unit tests for Gitea API client.
"""
import pytest
from unittest.mock import Mock, patch, MagicMock
from mcp_server.gitea_client import GiteaClient
@pytest.fixture
def mock_config():
"""Fixture providing mocked configuration"""
with patch('mcp_server.gitea_client.GiteaConfig') as mock_cfg:
mock_instance = mock_cfg.return_value
mock_instance.load.return_value = {
'api_url': 'https://test.com/api/v1',
'api_token': 'test_token',
'owner': 'test_owner',
'repo': 'test_repo',
'mode': 'project'
}
yield mock_cfg
@pytest.fixture
def gitea_client(mock_config):
"""Fixture providing GiteaClient instance with mocked config"""
return GiteaClient()
def test_client_initialization(gitea_client):
"""Test client initializes with correct configuration"""
assert gitea_client.base_url == 'https://test.com/api/v1'
assert gitea_client.token == 'test_token'
assert gitea_client.owner == 'test_owner'
assert gitea_client.repo == 'test_repo'
assert gitea_client.mode == 'project'
assert 'Authorization' in gitea_client.session.headers
assert gitea_client.session.headers['Authorization'] == 'token test_token'
def test_list_issues(gitea_client):
"""Test listing issues"""
mock_response = Mock()
mock_response.json.return_value = [
{'number': 1, 'title': 'Test Issue 1'},
{'number': 2, 'title': 'Test Issue 2'}
]
mock_response.raise_for_status = Mock()
with patch.object(gitea_client.session, 'get', return_value=mock_response):
issues = gitea_client.list_issues(state='open')
assert len(issues) == 2
assert issues[0]['title'] == 'Test Issue 1'
gitea_client.session.get.assert_called_once()
def test_list_issues_with_labels(gitea_client):
"""Test listing issues with label filter"""
mock_response = Mock()
mock_response.json.return_value = [{'number': 1, 'title': 'Bug Issue'}]
mock_response.raise_for_status = Mock()
with patch.object(gitea_client.session, 'get', return_value=mock_response):
issues = gitea_client.list_issues(state='open', labels=['Type/Bug'])
gitea_client.session.get.assert_called_once()
call_args = gitea_client.session.get.call_args
assert call_args[1]['params']['labels'] == 'Type/Bug'
def test_get_issue(gitea_client):
"""Test getting specific issue"""
mock_response = Mock()
mock_response.json.return_value = {'number': 1, 'title': 'Test Issue'}
mock_response.raise_for_status = Mock()
with patch.object(gitea_client.session, 'get', return_value=mock_response):
issue = gitea_client.get_issue(1)
assert issue['number'] == 1
assert issue['title'] == 'Test Issue'
def test_create_issue(gitea_client):
"""Test creating new issue"""
mock_response = Mock()
mock_response.json.return_value = {
'number': 1,
'title': 'New Issue',
'body': 'Issue body'
}
mock_response.raise_for_status = Mock()
with patch.object(gitea_client.session, 'post', return_value=mock_response):
issue = gitea_client.create_issue(
title='New Issue',
body='Issue body',
labels=['Type/Bug']
)
assert issue['title'] == 'New Issue'
gitea_client.session.post.assert_called_once()
def test_update_issue(gitea_client):
"""Test updating existing issue"""
mock_response = Mock()
mock_response.json.return_value = {
'number': 1,
'title': 'Updated Issue'
}
mock_response.raise_for_status = Mock()
with patch.object(gitea_client.session, 'patch', return_value=mock_response):
issue = gitea_client.update_issue(
issue_number=1,
title='Updated Issue'
)
assert issue['title'] == 'Updated Issue'
gitea_client.session.patch.assert_called_once()
def test_add_comment(gitea_client):
"""Test adding comment to issue"""
mock_response = Mock()
mock_response.json.return_value = {'body': 'Test comment'}
mock_response.raise_for_status = Mock()
with patch.object(gitea_client.session, 'post', return_value=mock_response):
comment = gitea_client.add_comment(1, 'Test comment')
assert comment['body'] == 'Test comment'
gitea_client.session.post.assert_called_once()
def test_get_labels(gitea_client):
"""Test getting repository labels"""
mock_response = Mock()
mock_response.json.return_value = [
{'name': 'Type/Bug'},
{'name': 'Priority/High'}
]
mock_response.raise_for_status = Mock()
with patch.object(gitea_client.session, 'get', return_value=mock_response):
labels = gitea_client.get_labels()
assert len(labels) == 2
assert labels[0]['name'] == 'Type/Bug'
def test_get_org_labels(gitea_client):
"""Test getting organization labels"""
mock_response = Mock()
mock_response.json.return_value = [
{'name': 'Type/Bug'},
{'name': 'Type/Feature'}
]
mock_response.raise_for_status = Mock()
with patch.object(gitea_client.session, 'get', return_value=mock_response):
labels = gitea_client.get_org_labels()
assert len(labels) == 2
def test_list_repos(gitea_client):
"""Test listing organization repositories (PMO mode)"""
mock_response = Mock()
mock_response.json.return_value = [
{'name': 'repo1'},
{'name': 'repo2'}
]
mock_response.raise_for_status = Mock()
with patch.object(gitea_client.session, 'get', return_value=mock_response):
repos = gitea_client.list_repos()
assert len(repos) == 2
assert repos[0]['name'] == 'repo1'
def test_aggregate_issues(gitea_client):
"""Test aggregating issues across repositories (PMO mode)"""
# Mock list_repos
gitea_client.list_repos = Mock(return_value=[
{'name': 'repo1'},
{'name': 'repo2'}
])
# Mock list_issues
gitea_client.list_issues = Mock(side_effect=[
[{'number': 1, 'title': 'Issue 1'}], # repo1
[{'number': 2, 'title': 'Issue 2'}] # repo2
])
aggregated = gitea_client.aggregate_issues(state='open')
assert 'repo1' in aggregated
assert 'repo2' in aggregated
assert len(aggregated['repo1']) == 1
assert len(aggregated['repo2']) == 1
def test_no_repo_specified_error(gitea_client):
"""Test error when repository not specified"""
# Create client without repo
with patch('mcp_server.gitea_client.GiteaConfig') as mock_cfg:
mock_instance = mock_cfg.return_value
mock_instance.load.return_value = {
'api_url': 'https://test.com/api/v1',
'api_token': 'test_token',
'owner': 'test_owner',
'repo': None, # No repo
'mode': 'company'
}
client = GiteaClient()
with pytest.raises(ValueError) as exc_info:
client.list_issues()
assert "Repository not specified" in str(exc_info.value)
# ========================================
# ORGANIZATION DETECTION TESTS
# ========================================
def test_is_organization_true(gitea_client):
"""Test _is_organization returns True for valid organization"""
mock_response = Mock()
mock_response.status_code = 200
with patch.object(gitea_client.session, 'get', return_value=mock_response):
result = gitea_client._is_organization('personal-projects')
assert result is True
gitea_client.session.get.assert_called_once_with(
'https://test.com/api/v1/orgs/personal-projects'
)
def test_is_organization_false(gitea_client):
"""Test _is_organization returns False for user account"""
mock_response = Mock()
mock_response.status_code = 404
with patch.object(gitea_client.session, 'get', return_value=mock_response):
result = gitea_client._is_organization('lmiranda')
assert result is False
def test_is_org_repo_uses_orgs_endpoint(gitea_client):
"""Test is_org_repo uses /orgs endpoint instead of owner.type"""
mock_response = Mock()
mock_response.status_code = 200
with patch.object(gitea_client.session, 'get', return_value=mock_response):
result = gitea_client.is_org_repo('personal-projects/repo')
assert result is True
# Should call /orgs/personal-projects, not /repos/.../
gitea_client.session.get.assert_called_once_with(
'https://test.com/api/v1/orgs/personal-projects'
)

View File

@@ -0,0 +1,159 @@
"""
Unit tests for issue tools with branch detection.
"""
import pytest
from unittest.mock import Mock, patch, AsyncMock
from mcp_server.tools.issues import IssueTools
@pytest.fixture
def mock_gitea_client():
"""Fixture providing mocked Gitea client"""
client = Mock()
client.mode = 'project'
return client
@pytest.fixture
def issue_tools(mock_gitea_client):
"""Fixture providing IssueTools instance"""
return IssueTools(mock_gitea_client)
@pytest.mark.asyncio
async def test_list_issues_development_branch(issue_tools):
"""Test listing issues on development branch (allowed)"""
with patch.object(issue_tools, '_get_current_branch', return_value='feat/test-feature'):
issue_tools.gitea.list_issues = Mock(return_value=[{'number': 1}])
issues = await issue_tools.list_issues(state='open')
assert len(issues) == 1
issue_tools.gitea.list_issues.assert_called_once()
@pytest.mark.asyncio
async def test_create_issue_development_branch(issue_tools):
"""Test creating issue on development branch (allowed)"""
with patch.object(issue_tools, '_get_current_branch', return_value='development'):
issue_tools.gitea.create_issue = Mock(return_value={'number': 1})
issue = await issue_tools.create_issue('Test', 'Body')
assert issue['number'] == 1
issue_tools.gitea.create_issue.assert_called_once()
@pytest.mark.asyncio
async def test_create_issue_main_branch_blocked(issue_tools):
"""Test creating issue on main branch (blocked)"""
with patch.object(issue_tools, '_get_current_branch', return_value='main'):
with pytest.raises(PermissionError) as exc_info:
await issue_tools.create_issue('Test', 'Body')
assert "Cannot create issues on branch 'main'" in str(exc_info.value)
@pytest.mark.asyncio
async def test_create_issue_staging_branch_allowed(issue_tools):
"""Test creating issue on staging branch (allowed for documentation)"""
with patch.object(issue_tools, '_get_current_branch', return_value='staging'):
issue_tools.gitea.create_issue = Mock(return_value={'number': 1})
issue = await issue_tools.create_issue('Test', 'Body')
assert issue['number'] == 1
@pytest.mark.asyncio
async def test_update_issue_main_branch_blocked(issue_tools):
"""Test updating issue on main branch (blocked)"""
with patch.object(issue_tools, '_get_current_branch', return_value='main'):
with pytest.raises(PermissionError) as exc_info:
await issue_tools.update_issue(1, title='Updated')
assert "Cannot update issues on branch 'main'" in str(exc_info.value)
@pytest.mark.asyncio
async def test_list_issues_main_branch_allowed(issue_tools):
"""Test listing issues on main branch (allowed - read-only)"""
with patch.object(issue_tools, '_get_current_branch', return_value='main'):
issue_tools.gitea.list_issues = Mock(return_value=[{'number': 1}])
issues = await issue_tools.list_issues(state='open')
assert len(issues) == 1
@pytest.mark.asyncio
async def test_get_issue(issue_tools):
"""Test getting specific issue"""
with patch.object(issue_tools, '_get_current_branch', return_value='development'):
issue_tools.gitea.get_issue = Mock(return_value={'number': 1, 'title': 'Test'})
issue = await issue_tools.get_issue(1)
assert issue['number'] == 1
@pytest.mark.asyncio
async def test_add_comment(issue_tools):
"""Test adding comment to issue"""
with patch.object(issue_tools, '_get_current_branch', return_value='development'):
issue_tools.gitea.add_comment = Mock(return_value={'body': 'Test comment'})
comment = await issue_tools.add_comment(1, 'Test comment')
assert comment['body'] == 'Test comment'
@pytest.mark.asyncio
async def test_aggregate_issues_company_mode(issue_tools):
"""Test aggregating issues in company mode"""
issue_tools.gitea.mode = 'company'
with patch.object(issue_tools, '_get_current_branch', return_value='development'):
issue_tools.gitea.aggregate_issues = Mock(return_value={
'repo1': [{'number': 1}],
'repo2': [{'number': 2}]
})
aggregated = await issue_tools.aggregate_issues()
assert 'repo1' in aggregated
assert 'repo2' in aggregated
@pytest.mark.asyncio
async def test_aggregate_issues_project_mode_error(issue_tools):
"""Test that aggregate_issues fails in project mode"""
issue_tools.gitea.mode = 'project'
with patch.object(issue_tools, '_get_current_branch', return_value='development'):
with pytest.raises(ValueError) as exc_info:
await issue_tools.aggregate_issues()
assert "only available in company mode" in str(exc_info.value)
def test_branch_detection():
"""Test branch detection logic"""
tools = IssueTools(Mock())
# Test development branches
with patch.object(tools, '_get_current_branch', return_value='development'):
assert tools._check_branch_permissions('create_issue') is True
with patch.object(tools, '_get_current_branch', return_value='feat/new-feature'):
assert tools._check_branch_permissions('create_issue') is True
# Test production branches
with patch.object(tools, '_get_current_branch', return_value='main'):
assert tools._check_branch_permissions('create_issue') is False
assert tools._check_branch_permissions('list_issues') is True
# Test staging branches
with patch.object(tools, '_get_current_branch', return_value='staging'):
assert tools._check_branch_permissions('create_issue') is True
assert tools._check_branch_permissions('update_issue') is False

View File

@@ -0,0 +1,478 @@
"""
Unit tests for label tools with suggestion logic.
"""
import pytest
from unittest.mock import Mock, patch
from mcp_server.tools.labels import LabelTools
@pytest.fixture
def mock_gitea_client():
"""Fixture providing mocked Gitea client"""
client = Mock()
client.repo = 'test_org/test_repo'
client.is_org_repo = Mock(return_value=True)
return client
@pytest.fixture
def label_tools(mock_gitea_client):
"""Fixture providing LabelTools instance"""
return LabelTools(mock_gitea_client)
@pytest.mark.asyncio
async def test_get_labels(label_tools):
"""Test getting all labels (org + repo)"""
label_tools.gitea.get_org_labels = Mock(return_value=[
{'name': 'Type/Bug'},
{'name': 'Type/Feature'}
])
label_tools.gitea.get_labels = Mock(return_value=[
{'name': 'Component/Backend'},
{'name': 'Component/Frontend'}
])
result = await label_tools.get_labels()
assert len(result['organization']) == 2
assert len(result['repository']) == 2
assert result['total_count'] == 4
# ========================================
# LABEL LOOKUP TESTS (NEW)
# ========================================
def test_build_label_lookup_slash_format():
"""Test building label lookup with slash format labels"""
mock_client = Mock()
mock_client.repo = 'test/repo'
tools = LabelTools(mock_client)
labels = ['Type/Bug', 'Type/Feature', 'Priority/High', 'Priority/Low']
lookup = tools._build_label_lookup(labels)
assert 'type' in lookup
assert 'bug' in lookup['type']
assert lookup['type']['bug'] == 'Type/Bug'
assert lookup['type']['feature'] == 'Type/Feature'
assert 'priority' in lookup
assert lookup['priority']['high'] == 'Priority/High'
def test_build_label_lookup_colon_space_format():
"""Test building label lookup with colon-space format labels"""
mock_client = Mock()
mock_client.repo = 'test/repo'
tools = LabelTools(mock_client)
labels = ['Type: Bug', 'Type: Feature', 'Priority: High', 'Effort: M']
lookup = tools._build_label_lookup(labels)
assert 'type' in lookup
assert 'bug' in lookup['type']
assert lookup['type']['bug'] == 'Type: Bug'
assert lookup['type']['feature'] == 'Type: Feature'
assert 'priority' in lookup
assert lookup['priority']['high'] == 'Priority: High'
# Test singular "Effort" (not "Efforts")
assert 'effort' in lookup
assert lookup['effort']['m'] == 'Effort: M'
def test_build_label_lookup_efforts_normalization():
"""Test that 'Efforts' is normalized to 'effort' for matching"""
mock_client = Mock()
mock_client.repo = 'test/repo'
tools = LabelTools(mock_client)
labels = ['Efforts/XS', 'Efforts/S', 'Efforts/M']
lookup = tools._build_label_lookup(labels)
# 'Efforts' should be normalized to 'effort'
assert 'effort' in lookup
assert lookup['effort']['xs'] == 'Efforts/XS'
def test_find_label():
"""Test finding labels from lookup"""
mock_client = Mock()
mock_client.repo = 'test/repo'
tools = LabelTools(mock_client)
lookup = {
'type': {'bug': 'Type: Bug', 'feature': 'Type: Feature'},
'priority': {'high': 'Priority: High', 'low': 'Priority: Low'}
}
assert tools._find_label(lookup, 'type', 'bug') == 'Type: Bug'
assert tools._find_label(lookup, 'priority', 'high') == 'Priority: High'
assert tools._find_label(lookup, 'type', 'nonexistent') is None
assert tools._find_label(lookup, 'nonexistent', 'bug') is None
# ========================================
# SUGGEST LABELS WITH DYNAMIC FORMAT TESTS
# ========================================
def _create_tools_with_labels(labels):
"""Helper to create LabelTools with mocked labels"""
import asyncio
mock_client = Mock()
mock_client.repo = 'test/repo'
mock_client.is_org_repo = Mock(return_value=False)
mock_client.get_labels = Mock(return_value=[{'name': l} for l in labels])
return LabelTools(mock_client)
@pytest.mark.asyncio
async def test_suggest_labels_with_slash_format():
"""Test label suggestion with slash format labels"""
labels = [
'Type/Bug', 'Type/Feature', 'Type/Refactor',
'Priority/Critical', 'Priority/High', 'Priority/Medium', 'Priority/Low',
'Complexity/Simple', 'Complexity/Medium', 'Complexity/Complex',
'Component/Auth'
]
tools = _create_tools_with_labels(labels)
context = "Fix critical bug in login authentication"
suggestions = await tools.suggest_labels(context)
assert 'Type/Bug' in suggestions
assert 'Priority/Critical' in suggestions
assert 'Component/Auth' in suggestions
@pytest.mark.asyncio
async def test_suggest_labels_with_colon_space_format():
"""Test label suggestion with colon-space format labels"""
labels = [
'Type: Bug', 'Type: Feature', 'Type: Refactor',
'Priority: Critical', 'Priority: High', 'Priority: Medium', 'Priority: Low',
'Complexity: Simple', 'Complexity: Medium', 'Complexity: Complex',
'Effort: XS', 'Effort: S', 'Effort: M', 'Effort: L', 'Effort: XL'
]
tools = _create_tools_with_labels(labels)
context = "Fix critical bug for tiny 1 hour fix"
suggestions = await tools.suggest_labels(context)
# Should return colon-space format labels
assert 'Type: Bug' in suggestions
assert 'Priority: Critical' in suggestions
assert 'Effort: XS' in suggestions
@pytest.mark.asyncio
async def test_suggest_labels_bug():
"""Test label suggestion for bug context"""
labels = [
'Type/Bug', 'Type/Feature',
'Priority/Critical', 'Priority/High', 'Priority/Medium', 'Priority/Low',
'Complexity/Simple', 'Complexity/Medium', 'Complexity/Complex',
'Component/Auth'
]
tools = _create_tools_with_labels(labels)
context = "Fix critical bug in login authentication"
suggestions = await tools.suggest_labels(context)
assert 'Type/Bug' in suggestions
assert 'Priority/Critical' in suggestions
assert 'Component/Auth' in suggestions
@pytest.mark.asyncio
async def test_suggest_labels_feature():
"""Test label suggestion for feature context"""
labels = ['Type/Feature', 'Priority/Medium', 'Complexity/Medium']
tools = _create_tools_with_labels(labels)
context = "Add new feature to implement user dashboard"
suggestions = await tools.suggest_labels(context)
assert 'Type/Feature' in suggestions
assert any('Priority' in label for label in suggestions)
@pytest.mark.asyncio
async def test_suggest_labels_refactor():
"""Test label suggestion for refactor context"""
labels = ['Type/Refactor', 'Priority/Medium', 'Complexity/Medium', 'Component/Backend']
tools = _create_tools_with_labels(labels)
context = "Refactor architecture to extract service layer"
suggestions = await tools.suggest_labels(context)
assert 'Type/Refactor' in suggestions
assert 'Component/Backend' in suggestions
@pytest.mark.asyncio
async def test_suggest_labels_documentation():
"""Test label suggestion for documentation context"""
labels = ['Type/Documentation', 'Priority/Medium', 'Complexity/Medium', 'Component/API', 'Component/Docs']
tools = _create_tools_with_labels(labels)
context = "Update documentation for API endpoints"
suggestions = await tools.suggest_labels(context)
assert 'Type/Documentation' in suggestions
assert 'Component/API' in suggestions or 'Component/Docs' in suggestions
@pytest.mark.asyncio
async def test_suggest_labels_priority():
"""Test priority detection in suggestions"""
labels = ['Type/Feature', 'Priority/Critical', 'Priority/High', 'Priority/Medium', 'Priority/Low', 'Complexity/Medium']
tools = _create_tools_with_labels(labels)
# Critical priority
context = "Urgent blocker in production"
suggestions = await tools.suggest_labels(context)
assert 'Priority/Critical' in suggestions
# High priority
context = "Important feature needed asap"
suggestions = await tools.suggest_labels(context)
assert 'Priority/High' in suggestions
# Low priority
context = "Nice-to-have optional improvement"
suggestions = await tools.suggest_labels(context)
assert 'Priority/Low' in suggestions
@pytest.mark.asyncio
async def test_suggest_labels_complexity():
"""Test complexity detection in suggestions"""
labels = ['Type/Feature', 'Priority/Medium', 'Complexity/Simple', 'Complexity/Medium', 'Complexity/Complex']
tools = _create_tools_with_labels(labels)
# Simple complexity
context = "Simple quick fix for typo"
suggestions = await tools.suggest_labels(context)
assert 'Complexity/Simple' in suggestions
# Complex complexity
context = "Complex challenging architecture redesign"
suggestions = await tools.suggest_labels(context)
assert 'Complexity/Complex' in suggestions
@pytest.mark.asyncio
async def test_suggest_labels_efforts():
"""Test efforts detection in suggestions"""
labels = ['Type/Feature', 'Priority/Medium', 'Complexity/Medium', 'Efforts/XS', 'Efforts/S', 'Efforts/M', 'Efforts/L', 'Efforts/XL']
tools = _create_tools_with_labels(labels)
# XS effort
context = "Tiny fix that takes 1 hour"
suggestions = await tools.suggest_labels(context)
assert 'Efforts/XS' in suggestions
# L effort
context = "Large feature taking 1 week"
suggestions = await tools.suggest_labels(context)
assert 'Efforts/L' in suggestions
@pytest.mark.asyncio
async def test_suggest_labels_components():
"""Test component detection in suggestions"""
labels = ['Type/Feature', 'Priority/Medium', 'Complexity/Medium', 'Component/Backend', 'Component/Frontend', 'Component/API', 'Component/Database']
tools = _create_tools_with_labels(labels)
# Backend component
context = "Update backend API service"
suggestions = await tools.suggest_labels(context)
assert 'Component/Backend' in suggestions
assert 'Component/API' in suggestions
# Frontend component
context = "Fix frontend UI component"
suggestions = await tools.suggest_labels(context)
assert 'Component/Frontend' in suggestions
# Database component
context = "Add database migration for schema"
suggestions = await tools.suggest_labels(context)
assert 'Component/Database' in suggestions
@pytest.mark.asyncio
async def test_suggest_labels_tech_stack():
"""Test tech stack detection in suggestions"""
labels = ['Type/Feature', 'Priority/Medium', 'Complexity/Medium', 'Tech/Python', 'Tech/FastAPI', 'Tech/Docker', 'Tech/PostgreSQL']
tools = _create_tools_with_labels(labels)
# Python
context = "Update Python FastAPI endpoint"
suggestions = await tools.suggest_labels(context)
assert 'Tech/Python' in suggestions
assert 'Tech/FastAPI' in suggestions
# Docker
context = "Fix Dockerfile configuration"
suggestions = await tools.suggest_labels(context)
assert 'Tech/Docker' in suggestions
# PostgreSQL
context = "Optimize PostgreSQL query"
suggestions = await tools.suggest_labels(context)
assert 'Tech/PostgreSQL' in suggestions
@pytest.mark.asyncio
async def test_suggest_labels_source():
"""Test source detection in suggestions"""
labels = ['Type/Feature', 'Priority/Medium', 'Complexity/Medium', 'Source/Development', 'Source/Staging', 'Source/Production']
tools = _create_tools_with_labels(labels)
# Development
context = "Issue found in development environment"
suggestions = await tools.suggest_labels(context)
assert 'Source/Development' in suggestions
# Production
context = "Critical production issue"
suggestions = await tools.suggest_labels(context)
assert 'Source/Production' in suggestions
@pytest.mark.asyncio
async def test_suggest_labels_risk():
"""Test risk detection in suggestions"""
labels = ['Type/Feature', 'Priority/Medium', 'Complexity/Medium', 'Risk/High', 'Risk/Low']
tools = _create_tools_with_labels(labels)
# High risk
context = "Breaking change to major API"
suggestions = await tools.suggest_labels(context)
assert 'Risk/High' in suggestions
# Low risk
context = "Safe minor update with low risk"
suggestions = await tools.suggest_labels(context)
assert 'Risk/Low' in suggestions
@pytest.mark.asyncio
async def test_suggest_labels_multiple_categories():
"""Test that suggestions span multiple categories"""
labels = [
'Type/Bug', 'Type/Feature',
'Priority/Critical', 'Priority/Medium',
'Complexity/Complex', 'Complexity/Medium',
'Component/Backend', 'Component/API', 'Component/Auth',
'Tech/FastAPI', 'Tech/PostgreSQL',
'Source/Production'
]
tools = _create_tools_with_labels(labels)
context = """
Urgent critical bug in production backend API service.
Need to fix broken authentication endpoint.
This is a complex issue requiring FastAPI and PostgreSQL expertise.
"""
suggestions = await tools.suggest_labels(context)
# Should have Type
assert any('Type/' in label for label in suggestions)
# Should have Priority
assert any('Priority/' in label for label in suggestions)
# Should have Component
assert any('Component/' in label for label in suggestions)
# Should have Tech
assert any('Tech/' in label for label in suggestions)
# Should have Source
assert any('Source/' in label for label in suggestions)
@pytest.mark.asyncio
async def test_suggest_labels_empty_repo():
"""Test suggestions when no repo specified and no labels available"""
mock_client = Mock()
mock_client.repo = None
tools = LabelTools(mock_client)
context = "Fix a bug"
suggestions = await tools.suggest_labels(context)
# Should return empty list when no repo
assert suggestions == []
@pytest.mark.asyncio
async def test_suggest_labels_no_matching_labels():
"""Test suggestions return empty when no matching labels exist"""
labels = ['Custom/Label', 'Other/Thing'] # No standard labels
tools = _create_tools_with_labels(labels)
context = "Fix a bug"
suggestions = await tools.suggest_labels(context)
# Should return empty list since no Type/Bug or similar exists
assert len(suggestions) == 0
@pytest.mark.asyncio
async def test_get_labels_org_owned_repo():
"""Test getting labels for organization-owned repository"""
mock_client = Mock()
mock_client.repo = 'myorg/myrepo'
mock_client.is_org_repo = Mock(return_value=True)
mock_client.get_org_labels = Mock(return_value=[
{'name': 'Type/Bug', 'id': 1},
{'name': 'Type/Feature', 'id': 2}
])
mock_client.get_labels = Mock(return_value=[
{'name': 'Component/Backend', 'id': 3}
])
tools = LabelTools(mock_client)
result = await tools.get_labels()
# Should fetch both org and repo labels
mock_client.is_org_repo.assert_called_once_with('myorg/myrepo')
mock_client.get_org_labels.assert_called_once_with('myorg')
mock_client.get_labels.assert_called_once_with('myorg/myrepo')
assert len(result['organization']) == 2
assert len(result['repository']) == 1
assert result['total_count'] == 3
@pytest.mark.asyncio
async def test_get_labels_user_owned_repo():
"""Test getting labels for user-owned repository (no org labels)"""
mock_client = Mock()
mock_client.repo = 'lmiranda/personal-portfolio'
mock_client.is_org_repo = Mock(return_value=False)
mock_client.get_labels = Mock(return_value=[
{'name': 'bug', 'id': 1},
{'name': 'enhancement', 'id': 2}
])
tools = LabelTools(mock_client)
result = await tools.get_labels()
# Should check if org repo
mock_client.is_org_repo.assert_called_once_with('lmiranda/personal-portfolio')
# Should NOT call get_org_labels for user-owned repos
mock_client.get_org_labels.assert_not_called()
# Should still get repo labels
mock_client.get_labels.assert_called_once_with('lmiranda/personal-portfolio')
assert len(result['organization']) == 0
assert len(result['repository']) == 2
assert result['total_count'] == 2

View File

@@ -1,17 +1,19 @@
# NetBox MCP Server
MCP (Model Context Protocol) server for essential NetBox API integration with Claude Code.
MCP (Model Context Protocol) server for comprehensive NetBox API integration with Claude Code.
## Overview
This MCP server provides Claude Code with focused access to the NetBox REST API for tracking **servers, services, IP addresses, and databases**. It has been optimized to include only essential tools:
This MCP server provides Claude Code with full access to the NetBox REST API, enabling infrastructure management, documentation, and automation workflows. It covers all major NetBox application areas:
- **DCIM** - Sites, Devices (servers/VPS), Interfaces
- **IPAM** - IP Addresses, Prefixes, Services (applications/databases)
- **DCIM** - Sites, Locations, Racks, Devices, Interfaces, Cables, Power
- **IPAM** - IP Addresses, Prefixes, VLANs, VRFs, ASNs, Services
- **Circuits** - Providers, Circuits, Terminations
- **Virtualization** - Clusters, Virtual Machines, VM Interfaces
- **Extras** - Tags, Journal Entries (audit/notes)
**Total:** 37 tools (~3,700 tokens) — down from 182 tools (~19,810 tokens).
- **Tenancy** - Tenants, Contacts, Contact Assignments
- **VPN** - Tunnels, IKE/IPSec Policies, L2VPN
- **Wireless** - Wireless LANs, Links, Groups
- **Extras** - Tags, Custom Fields, Webhooks, Config Contexts, Audit Log
## Installation
@@ -47,227 +49,249 @@ EOF
### 3. Register with Claude Code
Add to your Claude Code MCP configuration (`.claude/mcp.json` or project-level `.mcp.json`):
Add to your Claude Code MCP configuration (`~/.config/claude/mcp.json` or project `.mcp.json`):
```json
{
"mcpServers": {
"netbox": {
"command": "/path/to/mcp-servers/netbox/.venv/bin/python",
"args": ["-m", "mcp_server"],
"args": ["-m", "mcp_server.server"],
"cwd": "/path/to/mcp-servers/netbox"
}
}
}
```
**Windows:**
```json
{
"mcpServers": {
"netbox": {
"command": "C:\\path\\to\\mcp-servers\\netbox\\.venv\\Scripts\\python.exe",
"args": ["-m", "mcp_server"],
"cwd": "C:\\path\\to\\mcp-servers\\netbox"
}
}
}
```
## Available Tools (37 Total)
### DCIM: Sites, Devices, Interfaces (11 tools)
**Sites (4):**
| Tool | Description | Parameters |
|------|-------------|-----------|
| `dcim_list_sites` | List sites | `name`, `status` |
| `dcim_get_site` | Get site by ID | `id` (required) |
| `dcim_create_site` | Create site | `name`, `slug` (required), `status` |
| `dcim_update_site` | Update site | `id` (required), fields to update |
**Devices (4):**
| Tool | Description | Parameters |
|------|-------------|-----------|
| `dcim_list_devices` | List devices (servers/VPS) | `name`, `site_id`, `status`, `role_id` |
| `dcim_get_device` | Get device by ID | `id` (required) |
| `dcim_create_device` | Create device | `name`, `device_type`, `role`, `site` (required) |
| `dcim_update_device` | Update device | `id` (required), fields to update |
**Interfaces (3):**
| Tool | Description | Parameters |
|------|-------------|-----------|
| `dcim_list_interfaces` | List device interfaces | `device_id`, `name`, `type` |
| `dcim_get_interface` | Get interface by ID | `id` (required) |
| `dcim_create_interface` | Create interface | `device`, `name`, `type` (required) |
### IPAM: IPs, Prefixes, Services (10 tools)
**IP Addresses (4):**
| Tool | Description | Parameters |
|------|-------------|-----------|
| `ipam_list_ip_addresses` | List IP addresses | `address`, `device_id`, `status` |
| `ipam_get_ip_address` | Get IP by ID | `id` (required) |
| `ipam_create_ip_address` | Create IP address | `address` (required), `status`, `assigned_object_type` |
| `ipam_update_ip_address` | Update IP address | `id` (required), fields to update |
**Prefixes (3):**
| Tool | Description | Parameters |
|------|-------------|-----------|
| `ipam_list_prefixes` | List prefixes | `prefix`, `site_id`, `status` |
| `ipam_get_prefix` | Get prefix by ID | `id` (required) |
| `ipam_create_prefix` | Create prefix | `prefix` (required), `status`, `site` |
**Services (3):**
| Tool | Description | Parameters |
|------|-------------|-----------|
| `ipam_list_services` | List services (apps/databases) | `device_id`, `virtual_machine_id`, `name` |
| `ipam_get_service` | Get service by ID | `id` (required) |
| `ipam_create_service` | Create service | `name`, `ports`, `protocol` (required), `device`, `virtual_machine` |
### Virtualization: Clusters, VMs, VM Interfaces (10 tools)
**Clusters (3):**
| Tool | Description | Parameters |
|------|-------------|-----------|
| `virt_list_clusters` | List virtualization clusters | `name`, `site_id` |
| `virt_get_cluster` | Get cluster by ID | `id` (required) |
| `virt_create_cluster` | Create cluster | `name`, `type` (required), `site` |
**Virtual Machines (4):**
| Tool | Description | Parameters |
|------|-------------|-----------|
| `virt_list_vms` | List VMs | `name`, `cluster_id`, `site_id`, `status` |
| `virt_get_vm` | Get VM by ID | `id` (required) |
| `virt_create_vm` | Create VM | `name`, `cluster` (required), `vcpus`, `memory`, `disk` |
| `virt_update_vm` | Update VM | `id` (required), fields to update |
**VM Interfaces (3):**
| Tool | Description | Parameters |
|------|-------------|-----------|
| `virt_list_vm_ifaces` | List VM interfaces | `virtual_machine_id` |
| `virt_get_vm_iface` | Get VM interface by ID | `id` (required) |
| `virt_create_vm_iface` | Create VM interface | `virtual_machine`, `name` (required) |
### Extras: Tags, Journal Entries (6 tools)
**Tags (3):**
| Tool | Description | Parameters |
|------|-------------|-----------|
| `extras_list_tags` | List tags | `name` |
| `extras_get_tag` | Get tag by ID | `id` (required) |
| `extras_create_tag` | Create tag | `name`, `slug` (required), `color` |
**Journal Entries (3):**
| Tool | Description | Parameters |
|------|-------------|-----------|
| `extras_list_journal_entries` | List journal entries | `assigned_object_type`, `assigned_object_id` |
| `extras_get_journal_entry` | Get journal entry by ID | `id` (required) |
| `extras_create_journal_entry` | Create journal entry | `assigned_object_type`, `assigned_object_id`, `comments` (required), `kind` |
## Configuration
All configuration is done via environment variables in `~/.config/claude/netbox.env`:
### Environment Variables
| Variable | Required | Default | Description |
|----------|----------|---------|-------------|
| `NETBOX_API_URL` | Yes | | NetBox API URL (e.g., https://netbox.example.com/api) |
| `NETBOX_API_TOKEN` | Yes | | NetBox API token |
| `NETBOX_VERIFY_SSL` | No | true | Verify SSL certificates |
| `NETBOX_TIMEOUT` | No | 30 | Request timeout in seconds |
| `NETBOX_API_URL` | Yes | - | Full URL to NetBox API (e.g., `https://netbox.example.com/api`) |
| `NETBOX_API_TOKEN` | Yes | - | API authentication token |
| `NETBOX_VERIFY_SSL` | No | `true` | Verify SSL certificates |
| `NETBOX_TIMEOUT` | No | `30` | Request timeout in seconds |
### Configuration Hierarchy
1. **System-level** (`~/.config/claude/netbox.env`): Credentials and defaults
2. **Project-level** (`.env` in current directory): Optional overrides
## Available Tools
### DCIM (Data Center Infrastructure Management)
| Tool | Description |
|------|-------------|
| `dcim_list_sites` | List all sites |
| `dcim_get_site` | Get site details |
| `dcim_create_site` | Create a new site |
| `dcim_update_site` | Update a site |
| `dcim_delete_site` | Delete a site |
| `dcim_list_devices` | List all devices |
| `dcim_get_device` | Get device details |
| `dcim_create_device` | Create a new device |
| `dcim_update_device` | Update a device |
| `dcim_delete_device` | Delete a device |
| `dcim_list_interfaces` | List device interfaces |
| `dcim_create_interface` | Create an interface |
| `dcim_list_racks` | List all racks |
| `dcim_create_rack` | Create a new rack |
| `dcim_list_cables` | List all cables |
| `dcim_create_cable` | Create a cable connection |
| ... and many more |
### IPAM (IP Address Management)
| Tool | Description |
|------|-------------|
| `ipam_list_prefixes` | List IP prefixes |
| `ipam_create_prefix` | Create a prefix |
| `ipam_list_available_prefixes` | List available child prefixes |
| `ipam_create_available_prefix` | Auto-allocate a prefix |
| `ipam_list_ip_addresses` | List IP addresses |
| `ipam_create_ip_address` | Create an IP address |
| `ipam_list_available_ips` | List available IPs in prefix |
| `ipam_create_available_ip` | Auto-allocate an IP |
| `ipam_list_vlans` | List VLANs |
| `ipam_create_vlan` | Create a VLAN |
| `ipam_list_vrfs` | List VRFs |
| ... and many more |
### Circuits
| Tool | Description |
|------|-------------|
| `circuits_list_providers` | List circuit providers |
| `circuits_create_provider` | Create a provider |
| `circuits_list_circuits` | List circuits |
| `circuits_create_circuit` | Create a circuit |
| `circuits_list_circuit_terminations` | List terminations |
| ... and more |
### Virtualization
| Tool | Description |
|------|-------------|
| `virtualization_list_clusters` | List clusters |
| `virtualization_create_cluster` | Create a cluster |
| `virtualization_list_virtual_machines` | List VMs |
| `virtualization_create_virtual_machine` | Create a VM |
| `virtualization_list_vm_interfaces` | List VM interfaces |
| ... and more |
### Tenancy
| Tool | Description |
|------|-------------|
| `tenancy_list_tenants` | List tenants |
| `tenancy_create_tenant` | Create a tenant |
| `tenancy_list_contacts` | List contacts |
| `tenancy_create_contact` | Create a contact |
| ... and more |
### VPN
| Tool | Description |
|------|-------------|
| `vpn_list_tunnels` | List VPN tunnels |
| `vpn_create_tunnel` | Create a tunnel |
| `vpn_list_l2vpns` | List L2VPNs |
| `vpn_list_ike_policies` | List IKE policies |
| `vpn_list_ipsec_policies` | List IPSec policies |
| ... and more |
### Wireless
| Tool | Description |
|------|-------------|
| `wireless_list_wireless_lans` | List wireless LANs |
| `wireless_create_wireless_lan` | Create a WLAN |
| `wireless_list_wireless_links` | List wireless links |
| ... and more |
### Extras
| Tool | Description |
|------|-------------|
| `extras_list_tags` | List all tags |
| `extras_create_tag` | Create a tag |
| `extras_list_custom_fields` | List custom fields |
| `extras_list_webhooks` | List webhooks |
| `extras_list_journal_entries` | List journal entries |
| `extras_create_journal_entry` | Create journal entry |
| `extras_list_object_changes` | View audit log |
| `extras_list_config_contexts` | List config contexts |
| ... and more |
## Usage Examples
### List all devices at a site
```
Use the dcim_list_devices tool with site_id filter to see all devices at site 5
```
### Create a new prefix and allocate IPs
```
1. Use ipam_create_prefix to create 10.0.1.0/24
2. Use ipam_list_available_ips with the prefix ID to see available addresses
3. Use ipam_create_available_ip to auto-allocate the next IP
```
### Document a new server
```
1. Use dcim_create_device to create the device
2. Use dcim_create_interface to add network interfaces
3. Use ipam_create_ip_address to assign IPs to interfaces
4. Use extras_create_journal_entry to add notes
```
### Audit recent changes
```
Use extras_list_object_changes to see recent modifications in NetBox
```
## Architecture
### Hybrid Configuration
- **System-level:** `~/.config/claude/netbox.env` (credentials)
- **Project-level:** `.env` (optional overrides)
### Tool Routing
Tool names follow the pattern `{module}_{action}_{resource}`:
- `dcim_list_sites` → DCIMTools.list_sites()
- `ipam_create_service` → IPAMTools.create_service()
- `virt_list_vms` → VirtualizationTools.list_virtual_machines()
Shortened names (virt_*) are mapped via TOOL_NAME_MAP to meet the 28-character MCP limit.
### Error Handling
All tools return JSON responses. Errors are caught and returned as:
```json
{
"error": "Error message",
"status_code": 404
}
```
## Development
### Testing
```bash
# Test import
python -c "from mcp_server.server import NetBoxMCPServer; print('OK')"
# Test tool count
python -c "from mcp_server.server import TOOL_DEFINITIONS; print(f'{len(TOOL_DEFINITIONS)} tools')"
```
### File Structure
```
netbox/
mcp-servers/netbox/
├── mcp_server/
│ ├── __init__.py
│ ├── server.py # Main MCP server (37 TOOL_DEFINITIONS)
│ ├── config.py # Configuration loader
│ ├── netbox_client.py # HTTP client wrapper
│ ├── netbox_client.py # Generic HTTP client
│ ├── server.py # MCP server entry point
│ └── tools/
│ ├── __init__.py
│ ├── dcim.py # Sites, Devices, Interfaces
│ ├── ipam.py # IPs, Prefixes, Services
│ ├── virtualization.py # Clusters, VMs, VM Interfaces
── extras.py # Tags, Journal Entries
├── .venv/ # Python virtual environment
│ ├── dcim.py # DCIM operations
│ ├── ipam.py # IPAM operations
│ ├── circuits.py # Circuits operations
── virtualization.py
│ ├── tenancy.py
│ ├── vpn.py
│ ├── wireless.py
│ └── extras.py
├── tests/
│ └── __init__.py
├── requirements.txt
└── README.md
```
## API Coverage
This MCP server provides comprehensive coverage of the NetBox REST API v4.x:
- Full CRUD operations for all major models
- Filtering and search capabilities
- Special endpoints (available prefixes, available IPs)
- Pagination handling (automatic)
- Error handling with detailed messages
## Error Handling
The server returns detailed error messages from the NetBox API, including:
- Validation errors
- Authentication failures
- Not found errors
- Permission errors
## Security Notes
- API tokens should be kept secure and not committed to version control
- Use environment variables or the system config file for credentials
- SSL verification is enabled by default
- Consider using read-only tokens for query-only workflows
## Troubleshooting
### MCP Server Won't Start
### Common Issues
**Check configuration:**
```bash
cat ~/.config/claude/netbox.env
1. **Connection refused**: Check `NETBOX_API_URL` is correct and accessible
2. **401 Unauthorized**: Verify your API token is valid
3. **SSL errors**: Set `NETBOX_VERIFY_SSL=false` for self-signed certs (not recommended for production)
4. **Timeout errors**: Increase `NETBOX_TIMEOUT` for slow connections
### Debug Mode
Enable debug logging:
```python
import logging
logging.basicConfig(level=logging.DEBUG)
```
**Test credentials:**
```bash
curl -H "Authorization: Token YOUR_TOKEN" https://netbox.example.com/api/
```
### Tools Not Appearing in Claude
**Verify MCP registration:**
```bash
cat ~/.claude/mcp.json # or project-level .mcp.json
```
**Check MCP server logs:**
Claude Code will show MCP server stderr in the UI.
### Connection Errors
- Verify `NETBOX_API_URL` ends with `/api`
- Check firewall/network connectivity to NetBox instance
- Ensure API token has required permissions
## License
MIT License - See LICENSE file for details.
## Contributing
This MCP server is part of the leo-claude-mktplace project. For issues or contributions, refer to the main repository.
1. Follow the existing code patterns
2. Add tests for new functionality
3. Update documentation for new tools
4. Ensure compatibility with NetBox 4.x API
## License
MIT License - Part of the Leo Claude Marketplace.

View File

@@ -4,7 +4,6 @@ NetBox API client for interacting with NetBox REST API.
Provides a generic HTTP client with methods for all standard REST operations.
Individual tool modules use this client for their specific endpoints.
"""
import json
import requests
import logging
from typing import List, Dict, Optional, Any, Union
@@ -84,20 +83,7 @@ class NetBoxClient:
if response.status_code == 204 or not response.content:
return None
# Parse JSON with diagnostic error handling
try:
return response.json()
except json.JSONDecodeError as e:
logger.error(
f"JSON decode failed. Status: {response.status_code}, "
f"Content-Length: {len(response.content)}, "
f"Content preview: {response.content[:200]!r}"
)
raise ValueError(
f"Invalid JSON response from NetBox: {e}. "
f"Status code: {response.status_code}, "
f"Content length: {len(response.content)} bytes"
) from e
return response.json()
def list(
self,

File diff suppressed because it is too large Load Diff

View File

@@ -1,12 +1,20 @@
"""NetBox MCP tools package."""
from .dcim import DCIMTools
from .ipam import IPAMTools
from .circuits import CircuitsTools
from .virtualization import VirtualizationTools
from .tenancy import TenancyTools
from .vpn import VPNTools
from .wireless import WirelessTools
from .extras import ExtrasTools
__all__ = [
'DCIMTools',
'IPAMTools',
'CircuitsTools',
'VirtualizationTools',
'TenancyTools',
'VPNTools',
'WirelessTools',
'ExtrasTools',
]

View File

@@ -0,0 +1,373 @@
"""
Circuits tools for NetBox MCP Server.
Covers: Providers, Circuits, Circuit Types, Circuit Terminations, and related models.
"""
import logging
from typing import List, Dict, Optional, Any
from ..netbox_client import NetBoxClient
logger = logging.getLogger(__name__)
class CircuitsTools:
"""Tools for Circuits operations in NetBox"""
def __init__(self, client: NetBoxClient):
self.client = client
self.base_endpoint = 'circuits'
# ==================== Providers ====================
async def list_providers(
self,
name: Optional[str] = None,
slug: Optional[str] = None,
**kwargs
) -> List[Dict]:
"""List all circuit providers."""
params = {k: v for k, v in {'name': name, 'slug': slug, **kwargs}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/providers', params=params)
async def get_provider(self, id: int) -> Dict:
"""Get a specific provider by ID."""
return self.client.get(f'{self.base_endpoint}/providers', id)
async def create_provider(
self,
name: str,
slug: str,
asns: Optional[List[int]] = None,
description: Optional[str] = None,
**kwargs
) -> Dict:
"""Create a new provider."""
data = {'name': name, 'slug': slug, **kwargs}
if asns:
data['asns'] = asns
if description:
data['description'] = description
return self.client.create(f'{self.base_endpoint}/providers', data)
async def update_provider(self, id: int, **kwargs) -> Dict:
"""Update a provider."""
return self.client.patch(f'{self.base_endpoint}/providers', id, kwargs)
async def delete_provider(self, id: int) -> None:
"""Delete a provider."""
self.client.delete(f'{self.base_endpoint}/providers', id)
# ==================== Provider Accounts ====================
async def list_provider_accounts(
self,
provider_id: Optional[int] = None,
name: Optional[str] = None,
account: Optional[str] = None,
**kwargs
) -> List[Dict]:
"""List all provider accounts."""
params = {k: v for k, v in {
'provider_id': provider_id, 'name': name, 'account': account, **kwargs
}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/provider-accounts', params=params)
async def get_provider_account(self, id: int) -> Dict:
"""Get a specific provider account by ID."""
return self.client.get(f'{self.base_endpoint}/provider-accounts', id)
async def create_provider_account(
self,
provider: int,
account: str,
name: Optional[str] = None,
description: Optional[str] = None,
**kwargs
) -> Dict:
"""Create a new provider account."""
data = {'provider': provider, 'account': account, **kwargs}
if name:
data['name'] = name
if description:
data['description'] = description
return self.client.create(f'{self.base_endpoint}/provider-accounts', data)
async def update_provider_account(self, id: int, **kwargs) -> Dict:
"""Update a provider account."""
return self.client.patch(f'{self.base_endpoint}/provider-accounts', id, kwargs)
async def delete_provider_account(self, id: int) -> None:
"""Delete a provider account."""
self.client.delete(f'{self.base_endpoint}/provider-accounts', id)
# ==================== Provider Networks ====================
async def list_provider_networks(
self,
provider_id: Optional[int] = None,
name: Optional[str] = None,
**kwargs
) -> List[Dict]:
"""List all provider networks."""
params = {k: v for k, v in {
'provider_id': provider_id, 'name': name, **kwargs
}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/provider-networks', params=params)
async def get_provider_network(self, id: int) -> Dict:
"""Get a specific provider network by ID."""
return self.client.get(f'{self.base_endpoint}/provider-networks', id)
async def create_provider_network(
self,
provider: int,
name: str,
service_id: Optional[str] = None,
description: Optional[str] = None,
**kwargs
) -> Dict:
"""Create a new provider network."""
data = {'provider': provider, 'name': name, **kwargs}
if service_id:
data['service_id'] = service_id
if description:
data['description'] = description
return self.client.create(f'{self.base_endpoint}/provider-networks', data)
async def update_provider_network(self, id: int, **kwargs) -> Dict:
"""Update a provider network."""
return self.client.patch(f'{self.base_endpoint}/provider-networks', id, kwargs)
async def delete_provider_network(self, id: int) -> None:
"""Delete a provider network."""
self.client.delete(f'{self.base_endpoint}/provider-networks', id)
# ==================== Circuit Types ====================
async def list_circuit_types(
self,
name: Optional[str] = None,
slug: Optional[str] = None,
**kwargs
) -> List[Dict]:
"""List all circuit types."""
params = {k: v for k, v in {'name': name, 'slug': slug, **kwargs}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/circuit-types', params=params)
async def get_circuit_type(self, id: int) -> Dict:
"""Get a specific circuit type by ID."""
return self.client.get(f'{self.base_endpoint}/circuit-types', id)
async def create_circuit_type(
self,
name: str,
slug: str,
color: Optional[str] = None,
description: Optional[str] = None,
**kwargs
) -> Dict:
"""Create a new circuit type."""
data = {'name': name, 'slug': slug, **kwargs}
if color:
data['color'] = color
if description:
data['description'] = description
return self.client.create(f'{self.base_endpoint}/circuit-types', data)
async def update_circuit_type(self, id: int, **kwargs) -> Dict:
"""Update a circuit type."""
return self.client.patch(f'{self.base_endpoint}/circuit-types', id, kwargs)
async def delete_circuit_type(self, id: int) -> None:
"""Delete a circuit type."""
self.client.delete(f'{self.base_endpoint}/circuit-types', id)
# ==================== Circuit Groups ====================
async def list_circuit_groups(
self,
name: Optional[str] = None,
slug: Optional[str] = None,
**kwargs
) -> List[Dict]:
"""List all circuit groups."""
params = {k: v for k, v in {'name': name, 'slug': slug, **kwargs}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/circuit-groups', params=params)
async def get_circuit_group(self, id: int) -> Dict:
"""Get a specific circuit group by ID."""
return self.client.get(f'{self.base_endpoint}/circuit-groups', id)
async def create_circuit_group(
self,
name: str,
slug: str,
description: Optional[str] = None,
**kwargs
) -> Dict:
"""Create a new circuit group."""
data = {'name': name, 'slug': slug, **kwargs}
if description:
data['description'] = description
return self.client.create(f'{self.base_endpoint}/circuit-groups', data)
async def update_circuit_group(self, id: int, **kwargs) -> Dict:
"""Update a circuit group."""
return self.client.patch(f'{self.base_endpoint}/circuit-groups', id, kwargs)
async def delete_circuit_group(self, id: int) -> None:
"""Delete a circuit group."""
self.client.delete(f'{self.base_endpoint}/circuit-groups', id)
# ==================== Circuit Group Assignments ====================
async def list_circuit_group_assignments(
self,
group_id: Optional[int] = None,
circuit_id: Optional[int] = None,
**kwargs
) -> List[Dict]:
"""List all circuit group assignments."""
params = {k: v for k, v in {
'group_id': group_id, 'circuit_id': circuit_id, **kwargs
}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/circuit-group-assignments', params=params)
async def get_circuit_group_assignment(self, id: int) -> Dict:
"""Get a specific circuit group assignment by ID."""
return self.client.get(f'{self.base_endpoint}/circuit-group-assignments', id)
async def create_circuit_group_assignment(
self,
group: int,
circuit: int,
priority: Optional[str] = None,
**kwargs
) -> Dict:
"""Create a new circuit group assignment."""
data = {'group': group, 'circuit': circuit, **kwargs}
if priority:
data['priority'] = priority
return self.client.create(f'{self.base_endpoint}/circuit-group-assignments', data)
async def update_circuit_group_assignment(self, id: int, **kwargs) -> Dict:
"""Update a circuit group assignment."""
return self.client.patch(f'{self.base_endpoint}/circuit-group-assignments', id, kwargs)
async def delete_circuit_group_assignment(self, id: int) -> None:
"""Delete a circuit group assignment."""
self.client.delete(f'{self.base_endpoint}/circuit-group-assignments', id)
# ==================== Circuits ====================
async def list_circuits(
self,
cid: Optional[str] = None,
provider_id: Optional[int] = None,
provider_account_id: Optional[int] = None,
type_id: Optional[int] = None,
status: Optional[str] = None,
tenant_id: Optional[int] = None,
site_id: Optional[int] = None,
**kwargs
) -> List[Dict]:
"""List all circuits with optional filtering."""
params = {k: v for k, v in {
'cid': cid, 'provider_id': provider_id, 'provider_account_id': provider_account_id,
'type_id': type_id, 'status': status, 'tenant_id': tenant_id, 'site_id': site_id, **kwargs
}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/circuits', params=params)
async def get_circuit(self, id: int) -> Dict:
"""Get a specific circuit by ID."""
return self.client.get(f'{self.base_endpoint}/circuits', id)
async def create_circuit(
self,
cid: str,
provider: int,
type: int,
status: str = 'active',
provider_account: Optional[int] = None,
tenant: Optional[int] = None,
install_date: Optional[str] = None,
termination_date: Optional[str] = None,
commit_rate: Optional[int] = None,
description: Optional[str] = None,
**kwargs
) -> Dict:
"""Create a new circuit."""
data = {'cid': cid, 'provider': provider, 'type': type, 'status': status, **kwargs}
for key, val in [
('provider_account', provider_account), ('tenant', tenant),
('install_date', install_date), ('termination_date', termination_date),
('commit_rate', commit_rate), ('description', description)
]:
if val is not None:
data[key] = val
return self.client.create(f'{self.base_endpoint}/circuits', data)
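# Usage sketch (hypothetical IDs and values; assumes `circuits` is an initialized
# instance of this class wired to a NetBox client):
#     await circuits.create_circuit(
#         cid='CKT-1001', provider=1, type=2, status='active',
#         commit_rate=1000000, description='Primary WAN uplink'
#     )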
async def update_circuit(self, id: int, **kwargs) -> Dict:
"""Update a circuit."""
return self.client.patch(f'{self.base_endpoint}/circuits', id, kwargs)
async def delete_circuit(self, id: int) -> None:
"""Delete a circuit."""
self.client.delete(f'{self.base_endpoint}/circuits', id)
# ==================== Circuit Terminations ====================
async def list_circuit_terminations(
self,
circuit_id: Optional[int] = None,
site_id: Optional[int] = None,
provider_network_id: Optional[int] = None,
term_side: Optional[str] = None,
**kwargs
) -> List[Dict]:
"""List all circuit terminations."""
params = {k: v for k, v in {
'circuit_id': circuit_id, 'site_id': site_id,
'provider_network_id': provider_network_id, 'term_side': term_side, **kwargs
}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/circuit-terminations', params=params)
async def get_circuit_termination(self, id: int) -> Dict:
"""Get a specific circuit termination by ID."""
return self.client.get(f'{self.base_endpoint}/circuit-terminations', id)
async def create_circuit_termination(
self,
circuit: int,
term_side: str,
site: Optional[int] = None,
provider_network: Optional[int] = None,
port_speed: Optional[int] = None,
upstream_speed: Optional[int] = None,
xconnect_id: Optional[str] = None,
pp_info: Optional[str] = None,
description: Optional[str] = None,
**kwargs
) -> Dict:
"""Create a new circuit termination."""
data = {'circuit': circuit, 'term_side': term_side, **kwargs}
for key, val in [
('site', site), ('provider_network', provider_network),
('port_speed', port_speed), ('upstream_speed', upstream_speed),
('xconnect_id', xconnect_id), ('pp_info', pp_info), ('description', description)
]:
if val is not None:
data[key] = val
return self.client.create(f'{self.base_endpoint}/circuit-terminations', data)
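# Usage sketch: a circuit typically gets an 'A' side and a 'Z' side termination
# (hypothetical IDs; `circuits` is an instance of this class):
#     await circuits.create_circuit_termination(circuit=42, term_side='A', site=3, port_speed=1000000)
#     await circuits.create_circuit_termination(circuit=42, term_side='Z', provider_network=7)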
async def update_circuit_termination(self, id: int, **kwargs) -> Dict:
"""Update a circuit termination."""
return self.client.patch(f'{self.base_endpoint}/circuit-terminations', id, kwargs)
async def delete_circuit_termination(self, id: int) -> None:
"""Delete a circuit termination."""
self.client.delete(f'{self.base_endpoint}/circuit-terminations', id)
async def get_circuit_termination_paths(self, id: int) -> Dict:
"""Get cable paths for a circuit termination."""
return self.client.get(f'{self.base_endpoint}/circuit-terminations', f'{id}/paths')

View File

@@ -1,7 +1,7 @@
"""
DCIM (Data Center Infrastructure Management) tools for NetBox MCP Server.
-Covers: Sites, Devices, and Interfaces only.
+Covers: Sites, Locations, Racks, Devices, Cables, Interfaces, and related models.
"""
import logging
from typing import List, Dict, Optional, Any
@@ -17,6 +17,74 @@ class DCIMTools:
self.client = client
self.base_endpoint = 'dcim'
# ==================== Regions ====================
async def list_regions(
self,
name: Optional[str] = None,
slug: Optional[str] = None,
parent_id: Optional[int] = None,
**kwargs
) -> List[Dict]:
"""List all regions with optional filtering."""
params = {k: v for k, v in {
'name': name, 'slug': slug, 'parent_id': parent_id, **kwargs
}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/regions', params=params)
async def get_region(self, id: int) -> Dict:
"""Get a specific region by ID."""
return self.client.get(f'{self.base_endpoint}/regions', id)
async def create_region(self, name: str, slug: str, parent: Optional[int] = None, **kwargs) -> Dict:
"""Create a new region."""
data = {'name': name, 'slug': slug, **kwargs}
if parent:
data['parent'] = parent
return self.client.create(f'{self.base_endpoint}/regions', data)
async def update_region(self, id: int, **kwargs) -> Dict:
"""Update a region."""
return self.client.patch(f'{self.base_endpoint}/regions', id, kwargs)
async def delete_region(self, id: int) -> None:
"""Delete a region."""
self.client.delete(f'{self.base_endpoint}/regions', id)
# ==================== Site Groups ====================
async def list_site_groups(
self,
name: Optional[str] = None,
slug: Optional[str] = None,
parent_id: Optional[int] = None,
**kwargs
) -> List[Dict]:
"""List all site groups with optional filtering."""
params = {k: v for k, v in {
'name': name, 'slug': slug, 'parent_id': parent_id, **kwargs
}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/site-groups', params=params)
async def get_site_group(self, id: int) -> Dict:
"""Get a specific site group by ID."""
return self.client.get(f'{self.base_endpoint}/site-groups', id)
async def create_site_group(self, name: str, slug: str, parent: Optional[int] = None, **kwargs) -> Dict:
"""Create a new site group."""
data = {'name': name, 'slug': slug, **kwargs}
if parent:
data['parent'] = parent
return self.client.create(f'{self.base_endpoint}/site-groups', data)
async def update_site_group(self, id: int, **kwargs) -> Dict:
"""Update a site group."""
return self.client.patch(f'{self.base_endpoint}/site-groups', id, kwargs)
async def delete_site_group(self, id: int) -> None:
"""Delete a site group."""
self.client.delete(f'{self.base_endpoint}/site-groups', id)
# ==================== Sites ====================
async def list_sites(
@@ -74,6 +142,359 @@ class DCIMTools:
"""Update a site."""
return self.client.patch(f'{self.base_endpoint}/sites', id, kwargs)
async def delete_site(self, id: int) -> None:
"""Delete a site."""
self.client.delete(f'{self.base_endpoint}/sites', id)
# ==================== Locations ====================
async def list_locations(
self,
name: Optional[str] = None,
slug: Optional[str] = None,
site_id: Optional[int] = None,
parent_id: Optional[int] = None,
**kwargs
) -> List[Dict]:
"""List all locations with optional filtering."""
params = {k: v for k, v in {
'name': name, 'slug': slug, 'site_id': site_id, 'parent_id': parent_id, **kwargs
}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/locations', params=params)
async def get_location(self, id: int) -> Dict:
"""Get a specific location by ID."""
return self.client.get(f'{self.base_endpoint}/locations', id)
async def create_location(
self,
name: str,
slug: str,
site: int,
parent: Optional[int] = None,
**kwargs
) -> Dict:
"""Create a new location."""
data = {'name': name, 'slug': slug, 'site': site, **kwargs}
if parent:
data['parent'] = parent
return self.client.create(f'{self.base_endpoint}/locations', data)
async def update_location(self, id: int, **kwargs) -> Dict:
"""Update a location."""
return self.client.patch(f'{self.base_endpoint}/locations', id, kwargs)
async def delete_location(self, id: int) -> None:
"""Delete a location."""
self.client.delete(f'{self.base_endpoint}/locations', id)
# ==================== Rack Roles ====================
async def list_rack_roles(self, name: Optional[str] = None, **kwargs) -> List[Dict]:
"""List all rack roles."""
params = {k: v for k, v in {'name': name, **kwargs}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/rack-roles', params=params)
async def get_rack_role(self, id: int) -> Dict:
"""Get a specific rack role by ID."""
return self.client.get(f'{self.base_endpoint}/rack-roles', id)
async def create_rack_role(self, name: str, slug: str, color: str = '9e9e9e', **kwargs) -> Dict:
"""Create a new rack role."""
data = {'name': name, 'slug': slug, 'color': color, **kwargs}
return self.client.create(f'{self.base_endpoint}/rack-roles', data)
async def update_rack_role(self, id: int, **kwargs) -> Dict:
"""Update a rack role."""
return self.client.patch(f'{self.base_endpoint}/rack-roles', id, kwargs)
async def delete_rack_role(self, id: int) -> None:
"""Delete a rack role."""
self.client.delete(f'{self.base_endpoint}/rack-roles', id)
# ==================== Rack Types ====================
async def list_rack_types(self, manufacturer_id: Optional[int] = None, **kwargs) -> List[Dict]:
"""List all rack types."""
params = {k: v for k, v in {'manufacturer_id': manufacturer_id, **kwargs}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/rack-types', params=params)
async def get_rack_type(self, id: int) -> Dict:
"""Get a specific rack type by ID."""
return self.client.get(f'{self.base_endpoint}/rack-types', id)
async def create_rack_type(
self,
manufacturer: int,
model: str,
slug: str,
form_factor: str = '4-post-frame',
width: int = 19,
u_height: int = 42,
**kwargs
) -> Dict:
"""Create a new rack type."""
data = {
'manufacturer': manufacturer, 'model': model, 'slug': slug,
'form_factor': form_factor, 'width': width, 'u_height': u_height, **kwargs
}
return self.client.create(f'{self.base_endpoint}/rack-types', data)
async def update_rack_type(self, id: int, **kwargs) -> Dict:
"""Update a rack type."""
return self.client.patch(f'{self.base_endpoint}/rack-types', id, kwargs)
async def delete_rack_type(self, id: int) -> None:
"""Delete a rack type."""
self.client.delete(f'{self.base_endpoint}/rack-types', id)
# ==================== Racks ====================
async def list_racks(
self,
name: Optional[str] = None,
site_id: Optional[int] = None,
location_id: Optional[int] = None,
status: Optional[str] = None,
role_id: Optional[int] = None,
tenant_id: Optional[int] = None,
**kwargs
) -> List[Dict]:
"""List all racks with optional filtering."""
params = {k: v for k, v in {
'name': name, 'site_id': site_id, 'location_id': location_id,
'status': status, 'role_id': role_id, 'tenant_id': tenant_id, **kwargs
}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/racks', params=params)
async def get_rack(self, id: int) -> Dict:
"""Get a specific rack by ID."""
return self.client.get(f'{self.base_endpoint}/racks', id)
async def create_rack(
self,
name: str,
site: int,
status: str = 'active',
location: Optional[int] = None,
role: Optional[int] = None,
tenant: Optional[int] = None,
rack_type: Optional[int] = None,
width: int = 19,
u_height: int = 42,
**kwargs
) -> Dict:
"""Create a new rack."""
data = {'name': name, 'site': site, 'status': status, 'width': width, 'u_height': u_height, **kwargs}
for key, val in [('location', location), ('role', role), ('tenant', tenant), ('rack_type', rack_type)]:
if val is not None:
data[key] = val
return self.client.create(f'{self.base_endpoint}/racks', data)
async def update_rack(self, id: int, **kwargs) -> Dict:
"""Update a rack."""
return self.client.patch(f'{self.base_endpoint}/racks', id, kwargs)
async def delete_rack(self, id: int) -> None:
"""Delete a rack."""
self.client.delete(f'{self.base_endpoint}/racks', id)
# ==================== Rack Reservations ====================
async def list_rack_reservations(
self,
rack_id: Optional[int] = None,
site_id: Optional[int] = None,
tenant_id: Optional[int] = None,
**kwargs
) -> List[Dict]:
"""List all rack reservations."""
params = {k: v for k, v in {
'rack_id': rack_id, 'site_id': site_id, 'tenant_id': tenant_id, **kwargs
}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/rack-reservations', params=params)
async def get_rack_reservation(self, id: int) -> Dict:
"""Get a specific rack reservation by ID."""
return self.client.get(f'{self.base_endpoint}/rack-reservations', id)
async def create_rack_reservation(
self,
rack: int,
units: List[int],
user: int,
description: str,
tenant: Optional[int] = None,
**kwargs
) -> Dict:
"""Create a new rack reservation."""
data = {'rack': rack, 'units': units, 'user': user, 'description': description, **kwargs}
if tenant:
data['tenant'] = tenant
return self.client.create(f'{self.base_endpoint}/rack-reservations', data)
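# Usage sketch (hypothetical IDs; `dcim` is an instance of this class):
# reserve three contiguous units for a planned install:
#     await dcim.create_rack_reservation(
#         rack=12, units=[30, 31, 32], user=1,
#         description='Reserved for new firewall pair'
#     )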
async def update_rack_reservation(self, id: int, **kwargs) -> Dict:
"""Update a rack reservation."""
return self.client.patch(f'{self.base_endpoint}/rack-reservations', id, kwargs)
async def delete_rack_reservation(self, id: int) -> None:
"""Delete a rack reservation."""
self.client.delete(f'{self.base_endpoint}/rack-reservations', id)
# ==================== Manufacturers ====================
async def list_manufacturers(self, name: Optional[str] = None, **kwargs) -> List[Dict]:
"""List all manufacturers."""
params = {k: v for k, v in {'name': name, **kwargs}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/manufacturers', params=params)
async def get_manufacturer(self, id: int) -> Dict:
"""Get a specific manufacturer by ID."""
return self.client.get(f'{self.base_endpoint}/manufacturers', id)
async def create_manufacturer(self, name: str, slug: str, **kwargs) -> Dict:
"""Create a new manufacturer."""
data = {'name': name, 'slug': slug, **kwargs}
return self.client.create(f'{self.base_endpoint}/manufacturers', data)
async def update_manufacturer(self, id: int, **kwargs) -> Dict:
"""Update a manufacturer."""
return self.client.patch(f'{self.base_endpoint}/manufacturers', id, kwargs)
async def delete_manufacturer(self, id: int) -> None:
"""Delete a manufacturer."""
self.client.delete(f'{self.base_endpoint}/manufacturers', id)
# ==================== Device Types ====================
async def list_device_types(
self,
manufacturer_id: Optional[int] = None,
model: Optional[str] = None,
slug: Optional[str] = None,
**kwargs
) -> List[Dict]:
"""List all device types."""
params = {k: v for k, v in {
'manufacturer_id': manufacturer_id, 'model': model, 'slug': slug, **kwargs
}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/device-types', params=params)
async def get_device_type(self, id: int) -> Dict:
"""Get a specific device type by ID."""
return self.client.get(f'{self.base_endpoint}/device-types', id)
async def create_device_type(
self,
manufacturer: int,
model: str,
slug: str,
u_height: float = 1.0,
is_full_depth: bool = True,
**kwargs
) -> Dict:
"""Create a new device type."""
data = {
'manufacturer': manufacturer, 'model': model, 'slug': slug,
'u_height': u_height, 'is_full_depth': is_full_depth, **kwargs
}
return self.client.create(f'{self.base_endpoint}/device-types', data)
async def update_device_type(self, id: int, **kwargs) -> Dict:
"""Update a device type."""
return self.client.patch(f'{self.base_endpoint}/device-types', id, kwargs)
async def delete_device_type(self, id: int) -> None:
"""Delete a device type."""
self.client.delete(f'{self.base_endpoint}/device-types', id)
# ==================== Module Types ====================
async def list_module_types(self, manufacturer_id: Optional[int] = None, **kwargs) -> List[Dict]:
"""List all module types."""
params = {k: v for k, v in {'manufacturer_id': manufacturer_id, **kwargs}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/module-types', params=params)
async def get_module_type(self, id: int) -> Dict:
"""Get a specific module type by ID."""
return self.client.get(f'{self.base_endpoint}/module-types', id)
async def create_module_type(self, manufacturer: int, model: str, **kwargs) -> Dict:
"""Create a new module type."""
data = {'manufacturer': manufacturer, 'model': model, **kwargs}
return self.client.create(f'{self.base_endpoint}/module-types', data)
async def update_module_type(self, id: int, **kwargs) -> Dict:
"""Update a module type."""
return self.client.patch(f'{self.base_endpoint}/module-types', id, kwargs)
async def delete_module_type(self, id: int) -> None:
"""Delete a module type."""
self.client.delete(f'{self.base_endpoint}/module-types', id)
# ==================== Device Roles ====================
async def list_device_roles(self, name: Optional[str] = None, **kwargs) -> List[Dict]:
"""List all device roles."""
params = {k: v for k, v in {'name': name, **kwargs}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/device-roles', params=params)
async def get_device_role(self, id: int) -> Dict:
"""Get a specific device role by ID."""
return self.client.get(f'{self.base_endpoint}/device-roles', id)
async def create_device_role(
self,
name: str,
slug: str,
color: str = '9e9e9e',
vm_role: bool = False,
**kwargs
) -> Dict:
"""Create a new device role."""
data = {'name': name, 'slug': slug, 'color': color, 'vm_role': vm_role, **kwargs}
return self.client.create(f'{self.base_endpoint}/device-roles', data)
async def update_device_role(self, id: int, **kwargs) -> Dict:
"""Update a device role."""
return self.client.patch(f'{self.base_endpoint}/device-roles', id, kwargs)
async def delete_device_role(self, id: int) -> None:
"""Delete a device role."""
self.client.delete(f'{self.base_endpoint}/device-roles', id)
# ==================== Platforms ====================
async def list_platforms(self, name: Optional[str] = None, manufacturer_id: Optional[int] = None, **kwargs) -> List[Dict]:
"""List all platforms."""
params = {k: v for k, v in {'name': name, 'manufacturer_id': manufacturer_id, **kwargs}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/platforms', params=params)
async def get_platform(self, id: int) -> Dict:
"""Get a specific platform by ID."""
return self.client.get(f'{self.base_endpoint}/platforms', id)
async def create_platform(
self,
name: str,
slug: str,
manufacturer: Optional[int] = None,
**kwargs
) -> Dict:
"""Create a new platform."""
data = {'name': name, 'slug': slug, **kwargs}
if manufacturer:
data['manufacturer'] = manufacturer
return self.client.create(f'{self.base_endpoint}/platforms', data)
async def update_platform(self, id: int, **kwargs) -> Dict:
"""Update a platform."""
return self.client.patch(f'{self.base_endpoint}/platforms', id, kwargs)
async def delete_platform(self, id: int) -> None:
"""Delete a platform."""
self.client.delete(f'{self.base_endpoint}/platforms', id)
# ==================== Devices ====================
async def list_devices(
@@ -144,6 +565,34 @@ class DCIMTools:
"""Update a device."""
return self.client.patch(f'{self.base_endpoint}/devices', id, kwargs)
async def delete_device(self, id: int) -> None:
"""Delete a device."""
self.client.delete(f'{self.base_endpoint}/devices', id)
# ==================== Modules ====================
async def list_modules(self, device_id: Optional[int] = None, **kwargs) -> List[Dict]:
"""List all modules."""
params = {k: v for k, v in {'device_id': device_id, **kwargs}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/modules', params=params)
async def get_module(self, id: int) -> Dict:
"""Get a specific module by ID."""
return self.client.get(f'{self.base_endpoint}/modules', id)
async def create_module(self, device: int, module_bay: int, module_type: int, **kwargs) -> Dict:
"""Create a new module."""
data = {'device': device, 'module_bay': module_bay, 'module_type': module_type, **kwargs}
return self.client.create(f'{self.base_endpoint}/modules', data)
async def update_module(self, id: int, **kwargs) -> Dict:
"""Update a module."""
return self.client.patch(f'{self.base_endpoint}/modules', id, kwargs)
async def delete_module(self, id: int) -> None:
"""Delete a module."""
self.client.delete(f'{self.base_endpoint}/modules', id)
# ==================== Interfaces ====================
async def list_interfaces(
@@ -187,3 +636,300 @@ class DCIMTools:
if val is not None:
data[key] = val
return self.client.create(f'{self.base_endpoint}/interfaces', data)
async def update_interface(self, id: int, **kwargs) -> Dict:
"""Update an interface."""
return self.client.patch(f'{self.base_endpoint}/interfaces', id, kwargs)
async def delete_interface(self, id: int) -> None:
"""Delete an interface."""
self.client.delete(f'{self.base_endpoint}/interfaces', id)
# ==================== Console Ports ====================
async def list_console_ports(self, device_id: Optional[int] = None, **kwargs) -> List[Dict]:
"""List all console ports."""
params = {k: v for k, v in {'device_id': device_id, **kwargs}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/console-ports', params=params)
async def get_console_port(self, id: int) -> Dict:
"""Get a specific console port by ID."""
return self.client.get(f'{self.base_endpoint}/console-ports', id)
async def create_console_port(self, device: int, name: str, **kwargs) -> Dict:
"""Create a new console port."""
data = {'device': device, 'name': name, **kwargs}
return self.client.create(f'{self.base_endpoint}/console-ports', data)
async def update_console_port(self, id: int, **kwargs) -> Dict:
"""Update a console port."""
return self.client.patch(f'{self.base_endpoint}/console-ports', id, kwargs)
async def delete_console_port(self, id: int) -> None:
"""Delete a console port."""
self.client.delete(f'{self.base_endpoint}/console-ports', id)
# ==================== Console Server Ports ====================
async def list_console_server_ports(self, device_id: Optional[int] = None, **kwargs) -> List[Dict]:
"""List all console server ports."""
params = {k: v for k, v in {'device_id': device_id, **kwargs}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/console-server-ports', params=params)
async def get_console_server_port(self, id: int) -> Dict:
"""Get a specific console server port by ID."""
return self.client.get(f'{self.base_endpoint}/console-server-ports', id)
async def create_console_server_port(self, device: int, name: str, **kwargs) -> Dict:
"""Create a new console server port."""
data = {'device': device, 'name': name, **kwargs}
return self.client.create(f'{self.base_endpoint}/console-server-ports', data)
async def update_console_server_port(self, id: int, **kwargs) -> Dict:
"""Update a console server port."""
return self.client.patch(f'{self.base_endpoint}/console-server-ports', id, kwargs)
async def delete_console_server_port(self, id: int) -> None:
"""Delete a console server port."""
self.client.delete(f'{self.base_endpoint}/console-server-ports', id)
# ==================== Power Ports ====================
async def list_power_ports(self, device_id: Optional[int] = None, **kwargs) -> List[Dict]:
"""List all power ports."""
params = {k: v for k, v in {'device_id': device_id, **kwargs}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/power-ports', params=params)
async def get_power_port(self, id: int) -> Dict:
"""Get a specific power port by ID."""
return self.client.get(f'{self.base_endpoint}/power-ports', id)
async def create_power_port(self, device: int, name: str, **kwargs) -> Dict:
"""Create a new power port."""
data = {'device': device, 'name': name, **kwargs}
return self.client.create(f'{self.base_endpoint}/power-ports', data)
async def update_power_port(self, id: int, **kwargs) -> Dict:
"""Update a power port."""
return self.client.patch(f'{self.base_endpoint}/power-ports', id, kwargs)
async def delete_power_port(self, id: int) -> None:
"""Delete a power port."""
self.client.delete(f'{self.base_endpoint}/power-ports', id)
# ==================== Power Outlets ====================
async def list_power_outlets(self, device_id: Optional[int] = None, **kwargs) -> List[Dict]:
"""List all power outlets."""
params = {k: v for k, v in {'device_id': device_id, **kwargs}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/power-outlets', params=params)
async def get_power_outlet(self, id: int) -> Dict:
"""Get a specific power outlet by ID."""
return self.client.get(f'{self.base_endpoint}/power-outlets', id)
async def create_power_outlet(self, device: int, name: str, **kwargs) -> Dict:
"""Create a new power outlet."""
data = {'device': device, 'name': name, **kwargs}
return self.client.create(f'{self.base_endpoint}/power-outlets', data)
async def update_power_outlet(self, id: int, **kwargs) -> Dict:
"""Update a power outlet."""
return self.client.patch(f'{self.base_endpoint}/power-outlets', id, kwargs)
async def delete_power_outlet(self, id: int) -> None:
"""Delete a power outlet."""
self.client.delete(f'{self.base_endpoint}/power-outlets', id)
# ==================== Power Panels ====================
async def list_power_panels(self, site_id: Optional[int] = None, **kwargs) -> List[Dict]:
"""List all power panels."""
params = {k: v for k, v in {'site_id': site_id, **kwargs}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/power-panels', params=params)
async def get_power_panel(self, id: int) -> Dict:
"""Get a specific power panel by ID."""
return self.client.get(f'{self.base_endpoint}/power-panels', id)
async def create_power_panel(self, site: int, name: str, location: Optional[int] = None, **kwargs) -> Dict:
"""Create a new power panel."""
data = {'site': site, 'name': name, **kwargs}
if location:
data['location'] = location
return self.client.create(f'{self.base_endpoint}/power-panels', data)
async def update_power_panel(self, id: int, **kwargs) -> Dict:
"""Update a power panel."""
return self.client.patch(f'{self.base_endpoint}/power-panels', id, kwargs)
async def delete_power_panel(self, id: int) -> None:
"""Delete a power panel."""
self.client.delete(f'{self.base_endpoint}/power-panels', id)
# ==================== Power Feeds ====================
async def list_power_feeds(self, power_panel_id: Optional[int] = None, **kwargs) -> List[Dict]:
"""List all power feeds."""
params = {k: v for k, v in {'power_panel_id': power_panel_id, **kwargs}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/power-feeds', params=params)
async def get_power_feed(self, id: int) -> Dict:
"""Get a specific power feed by ID."""
return self.client.get(f'{self.base_endpoint}/power-feeds', id)
async def create_power_feed(
self,
power_panel: int,
name: str,
status: str = 'active',
type: str = 'primary',
supply: str = 'ac',
phase: str = 'single-phase',
voltage: int = 120,
amperage: int = 20,
**kwargs
) -> Dict:
"""Create a new power feed."""
data = {
'power_panel': power_panel, 'name': name, 'status': status,
'type': type, 'supply': supply, 'phase': phase,
'voltage': voltage, 'amperage': amperage, **kwargs
}
return self.client.create(f'{self.base_endpoint}/power-feeds', data)
async def update_power_feed(self, id: int, **kwargs) -> Dict:
"""Update a power feed."""
return self.client.patch(f'{self.base_endpoint}/power-feeds', id, kwargs)
async def delete_power_feed(self, id: int) -> None:
"""Delete a power feed."""
self.client.delete(f'{self.base_endpoint}/power-feeds', id)
# ==================== Cables ====================
async def list_cables(
self,
site_id: Optional[int] = None,
device_id: Optional[int] = None,
rack_id: Optional[int] = None,
type: Optional[str] = None,
status: Optional[str] = None,
**kwargs
) -> List[Dict]:
"""List all cables."""
params = {k: v for k, v in {
'site_id': site_id, 'device_id': device_id, 'rack_id': rack_id,
'type': type, 'status': status, **kwargs
}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/cables', params=params)
async def get_cable(self, id: int) -> Dict:
"""Get a specific cable by ID."""
return self.client.get(f'{self.base_endpoint}/cables', id)
async def create_cable(
self,
a_terminations: List[Dict],
b_terminations: List[Dict],
type: Optional[str] = None,
status: str = 'connected',
label: Optional[str] = None,
color: Optional[str] = None,
length: Optional[float] = None,
length_unit: Optional[str] = None,
**kwargs
) -> Dict:
"""
Create a new cable.
a_terminations and b_terminations are lists of dicts with:
- object_type: e.g., 'dcim.interface'
- object_id: ID of the object
"""
data = {
'a_terminations': a_terminations,
'b_terminations': b_terminations,
'status': status,
**kwargs
}
for key, val in [
('type', type), ('label', label), ('color', color),
('length', length), ('length_unit', length_unit)
]:
if val is not None:
data[key] = val
return self.client.create(f'{self.base_endpoint}/cables', data)
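# Usage sketch for the termination format described in the docstring above
# (hypothetical interface IDs; `dcim` is an instance of this class):
#     await dcim.create_cable(
#         a_terminations=[{'object_type': 'dcim.interface', 'object_id': 101}],
#         b_terminations=[{'object_type': 'dcim.interface', 'object_id': 202}],
#         type='cat6', status='connected', label='leaf1:eth1 <-> leaf2:eth1'
#     )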
async def update_cable(self, id: int, **kwargs) -> Dict:
"""Update a cable."""
return self.client.patch(f'{self.base_endpoint}/cables', id, kwargs)
async def delete_cable(self, id: int) -> None:
"""Delete a cable."""
self.client.delete(f'{self.base_endpoint}/cables', id)
# ==================== Virtual Chassis ====================
async def list_virtual_chassis(self, **kwargs) -> List[Dict]:
"""List all virtual chassis."""
return self.client.list(f'{self.base_endpoint}/virtual-chassis', params=kwargs)
async def get_virtual_chassis(self, id: int) -> Dict:
"""Get a specific virtual chassis by ID."""
return self.client.get(f'{self.base_endpoint}/virtual-chassis', id)
async def create_virtual_chassis(self, name: str, domain: Optional[str] = None, **kwargs) -> Dict:
"""Create a new virtual chassis."""
data = {'name': name, **kwargs}
if domain:
data['domain'] = domain
return self.client.create(f'{self.base_endpoint}/virtual-chassis', data)
async def update_virtual_chassis(self, id: int, **kwargs) -> Dict:
"""Update a virtual chassis."""
return self.client.patch(f'{self.base_endpoint}/virtual-chassis', id, kwargs)
async def delete_virtual_chassis(self, id: int) -> None:
"""Delete a virtual chassis."""
self.client.delete(f'{self.base_endpoint}/virtual-chassis', id)
# ==================== Inventory Items ====================
async def list_inventory_items(self, device_id: Optional[int] = None, **kwargs) -> List[Dict]:
"""List all inventory items."""
params = {k: v for k, v in {'device_id': device_id, **kwargs}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/inventory-items', params=params)
async def get_inventory_item(self, id: int) -> Dict:
"""Get a specific inventory item by ID."""
return self.client.get(f'{self.base_endpoint}/inventory-items', id)
async def create_inventory_item(
self,
device: int,
name: str,
parent: Optional[int] = None,
manufacturer: Optional[int] = None,
part_id: Optional[str] = None,
serial: Optional[str] = None,
asset_tag: Optional[str] = None,
**kwargs
) -> Dict:
"""Create a new inventory item."""
data = {'device': device, 'name': name, **kwargs}
for key, val in [
('parent', parent), ('manufacturer', manufacturer),
('part_id', part_id), ('serial', serial), ('asset_tag', asset_tag)
]:
if val is not None:
data[key] = val
return self.client.create(f'{self.base_endpoint}/inventory-items', data)
async def update_inventory_item(self, id: int, **kwargs) -> Dict:
"""Update an inventory item."""
return self.client.patch(f'{self.base_endpoint}/inventory-items', id, kwargs)
async def delete_inventory_item(self, id: int) -> None:
"""Delete an inventory item."""
self.client.delete(f'{self.base_endpoint}/inventory-items', id)

View File

@@ -1,7 +1,7 @@
"""
Extras tools for NetBox MCP Server.
-Covers: Tags and Journal Entries only.
+Covers: Tags, Custom Fields, Custom Links, Webhooks, Journal Entries, and more.
"""
import logging
from typing import List, Dict, Optional, Any
@@ -50,6 +50,209 @@ class ExtrasTools:
data['description'] = description
return self.client.create(f'{self.base_endpoint}/tags', data)
async def update_tag(self, id: int, **kwargs) -> Dict:
"""Update a tag."""
return self.client.patch(f'{self.base_endpoint}/tags', id, kwargs)
async def delete_tag(self, id: int) -> None:
"""Delete a tag."""
self.client.delete(f'{self.base_endpoint}/tags', id)
# ==================== Custom Fields ====================
async def list_custom_fields(
self,
name: Optional[str] = None,
type: Optional[str] = None,
content_types: Optional[str] = None,
**kwargs
) -> List[Dict]:
"""List all custom fields."""
params = {k: v for k, v in {
'name': name, 'type': type, 'content_types': content_types, **kwargs
}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/custom-fields', params=params)
async def get_custom_field(self, id: int) -> Dict:
"""Get a specific custom field by ID."""
return self.client.get(f'{self.base_endpoint}/custom-fields', id)
async def create_custom_field(
self,
name: str,
content_types: List[str],
type: str = 'text',
label: Optional[str] = None,
description: Optional[str] = None,
required: bool = False,
filter_logic: str = 'loose',
default: Optional[Any] = None,
weight: int = 100,
validation_minimum: Optional[int] = None,
validation_maximum: Optional[int] = None,
validation_regex: Optional[str] = None,
choice_set: Optional[int] = None,
**kwargs
) -> Dict:
"""Create a new custom field."""
data = {
'name': name, 'content_types': content_types, 'type': type,
'required': required, 'filter_logic': filter_logic, 'weight': weight, **kwargs
}
for key, val in [
('label', label), ('description', description), ('default', default),
('validation_minimum', validation_minimum), ('validation_maximum', validation_maximum),
('validation_regex', validation_regex), ('choice_set', choice_set)
]:
if val is not None:
data[key] = val
return self.client.create(f'{self.base_endpoint}/custom-fields', data)
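# Usage sketch (assumes content-type strings follow NetBox's 'app.model' form;
# `extras` is an instance of this class):
#     await extras.create_custom_field(
#         name='warranty_end', content_types=['dcim.device'],
#         type='date', label='Warranty End'
#     )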
async def update_custom_field(self, id: int, **kwargs) -> Dict:
"""Update a custom field."""
return self.client.patch(f'{self.base_endpoint}/custom-fields', id, kwargs)
async def delete_custom_field(self, id: int) -> None:
"""Delete a custom field."""
self.client.delete(f'{self.base_endpoint}/custom-fields', id)
# ==================== Custom Field Choice Sets ====================
async def list_custom_field_choice_sets(self, name: Optional[str] = None, **kwargs) -> List[Dict]:
"""List all custom field choice sets."""
params = {k: v for k, v in {'name': name, **kwargs}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/custom-field-choice-sets', params=params)
async def get_custom_field_choice_set(self, id: int) -> Dict:
"""Get a specific custom field choice set by ID."""
return self.client.get(f'{self.base_endpoint}/custom-field-choice-sets', id)
async def create_custom_field_choice_set(
self,
name: str,
extra_choices: List[List[str]],
description: Optional[str] = None,
**kwargs
) -> Dict:
"""Create a new custom field choice set."""
data = {'name': name, 'extra_choices': extra_choices, **kwargs}
if description:
data['description'] = description
return self.client.create(f'{self.base_endpoint}/custom-field-choice-sets', data)
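# Usage sketch (assumes NetBox's [value, label] pair format for extra_choices;
# `extras` is an instance of this class):
#     await extras.create_custom_field_choice_set(
#         name='environment',
#         extra_choices=[['prod', 'Production'], ['dev', 'Development']]
#     )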
async def update_custom_field_choice_set(self, id: int, **kwargs) -> Dict:
"""Update a custom field choice set."""
return self.client.patch(f'{self.base_endpoint}/custom-field-choice-sets', id, kwargs)
async def delete_custom_field_choice_set(self, id: int) -> None:
"""Delete a custom field choice set."""
self.client.delete(f'{self.base_endpoint}/custom-field-choice-sets', id)
# ==================== Custom Links ====================
async def list_custom_links(
self,
name: Optional[str] = None,
content_types: Optional[str] = None,
**kwargs
) -> List[Dict]:
"""List all custom links."""
params = {k: v for k, v in {
'name': name, 'content_types': content_types, **kwargs
}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/custom-links', params=params)
async def get_custom_link(self, id: int) -> Dict:
"""Get a specific custom link by ID."""
return self.client.get(f'{self.base_endpoint}/custom-links', id)
async def create_custom_link(
self,
name: str,
content_types: List[str],
link_text: str,
link_url: str,
enabled: bool = True,
new_window: bool = False,
weight: int = 100,
group_name: Optional[str] = None,
button_class: str = 'outline-dark',
**kwargs
) -> Dict:
"""Create a new custom link."""
data = {
'name': name, 'content_types': content_types,
'link_text': link_text, 'link_url': link_url,
'enabled': enabled, 'new_window': new_window,
'weight': weight, 'button_class': button_class, **kwargs
}
if group_name:
data['group_name'] = group_name
return self.client.create(f'{self.base_endpoint}/custom-links', data)
async def update_custom_link(self, id: int, **kwargs) -> Dict:
"""Update a custom link."""
return self.client.patch(f'{self.base_endpoint}/custom-links', id, kwargs)
async def delete_custom_link(self, id: int) -> None:
"""Delete a custom link."""
self.client.delete(f'{self.base_endpoint}/custom-links', id)
# ==================== Webhooks ====================
async def list_webhooks(self, name: Optional[str] = None, **kwargs) -> List[Dict]:
"""List all webhooks."""
params = {k: v for k, v in {'name': name, **kwargs}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/webhooks', params=params)
async def get_webhook(self, id: int) -> Dict:
"""Get a specific webhook by ID."""
return self.client.get(f'{self.base_endpoint}/webhooks', id)
async def create_webhook(
self,
name: str,
payload_url: str,
content_types: List[str],
type_create: bool = True,
type_update: bool = True,
type_delete: bool = True,
type_job_start: bool = False,
type_job_end: bool = False,
enabled: bool = True,
http_method: str = 'POST',
http_content_type: str = 'application/json',
additional_headers: Optional[str] = None,
body_template: Optional[str] = None,
secret: Optional[str] = None,
ssl_verification: bool = True,
ca_file_path: Optional[str] = None,
**kwargs
) -> Dict:
"""Create a new webhook."""
data = {
'name': name, 'payload_url': payload_url, 'content_types': content_types,
'type_create': type_create, 'type_update': type_update, 'type_delete': type_delete,
'type_job_start': type_job_start, 'type_job_end': type_job_end,
'enabled': enabled, 'http_method': http_method,
'http_content_type': http_content_type, 'ssl_verification': ssl_verification, **kwargs
}
for key, val in [
('additional_headers', additional_headers), ('body_template', body_template),
('secret', secret), ('ca_file_path', ca_file_path)
]:
if val is not None:
data[key] = val
return self.client.create(f'{self.base_endpoint}/webhooks', data)
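# Usage sketch (hypothetical URL; `extras` is an instance of this class):
#     await extras.create_webhook(
#         name='device-events', payload_url='https://hooks.example.com/netbox',
#         content_types=['dcim.device'], type_delete=False
#     )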
async def update_webhook(self, id: int, **kwargs) -> Dict:
"""Update a webhook."""
return self.client.patch(f'{self.base_endpoint}/webhooks', id, kwargs)
async def delete_webhook(self, id: int) -> None:
"""Delete a webhook."""
self.client.delete(f'{self.base_endpoint}/webhooks', id)
# ==================== Journal Entries ====================
async def list_journal_entries(
@@ -85,3 +288,273 @@ class ExtrasTools:
'comments': comments, 'kind': kind, **kwargs
}
return self.client.create(f'{self.base_endpoint}/journal-entries', data)
async def update_journal_entry(self, id: int, **kwargs) -> Dict:
"""Update a journal entry."""
return self.client.patch(f'{self.base_endpoint}/journal-entries', id, kwargs)
async def delete_journal_entry(self, id: int) -> None:
"""Delete a journal entry."""
self.client.delete(f'{self.base_endpoint}/journal-entries', id)
# ==================== Config Contexts ====================
async def list_config_contexts(self, name: Optional[str] = None, **kwargs) -> List[Dict]:
"""List all config contexts."""
params = {k: v for k, v in {'name': name, **kwargs}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/config-contexts', params=params)
async def get_config_context(self, id: int) -> Dict:
"""Get a specific config context by ID."""
return self.client.get(f'{self.base_endpoint}/config-contexts', id)
async def create_config_context(
self,
name: str,
data: Dict[str, Any],
weight: int = 1000,
description: Optional[str] = None,
is_active: bool = True,
regions: Optional[List[int]] = None,
site_groups: Optional[List[int]] = None,
sites: Optional[List[int]] = None,
locations: Optional[List[int]] = None,
device_types: Optional[List[int]] = None,
roles: Optional[List[int]] = None,
platforms: Optional[List[int]] = None,
cluster_types: Optional[List[int]] = None,
cluster_groups: Optional[List[int]] = None,
clusters: Optional[List[int]] = None,
tenant_groups: Optional[List[int]] = None,
tenants: Optional[List[int]] = None,
tags: Optional[List[str]] = None,
**kwargs
) -> Dict:
"""Create a new config context."""
context_data = {
'name': name, 'data': data, 'weight': weight, 'is_active': is_active, **kwargs
}
for key, val in [
('description', description), ('regions', regions),
('site_groups', site_groups), ('sites', sites),
('locations', locations), ('device_types', device_types),
('roles', roles), ('platforms', platforms),
('cluster_types', cluster_types), ('cluster_groups', cluster_groups),
('clusters', clusters), ('tenant_groups', tenant_groups),
('tenants', tenants), ('tags', tags)
]:
if val is not None:
context_data[key] = val
return self.client.create(f'{self.base_endpoint}/config-contexts', context_data)
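# Usage sketch (hypothetical IDs; the `data` dict is arbitrary JSON merged into
# the rendered config context of matching devices):
#     await extras.create_config_context(
#         name='dns-servers', data={'dns': ['10.0.0.53', '10.0.1.53']},
#         sites=[1], roles=[3]
#     )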
async def update_config_context(self, id: int, **kwargs) -> Dict:
"""Update a config context."""
return self.client.patch(f'{self.base_endpoint}/config-contexts', id, kwargs)
async def delete_config_context(self, id: int) -> None:
"""Delete a config context."""
self.client.delete(f'{self.base_endpoint}/config-contexts', id)
# ==================== Config Templates ====================
async def list_config_templates(self, name: Optional[str] = None, **kwargs) -> List[Dict]:
"""List all config templates."""
params = {k: v for k, v in {'name': name, **kwargs}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/config-templates', params=params)
async def get_config_template(self, id: int) -> Dict:
"""Get a specific config template by ID."""
return self.client.get(f'{self.base_endpoint}/config-templates', id)
async def create_config_template(
self,
name: str,
template_code: str,
description: Optional[str] = None,
environment_params: Optional[Dict[str, Any]] = None,
**kwargs
) -> Dict:
"""Create a new config template."""
data = {'name': name, 'template_code': template_code, **kwargs}
if description:
data['description'] = description
if environment_params:
data['environment_params'] = environment_params
return self.client.create(f'{self.base_endpoint}/config-templates', data)
async def update_config_template(self, id: int, **kwargs) -> Dict:
"""Update a config template."""
return self.client.patch(f'{self.base_endpoint}/config-templates', id, kwargs)
async def delete_config_template(self, id: int) -> None:
"""Delete a config template."""
self.client.delete(f'{self.base_endpoint}/config-templates', id)
# ==================== Export Templates ====================
async def list_export_templates(
self,
name: Optional[str] = None,
content_types: Optional[str] = None,
**kwargs
) -> List[Dict]:
"""List all export templates."""
params = {k: v for k, v in {
'name': name, 'content_types': content_types, **kwargs
}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/export-templates', params=params)
async def get_export_template(self, id: int) -> Dict:
"""Get a specific export template by ID."""
return self.client.get(f'{self.base_endpoint}/export-templates', id)
async def create_export_template(
self,
name: str,
content_types: List[str],
template_code: str,
description: Optional[str] = None,
mime_type: str = 'text/plain',
file_extension: Optional[str] = None,
as_attachment: bool = True,
**kwargs
) -> Dict:
"""Create a new export template."""
data = {
'name': name, 'content_types': content_types,
'template_code': template_code, 'mime_type': mime_type,
'as_attachment': as_attachment, **kwargs
}
if description:
data['description'] = description
if file_extension:
data['file_extension'] = file_extension
return self.client.create(f'{self.base_endpoint}/export-templates', data)
async def update_export_template(self, id: int, **kwargs) -> Dict:
"""Update an export template."""
return self.client.patch(f'{self.base_endpoint}/export-templates', id, kwargs)
async def delete_export_template(self, id: int) -> None:
"""Delete an export template."""
self.client.delete(f'{self.base_endpoint}/export-templates', id)
# ==================== Saved Filters ====================
async def list_saved_filters(
self,
name: Optional[str] = None,
content_types: Optional[str] = None,
**kwargs
) -> List[Dict]:
"""List all saved filters."""
params = {k: v for k, v in {
'name': name, 'content_types': content_types, **kwargs
}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/saved-filters', params=params)
async def get_saved_filter(self, id: int) -> Dict:
"""Get a specific saved filter by ID."""
return self.client.get(f'{self.base_endpoint}/saved-filters', id)
async def create_saved_filter(
self,
name: str,
slug: str,
content_types: List[str],
parameters: Dict[str, Any],
description: Optional[str] = None,
weight: int = 100,
enabled: bool = True,
shared: bool = True,
**kwargs
) -> Dict:
"""Create a new saved filter."""
data = {
'name': name, 'slug': slug, 'content_types': content_types,
'parameters': parameters, 'weight': weight,
'enabled': enabled, 'shared': shared, **kwargs
}
if description:
data['description'] = description
return self.client.create(f'{self.base_endpoint}/saved-filters', data)
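# Usage sketch (hypothetical filter parameters; `extras` is an instance of this class):
#     await extras.create_saved_filter(
#         name='Active Edge Devices', slug='active-edge-devices',
#         content_types=['dcim.device'],
#         parameters={'status': 'active', 'role': 'edge'}
#     )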
async def update_saved_filter(self, id: int, **kwargs) -> Dict:
"""Update a saved filter."""
return self.client.patch(f'{self.base_endpoint}/saved-filters', id, kwargs)
async def delete_saved_filter(self, id: int) -> None:
"""Delete a saved filter."""
self.client.delete(f'{self.base_endpoint}/saved-filters', id)
# ==================== Image Attachments ====================
async def list_image_attachments(
self,
object_type: Optional[str] = None,
object_id: Optional[int] = None,
**kwargs
) -> List[Dict]:
"""List all image attachments."""
params = {k: v for k, v in {
'object_type': object_type, 'object_id': object_id, **kwargs
}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/image-attachments', params=params)
async def get_image_attachment(self, id: int) -> Dict:
"""Get a specific image attachment by ID."""
return self.client.get(f'{self.base_endpoint}/image-attachments', id)
async def delete_image_attachment(self, id: int) -> None:
"""Delete an image attachment."""
self.client.delete(f'{self.base_endpoint}/image-attachments', id)
# ==================== Object Changes (Audit Log) ====================
async def list_object_changes(
self,
user_id: Optional[int] = None,
changed_object_type: Optional[str] = None,
changed_object_id: Optional[int] = None,
action: Optional[str] = None,
**kwargs
) -> List[Dict]:
"""List all object changes (audit log)."""
params = {k: v for k, v in {
'user_id': user_id, 'changed_object_type': changed_object_type,
'changed_object_id': changed_object_id, 'action': action, **kwargs
}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/object-changes', params=params)
async def get_object_change(self, id: int) -> Dict:
"""Get a specific object change by ID."""
return self.client.get(f'{self.base_endpoint}/object-changes', id)
# ==================== Scripts ====================
async def list_scripts(self, **kwargs) -> List[Dict]:
"""List all available scripts."""
return self.client.list(f'{self.base_endpoint}/scripts', params=kwargs)
async def get_script(self, id: str) -> Dict:
"""Get a specific script by ID."""
return self.client.get(f'{self.base_endpoint}/scripts', id)
async def run_script(self, id: str, data: Dict[str, Any], commit: bool = True) -> Dict:
"""Run a script with the provided data."""
payload = {'data': data, 'commit': commit}
return self.client.create(f'{self.base_endpoint}/scripts/{id}', payload)
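# Usage sketch (the script ID format is assumed to follow NetBox's
# 'module.ClassName' convention; commit=False performs a dry run):
#     await extras.run_script('maintenance.DeviceAudit', data={'site': 1}, commit=False)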
# ==================== Reports ====================
async def list_reports(self, **kwargs) -> List[Dict]:
"""List all available reports."""
return self.client.list(f'{self.base_endpoint}/reports', params=kwargs)
async def get_report(self, id: str) -> Dict:
"""Get a specific report by ID."""
return self.client.get(f'{self.base_endpoint}/reports', id)
async def run_report(self, id: str) -> Dict:
"""Run a report."""
return self.client.create(f'{self.base_endpoint}/reports/{id}', {})

View File

@@ -1,7 +1,7 @@
"""
IPAM (IP Address Management) tools for NetBox MCP Server.
-Covers: IP Addresses, Prefixes, and Services only.
+Covers: IP Addresses, Prefixes, VLANs, VRFs, ASNs, and related models.
"""
import logging
from typing import List, Dict, Optional, Any
@@ -17,6 +17,164 @@ class IPAMTools:
self.client = client
self.base_endpoint = 'ipam'
# ==================== ASN Ranges ====================
async def list_asn_ranges(self, name: Optional[str] = None, **kwargs) -> List[Dict]:
"""List all ASN ranges."""
params = {k: v for k, v in {'name': name, **kwargs}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/asn-ranges', params=params)
async def get_asn_range(self, id: int) -> Dict:
"""Get a specific ASN range by ID."""
return self.client.get(f'{self.base_endpoint}/asn-ranges', id)
async def create_asn_range(self, name: str, slug: str, rir: int, start: int, end: int, **kwargs) -> Dict:
"""Create a new ASN range."""
data = {'name': name, 'slug': slug, 'rir': rir, 'start': start, 'end': end, **kwargs}
return self.client.create(f'{self.base_endpoint}/asn-ranges', data)
async def update_asn_range(self, id: int, **kwargs) -> Dict:
"""Update an ASN range."""
return self.client.patch(f'{self.base_endpoint}/asn-ranges', id, kwargs)
async def delete_asn_range(self, id: int) -> None:
"""Delete an ASN range."""
self.client.delete(f'{self.base_endpoint}/asn-ranges', id)
# ==================== ASNs ====================
async def list_asns(
self,
asn: Optional[int] = None,
rir_id: Optional[int] = None,
tenant_id: Optional[int] = None,
**kwargs
) -> List[Dict]:
"""List all ASNs."""
params = {k: v for k, v in {
'asn': asn, 'rir_id': rir_id, 'tenant_id': tenant_id, **kwargs
}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/asns', params=params)
async def get_asn(self, id: int) -> Dict:
"""Get a specific ASN by ID."""
return self.client.get(f'{self.base_endpoint}/asns', id)
async def create_asn(
self,
asn: int,
rir: int,
tenant: Optional[int] = None,
description: Optional[str] = None,
**kwargs
) -> Dict:
"""Create a new ASN."""
data = {'asn': asn, 'rir': rir, **kwargs}
if tenant:
data['tenant'] = tenant
if description:
data['description'] = description
return self.client.create(f'{self.base_endpoint}/asns', data)
async def update_asn(self, id: int, **kwargs) -> Dict:
"""Update an ASN."""
return self.client.patch(f'{self.base_endpoint}/asns', id, kwargs)
async def delete_asn(self, id: int) -> None:
"""Delete an ASN."""
self.client.delete(f'{self.base_endpoint}/asns', id)
# ==================== RIRs ====================
async def list_rirs(self, name: Optional[str] = None, **kwargs) -> List[Dict]:
"""List all RIRs (Regional Internet Registries)."""
params = {k: v for k, v in {'name': name, **kwargs}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/rirs', params=params)
async def get_rir(self, id: int) -> Dict:
"""Get a specific RIR by ID."""
return self.client.get(f'{self.base_endpoint}/rirs', id)
async def create_rir(self, name: str, slug: str, is_private: bool = False, **kwargs) -> Dict:
"""Create a new RIR."""
data = {'name': name, 'slug': slug, 'is_private': is_private, **kwargs}
return self.client.create(f'{self.base_endpoint}/rirs', data)
async def update_rir(self, id: int, **kwargs) -> Dict:
"""Update a RIR."""
return self.client.patch(f'{self.base_endpoint}/rirs', id, kwargs)
async def delete_rir(self, id: int) -> None:
"""Delete a RIR."""
self.client.delete(f'{self.base_endpoint}/rirs', id)
# ==================== Aggregates ====================
async def list_aggregates(
self,
prefix: Optional[str] = None,
rir_id: Optional[int] = None,
tenant_id: Optional[int] = None,
**kwargs
) -> List[Dict]:
"""List all aggregates."""
params = {k: v for k, v in {
'prefix': prefix, 'rir_id': rir_id, 'tenant_id': tenant_id, **kwargs
}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/aggregates', params=params)
async def get_aggregate(self, id: int) -> Dict:
"""Get a specific aggregate by ID."""
return self.client.get(f'{self.base_endpoint}/aggregates', id)
async def create_aggregate(
self,
prefix: str,
rir: int,
tenant: Optional[int] = None,
description: Optional[str] = None,
**kwargs
) -> Dict:
"""Create a new aggregate."""
data = {'prefix': prefix, 'rir': rir, **kwargs}
if tenant:
data['tenant'] = tenant
if description:
data['description'] = description
return self.client.create(f'{self.base_endpoint}/aggregates', data)
async def update_aggregate(self, id: int, **kwargs) -> Dict:
"""Update an aggregate."""
return self.client.patch(f'{self.base_endpoint}/aggregates', id, kwargs)
async def delete_aggregate(self, id: int) -> None:
"""Delete an aggregate."""
self.client.delete(f'{self.base_endpoint}/aggregates', id)
# ==================== Roles ====================
async def list_roles(self, name: Optional[str] = None, **kwargs) -> List[Dict]:
"""List all IPAM roles."""
params = {k: v for k, v in {'name': name, **kwargs}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/roles', params=params)
async def get_role(self, id: int) -> Dict:
"""Get a specific role by ID."""
return self.client.get(f'{self.base_endpoint}/roles', id)
async def create_role(self, name: str, slug: str, weight: int = 1000, **kwargs) -> Dict:
"""Create a new IPAM role."""
data = {'name': name, 'slug': slug, 'weight': weight, **kwargs}
return self.client.create(f'{self.base_endpoint}/roles', data)
async def update_role(self, id: int, **kwargs) -> Dict:
"""Update a role."""
return self.client.patch(f'{self.base_endpoint}/roles', id, kwargs)
async def delete_role(self, id: int) -> None:
"""Delete a role."""
self.client.delete(f'{self.base_endpoint}/roles', id)
# ==================== Prefixes ====================
async def list_prefixes(
@@ -72,6 +230,83 @@ class IPAMTools:
data[key] = val
return self.client.create(f'{self.base_endpoint}/prefixes', data)
async def update_prefix(self, id: int, **kwargs) -> Dict:
"""Update a prefix."""
return self.client.patch(f'{self.base_endpoint}/prefixes', id, kwargs)
async def delete_prefix(self, id: int) -> None:
"""Delete a prefix."""
self.client.delete(f'{self.base_endpoint}/prefixes', id)
async def list_available_prefixes(self, id: int) -> List[Dict]:
"""List available child prefixes within a prefix."""
return self.client.list(f'{self.base_endpoint}/prefixes/{id}/available-prefixes', paginate=False)
async def create_available_prefix(self, id: int, prefix_length: int, **kwargs) -> Dict:
"""Create a new prefix from available space."""
data = {'prefix_length': prefix_length, **kwargs}
return self.client.create(f'{self.base_endpoint}/prefixes/{id}/available-prefixes', data)
async def list_available_ips(self, id: int) -> List[Dict]:
"""List available IP addresses within a prefix."""
return self.client.list(f'{self.base_endpoint}/prefixes/{id}/available-ips', paginate=False)
async def create_available_ip(self, id: int, **kwargs) -> Dict:
"""Create a new IP address from available space in prefix."""
return self.client.create(f'{self.base_endpoint}/prefixes/{id}/available-ips', kwargs)
# ==================== IP Ranges ====================
async def list_ip_ranges(
self,
start_address: Optional[str] = None,
end_address: Optional[str] = None,
vrf_id: Optional[int] = None,
tenant_id: Optional[int] = None,
status: Optional[str] = None,
**kwargs
) -> List[Dict]:
"""List all IP ranges."""
params = {k: v for k, v in {
'start_address': start_address, 'end_address': end_address,
'vrf_id': vrf_id, 'tenant_id': tenant_id, 'status': status, **kwargs
}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/ip-ranges', params=params)
async def get_ip_range(self, id: int) -> Dict:
"""Get a specific IP range by ID."""
return self.client.get(f'{self.base_endpoint}/ip-ranges', id)
async def create_ip_range(
self,
start_address: str,
end_address: str,
status: str = 'active',
vrf: Optional[int] = None,
tenant: Optional[int] = None,
role: Optional[int] = None,
description: Optional[str] = None,
**kwargs
) -> Dict:
"""Create a new IP range."""
data = {'start_address': start_address, 'end_address': end_address, 'status': status, **kwargs}
for key, val in [('vrf', vrf), ('tenant', tenant), ('role', role), ('description', description)]:
if val is not None:
data[key] = val
return self.client.create(f'{self.base_endpoint}/ip-ranges', data)
async def update_ip_range(self, id: int, **kwargs) -> Dict:
"""Update an IP range."""
return self.client.patch(f'{self.base_endpoint}/ip-ranges', id, kwargs)
async def delete_ip_range(self, id: int) -> None:
"""Delete an IP range."""
self.client.delete(f'{self.base_endpoint}/ip-ranges', id)
async def list_available_ips_in_range(self, id: int) -> List[Dict]:
"""List available IP addresses within an IP range."""
return self.client.list(f'{self.base_endpoint}/ip-ranges/{id}/available-ips', paginate=False)
# ==================== IP Addresses ====================
async def list_ip_addresses(
@@ -133,6 +368,271 @@ class IPAMTools:
"""Update an IP address."""
return self.client.patch(f'{self.base_endpoint}/ip-addresses', id, kwargs)
async def delete_ip_address(self, id: int) -> None:
"""Delete an IP address."""
self.client.delete(f'{self.base_endpoint}/ip-addresses', id)
# ==================== FHRP Groups ====================
async def list_fhrp_groups(
self,
protocol: Optional[str] = None,
group_id: Optional[int] = None,
**kwargs
) -> List[Dict]:
"""List all FHRP groups."""
params = {k: v for k, v in {'protocol': protocol, 'group_id': group_id, **kwargs}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/fhrp-groups', params=params)
async def get_fhrp_group(self, id: int) -> Dict:
"""Get a specific FHRP group by ID."""
return self.client.get(f'{self.base_endpoint}/fhrp-groups', id)
async def create_fhrp_group(
self,
protocol: str,
group_id: int,
auth_type: Optional[str] = None,
auth_key: Optional[str] = None,
**kwargs
) -> Dict:
"""Create a new FHRP group."""
data = {'protocol': protocol, 'group_id': group_id, **kwargs}
if auth_type:
data['auth_type'] = auth_type
if auth_key:
data['auth_key'] = auth_key
return self.client.create(f'{self.base_endpoint}/fhrp-groups', data)
async def update_fhrp_group(self, id: int, **kwargs) -> Dict:
"""Update an FHRP group."""
return self.client.patch(f'{self.base_endpoint}/fhrp-groups', id, kwargs)
async def delete_fhrp_group(self, id: int) -> None:
"""Delete an FHRP group."""
self.client.delete(f'{self.base_endpoint}/fhrp-groups', id)
# ==================== FHRP Group Assignments ====================
async def list_fhrp_group_assignments(self, group_id: Optional[int] = None, **kwargs) -> List[Dict]:
"""List all FHRP group assignments."""
params = {k: v for k, v in {'group_id': group_id, **kwargs}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/fhrp-group-assignments', params=params)
async def get_fhrp_group_assignment(self, id: int) -> Dict:
"""Get a specific FHRP group assignment by ID."""
return self.client.get(f'{self.base_endpoint}/fhrp-group-assignments', id)
async def create_fhrp_group_assignment(
self,
group: int,
interface_type: str,
interface_id: int,
priority: int = 100,
**kwargs
) -> Dict:
"""Create a new FHRP group assignment."""
data = {
'group': group, 'interface_type': interface_type,
'interface_id': interface_id, 'priority': priority, **kwargs
}
return self.client.create(f'{self.base_endpoint}/fhrp-group-assignments', data)
async def update_fhrp_group_assignment(self, id: int, **kwargs) -> Dict:
"""Update an FHRP group assignment."""
return self.client.patch(f'{self.base_endpoint}/fhrp-group-assignments', id, kwargs)
async def delete_fhrp_group_assignment(self, id: int) -> None:
"""Delete an FHRP group assignment."""
self.client.delete(f'{self.base_endpoint}/fhrp-group-assignments', id)
# ==================== VLAN Groups ====================
async def list_vlan_groups(
self,
name: Optional[str] = None,
site_id: Optional[int] = None,
**kwargs
) -> List[Dict]:
"""List all VLAN groups."""
params = {k: v for k, v in {'name': name, 'site_id': site_id, **kwargs}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/vlan-groups', params=params)
async def get_vlan_group(self, id: int) -> Dict:
"""Get a specific VLAN group by ID."""
return self.client.get(f'{self.base_endpoint}/vlan-groups', id)
async def create_vlan_group(
self,
name: str,
slug: str,
scope_type: Optional[str] = None,
scope_id: Optional[int] = None,
min_vid: int = 1,
max_vid: int = 4094,
**kwargs
) -> Dict:
"""Create a new VLAN group."""
data = {'name': name, 'slug': slug, 'min_vid': min_vid, 'max_vid': max_vid, **kwargs}
if scope_type:
data['scope_type'] = scope_type
if scope_id:
data['scope_id'] = scope_id
return self.client.create(f'{self.base_endpoint}/vlan-groups', data)
async def update_vlan_group(self, id: int, **kwargs) -> Dict:
"""Update a VLAN group."""
return self.client.patch(f'{self.base_endpoint}/vlan-groups', id, kwargs)
async def delete_vlan_group(self, id: int) -> None:
"""Delete a VLAN group."""
self.client.delete(f'{self.base_endpoint}/vlan-groups', id)
async def list_available_vlans(self, id: int) -> List[Dict]:
"""List available VLANs in a VLAN group."""
return self.client.list(f'{self.base_endpoint}/vlan-groups/{id}/available-vlans', paginate=False)
# ==================== VLANs ====================
async def list_vlans(
self,
vid: Optional[int] = None,
name: Optional[str] = None,
site_id: Optional[int] = None,
group_id: Optional[int] = None,
role_id: Optional[int] = None,
tenant_id: Optional[int] = None,
status: Optional[str] = None,
**kwargs
) -> List[Dict]:
"""List all VLANs with optional filtering."""
params = {k: v for k, v in {
'vid': vid, 'name': name, 'site_id': site_id, 'group_id': group_id,
'role_id': role_id, 'tenant_id': tenant_id, 'status': status, **kwargs
}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/vlans', params=params)
async def get_vlan(self, id: int) -> Dict:
"""Get a specific VLAN by ID."""
return self.client.get(f'{self.base_endpoint}/vlans', id)
async def create_vlan(
self,
vid: int,
name: str,
status: str = 'active',
site: Optional[int] = None,
group: Optional[int] = None,
role: Optional[int] = None,
tenant: Optional[int] = None,
description: Optional[str] = None,
**kwargs
) -> Dict:
"""Create a new VLAN."""
data = {'vid': vid, 'name': name, 'status': status, **kwargs}
for key, val in [
('site', site), ('group', group), ('role', role),
('tenant', tenant), ('description', description)
]:
if val is not None:
data[key] = val
return self.client.create(f'{self.base_endpoint}/vlans', data)
async def update_vlan(self, id: int, **kwargs) -> Dict:
"""Update a VLAN."""
return self.client.patch(f'{self.base_endpoint}/vlans', id, kwargs)
async def delete_vlan(self, id: int) -> None:
"""Delete a VLAN."""
self.client.delete(f'{self.base_endpoint}/vlans', id)
# ==================== VRFs ====================
async def list_vrfs(
self,
name: Optional[str] = None,
rd: Optional[str] = None,
tenant_id: Optional[int] = None,
**kwargs
) -> List[Dict]:
"""List all VRFs with optional filtering."""
params = {k: v for k, v in {
'name': name, 'rd': rd, 'tenant_id': tenant_id, **kwargs
}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/vrfs', params=params)
async def get_vrf(self, id: int) -> Dict:
"""Get a specific VRF by ID."""
return self.client.get(f'{self.base_endpoint}/vrfs', id)
async def create_vrf(
self,
name: str,
rd: Optional[str] = None,
tenant: Optional[int] = None,
enforce_unique: bool = True,
description: Optional[str] = None,
import_targets: Optional[List[int]] = None,
export_targets: Optional[List[int]] = None,
**kwargs
) -> Dict:
"""Create a new VRF."""
data = {'name': name, 'enforce_unique': enforce_unique, **kwargs}
for key, val in [
('rd', rd), ('tenant', tenant), ('description', description),
('import_targets', import_targets), ('export_targets', export_targets)
]:
if val is not None:
data[key] = val
return self.client.create(f'{self.base_endpoint}/vrfs', data)
async def update_vrf(self, id: int, **kwargs) -> Dict:
"""Update a VRF."""
return self.client.patch(f'{self.base_endpoint}/vrfs', id, kwargs)
async def delete_vrf(self, id: int) -> None:
"""Delete a VRF."""
self.client.delete(f'{self.base_endpoint}/vrfs', id)
# ==================== Route Targets ====================
async def list_route_targets(
self,
name: Optional[str] = None,
tenant_id: Optional[int] = None,
**kwargs
) -> List[Dict]:
"""List all route targets."""
params = {k: v for k, v in {'name': name, 'tenant_id': tenant_id, **kwargs}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/route-targets', params=params)
async def get_route_target(self, id: int) -> Dict:
"""Get a specific route target by ID."""
return self.client.get(f'{self.base_endpoint}/route-targets', id)
async def create_route_target(
self,
name: str,
tenant: Optional[int] = None,
description: Optional[str] = None,
**kwargs
) -> Dict:
"""Create a new route target."""
data = {'name': name, **kwargs}
if tenant:
data['tenant'] = tenant
if description:
data['description'] = description
return self.client.create(f'{self.base_endpoint}/route-targets', data)
async def update_route_target(self, id: int, **kwargs) -> Dict:
"""Update a route target."""
return self.client.patch(f'{self.base_endpoint}/route-targets', id, kwargs)
async def delete_route_target(self, id: int) -> None:
"""Delete a route target."""
self.client.delete(f'{self.base_endpoint}/route-targets', id)
# ==================== Services ====================
async def list_services(
@@ -175,3 +675,44 @@ class IPAMTools:
if val is not None:
data[key] = val
return self.client.create(f'{self.base_endpoint}/services', data)
async def update_service(self, id: int, **kwargs) -> Dict:
"""Update a service."""
return self.client.patch(f'{self.base_endpoint}/services', id, kwargs)
async def delete_service(self, id: int) -> None:
"""Delete a service."""
self.client.delete(f'{self.base_endpoint}/services', id)
# ==================== Service Templates ====================
async def list_service_templates(self, name: Optional[str] = None, **kwargs) -> List[Dict]:
"""List all service templates."""
params = {k: v for k, v in {'name': name, **kwargs}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/service-templates', params=params)
async def get_service_template(self, id: int) -> Dict:
"""Get a specific service template by ID."""
return self.client.get(f'{self.base_endpoint}/service-templates', id)
async def create_service_template(
self,
name: str,
ports: List[int],
protocol: str,
description: Optional[str] = None,
**kwargs
) -> Dict:
"""Create a new service template."""
data = {'name': name, 'ports': ports, 'protocol': protocol, **kwargs}
if description:
data['description'] = description
return self.client.create(f'{self.base_endpoint}/service-templates', data)
async def update_service_template(self, id: int, **kwargs) -> Dict:
"""Update a service template."""
return self.client.patch(f'{self.base_endpoint}/service-templates', id, kwargs)
async def delete_service_template(self, id: int) -> None:
"""Delete a service template."""
self.client.delete(f'{self.base_endpoint}/service-templates', id)
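For context, a minimal usage sketch of the IPAM tools above (the `NetBoxClient` constructor arguments and import paths are assumptions; only the tool methods called here are taken from this file):

```python
import asyncio

from mcp_server.netbox_client import NetBoxClient  # import path assumed
from mcp_server.tools.ipam import IPAMTools         # import path assumed

async def main():
    # Constructor signature is an assumption for illustration only.
    client = NetBoxClient(url="https://netbox.example.com", token="***")
    ipam = IPAMTools(client)
    # create_vlan and list_vlans are defined in this file.
    vlan = await ipam.create_vlan(vid=100, name="users", description="User access VLAN")
    active = await ipam.list_vlans(status="active")
    print(vlan["id"], len(active))

asyncio.run(main())
```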

View File

@@ -0,0 +1,281 @@
"""
Tenancy tools for NetBox MCP Server.
Covers: Tenants, Tenant Groups, Contacts, Contact Groups, Contact Roles, and Contact Assignments.
"""
import logging
from typing import List, Dict, Optional, Any
from ..netbox_client import NetBoxClient
logger = logging.getLogger(__name__)
class TenancyTools:
"""Tools for Tenancy operations in NetBox"""
def __init__(self, client: NetBoxClient):
self.client = client
self.base_endpoint = 'tenancy'
# ==================== Tenant Groups ====================
async def list_tenant_groups(
self,
name: Optional[str] = None,
slug: Optional[str] = None,
parent_id: Optional[int] = None,
**kwargs
) -> List[Dict]:
"""List all tenant groups."""
params = {k: v for k, v in {
'name': name, 'slug': slug, 'parent_id': parent_id, **kwargs
}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/tenant-groups', params=params)
async def get_tenant_group(self, id: int) -> Dict:
"""Get a specific tenant group by ID."""
return self.client.get(f'{self.base_endpoint}/tenant-groups', id)
async def create_tenant_group(
self,
name: str,
slug: str,
parent: Optional[int] = None,
description: Optional[str] = None,
**kwargs
) -> Dict:
"""Create a new tenant group."""
data = {'name': name, 'slug': slug, **kwargs}
if parent:
data['parent'] = parent
if description:
data['description'] = description
return self.client.create(f'{self.base_endpoint}/tenant-groups', data)
async def update_tenant_group(self, id: int, **kwargs) -> Dict:
"""Update a tenant group."""
return self.client.patch(f'{self.base_endpoint}/tenant-groups', id, kwargs)
async def delete_tenant_group(self, id: int) -> None:
"""Delete a tenant group."""
self.client.delete(f'{self.base_endpoint}/tenant-groups', id)
# ==================== Tenants ====================
async def list_tenants(
self,
name: Optional[str] = None,
slug: Optional[str] = None,
group_id: Optional[int] = None,
**kwargs
) -> List[Dict]:
"""List all tenants with optional filtering."""
params = {k: v for k, v in {
'name': name, 'slug': slug, 'group_id': group_id, **kwargs
}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/tenants', params=params)
async def get_tenant(self, id: int) -> Dict:
"""Get a specific tenant by ID."""
return self.client.get(f'{self.base_endpoint}/tenants', id)
async def create_tenant(
self,
name: str,
slug: str,
group: Optional[int] = None,
description: Optional[str] = None,
**kwargs
) -> Dict:
"""Create a new tenant."""
data = {'name': name, 'slug': slug, **kwargs}
if group:
data['group'] = group
if description:
data['description'] = description
return self.client.create(f'{self.base_endpoint}/tenants', data)
async def update_tenant(self, id: int, **kwargs) -> Dict:
"""Update a tenant."""
return self.client.patch(f'{self.base_endpoint}/tenants', id, kwargs)
async def delete_tenant(self, id: int) -> None:
"""Delete a tenant."""
self.client.delete(f'{self.base_endpoint}/tenants', id)
# ==================== Contact Groups ====================
async def list_contact_groups(
self,
name: Optional[str] = None,
slug: Optional[str] = None,
parent_id: Optional[int] = None,
**kwargs
) -> List[Dict]:
"""List all contact groups."""
params = {k: v for k, v in {
'name': name, 'slug': slug, 'parent_id': parent_id, **kwargs
}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/contact-groups', params=params)
async def get_contact_group(self, id: int) -> Dict:
"""Get a specific contact group by ID."""
return self.client.get(f'{self.base_endpoint}/contact-groups', id)
async def create_contact_group(
self,
name: str,
slug: str,
parent: Optional[int] = None,
description: Optional[str] = None,
**kwargs
) -> Dict:
"""Create a new contact group."""
data = {'name': name, 'slug': slug, **kwargs}
if parent:
data['parent'] = parent
if description:
data['description'] = description
return self.client.create(f'{self.base_endpoint}/contact-groups', data)
async def update_contact_group(self, id: int, **kwargs) -> Dict:
"""Update a contact group."""
return self.client.patch(f'{self.base_endpoint}/contact-groups', id, kwargs)
async def delete_contact_group(self, id: int) -> None:
"""Delete a contact group."""
self.client.delete(f'{self.base_endpoint}/contact-groups', id)
# ==================== Contact Roles ====================
async def list_contact_roles(
self,
name: Optional[str] = None,
slug: Optional[str] = None,
**kwargs
) -> List[Dict]:
"""List all contact roles."""
params = {k: v for k, v in {'name': name, 'slug': slug, **kwargs}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/contact-roles', params=params)
async def get_contact_role(self, id: int) -> Dict:
"""Get a specific contact role by ID."""
return self.client.get(f'{self.base_endpoint}/contact-roles', id)
async def create_contact_role(
self,
name: str,
slug: str,
description: Optional[str] = None,
**kwargs
) -> Dict:
"""Create a new contact role."""
data = {'name': name, 'slug': slug, **kwargs}
if description:
data['description'] = description
return self.client.create(f'{self.base_endpoint}/contact-roles', data)
async def update_contact_role(self, id: int, **kwargs) -> Dict:
"""Update a contact role."""
return self.client.patch(f'{self.base_endpoint}/contact-roles', id, kwargs)
async def delete_contact_role(self, id: int) -> None:
"""Delete a contact role."""
self.client.delete(f'{self.base_endpoint}/contact-roles', id)
# ==================== Contacts ====================
async def list_contacts(
self,
name: Optional[str] = None,
group_id: Optional[int] = None,
email: Optional[str] = None,
**kwargs
) -> List[Dict]:
"""List all contacts with optional filtering."""
params = {k: v for k, v in {
'name': name, 'group_id': group_id, 'email': email, **kwargs
}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/contacts', params=params)
async def get_contact(self, id: int) -> Dict:
"""Get a specific contact by ID."""
return self.client.get(f'{self.base_endpoint}/contacts', id)
async def create_contact(
self,
name: str,
group: Optional[int] = None,
title: Optional[str] = None,
phone: Optional[str] = None,
email: Optional[str] = None,
address: Optional[str] = None,
link: Optional[str] = None,
description: Optional[str] = None,
**kwargs
) -> Dict:
"""Create a new contact."""
data = {'name': name, **kwargs}
for key, val in [
('group', group), ('title', title), ('phone', phone),
('email', email), ('address', address), ('link', link),
('description', description)
]:
if val is not None:
data[key] = val
return self.client.create(f'{self.base_endpoint}/contacts', data)
async def update_contact(self, id: int, **kwargs) -> Dict:
"""Update a contact."""
return self.client.patch(f'{self.base_endpoint}/contacts', id, kwargs)
async def delete_contact(self, id: int) -> None:
"""Delete a contact."""
self.client.delete(f'{self.base_endpoint}/contacts', id)
# ==================== Contact Assignments ====================
async def list_contact_assignments(
self,
contact_id: Optional[int] = None,
role_id: Optional[int] = None,
object_type: Optional[str] = None,
object_id: Optional[int] = None,
**kwargs
) -> List[Dict]:
"""List all contact assignments."""
params = {k: v for k, v in {
'contact_id': contact_id, 'role_id': role_id,
'object_type': object_type, 'object_id': object_id, **kwargs
}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/contact-assignments', params=params)
async def get_contact_assignment(self, id: int) -> Dict:
"""Get a specific contact assignment by ID."""
return self.client.get(f'{self.base_endpoint}/contact-assignments', id)
async def create_contact_assignment(
self,
contact: int,
role: int,
object_type: str,
object_id: int,
priority: Optional[str] = None,
**kwargs
) -> Dict:
"""Create a new contact assignment."""
data = {
'contact': contact, 'role': role,
'object_type': object_type, 'object_id': object_id, **kwargs
}
if priority:
data['priority'] = priority
return self.client.create(f'{self.base_endpoint}/contact-assignments', data)
async def update_contact_assignment(self, id: int, **kwargs) -> Dict:
"""Update a contact assignment."""
return self.client.patch(f'{self.base_endpoint}/contact-assignments', id, kwargs)
async def delete_contact_assignment(self, id: int) -> None:
"""Delete a contact assignment."""
self.client.delete(f'{self.base_endpoint}/contact-assignments', id)

View File

@@ -1,7 +1,7 @@
"""
Virtualization tools for NetBox MCP Server.
Covers: Clusters, Virtual Machines, and VM Interfaces only.
Covers: Clusters, Virtual Machines, VM Interfaces, and related models.
"""
import logging
from typing import List, Dict, Optional, Any
@@ -17,6 +17,80 @@ class VirtualizationTools:
self.client = client
self.base_endpoint = 'virtualization'
# ==================== Cluster Types ====================
async def list_cluster_types(
self,
name: Optional[str] = None,
slug: Optional[str] = None,
**kwargs
) -> List[Dict]:
"""List all cluster types."""
params = {k: v for k, v in {'name': name, 'slug': slug, **kwargs}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/cluster-types', params=params)
async def get_cluster_type(self, id: int) -> Dict:
"""Get a specific cluster type by ID."""
return self.client.get(f'{self.base_endpoint}/cluster-types', id)
async def create_cluster_type(
self,
name: str,
slug: str,
description: Optional[str] = None,
**kwargs
) -> Dict:
"""Create a new cluster type."""
data = {'name': name, 'slug': slug, **kwargs}
if description:
data['description'] = description
return self.client.create(f'{self.base_endpoint}/cluster-types', data)
async def update_cluster_type(self, id: int, **kwargs) -> Dict:
"""Update a cluster type."""
return self.client.patch(f'{self.base_endpoint}/cluster-types', id, kwargs)
async def delete_cluster_type(self, id: int) -> None:
"""Delete a cluster type."""
self.client.delete(f'{self.base_endpoint}/cluster-types', id)
# ==================== Cluster Groups ====================
async def list_cluster_groups(
self,
name: Optional[str] = None,
slug: Optional[str] = None,
**kwargs
) -> List[Dict]:
"""List all cluster groups."""
params = {k: v for k, v in {'name': name, 'slug': slug, **kwargs}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/cluster-groups', params=params)
async def get_cluster_group(self, id: int) -> Dict:
"""Get a specific cluster group by ID."""
return self.client.get(f'{self.base_endpoint}/cluster-groups', id)
async def create_cluster_group(
self,
name: str,
slug: str,
description: Optional[str] = None,
**kwargs
) -> Dict:
"""Create a new cluster group."""
data = {'name': name, 'slug': slug, **kwargs}
if description:
data['description'] = description
return self.client.create(f'{self.base_endpoint}/cluster-groups', data)
async def update_cluster_group(self, id: int, **kwargs) -> Dict:
"""Update a cluster group."""
return self.client.patch(f'{self.base_endpoint}/cluster-groups', id, kwargs)
async def delete_cluster_group(self, id: int) -> None:
"""Delete a cluster group."""
self.client.delete(f'{self.base_endpoint}/cluster-groups', id)
# ==================== Clusters ====================
async def list_clusters(
@@ -60,6 +134,14 @@ class VirtualizationTools:
data[key] = val
return self.client.create(f'{self.base_endpoint}/clusters', data)
async def update_cluster(self, id: int, **kwargs) -> Dict:
"""Update a cluster."""
return self.client.patch(f'{self.base_endpoint}/clusters', id, kwargs)
async def delete_cluster(self, id: int) -> None:
"""Delete a cluster."""
self.client.delete(f'{self.base_endpoint}/clusters', id)
# ==================== Virtual Machines ====================
async def list_virtual_machines(
@@ -119,6 +201,10 @@ class VirtualizationTools:
"""Update a virtual machine."""
return self.client.patch(f'{self.base_endpoint}/virtual-machines', id, kwargs)
async def delete_virtual_machine(self, id: int) -> None:
"""Delete a virtual machine."""
self.client.delete(f'{self.base_endpoint}/virtual-machines', id)
# ==================== VM Interfaces ====================
async def list_vm_interfaces(
@@ -160,3 +246,51 @@ class VirtualizationTools:
if val is not None:
data[key] = val
return self.client.create(f'{self.base_endpoint}/interfaces', data)
async def update_vm_interface(self, id: int, **kwargs) -> Dict:
"""Update a VM interface."""
return self.client.patch(f'{self.base_endpoint}/interfaces', id, kwargs)
async def delete_vm_interface(self, id: int) -> None:
"""Delete a VM interface."""
self.client.delete(f'{self.base_endpoint}/interfaces', id)
# ==================== Virtual Disks ====================
async def list_virtual_disks(
self,
virtual_machine_id: Optional[int] = None,
name: Optional[str] = None,
**kwargs
) -> List[Dict]:
"""List all virtual disks."""
params = {k: v for k, v in {
'virtual_machine_id': virtual_machine_id, 'name': name, **kwargs
}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/virtual-disks', params=params)
async def get_virtual_disk(self, id: int) -> Dict:
"""Get a specific virtual disk by ID."""
return self.client.get(f'{self.base_endpoint}/virtual-disks', id)
async def create_virtual_disk(
self,
virtual_machine: int,
name: str,
size: int,
description: Optional[str] = None,
**kwargs
) -> Dict:
"""Create a new virtual disk."""
data = {'virtual_machine': virtual_machine, 'name': name, 'size': size, **kwargs}
if description:
data['description'] = description
return self.client.create(f'{self.base_endpoint}/virtual-disks', data)
async def update_virtual_disk(self, id: int, **kwargs) -> Dict:
"""Update a virtual disk."""
return self.client.patch(f'{self.base_endpoint}/virtual-disks', id, kwargs)
async def delete_virtual_disk(self, id: int) -> None:
"""Delete a virtual disk."""
self.client.delete(f'{self.base_endpoint}/virtual-disks', id)

View File

@@ -0,0 +1,428 @@
"""
VPN tools for NetBox MCP Server.
Covers: Tunnels, Tunnel Groups and Terminations, IKE/IPSec Proposals, Policies, and Profiles, and L2VPNs with their Terminations.
"""
import logging
from typing import List, Dict, Optional, Any
from ..netbox_client import NetBoxClient
logger = logging.getLogger(__name__)
class VPNTools:
"""Tools for VPN operations in NetBox"""
def __init__(self, client: NetBoxClient):
self.client = client
self.base_endpoint = 'vpn'
# ==================== Tunnel Groups ====================
async def list_tunnel_groups(
self,
name: Optional[str] = None,
slug: Optional[str] = None,
**kwargs
) -> List[Dict]:
"""List all tunnel groups."""
params = {k: v for k, v in {'name': name, 'slug': slug, **kwargs}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/tunnel-groups', params=params)
async def get_tunnel_group(self, id: int) -> Dict:
"""Get a specific tunnel group by ID."""
return self.client.get(f'{self.base_endpoint}/tunnel-groups', id)
async def create_tunnel_group(
self,
name: str,
slug: str,
description: Optional[str] = None,
**kwargs
) -> Dict:
"""Create a new tunnel group."""
data = {'name': name, 'slug': slug, **kwargs}
if description:
data['description'] = description
return self.client.create(f'{self.base_endpoint}/tunnel-groups', data)
async def update_tunnel_group(self, id: int, **kwargs) -> Dict:
"""Update a tunnel group."""
return self.client.patch(f'{self.base_endpoint}/tunnel-groups', id, kwargs)
async def delete_tunnel_group(self, id: int) -> None:
"""Delete a tunnel group."""
self.client.delete(f'{self.base_endpoint}/tunnel-groups', id)
# ==================== Tunnels ====================
async def list_tunnels(
self,
name: Optional[str] = None,
status: Optional[str] = None,
group_id: Optional[int] = None,
encapsulation: Optional[str] = None,
tenant_id: Optional[int] = None,
**kwargs
) -> List[Dict]:
"""List all tunnels with optional filtering."""
params = {k: v for k, v in {
'name': name, 'status': status, 'group_id': group_id,
'encapsulation': encapsulation, 'tenant_id': tenant_id, **kwargs
}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/tunnels', params=params)
async def get_tunnel(self, id: int) -> Dict:
"""Get a specific tunnel by ID."""
return self.client.get(f'{self.base_endpoint}/tunnels', id)
async def create_tunnel(
self,
name: str,
status: str = 'active',
encapsulation: str = 'ipsec-tunnel',
group: Optional[int] = None,
ipsec_profile: Optional[int] = None,
tenant: Optional[int] = None,
tunnel_id: Optional[int] = None,
description: Optional[str] = None,
**kwargs
) -> Dict:
"""Create a new tunnel."""
data = {'name': name, 'status': status, 'encapsulation': encapsulation, **kwargs}
for key, val in [
('group', group), ('ipsec_profile', ipsec_profile),
('tenant', tenant), ('tunnel_id', tunnel_id), ('description', description)
]:
if val is not None:
data[key] = val
return self.client.create(f'{self.base_endpoint}/tunnels', data)
async def update_tunnel(self, id: int, **kwargs) -> Dict:
"""Update a tunnel."""
return self.client.patch(f'{self.base_endpoint}/tunnels', id, kwargs)
async def delete_tunnel(self, id: int) -> None:
"""Delete a tunnel."""
self.client.delete(f'{self.base_endpoint}/tunnels', id)
# ==================== Tunnel Terminations ====================
async def list_tunnel_terminations(
self,
tunnel_id: Optional[int] = None,
role: Optional[str] = None,
**kwargs
) -> List[Dict]:
"""List all tunnel terminations."""
params = {k: v for k, v in {
'tunnel_id': tunnel_id, 'role': role, **kwargs
}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/tunnel-terminations', params=params)
async def get_tunnel_termination(self, id: int) -> Dict:
"""Get a specific tunnel termination by ID."""
return self.client.get(f'{self.base_endpoint}/tunnel-terminations', id)
async def create_tunnel_termination(
self,
tunnel: int,
role: str,
termination_type: str,
termination_id: int,
outside_ip: Optional[int] = None,
**kwargs
) -> Dict:
"""Create a new tunnel termination."""
data = {
'tunnel': tunnel, 'role': role,
'termination_type': termination_type, 'termination_id': termination_id, **kwargs
}
if outside_ip:
data['outside_ip'] = outside_ip
return self.client.create(f'{self.base_endpoint}/tunnel-terminations', data)
async def update_tunnel_termination(self, id: int, **kwargs) -> Dict:
"""Update a tunnel termination."""
return self.client.patch(f'{self.base_endpoint}/tunnel-terminations', id, kwargs)
async def delete_tunnel_termination(self, id: int) -> None:
"""Delete a tunnel termination."""
self.client.delete(f'{self.base_endpoint}/tunnel-terminations', id)
# ==================== IKE Proposals ====================
async def list_ike_proposals(self, name: Optional[str] = None, **kwargs) -> List[Dict]:
"""List all IKE proposals."""
params = {k: v for k, v in {'name': name, **kwargs}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/ike-proposals', params=params)
async def get_ike_proposal(self, id: int) -> Dict:
"""Get a specific IKE proposal by ID."""
return self.client.get(f'{self.base_endpoint}/ike-proposals', id)
async def create_ike_proposal(
self,
name: str,
authentication_method: str,
encryption_algorithm: str,
authentication_algorithm: str,
group: int,
sa_lifetime: Optional[int] = None,
description: Optional[str] = None,
**kwargs
) -> Dict:
"""Create a new IKE proposal."""
data = {
'name': name, 'authentication_method': authentication_method,
'encryption_algorithm': encryption_algorithm,
'authentication_algorithm': authentication_algorithm, 'group': group, **kwargs
}
if sa_lifetime:
data['sa_lifetime'] = sa_lifetime
if description:
data['description'] = description
return self.client.create(f'{self.base_endpoint}/ike-proposals', data)
async def update_ike_proposal(self, id: int, **kwargs) -> Dict:
"""Update an IKE proposal."""
return self.client.patch(f'{self.base_endpoint}/ike-proposals', id, kwargs)
async def delete_ike_proposal(self, id: int) -> None:
"""Delete an IKE proposal."""
self.client.delete(f'{self.base_endpoint}/ike-proposals', id)
# ==================== IKE Policies ====================
async def list_ike_policies(self, name: Optional[str] = None, **kwargs) -> List[Dict]:
"""List all IKE policies."""
params = {k: v for k, v in {'name': name, **kwargs}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/ike-policies', params=params)
async def get_ike_policy(self, id: int) -> Dict:
"""Get a specific IKE policy by ID."""
return self.client.get(f'{self.base_endpoint}/ike-policies', id)
async def create_ike_policy(
self,
name: str,
version: int,
mode: str,
proposals: List[int],
preshared_key: Optional[str] = None,
description: Optional[str] = None,
**kwargs
) -> Dict:
"""Create a new IKE policy."""
data = {'name': name, 'version': version, 'mode': mode, 'proposals': proposals, **kwargs}
if preshared_key:
data['preshared_key'] = preshared_key
if description:
data['description'] = description
return self.client.create(f'{self.base_endpoint}/ike-policies', data)
async def update_ike_policy(self, id: int, **kwargs) -> Dict:
"""Update an IKE policy."""
return self.client.patch(f'{self.base_endpoint}/ike-policies', id, kwargs)
async def delete_ike_policy(self, id: int) -> None:
"""Delete an IKE policy."""
self.client.delete(f'{self.base_endpoint}/ike-policies', id)
# ==================== IPSec Proposals ====================
async def list_ipsec_proposals(self, name: Optional[str] = None, **kwargs) -> List[Dict]:
"""List all IPSec proposals."""
params = {k: v for k, v in {'name': name, **kwargs}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/ipsec-proposals', params=params)
async def get_ipsec_proposal(self, id: int) -> Dict:
"""Get a specific IPSec proposal by ID."""
return self.client.get(f'{self.base_endpoint}/ipsec-proposals', id)
async def create_ipsec_proposal(
self,
name: str,
encryption_algorithm: str,
authentication_algorithm: str,
sa_lifetime_seconds: Optional[int] = None,
sa_lifetime_data: Optional[int] = None,
description: Optional[str] = None,
**kwargs
) -> Dict:
"""Create a new IPSec proposal."""
data = {
'name': name, 'encryption_algorithm': encryption_algorithm,
'authentication_algorithm': authentication_algorithm, **kwargs
}
for key, val in [
('sa_lifetime_seconds', sa_lifetime_seconds),
('sa_lifetime_data', sa_lifetime_data), ('description', description)
]:
if val is not None:
data[key] = val
return self.client.create(f'{self.base_endpoint}/ipsec-proposals', data)
async def update_ipsec_proposal(self, id: int, **kwargs) -> Dict:
"""Update an IPSec proposal."""
return self.client.patch(f'{self.base_endpoint}/ipsec-proposals', id, kwargs)
async def delete_ipsec_proposal(self, id: int) -> None:
"""Delete an IPSec proposal."""
self.client.delete(f'{self.base_endpoint}/ipsec-proposals', id)
# ==================== IPSec Policies ====================
async def list_ipsec_policies(self, name: Optional[str] = None, **kwargs) -> List[Dict]:
"""List all IPSec policies."""
params = {k: v for k, v in {'name': name, **kwargs}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/ipsec-policies', params=params)
async def get_ipsec_policy(self, id: int) -> Dict:
"""Get a specific IPSec policy by ID."""
return self.client.get(f'{self.base_endpoint}/ipsec-policies', id)
async def create_ipsec_policy(
self,
name: str,
proposals: List[int],
pfs_group: Optional[int] = None,
description: Optional[str] = None,
**kwargs
) -> Dict:
"""Create a new IPSec policy."""
data = {'name': name, 'proposals': proposals, **kwargs}
if pfs_group:
data['pfs_group'] = pfs_group
if description:
data['description'] = description
return self.client.create(f'{self.base_endpoint}/ipsec-policies', data)
async def update_ipsec_policy(self, id: int, **kwargs) -> Dict:
"""Update an IPSec policy."""
return self.client.patch(f'{self.base_endpoint}/ipsec-policies', id, kwargs)
async def delete_ipsec_policy(self, id: int) -> None:
"""Delete an IPSec policy."""
self.client.delete(f'{self.base_endpoint}/ipsec-policies', id)
# ==================== IPSec Profiles ====================
async def list_ipsec_profiles(self, name: Optional[str] = None, **kwargs) -> List[Dict]:
"""List all IPSec profiles."""
params = {k: v for k, v in {'name': name, **kwargs}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/ipsec-profiles', params=params)
async def get_ipsec_profile(self, id: int) -> Dict:
"""Get a specific IPSec profile by ID."""
return self.client.get(f'{self.base_endpoint}/ipsec-profiles', id)
async def create_ipsec_profile(
self,
name: str,
mode: str,
ike_policy: int,
ipsec_policy: int,
description: Optional[str] = None,
**kwargs
) -> Dict:
"""Create a new IPSec profile."""
data = {'name': name, 'mode': mode, 'ike_policy': ike_policy, 'ipsec_policy': ipsec_policy, **kwargs}
if description:
data['description'] = description
return self.client.create(f'{self.base_endpoint}/ipsec-profiles', data)
async def update_ipsec_profile(self, id: int, **kwargs) -> Dict:
"""Update an IPSec profile."""
return self.client.patch(f'{self.base_endpoint}/ipsec-profiles', id, kwargs)
async def delete_ipsec_profile(self, id: int) -> None:
"""Delete an IPSec profile."""
self.client.delete(f'{self.base_endpoint}/ipsec-profiles', id)
# ==================== L2VPN ====================
async def list_l2vpns(
self,
name: Optional[str] = None,
slug: Optional[str] = None,
type: Optional[str] = None,
tenant_id: Optional[int] = None,
**kwargs
) -> List[Dict]:
"""List all L2VPNs with optional filtering."""
params = {k: v for k, v in {
'name': name, 'slug': slug, 'type': type, 'tenant_id': tenant_id, **kwargs
}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/l2vpns', params=params)
async def get_l2vpn(self, id: int) -> Dict:
"""Get a specific L2VPN by ID."""
return self.client.get(f'{self.base_endpoint}/l2vpns', id)
async def create_l2vpn(
self,
name: str,
slug: str,
type: str,
identifier: Optional[int] = None,
tenant: Optional[int] = None,
description: Optional[str] = None,
import_targets: Optional[List[int]] = None,
export_targets: Optional[List[int]] = None,
**kwargs
) -> Dict:
"""Create a new L2VPN."""
data = {'name': name, 'slug': slug, 'type': type, **kwargs}
for key, val in [
('identifier', identifier), ('tenant', tenant), ('description', description),
('import_targets', import_targets), ('export_targets', export_targets)
]:
if val is not None:
data[key] = val
return self.client.create(f'{self.base_endpoint}/l2vpns', data)
async def update_l2vpn(self, id: int, **kwargs) -> Dict:
"""Update an L2VPN."""
return self.client.patch(f'{self.base_endpoint}/l2vpns', id, kwargs)
async def delete_l2vpn(self, id: int) -> None:
"""Delete an L2VPN."""
self.client.delete(f'{self.base_endpoint}/l2vpns', id)
# ==================== L2VPN Terminations ====================
async def list_l2vpn_terminations(
self,
l2vpn_id: Optional[int] = None,
**kwargs
) -> List[Dict]:
"""List all L2VPN terminations."""
params = {k: v for k, v in {'l2vpn_id': l2vpn_id, **kwargs}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/l2vpn-terminations', params=params)
async def get_l2vpn_termination(self, id: int) -> Dict:
"""Get a specific L2VPN termination by ID."""
return self.client.get(f'{self.base_endpoint}/l2vpn-terminations', id)
async def create_l2vpn_termination(
self,
l2vpn: int,
assigned_object_type: str,
assigned_object_id: int,
**kwargs
) -> Dict:
"""Create a new L2VPN termination."""
data = {
'l2vpn': l2vpn, 'assigned_object_type': assigned_object_type,
'assigned_object_id': assigned_object_id, **kwargs
}
return self.client.create(f'{self.base_endpoint}/l2vpn-terminations', data)
async def update_l2vpn_termination(self, id: int, **kwargs) -> Dict:
"""Update an L2VPN termination."""
return self.client.patch(f'{self.base_endpoint}/l2vpn-terminations', id, kwargs)
async def delete_l2vpn_termination(self, id: int) -> None:
"""Delete an L2VPN termination."""
self.client.delete(f'{self.base_endpoint}/l2vpn-terminations', id)

View File

@@ -0,0 +1,166 @@
"""
Wireless tools for NetBox MCP Server.
Covers: Wireless LANs, Wireless LAN Groups, and Wireless Links.
"""
import logging
from typing import List, Dict, Optional, Any
from ..netbox_client import NetBoxClient
logger = logging.getLogger(__name__)
class WirelessTools:
"""Tools for Wireless operations in NetBox"""
def __init__(self, client: NetBoxClient):
self.client = client
self.base_endpoint = 'wireless'
# ==================== Wireless LAN Groups ====================
async def list_wireless_lan_groups(
self,
name: Optional[str] = None,
slug: Optional[str] = None,
parent_id: Optional[int] = None,
**kwargs
) -> List[Dict]:
"""List all wireless LAN groups."""
params = {k: v for k, v in {
'name': name, 'slug': slug, 'parent_id': parent_id, **kwargs
}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/wireless-lan-groups', params=params)
async def get_wireless_lan_group(self, id: int) -> Dict:
"""Get a specific wireless LAN group by ID."""
return self.client.get(f'{self.base_endpoint}/wireless-lan-groups', id)
async def create_wireless_lan_group(
self,
name: str,
slug: str,
parent: Optional[int] = None,
description: Optional[str] = None,
**kwargs
) -> Dict:
"""Create a new wireless LAN group."""
data = {'name': name, 'slug': slug, **kwargs}
if parent:
data['parent'] = parent
if description:
data['description'] = description
return self.client.create(f'{self.base_endpoint}/wireless-lan-groups', data)
async def update_wireless_lan_group(self, id: int, **kwargs) -> Dict:
"""Update a wireless LAN group."""
return self.client.patch(f'{self.base_endpoint}/wireless-lan-groups', id, kwargs)
async def delete_wireless_lan_group(self, id: int) -> None:
"""Delete a wireless LAN group."""
self.client.delete(f'{self.base_endpoint}/wireless-lan-groups', id)
# ==================== Wireless LANs ====================
async def list_wireless_lans(
self,
ssid: Optional[str] = None,
group_id: Optional[int] = None,
vlan_id: Optional[int] = None,
tenant_id: Optional[int] = None,
status: Optional[str] = None,
auth_type: Optional[str] = None,
**kwargs
) -> List[Dict]:
"""List all wireless LANs with optional filtering."""
params = {k: v for k, v in {
'ssid': ssid, 'group_id': group_id, 'vlan_id': vlan_id,
'tenant_id': tenant_id, 'status': status, 'auth_type': auth_type, **kwargs
}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/wireless-lans', params=params)
async def get_wireless_lan(self, id: int) -> Dict:
"""Get a specific wireless LAN by ID."""
return self.client.get(f'{self.base_endpoint}/wireless-lans', id)
async def create_wireless_lan(
self,
ssid: str,
status: str = 'active',
group: Optional[int] = None,
vlan: Optional[int] = None,
tenant: Optional[int] = None,
auth_type: Optional[str] = None,
auth_cipher: Optional[str] = None,
auth_psk: Optional[str] = None,
description: Optional[str] = None,
**kwargs
) -> Dict:
"""Create a new wireless LAN."""
data = {'ssid': ssid, 'status': status, **kwargs}
for key, val in [
('group', group), ('vlan', vlan), ('tenant', tenant),
('auth_type', auth_type), ('auth_cipher', auth_cipher),
('auth_psk', auth_psk), ('description', description)
]:
if val is not None:
data[key] = val
return self.client.create(f'{self.base_endpoint}/wireless-lans', data)
async def update_wireless_lan(self, id: int, **kwargs) -> Dict:
"""Update a wireless LAN."""
return self.client.patch(f'{self.base_endpoint}/wireless-lans', id, kwargs)
async def delete_wireless_lan(self, id: int) -> None:
"""Delete a wireless LAN."""
self.client.delete(f'{self.base_endpoint}/wireless-lans', id)
# ==================== Wireless Links ====================
async def list_wireless_links(
self,
ssid: Optional[str] = None,
status: Optional[str] = None,
tenant_id: Optional[int] = None,
**kwargs
) -> List[Dict]:
"""List all wireless links with optional filtering."""
params = {k: v for k, v in {
'ssid': ssid, 'status': status, 'tenant_id': tenant_id, **kwargs
}.items() if v is not None}
return self.client.list(f'{self.base_endpoint}/wireless-links', params=params)
async def get_wireless_link(self, id: int) -> Dict:
"""Get a specific wireless link by ID."""
return self.client.get(f'{self.base_endpoint}/wireless-links', id)
async def create_wireless_link(
self,
interface_a: int,
interface_b: int,
ssid: Optional[str] = None,
status: str = 'connected',
tenant: Optional[int] = None,
auth_type: Optional[str] = None,
auth_cipher: Optional[str] = None,
auth_psk: Optional[str] = None,
description: Optional[str] = None,
**kwargs
) -> Dict:
"""Create a new wireless link."""
data = {'interface_a': interface_a, 'interface_b': interface_b, 'status': status, **kwargs}
for key, val in [
('ssid', ssid), ('tenant', tenant), ('auth_type', auth_type),
('auth_cipher', auth_cipher), ('auth_psk', auth_psk), ('description', description)
]:
if val is not None:
data[key] = val
return self.client.create(f'{self.base_endpoint}/wireless-links', data)
async def update_wireless_link(self, id: int, **kwargs) -> Dict:
"""Update a wireless link."""
return self.client.patch(f'{self.base_endpoint}/wireless-links', id, kwargs)
async def delete_wireless_link(self, id: int) -> None:
"""Delete a wireless link."""
self.client.delete(f'{self.base_endpoint}/wireless-links', id)

View File

@@ -1,21 +0,0 @@
#!/bin/bash
# Capture original working directory before any cd operations
# This should be the user's project directory when launched by Claude Code
export CLAUDE_PROJECT_DIR="${CLAUDE_PROJECT_DIR:-$PWD}"
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
CACHE_VENV="$HOME/.cache/claude-mcp-venvs/leo-claude-mktplace/netbox/.venv"
LOCAL_VENV="$SCRIPT_DIR/.venv"
if [[ -f "$CACHE_VENV/bin/python" ]]; then
PYTHON="$CACHE_VENV/bin/python"
elif [[ -f "$LOCAL_VENV/bin/python" ]]; then
PYTHON="$LOCAL_VENV/bin/python"
else
echo "ERROR: No venv found. Run: ./scripts/setup-venvs.sh" >&2
exit 1
fi
cd "$SCRIPT_DIR"
export PYTHONPATH="$SCRIPT_DIR"
exec "$PYTHON" -m mcp_server.server "$@"

View File

@@ -1,5 +0,0 @@
2026-01-26T11:40:11 | mcp-servers | /home/lmiranda/claude-plugins-work/mcp-servers/viz-platform/registry/dmc_2_5.json | docs/COMMANDS-CHEATSHEET.md CLAUDE.md
2026-01-26T13:46:31 | mcp-servers | /home/lmiranda/claude-plugins-work/mcp-servers/viz-platform/tests/test_chart_tools.py | docs/COMMANDS-CHEATSHEET.md CLAUDE.md
2026-01-26T13:46:32 | mcp-servers | /home/lmiranda/claude-plugins-work/mcp-servers/viz-platform/tests/test_theme_tools.py | docs/COMMANDS-CHEATSHEET.md CLAUDE.md
2026-01-26T13:46:34 | mcp-servers | /home/lmiranda/claude-plugins-work/mcp-servers/viz-platform/tests/test_theme_tools.py | docs/COMMANDS-CHEATSHEET.md CLAUDE.md
2026-01-26T13:46:35 | mcp-servers | /home/lmiranda/claude-plugins-work/mcp-servers/viz-platform/tests/test_theme_tools.py | docs/COMMANDS-CHEATSHEET.md CLAUDE.md

View File

@@ -1,115 +0,0 @@
# viz-platform MCP Server
Model Context Protocol (MCP) server for Dash Mantine Components validation and visualization tools.
## Overview
This MCP server provides 21 tools for:
- **DMC Validation**: Version-locked component registry prevents Claude from hallucinating invalid props
- **Chart Creation**: Plotly-based visualization with theme integration
- **Layout Composition**: Dashboard layouts with responsive grids
- **Theme Management**: Design token-based theming system
- **Page Structure**: Multi-page Dash app generation
## Tools
### DMC Tools (3)
| Tool | Description |
|------|-------------|
| `list_components` | List available DMC components by category |
| `get_component_props` | Get valid props, types, and defaults for a component |
| `validate_component` | Validate component definition before use |
### Chart Tools (2)
| Tool | Description |
|------|-------------|
| `chart_create` | Create Plotly chart (line, bar, scatter, pie, histogram, area, heatmap) |
| `chart_configure_interaction` | Configure chart interactions (zoom, pan, hover) |
### Layout Tools (5)
| Tool | Description |
|------|-------------|
| `layout_create` | Create dashboard layout structure |
| `layout_add_filter` | Add filter components to layout |
| `layout_set_grid` | Configure responsive grid settings |
| `layout_get` | Retrieve layout configuration |
| `layout_add_section` | Add sections to layout |
### Theme Tools (6)
| Tool | Description |
|------|-------------|
| `theme_create` | Create new theme with design tokens |
| `theme_extend` | Extend existing theme with overrides |
| `theme_validate` | Validate theme completeness |
| `theme_export_css` | Export theme as CSS custom properties |
| `theme_list` | List available themes |
| `theme_activate` | Set active theme for visualizations |
### Page Tools (5)
| Tool | Description |
|------|-------------|
| `page_create` | Create new page structure |
| `page_add_navbar` | Add navigation bar to page |
| `page_set_auth` | Configure page authentication |
| `page_list` | List available pages |
| `page_get_app_config` | Get full app configuration |
## Configuration
### Environment Variables
| Variable | Required | Description |
|----------|----------|-------------|
| `DMC_VERSION` | No | Dash Mantine Components version (auto-detected if installed) |
| `VIZ_DEFAULT_THEME` | No | Default theme name |
| `CLAUDE_PROJECT_DIR` | No | Project directory for theme storage |
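
For example, these could be read with plain environment lookups, as in the sketch below (the fallback values shown are assumptions, not documented defaults):

```python
import os

DMC_VERSION = os.environ.get("DMC_VERSION")  # None triggers auto-detection from the installed package
DEFAULT_THEME = os.environ.get("VIZ_DEFAULT_THEME", "default")  # "default" fallback is assumed
PROJECT_DIR = os.environ.get("CLAUDE_PROJECT_DIR", os.getcwd())  # falls back to the working directory
```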
### Theme Storage
Themes can be stored at two levels:
- **User-level**: `~/.config/claude/themes/`
- **Project-level**: `{project}/.viz-platform/themes/`
Project-level themes take precedence.
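
A minimal sketch of that lookup order (the helper name and `<name>.json` file layout are assumptions):

```python
import os
from pathlib import Path
from typing import Optional

def resolve_theme_path(name: str) -> Optional[Path]:
    project = os.environ.get("CLAUDE_PROJECT_DIR", ".")
    candidates = [
        Path(project) / ".viz-platform" / "themes" / f"{name}.json",    # project-level wins
        Path.home() / ".config" / "claude" / "themes" / f"{name}.json",  # user-level fallback
    ]
    for path in candidates:
        if path.is_file():
            return path
    return None
```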
## Component Registry
The server uses a static JSON registry for DMC component validation:
- Pre-generated from DMC source code
- Version-tagged (e.g., `dmc_2_5.json`)
- Prevents hallucination of non-existent props
- Fast, deterministic validation
Registry files are stored in the `registry/` directory.
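
As an illustration, a prop-validation lookup against such a registry might look like this sketch (the JSON schema shown is an assumption, not the actual registry format):

```python
import json

def validate_prop(registry_path: str, component: str, prop: str) -> bool:
    # Assumed schema: {"components": {"Button": {"props": {"variant": {...}}}}}
    with open(registry_path) as f:
        registry = json.load(f)
    props = registry.get("components", {}).get(component, {}).get("props", {})
    return prop in props  # unknown props are rejected rather than hallucinated

# e.g. validate_prop("registry/dmc_2_5.json", "Button", "variant")
```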
## Tests
94 tests with coverage:
- `test_config.py`: 82% coverage
- `test_component_registry.py`: 92% coverage
- `test_dmc_tools.py`: 88% coverage
- `test_chart_tools.py`: 68% coverage
- `test_theme_tools.py`: 99% coverage
Run tests:
```bash
cd mcp-servers/viz-platform
source .venv/bin/activate
pytest tests/ -v
```
## Dependencies
- Python 3.10+
- FastMCP
- plotly
- dash-mantine-components (optional, for version detection)
## Usage
This MCP server is used by the `viz-platform` plugin. See the plugin's commands in `plugins/viz-platform/commands/` for usage.

View File

@@ -1,7 +0,0 @@
"""
viz-platform MCP Server package.
Provides Dash Mantine Components validation and visualization tools to Claude Code.
"""
__version__ = "1.0.0"

View File

@@ -1,479 +0,0 @@
"""
Accessibility validation tools for color blindness and WCAG compliance.
Provides tools for validating color palettes against color blindness
simulations and WCAG contrast requirements.
"""
import logging
import math
from typing import Dict, List, Optional, Any, Tuple
logger = logging.getLogger(__name__)
# Color-blind safe palettes
SAFE_PALETTES = {
"categorical": {
"name": "Paul Tol's Qualitative",
"colors": ["#4477AA", "#EE6677", "#228833", "#CCBB44", "#66CCEE", "#AA3377", "#BBBBBB"],
"description": "Distinguishable for all types of color blindness"
},
"ibm": {
"name": "IBM Design",
"colors": ["#648FFF", "#785EF0", "#DC267F", "#FE6100", "#FFB000"],
"description": "IBM's accessible color palette"
},
"okabe_ito": {
"name": "Okabe-Ito",
"colors": ["#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7", "#000000"],
"description": "Optimized for all color vision deficiencies"
},
"tableau_colorblind": {
"name": "Tableau Colorblind 10",
"colors": ["#006BA4", "#FF800E", "#ABABAB", "#595959", "#5F9ED1",
"#C85200", "#898989", "#A2C8EC", "#FFBC79", "#CFCFCF"],
"description": "Industry-standard accessible palette"
}
}
# Simulation matrices for color blindness (LMS color space transformation)
# These approximate how colors appear to people with different types of color blindness
SIMULATION_MATRICES = {
"deuteranopia": {
# Green-blind (most common)
"severity": "common",
"population": "6% males, 0.4% females",
"description": "Difficulty distinguishing red from green (green-blind)",
"matrix": [
[0.625, 0.375, 0.0],
[0.700, 0.300, 0.0],
[0.0, 0.300, 0.700]
]
},
"protanopia": {
# Red-blind
"severity": "common",
"population": "2.5% males, 0.05% females",
"description": "Difficulty distinguishing red from green (red-blind)",
"matrix": [
[0.567, 0.433, 0.0],
[0.558, 0.442, 0.0],
[0.0, 0.242, 0.758]
]
},
"tritanopia": {
# Blue-blind (rare)
"severity": "rare",
"population": "0.01% total",
"description": "Difficulty distinguishing blue from yellow",
"matrix": [
[0.950, 0.050, 0.0],
[0.0, 0.433, 0.567],
[0.0, 0.475, 0.525]
]
}
}
class AccessibilityTools:
"""
Color accessibility validation tools.
Validates colors for WCAG compliance and color blindness accessibility.
"""
def __init__(self, theme_store=None):
"""
Initialize accessibility tools.
Args:
theme_store: Optional ThemeStore for theme color extraction
"""
self.theme_store = theme_store
def _hex_to_rgb(self, hex_color: str) -> Tuple[int, int, int]:
"""Convert hex color to RGB tuple."""
hex_color = hex_color.lstrip('#')
if len(hex_color) == 3:
hex_color = ''.join([c * 2 for c in hex_color])
return tuple(int(hex_color[i:i+2], 16) for i in (0, 2, 4))
def _rgb_to_hex(self, rgb: Tuple[int, int, int]) -> str:
"""Convert RGB tuple to hex color."""
return '#{:02x}{:02x}{:02x}'.format(
max(0, min(255, int(rgb[0]))),
max(0, min(255, int(rgb[1]))),
max(0, min(255, int(rgb[2])))
)
def _get_relative_luminance(self, rgb: Tuple[int, int, int]) -> float:
"""
Calculate relative luminance per WCAG 2.1.
https://www.w3.org/WAI/GL/wiki/Relative_luminance
"""
def channel_luminance(value: int) -> float:
v = value / 255
return v / 12.92 if v <= 0.03928 else ((v + 0.055) / 1.055) ** 2.4
r, g, b = rgb
return (
0.2126 * channel_luminance(r) +
0.7152 * channel_luminance(g) +
0.0722 * channel_luminance(b)
)
def _get_contrast_ratio(self, color1: str, color2: str) -> float:
"""
Calculate contrast ratio between two colors per WCAG 2.1.
Returns ratio between 1:1 and 21:1.
"""
rgb1 = self._hex_to_rgb(color1)
rgb2 = self._hex_to_rgb(color2)
l1 = self._get_relative_luminance(rgb1)
l2 = self._get_relative_luminance(rgb2)
lighter = max(l1, l2)
darker = min(l1, l2)
return (lighter + 0.05) / (darker + 0.05)
def _simulate_color_blindness(
self,
hex_color: str,
deficiency_type: str
) -> str:
"""
Simulate how a color appears with a specific color blindness type.
Uses linear RGB transformation approximation.
"""
if deficiency_type not in SIMULATION_MATRICES:
return hex_color
rgb = self._hex_to_rgb(hex_color)
matrix = SIMULATION_MATRICES[deficiency_type]["matrix"]
# Apply transformation matrix
r = rgb[0] * matrix[0][0] + rgb[1] * matrix[0][1] + rgb[2] * matrix[0][2]
g = rgb[0] * matrix[1][0] + rgb[1] * matrix[1][1] + rgb[2] * matrix[1][2]
b = rgb[0] * matrix[2][0] + rgb[1] * matrix[2][1] + rgb[2] * matrix[2][2]
return self._rgb_to_hex((r, g, b))
def _get_color_distance(self, color1: str, color2: str) -> float:
"""
Approximate perceptual color distance using Euclidean distance in RGB space.
Returns a value where < 30 means colors may be hard to distinguish.
"""
rgb1 = self._hex_to_rgb(color1)
rgb2 = self._hex_to_rgb(color2)
# Simple Euclidean distance in RGB space (approximation)
# A perceptual metric such as CIEDE2000 would be more accurate in production
return math.sqrt(
(rgb1[0] - rgb2[0]) ** 2 +
(rgb1[1] - rgb2[1]) ** 2 +
(rgb1[2] - rgb2[2]) ** 2
)
async def accessibility_validate_colors(
self,
colors: List[str],
check_types: Optional[List[str]] = None,
min_contrast_ratio: float = 4.5
) -> Dict[str, Any]:
"""
Validate a list of colors for accessibility.
Args:
colors: List of hex colors to validate
check_types: Color blindness types to check (default: all)
min_contrast_ratio: Minimum WCAG contrast ratio (default: 4.5 for AA)
Returns:
Dict with:
- issues: List of accessibility issues found
- simulations: How colors appear under each deficiency
- recommendations: Suggestions for improvement
- safe_palettes: Color-blind safe palette suggestions
"""
check_types = check_types or list(SIMULATION_MATRICES.keys())
issues = []
simulations = {}
# Normalize colors
normalized_colors = [c.upper() if c.startswith('#') else f'#{c.upper()}' for c in colors]
# Simulate each color blindness type
for deficiency in check_types:
if deficiency not in SIMULATION_MATRICES:
continue
simulated = [self._simulate_color_blindness(c, deficiency) for c in normalized_colors]
simulations[deficiency] = {
"original": normalized_colors,
"simulated": simulated,
"info": SIMULATION_MATRICES[deficiency]
}
# Check if any color pairs become indistinguishable
for i in range(len(normalized_colors)):
for j in range(i + 1, len(normalized_colors)):
distance = self._get_color_distance(simulated[i], simulated[j])
if distance < 30: # Threshold for distinguishability
issues.append({
"type": "distinguishability",
"severity": "warning" if distance > 15 else "error",
"colors": [normalized_colors[i], normalized_colors[j]],
"affected_by": [deficiency],
"simulated_colors": [simulated[i], simulated[j]],
"distance": round(distance, 1),
"message": f"Colors may be hard to distinguish for {deficiency} ({SIMULATION_MATRICES[deficiency]['description']})"
})
# Check contrast ratios against white and black backgrounds
for color in normalized_colors:
white_contrast = self._get_contrast_ratio(color, "#FFFFFF")
black_contrast = self._get_contrast_ratio(color, "#000000")
if white_contrast < min_contrast_ratio and black_contrast < min_contrast_ratio:
issues.append({
"type": "contrast_ratio",
"severity": "error",
"colors": [color],
"white_contrast": round(white_contrast, 2),
"black_contrast": round(black_contrast, 2),
"required": min_contrast_ratio,
"message": f"Insufficient contrast against both white ({white_contrast:.1f}:1) and black ({black_contrast:.1f}:1) backgrounds"
})
# Generate recommendations
recommendations = self._generate_recommendations(issues)
# Calculate overall score
error_count = sum(1 for i in issues if i["severity"] == "error")
warning_count = sum(1 for i in issues if i["severity"] == "warning")
if error_count == 0 and warning_count == 0:
score = "A"
elif error_count == 0 and warning_count <= 2:
score = "B"
elif error_count <= 2:
score = "C"
else:
score = "D"
return {
"colors_checked": normalized_colors,
"overall_score": score,
"issue_count": len(issues),
"issues": issues,
"simulations": simulations,
"recommendations": recommendations,
"safe_palettes": SAFE_PALETTES
}
async def accessibility_validate_theme(
self,
theme_name: str
) -> Dict[str, Any]:
"""
Validate a theme's colors for accessibility.
Args:
theme_name: Theme name to validate
Returns:
Dict with accessibility validation results
"""
if not self.theme_store:
return {
"error": "Theme store not configured",
"theme_name": theme_name
}
theme = self.theme_store.get_theme(theme_name)
if not theme:
available = self.theme_store.list_themes()
return {
"error": f"Theme '{theme_name}' not found. Available: {available}",
"theme_name": theme_name
}
# Extract colors from theme
colors = []
tokens = theme.get("tokens", {})
color_tokens = tokens.get("colors", {})
def extract_colors(obj, prefix=""):
"""Recursively extract color values."""
# Treat bare 6-char strings as colors only if they are valid hex
# (avoids mangling named colors such as "tomato").
if isinstance(obj, str) and (obj.startswith('#') or (len(obj) == 6 and all(c in '0123456789abcdefABCDEF' for c in obj))):
colors.append(obj if obj.startswith('#') else f'#{obj}')
elif isinstance(obj, dict):
for key, value in obj.items():
extract_colors(value, f"{prefix}.{key}")
elif isinstance(obj, list):
for item in obj:
extract_colors(item, prefix)
extract_colors(color_tokens)
# Validate extracted colors
result = await self.accessibility_validate_colors(colors)
result["theme_name"] = theme_name
# Add theme-specific checks
primary = color_tokens.get("primary")
background = color_tokens.get("background", {})
text = color_tokens.get("text", {})
if primary and background:
bg_color = background.get("base") if isinstance(background, dict) else background
if bg_color:
contrast = self._get_contrast_ratio(primary, bg_color)
if contrast < 4.5:
result["issues"].append({
"type": "primary_contrast",
"severity": "error",
"colors": [primary, bg_color],
"ratio": round(contrast, 2),
"required": 4.5,
"message": f"Primary color has insufficient contrast ({contrast:.1f}:1) against background"
})
return result
async def accessibility_suggest_alternative(
self,
color: str,
deficiency_type: str
) -> Dict[str, Any]:
"""
Suggest accessible alternative colors.
Args:
color: Original hex color
deficiency_type: Type of color blindness to optimize for
Returns:
Dict with alternative color suggestions
"""
rgb = self._hex_to_rgb(color)
suggestions = []
# Suggest shifting hue while maintaining saturation and brightness
# For red-green deficiency, shift toward blue or yellow
if deficiency_type in ["deuteranopia", "protanopia"]:
# Shift toward blue
blue_shift = self._rgb_to_hex((
max(0, rgb[0] - 50),
max(0, rgb[1] - 30),
min(255, rgb[2] + 80)
))
suggestions.append({
"color": blue_shift,
"description": "Blue-shifted alternative",
"preserves": "approximate brightness"
})
# Shift toward yellow/orange
yellow_shift = self._rgb_to_hex((
min(255, rgb[0] + 50),
min(255, rgb[1] + 30),
max(0, rgb[2] - 80)
))
suggestions.append({
"color": yellow_shift,
"description": "Yellow-shifted alternative",
"preserves": "approximate brightness"
})
elif deficiency_type == "tritanopia":
# For blue-yellow deficiency, shift toward red or green
red_shift = self._rgb_to_hex((
min(255, rgb[0] + 60),
max(0, rgb[1] - 20),
max(0, rgb[2] - 40)
))
suggestions.append({
"color": red_shift,
"description": "Red-shifted alternative",
"preserves": "approximate brightness"
})
# Add safe palette suggestions
for palette_name, palette in SAFE_PALETTES.items():
# Find closest color in safe palette
min_distance = float('inf')
closest = None
for safe_color in palette["colors"]:
distance = self._get_color_distance(color, safe_color)
if distance < min_distance:
min_distance = distance
closest = safe_color
if closest:
suggestions.append({
"color": closest,
"description": f"From {palette['name']} palette",
"palette": palette_name
})
return {
"original_color": color,
"deficiency_type": deficiency_type,
"suggestions": suggestions[:5] # Limit to 5 suggestions
}
def _generate_recommendations(self, issues: List[Dict[str, Any]]) -> List[str]:
"""Generate actionable recommendations based on issues."""
recommendations = []
# Check for distinguishability issues
distinguishability_issues = [i for i in issues if i["type"] == "distinguishability"]
if distinguishability_issues:
affected_types = set()
for issue in distinguishability_issues:
affected_types.update(issue.get("affected_by", []))
if "deuteranopia" in affected_types or "protanopia" in affected_types:
recommendations.append(
"Avoid using red and green as the only differentiators - "
"add patterns, shapes, or labels"
)
recommendations.append(
"Consider using a color-blind safe palette like Okabe-Ito or IBM Design"
)
# Check for contrast issues
contrast_issues = [i for i in issues if i["type"] in ["contrast_ratio", "primary_contrast"]]
if contrast_issues:
recommendations.append(
"Increase contrast by darkening colors for light backgrounds "
"or lightening for dark backgrounds"
)
recommendations.append(
"Use WCAG contrast checker tools to verify text readability"
)
# General recommendations
if len(issues) > 0:
recommendations.append(
"Add secondary visual cues (icons, patterns, labels) "
"to not rely solely on color"
)
if not recommendations:
recommendations.append(
"Color palette appears accessible! Consider adding patterns "
"for additional distinguishability"
)
return recommendations
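if __name__ == "__main__":
    # Minimal usage sketch (illustrative; assumes the module-level
    # SIMULATION_MATRICES and SAFE_PALETTES tables defined above).
    import asyncio

    async def _demo() -> None:
        tools = AccessibilityTools()
        # Pure red vs. pure green is the classic deuteranopia/protanopia trap.
        report = await tools.accessibility_validate_colors(["#FF0000", "#00FF00"])
        print(report["overall_score"], report["issue_count"])

    asyncio.run(_demo())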

View File

@@ -1,533 +0,0 @@
"""
Chart creation tools using Plotly.
Provides tools for creating data visualizations with automatic theme integration.
"""
import base64
import logging
import os
from typing import Dict, List, Optional, Any, Union
logger = logging.getLogger(__name__)
# Check for kaleido availability
KALEIDO_AVAILABLE = False
try:
import kaleido
KALEIDO_AVAILABLE = True
except ImportError:
logger.debug("kaleido not installed - chart export will be unavailable")
# Default color palette based on Mantine theme
DEFAULT_COLORS = [
"#228be6", # blue
"#40c057", # green
"#fa5252", # red
"#fab005", # yellow
"#7950f2", # violet
"#fd7e14", # orange
"#20c997", # teal
"#f783ac", # pink
"#868e96", # gray
"#15aabf", # cyan
]
class ChartTools:
"""
Plotly-based chart creation tools.
Creates charts that integrate with DMC theming system.
"""
def __init__(self, theme_store=None):
"""
Initialize chart tools.
Args:
theme_store: Optional ThemeStore for theme token resolution
"""
self.theme_store = theme_store
self._active_theme = None
def set_theme(self, theme: Dict[str, Any]) -> None:
"""Set the active theme for chart styling."""
self._active_theme = theme
def _get_color_palette(self) -> List[str]:
"""Get color palette from theme or defaults."""
if self._active_theme and 'colors' in self._active_theme:
colors = self._active_theme['colors']
# Extract primary colors from theme
palette = []
for key in ['primary', 'secondary', 'success', 'warning', 'error']:
if key in colors:
palette.append(colors[key])
if palette:
return palette + DEFAULT_COLORS[len(palette):]
return DEFAULT_COLORS
def _resolve_color(self, color: Optional[str]) -> str:
"""Resolve a color token to actual color value."""
if not color:
return self._get_color_palette()[0]
# Check if it's a theme token
if self._active_theme and 'colors' in self._active_theme:
colors = self._active_theme['colors']
if color in colors:
return colors[color]
# Check if it's already a valid color
if color.startswith('#') or color.startswith('rgb'):
return color
# Map common color names to palette
color_map = {
'blue': DEFAULT_COLORS[0],
'green': DEFAULT_COLORS[1],
'red': DEFAULT_COLORS[2],
'yellow': DEFAULT_COLORS[3],
'violet': DEFAULT_COLORS[4],
'orange': DEFAULT_COLORS[5],
'teal': DEFAULT_COLORS[6],
'pink': DEFAULT_COLORS[7],
'gray': DEFAULT_COLORS[8],
'cyan': DEFAULT_COLORS[9],
}
return color_map.get(color, color)
async def chart_create(
self,
chart_type: str,
data: Dict[str, Any],
options: Optional[Dict[str, Any]] = None
) -> Dict[str, Any]:
"""
Create a Plotly chart.
Args:
chart_type: Type of chart (line, bar, scatter, pie, heatmap, histogram, area)
data: Data specification with x, y values or labels/values for pie
options: Optional chart options (title, color, layout settings)
Returns:
Dict with:
- figure: Plotly figure JSON
- chart_type: Type of chart created
- error: Error message if creation failed
"""
options = options or {}
# Validate chart type
valid_types = ['line', 'bar', 'scatter', 'pie', 'heatmap', 'histogram', 'area']
if chart_type not in valid_types:
return {
"error": f"Invalid chart_type '{chart_type}'. Must be one of: {valid_types}",
"chart_type": chart_type,
"figure": None
}
try:
# Build trace based on chart type
trace = self._build_trace(chart_type, data, options)
if 'error' in trace:
return trace
# Build layout
layout = self._build_layout(options)
# Create figure structure
figure = {
"data": [trace],
"layout": layout
}
return {
"figure": figure,
"chart_type": chart_type,
"trace_count": 1
}
except Exception as e:
logger.error(f"Chart creation failed: {e}")
return {
"error": str(e),
"chart_type": chart_type,
"figure": None
}
def _build_trace(
self,
chart_type: str,
data: Dict[str, Any],
options: Dict[str, Any]
) -> Dict[str, Any]:
"""Build Plotly trace for the chart type."""
color = self._resolve_color(options.get('color'))
palette = self._get_color_palette()
# Common trace properties
trace: Dict[str, Any] = {}
if chart_type == 'line':
trace = {
"type": "scatter",
"mode": "lines+markers",
"x": data.get('x', []),
"y": data.get('y', []),
"line": {"color": color},
"marker": {"color": color}
}
if 'name' in data:
trace['name'] = data['name']
elif chart_type == 'bar':
trace = {
"type": "bar",
"x": data.get('x', []),
"y": data.get('y', []),
"marker": {"color": color}
}
if options.get('horizontal'):
trace['orientation'] = 'h'
trace['x'], trace['y'] = trace['y'], trace['x']
if 'name' in data:
trace['name'] = data['name']
elif chart_type == 'scatter':
trace = {
"type": "scatter",
"mode": "markers",
"x": data.get('x', []),
"y": data.get('y', []),
"marker": {
"color": color,
"size": options.get('marker_size', 10)
}
}
if 'size' in data:
trace['marker']['size'] = data['size']
if 'name' in data:
trace['name'] = data['name']
elif chart_type == 'pie':
labels = data.get('labels', data.get('x', []))
values = data.get('values', data.get('y', []))
trace = {
"type": "pie",
"labels": labels,
"values": values,
"marker": {"colors": palette[:len(labels)]}
}
if options.get('donut'):
trace['hole'] = options.get('hole', 0.4)
elif chart_type == 'heatmap':
trace = {
"type": "heatmap",
"z": data.get('z', data.get('values', [])),
"x": data.get('x', []),
"y": data.get('y', []),
"colorscale": options.get('colorscale', 'Blues')
}
elif chart_type == 'histogram':
trace = {
"type": "histogram",
"x": data.get('x', data.get('values', [])),
"marker": {"color": color}
}
if 'nbins' in options:
trace['nbinsx'] = options['nbins']
elif chart_type == 'area':
trace = {
"type": "scatter",
"mode": "lines",
"x": data.get('x', []),
"y": data.get('y', []),
"fill": "tozeroy",
"line": {"color": color},
"fillcolor": color.replace(')', ', 0.3)').replace('rgb', 'rgba') if color.startswith('rgb') else color + '4D'
}
if 'name' in data:
trace['name'] = data['name']
else:
return {"error": f"Unsupported chart type: {chart_type}"}
return trace
def _build_layout(self, options: Dict[str, Any]) -> Dict[str, Any]:
"""Build Plotly layout from options."""
layout: Dict[str, Any] = {
"autosize": True,
"margin": {"l": 50, "r": 30, "t": 50, "b": 50}
}
# Title
if 'title' in options:
layout['title'] = {
"text": options['title'],
"x": 0.5,
"xanchor": "center"
}
# Axis labels
if 'x_label' in options:
layout['xaxis'] = layout.get('xaxis', {})
layout['xaxis']['title'] = options['x_label']
if 'y_label' in options:
layout['yaxis'] = layout.get('yaxis', {})
layout['yaxis']['title'] = options['y_label']
# Theme-based styling
if self._active_theme:
colors = self._active_theme.get('colors', {})
bg = colors.get('background', {})
if isinstance(bg, dict):
layout['paper_bgcolor'] = bg.get('base', '#ffffff')
layout['plot_bgcolor'] = bg.get('subtle', '#f8f9fa')
elif isinstance(bg, str):
layout['paper_bgcolor'] = bg
layout['plot_bgcolor'] = bg
text_color = colors.get('text', {})
if isinstance(text_color, dict):
layout['font'] = {'color': text_color.get('primary', '#212529')}
elif isinstance(text_color, str):
layout['font'] = {'color': text_color}
# Additional layout options
if 'showlegend' in options:
layout['showlegend'] = options['showlegend']
if 'height' in options:
layout['height'] = options['height']
if 'width' in options:
layout['width'] = options['width']
return layout
async def chart_configure_interaction(
self,
figure: Dict[str, Any],
interactions: Dict[str, Any]
) -> Dict[str, Any]:
"""
Configure interactions for a chart.
Args:
figure: Plotly figure JSON to modify
interactions: Interaction configuration:
- hover_template: Custom hover text template
- click_data: Enable click data capture
- selection: Enable selection (box, lasso)
- zoom: Enable/disable zoom
Returns:
Dict with:
- figure: Updated figure JSON
- interactions_added: List of interactions configured
- error: Error message if configuration failed
"""
if not figure or 'data' not in figure:
return {
"error": "Invalid figure: must contain 'data' key",
"figure": figure,
"interactions_added": []
}
try:
interactions_added = []
# Process each trace
for i, trace in enumerate(figure['data']):
# Hover template
if 'hover_template' in interactions:
trace['hovertemplate'] = interactions['hover_template']
if i == 0:
interactions_added.append('hover_template')
# Custom hover info
if 'hover_info' in interactions:
trace['hoverinfo'] = interactions['hover_info']
if i == 0:
interactions_added.append('hover_info')
# Layout-level interactions
layout = figure.get('layout', {})
# Click data (Dash callback integration)
if interactions.get('click_data', False):
layout['clickmode'] = 'event+select'
interactions_added.append('click_data')
# Selection mode
if 'selection' in interactions:
sel_mode = interactions['selection']
if sel_mode in ['box', 'lasso', 'box+lasso']:
layout['dragmode'] = 'select' if sel_mode == 'box' else sel_mode
interactions_added.append(f'selection:{sel_mode}')
# Zoom configuration
if 'zoom' in interactions:
if not interactions['zoom']:
layout['xaxis'] = layout.get('xaxis', {})
layout['yaxis'] = layout.get('yaxis', {})
layout['xaxis']['fixedrange'] = True
layout['yaxis']['fixedrange'] = True
interactions_added.append('zoom:disabled')
else:
interactions_added.append('zoom:enabled')
# Modebar configuration
if 'modebar' in interactions:
layout['modebar'] = interactions['modebar']
interactions_added.append('modebar')
figure['layout'] = layout
return {
"figure": figure,
"interactions_added": interactions_added
}
except Exception as e:
logger.error(f"Interaction configuration failed: {e}")
return {
"error": str(e),
"figure": figure,
"interactions_added": []
}
async def chart_export(
self,
figure: Dict[str, Any],
format: str = "png",
width: Optional[int] = None,
height: Optional[int] = None,
scale: float = 2.0,
output_path: Optional[str] = None
) -> Dict[str, Any]:
"""
Export a Plotly chart to a static image format.
Args:
figure: Plotly figure JSON to export
format: Output format - png, svg, or pdf
width: Image width in pixels (default: from figure or 1200)
height: Image height in pixels (default: from figure or 800)
scale: Resolution scale factor (default: 2 for retina)
output_path: Optional file path to save the image
Returns:
Dict with:
- image_data: Base64-encoded image (if no output_path)
- file_path: Path to saved file (if output_path provided)
- format: Export format used
- dimensions: {width, height, scale}
- error: Error message if export failed
"""
# Validate format
valid_formats = ['png', 'svg', 'pdf']
format = format.lower()
if format not in valid_formats:
return {
"error": f"Invalid format '{format}'. Must be one of: {valid_formats}",
"format": format,
"image_data": None
}
# Check kaleido availability
if not KALEIDO_AVAILABLE:
return {
"error": "kaleido package not installed. Install with: pip install kaleido",
"format": format,
"image_data": None,
"install_hint": "pip install kaleido"
}
# Validate figure
if not figure or 'data' not in figure:
return {
"error": "Invalid figure: must contain 'data' key",
"format": format,
"image_data": None
}
try:
import plotly.graph_objects as go
import plotly.io as pio
# Create Plotly figure object
fig = go.Figure(figure)
# Determine dimensions
layout = figure.get('layout', {})
export_width = width or layout.get('width') or 1200
export_height = height or layout.get('height') or 800
# Export to bytes
image_bytes = pio.to_image(
fig,
format=format,
width=export_width,
height=export_height,
scale=scale
)
result = {
"format": format,
"dimensions": {
"width": export_width,
"height": export_height,
"scale": scale,
"effective_width": int(export_width * scale),
"effective_height": int(export_height * scale)
}
}
# Save to file or return base64
if output_path:
# Ensure directory exists
output_dir = os.path.dirname(output_path)
if output_dir and not os.path.exists(output_dir):
os.makedirs(output_dir, exist_ok=True)
# Add extension if missing
if not output_path.endswith(f'.{format}'):
output_path = f"{output_path}.{format}"
with open(output_path, 'wb') as f:
f.write(image_bytes)
result["file_path"] = output_path
result["file_size_bytes"] = len(image_bytes)
else:
# Return as base64
result["image_data"] = base64.b64encode(image_bytes).decode('utf-8')
result["data_uri"] = f"data:image/{format};base64,{result['image_data']}"
return result
except ImportError as e:
logger.error(f"Chart export failed - missing dependency: {e}")
return {
"error": f"Missing dependency for export: {e}",
"format": format,
"image_data": None,
"install_hint": "pip install plotly kaleido"
}
except Exception as e:
logger.error(f"Chart export failed: {e}")
return {
"error": str(e),
"format": format,
"image_data": None
}
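if __name__ == "__main__":
    # Minimal usage sketch (illustrative). chart_create only builds figure
    # JSON, so this runs without plotly; chart_export would additionally
    # require plotly and kaleido.
    import asyncio

    async def _demo() -> None:
        charts = ChartTools()
        result = await charts.chart_create(
            "bar",
            {"x": ["Q1", "Q2", "Q3"], "y": [10, 25, 18]},
            {"title": "Quarterly totals", "color": "blue"},
        )
        print(result.get("error") or result["figure"]["layout"]["title"])

    asyncio.run(_demo())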

View File

@@ -1,301 +0,0 @@
"""
DMC Component Registry for viz-platform.
Provides version-locked component definitions to prevent Claude from
hallucinating invalid props. Uses static JSON registries pre-generated
from DMC source.
"""
import json
import logging
from pathlib import Path
from typing import Dict, List, Optional, Any
logger = logging.getLogger(__name__)
class ComponentRegistry:
"""
Version-locked registry of Dash Mantine Components.
Loads component definitions from static JSON files and provides
lookup methods for validation tools.
"""
def __init__(self, dmc_version: Optional[str] = None):
"""
Initialize the component registry.
Args:
dmc_version: Installed DMC version (e.g., "0.14.7").
If None, will try to detect or use fallback.
"""
self.dmc_version = dmc_version
self.registry_dir = Path(__file__).parent.parent / 'registry'
self.components: Dict[str, Dict[str, Any]] = {}
self.categories: Dict[str, List[str]] = {}
self.loaded_version: Optional[str] = None
def load(self) -> bool:
"""
Load the component registry for the configured DMC version.
Returns:
True if registry loaded successfully, False otherwise
"""
registry_file = self._find_registry_file()
if not registry_file:
logger.warning(
f"No registry found for DMC {self.dmc_version}. "
"Component validation will be limited."
)
return False
try:
with open(registry_file, 'r') as f:
data = json.load(f)
self.loaded_version = data.get('version')
self.components = data.get('components', {})
self.categories = data.get('categories', {})
logger.info(
f"Loaded component registry v{self.loaded_version} "
f"with {len(self.components)} components"
)
return True
except Exception as e:
logger.error(f"Failed to load registry: {e}")
return False
def _find_registry_file(self) -> Optional[Path]:
"""
Find the best matching registry file for the DMC version.
Strategy:
1. Exact major.minor match (e.g., dmc_0_14.json for 0.14.7)
2. Fallback to latest available registry
Returns:
Path to registry file, or None if not found
"""
if not self.registry_dir.exists():
logger.warning(f"Registry directory not found: {self.registry_dir}")
return None
# Try exact major.minor match
if self.dmc_version:
parts = self.dmc_version.split('.')
if len(parts) >= 2:
major_minor = f"{parts[0]}_{parts[1]}"
exact_match = self.registry_dir / f"dmc_{major_minor}.json"
if exact_match.exists():
return exact_match
# Fallback: find latest registry
registry_files = list(self.registry_dir.glob("dmc_*.json"))
if registry_files:
# Sort numerically by version so e.g. dmc_0_14.json outranks dmc_0_9.json
# (a plain lexicographic filename sort would get this backwards).
def version_key(path: Path) -> tuple:
    try:
        return tuple(int(part) for part in path.stem[len('dmc_'):].split('_'))
    except ValueError:
        return (0,)
registry_files.sort(key=version_key, reverse=True)
fallback = registry_files[0]
if self.dmc_version:
logger.warning(
f"No exact match for DMC {self.dmc_version}, "
f"using fallback: {fallback.name}"
)
return fallback
return None
def get_component(self, name: str) -> Optional[Dict[str, Any]]:
"""
Get component definition by name.
Args:
name: Component name (e.g., "Button", "TextInput")
Returns:
Component definition dict, or None if not found
"""
return self.components.get(name)
def get_component_props(self, name: str) -> Optional[Dict[str, Any]]:
"""
Get props schema for a component.
Args:
name: Component name
Returns:
Props dict with type info, or None if component not found
"""
component = self.get_component(name)
if component:
return component.get('props', {})
return None
def list_components(self, category: Optional[str] = None) -> Dict[str, List[str]]:
"""
List available components, optionally filtered by category.
Args:
category: Optional category filter (e.g., "inputs", "buttons")
Returns:
Dict of category -> component names
"""
if category:
if category in self.categories:
return {category: self.categories[category]}
return {}
return self.categories
def get_categories(self) -> List[str]:
"""
Get list of available component categories.
Returns:
List of category names
"""
return list(self.categories.keys())
def validate_prop(
self,
component: str,
prop_name: str,
prop_value: Any
) -> Dict[str, Any]:
"""
Validate a single prop value against the registry.
Args:
component: Component name
prop_name: Prop name
prop_value: Value to validate
Returns:
Dict with valid: bool, error: Optional[str]
"""
props = self.get_component_props(component)
if props is None:
return {
'valid': False,
'error': f"Unknown component: {component}"
}
if prop_name not in props:
# Check for similar prop names (typo detection)
similar = self._find_similar_props(prop_name, props.keys())
if similar:
return {
'valid': False,
'error': f"Unknown prop '{prop_name}' for {component}. Did you mean '{similar}'?"
}
return {
'valid': False,
'error': f"Unknown prop '{prop_name}' for {component}"
}
prop_schema = props[prop_name]
return self._validate_value(prop_value, prop_schema, prop_name)
def _validate_value(
self,
value: Any,
schema: Dict[str, Any],
prop_name: str
) -> Dict[str, Any]:
"""
Validate a value against a prop schema.
Args:
value: Value to validate
schema: Prop schema from registry
prop_name: Prop name (for error messages)
Returns:
Dict with valid: bool, error: Optional[str]
"""
prop_type = schema.get('type', 'any')
# Any type always valid
if prop_type == 'any':
return {'valid': True}
# Check enum values
if 'enum' in schema:
if value not in schema['enum']:
return {
'valid': False,
'error': f"Prop '{prop_name}' expects one of {schema['enum']}, got '{value}'"
}
return {'valid': True}
# Type checking
type_checks = {
'string': lambda v: isinstance(v, str),
'number': lambda v: isinstance(v, (int, float)),
'integer': lambda v: isinstance(v, int),
'boolean': lambda v: isinstance(v, bool),
'array': lambda v: isinstance(v, list),
'object': lambda v: isinstance(v, dict),
}
checker = type_checks.get(prop_type)
if checker and not checker(value):
return {
'valid': False,
'error': f"Prop '{prop_name}' expects type '{prop_type}', got '{type(value).__name__}'"
}
return {'valid': True}
def _find_similar_props(
self,
prop_name: str,
available_props: List[str]
) -> Optional[str]:
"""
Find a similar prop name for typo suggestions.
Uses simple edit distance heuristic.
Args:
prop_name: The (possibly misspelled) prop name
available_props: List of valid prop names
Returns:
Most similar prop name, or None if no close match
"""
prop_lower = prop_name.lower()
for prop in available_props:
# Exact match after lowercase
if prop.lower() == prop_lower:
return prop
# Common typos: extra/missing letter
if abs(len(prop) - len(prop_name)) == 1:
if prop_lower.startswith(prop.lower()[:3]):
return prop
return None
def is_loaded(self) -> bool:
"""Check if registry is loaded."""
return len(self.components) > 0
def load_registry(dmc_version: Optional[str] = None) -> ComponentRegistry:
"""
Convenience function to load and return a component registry.
Args:
dmc_version: Optional DMC version string
Returns:
Loaded ComponentRegistry instance
"""
registry = ComponentRegistry(dmc_version)
registry.load()
return registry
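if __name__ == "__main__":
    # Minimal usage sketch (illustrative; assumes a pre-generated registry
    # JSON such as registry/dmc_0_14.json exists and defines a "Button"
    # component with a "variant" prop).
    reg = load_registry("0.14.7")
    if reg.is_loaded():
        print(f"Loaded v{reg.loaded_version}: {reg.get_categories()}")
        print(reg.validate_prop("Button", "variant", "filled"))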

View File

@@ -1,172 +0,0 @@
"""
Configuration loader for viz-platform MCP Server.
Implements hybrid configuration system:
- System-level: ~/.config/claude/viz-platform.env (theme preferences)
- Project-level: .env (DMC version overrides)
- Auto-detection: DMC package version from installed package
"""
from pathlib import Path
from dotenv import load_dotenv
import os
import logging
from typing import Any, Dict, Optional
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class VizPlatformConfig:
"""Hybrid configuration loader for viz-platform tools"""
def __init__(self):
self.dmc_version: Optional[str] = None
self.theme_dir_user: Path = Path.home() / '.config' / 'claude' / 'themes'
self.theme_dir_project: Optional[Path] = None
self.default_theme: Optional[str] = None
def load(self) -> Dict[str, Any]:
"""
Load configuration from system and project levels.
Returns:
Dict containing dmc_version, theme directories, and availability flags
"""
# Load system config
system_config = Path.home() / '.config' / 'claude' / 'viz-platform.env'
if system_config.exists():
load_dotenv(system_config)
logger.info(f"Loaded system configuration from {system_config}")
# Find project directory
project_dir = self._find_project_directory()
# Load project config (overrides system)
if project_dir:
project_config = project_dir / '.env'
if project_config.exists():
load_dotenv(project_config, override=True)
logger.info(f"Loaded project configuration from {project_config}")
# Set project theme directory
self.theme_dir_project = project_dir / '.viz-platform' / 'themes'
# Get DMC version (from env or auto-detect)
self.dmc_version = os.getenv('DMC_VERSION') or self._detect_dmc_version()
self.default_theme = os.getenv('VIZ_DEFAULT_THEME')
# Ensure user theme directory exists
self.theme_dir_user.mkdir(parents=True, exist_ok=True)
return {
'dmc_version': self.dmc_version,
'dmc_available': self.dmc_version is not None,
'theme_dir_user': str(self.theme_dir_user),
'theme_dir_project': str(self.theme_dir_project) if self.theme_dir_project else None,
'default_theme': self.default_theme,
'project_dir': str(project_dir) if project_dir else None
}
def _detect_dmc_version(self) -> Optional[str]:
"""
Auto-detect installed Dash Mantine Components version.
Returns:
Version string (e.g., "0.14.7") or None if not installed
"""
try:
from importlib.metadata import version
dmc_version = version('dash-mantine-components')
logger.info(f"Detected DMC version: {dmc_version}")
return dmc_version
except ImportError:
logger.warning("dash-mantine-components not installed - using registry fallback")
return None
except Exception as e:
logger.warning(f"Could not detect DMC version: {e}")
return None
def _find_project_directory(self) -> Optional[Path]:
"""
Find the user's project directory.
Returns:
Path to project directory, or None if not found
"""
# Strategy 1: Check CLAUDE_PROJECT_DIR environment variable
project_dir = os.getenv('CLAUDE_PROJECT_DIR')
if project_dir:
path = Path(project_dir)
if path.exists():
logger.info(f"Found project directory from CLAUDE_PROJECT_DIR: {path}")
return path
# Strategy 2: Check PWD
pwd = os.getenv('PWD')
if pwd:
path = Path(pwd)
if path.exists() and (
(path / '.git').exists() or
(path / '.env').exists() or
(path / '.viz-platform').exists()
):
logger.info(f"Found project directory from PWD: {path}")
return path
# Strategy 3: Check current working directory
cwd = Path.cwd()
if (cwd / '.git').exists() or (cwd / '.env').exists() or (cwd / '.viz-platform').exists():
logger.info(f"Found project directory from cwd: {cwd}")
return cwd
logger.debug("Could not determine project directory")
return None
def load_config() -> Dict[str, Any]:
"""
Convenience function to load configuration.
Returns:
Configuration dictionary
"""
config = VizPlatformConfig()
return config.load()
def check_dmc_version() -> Dict[str, Any]:
"""
Check DMC installation status for SessionStart hook.
Returns:
Dict with installation status and version info
"""
config = load_config()
if not config.get('dmc_available'):
return {
'installed': False,
'message': 'dash-mantine-components not installed. Run: pip install dash-mantine-components'
}
version = config.get('dmc_version', 'unknown')
# Check for registry compatibility
registry_path = Path(__file__).parent.parent / 'registry'
major_minor = '.'.join(version.split('.')[:2]) if version else None
registry_file = registry_path / f'dmc_{major_minor.replace(".", "_")}.json' if major_minor else None
if registry_file and registry_file.exists():
return {
'installed': True,
'version': version,
'registry_available': True,
'message': f'DMC {version} ready with component registry'
}
else:
return {
'installed': True,
'version': version,
'registry_available': False,
'message': f'DMC {version} installed but no matching registry. Validation may be limited.'
}
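if __name__ == "__main__":
    # Minimal usage sketch (illustrative): load the hybrid configuration
    # and report DMC installation status.
    cfg = load_config()
    print(f"DMC version: {cfg['dmc_version']} (project: {cfg['project_dir']})")
    print(check_dmc_version()["message"])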

View File

@@ -1,306 +0,0 @@
"""
DMC (Dash Mantine Components) validation tools.
Provides component constraint layer to prevent Claude from hallucinating invalid props.
"""
import logging
from typing import Dict, List, Optional, Any
from .component_registry import ComponentRegistry
logger = logging.getLogger(__name__)
class DMCTools:
"""
DMC component validation tools.
These tools provide the "constraint layer" that validates component usage
against a version-locked registry of DMC components.
"""
def __init__(self, registry: Optional[ComponentRegistry] = None):
"""
Initialize DMC tools with component registry.
Args:
registry: ComponentRegistry instance. If None, creates one.
"""
self.registry = registry
self._initialized = False
def initialize(self, dmc_version: Optional[str] = None) -> bool:
"""
Initialize the registry if not already provided.
Args:
dmc_version: DMC version to load registry for
Returns:
True if initialized successfully
"""
if self.registry is None:
self.registry = ComponentRegistry(dmc_version)
if not self.registry.is_loaded():
self.registry.load()
self._initialized = self.registry.is_loaded()
return self._initialized
async def list_components(
self,
category: Optional[str] = None
) -> Dict[str, Any]:
"""
List available DMC components, optionally filtered by category.
Args:
category: Optional category filter (e.g., "inputs", "buttons", "navigation")
Returns:
Dict with:
- components: Dict[category -> [component names]]
- categories: List of available categories
- version: Loaded DMC registry version
- total_count: Total number of components
"""
if not self._initialized:
return {
"error": "Registry not initialized",
"components": {},
"categories": [],
"version": None,
"total_count": 0
}
components = self.registry.list_components(category)
all_categories = self.registry.get_categories()
# Count total components
total = sum(len(comps) for comps in components.values())
return {
"components": components,
"categories": all_categories if not category else [category],
"version": self.registry.loaded_version,
"total_count": total
}
async def get_component_props(self, component: str) -> Dict[str, Any]:
"""
Get props schema for a specific component.
Args:
component: Component name (e.g., "Button", "TextInput")
Returns:
Dict with:
- component: Component name
- description: Component description
- props: Dict of prop name -> {type, default, enum, description}
- prop_count: Number of props
- required: List of required prop names
Or error dict if component not found
"""
if not self._initialized:
return {
"error": "Registry not initialized",
"component": component,
"props": {},
"prop_count": 0
}
comp_def = self.registry.get_component(component)
if not comp_def:
# Try to suggest similar component name
similar = self._find_similar_component(component)
error_msg = f"Component '{component}' not found in registry"
if similar:
error_msg += f". Did you mean '{similar}'?"
return {
"error": error_msg,
"component": component,
"props": {},
"prop_count": 0
}
props = comp_def.get('props', {})
# Extract required props
required = [
name for name, schema in props.items()
if schema.get('required', False)
]
return {
"component": component,
"description": comp_def.get('description', ''),
"props": props,
"prop_count": len(props),
"required": required,
"version": self.registry.loaded_version
}
async def validate_component(
self,
component: str,
props: Dict[str, Any]
) -> Dict[str, Any]:
"""
Validate component props against registry.
Args:
component: Component name
props: Props dict to validate
Returns:
Dict with:
- valid: bool - True if all props are valid
- errors: List of error messages
- warnings: List of warning messages
- validated_props: Number of props validated
- component: Component name for reference
"""
if not self._initialized:
return {
"valid": False,
"errors": ["Registry not initialized"],
"warnings": [],
"validated_props": 0,
"component": component
}
errors: List[str] = []
warnings: List[str] = []
# Check if component exists
comp_def = self.registry.get_component(component)
if not comp_def:
similar = self._find_similar_component(component)
error_msg = f"Unknown component: {component}"
if similar:
error_msg += f". Did you mean '{similar}'?"
errors.append(error_msg)
return {
"valid": False,
"errors": errors,
"warnings": warnings,
"validated_props": 0,
"component": component
}
comp_props = comp_def.get('props', {})
# Check for required props
for prop_name, prop_schema in comp_props.items():
if prop_schema.get('required', False) and prop_name not in props:
errors.append(f"Missing required prop: '{prop_name}'")
# Validate each provided prop
for prop_name, prop_value in props.items():
# Skip special props that are always allowed
if prop_name in ('id', 'children', 'className', 'style', 'key'):
continue
result = self.registry.validate_prop(component, prop_name, prop_value)
if not result.get('valid', True):
    error = result.get('error', f"Invalid prop: {prop_name}")
    # Type mismatches are soft warnings; unknown props and invalid
    # enum values are hard errors.
    if "expects type" in error:
        warnings.append(f"⚠️ {error}")
    else:
        errors.append(error)
# Check for props that exist but might have common mistakes
self._check_common_mistakes(component, props, warnings)
return {
"valid": len(errors) == 0,
"errors": errors,
"warnings": warnings,
"validated_props": len(props),
"component": component,
"version": self.registry.loaded_version
}
def _find_similar_component(self, component: str) -> Optional[str]:
"""
Find a similar component name for suggestions.
Args:
component: The (possibly misspelled) component name
Returns:
Similar component name, or None if no close match
"""
if not self.registry:
return None
comp_lower = component.lower()
all_components = []
for comps in self.registry.categories.values():
all_components.extend(comps)
for comp in all_components:
# Exact match after lowercase
if comp.lower() == comp_lower:
return comp
# Check if it's a prefix match
if comp.lower().startswith(comp_lower) or comp_lower.startswith(comp.lower()):
return comp
# Check for common typos
if abs(len(comp) - len(component)) <= 2:
if comp_lower[:4] == comp.lower()[:4]:
return comp
return None
def _check_common_mistakes(
self,
component: str,
props: Dict[str, Any],
warnings: List[str]
) -> None:
"""
Check for common prop usage mistakes and add warnings.
Args:
component: Component name
props: Props being used
warnings: List to append warnings to
"""
# Common mistake: using 'onclick' instead of callback pattern
if any(p.lower() == 'onclick' for p in props):
warnings.append(
"⚠️ Dash uses callback patterns, not inline event handlers. "
"Use 'n_clicks' prop with a callback instead."
)
# Common mistake: using 'class' instead of 'className'
if 'class' in props:
warnings.append(
"⚠️ Use 'className' instead of 'class' for CSS classes."
)
# Button-specific checks
if component == 'Button':
if 'href' in props and 'component' not in props:
warnings.append(
"⚠️ Button with 'href' should also set 'component=\"a\"' for proper anchor behavior."
)
# Input-specific checks
if 'Input' in component:
if 'value' in props and 'onChange' in props:
warnings.append(
"⚠️ Dash uses 'value' prop with callbacks, not 'onChange'. "
"The value updates automatically through Dash callbacks."
)
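if __name__ == "__main__":
    # Minimal usage sketch (illustrative; the version string is an example).
    # Exercises the 'class' vs 'className' check above.
    import asyncio

    async def _demo() -> None:
        tools = DMCTools()
        tools.initialize("0.14.7")
        result = await tools.validate_component("Button", {"class": "primary"})
        print(result["valid"], result["errors"], result["warnings"])

    asyncio.run(_demo())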

View File

@@ -1,553 +0,0 @@
"""
Layout composition tools for dashboard building.
Provides tools for creating structured layouts with grids, filters, and sections.
"""
import logging
from typing import Dict, List, Optional, Any
from uuid import uuid4
logger = logging.getLogger(__name__)
# Standard responsive breakpoints (Mantine/Bootstrap-aligned)
DEFAULT_BREAKPOINTS = {
"xs": {
"min_width": "0px",
"max_width": "575px",
"cols": 1,
"spacing": "xs",
"description": "Extra small devices (phones, portrait)"
},
"sm": {
"min_width": "576px",
"max_width": "767px",
"cols": 2,
"spacing": "sm",
"description": "Small devices (phones, landscape)"
},
"md": {
"min_width": "768px",
"max_width": "991px",
"cols": 6,
"spacing": "md",
"description": "Medium devices (tablets)"
},
"lg": {
"min_width": "992px",
"max_width": "1199px",
"cols": 12,
"spacing": "md",
"description": "Large devices (desktops)"
},
"xl": {
"min_width": "1200px",
"max_width": None,
"cols": 12,
"spacing": "lg",
"description": "Extra large devices (large desktops)"
}
}
# Layout templates
TEMPLATES = {
"dashboard": {
"sections": ["header", "filters", "main", "footer"],
"default_grid": {"cols": 12, "spacing": "md"},
"description": "Standard dashboard with header, filters, main content, and footer"
},
"report": {
"sections": ["title", "summary", "content", "appendix"],
"default_grid": {"cols": 1, "spacing": "lg"},
"description": "Report layout with title, summary, and content sections"
},
"form": {
"sections": ["header", "fields", "actions"],
"default_grid": {"cols": 2, "spacing": "md"},
"description": "Form layout with header, fields, and action buttons"
},
"blank": {
"sections": ["main"],
"default_grid": {"cols": 12, "spacing": "md"},
"description": "Blank canvas for custom layouts"
}
}
# Filter type definitions
FILTER_TYPES = {
"dropdown": {
"component": "Select",
"props": ["label", "data", "placeholder", "clearable", "searchable", "value"]
},
"multi_select": {
"component": "MultiSelect",
"props": ["label", "data", "placeholder", "clearable", "searchable", "value"]
},
"date_range": {
"component": "DateRangePicker",
"props": ["label", "placeholder", "value", "minDate", "maxDate"]
},
"date": {
"component": "DatePicker",
"props": ["label", "placeholder", "value", "minDate", "maxDate"]
},
"search": {
"component": "TextInput",
"props": ["label", "placeholder", "value", "icon"]
},
"checkbox_group": {
"component": "CheckboxGroup",
"props": ["label", "children", "value"]
},
"radio_group": {
"component": "RadioGroup",
"props": ["label", "children", "value"]
},
"slider": {
"component": "Slider",
"props": ["label", "min", "max", "step", "value", "marks"]
},
"range_slider": {
"component": "RangeSlider",
"props": ["label", "min", "max", "step", "value", "marks"]
}
}
class LayoutTools:
"""
Dashboard layout composition tools.
Creates layouts that map to DMC Grid and AppShell components.
"""
def __init__(self):
"""Initialize layout tools."""
self._layouts: Dict[str, Dict[str, Any]] = {}
async def layout_create(
self,
name: str,
template: Optional[str] = None
) -> Dict[str, Any]:
"""
Create a new layout container.
Args:
name: Unique name for the layout
template: Optional template (dashboard, report, form, blank)
Returns:
Dict with:
- layout_ref: Reference to use in other tools
- template: Template used
- sections: Available sections
- grid: Default grid configuration
"""
# Validate template
template = template or "blank"
if template not in TEMPLATES:
return {
"error": f"Invalid template '{template}'. Must be one of: {list(TEMPLATES.keys())}",
"layout_ref": None
}
# Check for name collision
if name in self._layouts:
return {
"error": f"Layout '{name}' already exists. Use a different name or modify existing.",
"layout_ref": name
}
template_config = TEMPLATES[template]
# Create layout structure
layout = {
"id": str(uuid4()),
"name": name,
"template": template,
"sections": {section: {"items": []} for section in template_config["sections"]},
"grid": template_config["default_grid"].copy(),
"filters": [],
"metadata": {
"description": template_config["description"]
}
}
self._layouts[name] = layout
return {
"layout_ref": name,
"template": template,
"sections": template_config["sections"],
"grid": layout["grid"],
"description": template_config["description"]
}
async def layout_add_filter(
self,
layout_ref: str,
filter_type: str,
options: Dict[str, Any]
) -> Dict[str, Any]:
"""
Add a filter control to a layout.
Args:
layout_ref: Layout name to add filter to
filter_type: Type of filter (dropdown, date_range, search, checkbox_group, etc.)
options: Filter options (label, data for dropdown, placeholder, position)
Returns:
Dict with:
- filter_id: Unique ID for the filter
- component: DMC component that will be used
- props: Props that were set
- position: Where filter was placed
"""
# Validate layout exists
if layout_ref not in self._layouts:
return {
"error": f"Layout '{layout_ref}' not found. Create it first with layout_create.",
"filter_id": None
}
# Validate filter type
if filter_type not in FILTER_TYPES:
return {
"error": f"Invalid filter_type '{filter_type}'. Must be one of: {list(FILTER_TYPES.keys())}",
"filter_id": None
}
filter_config = FILTER_TYPES[filter_type]
layout = self._layouts[layout_ref]
# Generate filter ID
filter_id = f"filter_{filter_type}_{len(layout['filters'])}"
# Extract relevant props
props = {"id": filter_id}
for prop in filter_config["props"]:
if prop in options:
props[prop] = options[prop]
# Determine position
position = options.get("position", "filters")
if position not in layout["sections"]:
# Default to first available section
position = list(layout["sections"].keys())[0]
# Create filter definition
filter_def = {
"id": filter_id,
"type": filter_type,
"component": filter_config["component"],
"props": props,
"position": position
}
layout["filters"].append(filter_def)
layout["sections"][position]["items"].append({
"type": "filter",
"ref": filter_id
})
return {
"filter_id": filter_id,
"component": filter_config["component"],
"props": props,
"position": position,
"layout_ref": layout_ref
}
async def layout_set_grid(
self,
layout_ref: str,
grid: Dict[str, Any]
) -> Dict[str, Any]:
"""
Configure the grid system for a layout.
Args:
layout_ref: Layout name to configure
grid: Grid configuration:
- cols: Number of columns (default 12)
- spacing: Gap between items (xs, sm, md, lg, xl)
- breakpoints: Responsive breakpoints {xs: cols, sm: cols, ...}
- gutter: Gutter size
Returns:
Dict with:
- grid: Updated grid configuration
- layout_ref: Layout reference
"""
# Validate layout exists
if layout_ref not in self._layouts:
return {
"error": f"Layout '{layout_ref}' not found. Create it first with layout_create.",
"grid": None
}
layout = self._layouts[layout_ref]
# Validate spacing if provided
valid_spacing = ["xs", "sm", "md", "lg", "xl"]
if "spacing" in grid and grid["spacing"] not in valid_spacing:
return {
"error": f"Invalid spacing '{grid['spacing']}'. Must be one of: {valid_spacing}",
"grid": layout["grid"]
}
# Validate cols
if "cols" in grid:
cols = grid["cols"]
if not isinstance(cols, int) or cols < 1 or cols > 24:
return {
"error": f"Invalid cols '{cols}'. Must be integer between 1 and 24.",
"grid": layout["grid"]
}
# Update grid configuration
layout["grid"].update(grid)
# Process breakpoints if provided
if "breakpoints" in grid:
bp = grid["breakpoints"]
layout["grid"]["breakpoints"] = bp
return {
"grid": layout["grid"],
"layout_ref": layout_ref
}
async def layout_get(self, layout_ref: str) -> Dict[str, Any]:
"""
Get a layout's full configuration.
Args:
layout_ref: Layout name to retrieve
Returns:
Full layout configuration or error
"""
if layout_ref not in self._layouts:
return {
"error": f"Layout '{layout_ref}' not found.",
"layout": None
}
layout = self._layouts[layout_ref]
return {
"layout": layout,
"filter_count": len(layout["filters"]),
"sections": list(layout["sections"].keys())
}
async def layout_add_section(
self,
layout_ref: str,
section_name: str,
position: Optional[int] = None
) -> Dict[str, Any]:
"""
Add a custom section to a layout.
Args:
layout_ref: Layout name
section_name: Name for the new section
position: Optional position index (appends if not specified)
Returns:
Dict with sections list and the new section name
"""
if layout_ref not in self._layouts:
return {
"error": f"Layout '{layout_ref}' not found.",
"sections": []
}
layout = self._layouts[layout_ref]
if section_name in layout["sections"]:
return {
"error": f"Section '{section_name}' already exists.",
"sections": list(layout["sections"].keys())
}
# Add new section
layout["sections"][section_name] = {"items": []}
return {
"section_name": section_name,
"sections": list(layout["sections"].keys()),
"layout_ref": layout_ref
}
def get_available_templates(self) -> Dict[str, Any]:
"""Get list of available layout templates."""
return {
name: {
"sections": config["sections"],
"description": config["description"]
}
for name, config in TEMPLATES.items()
}
def get_available_filter_types(self) -> Dict[str, Any]:
"""Get list of available filter types."""
return {
name: {
"component": config["component"],
"props": config["props"]
}
for name, config in FILTER_TYPES.items()
}
async def layout_set_breakpoints(
self,
layout_ref: str,
breakpoints: Dict[str, Dict[str, Any]],
mobile_first: bool = True
) -> Dict[str, Any]:
"""
Configure responsive breakpoints for a layout.
Args:
layout_ref: Layout name to configure
breakpoints: Breakpoint configuration dict:
{
"xs": {"cols": 1, "spacing": "xs"},
"sm": {"cols": 2, "spacing": "sm"},
"md": {"cols": 6, "spacing": "md"},
"lg": {"cols": 12, "spacing": "md"},
"xl": {"cols": 12, "spacing": "lg"}
}
mobile_first: If True, use min-width media queries (default)
Returns:
Dict with:
- breakpoints: Complete breakpoint configuration
- css_media_queries: Generated CSS media queries
- mobile_first: Whether mobile-first approach is used
"""
# Validate layout exists
if layout_ref not in self._layouts:
return {
"error": f"Layout '{layout_ref}' not found. Create it first with layout_create.",
"breakpoints": None
}
layout = self._layouts[layout_ref]
# Validate breakpoint names
valid_breakpoints = ["xs", "sm", "md", "lg", "xl"]
for bp_name in breakpoints.keys():
if bp_name not in valid_breakpoints:
return {
"error": f"Invalid breakpoint '{bp_name}'. Must be one of: {valid_breakpoints}",
"breakpoints": layout.get("breakpoints")
}
# Merge with defaults
merged_breakpoints = {}
for bp_name in valid_breakpoints:
default = DEFAULT_BREAKPOINTS[bp_name].copy()
if bp_name in breakpoints:
default.update(breakpoints[bp_name])
merged_breakpoints[bp_name] = default
# Validate spacing values
valid_spacing = ["xs", "sm", "md", "lg", "xl"]
for bp_name, bp_config in merged_breakpoints.items():
if "spacing" in bp_config and bp_config["spacing"] not in valid_spacing:
return {
"error": f"Invalid spacing '{bp_config['spacing']}' for breakpoint '{bp_name}'. Must be one of: {valid_spacing}",
"breakpoints": layout.get("breakpoints")
}
# Validate column counts
for bp_name, bp_config in merged_breakpoints.items():
if "cols" in bp_config:
cols = bp_config["cols"]
if not isinstance(cols, int) or cols < 1 or cols > 24:
return {
"error": f"Invalid cols '{cols}' for breakpoint '{bp_name}'. Must be integer between 1 and 24.",
"breakpoints": layout.get("breakpoints")
}
# Generate CSS media queries
css_queries = self._generate_media_queries(merged_breakpoints, mobile_first)
# Store in layout
layout["breakpoints"] = merged_breakpoints
layout["mobile_first"] = mobile_first
layout["responsive_css"] = css_queries
return {
"layout_ref": layout_ref,
"breakpoints": merged_breakpoints,
"mobile_first": mobile_first,
"css_media_queries": css_queries
}
def _generate_media_queries(
self,
breakpoints: Dict[str, Dict[str, Any]],
mobile_first: bool
) -> List[str]:
"""Generate CSS media queries for breakpoints."""
queries = []
bp_order = ["xs", "sm", "md", "lg", "xl"]
if mobile_first:
# Use min-width queries (mobile-first)
for bp_name in bp_order[1:]: # Skip xs (base styles)
bp = breakpoints[bp_name]
min_width = bp.get("min_width", DEFAULT_BREAKPOINTS[bp_name]["min_width"])
if min_width and min_width != "0px":
queries.append(f"@media (min-width: {min_width}) {{ /* {bp_name} styles */ }}")
else:
# Use max-width queries (desktop-first)
for bp_name in reversed(bp_order[:-1]): # Skip xl (base styles)
bp = breakpoints[bp_name]
max_width = bp.get("max_width", DEFAULT_BREAKPOINTS[bp_name]["max_width"])
if max_width:
queries.append(f"@media (max-width: {max_width}) {{ /* {bp_name} styles */ }}")
return queries
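# With DEFAULT_BREAKPOINTS and mobile_first=True this produces, for example:
#   @media (min-width: 576px) { /* sm styles */ }
#   @media (min-width: 768px) { /* md styles */ }
#   @media (min-width: 992px) { /* lg styles */ }
#   @media (min-width: 1200px) { /* xl styles */ }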
async def layout_get_breakpoints(self, layout_ref: str) -> Dict[str, Any]:
"""
Get the breakpoint configuration for a layout.
Args:
layout_ref: Layout name
Returns:
Dict with breakpoint configuration
"""
if layout_ref not in self._layouts:
return {
"error": f"Layout '{layout_ref}' not found.",
"breakpoints": None
}
layout = self._layouts[layout_ref]
return {
"layout_ref": layout_ref,
"breakpoints": layout.get("breakpoints", DEFAULT_BREAKPOINTS.copy()),
"mobile_first": layout.get("mobile_first", True),
"css_media_queries": layout.get("responsive_css", [])
}
def get_default_breakpoints(self) -> Dict[str, Any]:
"""Get the default breakpoint configuration."""
return {
"breakpoints": DEFAULT_BREAKPOINTS.copy(),
"description": "Standard responsive breakpoints aligned with Mantine/Bootstrap",
"mobile_first": True
}
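if __name__ == "__main__":
    # Minimal usage sketch (illustrative): a dashboard layout with one
    # dropdown filter placed in its "filters" section.
    import asyncio

    async def _demo() -> None:
        tools = LayoutTools()
        await tools.layout_create("sales", template="dashboard")
        added = await tools.layout_add_filter(
            "sales",
            "dropdown",
            {"label": "Region", "data": ["EMEA", "APAC"], "position": "filters"},
        )
        print(added["filter_id"], added["component"])

    asyncio.run(_demo())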

View File

@@ -1,366 +0,0 @@
"""
Multi-page app tools for viz-platform.
Provides tools for building complete Dash applications with routing and navigation.
"""
import logging
from typing import Dict, List, Optional, Any
from uuid import uuid4
logger = logging.getLogger(__name__)
# Navigation position options
NAV_POSITIONS = ["top", "left", "right"]
# Auth types supported
AUTH_TYPES = ["none", "basic", "oauth", "custom"]
class PageTools:
"""
Multi-page Dash application tools.
Creates page definitions, navigation, and auth configuration.
"""
def __init__(self):
"""Initialize page tools."""
self._pages: Dict[str, Dict[str, Any]] = {}
self._navbars: Dict[str, Dict[str, Any]] = {}
self._app_config: Dict[str, Any] = {
"title": "Dash App",
"suppress_callback_exceptions": True
}
async def page_create(
self,
name: str,
path: str,
layout_ref: Optional[str] = None,
title: Optional[str] = None
) -> Dict[str, Any]:
"""
Create a new page definition.
Args:
name: Unique page name (used as identifier)
path: URL path for the page (e.g., "/", "/settings")
layout_ref: Optional layout reference to use for the page
title: Optional page title (defaults to name)
Returns:
Dict with:
- page_ref: Reference to use in other tools
- path: URL path
- registered: Whether page was registered
"""
# Validate path format
if not path.startswith('/'):
return {
"error": f"Path must start with '/'. Got: {path}",
"page_ref": None
}
# Check for name collision
if name in self._pages:
return {
"error": f"Page '{name}' already exists. Use a different name.",
"page_ref": name
}
# Check for path collision
for page_name, page_data in self._pages.items():
if page_data['path'] == path:
return {
"error": f"Path '{path}' already used by page '{page_name}'.",
"page_ref": None
}
# Create page definition
page = {
"id": str(uuid4()),
"name": name,
"path": path,
"title": title or name,
"layout_ref": layout_ref,
"auth": None,
"metadata": {}
}
self._pages[name] = page
return {
"page_ref": name,
"path": path,
"title": page["title"],
"layout_ref": layout_ref,
"registered": True
}
async def page_add_navbar(
self,
pages: List[str],
options: Optional[Dict[str, Any]] = None
) -> Dict[str, Any]:
"""
Generate a navigation component linking pages.
Args:
pages: List of page names to include in navigation
options: Navigation options:
- position: "top", "left", or "right"
- style: Style variant
- brand: Brand/logo text or config
- collapsible: Whether to collapse on mobile
Returns:
Dict with:
- navbar_id: Navigation ID
- pages: List of page links generated
- component: DMC component structure
"""
options = options or {}
# Validate pages exist
missing_pages = [p for p in pages if p not in self._pages]
if missing_pages:
return {
"error": f"Pages not found: {missing_pages}. Create them first.",
"navbar_id": None
}
# Validate position
position = options.get("position", "top")
if position not in NAV_POSITIONS:
return {
"error": f"Invalid position '{position}'. Must be one of: {NAV_POSITIONS}",
"navbar_id": None
}
# Generate navbar ID
navbar_id = f"navbar_{len(self._navbars)}"
# Build page links
page_links = []
for page_name in pages:
page = self._pages[page_name]
page_links.append({
"label": page["title"],
"href": page["path"],
"page_ref": page_name
})
# Build DMC component structure
if position == "top":
component = self._build_top_navbar(page_links, options)
else:
component = self._build_side_navbar(page_links, options, position)
# Store navbar config
self._navbars[navbar_id] = {
"id": navbar_id,
"position": position,
"pages": pages,
"options": options,
"component": component
}
return {
"navbar_id": navbar_id,
"position": position,
"pages": page_links,
"component": component
}
async def page_set_auth(
self,
page_ref: str,
auth_config: Dict[str, Any]
) -> Dict[str, Any]:
"""
Configure authentication for a page.
Args:
page_ref: Page name to configure
auth_config: Authentication configuration:
- type: "none", "basic", "oauth", "custom"
- required: Whether auth is required (default True)
- roles: List of required roles (optional)
- redirect: Redirect path for unauthenticated users
Returns:
Dict with:
- page_ref: Page reference
- auth_type: Type of auth configured
- protected: Whether page is protected
"""
# Validate page exists
if page_ref not in self._pages:
available = list(self._pages.keys())
return {
"error": f"Page '{page_ref}' not found. Available: {available}",
"page_ref": page_ref
}
# Validate auth type
auth_type = auth_config.get("type", "basic")
if auth_type not in AUTH_TYPES:
return {
"error": f"Invalid auth type '{auth_type}'. Must be one of: {AUTH_TYPES}",
"page_ref": page_ref
}
# Build auth config
auth = {
"type": auth_type,
"required": auth_config.get("required", True),
"roles": auth_config.get("roles", []),
"redirect": auth_config.get("redirect", "/login")
}
# Handle OAuth-specific config
if auth_type == "oauth":
auth["provider"] = auth_config.get("provider", "generic")
auth["scopes"] = auth_config.get("scopes", [])
# Update page
self._pages[page_ref]["auth"] = auth
return {
"page_ref": page_ref,
"auth_type": auth_type,
"protected": auth["required"],
"roles": auth["roles"],
"redirect": auth["redirect"]
}
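    # Illustrative auth_config (hypothetical values, not from the original source):
    #   {"type": "oauth", "required": True, "roles": ["admin"],
    #    "redirect": "/login", "provider": "github", "scopes": ["read:user"]}
    # With a config like that, page_set_auth("settings", ...) marks /settings as
    # protected and records the OAuth provider and scopes on the page.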
async def page_list(self) -> Dict[str, Any]:
"""
List all registered pages.
Returns:
Dict with pages and their configurations
"""
pages_info = {}
for name, page in self._pages.items():
pages_info[name] = {
"path": page["path"],
"title": page["title"],
"layout_ref": page["layout_ref"],
"protected": page["auth"] is not None and page["auth"].get("required", False)
}
return {
"pages": pages_info,
"count": len(pages_info),
"navbars": list(self._navbars.keys())
}
async def page_get_app_config(self) -> Dict[str, Any]:
"""
Get the complete app configuration for Dash.
Returns:
Dict with app config including pages, navbars, and settings
"""
# Build pages config
pages_config = []
for name, page in self._pages.items():
pages_config.append({
"name": name,
"path": page["path"],
"title": page["title"],
"layout_ref": page["layout_ref"]
})
# Build routing config
routes = {page["path"]: name for name, page in self._pages.items()}
return {
"app": self._app_config,
"pages": pages_config,
"routes": routes,
"navbars": list(self._navbars.values()),
"page_count": len(self._pages)
}
def _build_top_navbar(
self,
page_links: List[Dict[str, str]],
options: Dict[str, Any]
) -> Dict[str, Any]:
"""Build a top navigation bar component."""
brand = options.get("brand", "App")
# Build nav links
nav_items = []
for link in page_links:
nav_items.append({
"component": "NavLink",
"props": {
"label": link["label"],
"href": link["href"]
}
})
return {
"component": "AppShell.Header",
"children": [
{
"component": "Group",
"props": {"justify": "space-between", "h": "100%", "px": "md"},
"children": [
{
"component": "Text",
"props": {"size": "lg", "fw": 700},
"children": brand
},
{
"component": "Group",
"props": {"gap": "sm"},
"children": nav_items
}
]
}
]
}
def _build_side_navbar(
self,
page_links: List[Dict[str, str]],
options: Dict[str, Any],
position: str
) -> Dict[str, Any]:
"""Build a side navigation bar component."""
brand = options.get("brand", "App")
# Build nav links
nav_items = []
for link in page_links:
nav_items.append({
"component": "NavLink",
"props": {
"label": link["label"],
"href": link["href"]
}
})
navbar_component = "AppShell.Navbar" if position == "left" else "AppShell.Aside"
return {
"component": navbar_component,
"props": {"p": "md"},
"children": [
{
"component": "Text",
"props": {"size": "lg", "fw": 700, "mb": "md"},
"children": brand
},
{
"component": "Stack",
"props": {"gap": "xs"},
"children": nav_items
}
]
}
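A minimal driver sketch for the PageTools API above (hypothetical, not part of the deleted module; it assumes the import path mcp_server.page_tools and the page_create(name, path, layout_ref=None, title=None) signature implied by the server dispatch code):

import asyncio
from mcp_server.page_tools import PageTools

async def demo():
    pages = PageTools()
    await pages.page_create("home", "/", title="Home")
    await pages.page_create("settings", "/settings", title="Settings")
    # Top navbar linking both pages, then basic auth on /settings
    await pages.page_add_navbar(["home", "settings"], {"position": "top", "brand": "Demo"})
    await pages.page_set_auth("settings", {"type": "basic", "roles": ["admin"]})
    print(await pages.page_list())

asyncio.run(demo())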

View File

@@ -1,928 +0,0 @@
"""
MCP Server entry point for viz-platform integration.
Provides Dash Mantine Components validation, charting, layout, theming, and page tools
to Claude Code via JSON-RPC 2.0 over stdio.
"""
import asyncio
import logging
import json
from mcp.server import Server
from mcp.server.stdio import stdio_server
from mcp.types import Tool, TextContent
from .config import VizPlatformConfig
from .dmc_tools import DMCTools
from .chart_tools import ChartTools
from .layout_tools import LayoutTools
from .theme_tools import ThemeTools
from .page_tools import PageTools
from .accessibility_tools import AccessibilityTools
# Suppress noisy MCP validation warnings on stderr
logging.basicConfig(level=logging.INFO)
logging.getLogger("root").setLevel(logging.ERROR)
logging.getLogger("mcp").setLevel(logging.ERROR)
logger = logging.getLogger(__name__)
class VizPlatformMCPServer:
"""MCP Server for visualization platform integration"""
def __init__(self):
self.server = Server("viz-platform-mcp")
self.config = None
self.dmc_tools = DMCTools()
self.chart_tools = ChartTools()
self.layout_tools = LayoutTools()
self.theme_tools = ThemeTools()
self.page_tools = PageTools()
self.accessibility_tools = AccessibilityTools(theme_store=self.theme_tools.store)
async def initialize(self):
"""Initialize server and load configuration."""
try:
config_loader = VizPlatformConfig()
self.config = config_loader.load()
# Initialize DMC tools with detected version
dmc_version = self.config.get('dmc_version')
self.dmc_tools.initialize(dmc_version)
# Log available capabilities
caps = []
if self.config.get('dmc_available'):
caps.append(f"DMC {dmc_version}")
if self.dmc_tools._initialized:
caps.append(f"Registry loaded ({self.dmc_tools.registry.loaded_version})")
else:
caps.append("DMC (not installed)")
logger.info(f"viz-platform MCP Server initialized with: {', '.join(caps)}")
except Exception as e:
logger.error(f"Failed to initialize: {e}")
raise
def setup_tools(self):
"""Register all available tools with the MCP server"""
@self.server.list_tools()
async def list_tools() -> list[Tool]:
"""Return list of available tools"""
tools = []
# DMC validation tools (Issue #172)
tools.append(Tool(
name="list_components",
description=(
"List available Dash Mantine Components. "
"Returns components grouped by category with version info. "
"Use this to discover what components are available before building UI."
),
inputSchema={
"type": "object",
"properties": {
"category": {
"type": "string",
"description": (
"Optional category filter. Available categories: "
"buttons, inputs, navigation, feedback, overlays, "
"typography, layout, data_display, charts, dates"
)
}
},
"required": []
}
))
tools.append(Tool(
name="get_component_props",
description=(
"Get the props schema for a specific DMC component. "
"Returns all available props with types, defaults, and allowed values. "
"ALWAYS use this before creating a component to ensure valid props."
),
inputSchema={
"type": "object",
"properties": {
"component": {
"type": "string",
"description": "Component name (e.g., 'Button', 'TextInput', 'Select')"
}
},
"required": ["component"]
}
))
tools.append(Tool(
name="validate_component",
description=(
"Validate component props before use. "
"Checks for invalid props, type mismatches, and common mistakes. "
"Returns errors and warnings with suggestions for fixes."
),
inputSchema={
"type": "object",
"properties": {
"component": {
"type": "string",
"description": "Component name to validate"
},
"props": {
"type": "object",
"description": "Props object to validate"
}
},
"required": ["component", "props"]
}
))
# Chart tools (Issue #173)
tools.append(Tool(
name="chart_create",
description=(
"Create a Plotly chart for data visualization. "
"Supports line, bar, scatter, pie, heatmap, histogram, and area charts. "
"Automatically applies theme colors when a theme is active."
),
inputSchema={
"type": "object",
"properties": {
"chart_type": {
"type": "string",
"enum": ["line", "bar", "scatter", "pie", "heatmap", "histogram", "area"],
"description": "Type of chart to create"
},
"data": {
"type": "object",
"description": (
"Data for the chart. For most charts: {x: [], y: []}. "
"For pie: {labels: [], values: []}. "
"For heatmap: {x: [], y: [], z: [[]]}"
)
},
"options": {
"type": "object",
"description": (
"Optional settings: title, x_label, y_label, color, "
"showlegend, height, width, horizontal (for bar)"
)
}
},
"required": ["chart_type", "data"]
}
))
tools.append(Tool(
name="chart_configure_interaction",
description=(
"Configure interactions on an existing chart. "
"Add hover templates, enable click data capture, selection modes, "
"and zoom behavior for Dash callback integration."
),
inputSchema={
"type": "object",
"properties": {
"figure": {
"type": "object",
"description": "Plotly figure JSON to modify"
},
"interactions": {
"type": "object",
"description": (
"Interaction config: hover_template (string), "
"click_data (bool), selection ('box'|'lasso'), zoom (bool)"
)
}
},
"required": ["figure", "interactions"]
}
))
# Chart export tool (Issue #247)
tools.append(Tool(
name="chart_export",
description=(
"Export a Plotly chart to static image format (PNG, SVG, PDF). "
"Requires kaleido package. Returns base64 image data or saves to file."
),
inputSchema={
"type": "object",
"properties": {
"figure": {
"type": "object",
"description": "Plotly figure JSON to export"
},
"format": {
"type": "string",
"enum": ["png", "svg", "pdf"],
"description": "Output format (default: png)"
},
"width": {
"type": "integer",
"description": "Image width in pixels (default: 1200)"
},
"height": {
"type": "integer",
"description": "Image height in pixels (default: 800)"
},
"scale": {
"type": "number",
"description": "Resolution scale factor (default: 2 for retina)"
},
"output_path": {
"type": "string",
"description": "Optional file path to save image"
}
},
"required": ["figure"]
}
))
# Layout tools (Issue #174)
tools.append(Tool(
name="layout_create",
description=(
"Create a new dashboard layout container. "
"Templates available: dashboard, report, form, blank. "
"Returns layout reference for use with other layout tools."
),
inputSchema={
"type": "object",
"properties": {
"name": {
"type": "string",
"description": "Unique name for the layout"
},
"template": {
"type": "string",
"enum": ["dashboard", "report", "form", "blank"],
"description": "Layout template to use (default: blank)"
}
},
"required": ["name"]
}
))
tools.append(Tool(
name="layout_add_filter",
description=(
"Add a filter control to a layout. "
"Filter types: dropdown, multi_select, date_range, date, search, "
"checkbox_group, radio_group, slider, range_slider."
),
inputSchema={
"type": "object",
"properties": {
"layout_ref": {
"type": "string",
"description": "Layout name to add filter to"
},
"filter_type": {
"type": "string",
"enum": ["dropdown", "multi_select", "date_range", "date",
"search", "checkbox_group", "radio_group", "slider", "range_slider"],
"description": "Type of filter control"
},
"options": {
"type": "object",
"description": (
"Filter options: label, data (for dropdown), placeholder, "
"position (section name), value, etc."
)
}
},
"required": ["layout_ref", "filter_type", "options"]
}
))
tools.append(Tool(
name="layout_set_grid",
description=(
"Configure the grid system for a layout. "
"Uses DMC Grid component patterns with 12 or 24 column system."
),
inputSchema={
"type": "object",
"properties": {
"layout_ref": {
"type": "string",
"description": "Layout name to configure"
},
"grid": {
"type": "object",
"description": (
"Grid config: cols (1-24), spacing (xs|sm|md|lg|xl), "
"breakpoints ({xs: cols, sm: cols, ...}), gutter"
)
}
},
"required": ["layout_ref", "grid"]
}
))
# Responsive breakpoints tool (Issue #249)
tools.append(Tool(
name="layout_set_breakpoints",
description=(
"Configure responsive breakpoints for a layout. "
"Supports xs, sm, md, lg, xl breakpoints with mobile-first approach. "
"Each breakpoint can define cols, spacing, and other grid properties."
),
inputSchema={
"type": "object",
"properties": {
"layout_ref": {
"type": "string",
"description": "Layout name to configure"
},
"breakpoints": {
"type": "object",
"description": (
"Breakpoint config: {xs: {cols, spacing}, sm: {...}, md: {...}, lg: {...}, xl: {...}}"
)
},
"mobile_first": {
"type": "boolean",
"description": "Use mobile-first (min-width) media queries (default: true)"
}
},
"required": ["layout_ref", "breakpoints"]
}
))
# Theme tools (Issue #175)
tools.append(Tool(
name="theme_create",
description=(
"Create a new design theme with tokens. "
"Tokens include colors, spacing, typography, radii. "
"Missing tokens are filled from defaults."
),
inputSchema={
"type": "object",
"properties": {
"name": {
"type": "string",
"description": "Unique theme name"
},
"tokens": {
"type": "object",
"description": (
"Design tokens: colors (primary, background, text), "
"spacing (xs-xl), typography (fontFamily, fontSize), radii (sm-xl)"
)
}
},
"required": ["name", "tokens"]
}
))
tools.append(Tool(
name="theme_extend",
description=(
"Create a new theme by extending an existing one. "
"Only specify the tokens you want to override."
),
inputSchema={
"type": "object",
"properties": {
"base_theme": {
"type": "string",
"description": "Theme to extend (e.g., 'default')"
},
"overrides": {
"type": "object",
"description": "Token overrides to apply"
},
"new_name": {
"type": "string",
"description": "Name for the new theme (optional)"
}
},
"required": ["base_theme", "overrides"]
}
))
tools.append(Tool(
name="theme_validate",
description=(
"Validate a theme for completeness. "
"Checks for required tokens and common issues."
),
inputSchema={
"type": "object",
"properties": {
"theme_name": {
"type": "string",
"description": "Theme to validate"
}
},
"required": ["theme_name"]
}
))
tools.append(Tool(
name="theme_export_css",
description=(
"Export a theme as CSS custom properties. "
"Generates :root CSS variables for all tokens."
),
inputSchema={
"type": "object",
"properties": {
"theme_name": {
"type": "string",
"description": "Theme to export"
}
},
"required": ["theme_name"]
}
))
# Page tools (Issue #176)
tools.append(Tool(
name="page_create",
description=(
"Create a new page for a multi-page Dash application. "
"Defines page routing and can link to a layout."
),
inputSchema={
"type": "object",
"properties": {
"name": {
"type": "string",
"description": "Unique page name (identifier)"
},
"path": {
"type": "string",
"description": "URL path (e.g., '/', '/settings')"
},
"layout_ref": {
"type": "string",
"description": "Optional layout reference to use"
},
"title": {
"type": "string",
"description": "Page title (defaults to name)"
}
},
"required": ["name", "path"]
}
))
tools.append(Tool(
name="page_add_navbar",
description=(
"Generate navigation component linking pages. "
"Creates top or side navigation with DMC components."
),
inputSchema={
"type": "object",
"properties": {
"pages": {
"type": "array",
"items": {"type": "string"},
"description": "List of page names to include"
},
"options": {
"type": "object",
"description": (
"Navigation options: position (top|left|right), "
"brand (app name), collapsible (bool)"
)
}
},
"required": ["pages"]
}
))
tools.append(Tool(
name="page_set_auth",
description=(
"Configure authentication for a page. "
"Sets auth requirements, roles, and redirect behavior."
),
inputSchema={
"type": "object",
"properties": {
"page_ref": {
"type": "string",
"description": "Page name to configure"
},
"auth_config": {
"type": "object",
"description": (
"Auth config: type (none|basic|oauth|custom), "
"required (bool), roles (array), redirect (path)"
)
}
},
"required": ["page_ref", "auth_config"]
}
))
# Accessibility tools (Issue #248)
tools.append(Tool(
name="accessibility_validate_colors",
description=(
"Validate colors for color blind accessibility. "
"Checks contrast ratios for deuteranopia, protanopia, tritanopia. "
"Returns issues, simulations, and accessible palette suggestions."
),
inputSchema={
"type": "object",
"properties": {
"colors": {
"type": "array",
"items": {"type": "string"},
"description": "List of hex colors to validate (e.g., ['#228be6', '#40c057'])"
},
"check_types": {
"type": "array",
"items": {"type": "string"},
"description": "Color blindness types to check: deuteranopia, protanopia, tritanopia (default: all)"
},
"min_contrast_ratio": {
"type": "number",
"description": "Minimum WCAG contrast ratio (default: 4.5 for AA)"
}
},
"required": ["colors"]
}
))
tools.append(Tool(
name="accessibility_validate_theme",
description=(
"Validate a theme's colors for accessibility. "
"Extracts all colors from theme tokens and checks for color blind safety."
),
inputSchema={
"type": "object",
"properties": {
"theme_name": {
"type": "string",
"description": "Theme name to validate"
}
},
"required": ["theme_name"]
}
))
tools.append(Tool(
name="accessibility_suggest_alternative",
description=(
"Suggest accessible alternative colors for a given color. "
"Provides alternatives optimized for specific color blindness types."
),
inputSchema={
"type": "object",
"properties": {
"color": {
"type": "string",
"description": "Hex color to find alternatives for"
},
"deficiency_type": {
"type": "string",
"enum": ["deuteranopia", "protanopia", "tritanopia"],
"description": "Color blindness type to optimize for"
}
},
"required": ["color", "deficiency_type"]
}
))
return tools
@self.server.call_tool()
async def call_tool(name: str, arguments: dict) -> list[TextContent]:
"""Handle tool invocation."""
try:
# DMC validation tools
if name == "list_components":
result = await self.dmc_tools.list_components(
category=arguments.get('category')
)
return [TextContent(
type="text",
text=json.dumps(result, indent=2)
)]
elif name == "get_component_props":
component = arguments.get('component')
if not component:
return [TextContent(
type="text",
text=json.dumps({"error": "component is required"}, indent=2)
)]
result = await self.dmc_tools.get_component_props(component)
return [TextContent(
type="text",
text=json.dumps(result, indent=2)
)]
elif name == "validate_component":
component = arguments.get('component')
props = arguments.get('props', {})
if not component:
return [TextContent(
type="text",
text=json.dumps({"error": "component is required"}, indent=2)
)]
result = await self.dmc_tools.validate_component(component, props)
return [TextContent(
type="text",
text=json.dumps(result, indent=2)
)]
# Chart tools
elif name == "chart_create":
chart_type = arguments.get('chart_type')
data = arguments.get('data', {})
options = arguments.get('options', {})
if not chart_type:
return [TextContent(
type="text",
text=json.dumps({"error": "chart_type is required"}, indent=2)
)]
result = await self.chart_tools.chart_create(chart_type, data, options)
return [TextContent(
type="text",
text=json.dumps(result, indent=2)
)]
elif name == "chart_configure_interaction":
figure = arguments.get('figure')
interactions = arguments.get('interactions', {})
if not figure:
return [TextContent(
type="text",
text=json.dumps({"error": "figure is required"}, indent=2)
)]
result = await self.chart_tools.chart_configure_interaction(figure, interactions)
return [TextContent(
type="text",
text=json.dumps(result, indent=2)
)]
elif name == "chart_export":
figure = arguments.get('figure')
if not figure:
return [TextContent(
type="text",
text=json.dumps({"error": "figure is required"}, indent=2)
)]
result = await self.chart_tools.chart_export(
figure=figure,
format=arguments.get('format', 'png'),
width=arguments.get('width'),
height=arguments.get('height'),
scale=arguments.get('scale', 2.0),
output_path=arguments.get('output_path')
)
return [TextContent(
type="text",
text=json.dumps(result, indent=2)
)]
# Layout tools
elif name == "layout_create":
layout_name = arguments.get('name')
template = arguments.get('template')
if not layout_name:
return [TextContent(
type="text",
text=json.dumps({"error": "name is required"}, indent=2)
)]
result = await self.layout_tools.layout_create(layout_name, template)
return [TextContent(
type="text",
text=json.dumps(result, indent=2)
)]
elif name == "layout_add_filter":
layout_ref = arguments.get('layout_ref')
filter_type = arguments.get('filter_type')
options = arguments.get('options', {})
if not layout_ref or not filter_type:
return [TextContent(
type="text",
text=json.dumps({"error": "layout_ref and filter_type are required"}, indent=2)
)]
result = await self.layout_tools.layout_add_filter(layout_ref, filter_type, options)
return [TextContent(
type="text",
text=json.dumps(result, indent=2)
)]
elif name == "layout_set_grid":
layout_ref = arguments.get('layout_ref')
grid = arguments.get('grid', {})
if not layout_ref:
return [TextContent(
type="text",
text=json.dumps({"error": "layout_ref is required"}, indent=2)
)]
result = await self.layout_tools.layout_set_grid(layout_ref, grid)
return [TextContent(
type="text",
text=json.dumps(result, indent=2)
)]
elif name == "layout_set_breakpoints":
layout_ref = arguments.get('layout_ref')
breakpoints = arguments.get('breakpoints', {})
mobile_first = arguments.get('mobile_first', True)
if not layout_ref:
return [TextContent(
type="text",
text=json.dumps({"error": "layout_ref is required"}, indent=2)
)]
result = await self.layout_tools.layout_set_breakpoints(
layout_ref, breakpoints, mobile_first
)
return [TextContent(
type="text",
text=json.dumps(result, indent=2)
)]
# Theme tools
elif name == "theme_create":
theme_name = arguments.get('name')
tokens = arguments.get('tokens', {})
if not theme_name:
return [TextContent(
type="text",
text=json.dumps({"error": "name is required"}, indent=2)
)]
result = await self.theme_tools.theme_create(theme_name, tokens)
return [TextContent(
type="text",
text=json.dumps(result, indent=2)
)]
elif name == "theme_extend":
base_theme = arguments.get('base_theme')
overrides = arguments.get('overrides', {})
new_name = arguments.get('new_name')
if not base_theme:
return [TextContent(
type="text",
text=json.dumps({"error": "base_theme is required"}, indent=2)
)]
result = await self.theme_tools.theme_extend(base_theme, overrides, new_name)
return [TextContent(
type="text",
text=json.dumps(result, indent=2)
)]
elif name == "theme_validate":
theme_name = arguments.get('theme_name')
if not theme_name:
return [TextContent(
type="text",
text=json.dumps({"error": "theme_name is required"}, indent=2)
)]
result = await self.theme_tools.theme_validate(theme_name)
return [TextContent(
type="text",
text=json.dumps(result, indent=2)
)]
elif name == "theme_export_css":
theme_name = arguments.get('theme_name')
if not theme_name:
return [TextContent(
type="text",
text=json.dumps({"error": "theme_name is required"}, indent=2)
)]
result = await self.theme_tools.theme_export_css(theme_name)
return [TextContent(
type="text",
text=json.dumps(result, indent=2)
)]
# Page tools
elif name == "page_create":
page_name = arguments.get('name')
path = arguments.get('path')
layout_ref = arguments.get('layout_ref')
title = arguments.get('title')
if not page_name or not path:
return [TextContent(
type="text",
text=json.dumps({"error": "name and path are required"}, indent=2)
)]
result = await self.page_tools.page_create(page_name, path, layout_ref, title)
return [TextContent(
type="text",
text=json.dumps(result, indent=2)
)]
elif name == "page_add_navbar":
pages = arguments.get('pages', [])
options = arguments.get('options', {})
if not pages:
return [TextContent(
type="text",
text=json.dumps({"error": "pages list is required"}, indent=2)
)]
result = await self.page_tools.page_add_navbar(pages, options)
return [TextContent(
type="text",
text=json.dumps(result, indent=2)
)]
elif name == "page_set_auth":
page_ref = arguments.get('page_ref')
auth_config = arguments.get('auth_config', {})
if not page_ref:
return [TextContent(
type="text",
text=json.dumps({"error": "page_ref is required"}, indent=2)
)]
result = await self.page_tools.page_set_auth(page_ref, auth_config)
return [TextContent(
type="text",
text=json.dumps(result, indent=2)
)]
# Accessibility tools
elif name == "accessibility_validate_colors":
colors = arguments.get('colors')
if not colors:
return [TextContent(
type="text",
text=json.dumps({"error": "colors list is required"}, indent=2)
)]
result = await self.accessibility_tools.accessibility_validate_colors(
colors=colors,
check_types=arguments.get('check_types'),
min_contrast_ratio=arguments.get('min_contrast_ratio', 4.5)
)
return [TextContent(
type="text",
text=json.dumps(result, indent=2)
)]
elif name == "accessibility_validate_theme":
theme_name = arguments.get('theme_name')
if not theme_name:
return [TextContent(
type="text",
text=json.dumps({"error": "theme_name is required"}, indent=2)
)]
result = await self.accessibility_tools.accessibility_validate_theme(theme_name)
return [TextContent(
type="text",
text=json.dumps(result, indent=2)
)]
elif name == "accessibility_suggest_alternative":
color = arguments.get('color')
deficiency_type = arguments.get('deficiency_type')
if not color or not deficiency_type:
return [TextContent(
type="text",
text=json.dumps({"error": "color and deficiency_type are required"}, indent=2)
)]
result = await self.accessibility_tools.accessibility_suggest_alternative(
color, deficiency_type
)
return [TextContent(
type="text",
text=json.dumps(result, indent=2)
)]
raise ValueError(f"Unknown tool: {name}")
except Exception as e:
logger.error(f"Tool {name} failed: {e}")
return [TextContent(
type="text",
text=json.dumps({"error": str(e)}, indent=2)
)]
async def run(self):
"""Run the MCP server"""
await self.initialize()
self.setup_tools()
async with stdio_server() as (read_stream, write_stream):
await self.server.run(
read_stream,
write_stream,
self.server.create_initialization_options()
)
async def main():
"""Main entry point"""
server = VizPlatformMCPServer()
await server.run()
if __name__ == "__main__":
asyncio.run(main())
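A quick local smoke test for the tool classes (a hypothetical sketch that bypasses the JSON-RPC transport; it assumes the mcp_server package layout declared in pyproject.toml):

import asyncio
from mcp_server.chart_tools import ChartTools

async def smoke():
    charts = ChartTools()
    result = await charts.chart_create(
        "line",
        {"x": [1, 2, 3], "y": [10, 20, 15]},
        {"title": "Smoke test", "x_label": "step", "y_label": "value"},
    )
    # chart_create returns a plain dict (the server json.dumps-es it),
    # so listing the top-level keys is enough to confirm the wiring.
    print(sorted(result))

asyncio.run(smoke())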

View File

@@ -1,259 +0,0 @@
"""
Theme storage and persistence for viz-platform.
Handles saving/loading themes from user and project locations.
"""
import copy
import json
import logging
from pathlib import Path
from typing import Dict, List, Optional, Any
logger = logging.getLogger(__name__)
# Default theme based on Mantine defaults
DEFAULT_THEME = {
"name": "default",
"version": "1.0.0",
"tokens": {
"colors": {
"primary": "#228be6",
"secondary": "#868e96",
"success": "#40c057",
"warning": "#fab005",
"error": "#fa5252",
"info": "#15aabf",
"background": {
"base": "#ffffff",
"subtle": "#f8f9fa",
"dark": "#212529"
},
"text": {
"primary": "#212529",
"secondary": "#495057",
"muted": "#868e96",
"inverse": "#ffffff"
},
"border": "#dee2e6"
},
"spacing": {
"xs": "4px",
"sm": "8px",
"md": "16px",
"lg": "24px",
"xl": "32px"
},
"typography": {
"fontFamily": "Inter, -apple-system, BlinkMacSystemFont, Segoe UI, Roboto, sans-serif",
"fontFamilyMono": "ui-monospace, SFMono-Regular, Menlo, Monaco, monospace",
"fontSize": {
"xs": "12px",
"sm": "14px",
"md": "16px",
"lg": "18px",
"xl": "20px"
},
"fontWeight": {
"normal": 400,
"medium": 500,
"semibold": 600,
"bold": 700
},
"lineHeight": {
"tight": 1.25,
"normal": 1.5,
"relaxed": 1.75
}
},
"radii": {
"none": "0px",
"sm": "4px",
"md": "8px",
"lg": "16px",
"xl": "24px",
"full": "9999px"
},
"shadows": {
"none": "none",
"sm": "0 1px 2px 0 rgb(0 0 0 / 0.05)",
"md": "0 4px 6px -1px rgb(0 0 0 / 0.1)",
"lg": "0 10px 15px -3px rgb(0 0 0 / 0.1)",
"xl": "0 20px 25px -5px rgb(0 0 0 / 0.1)"
},
"transitions": {
"fast": "150ms",
"normal": "300ms",
"slow": "500ms"
}
}
}
# Required token categories for validation
REQUIRED_TOKEN_CATEGORIES = ["colors", "spacing", "typography", "radii"]
class ThemeStore:
"""
Store and manage design themes.
Handles persistence to user-level and project-level locations.
"""
def __init__(self, project_dir: Optional[Path] = None):
"""
Initialize theme store.
Args:
project_dir: Project directory for project-level themes
"""
self.project_dir = project_dir
self._themes: Dict[str, Dict[str, Any]] = {}
self._active_theme: Optional[str] = None
# Load default theme
self._themes["default"] = DEFAULT_THEME.copy()
@property
def user_themes_dir(self) -> Path:
"""User-level themes directory."""
return Path.home() / ".config" / "claude" / "themes"
@property
def project_themes_dir(self) -> Optional[Path]:
"""Project-level themes directory."""
if self.project_dir:
return self.project_dir / ".viz-platform" / "themes"
return None
def load_themes(self) -> int:
"""
Load themes from user and project directories.
Project themes take precedence over user themes.
Returns:
Number of themes loaded
"""
count = 0
# Load user themes
if self.user_themes_dir.exists():
for theme_file in self.user_themes_dir.glob("*.json"):
try:
with open(theme_file, 'r') as f:
theme = json.load(f)
name = theme.get('name', theme_file.stem)
self._themes[name] = theme
count += 1
logger.debug(f"Loaded user theme: {name}")
except Exception as e:
logger.warning(f"Failed to load theme {theme_file}: {e}")
# Load project themes (override user themes)
if self.project_themes_dir and self.project_themes_dir.exists():
for theme_file in self.project_themes_dir.glob("*.json"):
try:
with open(theme_file, 'r') as f:
theme = json.load(f)
name = theme.get('name', theme_file.stem)
self._themes[name] = theme
count += 1
logger.debug(f"Loaded project theme: {name}")
except Exception as e:
logger.warning(f"Failed to load theme {theme_file}: {e}")
return count
def save_theme(
self,
theme: Dict[str, Any],
location: str = "project"
) -> Path:
"""
Save a theme to disk.
Args:
theme: Theme dict to save
location: "user" or "project"
Returns:
Path where theme was saved
"""
name = theme.get('name', 'unnamed')
if location == "user":
target_dir = self.user_themes_dir
else:
target_dir = self.project_themes_dir
if not target_dir:
target_dir = self.user_themes_dir
target_dir.mkdir(parents=True, exist_ok=True)
theme_path = target_dir / f"{name}.json"
with open(theme_path, 'w') as f:
json.dump(theme, f, indent=2)
# Update in-memory store
self._themes[name] = theme
return theme_path
def get_theme(self, name: str) -> Optional[Dict[str, Any]]:
"""Get a theme by name."""
return self._themes.get(name)
def list_themes(self) -> List[str]:
"""List all available theme names."""
return list(self._themes.keys())
def set_active_theme(self, name: str) -> bool:
"""
Set the active theme.
Args:
name: Theme name to activate
Returns:
True if theme was activated
"""
if name in self._themes:
self._active_theme = name
return True
return False
def get_active_theme(self) -> Optional[Dict[str, Any]]:
"""Get the currently active theme."""
if self._active_theme:
return self._themes.get(self._active_theme)
return None
def delete_theme(self, name: str) -> bool:
"""
Delete a theme.
Args:
name: Theme name to delete
Returns:
True if theme was deleted
"""
if name == "default":
return False # Cannot delete default theme
if name in self._themes:
del self._themes[name]
# Remove file if exists
for themes_dir in [self.user_themes_dir, self.project_themes_dir]:
if themes_dir and themes_dir.exists():
theme_path = themes_dir / f"{name}.json"
if theme_path.exists():
theme_path.unlink()
if self._active_theme == name:
self._active_theme = None
return True
return False
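A short usage sketch for ThemeStore (hypothetical driver code; the import path mcp_server.theme_store is an assumption):

from pathlib import Path
from mcp_server.theme_store import DEFAULT_THEME, ThemeStore

store = ThemeStore(project_dir=Path("."))
print(store.load_themes(), "themes loaded from disk")
# Derive a theme from the bundled default and persist it project-locally
dark = {**DEFAULT_THEME, "name": "dark"}
saved_to = store.save_theme(dark, location="project")  # ./.viz-platform/themes/dark.json
store.set_active_theme("dark")
print(saved_to, store.list_themes())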

View File

@@ -1,391 +0,0 @@
"""
Theme management tools for viz-platform.
Provides design token-based theming system for consistent visual styling.
"""
import copy
import logging
from typing import Dict, List, Optional, Any
from .theme_store import ThemeStore, DEFAULT_THEME, REQUIRED_TOKEN_CATEGORIES
logger = logging.getLogger(__name__)
class ThemeTools:
"""
Design token-based theming tools.
Creates and manages themes that integrate with DMC and Plotly.
"""
def __init__(self, store: Optional[ThemeStore] = None):
"""
Initialize theme tools.
Args:
store: Optional ThemeStore for persistence
"""
self.store = store or ThemeStore()
async def theme_create(
self,
name: str,
tokens: Dict[str, Any]
) -> Dict[str, Any]:
"""
Create a new theme with design tokens.
Args:
name: Unique theme name
tokens: Design tokens dict with colors, spacing, typography, radii
Returns:
Dict with:
- name: Theme name
- tokens: Full token set (merged with defaults)
- validation: Validation results
"""
# Check for name collision
if self.store.get_theme(name) and name != "default":
return {
"error": f"Theme '{name}' already exists. Use theme_extend to modify it.",
"name": name
}
# Start with default tokens and merge provided ones
theme_tokens = copy.deepcopy(DEFAULT_THEME["tokens"])
theme_tokens = self._deep_merge(theme_tokens, tokens)
# Create theme object
theme = {
"name": name,
"version": "1.0.0",
"tokens": theme_tokens
}
# Validate the theme
validation = self._validate_tokens(theme_tokens)
# Save to store
self.store._themes[name] = theme
return {
"name": name,
"tokens": theme_tokens,
"validation": validation,
"complete": validation["complete"]
}
async def theme_extend(
self,
base_theme: str,
overrides: Dict[str, Any],
new_name: Optional[str] = None
) -> Dict[str, Any]:
"""
Create a new theme by extending an existing one.
Args:
base_theme: Name of theme to extend
overrides: Token overrides to apply
new_name: Optional name for the new theme (defaults to base_theme_extended)
Returns:
Dict with the new theme or error
"""
# Get base theme
base = self.store.get_theme(base_theme)
if not base:
available = self.store.list_themes()
return {
"error": f"Base theme '{base_theme}' not found. Available: {available}",
"name": None
}
# Determine new name
name = new_name or f"{base_theme}_extended"
# Check for collision
if self.store.get_theme(name) and name != base_theme:
return {
"error": f"Theme '{name}' already exists. Choose a different name.",
"name": name
}
# Merge tokens
theme_tokens = copy.deepcopy(base.get("tokens", {}))
theme_tokens = self._deep_merge(theme_tokens, overrides)
# Create theme
theme = {
"name": name,
"version": "1.0.0",
"extends": base_theme,
"tokens": theme_tokens
}
# Validate
validation = self._validate_tokens(theme_tokens)
# Save to store
self.store._themes[name] = theme
return {
"name": name,
"extends": base_theme,
"tokens": theme_tokens,
"validation": validation,
"complete": validation["complete"]
}
async def theme_validate(self, theme_name: str) -> Dict[str, Any]:
"""
Validate a theme for completeness.
Args:
theme_name: Theme name to validate
Returns:
Dict with:
- valid: bool
- complete: bool (all optional tokens present)
- missing: List of missing required tokens
- warnings: List of warnings
"""
theme = self.store.get_theme(theme_name)
if not theme:
available = self.store.list_themes()
return {
"error": f"Theme '{theme_name}' not found. Available: {available}",
"valid": False
}
tokens = theme.get("tokens", {})
validation = self._validate_tokens(tokens)
return {
"theme_name": theme_name,
"valid": validation["valid"],
"complete": validation["complete"],
"missing_required": validation["missing_required"],
"missing_optional": validation["missing_optional"],
"warnings": validation["warnings"]
}
async def theme_export_css(self, theme_name: str) -> Dict[str, Any]:
"""
Export a theme as CSS custom properties.
Args:
theme_name: Theme name to export
Returns:
Dict with:
- css: CSS custom properties string
- variables: List of variable names
"""
theme = self.store.get_theme(theme_name)
if not theme:
available = self.store.list_themes()
return {
"error": f"Theme '{theme_name}' not found. Available: {available}",
"css": None
}
tokens = theme.get("tokens", {})
css_vars = []
var_names = []
# Convert tokens to CSS custom properties
css_vars.append(f"/* Theme: {theme_name} */")
css_vars.append(":root {")
# Colors
colors = tokens.get("colors", {})
css_vars.append(" /* Colors */")
for key, value in self._flatten_tokens(colors, "color").items():
var_name = f"--{key}"
css_vars.append(f" {var_name}: {value};")
var_names.append(var_name)
# Spacing
spacing = tokens.get("spacing", {})
css_vars.append("\n /* Spacing */")
for key, value in spacing.items():
var_name = f"--spacing-{key}"
css_vars.append(f" {var_name}: {value};")
var_names.append(var_name)
# Typography
typography = tokens.get("typography", {})
css_vars.append("\n /* Typography */")
for key, value in self._flatten_tokens(typography, "font").items():
var_name = f"--{key}"
css_vars.append(f" {var_name}: {value};")
var_names.append(var_name)
# Radii
radii = tokens.get("radii", {})
css_vars.append("\n /* Border Radius */")
for key, value in radii.items():
var_name = f"--radius-{key}"
css_vars.append(f" {var_name}: {value};")
var_names.append(var_name)
# Shadows
shadows = tokens.get("shadows", {})
if shadows:
css_vars.append("\n /* Shadows */")
for key, value in shadows.items():
var_name = f"--shadow-{key}"
css_vars.append(f" {var_name}: {value};")
var_names.append(var_name)
# Transitions
transitions = tokens.get("transitions", {})
if transitions:
css_vars.append("\n /* Transitions */")
for key, value in transitions.items():
var_name = f"--transition-{key}"
css_vars.append(f" {var_name}: {value};")
var_names.append(var_name)
css_vars.append("}")
css_content = "\n".join(css_vars)
return {
"theme_name": theme_name,
"css": css_content,
"variable_count": len(var_names),
"variables": var_names
}
async def theme_list(self) -> Dict[str, Any]:
"""
List all available themes.
Returns:
Dict with theme names and active theme
"""
themes = self.store.list_themes()
active = self.store._active_theme
theme_info = {}
for name in themes:
theme = self.store.get_theme(name)
theme_info[name] = {
"extends": theme.get("extends"),
"version": theme.get("version", "1.0.0")
}
return {
"themes": theme_info,
"active_theme": active,
"count": len(themes)
}
async def theme_activate(self, theme_name: str) -> Dict[str, Any]:
"""
Set the active theme.
Args:
theme_name: Theme to activate
Returns:
Dict with activation status
"""
if self.store.set_active_theme(theme_name):
return {
"active_theme": theme_name,
"success": True
}
return {
"error": f"Theme '{theme_name}' not found.",
"success": False
}
def _validate_tokens(self, tokens: Dict[str, Any]) -> Dict[str, Any]:
"""Validate token structure and completeness."""
missing_required = []
missing_optional = []
warnings = []
# Check required categories
for category in REQUIRED_TOKEN_CATEGORIES:
if category not in tokens:
missing_required.append(category)
# Check colors structure
colors = tokens.get("colors", {})
required_colors = ["primary", "background", "text"]
for color in required_colors:
if color not in colors:
missing_required.append(f"colors.{color}")
# Check spacing
spacing = tokens.get("spacing", {})
required_spacing = ["xs", "sm", "md", "lg"]
for size in required_spacing:
if size not in spacing:
missing_optional.append(f"spacing.{size}")
# Check typography
typography = tokens.get("typography", {})
if "fontFamily" not in typography:
missing_optional.append("typography.fontFamily")
if "fontSize" not in typography:
missing_optional.append("typography.fontSize")
# Check radii
radii = tokens.get("radii", {})
if "sm" not in radii and "md" not in radii:
missing_optional.append("radii.sm or radii.md")
# Warnings for common issues
if "shadows" not in tokens:
warnings.append("No shadows defined - components may have no elevation")
if "transitions" not in tokens:
warnings.append("No transitions defined - animations will use defaults")
return {
"valid": len(missing_required) == 0,
"complete": len(missing_required) == 0 and len(missing_optional) == 0,
"missing_required": missing_required,
"missing_optional": missing_optional,
"warnings": warnings
}
def _deep_merge(
self,
base: Dict[str, Any],
override: Dict[str, Any]
) -> Dict[str, Any]:
"""Deep merge two dictionaries."""
result = copy.deepcopy(base)
for key, value in override.items():
if key in result and isinstance(result[key], dict) and isinstance(value, dict):
result[key] = self._deep_merge(result[key], value)
else:
result[key] = value
return result
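    # Worked example (illustrative): merging the override
    #   {"colors": {"primary": "#000000"}}
    # into a base of
    #   {"colors": {"primary": "#228be6", "border": "#dee2e6"}, "spacing": {...}}
    # recurses into the shared "colors" dict, so "border" and "spacing" survive
    # and only colors.primary is replaced -- unlike dict.update(), which would
    # drop the rest of the "colors" subtree.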
def _flatten_tokens(
self,
tokens: Dict[str, Any],
prefix: str
) -> Dict[str, str]:
"""Flatten nested token dict for CSS export."""
result = {}
for key, value in tokens.items():
if isinstance(value, dict):
nested = self._flatten_tokens(value, f"{prefix}-{key}")
result.update(nested)
else:
result[f"{prefix}-{key}"] = str(value)
return result
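An end-to-end sketch of the theming workflow (hypothetical, not from the original file; it assumes the import path mcp_server.theme_tools):

import asyncio
from mcp_server.theme_tools import ThemeTools

async def demo():
    tools = ThemeTools()
    dark = await tools.theme_extend(
        "default",
        {"colors": {"primary": "#1a1b1e", "background": {"base": "#141517"}}},
        new_name="dark",
    )
    print(dark["validation"])  # required tokens are inherited from default, so this passes
    css = await tools.theme_export_css("dark")
    print(css["variable_count"], "CSS custom properties generated")

asyncio.run(demo())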

View File

@@ -1,45 +0,0 @@
[build-system]
requires = ["setuptools>=61.0", "wheel"]
build-backend = "setuptools.build_meta"
[project]
name = "viz-platform-mcp"
version = "1.0.0"
description = "MCP Server for visualization with Dash Mantine Components validation and theming"
readme = "README.md"
license = {text = "MIT"}
requires-python = ">=3.10"
authors = [
{name = "Leo Miranda"}
]
classifiers = [
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
]
dependencies = [
"mcp>=0.9.0",
"plotly>=5.18.0",
"dash>=2.14.0",
"dash-mantine-components>=2.0.0",
"python-dotenv>=1.0.0",
"pydantic>=2.5.0",
]
[project.optional-dependencies]
dev = [
"pytest>=7.4.3",
"pytest-asyncio>=0.23.0",
]
[tool.setuptools.packages.find]
where = ["."]
include = ["mcp_server*"]
[tool.pytest.ini_options]
asyncio_mode = "auto"
testpaths = ["tests"]

View File

@@ -1,668 +0,0 @@
{
"version": "2.5.1",
"generated": "2026-01-26",
"mantine_version": "7.x",
"categories": {
"buttons": ["Button", "ButtonGroup", "ActionIcon", "ActionIconGroup", "CopyButton", "CloseButton", "UnstyledButton"],
"inputs": [
"TextInput", "PasswordInput", "NumberInput", "Textarea", "Select", "MultiSelect",
"Checkbox", "CheckboxGroup", "CheckboxCard", "Switch", "Radio", "RadioGroup", "RadioCard",
"Slider", "RangeSlider", "ColorInput", "ColorPicker", "Autocomplete", "TagsInput",
"PinInput", "Rating", "SegmentedControl", "Chip", "ChipGroup", "JsonInput",
"NativeSelect", "FileInput", "Combobox"
],
"navigation": ["Anchor", "Breadcrumbs", "Burger", "NavLink", "Pagination", "Stepper", "Tabs", "TabsList", "TabsTab", "TabsPanel"],
"feedback": ["Alert", "Loader", "Notification", "NotificationContainer", "Progress", "RingProgress", "Skeleton"],
"overlays": ["Modal", "Drawer", "DrawerStack", "Popover", "HoverCard", "Tooltip", "FloatingTooltip", "Menu", "MenuTarget", "MenuDropdown", "MenuItem", "Affix"],
"typography": ["Text", "Title", "Highlight", "Mark", "Code", "CodeHighlight", "Blockquote", "List", "ListItem", "Kbd"],
"layout": [
"AppShell", "AppShellHeader", "AppShellNavbar", "AppShellAside", "AppShellFooter", "AppShellMain", "AppShellSection",
"Container", "Center", "Stack", "Group", "Flex", "Grid", "GridCol", "SimpleGrid",
"Paper", "Card", "CardSection", "Box", "Space", "Divider", "AspectRatio", "ScrollArea"
],
"data_display": [
"Accordion", "AccordionItem", "AccordionControl", "AccordionPanel",
"Avatar", "AvatarGroup", "Badge", "Image", "BackgroundImage",
"Indicator", "Spoiler", "Table", "ThemeIcon", "Timeline", "TimelineItem", "Tree"
],
"charts": ["AreaChart", "BarChart", "LineChart", "PieChart", "DonutChart", "RadarChart", "ScatterChart", "BubbleChart", "CompositeChart", "Sparkline"],
"dates": ["DatePicker", "DateTimePicker", "DateInput", "DatePickerInput", "MonthPicker", "YearPicker", "TimePicker", "TimeInput", "Calendar", "MiniCalendar", "DatesProvider"]
},
"components": {
"Button": {
"description": "Button component for user interactions",
"props": {
"children": {"type": "any", "description": "Button content"},
"variant": {"type": "string", "enum": ["filled", "light", "outline", "transparent", "white", "subtle", "default", "gradient"], "default": "filled"},
"color": {"type": "string", "default": "blue", "description": "Key of theme.colors or CSS color"},
"size": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl", "compact-xs", "compact-sm", "compact-md", "compact-lg", "compact-xl"], "default": "sm"},
"radius": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "sm"},
"disabled": {"type": "boolean", "default": false},
"loading": {"type": "boolean", "default": false},
"loaderProps": {"type": "object"},
"leftSection": {"type": "any", "description": "Content on the left side of label"},
"rightSection": {"type": "any", "description": "Content on the right side of label"},
"fullWidth": {"type": "boolean", "default": false},
"gradient": {"type": "object", "description": "Gradient for gradient variant"},
"justify": {"type": "string", "enum": ["center", "start", "end", "space-between"], "default": "center"},
"autoContrast": {"type": "boolean", "default": false},
"n_clicks": {"type": "integer", "default": 0, "description": "Dash callback trigger"}
}
},
"ActionIcon": {
"description": "Icon button without text label",
"props": {
"children": {"type": "any", "required": true, "description": "Icon element"},
"variant": {"type": "string", "enum": ["filled", "light", "outline", "transparent", "white", "subtle", "default", "gradient"], "default": "subtle"},
"color": {"type": "string", "default": "gray"},
"size": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "md"},
"radius": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "sm"},
"disabled": {"type": "boolean", "default": false},
"loading": {"type": "boolean", "default": false},
"autoContrast": {"type": "boolean", "default": false},
"n_clicks": {"type": "integer", "default": 0}
}
},
"TextInput": {
"description": "Text input field",
"props": {
"value": {"type": "string", "default": ""},
"placeholder": {"type": "string"},
"label": {"type": "any"},
"description": {"type": "any"},
"error": {"type": "any"},
"disabled": {"type": "boolean", "default": false},
"required": {"type": "boolean", "default": false},
"size": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "sm"},
"radius": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "sm"},
"variant": {"type": "string", "enum": ["default", "filled", "unstyled"], "default": "default"},
"leftSection": {"type": "any"},
"rightSection": {"type": "any"},
"withAsterisk": {"type": "boolean", "default": false},
"debounce": {"type": "integer", "description": "Debounce delay in ms"},
"leftSectionPointerEvents": {"type": "string", "enum": ["none", "all"], "default": "none"},
"rightSectionPointerEvents": {"type": "string", "enum": ["none", "all"], "default": "none"}
}
},
"NumberInput": {
"description": "Numeric input with optional controls",
"props": {
"value": {"type": "number"},
"placeholder": {"type": "string"},
"label": {"type": "any"},
"description": {"type": "any"},
"error": {"type": "any"},
"disabled": {"type": "boolean", "default": false},
"required": {"type": "boolean", "default": false},
"min": {"type": "number"},
"max": {"type": "number"},
"step": {"type": "number", "default": 1},
"hideControls": {"type": "boolean", "default": false},
"size": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "sm"},
"radius": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "sm"},
"allowNegative": {"type": "boolean", "default": true},
"allowDecimal": {"type": "boolean", "default": true},
"clampBehavior": {"type": "string", "enum": ["strict", "blur", "none"], "default": "blur"},
"decimalScale": {"type": "integer"},
"fixedDecimalScale": {"type": "boolean", "default": false},
"thousandSeparator": {"type": "string"},
"decimalSeparator": {"type": "string"},
"prefix": {"type": "string"},
"suffix": {"type": "string"}
}
},
"Select": {
"description": "Dropdown select input",
"props": {
"value": {"type": "string"},
"data": {"type": "array", "required": true, "description": "Array of options: strings or {value, label} objects"},
"placeholder": {"type": "string"},
"label": {"type": "any"},
"description": {"type": "any"},
"error": {"type": "any"},
"disabled": {"type": "boolean", "default": false},
"required": {"type": "boolean", "default": false},
"searchable": {"type": "boolean", "default": false},
"clearable": {"type": "boolean", "default": false},
"nothingFoundMessage": {"type": "string"},
"size": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "sm"},
"radius": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "sm"},
"maxDropdownHeight": {"type": "number", "default": 250},
"allowDeselect": {"type": "boolean", "default": true},
"checkIconPosition": {"type": "string", "enum": ["left", "right"], "default": "left"},
"comboboxProps": {"type": "object"},
"withScrollArea": {"type": "boolean", "default": true}
}
},
"MultiSelect": {
"description": "Multiple selection dropdown",
"props": {
"value": {"type": "array", "default": []},
"data": {"type": "array", "required": true},
"placeholder": {"type": "string"},
"label": {"type": "any"},
"description": {"type": "any"},
"error": {"type": "any"},
"disabled": {"type": "boolean", "default": false},
"required": {"type": "boolean", "default": false},
"searchable": {"type": "boolean", "default": false},
"clearable": {"type": "boolean", "default": false},
"maxValues": {"type": "integer"},
"hidePickedOptions": {"type": "boolean", "default": false},
"size": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "sm"},
"radius": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "sm"},
"maxDropdownHeight": {"type": "number", "default": 250},
"withCheckIcon": {"type": "boolean", "default": true}
}
},
"Checkbox": {
"description": "Checkbox input",
"props": {
"checked": {"type": "boolean", "default": false},
"label": {"type": "any"},
"description": {"type": "any"},
"error": {"type": "any"},
"disabled": {"type": "boolean", "default": false},
"indeterminate": {"type": "boolean", "default": false},
"size": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "sm"},
"radius": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "sm"},
"color": {"type": "string", "default": "blue"},
"labelPosition": {"type": "string", "enum": ["left", "right"], "default": "right"},
"autoContrast": {"type": "boolean", "default": false},
"icon": {"type": "any"},
"iconColor": {"type": "string"}
}
},
"Switch": {
"description": "Toggle switch input",
"props": {
"checked": {"type": "boolean", "default": false},
"label": {"type": "any"},
"description": {"type": "any"},
"error": {"type": "any"},
"disabled": {"type": "boolean", "default": false},
"size": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "sm"},
"radius": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "xl"},
"color": {"type": "string", "default": "blue"},
"onLabel": {"type": "any"},
"offLabel": {"type": "any"},
"thumbIcon": {"type": "any"},
"labelPosition": {"type": "string", "enum": ["left", "right"], "default": "right"}
}
},
"Slider": {
"description": "Slider input for numeric values",
"props": {
"value": {"type": "number"},
"min": {"type": "number", "default": 0},
"max": {"type": "number", "default": 100},
"step": {"type": "number", "default": 1},
"label": {"type": "any"},
"disabled": {"type": "boolean", "default": false},
"marks": {"type": "array"},
"size": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "md"},
"radius": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "xl"},
"color": {"type": "string", "default": "blue"},
"showLabelOnHover": {"type": "boolean", "default": true},
"labelAlwaysOn": {"type": "boolean", "default": false},
"thumbLabel": {"type": "string"},
"precision": {"type": "integer", "default": 0},
"inverted": {"type": "boolean", "default": false},
"thumbSize": {"type": "number"},
"restrictToMarks": {"type": "boolean", "default": false}
}
},
"Alert": {
"description": "Alert component for feedback messages",
"props": {
"children": {"type": "any"},
"title": {"type": "any"},
"color": {"type": "string", "default": "blue"},
"variant": {"type": "string", "enum": ["filled", "light", "outline", "default", "transparent", "white"], "default": "light"},
"radius": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "sm"},
"icon": {"type": "any"},
"withCloseButton": {"type": "boolean", "default": false},
"closeButtonLabel": {"type": "string"},
"autoContrast": {"type": "boolean", "default": false}
}
},
"Loader": {
"description": "Loading indicator",
"props": {
"color": {"type": "string", "default": "blue"},
"size": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "md"},
"type": {"type": "string", "enum": ["oval", "bars", "dots"], "default": "oval"}
}
},
"Progress": {
"description": "Progress bar",
"props": {
"value": {"type": "number", "required": true},
"color": {"type": "string", "default": "blue"},
"size": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "md"},
"radius": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "sm"},
"striped": {"type": "boolean", "default": false},
"animated": {"type": "boolean", "default": false},
"autoContrast": {"type": "boolean", "default": false},
"transitionDuration": {"type": "number", "default": 100}
}
},
"Modal": {
"description": "Modal dialog overlay",
"props": {
"children": {"type": "any"},
"opened": {"type": "boolean", "required": true},
"title": {"type": "any"},
"size": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl", "auto"], "default": "md"},
"radius": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "sm"},
"centered": {"type": "boolean", "default": false},
"fullScreen": {"type": "boolean", "default": false},
"withCloseButton": {"type": "boolean", "default": true},
"closeOnClickOutside": {"type": "boolean", "default": true},
"closeOnEscape": {"type": "boolean", "default": true},
"overlayProps": {"type": "object"},
"padding": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "md"},
"transitionProps": {"type": "object"},
"zIndex": {"type": "number", "default": 200},
"trapFocus": {"type": "boolean", "default": true},
"returnFocus": {"type": "boolean", "default": true},
"lockScroll": {"type": "boolean", "default": true}
}
},
"Drawer": {
"description": "Sliding panel drawer",
"props": {
"children": {"type": "any"},
"opened": {"type": "boolean", "required": true},
"title": {"type": "any"},
"position": {"type": "string", "enum": ["left", "right", "top", "bottom"], "default": "left"},
"size": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "md"},
"radius": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"]},
"withCloseButton": {"type": "boolean", "default": true},
"closeOnClickOutside": {"type": "boolean", "default": true},
"closeOnEscape": {"type": "boolean", "default": true},
"overlayProps": {"type": "object"},
"padding": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "md"},
"zIndex": {"type": "number", "default": 200},
"offset": {"type": "number", "default": 0},
"trapFocus": {"type": "boolean", "default": true},
"returnFocus": {"type": "boolean", "default": true},
"lockScroll": {"type": "boolean", "default": true}
}
},
"Tooltip": {
"description": "Tooltip on hover",
"props": {
"children": {"type": "any", "required": true},
"label": {"type": "any", "required": true},
"position": {"type": "string", "enum": ["top", "right", "bottom", "left", "top-start", "top-end", "right-start", "right-end", "bottom-start", "bottom-end", "left-start", "left-end"], "default": "top"},
"color": {"type": "string"},
"radius": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "sm"},
"withArrow": {"type": "boolean", "default": false},
"arrowSize": {"type": "number", "default": 4},
"arrowOffset": {"type": "number", "default": 5},
"offset": {"type": "number", "default": 5},
"multiline": {"type": "boolean", "default": false},
"disabled": {"type": "boolean", "default": false},
"openDelay": {"type": "number", "default": 0},
"closeDelay": {"type": "number", "default": 0},
"transitionProps": {"type": "object"},
"zIndex": {"type": "number", "default": 300}
}
},
"Text": {
"description": "Text component with styling",
"props": {
"children": {"type": "any"},
"size": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"]},
"c": {"type": "string", "description": "Color"},
"fw": {"type": "number", "description": "Font weight"},
"fs": {"type": "string", "enum": ["normal", "italic"], "description": "Font style"},
"td": {"type": "string", "enum": ["none", "underline", "line-through"], "description": "Text decoration"},
"tt": {"type": "string", "enum": ["none", "capitalize", "uppercase", "lowercase"], "description": "Text transform"},
"ta": {"type": "string", "enum": ["left", "center", "right", "justify"], "description": "Text align"},
"lineClamp": {"type": "integer"},
"truncate": {"type": "boolean", "default": false},
"inherit": {"type": "boolean", "default": false},
"gradient": {"type": "object"},
"span": {"type": "boolean", "default": false},
"lh": {"type": "string", "description": "Line height"}
}
},
"Title": {
"description": "Heading component",
"props": {
"children": {"type": "any"},
"order": {"type": "integer", "enum": [1, 2, 3, 4, 5, 6], "default": 1},
"size": {"type": "string"},
"c": {"type": "string", "description": "Color"},
"ta": {"type": "string", "enum": ["left", "center", "right", "justify"]},
"td": {"type": "string", "enum": ["none", "underline", "line-through"]},
"tt": {"type": "string", "enum": ["none", "capitalize", "uppercase", "lowercase"]},
"lineClamp": {"type": "integer"},
"truncate": {"type": "boolean", "default": false}
}
},
"Stack": {
"description": "Vertical stack layout",
"props": {
"children": {"type": "any"},
"gap": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "md"},
"align": {"type": "string", "enum": ["stretch", "center", "flex-start", "flex-end"], "default": "stretch"},
"justify": {"type": "string", "enum": ["flex-start", "flex-end", "center", "space-between", "space-around", "space-evenly"], "default": "flex-start"}
}
},
"Group": {
"description": "Horizontal group layout",
"props": {
"children": {"type": "any"},
"gap": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "md"},
"align": {"type": "string", "enum": ["stretch", "center", "flex-start", "flex-end"], "default": "center"},
"justify": {"type": "string", "enum": ["flex-start", "flex-end", "center", "space-between", "space-around"], "default": "flex-start"},
"grow": {"type": "boolean", "default": false},
"wrap": {"type": "string", "enum": ["wrap", "nowrap", "wrap-reverse"], "default": "wrap"},
"preventGrowOverflow": {"type": "boolean", "default": true}
}
},
"Flex": {
"description": "Flexbox container",
"props": {
"children": {"type": "any"},
"gap": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"]},
"rowGap": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"]},
"columnGap": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"]},
"align": {"type": "string", "enum": ["stretch", "center", "flex-start", "flex-end", "baseline"]},
"justify": {"type": "string", "enum": ["flex-start", "flex-end", "center", "space-between", "space-around", "space-evenly"]},
"wrap": {"type": "string", "enum": ["wrap", "nowrap", "wrap-reverse"], "default": "nowrap"},
"direction": {"type": "string", "enum": ["row", "column", "row-reverse", "column-reverse"], "default": "row"}
}
},
"Grid": {
"description": "Grid layout component",
"props": {
"children": {"type": "any"},
"columns": {"type": "integer", "default": 12},
"gutter": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "md"},
"grow": {"type": "boolean", "default": false},
"justify": {"type": "string", "enum": ["flex-start", "flex-end", "center", "space-between", "space-around"], "default": "flex-start"},
"align": {"type": "string", "enum": ["stretch", "center", "flex-start", "flex-end"], "default": "stretch"},
"overflow": {"type": "string", "enum": ["visible", "hidden"], "default": "visible"}
}
},
"SimpleGrid": {
"description": "Simple grid with equal columns",
"props": {
"children": {"type": "any"},
"cols": {"type": "integer", "default": 1},
"spacing": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "md"},
"verticalSpacing": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"]}
}
},
"Container": {
"description": "Centered container with max-width",
"props": {
"children": {"type": "any"},
"size": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "md"},
"fluid": {"type": "boolean", "default": false}
}
},
"Paper": {
"description": "Paper surface component",
"props": {
"children": {"type": "any"},
"shadow": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"]},
"radius": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "sm"},
"p": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "description": "Padding"},
"withBorder": {"type": "boolean", "default": false}
}
},
"Card": {
"description": "Card container",
"props": {
"children": {"type": "any"},
"shadow": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "sm"},
"radius": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "sm"},
"padding": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "md"},
"withBorder": {"type": "boolean", "default": false}
}
},
"Tabs": {
"description": "Tabbed interface",
"props": {
"children": {"type": "any"},
"value": {"type": "string"},
"defaultValue": {"type": "string"},
"orientation": {"type": "string", "enum": ["horizontal", "vertical"], "default": "horizontal"},
"variant": {"type": "string", "enum": ["default", "outline", "pills"], "default": "default"},
"color": {"type": "string", "default": "blue"},
"radius": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "sm"},
"placement": {"type": "string", "enum": ["left", "right"], "default": "left"},
"grow": {"type": "boolean", "default": false},
"inverted": {"type": "boolean", "default": false},
"keepMounted": {"type": "boolean", "default": true},
"activateTabWithKeyboard": {"type": "boolean", "default": true},
"allowTabDeactivation": {"type": "boolean", "default": false},
"autoContrast": {"type": "boolean", "default": false}
}
},
"Accordion": {
"description": "Collapsible content panels",
"props": {
"children": {"type": "any"},
"value": {"type": "any"},
"defaultValue": {"type": "any"},
"multiple": {"type": "boolean", "default": false},
"variant": {"type": "string", "enum": ["default", "contained", "filled", "separated"], "default": "default"},
"radius": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "sm"},
"chevronPosition": {"type": "string", "enum": ["left", "right"], "default": "right"},
"disableChevronRotation": {"type": "boolean", "default": false},
"transitionDuration": {"type": "number", "default": 200},
"chevronSize": {"type": "any"},
"order": {"type": "integer", "enum": [2, 3, 4, 5, 6]}
}
},
"Badge": {
"description": "Badge for status or labels",
"props": {
"children": {"type": "any"},
"color": {"type": "string", "default": "blue"},
"variant": {"type": "string", "enum": ["filled", "light", "outline", "dot", "gradient", "default", "transparent", "white"], "default": "filled"},
"size": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "md"},
"radius": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "xl"},
"fullWidth": {"type": "boolean", "default": false},
"leftSection": {"type": "any"},
"rightSection": {"type": "any"},
"autoContrast": {"type": "boolean", "default": false},
"circle": {"type": "boolean", "default": false}
}
},
"Avatar": {
"description": "User avatar image",
"props": {
"src": {"type": "string"},
"alt": {"type": "string"},
"children": {"type": "any", "description": "Fallback content"},
"color": {"type": "string", "default": "gray"},
"size": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "md"},
"radius": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "xl"},
"variant": {"type": "string", "enum": ["filled", "light", "outline", "gradient", "default", "transparent", "white"], "default": "filled"},
"autoContrast": {"type": "boolean", "default": false}
}
},
"Image": {
"description": "Image with fallback",
"props": {
"src": {"type": "string"},
"alt": {"type": "string"},
"w": {"type": "any", "description": "Width"},
"h": {"type": "any", "description": "Height"},
"radius": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"]},
"fit": {"type": "string", "enum": ["contain", "cover", "fill", "none", "scale-down"], "default": "cover"},
"fallbackSrc": {"type": "string"}
}
},
"Table": {
"description": "Data table component",
"props": {
"children": {"type": "any"},
"data": {"type": "object", "description": "Table data object with head, body, foot"},
"striped": {"type": "boolean", "default": false},
"highlightOnHover": {"type": "boolean", "default": false},
"withTableBorder": {"type": "boolean", "default": false},
"withColumnBorders": {"type": "boolean", "default": false},
"withRowBorders": {"type": "boolean", "default": true},
"verticalSpacing": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "xs"},
"horizontalSpacing": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "xs"},
"captionSide": {"type": "string", "enum": ["top", "bottom"], "default": "bottom"},
"stickyHeader": {"type": "boolean", "default": false},
"stickyHeaderOffset": {"type": "number", "default": 0}
}
},
"AreaChart": {
"description": "Area chart for time series data",
"props": {
"data": {"type": "array", "required": true},
"dataKey": {"type": "string", "required": true, "description": "X-axis data key"},
"series": {"type": "array", "required": true, "description": "Array of {name, color} objects"},
"h": {"type": "any", "description": "Chart height"},
"w": {"type": "any", "description": "Chart width"},
"curveType": {"type": "string", "enum": ["bump", "linear", "natural", "monotone", "step", "stepBefore", "stepAfter"], "default": "monotone"},
"connectNulls": {"type": "boolean", "default": true},
"withDots": {"type": "boolean", "default": true},
"withGradient": {"type": "boolean", "default": true},
"withLegend": {"type": "boolean", "default": false},
"withTooltip": {"type": "boolean", "default": true},
"withXAxis": {"type": "boolean", "default": true},
"withYAxis": {"type": "boolean", "default": true},
"gridAxis": {"type": "string", "enum": ["x", "y", "xy", "none"], "default": "x"},
"tickLine": {"type": "string", "enum": ["x", "y", "xy", "none"], "default": "y"},
"strokeDasharray": {"type": "string"},
"fillOpacity": {"type": "number", "default": 0.2},
"splitColors": {"type": "array"},
"areaChartProps": {"type": "object"},
"type": {"type": "string", "enum": ["default", "stacked", "percent", "split"], "default": "default"}
}
},
"BarChart": {
"description": "Bar chart for categorical data",
"props": {
"data": {"type": "array", "required": true},
"dataKey": {"type": "string", "required": true},
"series": {"type": "array", "required": true},
"h": {"type": "any"},
"w": {"type": "any"},
"orientation": {"type": "string", "enum": ["horizontal", "vertical"], "default": "vertical"},
"withLegend": {"type": "boolean", "default": false},
"withTooltip": {"type": "boolean", "default": true},
"withXAxis": {"type": "boolean", "default": true},
"withYAxis": {"type": "boolean", "default": true},
"gridAxis": {"type": "string", "enum": ["x", "y", "xy", "none"], "default": "x"},
"tickLine": {"type": "string", "enum": ["x", "y", "xy", "none"], "default": "y"},
"barProps": {"type": "object"},
"type": {"type": "string", "enum": ["default", "stacked", "percent", "waterfall"], "default": "default"}
}
},
"LineChart": {
"description": "Line chart for trends",
"props": {
"data": {"type": "array", "required": true},
"dataKey": {"type": "string", "required": true},
"series": {"type": "array", "required": true},
"h": {"type": "any"},
"w": {"type": "any"},
"curveType": {"type": "string", "enum": ["bump", "linear", "natural", "monotone", "step", "stepBefore", "stepAfter"], "default": "monotone"},
"connectNulls": {"type": "boolean", "default": true},
"withDots": {"type": "boolean", "default": true},
"withLegend": {"type": "boolean", "default": false},
"withTooltip": {"type": "boolean", "default": true},
"withXAxis": {"type": "boolean", "default": true},
"withYAxis": {"type": "boolean", "default": true},
"gridAxis": {"type": "string", "enum": ["x", "y", "xy", "none"], "default": "x"},
"strokeWidth": {"type": "number", "default": 2}
}
},
"PieChart": {
"description": "Pie chart for proportions",
"props": {
"data": {"type": "array", "required": true, "description": "Array of {name, value, color} objects"},
"h": {"type": "any"},
"w": {"type": "any"},
"withLabels": {"type": "boolean", "default": false},
"withLabelsLine": {"type": "boolean", "default": true},
"withTooltip": {"type": "boolean", "default": true},
"labelsPosition": {"type": "string", "enum": ["inside", "outside"], "default": "outside"},
"labelsType": {"type": "string", "enum": ["value", "percent"], "default": "value"},
"strokeWidth": {"type": "number", "default": 1},
"strokeColor": {"type": "string"},
"startAngle": {"type": "number", "default": 0},
"endAngle": {"type": "number", "default": 360}
}
},
"DonutChart": {
"description": "Donut chart (pie with hole)",
"props": {
"data": {"type": "array", "required": true},
"h": {"type": "any"},
"w": {"type": "any"},
"withLabels": {"type": "boolean", "default": false},
"withLabelsLine": {"type": "boolean", "default": true},
"withTooltip": {"type": "boolean", "default": true},
"thickness": {"type": "number", "default": 20},
"chartLabel": {"type": "any"},
"strokeWidth": {"type": "number", "default": 1},
"strokeColor": {"type": "string"},
"startAngle": {"type": "number", "default": 0},
"endAngle": {"type": "number", "default": 360},
"paddingAngle": {"type": "number", "default": 0}
}
},
"DatePicker": {
"description": "Date picker calendar",
"props": {
"value": {"type": "string"},
"type": {"type": "string", "enum": ["default", "range", "multiple"], "default": "default"},
"defaultValue": {"type": "any"},
"allowDeselect": {"type": "boolean", "default": false},
"allowSingleDateInRange": {"type": "boolean", "default": false},
"numberOfColumns": {"type": "integer", "default": 1},
"columnsToScroll": {"type": "integer", "default": 1},
"ariaLabels": {"type": "object"},
"hideOutsideDates": {"type": "boolean", "default": false},
"hideWeekdays": {"type": "boolean", "default": false},
"weekendDays": {"type": "array", "default": [0, 6]},
"renderDay": {"type": "any"},
"minDate": {"type": "string"},
"maxDate": {"type": "string"},
"size": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "sm"}
}
},
"DatePickerInput": {
"description": "Date picker input field",
"props": {
"value": {"type": "string"},
"label": {"type": "any"},
"description": {"type": "any"},
"error": {"type": "any"},
"placeholder": {"type": "string"},
"clearable": {"type": "boolean", "default": false},
"type": {"type": "string", "enum": ["default", "range", "multiple"], "default": "default"},
"valueFormat": {"type": "string", "default": "MMMM D, YYYY"},
"size": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "sm"},
"radius": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "sm"},
"disabled": {"type": "boolean", "default": false},
"required": {"type": "boolean", "default": false},
"minDate": {"type": "string"},
"maxDate": {"type": "string"},
"popoverProps": {"type": "object"},
"dropdownType": {"type": "string", "enum": ["popover", "modal"], "default": "popover"}
}
},
"DatesProvider": {
"description": "Provider for date localization settings",
"props": {
"children": {"type": "any", "required": true},
"settings": {"type": "object", "description": "Locale and formatting settings"}
}
}
}
}
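
For orientation, each entry in this schema mirrors the constructor of the corresponding dash-mantine-components class: prop names, enums, and defaults map one-to-one onto keyword arguments. A minimal sketch of that mapping in Python (the component and prop names below exist in dmc >= 2.0 as pinned in the requirements that follow; the sample data, labels, and layout are hypothetical):

# Sketch only: assumes the schema above is used to construct
# dash-mantine-components (dmc) objects. Component/prop names are real
# dmc API; the data values and app structure are illustrative.
import dash_mantine_components as dmc
from dash import Dash

app = Dash(__name__)

# "Tooltip" entry -> dmc.Tooltip: children and label are the two required
# props; position defaults to "top" exactly as the schema records.
badge = dmc.Tooltip(
    label="Sync status of the last pipeline run",
    position="top",
    withArrow=True,  # schema default is False; overridden here
    children=dmc.Badge("Live", color="blue", variant="filled"),
)

# "AreaChart" entry -> dmc.AreaChart: data, dataKey, and series are the
# three required props; curveType and withGradient keep schema defaults.
chart = dmc.AreaChart(
    h=300,
    dataKey="date",
    data=[
        {"date": "Mar 22", "Apples": 110},
        {"date": "Mar 23", "Apples": 60},
    ],
    series=[{"name": "Apples", "color": "indigo.6"}],
    curveType="monotone",
    withGradient=True,
)

# "Stack" entry -> dmc.Stack with the documented gap="md" default.
app.layout = dmc.MantineProvider(dmc.Stack([badge, chart], gap="md"))

if __name__ == "__main__":
    app.run(debug=True)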

View File

@@ -1,16 +0,0 @@
# MCP SDK
mcp>=0.9.0

# Visualization
plotly>=5.18.0
dash>=2.14.0
dash-mantine-components>=2.0.0
kaleido>=0.2.1  # For chart export (PNG, SVG, PDF)

# Utilities
python-dotenv>=1.0.0
pydantic>=2.5.0

# Testing
pytest>=7.4.3
pytest-asyncio>=0.23.0

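The kaleido pin is what backs the "chart export" comment: plotly delegates static image rendering to it, so no extra configuration is needed beyond installing the package. A minimal sketch of that export path (the figure contents and output filenames are hypothetical):

# Sketch of the export path the kaleido pin enables; write_image() is the
# real plotly API, while the figure data and file names are made up.
import plotly.graph_objects as go

fig = go.Figure(go.Bar(x=["sprint-1", "sprint-2"], y=[14, 21]))
fig.write_image("velocity.png")  # PNG via kaleido
fig.write_image("velocity.svg")  # SVG via kaleido
fig.write_image("velocity.pdf")  # PDF via kaleido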
Some files were not shown because too many files have changed in this diff.