Compare commits
137 Commits
| SHA1 | Author | Date | |
|---|---|---|---|
| a77b8ee123 | |||
| 498dac5230 | |||
| af0b92461a | |||
| 89f0354ccc | |||
| 6a267d074b | |||
| bcde33c7d0 | |||
| ee3268fbe0 | |||
| f6a38ffaa8 | |||
| b13ffce0a0 | |||
| b39e01efd7 | |||
| 98eea5b6f9 | |||
| fe36ed91f2 | |||
| 8c85f9ca5f | |||
| 98df35a33e | |||
| 70d6963d0d | |||
| 54c6694117 | |||
| 2402f88daf | |||
| 1cf1dbefb8 | |||
| dafa8db8bb | |||
| 65e79efb24 | |||
| 5ffc13b635 | |||
| 50bfd20fd4 | |||
| c14f1f46cd | |||
| 52c8371f4a | |||
| f8d6d42150 | |||
| 469487f6ed | |||
| 7a2966367d | |||
| 0466b299a7 | |||
| b34304ed57 | |||
| 96963531fc | |||
| 4c9a7c55ae | |||
| 8a75203251 | |||
| da6e81260e | |||
| e1f1335655 | |||
| b017db83a1 | |||
| bc136fab7e | |||
| 6c24bcbb91 | |||
| 11a05799d3 | |||
| 403271dc0c | |||
| cc4abf67b9 | |||
| 35cf20e02d | |||
| 5209f82efb | |||
| 1f55387e9e | |||
| 32bbca73ba | |||
| 0e6999ea21 | |||
| 0d120bd041 | |||
| 508832dae1 | |||
| 6cf3c1830c | |||
| 0b23a02886 | |||
| 71987ee537 | |||
| b7829dca05 | |||
| 9b0e9a69b1 | |||
| ad0e14d07f | |||
| 7fd5fffedf | |||
| 620173eef6 | |||
| 0fe4f62a30 | |||
| 533810f018 | |||
| 2ee23a39d8 | |||
| 894c85bd54 | |||
| 01809a7367 | |||
| a20f1bfdf8 | |||
| 7879e07815 | |||
| eced0fbd07 | |||
| aa6d7f5866 | |||
| 3e5197779d | |||
| 9206931a3c | |||
| ff3be54f1c | |||
| 1b0f5f4973 | |||
| 8ed0d8f207 | |||
| 007b55916c | |||
| eeef35aa61 | |||
| be2d989899 | |||
| 306143882a | |||
| 0c07820b5a | |||
| d2ad90d5bb | |||
| 642dca7062 | |||
| faafced061 | |||
| c3df0f95e6 | |||
| f714957d83 | |||
| 40af243229 | |||
| 69b71fc7cf | |||
| 5ad207520a | |||
| 78d77c1e0a | |||
| 5cf43d5de2 | |||
| 51ef10633b | |||
| 83094598c5 | |||
| 5da29c8e35 | |||
| 4f3560d121 | |||
| d5e521a759 | |||
| b2c51251f3 | |||
| 71efa1aafa | |||
| aa3ff016e2 | |||
| 4557d2ce40 | |||
| d282a65fc6 | |||
| ad56700059 | |||
| df2f5ebb47 | |||
| feb86b059f | |||
| c23e84f965 | |||
| 195ca5c10c | |||
| 53f1b9662f | |||
| eeffb9e853 | |||
| 6c142a9710 | |||
| f781c6f7b1 | |||
| 8228c20d47 | |||
| 85953d8e1e | |||
| f8b6131bfc | |||
| cd3d4c69f0 | |||
| 7f6e0893dd | |||
| 39105688a5 | |||
| 2a6b3df8e1 | |||
| 0c2fc8c0d9 | |||
| b5144de0cf | |||
| 29c54279a9 | |||
| 178593f355 | |||
| a70df64cae | |||
| 2a2ac5f85e | |||
| e01ba74e84 | |||
| 565540d0ba | |||
| 394c91f8cf | |||
| 89bfd98d9f | |||
| 5c9dd8d6e0 | |||
| 374912b463 | |||
| debb91aa7e | |||
| 40860c172e | |||
| 50ebe83c0a | |||
| 7295345013 | |||
| a2502c708b | |||
| 4ede59e89a | |||
| 3017e4c097 | |||
| de7675a649 | |||
| aa7bb8f1a4 | |||
| 0a8af05f9c | |||
| 04322732bc | |||
| 09d82b310e | |||
| 50b45f4834 | |||
| 39ad0043c6 | |||
| e5ca804692 |
.claude-plugin/marketplace.json

````diff
@@ -1,27 +1,29 @@
 {
-  "name": "claude-code-marketplace",
+  "name": "leo-claude-mktplace",
   "owner": {
     "name": "Leo Miranda",
     "email": "leobmiranda@gmail.com"
   },
   "metadata": {
     "description": "Project management plugins with Gitea and NetBox integrations",
-    "version": "2.3.0"
+    "version": "4.0.0"
   },
   "plugins": [
     {
       "name": "projman",
-      "version": "2.3.0",
+      "version": "3.1.0",
       "description": "Sprint planning and project management with Gitea integration",
       "source": "./plugins/projman",
       "author": {
         "name": "Leo Miranda",
         "email": "leobmiranda@gmail.com"
       },
-      "homepage": "https://gitea.hotserv.cloud/personal-projects/support-claude-mktplace/src/branch/main/plugins/projman/README.md",
-      "repository": "https://gitea.hotserv.cloud/personal-projects/support-claude-mktplace.git",
-      "mcpServers": ["gitea"],
-      "integrationFile": "claude-md-integration.md"
+      "homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/projman/README.md",
+      "repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
+      "mcpServers": ["./.mcp.json"],
+      "category": "development",
+      "tags": ["sprint", "agile", "gitea", "project-management"],
+      "license": "MIT"
     },
     {
       "name": "doc-guardian",
@@ -32,11 +34,12 @@
         "name": "Leo Miranda",
         "email": "leobmiranda@gmail.com"
       },
-      "homepage": "https://gitea.hotserv.cloud/personal-projects/support-claude-mktplace/src/branch/main/plugins/doc-guardian/README.md",
-      "repository": "https://gitea.hotserv.cloud/personal-projects/support-claude-mktplace.git",
-      "mcpServers": [],
-      "integrationFile": "claude-md-integration.md",
-      "hooks": ["PostToolUse", "Stop"]
+      "homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/doc-guardian/README.md",
+      "repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
+      "hooks": ["./hooks/hooks.json"],
+      "category": "productivity",
+      "tags": ["documentation", "drift-detection", "sync"],
+      "license": "MIT"
     },
     {
       "name": "code-sentinel",
@@ -47,11 +50,12 @@
         "name": "Leo Miranda",
         "email": "leobmiranda@gmail.com"
       },
-      "homepage": "https://gitea.hotserv.cloud/personal-projects/support-claude-mktplace/src/branch/main/plugins/code-sentinel/README.md",
-      "repository": "https://gitea.hotserv.cloud/personal-projects/support-claude-mktplace.git",
-      "mcpServers": [],
-      "integrationFile": "claude-md-integration.md",
-      "hooks": ["PreToolUse"]
+      "homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/code-sentinel/README.md",
+      "repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
+      "hooks": ["./hooks/hooks.json"],
+      "category": "security",
+      "tags": ["security-scan", "refactoring", "vulnerabilities"],
+      "license": "MIT"
     },
     {
       "name": "project-hygiene",
@@ -62,11 +66,12 @@
         "name": "Leo Miranda",
         "email": "leobmiranda@gmail.com"
       },
-      "homepage": "https://gitea.hotserv.cloud/personal-projects/support-claude-mktplace/src/branch/main/plugins/project-hygiene/README.md",
-      "repository": "https://gitea.hotserv.cloud/personal-projects/support-claude-mktplace.git",
-      "mcpServers": [],
-      "integrationFile": "claude-md-integration.md",
-      "hooks": ["PostToolUse"]
+      "homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/project-hygiene/README.md",
+      "repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
+      "hooks": ["./hooks/hooks.json"],
+      "category": "productivity",
+      "tags": ["cleanup", "automation", "hygiene"],
+      "license": "MIT"
     },
     {
       "name": "cmdb-assistant",
@@ -77,10 +82,12 @@
         "name": "Leo Miranda",
         "email": "leobmiranda@gmail.com"
       },
-      "homepage": "https://gitea.hotserv.cloud/personal-projects/support-claude-mktplace/src/branch/main/plugins/cmdb-assistant/README.md",
-      "repository": "https://gitea.hotserv.cloud/personal-projects/support-claude-mktplace.git",
-      "mcpServers": ["netbox"],
-      "integrationFile": "claude-md-integration.md"
+      "homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/cmdb-assistant/README.md",
+      "repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
+      "mcpServers": ["./.mcp.json"],
+      "category": "infrastructure",
+      "tags": ["cmdb", "netbox", "dcim", "ipam"],
+      "license": "MIT"
     },
     {
       "name": "claude-config-maintainer",
@@ -91,21 +98,73 @@
         "name": "Leo Miranda",
         "email": "leobmiranda@gmail.com"
       },
-      "homepage": "https://gitea.hotserv.cloud/personal-projects/support-claude-mktplace/src/branch/main/plugins/claude-config-maintainer/README.md",
-      "repository": "https://gitea.hotserv.cloud/personal-projects/support-claude-mktplace.git",
-      "mcpServers": [],
-      "integrationFile": "claude-md-integration.md"
-    }
-  ],
-  "pluginDetection": {
-    "mcpServerMapping": {
-      "gitea": "projman",
-      "netbox": "cmdb-assistant"
+      "homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/claude-config-maintainer/README.md",
+      "repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
+      "category": "development",
+      "tags": ["claude-md", "configuration", "optimization"],
+      "license": "MIT"
     },
-    "hookMapping": {
-      "PostToolUse:Write|Edit": "project-hygiene",
-      "PostToolUse:Write|Edit|MultiEdit": "doc-guardian",
-      "PreToolUse:Write|Edit|MultiEdit": "code-sentinel"
+    {
+      "name": "clarity-assist",
+      "version": "1.0.0",
+      "description": "Prompt optimization and requirement clarification with ND-friendly accommodations",
+      "source": "./plugins/clarity-assist",
+      "author": {
+        "name": "Leo Miranda",
+        "email": "leobmiranda@gmail.com"
+      },
+      "homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/clarity-assist/README.md",
+      "repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
+      "category": "productivity",
+      "tags": ["prompts", "requirements", "clarification", "nd-friendly"],
+      "license": "MIT"
+    },
+    {
+      "name": "git-flow",
+      "version": "1.0.0",
+      "description": "Git workflow automation with intelligent commit messages and branch management",
+      "source": "./plugins/git-flow",
+      "author": {
+        "name": "Leo Miranda",
+        "email": "leobmiranda@gmail.com"
+      },
+      "homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/git-flow/README.md",
+      "repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
+      "category": "development",
+      "tags": ["git", "workflow", "commits", "branching"],
+      "license": "MIT"
+    },
+    {
+      "name": "pr-review",
+      "version": "1.0.0",
+      "description": "Multi-agent pull request review with confidence scoring and actionable feedback",
+      "source": "./plugins/pr-review",
+      "author": {
+        "name": "Leo Miranda",
+        "email": "leobmiranda@gmail.com"
+      },
+      "homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/pr-review/README.md",
+      "repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
+      "mcpServers": ["./.mcp.json"],
+      "category": "development",
+      "tags": ["code-review", "pull-requests", "security", "quality"],
+      "license": "MIT"
+    },
+    {
+      "name": "data-platform",
+      "version": "1.0.0",
+      "description": "Data engineering tools with pandas, PostgreSQL/PostGIS, and dbt integration",
+      "source": "./plugins/data-platform",
+      "author": {
+        "name": "Leo Miranda",
+        "email": "leobmiranda@gmail.com"
+      },
+      "homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/data-platform/README.md",
+      "repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
+      "mcpServers": ["./.mcp.json"],
+      "category": "data",
+      "tags": ["pandas", "postgresql", "postgis", "dbt", "data-engineering", "etl"],
+      "license": "MIT"
     }
-  }
+  ]
 }
````
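Every entry in the new manifest now carries `category`, `tags`, and `license`. A quick spot-check for entries missing any of the three (a minimal sketch assuming `jq` is installed; this helper is not part of the repository's scripts):

```bash
# Print names of plugin entries lacking category, tags, or license (hypothetical check)
jq -r '.plugins[]
       | select((.category and .tags and .license) | not)
       | .name' .claude-plugin/marketplace.json
```

An empty result means every entry satisfies the v4.0.0 schema.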
CHANGELOG.md (211 changed lines)

````diff
@@ -1,9 +1,218 @@
 # Changelog
 
-All notable changes to support-claude-mktplace will be documented in this file.
+All notable changes to the Leo Claude Marketplace will be documented in this file.
 
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
 
+## [4.0.0] - 2026-01-25
+
+### Added
+
+#### New Plugin: data-platform v1.0.0
+- **pandas MCP Tools** (14 tools): DataFrame operations with Arrow IPC data_ref persistence
+  - `read_csv`, `read_parquet`, `read_json` - Load data with chunking support
+  - `to_csv`, `to_parquet` - Export to various formats
+  - `describe`, `head`, `tail` - Data exploration
+  - `filter`, `select`, `groupby`, `join` - Data transformation
+  - `list_data`, `drop_data` - Memory management
+- **PostgreSQL MCP Tools** (10 tools): Database operations with asyncpg connection pooling
+  - `pg_connect`, `pg_query`, `pg_execute` - Core database operations
+  - `pg_tables`, `pg_columns`, `pg_schemas` - Schema exploration
+  - `st_tables`, `st_geometry_type`, `st_srid`, `st_extent` - PostGIS spatial support
+- **dbt MCP Tools** (8 tools): Build tool wrapper with pre-execution validation
+  - `dbt_parse` - Pre-flight validation (catches dbt 1.9+ deprecations)
+  - `dbt_run`, `dbt_test`, `dbt_build` - Execution with auto-validation
+  - `dbt_compile`, `dbt_ls`, `dbt_docs_generate`, `dbt_lineage` - Analysis tools
+- **Commands**: `/ingest`, `/profile`, `/schema`, `/explain`, `/lineage`, `/run`
+- **Agents**: `data-ingestion` (loading/transformation), `data-analysis` (exploration/profiling)
+- **SessionStart Hook**: Graceful PostgreSQL connection check (non-blocking warning)
+- **Key Features**:
+  - data_ref system for DataFrame persistence across tool calls
+  - 100k row limit with chunking support for large datasets
+  - Hybrid configuration (system: `~/.config/claude/postgres.env`, project: `.env`)
+  - Auto-detection of dbt projects
+  - Arrow IPC format for efficient memory management
+
+---
+
+## [3.2.0] - 2026-01-24
+
+### Added
+- **git-flow:** `/commit` now detects protected branches before committing
+  - Warns when on protected branch (main, master, development, staging, production)
+  - Offers to create feature branch automatically instead of committing directly
+  - Configurable via `GIT_PROTECTED_BRANCHES` environment variable
+- **netbox:** Platform and primary_ip parameters added to device update tools
+- **claude-config-maintainer:** Auto-enforce mandatory behavior rules via SessionStart hook
+- **scripts:** `release.sh` - Versioning workflow script for consistent releases
+- **scripts:** `verify-hooks.sh` - Verify all hooks are command type
+
+### Changed
+- **doc-guardian:** Hook switched from `prompt` type to `command` type
+  - Prompt hooks unreliable - Claude ignores explicit instructions
+  - New `notify.sh` bash script guarantees exact output behavior
+  - Only notifies for config file changes (commands/, agents/, skills/, hooks/)
+  - Silent exit for all other files - no blocking possible
+- **All hooks:** Converted to command type with stricter plugin prefix enforcement
+  - All hooks now mandate `[plugin-name]` prefix with "NO EXCEPTIONS" rule
+  - Simplified output formats with word limits
+  - Consistent structure across projman, pr-review, code-sentinel, doc-guardian
+- **CLAUDE.md:** Replaced destructive "ALWAYS CLEAR CACHE" rule with "VERIFY AND RESTART"
+  - Cache clearing mid-session breaks MCP tools
+  - Added guidance for proper plugin development workflow
+
+### Fixed
+- **cmdb-assistant:** Complete MCP tool schemas for update operations (#138)
+- **netbox:** Shorten tool names to meet 64-char API limit (#134)
+- **cmdb-assistant:** Correct NetBox API URL format in setup wizard (#132)
+- **gitea/projman:** Type safety for `create_label_smart`, curl-based debug-report (#124)
+- **netbox:** Add diagnostic logging for JSON parse errors (#121)
+- **labels:** Add duplicate check before creating labels (#116)
+- **hooks:** Convert ALL hooks to command type with proper prefixes (#114)
+- Protected branch workflow: Claude no longer commits directly to protected branches (fixes #109)
+- doc-guardian hook no longer blocks workflow (fixes #110)
+
+---
+
+## [3.1.1] - 2026-01-22
+
+### Added
+- **git-flow:** `/commit-sync` now prunes stale remote-tracking branches with `git fetch --prune`
+- **git-flow:** `/commit-sync` detects and reports local branches with deleted upstreams
+- **git-flow:** `/branch-cleanup` now handles stale branches (upstream gone) separately from merged branches
+- **git-flow:** New `GIT_CLEANUP_STALE` environment variable for stale branch cleanup control
+
+### Changed
+- **All hooks:** Added `[plugin-name]` prefix to all hook messages for better identification
+  - `[projman]`, `[pr-review]`, `[code-sentinel]`, `[doc-guardian]` prefixes
+- **doc-guardian:** Hook now notification-only (no file reads or blocking operations)
+  - Suggests running `/doc-sync` instead of performing inline checks
+  - Significantly reduces workflow interruption
+
+### Fixed
+- doc-guardian hook no longer stalls workflow with deep file analysis
+
+---
+
+## [3.1.0] - 2026-01-21
+
+### Added
+
+#### Debug Workflow Commands (projman)
+- **`/debug-report`** - Run diagnostics in test projects, create structured issues in marketplace
+  - Runs 5 diagnostic MCP tool tests with explicit repo parameter
+  - Captures full project context (git remote, cwd, branch)
+  - Generates structured issue with hypothesis and investigation steps
+  - Creates issue in configured marketplace repository automatically
+- **`/debug-review`** - Investigate diagnostic issues with human approval gates
+  - Lists open diagnostic issues for triage
+  - Maps errors to relevant code files using error-to-file mapping
+  - MANDATORY: Reads relevant files before proposing any fix
+  - Three approval gates: investigation summary, fix approach, PR creation
+  - Creates feature branch, commits, and PR with proper linking
+
+#### MCP Server Improvements
+- Dynamic label format detection in `suggest_labels`
+  - Supports slash format (`Type/Bug`) and colon-space format (`Type: Bug`)
+  - Fetches actual labels from repo and matches suggestions to real format
+  - Handles Effort/Efforts singular/plural normalization
+
+### Changed
+- **`/labels-sync`** completely rewritten with explicit execution steps
+  - Step 1 now explicitly requires running `git remote get-url origin` via Bash
+  - All MCP tool calls show required `repo` parameter
+  - Added "DO NOT" section preventing common mistakes
+  - Removed confusing "Label Reference" section that caused file creation prompts
+
+### Fixed
+- MCP tools no longer fail with "Use 'owner/repo' format" error
+  - Root cause: MCP server is sandboxed and cannot auto-detect project directory
+  - Solution: Command documentation now instructs Claude to detect repo via Bash first
+
+---
+
+## [3.0.1] - 2026-01-21
+
+### Added
+- `/project-init` command for quick project setup when system is already configured
+- `/project-sync` command to sync .env with git remote after repository move/rename
+- SessionStart hooks for automatic mismatch detection between git remote and .env
+- Interactive setup wizard (`/initial-setup`) redesigned to use Claude tools instead of bash script
+
+### Changed
+- `GITEA_ORG` moved from system-level to project-level configuration (different projects may belong to different organizations)
+- Environment variables renamed to match MCP server expectations:
+  - `GITEA_URL` → `GITEA_API_URL` (must include `/api/v1`)
+  - `GITEA_TOKEN` → `GITEA_API_TOKEN`
+  - `NETBOX_URL` → `NETBOX_API_URL` (must include `/api`)
+  - `NETBOX_TOKEN` → `NETBOX_API_TOKEN`
+- Setup commands now validate repository via Gitea API before saving configuration
+- README.md simplified to show only wizard setup path (manual setup moved to CONFIGURATION.md)
+
+### Fixed
+- API URL paths in curl commands (removed redundant `/api/v1` since it's now in the URL variable)
+- Documentation now correctly references environment variable names
+
+---
+
+## [3.0.0] - 2026-01-20
+
+### Added
+
+#### New Plugins
+- **clarity-assist** v1.0.0 - Prompt optimization with ND accommodations
+  - `/clarify` command for full 4-D methodology optimization
+  - `/quick-clarify` command for rapid single-pass clarification
+  - clarity-coach agent with ND-friendly questioning patterns
+  - prompt-patterns skill with optimization rules
+- **git-flow** v1.0.0 - Git workflow automation
+  - `/commit` command with smart conventional commit messages
+  - `/commit-push`, `/commit-merge`, `/commit-sync` workflow commands
+  - `/branch-start`, `/branch-cleanup` branch management commands
+  - `/git-status` enhanced status with recommendations
+  - `/git-config` interactive configuration
+  - git-assistant agent for complex operations
+  - workflow-patterns skill with branching strategies
+- **pr-review** v1.0.0 - Multi-agent pull request review
+  - `/pr-review` command for comprehensive multi-agent review
+  - `/pr-summary` command for quick PR overview
+  - `/pr-findings` command for filtering review findings
+  - coordinator agent for orchestrating reviews
+  - security-reviewer, performance-analyst, maintainability-auditor, test-validator agents
+  - review-patterns skill with confidence scoring rules
+
+#### Gitea MCP Server Enhancements
+- 6 new Pull Request tools:
+  - `list_pull_requests` - List PRs with filters
+  - `get_pull_request` - Get PR details
+  - `get_pr_diff` - Get PR diff
+  - `get_pr_comments` - Get PR comments
+  - `create_pr_review` - Create review (approve, request changes, comment)
+  - `add_pr_comment` - Add comment to PR
+
+#### Documentation
+- `docs/CONFIGURATION.md` - Centralized configuration guide for all plugins
+
+### Changed
+- **BREAKING:** Marketplace renamed from `claude-code-marketplace` to `leo-claude-mktplace`
+- **BREAKING:** MCP servers moved from plugin directories to shared `mcp-servers/` at repository root
+- All plugins now have `category`, `tags`, and `license` fields in marketplace.json
+- Plugin MCP dependencies now use symlinks to shared servers
+- projman version bumped to 3.0.0 (includes PR tools integration)
+- projman CONFIGURATION.md slimmed down, links to central docs
+
+### Removed
+- Standalone MCP server directories inside plugins (replaced with symlinks)
+
+---
+
 ## [2.3.0] - 2026-01-20
 
 ### Added
````
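The protected-branch behavior introduced in 3.2.0 above is driven by `GIT_PROTECTED_BRANCHES`. A minimal sketch of how such a check could work (illustrative only; the actual `/commit` implementation is not part of this diff, and a whitespace-separated variable format is assumed):

```bash
# Sketch: refuse to commit on a protected branch, falling back to the documented defaults
PROTECTED="${GIT_PROTECTED_BRANCHES:-main master development staging production}"
branch="$(git rev-parse --abbrev-ref HEAD)"
for p in $PROTECTED; do
  if [ "$branch" = "$p" ]; then
    echo "[git-flow] '$branch' is protected; create a feature branch instead." >&2
    exit 1
  fi
done
```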
CLAUDE.md (256 changed lines)

````diff
@@ -1,18 +1,66 @@
 # CLAUDE.md
 
 This file provides guidance to Claude Code when working with code in this repository.
 
+## ⛔ MANDATORY BEHAVIOR RULES - READ FIRST
+
+**These rules are NON-NEGOTIABLE. Violating them wastes the user's time and money.**
+
+### 1. WHEN USER ASKS YOU TO CHECK SOMETHING - CHECK EVERYTHING
+- Search ALL locations, not just where you think it is
+- Check cache directories: `~/.claude/plugins/cache/`
+- Check installed: `~/.claude/plugins/marketplaces/`
+- Check source: `~/claude-plugins-work/`
+- **NEVER say "no" or "that's not the issue" without exhaustive verification**
+
+### 2. WHEN USER SAYS SOMETHING IS WRONG - BELIEVE THEM
+- The user knows their system better than you
+- Investigate thoroughly before disagreeing
+- If user suspects cache, CHECK THE CACHE
+- If user suspects a file, READ THE FILE
+- **Your confidence is often wrong. User's instincts are often right.**
+
+### 3. NEVER SAY "DONE" WITHOUT VERIFICATION
+- Run the actual command/script to verify
+- Show the output to the user
+- Check ALL affected locations
+- **"Done" means VERIFIED WORKING, not "I made changes"**
+
+### 4. SHOW EXACTLY WHAT USER ASKS FOR
+- If user asks for messages, show the MESSAGES
+- If user asks for code, show the CODE
+- If user asks for output, show the OUTPUT
+- **Don't interpret or summarize unless asked**
+
+### 5. AFTER PLUGIN UPDATES - VERIFY AND RESTART
+
+**⚠️ DO NOT clear cache mid-session** - this breaks MCP tools that are already loaded.
+
+1. Run `./scripts/verify-hooks.sh` to check hook types
+2. If changes affect MCP servers or hooks, inform the user:
+   > "Plugin changes require a session restart to take effect. Please restart Claude Code."
+3. Cache clearing is ONLY safe **before** starting a new session (not during)
+
+See `docs/DEBUGGING-CHECKLIST.md` for details on cache timing.
+
+**FAILURE TO FOLLOW THESE RULES = WASTED USER TIME = UNACCEPTABLE**
+
+---
+
 ## Project Overview
 
-**Repository:** support-claude-mktplace
-**Version:** 2.3.0
+**Repository:** leo-claude-mktplace
+**Version:** 3.1.2
 **Status:** Production Ready
 
-A Claude Code plugin marketplace containing:
+A plugin marketplace for Claude Code containing:
 
 | Plugin | Description | Version |
 |--------|-------------|---------|
-| `projman` | Sprint planning and project management with Gitea integration | 2.3.0 |
+| `projman` | Sprint planning and project management with Gitea integration | 3.1.0 |
+| `git-flow` | Git workflow automation with smart commits and branch management | 1.0.0 |
+| `pr-review` | Multi-agent PR review with confidence scoring | 1.0.0 |
+| `clarity-assist` | Prompt optimization with ND-friendly accommodations | 1.0.0 |
 | `doc-guardian` | Automatic documentation drift detection and synchronization | 1.0.0 |
 | `code-sentinel` | Security scanning and code refactoring tools | 1.0.0 |
 | `claude-config-maintainer` | CLAUDE.md optimization and maintenance | 1.0.0 |
@@ -25,54 +73,69 @@ A Claude Code plugin marketplace containing:
 # Validate marketplace compliance
 ./scripts/validate-marketplace.sh
 
-# Run projman commands (in a target project with plugin installed)
-/sprint-plan     # Start sprint planning
-/sprint-status   # Check progress
-/review          # Pre-close code quality review
-/test-check      # Verify tests before close
-/sprint-close    # Complete sprint
+# After updates
+./scripts/post-update.sh   # Rebuild venvs, verify symlinks
 ```
 
+### Plugin Commands by Category
+
+| Category | Commands |
+|----------|----------|
+| **Setup** | `/initial-setup`, `/project-init`, `/project-sync` |
+| **Sprint** | `/sprint-plan`, `/sprint-start`, `/sprint-status`, `/sprint-close` |
+| **Quality** | `/review`, `/test-check`, `/test-gen` |
+| **PR Review** | `/pr-review:initial-setup`, `/pr-review:project-init` |
+| **Docs** | `/doc-audit`, `/doc-sync` |
+| **Security** | `/security-scan`, `/refactor`, `/refactor-dry` |
+| **Config** | `/config-analyze`, `/config-optimize` |
+| **Debug** | `/debug-report`, `/debug-review` |
+
 ## Repository Structure
 
 ```
-support-claude-mktplace/
+leo-claude-mktplace/
 ├── .claude-plugin/
 │   └── marketplace.json            # Marketplace manifest
+├── mcp-servers/                    # SHARED MCP servers (v3.0.0+)
+│   ├── gitea/                      # Gitea MCP (issues, PRs, wiki)
+│   └── netbox/                     # NetBox MCP (CMDB)
 ├── plugins/
 │   ├── projman/                    # Sprint management
 │   │   ├── .claude-plugin/plugin.json
 │   │   ├── .mcp.json
-│   │   ├── mcp-servers/gitea/      # Bundled MCP server
-│   │   ├── commands/               # 9 commands
-│   │   │   ├── sprint-plan.md, sprint-start.md, sprint-status.md
-│   │   │   ├── sprint-close.md, labels-sync.md, initial-setup.md
-│   │   │   └── review.md, test-check.md, test-gen.md
+│   │   ├── mcp-servers/gitea -> ../../../mcp-servers/gitea   # SYMLINK
+│   │   ├── commands/               # 13 commands (incl. setup, debug)
+│   │   ├── hooks/                  # SessionStart mismatch detection
 │   │   ├── agents/                 # 4 agents
 │   │   │   ├── planner.md, orchestrator.md, executor.md
 │   │   │   └── code-reviewer.md
 │   │   └── skills/label-taxonomy/
+│   ├── git-flow/                   # Git workflow automation
+│   │   ├── .claude-plugin/plugin.json
+│   │   ├── commands/               # 8 commands
+│   │   └── agents/
+│   ├── pr-review/                  # Multi-agent PR review
+│   │   ├── .claude-plugin/plugin.json
+│   │   ├── .mcp.json
+│   │   ├── mcp-servers/gitea -> ../../../mcp-servers/gitea   # SYMLINK
+│   │   ├── commands/               # 6 commands (incl. setup)
+│   │   ├── hooks/                  # SessionStart mismatch detection
+│   │   └── agents/                 # 5 agents
+│   ├── clarity-assist/             # Prompt optimization (NEW v3.0.0)
+│   │   ├── .claude-plugin/plugin.json
+│   │   ├── commands/               # 2 commands
+│   │   └── agents/
 │   ├── doc-guardian/               # Documentation drift detection
 │   │   ├── .claude-plugin/plugin.json
 │   │   ├── hooks/hooks.json        # PostToolUse, Stop hooks
 │   │   ├── commands/               # doc-audit.md, doc-sync.md
 │   │   ├── agents/                 # doc-analyzer.md
 │   │   └── skills/doc-patterns/
 │   ├── code-sentinel/              # Security scanning & refactoring
 │   │   ├── .claude-plugin/plugin.json
 │   │   ├── hooks/hooks.json        # PreToolUse hook
 │   │   ├── commands/               # security-scan.md, refactor.md, refactor-dry.md
 │   │   ├── agents/                 # security-reviewer.md, refactor-advisor.md
 │   │   └── skills/security-patterns/
 │   ├── claude-config-maintainer/
 │   ├── cmdb-assistant/
 │   └── project-hygiene/
 ├── scripts/
 │   ├── setup.sh, post-update.sh
-│   └── validate-marketplace.sh     # Marketplace compliance validation
+│   ├── validate-marketplace.sh     # Marketplace compliance validation
+│   ├── verify-hooks.sh             # Verify all hooks are command type
+│   └── check-venv.sh               # Check MCP server venvs exist
 └── docs/
     ├── CANONICAL-PATHS.md          # Single source of truth for paths
     └── references/
         └── CONFIGURATION.md        # Centralized configuration guide
 ```
 
 ## CRITICAL: Rules You MUST Follow
@@ -86,7 +149,8 @@ support-claude-mktplace/
 ### Plugin Development
 - **plugin.json MUST be in `.claude-plugin/` directory** (not plugin root)
 - **Every plugin MUST be listed in marketplace.json**
-- **MCP servers MUST use venv python path**: `${CLAUDE_PLUGIN_ROOT}/mcp-servers/{name}/.venv/bin/python`
+- **MCP servers are SHARED at root** with symlinks from plugins
+- **MCP server venv path**: `${CLAUDE_PLUGIN_ROOT}/mcp-servers/{name}/.venv/bin/python`
 - **CLI tools forbidden** - Use MCP tools exclusively (never `tea`, `gh`, etc.)
 
 ### Hooks (Valid Events Only)
@@ -98,11 +162,11 @@
 `CLAUDE.md`, `README.md`, `LICENSE`, `CHANGELOG.md`, `.gitignore`, `.env.example`
 
 ### Allowed Root Directories
-`.claude/`, `.claude-plugin/`, `.claude-plugins/`, `.scratch/`, `docs/`, `hooks/`, `plugins/`, `scripts/`
+`.claude/`, `.claude-plugin/`, `.claude-plugins/`, `.scratch/`, `docs/`, `hooks/`, `mcp-servers/`, `plugins/`, `scripts/`
 
 ## Architecture
 
-### Four-Agent Model
+### Four-Agent Model (projman)
 
 | Agent | Personality | Responsibilities |
 |-------|-------------|------------------|
@@ -115,19 +179,22 @@
 | Category | Tools |
 |----------|-------|
-| Issues | `list_issues`, `get_issue`, `create_issue`, `update_issue`, `add_comment` |
-| Labels | `get_labels`, `suggest_labels`, `create_label` |
-| Milestones | `list_milestones`, `get_milestone`, `create_milestone`, `update_milestone` |
-| Dependencies | `list_issue_dependencies`, `create_issue_dependency`, `get_execution_order` |
-| Wiki | `list_wiki_pages`, `get_wiki_page`, `create_wiki_page`, `create_lesson`, `search_lessons` |
+| Issues | `list_issues`, `get_issue`, `create_issue`, `update_issue`, `add_comment`, `aggregate_issues` |
+| Labels | `get_labels`, `suggest_labels`, `create_label`, `create_label_smart` |
+| Milestones | `list_milestones`, `get_milestone`, `create_milestone`, `update_milestone`, `delete_milestone` |
+| Dependencies | `list_issue_dependencies`, `create_issue_dependency`, `remove_issue_dependency`, `get_execution_order` |
+| Wiki | `list_wiki_pages`, `get_wiki_page`, `create_wiki_page`, `update_wiki_page`, `create_lesson`, `search_lessons` |
+| **Pull Requests** | `list_pull_requests`, `get_pull_request`, `get_pr_diff`, `get_pr_comments`, `create_pr_review`, `add_pr_comment` |
 | Validation | `validate_repo_org`, `get_branch_protection` |
 
 ### Hybrid Configuration
 
 | Level | Location | Purpose |
 |-------|----------|---------|
-| System | `~/.config/claude/gitea.env` | Credentials (GITEA_URL, GITEA_TOKEN, GITEA_ORG) |
-| Project | `.env` in project root | Repository specification (GITEA_REPO) |
+| System | `~/.config/claude/gitea.env` | Credentials (GITEA_API_URL, GITEA_API_TOKEN) |
+| Project | `.env` in project root | Repository specification (GITEA_ORG, GITEA_REPO) |
+
+**Note:** `GITEA_ORG` is at project level since different projects may belong to different organizations.
 
 ### Branch-Aware Security
@@ -160,10 +227,11 @@ Stored in Gitea Wiki under `lessons-learned/sprints/`.
 ### Adding a New Plugin
 
 1. Create `plugins/{name}/.claude-plugin/plugin.json`
-2. Add entry to `.claude-plugin/marketplace.json`
+2. Add entry to `.claude-plugin/marketplace.json` with category, tags, license
 3. Create `README.md` and `claude-md-integration.md`
-4. Run `./scripts/validate-marketplace.sh`
-5. Update `CHANGELOG.md`
+4. If using MCP server, create symlink: `ln -s ../../../mcp-servers/{server} plugins/{name}/mcp-servers/{server}`
+5. Run `./scripts/validate-marketplace.sh`
+6. Update `CHANGELOG.md`
 
 ### Adding a Command to projman
@@ -191,40 +259,90 @@ Stored in Gitea Wiki under `lessons-learned/sprints/`.
 | Document | Purpose |
 |----------|---------|
 | `docs/CANONICAL-PATHS.md` | **Single source of truth** for paths |
+| `docs/COMMANDS-CHEATSHEET.md` | All commands quick reference |
+| `docs/CONFIGURATION.md` | Centralized setup guide |
+| `docs/DEBUGGING-CHECKLIST.md` | Systematic troubleshooting guide |
 | `docs/UPDATING.md` | Update guide for the marketplace |
-| `plugins/projman/CONFIGURATION.md` | Projman setup guide |
+| `plugins/projman/CONFIGURATION.md` | Projman quick reference (links to central) |
 | `plugins/projman/README.md` | Projman full documentation |
 
-## Versioning and Changelog Rules
+## Installation Paths
 
-### Version Display
-**The marketplace version is displayed ONLY in the main `README.md` title.**
+Understanding where files live is critical for debugging:
 
-- Format: `# Claude Code Marketplace - vX.Y.Z`
-- Do NOT add version numbers to individual plugin documentation titles
-- Do NOT add version numbers to configuration guides
-- Do NOT add version numbers to CLAUDE.md or other docs
+| Context | Path | Purpose |
+|---------|------|---------|
+| **Source** | `~/claude-plugins-work/` | Development - edit here |
+| **Installed** | `~/.claude/plugins/marketplaces/leo-claude-mktplace/` | Runtime - Claude uses this |
+| **Cache** | `~/.claude/` | Plugin metadata and settings |
 
-### Changelog Maintenance (MANDATORY)
-**`CHANGELOG.md` is the authoritative source for version history.**
+**Key insight:** Edits to source require reinstall/update to take effect at runtime.
 
-When releasing a new version:
-1. Update main `README.md` title with new version
-2. Update `CHANGELOG.md` with:
-   - Version number and date: `## [X.Y.Z] - YYYY-MM-DD`
-   - **Added**: New features, commands, files
-   - **Changed**: Modifications to existing functionality
-   - **Fixed**: Bug fixes
-   - **Removed**: Deleted features, files, deprecated items
-3. Update `marketplace.json` metadata version
-4. Update plugin `plugin.json` versions if plugin-specific changes
+## Debugging & Troubleshooting
 
-### Version Format
-- Follow [Semantic Versioning](https://semver.org/): MAJOR.MINOR.PATCH
-- MAJOR: Breaking changes
-- MINOR: New features, backward compatible
-- PATCH: Bug fixes, minor improvements
+See `docs/DEBUGGING-CHECKLIST.md` for systematic troubleshooting.
+
+**Common Issues:**
+| Symptom | Likely Cause | Fix |
+|---------|--------------|-----|
+| "X MCP servers failed" | Missing venv in installed path | `cd ~/.claude/plugins/marketplaces/leo-claude-mktplace && ./scripts/setup.sh` |
+| MCP tools not available | Symlink broken or venv missing | Run `/debug-report` to diagnose |
+| Changes not taking effect | Editing source, not installed | Reinstall plugin or edit installed path |
+
+**Debug Commands:**
+- `/debug-report` - Run full diagnostics, create issue if needed
+- `/debug-review` - Investigate and propose fixes
+
+## Versioning Workflow
+
+This project follows [SemVer](https://semver.org/) and [Keep a Changelog](https://keepachangelog.com).
+
+### Version Locations (must stay in sync)
+
+| Location | Format | Example |
+|----------|--------|---------|
+| Git tags | `vX.Y.Z` | `v3.2.0` |
+| README.md title | `# Leo Claude Marketplace - vX.Y.Z` | `v3.2.0` |
+| marketplace.json | `"version": "X.Y.Z"` | `3.2.0` |
+| CHANGELOG.md | `## [X.Y.Z] - YYYY-MM-DD` | `[3.2.0] - 2026-01-24` |
+
+### During Development
+
+**All changes go under `[Unreleased]` in CHANGELOG.md.** Never create a versioned section until release time.
+
+```markdown
+## [Unreleased]
+
+### Added
+- New feature description
+
+### Fixed
+- Bug fix description
+```
+
+### Creating a Release
+
+Use the release script to ensure consistency:
+
+```bash
+./scripts/release.sh 3.2.0
+```
+
+The script will:
+1. Validate `[Unreleased]` section has content
+2. Replace `[Unreleased]` with `[3.2.0] - YYYY-MM-DD`
+3. Update README.md title
+4. Update marketplace.json version
+5. Commit and create git tag
+
+### SemVer Guidelines
+
+| Change Type | Version Bump | Example |
+|-------------|--------------|---------|
+| Bug fixes only | PATCH (x.y.**Z**) | 3.1.1 → 3.1.2 |
+| New features (backwards compatible) | MINOR (x.**Y**.0) | 3.1.2 → 3.2.0 |
+| Breaking changes | MAJOR (**X**.0.0) | 3.2.0 → 4.0.0 |
 
 ---
 
-**Last Updated:** 2026-01-20
+**Last Updated:** 2026-01-24
````
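The five release steps listed for `scripts/release.sh` map onto a handful of shell operations. A rough sketch of that flow (assumptions: GNU `sed` and `jq` are available; the real script almost certainly differs in details such as content validation and error handling):

```bash
# Sketch: stamp CHANGELOG, bump README title and marketplace.json, then commit and tag
VERSION="$1"
grep -q '^## \[Unreleased\]' CHANGELOG.md || { echo "No [Unreleased] section" >&2; exit 1; }
sed -i "s/^## \[Unreleased\]/## [$VERSION] - $(date +%F)/" CHANGELOG.md
sed -i "s/^# Leo Claude Marketplace - v.*/# Leo Claude Marketplace - v$VERSION/" README.md
tmp=$(mktemp)
jq --arg v "$VERSION" '.metadata.version = $v' .claude-plugin/marketplace.json > "$tmp" \
  && mv "$tmp" .claude-plugin/marketplace.json
git add -A && git commit -m "chore: release v$VERSION" && git tag "v$VERSION"
```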
324
README.md
324
README.md
@@ -1,15 +1,17 @@
|
||||
# Claude Code Marketplace - v2.3.0
|
||||
# Leo Claude Marketplace - v4.0.0
|
||||
|
||||
A collection of Claude Code plugins for project management, infrastructure automation, and development workflows.
|
||||
|
||||
## Plugins
|
||||
|
||||
### [projman](./plugins/projman/README.md)
|
||||
### Development & Project Management
|
||||
|
||||
#### [projman](./plugins/projman/README.md)
|
||||
**Sprint Planning and Project Management**
|
||||
|
||||
AI-guided sprint planning with full Gitea integration. Transforms a proven 15-sprint workflow into a distributable plugin.
|
||||
|
||||
- Three-agent model: Planner, Orchestrator, Executor, Code Reviewer
|
||||
- Four-agent model: Planner, Orchestrator, Executor, Code Reviewer
|
||||
- Intelligent label suggestions from 43-label taxonomy
|
||||
- Lessons learned capture via Gitea Wiki
|
||||
- Native issue dependencies with parallel execution
|
||||
@@ -17,82 +19,117 @@ AI-guided sprint planning with full Gitea integration. Transforms a proven 15-sp
|
||||
- Branch-aware security (development/staging/production)
|
||||
- Pre-sprint-close code quality review and test verification
|
||||
|
||||
**Commands:** `/sprint-plan`, `/sprint-start`, `/sprint-status`, `/sprint-close`, `/labels-sync`, `/initial-setup`, `/review`, `/test-check`, `/test-gen`
|
||||
**Commands:** `/sprint-plan`, `/sprint-start`, `/sprint-status`, `/sprint-close`, `/labels-sync`, `/initial-setup`, `/project-init`, `/project-sync`, `/review`, `/test-check`, `/test-gen`, `/debug-report`, `/debug-review`
|
||||
|
||||
### [claude-config-maintainer](./plugins/claude-config-maintainer/README.md)
|
||||
#### [git-flow](./plugins/git-flow/README.md) *NEW in v3.0.0*
|
||||
**Git Workflow Automation**
|
||||
|
||||
Smart git operations with intelligent commit messages and branch management.
|
||||
|
||||
- Auto-generated conventional commit messages
|
||||
- Multiple workflow styles (simple, feature-branch, pr-required, trunk-based)
|
||||
- Branch naming enforcement
|
||||
- Merge and cleanup automation
|
||||
- Protected branch awareness
|
||||
|
||||
**Commands:** `/commit`, `/commit-push`, `/commit-merge`, `/commit-sync`, `/branch-start`, `/branch-cleanup`, `/git-status`, `/git-config`
|
||||
|
||||
#### [pr-review](./plugins/pr-review/README.md) *NEW in v3.0.0*
|
||||
**Multi-Agent PR Review**
|
||||
|
||||
Comprehensive pull request review using specialized agents.
|
||||
|
||||
- Multi-agent review: Security, Performance, Maintainability, Tests
|
||||
- Confidence scoring (only reports HIGH/MEDIUM confidence findings)
|
||||
- Actionable feedback with suggested fixes
|
||||
- Gitea integration for automated review submission
|
||||
|
||||
**Commands:** `/pr-review`, `/pr-summary`, `/pr-findings`, `/initial-setup`, `/project-init`, `/project-sync`
|
||||
|
||||
#### [claude-config-maintainer](./plugins/claude-config-maintainer/README.md)
|
||||
**CLAUDE.md Optimization and Maintenance**
|
||||
|
||||
Analyze, optimize, and create CLAUDE.md configuration files for Claude Code projects.
|
||||
|
||||
- Structure and clarity scoring (100-point system)
|
||||
- Automatic optimization with preview and backup
|
||||
- Project-aware initialization with stack detection
|
||||
- Best practices enforcement
|
||||
|
||||
**Commands:** `/config-analyze`, `/config-optimize`, `/config-init`
|
||||
|
||||
### [cmdb-assistant](./plugins/cmdb-assistant/README.md)
|
||||
**NetBox CMDB Integration**
|
||||
### Productivity
|
||||
|
||||
Full CRUD operations for network infrastructure management directly from Claude Code.
|
||||
#### [clarity-assist](./plugins/clarity-assist/README.md) *NEW in v3.0.0*
|
||||
**Prompt Optimization with ND Accommodations**
|
||||
|
||||
- Device, IP, site, and rack management
|
||||
- Smart search across all NetBox modules
|
||||
- Conversational infrastructure queries
|
||||
- Audit trail and change tracking
|
||||
Transform vague requests into clear specifications using structured methodology.
|
||||
|
||||
**Commands:** `/cmdb-search`, `/cmdb-device`, `/cmdb-ip`, `/cmdb-site`
|
||||
- 4-D methodology: Deconstruct, Diagnose, Develop, Deliver
|
||||
- ND-friendly question patterns (option-based, chunked)
|
||||
- Conflict detection and escalation protocols
|
||||
|
||||
### [project-hygiene](./plugins/project-hygiene/README.md)
|
||||
**Commands:** `/clarify`, `/quick-clarify`
|
||||
|
||||
#### [doc-guardian](./plugins/doc-guardian/README.md)
|
||||
**Documentation Lifecycle Management**
|
||||
|
||||
Automatic documentation drift detection and synchronization.
|
||||
|
||||
**Commands:** `/doc-audit`, `/doc-sync`
|
||||
|
||||
#### [project-hygiene](./plugins/project-hygiene/README.md)
|
||||
**Post-Task Cleanup Automation**
|
||||
|
||||
Hook-based cleanup that runs after Claude completes work.
|
||||
|
||||
- Deletes temp files (`*.tmp`, `*.bak`, `__pycache__`, etc.)
|
||||
- Warns about unexpected files in project root
|
||||
- Identifies orphaned supporting files
|
||||
- Configurable via `.hygiene.json`
|
||||
### Security
|
||||
|
||||
### [doc-guardian](./plugins/doc-guardian/README.md)
|
||||
**Documentation Lifecycle Management**
|
||||
|
||||
Automatic documentation drift detection and synchronization. Eliminates manual doc update cycles.
|
||||
|
||||
- PostToolUse hook detects when code changes affect documentation
|
||||
- Stop hook reminds of pending updates before session ends
|
||||
- Batched updates in single commit
|
||||
|
||||
**Commands:** `/doc-audit`, `/doc-sync`
|
||||
|
||||
### [code-sentinel](./plugins/code-sentinel/README.md)
|
||||
#### [code-sentinel](./plugins/code-sentinel/README.md)
|
||||
**Security Scanning & Refactoring**
|
||||
|
||||
Security vulnerability detection and code refactoring tools.
|
||||
|
||||
- PreToolUse hook catches security issues before code is written
|
||||
- Pattern library: SQL injection, XSS, command injection, hardcoded secrets
|
||||
- Refactoring patterns: extract method, simplify conditional, modernize syntax
|
||||
|
||||
**Commands:** `/security-scan`, `/refactor`, `/refactor-dry`
|
||||
|
||||
### Infrastructure
|
||||
|
||||
#### [cmdb-assistant](./plugins/cmdb-assistant/README.md)
|
||||
**NetBox CMDB Integration**
|
||||
|
||||
Full CRUD operations for network infrastructure management directly from Claude Code.
|
||||
|
||||
**Commands:** `/initial-setup`, `/cmdb-search`, `/cmdb-device`, `/cmdb-ip`, `/cmdb-site`
|
||||
|
||||
### Data Engineering
|
||||
|
||||
#### [data-platform](./plugins/data-platform/README.md) *NEW*
|
||||
**pandas, PostgreSQL/PostGIS, and dbt Integration**
|
||||
|
||||
Comprehensive data engineering toolkit with persistent DataFrame storage.
|
||||
|
||||
- 14 pandas tools with Arrow IPC data_ref system
|
||||
- 10 PostgreSQL/PostGIS tools with connection pooling
|
||||
- 8 dbt tools with automatic pre-validation
|
||||
- 100k row limit with chunking support
|
||||
- Auto-detection of dbt projects
|
||||
|
||||
**Commands:** `/ingest`, `/profile`, `/schema`, `/explain`, `/lineage`, `/run`
|
||||
|
||||
## MCP Servers
|
||||
|
||||
MCP servers are **bundled inside each plugin** that needs them. This ensures plugins work when cached by Claude Code.
|
||||
MCP servers are **shared at repository root** with **symlinks** from plugins that use them.
|
||||
|
||||
### Gitea MCP Server (bundled in projman)
|
||||
### Gitea MCP Server (shared)
|
||||
|
||||
Full Gitea API integration for project management.
|
||||
|
||||
| Category | Tools |
|
||||
|----------|-------|
|
||||
| Issues | `list_issues`, `get_issue`, `create_issue`, `update_issue`, `add_comment` |
|
||||
| Labels | `get_labels`, `suggest_labels`, `create_label` |
|
||||
| Wiki | `list_wiki_pages`, `get_wiki_page`, `create_wiki_page`, `create_lesson`, `search_lessons` |
|
||||
| Milestones | `list_milestones`, `get_milestone`, `create_milestone`, `update_milestone` |
|
||||
| Dependencies | `list_issue_dependencies`, `create_issue_dependency`, `get_execution_order` |
|
||||
| Issues | `list_issues`, `get_issue`, `create_issue`, `update_issue`, `add_comment`, `aggregate_issues` |
|
||||
| Labels | `get_labels`, `suggest_labels`, `create_label`, `create_label_smart` |
|
||||
| Wiki | `list_wiki_pages`, `get_wiki_page`, `create_wiki_page`, `update_wiki_page`, `create_lesson`, `search_lessons` |
|
||||
| Milestones | `list_milestones`, `get_milestone`, `create_milestone`, `update_milestone`, `delete_milestone` |
|
||||
| Dependencies | `list_issue_dependencies`, `create_issue_dependency`, `remove_issue_dependency`, `get_execution_order` |
|
||||
| **Pull Requests** | `list_pull_requests`, `get_pull_request`, `get_pr_diff`, `get_pr_comments`, `create_pr_review`, `add_pr_comment` *(NEW in v3.0.0)* |
|
||||
| Validation | `validate_repo_org`, `get_branch_protection` |
|
||||
|
||||
### NetBox MCP Server (bundled in cmdb-assistant)
|
||||
### NetBox MCP Server (shared)
|
||||
|
||||
Comprehensive NetBox REST API integration for infrastructure management.
|
||||
|
||||
@@ -104,6 +141,17 @@ Comprehensive NetBox REST API integration for infrastructure management.
|
||||
| Virtualization | Clusters, VMs, Interfaces |
|
||||
| Extras | Tags, Custom Fields, Audit Log |
|
||||
|
||||
### Data Platform MCP Server (shared) *NEW*
|
||||
|
||||
pandas, PostgreSQL/PostGIS, and dbt integration for data engineering.
|
||||
|
||||
| Category | Tools |
|
||||
|----------|-------|
|
||||
| pandas | `read_csv`, `read_parquet`, `read_json`, `to_csv`, `to_parquet`, `describe`, `head`, `tail`, `filter`, `select`, `groupby`, `join`, `list_data`, `drop_data` |
|
||||
| PostgreSQL | `pg_connect`, `pg_query`, `pg_execute`, `pg_tables`, `pg_columns`, `pg_schemas` |
|
||||
| PostGIS | `st_tables`, `st_geometry_type`, `st_srid`, `st_extent` |
|
||||
| dbt | `dbt_parse`, `dbt_run`, `dbt_test`, `dbt_build`, `dbt_compile`, `dbt_ls`, `dbt_docs_generate`, `dbt_lineage` |
|
||||
|
||||
## Installation
|
||||
|
||||
### Prerequisites
|
||||
@@ -116,7 +164,7 @@ Comprehensive NetBox REST API integration for infrastructure management.
|
||||
|
||||
**Option 1 - CLI command (recommended):**
|
||||
```bash
|
||||
/plugin marketplace add https://gitea.hotserv.cloud/personal-projects/support-claude-mktplace.git
|
||||
/plugin marketplace add https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git
|
||||
```
|
||||
|
||||
**Option 2 - Settings file (for team distribution):**
|
||||
@@ -125,141 +173,122 @@ Add to `.claude/settings.json` in your target project:
|
||||
```json
|
||||
{
|
||||
"extraKnownMarketplaces": {
|
||||
"support-claude-mktplace": {
|
||||
"leo-claude-mktplace": {
|
||||
"source": {
|
||||
"source": "git",
|
||||
"url": "https://gitea.hotserv.cloud/personal-projects/support-claude-mktplace.git"
|
||||
"url": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Option 3 - Local development:**
|
||||
```bash
|
||||
# Clone the repository first
|
||||
git clone https://gitea.hotserv.cloud/personal-projects/support-claude-mktplace.git
|
||||
### Run Interactive Setup
|
||||
|
||||
# Then add from local path
|
||||
/plugin marketplace add /path/to/support-claude-mktplace
|
||||
After installing plugins, run the setup wizard:
|
||||
|
||||
```
|
||||
/initial-setup
|
||||
```
|
||||
|
||||
**Alternative SSH URL (for authenticated access):**
|
||||
The wizard handles everything:
|
||||
- Sets up MCP server (Python venv + dependencies)
|
||||
- Creates system config (`~/.config/claude/gitea.env`)
|
||||
- Guides you through adding your API token
|
||||
- Detects and validates your repository via API
|
||||
- Creates project config (`.env`)
|
||||
|
||||
**For new projects** (when system is already configured):
|
||||
```
|
||||
ssh://git@hotserv.tailc9b278.ts.net:2222/personal-projects/support-claude-mktplace.git
|
||||
/project-init
|
||||
```
|
||||
|
||||
### Configure MCP Server Dependencies
|
||||
|
||||
If using plugins with MCP servers (projman, cmdb-assistant), install dependencies:
|
||||
|
||||
```bash
|
||||
# Gitea MCP (for projman)
|
||||
cd plugins/projman/mcp-servers/gitea
|
||||
python3 -m venv .venv
|
||||
source .venv/bin/activate
|
||||
pip install -r requirements.txt
|
||||
deactivate
|
||||
|
||||
# NetBox MCP (for cmdb-assistant)
|
||||
cd ../../../cmdb-assistant/mcp-servers/netbox
|
||||
python3 -m venv .venv
|
||||
source .venv/bin/activate
|
||||
pip install -r requirements.txt
|
||||
deactivate
|
||||
**After moving a repository:**
|
||||
```
|
||||
/project-sync
|
||||
```
|
||||
|
||||
### Configure Credentials
|
||||
See [docs/CONFIGURATION.md](./docs/CONFIGURATION.md) for manual setup and advanced options.
|
||||
|
||||
**System-level credentials:**
|
||||
```bash
|
||||
mkdir -p ~/.config/claude
|
||||
## Verifying Plugin Installation
|
||||
|
||||
# Gitea credentials
|
||||
cat > ~/.config/claude/gitea.env << 'EOF'
|
||||
GITEA_URL=https://gitea.example.com
|
||||
GITEA_TOKEN=your_token
|
||||
GITEA_ORG=your_org
|
||||
EOF
|
||||
After installing plugins, the `/plugin` command may show `(no content)` - this is normal Claude Code behavior and doesn't indicate an error.
|
||||
|
||||
# NetBox credentials
|
||||
cat > ~/.config/claude/netbox.env << 'EOF'
|
||||
NETBOX_API_URL=https://netbox.example.com/api
|
||||
NETBOX_API_TOKEN=your_token
|
||||
EOF
|
||||
**To verify a plugin is installed correctly:**
|
||||
|
||||
chmod 600 ~/.config/claude/*.env
|
||||
```
|
||||
1. **Check installed plugins list:**
|
||||
```
|
||||
/plugin list
|
||||
```
|
||||
Look for `✔ plugin-name · Installed`
|
||||
|
||||
**Project-level settings:**
|
||||
```bash
|
||||
# In your target project root
|
||||
cat > .env << 'EOF'
|
||||
GITEA_REPO=your-repository-name
|
||||
EOF
|
||||
```
|
||||
2. **Test a plugin command directly:**
|
||||
```
|
||||
/git-flow:git-status
|
||||
/projman:sprint-status
|
||||
/clarity-assist:clarify
|
||||
```
|
||||
If the command executes and shows output, the plugin is working.
|
||||
|
||||
3. **Check for loading errors:**
|
||||
```
|
||||
/plugin list
|
||||
```
|
||||
Look for any `Plugin Loading Errors` section - this indicates manifest issues.
|
||||
|
||||
**Command format:** All plugin commands use the format `/plugin-name:command-name`
|
||||
|
||||
| Plugin | Test Command |
|
||||
|--------|--------------|
|
||||
| git-flow | `/git-flow:git-status` |
|
||||
| projman | `/projman:sprint-status` |
|
||||
| pr-review | `/pr-review:pr-summary` |
|
||||
| clarity-assist | `/clarity-assist:clarify` |
|
||||
| doc-guardian | `/doc-guardian:doc-audit` |
|
||||
| code-sentinel | `/code-sentinel:security-scan` |
|
||||
| claude-config-maintainer | `/claude-config-maintainer:config-analyze` |
|
||||
| cmdb-assistant | `/cmdb-assistant:cmdb-search` |
|
||||
| data-platform | `/data-platform:ingest` |
|
||||
|
||||
## Repository Structure

```
leo-claude-mktplace/
├── .claude-plugin/                   # Marketplace manifest
│   └── marketplace.json
├── mcp-servers/                      # SHARED MCP servers (v3.0.0+)
│   ├── gitea/                        # Gitea MCP (issues, PRs, wiki)
│   ├── netbox/                       # NetBox MCP (CMDB)
│   └── data-platform/                # Data engineering (pandas, PostgreSQL, dbt)
├── plugins/                          # All plugins
│   ├── projman/                      # Sprint management
│   │   ├── .claude-plugin/
│   │   ├── .mcp.json
│   │   ├── mcp-servers/              # Symlink to shared MCP server
│   │   │   └── gitea/
│   │   ├── commands/
│   │   ├── agents/
│   │   └── skills/
│   ├── git-flow/                     # Git workflow automation
│   ├── pr-review/                    # PR review
│   ├── clarity-assist/               # Prompt optimization
│   ├── data-platform/                # Data engineering (NEW)
│   ├── claude-config-maintainer/     # CLAUDE.md optimization
│   ├── cmdb-assistant/               # NetBox CMDB integration
│   │   ├── .claude-plugin/
│   │   ├── .mcp.json
│   │   ├── mcp-servers/              # Symlink to shared MCP server
│   │   │   └── netbox/
│   │   ├── commands/
│   │   └── agents/
│   ├── projman-pmo/                  # PMO coordination plugin (planned)
│   ├── project-hygiene/              # Cleanup automation
│   ├── doc-guardian/                 # Documentation drift detection
│   └── code-sentinel/                # Security scanning & refactoring
├── docs/                             # Documentation
│   ├── CANONICAL-PATHS.md            # Path reference
│   └── CONFIGURATION.md              # Setup guide
├── scripts/                          # Setup scripts
│   └── validate-marketplace.sh       # Marketplace compliance validation
└── CHANGELOG.md                      # Version history
```

## Key Features

### Parallel Execution
Tasks are batched by dependency graph for optimal parallel execution:
```
Batch 1 (parallel):   Task A, Task B, Task C
Batch 2 (parallel):   Task D, Task E (depend on Batch 1)
Batch 3 (sequential): Task F (depends on Batch 2)
```
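
In shell terms, the same ordering can be sketched with background jobs and `wait`; the task names below are hypothetical placeholders, not real marketplace commands:

```bash
# Batch-parallel execution: each batch runs concurrently, batches run in order
task_a & task_b & task_c &
wait            # Batch 1 must finish before Batch 2 starts
task_d & task_e &
wait            # Batch 2 must finish before Batch 3 starts
task_f          # Final sequential task
```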

### Naming Conventions
- **Tasks:** `[Sprint XX] <type>: <description>`
- **Branches:** `feat/`, `fix/`, `debug/` prefixes with issue numbers (examples below)
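
For instance, branches following this convention might look like the following; the issue numbers and descriptions are purely illustrative:

```bash
# Hypothetical branch names combining prefix, issue number, and description
git checkout -b feat/42-add-sprint-report
git checkout -b fix/57-handle-empty-milestone
git checkout -b debug/61-trace-mcp-startup
```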

### CLI Tools Blocked
All agents use MCP tools exclusively. CLI tools like `tea` or `gh` are forbidden to ensure consistent, auditable operations.

## Documentation

| Document | Description |
|----------|-------------|
| [CLAUDE.md](./CLAUDE.md) | Main project instructions |
| [CONFIGURATION.md](./docs/CONFIGURATION.md) | Centralized setup guide |
| [COMMANDS-CHEATSHEET.md](./docs/COMMANDS-CHEATSHEET.md) | All commands quick reference |
| [UPDATING.md](./docs/UPDATING.md) | Update guide for the marketplace |
| [CANONICAL-PATHS.md](./docs/CANONICAL-PATHS.md) | Authoritative path reference |
| [projman/CONFIGURATION.md](./plugins/projman/CONFIGURATION.md) | Projman setup guide |
| [DEBUGGING-CHECKLIST.md](./docs/DEBUGGING-CHECKLIST.md) | Systematic troubleshooting guide |
| [CHANGELOG.md](./CHANGELOG.md) | Version history |

## License

@@ -268,5 +297,4 @@ MIT License

## Support

- **Issues**: Contact repository maintainer
- **Repository**: `https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git`

@@ -2,65 +2,112 @@

**This file defines ALL valid paths in this repository. No exceptions. No inference. No assumptions.**

Last Updated: 2026-01-23 (v3.1.2)

---

## Repository Root Structure
```
leo-claude-mktplace/
├── .claude/                          # Claude Code local settings
├── .claude-plugin/                   # Marketplace manifest
│   └── marketplace.json
├── .scratch/                         # Transient work (auto-cleaned)
├── docs/                             # All documentation
│   ├── architecture/                 # Draw.io diagrams and specs
│   ├── CANONICAL-PATHS.md            # This file - single source of truth
│   ├── COMMANDS-CHEATSHEET.md        # All commands quick reference
│   ├── CONFIGURATION.md              # Centralized configuration guide
│   ├── DEBUGGING-CHECKLIST.md        # Systematic troubleshooting guide
│   └── UPDATING.md                   # Update guide
├── hooks/                            # Shared hooks (if any)
├── mcp-servers/                      # SHARED MCP servers (v3.0.0+)
│   ├── gitea/                        # Gitea MCP server
│   │   ├── mcp_server/
│   │   │   ├── server.py
│   │   │   ├── gitea_client.py
│   │   │   ├── config.py
│   │   │   └── tools/
│   │   │       ├── issues.py
│   │   │       ├── labels.py
│   │   │       ├── wiki.py
│   │   │       ├── milestones.py
│   │   │       ├── dependencies.py
│   │   │       └── pull_requests.py  # NEW in v3.0.0
│   │   ├── requirements.txt
│   │   └── .venv/
│   └── netbox/                       # NetBox MCP server
│       ├── mcp_server/
│       ├── requirements.txt
│       └── .venv/
├── plugins/                          # ALL plugins
│   ├── projman/                      # Sprint management
│   │   ├── .claude-plugin/
│   │   ├── .mcp.json
│   │   ├── mcp-servers/
│   │   │   └── gitea -> ../../../mcp-servers/gitea    # SYMLINK
│   │   ├── commands/
│   │   ├── agents/
│   │   ├── skills/
│   │   └── claude-md-integration.md
│   ├── doc-guardian/                 # Documentation drift detection
│   │   ├── .claude-plugin/
│   │   ├── hooks/
│   │   ├── commands/
│   │   ├── agents/
│   │   ├── skills/
│   │   └── claude-md-integration.md
│   ├── code-sentinel/                # Security scanning & refactoring
│   │   ├── .claude-plugin/
│   │   ├── hooks/
│   │   ├── commands/
│   │   ├── agents/
│   │   ├── skills/
│   │   └── claude-md-integration.md
│   ├── projman-pmo/
│   ├── cmdb-assistant/               # NetBox CMDB integration
│   │   ├── .claude-plugin/
│   │   ├── .mcp.json
│   │   ├── mcp-servers/
│   │   │   └── netbox -> ../../../mcp-servers/netbox  # SYMLINK
│   │   ├── commands/
│   │   ├── agents/
│   │   └── claude-md-integration.md
│   ├── claude-config-maintainer/
│   │   ├── .claude-plugin/
│   │   ├── commands/
│   │   ├── agents/
│   │   └── claude-md-integration.md
│   ├── project-hygiene/
│   │   ├── .claude-plugin/
│   │   ├── hooks/
│   │   └── claude-md-integration.md
│   ├── clarity-assist/               # NEW in v3.0.0
│   │   ├── .claude-plugin/
│   │   ├── commands/
│   │   ├── agents/
│   │   ├── skills/
│   │   └── claude-md-integration.md
│   ├── git-flow/                     # NEW in v3.0.0
│   │   ├── .claude-plugin/
│   │   ├── commands/
│   │   ├── agents/
│   │   ├── skills/
│   │   └── claude-md-integration.md
│   └── pr-review/                    # NEW in v3.0.0
│       ├── .claude-plugin/
│       ├── hooks/
│       ├── .mcp.json
│       ├── mcp-servers/
│       │   └── gitea -> ../../../mcp-servers/gitea    # SYMLINK
│       ├── commands/
│       ├── agents/
│       ├── skills/
│       └── claude-md-integration.md
├── scripts/                          # Setup and maintenance scripts
│   ├── setup.sh                      # Initial setup (create venvs, config templates)
│   ├── post-update.sh                # Post-update (rebuild venvs, verify symlinks)
│   ├── check-venv.sh                 # Check if venvs exist (for hooks)
│   └── validate-marketplace.sh       # Marketplace compliance validation
├── CLAUDE.md
├── README.md
├── LICENSE
```

@@ -83,31 +130,37 @@

| Plugin .mcp.json | `plugins/{plugin-name}/.mcp.json` | `plugins/projman/.mcp.json` |
| Plugin integration snippet | `plugins/{plugin-name}/claude-md-integration.md` | `plugins/projman/claude-md-integration.md` |

### MCP Server Paths (v3.0.0 Architecture)

MCP servers are **shared at repository root** with **symlinks** from plugins.

| Context | Pattern | Example |
|---------|---------|---------|
| Shared MCP server | `mcp-servers/{server}/` | `mcp-servers/gitea/` |
| MCP server code | `mcp-servers/{server}/mcp_server/` | `mcp-servers/gitea/mcp_server/` |
| MCP venv | `mcp-servers/{server}/.venv/` | `mcp-servers/gitea/.venv/` |
| Plugin symlink | `plugins/{plugin}/mcp-servers/{server}` | `plugins/projman/mcp-servers/gitea` |

### Symlink Pattern

Plugins that use MCP servers create symlinks:

```bash
# Run from the repository root (the target is relative to the link's location)
ln -s ../../../mcp-servers/gitea plugins/projman/mcp-servers/gitea
```

The symlink target is relative: `../../../mcp-servers/{server}`
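
To confirm a link points where it should, resolve it from the repository root; a quick sketch:

```bash
# The raw link target should match the documented pattern
readlink plugins/projman/mcp-servers/gitea    # expect: ../../../mcp-servers/gitea

# And the link should resolve to a real directory
test -d plugins/projman/mcp-servers/gitea/mcp_server && echo "symlink resolves OK"
```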

### Documentation Paths

| Type | Location |
|------|----------|
| Architecture diagrams | `docs/architecture/` |
| Workflow docs | `docs/workflows/` |
| This file | `docs/CANONICAL-PATHS.md` |
| Update guide | `docs/UPDATING.md` |
| Configuration guide | `docs/CONFIGURATION.md` |
| Commands cheat sheet | `docs/COMMANDS-CHEATSHEET.md` |
| Debugging checklist | `docs/DEBUGGING-CHECKLIST.md` |

---

@@ -125,15 +178,15 @@

2. Verify each path against patterns in this file
3. Show verification to user before proceeding

### Relative Path Calculation (v3.0.0)

From `plugins/projman/.mcp.json` to shared `mcp-servers/gitea/`:
```
plugins/projman/.mcp.json
→ Symlink at plugins/projman/mcp-servers/gitea points to ../../../mcp-servers/gitea
→ Uses ${CLAUDE_PLUGIN_ROOT}/mcp-servers/gitea/

Result in .mcp.json: ${CLAUDE_PLUGIN_ROOT}/mcp-servers/gitea/.venv/bin/python
```
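
Put together, a plugin's `.mcp.json` would point at the interpreter through the plugin root. The sketch below is illustrative: the server name and the `command`/`args` schema are assumptions, not copied from the repository:

```json
{
  "mcpServers": {
    "gitea": {
      "command": "${CLAUDE_PLUGIN_ROOT}/mcp-servers/gitea/.venv/bin/python",
      "args": ["-m", "mcp_server.server"]
    }
  }
}
```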

From `.claude-plugin/marketplace.json` to `plugins/projman/`:

@@ -152,18 +205,28 @@ Result: ./plugins/projman

| Wrong | Why | Correct |
|-------|-----|---------|
| `projman/` at root | Plugins go in `plugins/` | `plugins/projman/` |
| `./../../../plugins/projman` in marketplace | Wrong (old nested structure) | `./plugins/projman` |
| Direct path in .mcp.json to root mcp-servers | Use symlink | Symlink at `plugins/{plugin}/mcp-servers/` |
| Creating new mcp-servers inside plugins | Use shared + symlink | Symlink to `mcp-servers/` |
| Hardcoding absolute paths | Breaks portability | Use `${CLAUDE_PLUGIN_ROOT}` |

---

## Architecture Note (v3.0.0)

MCP servers are now **shared at repository root** with **symlinks** from plugins:

**Benefits:**
- Single source of truth for each MCP server
- Updates apply to all plugins automatically
- Reduced duplication
- Symlinks work with Claude Code caching

**Symlink Pattern:**
```
plugins/projman/mcp-servers/gitea -> ../../../mcp-servers/gitea
plugins/cmdb-assistant/mcp-servers/netbox -> ../../../mcp-servers/netbox
plugins/pr-review/mcp-servers/gitea -> ../../../mcp-servers/gitea
```
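
If these links go missing (for example after a copy that didn't preserve symlinks), they can be recreated from the repository root. A minimal sketch, using `-sfn` so stale entries are replaced:

```bash
# Recreate all three shared-server symlinks in one pass
ln -sfn ../../../mcp-servers/gitea  plugins/projman/mcp-servers/gitea
ln -sfn ../../../mcp-servers/gitea  plugins/pr-review/mcp-servers/gitea
ln -sfn ../../../mcp-servers/netbox plugins/cmdb-assistant/mcp-servers/netbox
```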

---

@@ -171,7 +234,11 @@

| Date | Change | By |
|------|--------|-----|
| 2026-01-20 | v3.0.0: MCP servers moved to root with symlinks | Claude Code |
| 2026-01-20 | v3.0.0: Added clarity-assist, git-flow, pr-review plugins | Claude Code |
| 2026-01-20 | v3.0.0: Added docs/CONFIGURATION.md | Claude Code |
| 2026-01-20 | v3.0.0: Renamed marketplace to leo-claude-mktplace | Claude Code |
| 2026-01-20 | Removed docs/references/ (obsolete planning docs) | Claude Code |
| 2026-01-19 | Added claude-md-integration.md path pattern | Claude Code |
| 2025-12-15 | Restructured: MCP servers bundled in plugins | Claude Code |
| 2025-12-12 | Initial creation | Claude Code |

217 docs/COMMANDS-CHEATSHEET.md Normal file
@@ -0,0 +1,217 @@

# Plugin Commands Cheat Sheet

Quick reference for all commands in the Leo Claude Marketplace.

---

## Command Reference Table

| Plugin | Command | Auto | Manual | Description |
|--------|---------|:----:|:------:|-------------|
| **projman** | `/sprint-plan` | | X | Start sprint planning with AI-guided architecture analysis and issue creation |
| **projman** | `/sprint-start` | | X | Begin sprint execution with dependency analysis and parallel task coordination |
| **projman** | `/sprint-status` | | X | Check current sprint progress and identify blockers |
| **projman** | `/review` | | X | Pre-sprint-close code quality review (debug artifacts, security, error handling) |
| **projman** | `/test-check` | | X | Run tests and verify coverage before sprint close |
| **projman** | `/sprint-close` | | X | Complete sprint and capture lessons learned to Gitea Wiki |
| **projman** | `/labels-sync` | | X | Synchronize label taxonomy from Gitea |
| **projman** | `/initial-setup` | | X | Full setup wizard: MCP server + system config + project config |
| **projman** | `/project-init` | | X | Quick project setup (assumes system config exists) |
| **projman** | `/project-sync` | | X | Sync config with git remote after repo move/rename |
| **projman** | *SessionStart hook* | X | | Detects git remote vs .env mismatch, warns to run /project-sync |
| **projman** | `/test-gen` | | X | Generate comprehensive tests for specified code |
| **projman** | `/debug-report` | | X | Run diagnostics and create structured issue in marketplace |
| **projman** | `/debug-review` | | X | Investigate diagnostic issues and propose fixes with approval gates |
| **git-flow** | `/commit` | | X | Create commit with auto-generated conventional message |
| **git-flow** | `/commit-push` | | X | Commit and push to remote in one operation |
| **git-flow** | `/commit-merge` | | X | Commit current changes, then merge into target branch |
| **git-flow** | `/commit-sync` | | X | Full sync: commit, push, and sync with upstream/base branch |
| **git-flow** | `/branch-start` | | X | Create new feature/fix/chore branch with naming conventions |
| **git-flow** | `/branch-cleanup` | | X | Remove merged branches locally and optionally on remote |
| **git-flow** | `/git-status` | | X | Enhanced git status with recommendations |
| **git-flow** | `/git-config` | | X | Configure git-flow settings for the project |
| **pr-review** | `/initial-setup` | | X | Setup wizard for pr-review (shares Gitea MCP with projman) |
| **pr-review** | `/project-init` | | X | Quick project setup for PR reviews |
| **pr-review** | `/project-sync` | | X | Sync config with git remote after repo move/rename |
| **pr-review** | *SessionStart hook* | X | | Detects git remote vs .env mismatch |
| **pr-review** | `/pr-review` | | X | Full multi-agent PR review with confidence scoring |
| **pr-review** | `/pr-summary` | | X | Quick summary of PR changes |
| **pr-review** | `/pr-findings` | | X | List and filter review findings by category/severity |
| **clarity-assist** | `/clarify` | | X | Full 4-D prompt optimization with ND accommodations |
| **clarity-assist** | `/quick-clarify` | | X | Rapid single-pass clarification for simple requests |
| **doc-guardian** | `/doc-audit` | | X | Full documentation audit - scans for doc drift |
| **doc-guardian** | `/doc-sync` | | X | Synchronize pending documentation updates |
| **doc-guardian** | *PostToolUse hook* | X | | Silently detects doc drift on Write/Edit |
| **code-sentinel** | `/security-scan` | | X | Full security audit (SQL injection, XSS, secrets, etc.) |
| **code-sentinel** | `/refactor` | | X | Apply refactoring patterns to improve code |
| **code-sentinel** | `/refactor-dry` | | X | Preview refactoring without applying changes |
| **code-sentinel** | *PreToolUse hook* | X | | Scans code before writing; blocks critical issues |
| **claude-config-maintainer** | `/config-analyze` | | X | Analyze CLAUDE.md for optimization opportunities |
| **claude-config-maintainer** | `/config-optimize` | | X | Optimize CLAUDE.md structure with preview/backup |
| **claude-config-maintainer** | `/config-init` | | X | Initialize new CLAUDE.md for a project |
| **cmdb-assistant** | `/initial-setup` | | X | Setup wizard for NetBox MCP server |
| **cmdb-assistant** | `/cmdb-search` | | X | Search NetBox for devices, IPs, sites |
| **cmdb-assistant** | `/cmdb-device` | | X | Manage network devices (create, view, update, delete) |
| **cmdb-assistant** | `/cmdb-ip` | | X | Manage IP addresses and prefixes |
| **cmdb-assistant** | `/cmdb-site` | | X | Manage sites, locations, racks, and regions |
| **project-hygiene** | *PostToolUse hook* | X | | Removes temp files, warns about unexpected root files |

---

## Plugins by Category

| Category | Plugins | Primary Use |
|----------|---------|-------------|
| **Setup** | projman, pr-review, cmdb-assistant | `/initial-setup`, `/project-init` |
| **Task Planning** | projman, clarity-assist | Sprint management, requirement clarification |
| **Code Quality** | code-sentinel, pr-review | Security scanning, PR reviews |
| **Documentation** | doc-guardian, claude-config-maintainer | Doc sync, CLAUDE.md maintenance |
| **Git Operations** | git-flow | Commits, branches, workflow automation |
| **Infrastructure** | cmdb-assistant | NetBox CMDB management |
| **Maintenance** | project-hygiene | Automatic cleanup |

---

## Hook-Based Automation Summary

| Plugin | Hook Event | Behavior |
|--------|------------|----------|
| **projman** | SessionStart | Checks git remote vs .env; warns if mismatch detected |
| **pr-review** | SessionStart | Checks git remote vs .env; warns if mismatch detected |
| **doc-guardian** | PostToolUse (Write/Edit) | Silently tracks documentation drift |
| **code-sentinel** | PreToolUse (Write/Edit) | Scans for security issues; blocks critical vulnerabilities |
| **project-hygiene** | PostToolUse (Write/Edit) | Cleans temp files, warns about misplaced files |

---

## Dev Workflow Examples

### Example 1: Starting a New Feature Sprint

A typical workflow for planning and executing a feature sprint:

```
1. /clarify                 # Clarify requirements if vague
2. /sprint-plan             # Plan the sprint with architecture analysis
3. /labels-sync             # Ensure labels are up-to-date
4. /sprint-start            # Begin execution with dependency ordering
5. /branch-start feat/...   # Create feature branch
   ... implement features ...
6. /commit                  # Commit with conventional message
7. /sprint-status           # Check progress mid-sprint
8. /review                  # Pre-close quality review
9. /test-check              # Verify test coverage
10. /sprint-close           # Capture lessons learned
```

### Example 2: Daily Development Cycle

Quick daily workflow with git-flow:

```
1. /git-status              # Check current state
2. /branch-start fix/...    # Start bugfix branch
   ... make changes ...
3. /commit                  # Auto-generate commit message
4. /commit-push             # Push to remote
5. /branch-cleanup          # Clean merged branches
```

### Example 3: Pull Request Review Workflow

Reviewing a PR before merge:

```
1. /pr-summary              # Quick overview of changes
2. /pr-review               # Full multi-agent review
3. /pr-findings             # Filter findings by severity
4. /security-scan           # Deep security audit if needed
```

### Example 4: Documentation Maintenance

Keeping docs in sync:

```
1. /doc-audit               # Scan for documentation drift
2. /doc-sync                # Apply pending updates
3. /config-analyze          # Check CLAUDE.md health
4. /config-optimize         # Optimize if needed
```

### Example 5: Code Refactoring Session

Safe refactoring with preview:

```
1. /refactor-dry            # Preview opportunities
2. /security-scan           # Baseline security check
3. /refactor                # Apply improvements
4. /test-check              # Verify nothing broke
5. /commit                  # Commit with descriptive message
```

### Example 6: Infrastructure Documentation

Managing infrastructure with CMDB:

```
1. /cmdb-search "server"    # Find existing devices
2. /cmdb-device view X      # Check device details
3. /cmdb-ip list            # List available IPs
4. /cmdb-site view Y        # Check site info
```

### Example 7: First-Time Setup (New Machine)

Setting up the marketplace for the first time:

```
1. /initial-setup           # Full setup: MCP + system config + project
                            # → Follow prompts for Gitea URL, org
                            # → Add token manually when prompted
                            # → Confirm repository name
2. # Restart Claude Code session
3. /labels-sync             # Sync Gitea labels
4. /sprint-plan             # Plan first sprint
```

### Example 8: New Project Setup (System Already Configured)

Adding a new project when system config exists:

```
1. /project-init            # Quick project setup
                            # → Confirms detected repo name
                            # → Creates .env
2. /labels-sync             # Sync Gitea labels
3. /sprint-plan             # Plan first sprint
```

---

## Quick Tips

- **Hooks run automatically** - doc-guardian and code-sentinel protect you without manual invocation
- **Use `/commit` over `git commit`** - generates better commit messages following conventions
- **Run `/review` before `/sprint-close`** - catches issues before closing the sprint
- **Use `/clarify` for vague requests** - especially helpful for complex requirements
- **`/refactor-dry` is safe** - always preview before applying refactoring changes

---

## MCP Server Requirements

Some plugins require MCP server connectivity:

| Plugin | MCP Server | Purpose |
|--------|------------|---------|
| projman | Gitea | Issues, PRs, wiki, labels, milestones |
| pr-review | Gitea | PR operations and reviews |
| cmdb-assistant | NetBox | Infrastructure CMDB |

Ensure credentials are configured in `~/.config/claude/gitea.env` or `~/.config/claude/netbox.env`.
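
A quick way to confirm those files are present and locked down before starting a session; a sketch (uses GNU `stat`, so adjust on macOS):

```bash
# Credential files should exist and be readable only by you (mode 600)
for f in ~/.config/claude/gitea.env ~/.config/claude/netbox.env; do
  if [ -f "$f" ]; then
    printf '%s: present (mode %s)\n' "$f" "$(stat -c '%a' "$f")"
  else
    printf '%s: MISSING\n' "$f"
  fi
done
```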

---

*Last Updated: 2026-01-22*

547 docs/CONFIGURATION.md Normal file
@@ -0,0 +1,547 @@
# Configuration Guide

Centralized configuration documentation for all plugins and MCP servers in the Leo Claude Marketplace.

---

## Quick Start

**After installing the marketplace and plugins via Claude Code:**

```
/initial-setup
```

The interactive wizard handles everything except manually adding your API tokens.

---

## Setup Flow Diagram

```
┌──────────────────────────────────────────────────────────────────┐
│                         FIRST TIME SETUP                         │
│                        (once per machine)                        │
└──────────────────────────────────────────────────────────────────┘
                                │
                                ▼
                         /initial-setup
                                │
        ┌───────────────────────┼───────────────────────┐
        ▼                       ▼                       ▼
┌─────────────┐       ┌─────────────────┐     ┌─────────────────┐
│   PHASE 1   │       │     PHASE 2     │     │     PHASE 3     │
│  Automated  │──────▶│    Automated    │────▶│   Interactive   │
│             │       │                 │     │                 │
│ • Check     │       │ • Find MCP path │     │ • Ask Gitea URL │
│   Python    │       │ • Create venv   │     │ • Ask Org name  │
│   version   │       │ • Install deps  │     │ • Create config │
└─────────────┘       └─────────────────┘     └─────────────────┘
                                │
                                ▼
                  ┌───────────────────────────┐
                  │         PHASE 4           │
                  │       USER ACTION         │
                  │                           │
                  │ Edit config file to add   │
                  │ API token (for security)  │
                  │                           │
                  │ nano ~/.config/claude/    │
                  │   gitea.env               │
                  └───────────────────────────┘
                                │
        ┌───────────────────────┼───────────────────────┐
        ▼                       ▼                       ▼
┌─────────────┐       ┌─────────────────┐     ┌─────────────────┐
│   PHASE 5   │       │     PHASE 6     │     │     PHASE 7     │
│ Interactive │       │    Automated    │     │    Automated    │
│             │       │                 │     │                 │
│ • Confirm   │       │ • Create .env   │     │ • Test API      │
│   repo name │       │ • Check         │     │ • Show summary  │
│   from git  │       │   .gitignore    │     │ • Restart note  │
└─────────────┘       └─────────────────┘     └─────────────────┘
                                │
                                ▼
                  ┌───────────────────────────┐
                  │      RESTART SESSION      │
                  │                           │
                  │   MCP tools available     │
                  │      after restart        │
                  └───────────────────────────┘


┌──────────────────────────────────────────────────────────────────┐
│                        NEW PROJECT SETUP                         │
│                        (once per project)                        │
└──────────────────────────────────────────────────────────────────┘
                      │
        ┌─────────────┴─────────────┐
        ▼                           ▼
  /project-init              /initial-setup
  (direct path)             (smart detection)
        │                           │
        │                 ┌─────────┴─────────┐
        │                 ▼                   ▼
        │           "Quick setup"       "Full setup"
        │            (skips to           (re-runs
        │          project config)      everything)
        │                 │                   │
        └─────────────────┴───────────────────┘
                          │
                          ▼
              ┌─────────────────────┐
              │   PROJECT CONFIG    │
              │                     │
              │ • Detect repo from  │
              │   git remote        │
              │ • Confirm with user │
              │ • Create .env       │
              │ • Check .gitignore  │
              └─────────────────────┘
                          │
                          ▼
                        Done!
```

---

## What Runs Automatically vs User Interaction

### `/initial-setup` - Full Setup

| Phase | Type | What Happens |
|-------|------|--------------|
| **1. Environment Check** | Automated | Verifies Python 3.10+ is installed |
| **2. MCP Server Setup** | Automated | Finds plugin path, creates venv, installs dependencies |
| **3. System Config Creation** | Interactive | Asks for Gitea URL and organization name |
| **4. Token Entry** | **User Action** | User manually edits config file to add API token |
| **5. Project Detection** | Interactive | Shows detected repo name, asks for confirmation |
| **6. Project Config** | Automated | Creates `.env` file, checks `.gitignore` |
| **7. Validation** | Automated | Tests API connectivity, shows summary |

### `/project-init` - Quick Project Setup

| Phase | Type | What Happens |
|-------|------|--------------|
| **1. Pre-flight Check** | Automated | Verifies system config exists |
| **2. Project Detection** | Interactive | Shows detected repo name, asks for confirmation |
| **3. Project Config** | Automated | Creates/updates `.env` file |
| **4. Gitignore Check** | Interactive | Asks to add `.env` to `.gitignore` if missing |

---

## Three Commands for Different Scenarios

| Command | When to Use | What It Does |
|---------|-------------|--------------|
| `/initial-setup` | First time on a machine | Full setup: MCP server + system config + project config |
| `/project-init` | Starting a new project | Quick setup: project config only (assumes system is ready) |
| `/project-sync` | After repo move/rename | Updates .env to match current git remote |

**Typical workflow:**
1. Install plugin → run `/initial-setup` (once per machine)
2. Start new project → run `/project-init` (once per project)
3. Repository moved? → run `/project-sync` (updates config)

**Smart features:**
- `/initial-setup` detects existing system config and offers quick project setup
- All commands validate org/repo via the Gitea API before saving (auto-fills if verified)
- The SessionStart hook automatically detects git remote vs .env mismatches

---

## Configuration Architecture

This marketplace uses a **hybrid configuration** approach:

```
┌─────────────────────────────────────────────────────────────────┐
│                 SYSTEM-LEVEL (once per machine)                 │
│                       ~/.config/claude/                         │
├─────────────────────────────────────────────────────────────────┤
│ gitea.env    │ GITEA_API_URL, GITEA_API_TOKEN                   │
│ netbox.env   │ NETBOX_API_URL, NETBOX_API_TOKEN                 │
│ git-flow.env │ GIT_WORKFLOW_STYLE, GIT_DEFAULT_BASE, etc.       │
└─────────────────────────────────────────────────────────────────┘
                              │
                              │ Shared across all projects
                              ▼
┌─────────────────────────────────────────────────────────────────┐
│                 PROJECT-LEVEL (once per project)                │
│                      <project-root>/.env                        │
├─────────────────────────────────────────────────────────────────┤
│ GITEA_ORG          │ Organization for this project              │
│ GITEA_REPO         │ Repository name for this project           │
│ GIT_WORKFLOW_STYLE │ (optional) Override system default         │
│ PR_REVIEW_*        │ (optional) PR review settings              │
└─────────────────────────────────────────────────────────────────┘
```

**Benefits:**
- Single token per service (update once, use everywhere)
- Easy multi-project setup (just run `/project-init` in each project)
- Security (tokens never committed to git, never typed into AI chat)
- Project isolation (each project can override defaults)

---

## Prerequisites

Before running `/initial-setup`:

1. **Python 3.10+** installed
   ```bash
   python3 --version   # Should be 3.10.0 or higher
   ```

2. **Git repository** initialized (for project setup)
   ```bash
   git status   # Should show an initialized repository
   ```

3. **Claude Code** installed and working with the marketplace

---

## Setup Methods

### Method 1: Interactive Wizard (Recommended)

Run the setup wizard in Claude Code:

```
/initial-setup
```

The wizard will guide you through each step interactively.

**Note:** After first-time setup, you'll need to restart your Claude Code session for MCP tools to become available.

### Method 2: Manual Setup

If you prefer to set up manually or need to troubleshoot:

#### Step 1: MCP Server Setup

```bash
# Navigate to marketplace directory
cd /path/to/leo-claude-mktplace

# Set up Gitea MCP server
cd mcp-servers/gitea
python3 -m venv .venv
source .venv/bin/activate
pip install -r requirements.txt
deactivate

# (Optional) Set up NetBox MCP server
cd ../netbox
python3 -m venv .venv
source .venv/bin/activate
pip install -r requirements.txt
deactivate
```

#### Step 2: System Configuration

```bash
mkdir -p ~/.config/claude

# Gitea configuration (credentials only)
cat > ~/.config/claude/gitea.env << 'EOF'
GITEA_API_URL=https://gitea.example.com
GITEA_API_TOKEN=your_token_here
EOF
chmod 600 ~/.config/claude/gitea.env
```

#### Step 3: Project Configuration

In each project root:

```bash
cat > .env << 'EOF'
GITEA_ORG=your-organization
GITEA_REPO=your-repo-name
EOF
```

Add `.env` to `.gitignore` if it is not already there.

### Method 3: Automation Script (CI/Scripting)

For automated setups or CI environments:

```bash
cd /path/to/leo-claude-mktplace
./scripts/setup.sh
```

This script is useful for CI/CD pipelines and bulk provisioning.

---

## Configuration Reference

### System-Level Files

Located in `~/.config/claude/`:

| File | Required By | Purpose |
|------|-------------|---------|
| `gitea.env` | projman, pr-review | Gitea API credentials |
| `netbox.env` | cmdb-assistant | NetBox API credentials |
| `git-flow.env` | git-flow | Default git workflow settings |

### Gitea Configuration

```bash
# ~/.config/claude/gitea.env
GITEA_API_URL=https://gitea.example.com/api/v1
GITEA_API_TOKEN=your_gitea_token_here
```

| Variable | Description | Example |
|----------|-------------|---------|
| `GITEA_API_URL` | Gitea API endpoint (with `/api/v1`) | `https://gitea.example.com/api/v1` |
| `GITEA_API_TOKEN` | Personal access token | `abc123...` |

**Note:** `GITEA_ORG` is configured at the project level (see below) since different projects may belong to different organizations.

**Generating a Gitea Token:**
1. Log into Gitea → **User Icon** → **Settings**
2. **Applications** tab → **Manage Access Tokens**
3. **Generate New Token** with permissions:
   - `repo` (all sub-permissions)
   - `read:org`
   - `read:user`
   - `write:repo` (for wiki access)
4. Copy the token immediately (it is shown only once)

### NetBox Configuration

```bash
# ~/.config/claude/netbox.env
NETBOX_API_URL=https://netbox.example.com
NETBOX_API_TOKEN=your_netbox_token_here
```

| Variable | Description | Example |
|----------|-------------|---------|
| `NETBOX_API_URL` | NetBox base URL | `https://netbox.example.com` |
| `NETBOX_API_TOKEN` | API token | `abc123...` |

### Git-Flow Configuration

```bash
# ~/.config/claude/git-flow.env
GIT_WORKFLOW_STYLE=feature-branch
GIT_DEFAULT_BASE=development
GIT_AUTO_DELETE_MERGED=true
GIT_AUTO_PUSH=false
GIT_PROTECTED_BRANCHES=main,master,development,staging,production
GIT_COMMIT_STYLE=conventional
GIT_CO_AUTHOR=true
```

| Variable | Default | Description |
|----------|---------|-------------|
| `GIT_WORKFLOW_STYLE` | `feature-branch` | Branching strategy |
| `GIT_DEFAULT_BASE` | `development` | Default base branch |
| `GIT_AUTO_DELETE_MERGED` | `true` | Delete merged branches |
| `GIT_AUTO_PUSH` | `false` | Auto-push after commit |
| `GIT_PROTECTED_BRANCHES` | `main,master,...` | Protected branches |
| `GIT_COMMIT_STYLE` | `conventional` | Commit message style |
| `GIT_CO_AUTHOR` | `true` | Include Claude co-author |

---

## Project-Level Configuration

Create `.env` in each project root:

```bash
# Required for projman, pr-review
GITEA_ORG=your-organization
GITEA_REPO=your-repo-name

# Optional: Override git-flow defaults
GIT_WORKFLOW_STYLE=pr-required
GIT_DEFAULT_BASE=main

# Optional: PR review settings
PR_REVIEW_CONFIDENCE_THRESHOLD=0.5
PR_REVIEW_AUTO_SUBMIT=false
```

| Variable | Required | Description |
|----------|----------|-------------|
| `GITEA_ORG` | Yes | Gitea organization for this project |
| `GITEA_REPO` | Yes | Repository name (must match Gitea exactly) |
| `GIT_WORKFLOW_STYLE` | No | Override system default |
| `PR_REVIEW_*` | No | PR review settings |

---

## Plugin Configuration Summary

| Plugin | System Config | Project Config | Setup Commands |
|--------|---------------|----------------|----------------|
| **projman** | gitea.env | .env (GITEA_ORG, GITEA_REPO) | `/initial-setup`, `/project-init`, `/project-sync` |
| **pr-review** | gitea.env | .env (GITEA_ORG, GITEA_REPO) | `/initial-setup`, `/project-init`, `/project-sync` |
| **git-flow** | git-flow.env (optional) | .env (optional) | None needed |
| **clarity-assist** | None | None | None needed |
| **cmdb-assistant** | netbox.env | None | `/initial-setup` |
| **doc-guardian** | None | None | None needed |
| **code-sentinel** | None | None | None needed |
| **project-hygiene** | None | None | None needed |
| **claude-config-maintainer** | None | None | None needed |

---

## Multi-Project Workflow

Once system-level config is set up, adding new projects is simple:

**Option 1: Use `/project-init` (faster)**
```
cd ~/projects/new-project
/project-init
```

**Option 2: Use `/initial-setup` (auto-detects)**
```
cd ~/projects/new-project
/initial-setup
# → Detects system config exists
# → Offers "Quick project setup" option
```

Both approaches work. Use `/project-init` when you know the system is already configured.

---

## Automatic Validation Features

### API Validation

When running `/initial-setup`, `/project-init`, or `/project-sync`, the commands:

1. **Detect** organization and repository from the git remote URL
2. **Validate** via Gitea API: `GET /api/v1/repos/{org}/{repo}`
3. **Auto-fill** if the repository exists and is accessible (no confirmation needed)
4. **Ask for confirmation** only if validation fails (404 or permission error)

This catches typos and permission issues before saving configuration.
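
The same check can be done by hand; a sketch, assuming the system and project config files described above are in place:

```bash
# Manually validate org/repo against the Gitea API (200 = OK, 404 = typo or no access)
source ~/.config/claude/gitea.env
source .env
curl -s -o /dev/null -w '%{http_code}\n' \
  -H "Authorization: token $GITEA_API_TOKEN" \
  "$GITEA_API_URL/repos/$GITEA_ORG/$GITEA_REPO"
```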

### Mismatch Detection (SessionStart Hook)

When you start a Claude Code session, a hook automatically:

1. Reads `GITEA_ORG` and `GITEA_REPO` from `.env`
2. Compares with current `git remote get-url origin`
3. **Warns** if mismatch detected: "Repository location mismatch. Run `/project-sync` to update."

This helps when you:
- Move a repository to a different organization
- Rename a repository
- Clone a repo but forget to update `.env`
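
A rough sketch of the comparison the hook performs (the real hook's implementation may differ):

```bash
# Compare the configured org/repo against the actual git remote
source .env
remote=$(git remote get-url origin)
case "$remote" in
  *"$GITEA_ORG/$GITEA_REPO"*) echo "Config matches remote." ;;
  *) echo "Repository location mismatch. Run /project-sync to update." ;;
esac
```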

---

## Verification

### Test Gitea Connection

```bash
source ~/.config/claude/gitea.env
curl -H "Authorization: token $GITEA_API_TOKEN" "$GITEA_API_URL/user"
```

### Verify Project Setup

In Claude Code, after restarting your session:
```
/labels-sync
```

If this works, your setup is complete.

---

## Troubleshooting

### MCP tools not available

**Cause:** Session wasn't restarted after setup.
**Solution:** Exit Claude Code and start a new session.

### "Configuration not found" error

```bash
# Check system config exists
ls -la ~/.config/claude/gitea.env

# Check permissions (should be 600)
stat ~/.config/claude/gitea.env
```

### Authentication failed

```bash
# Test token directly
source ~/.config/claude/gitea.env
curl -H "Authorization: token $GITEA_API_TOKEN" "$GITEA_API_URL/user"
```

If you get 401, regenerate your token in Gitea.

### MCP server won't start

```bash
# Check venv exists
ls /path/to/mcp-servers/gitea/.venv

# Reinstall if missing
cd /path/to/mcp-servers/gitea
rm -rf .venv
python3 -m venv .venv
source .venv/bin/activate
pip install -r requirements.txt
deactivate
```

### Wrong repository

```bash
# Check project .env
cat .env

# Verify GITEA_REPO matches the Gitea repository name exactly
```

---

## Security Best Practices

1. **Never commit tokens**
   - Keep credentials in `~/.config/claude/` only
   - Add `.env` to `.gitignore`

2. **Secure configuration files**
   ```bash
   chmod 600 ~/.config/claude/*.env
   ```

3. **Never type tokens into AI chat**
   - Always edit config files directly in your editor
   - The `/initial-setup` wizard respects this

4. **Rotate tokens periodically**
   - Every 6-12 months
   - Immediately if compromised

5. **Minimum permissions**
   - Only grant required token permissions
   - Use separate tokens for different environments

258 docs/DEBUGGING-CHECKLIST.md Normal file
@@ -0,0 +1,258 @@

# Debugging Checklist for Marketplace Troubleshooting

**Purpose:** Systematic approach to diagnose and fix plugin loading issues.

Last Updated: 2026-01-22

---

## Step 1: Identify the Loading Path

Claude Code loads plugins from different locations depending on context:

| Location | Path | When Used |
|----------|------|-----------|
| **Source** | `~/claude-plugins-work/` | When developing in this directory |
| **Installed** | `~/.claude/plugins/marketplaces/leo-claude-mktplace/` | After marketplace install |
| **Cache** | `~/.claude/` | Plugin metadata, settings |

**Determine which path Claude is using:**

```bash
# Check if installed marketplace exists
ls -la ~/.claude/plugins/marketplaces/leo-claude-mktplace/

# Check Claude's current plugin loading
cat ~/.claude/settings.local.json | grep -A5 "mcpServers"
```

**Key insight:** If you're editing source but Claude uses the installed copy, your changes won't take effect.

---

## Step 2: Verify Files Exist at Runtime Location

Check the files Claude will actually load:

```bash
# For installed marketplace
RUNTIME=~/.claude/plugins/marketplaces/leo-claude-mktplace

# Check MCP server exists
ls -la $RUNTIME/mcp-servers/gitea/
ls -la $RUNTIME/mcp-servers/netbox/

# Check plugin manifests
ls -la $RUNTIME/plugins/projman/.claude-plugin/plugin.json
ls -la $RUNTIME/plugins/pr-review/.claude-plugin/plugin.json

# Check .mcp.json files
cat $RUNTIME/plugins/projman/.mcp.json
```

---

## Step 3: Verify Virtual Environments Exist

**This is the most common failure point after installation.**

MCP servers require Python venvs to exist at the INSTALLED location:

```bash
RUNTIME=~/.claude/plugins/marketplaces/leo-claude-mktplace

# Check venvs exist
ls -la $RUNTIME/mcp-servers/gitea/.venv/bin/python
ls -la $RUNTIME/mcp-servers/netbox/.venv/bin/python

# If missing, create them:
cd $RUNTIME && ./scripts/setup.sh
```

**Common error:** "X MCP servers failed to start" = venvs don't exist in the installed path.

---

## Step 4: Verify Symlink Resolution

Plugins use symlinks to shared MCP servers. Verify they resolve correctly:

```bash
RUNTIME=~/.claude/plugins/marketplaces/leo-claude-mktplace

# Check symlinks exist and resolve
readlink -f $RUNTIME/plugins/projman/mcp-servers/gitea
readlink -f $RUNTIME/plugins/pr-review/mcp-servers/gitea
readlink -f $RUNTIME/plugins/cmdb-assistant/mcp-servers/netbox

# Should resolve to:
#   $RUNTIME/mcp-servers/gitea
#   $RUNTIME/mcp-servers/netbox
```

**If broken:** Symlinks are relative. If the directory structure differs, they'll break.

---

## Step 5: Test MCP Server Startup

Manually test whether the MCP server can start:

```bash
RUNTIME=~/.claude/plugins/marketplaces/leo-claude-mktplace

# Test Gitea MCP
cd $RUNTIME/mcp-servers/gitea
PYTHONPATH=. .venv/bin/python -c "from mcp_server.server import main; print('OK')"

# Test NetBox MCP
cd $RUNTIME/mcp-servers/netbox
PYTHONPATH=. .venv/bin/python -c "from mcp_server.server import main; print('OK')"
```

**If import fails:** Check that requirements.txt was installed and the Python version is compatible.

---

## Step 6: Verify Configuration Files

Check environment variables are set:

```bash
# System-level credentials (should exist)
cat ~/.config/claude/gitea.env
# Should contain: GITEA_API_URL, GITEA_API_TOKEN

cat ~/.config/claude/netbox.env
# Should contain: NETBOX_API_URL, NETBOX_API_TOKEN

# Project-level config (in target project)
cat /path/to/project/.env
# Should contain: GITEA_ORG, GITEA_REPO
```

---

## Step 7: Verify Hooks Configuration

Check hooks are valid:

```bash
RUNTIME=~/.claude/plugins/marketplaces/leo-claude-mktplace

# List all hooks.json files
find $RUNTIME/plugins -name "hooks.json" -exec echo "=== {} ===" \; -exec cat {} \;

# Verify hook events are valid
# Valid:   PreToolUse, PostToolUse, UserPromptSubmit, SessionStart, SessionEnd,
#          Notification, Stop, SubagentStop, PreCompact
# INVALID: task-completed, file-changed, git-commit-msg-needed
```
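
If `jq` is available, the same files can be checked for parse errors in one pass; a sketch, with `RUNTIME` set as above:

```bash
# Flag hooks.json files that are not valid JSON (requires jq)
find "$RUNTIME/plugins" -name hooks.json | while read -r f; do
  jq -e . "$f" >/dev/null 2>&1 || echo "invalid JSON: $f"
done
```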

---

## Quick Diagnostic Commands

Run these to quickly identify issues:

```bash
RUNTIME=~/.claude/plugins/marketplaces/leo-claude-mktplace

echo "=== Installation Status ==="
[ -d "$RUNTIME" ] && echo "Installed: YES" || echo "Installed: NO"

echo -e "\n=== Virtual Environments ==="
[ -f "$RUNTIME/mcp-servers/gitea/.venv/bin/python" ] && echo "Gitea venv: OK" || echo "Gitea venv: MISSING"
[ -f "$RUNTIME/mcp-servers/netbox/.venv/bin/python" ] && echo "NetBox venv: OK" || echo "NetBox venv: MISSING"

echo -e "\n=== Symlinks ==="
[ -L "$RUNTIME/plugins/projman/mcp-servers/gitea" ] && echo "projman->gitea: OK" || echo "projman->gitea: MISSING"
[ -L "$RUNTIME/plugins/pr-review/mcp-servers/gitea" ] && echo "pr-review->gitea: OK" || echo "pr-review->gitea: MISSING"
[ -L "$RUNTIME/plugins/cmdb-assistant/mcp-servers/netbox" ] && echo "cmdb-assistant->netbox: OK" || echo "cmdb-assistant->netbox: MISSING"

echo -e "\n=== Config Files ==="
[ -f ~/.config/claude/gitea.env ] && echo "gitea.env: OK" || echo "gitea.env: MISSING"
[ -f ~/.config/claude/netbox.env ] && echo "netbox.env: OK" || echo "netbox.env: MISSING"
```

---

## Common Issues and Fixes

| Issue | Symptom | Fix |
|-------|---------|-----|
| Missing venvs | "X MCP servers failed" | `cd ~/.claude/plugins/marketplaces/leo-claude-mktplace && ./scripts/setup.sh` |
| Broken symlinks | MCP tools not available | Reinstall marketplace or manually recreate symlinks |
| Wrong path edits | Changes don't take effect | Edit installed path or reinstall after source changes |
| Missing credentials | MCP connection errors | Create `~/.config/claude/gitea.env` with API credentials |
| Invalid hook events | Hooks don't fire | Use only valid event names (see Step 7) |

---

## After Fixing Issues

1. **Restart Claude Code** - Plugins are loaded at startup
2. **Verify the fix works** - Run a simple command that uses the MCP
3. **Document the issue** - If it's a new failure mode, add it to this checklist

---

## Cache Clearing: When It's Safe vs Destructive

**⚠️ CRITICAL: Never clear the plugin cache mid-session.**

### Why Cache Clearing Breaks MCP Tools

When Claude Code starts a session:
1. MCP tools are loaded from the cache directory
2. Tool definitions include **absolute paths** to the venv (e.g., `~/.claude/plugins/cache/.../venv/`)
3. These paths are cached in session memory
4. Deleting the cache removes the venv, but the session still references the old paths
5. Any MCP tool making HTTP requests then fails with TLS certificate errors

### When Cache Clearing is SAFE

| Scenario | Safe? | Action |
|----------|-------|--------|
| Before starting Claude Code | ✅ Yes | Clear cache, then start session |
| Between sessions | ✅ Yes | Clear cache after `/exit`, before next session |
| During a session | ❌ NO | Never - will break MCP tools |
| After plugin source edits | ❌ NO | Restart the session instead |

### Recovery: MCP Tools Broken Mid-Session

If you accidentally cleared the cache during a session and MCP tools fail:

```
Error: Could not find a suitable TLS CA certificate bundle, invalid path:
/home/.../.claude/plugins/cache/.../certifi/cacert.pem
```

**Fix:**
1. Exit the current session (`/exit` or Ctrl+C)
2. Start a new Claude Code session
3. MCP tools will reload from the reinstalled cache

### Correct Workflow for Plugin Development

1. Make changes to plugin source files
2. Run `./scripts/verify-hooks.sh` (verifies hook types)
3. Tell the user: "Please restart Claude Code for changes to take effect"
4. **Do NOT clear the cache** - a session restart handles reloading

---

## Automated Diagnostics

Use these commands for automated checking:

- `/debug-report` - Run full diagnostics, create an issue if problems are found
- `/debug-review` - Investigate existing diagnostic issues and propose fixes

---

## Related Documentation

- `CLAUDE.md` - Installation Paths and Troubleshooting sections
- `docs/CONFIGURATION.md` - Setup and configuration guide
- `docs/UPDATING.md` - Update procedures

169 docs/UPDATING.md
@@ -1,24 +1,71 @@

# Updating Leo Claude Marketplace

This guide covers how to update your local installation when new versions are released.

---

## ⚠️ CRITICAL: Run Setup in Installed Location

When Claude Code installs a marketplace, it copies files to `~/.claude/plugins/marketplaces/` but **does NOT create Python virtual environments**. You must run setup manually after installation or update.

**After installing or updating the marketplace:**

```bash
cd ~/.claude/plugins/marketplaces/leo-claude-mktplace && ./scripts/setup.sh
```

This creates the required `.venv` directories for MCP servers. Without this step, **all MCP servers will fail to start**.

---

## Quick Update (Source Repository)

```bash
# 1. Pull latest changes to source
cd /path/to/leo-claude-mktplace
git pull origin main

# 2. Run post-update script (updates source repo venvs)
./scripts/post-update.sh

# 3. CRITICAL: Run setup in installed marketplace location
cd ~/.claude/plugins/marketplaces/leo-claude-mktplace && ./scripts/setup.sh
```

**Then restart your Claude Code session** to load any changes.

---

## What the Post-Update Script Does

1. **Updates Python dependencies** for MCP servers (gitea, netbox)
2. **Shows recent changelog entries** so you know what changed
3. **Validates your configuration** is still compatible

---

## After Updating: Re-run Setup if Needed

### When to Re-run `/initial-setup`

You typically **don't need** to re-run setup after updates. However, re-run if:

- The changelog mentions **new required environment variables**
- The changelog mentions **breaking changes** to configuration
- MCP tools stop working after the update

### For Existing Projects

If an update requires new project-level configuration:

```
/project-init
```

This will detect existing settings and only add what's missing.

---

## Manual Steps After Update

Some updates may require manual configuration changes.

If the changelog mentions new environment variables:

1. Check the variable name and purpose in the changelog
2. Add it to the appropriate config file:
   - System variables → `~/.config/claude/gitea.env` or `netbox.env`
   - Project variables → `.env` in your project root

### New MCP Server Features

If a new MCP server tool is added:

1. The post-update script handles dependency installation
2. Check the plugin documentation for usage
3. New tools are available immediately after a session restart

### Breaking Changes

Breaking changes will be clearly marked in CHANGELOG.md with migration instructions.

### Setup Script and Configuration Workflow Changes

When updating, review whether changes affect the setup workflow:

1. **Check for setup command changes:**
   ```bash
   git diff HEAD~1 plugins/*/commands/initial-setup.md
   git diff HEAD~1 plugins/*/commands/project-init.md
   git diff HEAD~1 plugins/*/commands/project-sync.md
   ```

2. **Check for hook changes:**
   ```bash
   git diff HEAD~1 plugins/*/hooks/hooks.json
   ```

3. **Check for configuration structure changes:**
   ```bash
   git diff HEAD~1 docs/CONFIGURATION.md
   ```

**If setup commands changed:**
- Review what's new (new validation steps, new prompts, etc.)
- Consider re-running `/initial-setup` or `/project-init` to benefit from improvements
- Existing configurations remain valid unless the changelog notes breaking changes

**If hooks changed:**
- Restart your Claude Code session to load the new hooks
- New hooks (like SessionStart validation) activate automatically

**If configuration structure changed:**
- Check whether new variables are required
- Run `/project-sync` if repository detection logic improved

---

## Troubleshooting Updates

### Dependencies fail to install

```bash
# Rebuild virtual environment
cd mcp-servers/gitea
rm -rf .venv
python3 -m venv .venv
source .venv/bin/activate
pip install -r requirements.txt
deactivate
```

### Configuration no longer works

1. Check CHANGELOG.md for breaking changes
2. Run `/initial-setup` to re-validate and fix configuration
3. Compare your config files with the documentation in `docs/CONFIGURATION.md`

### MCP server won't start after update

**Most common cause:** Virtual environments don't exist in the installed marketplace.

```bash
# Fix: Run setup in installed location
cd ~/.claude/plugins/marketplaces/leo-claude-mktplace && ./scripts/setup.sh
```

If that doesn't work:

1. Check Python version: `python3 --version` (requires 3.10+)
2. Verify the venv exists in the INSTALLED location:
   ```bash
   ls ~/.claude/plugins/marketplaces/leo-claude-mktplace/mcp-servers/gitea/.venv
   ls ~/.claude/plugins/marketplaces/leo-claude-mktplace/mcp-servers/netbox/.venv
   ```
3. If missing, the symlinks won't resolve. Run setup.sh as shown above.
4. Restart your Claude Code session
5. Check logs for specific errors

### "X MCP servers failed" on startup

This almost always means the venvs don't exist in the installed marketplace:

```bash
cd ~/.claude/plugins/marketplaces/leo-claude-mktplace && ./scripts/setup.sh
```

Then restart Claude Code.

### New commands not available

1. Restart your Claude Code session
2. Verify the plugin is still installed
3. Check if the command requires additional setup

---

## Version Pinning

To stay on a specific version:

```bash
# List available versions
git tag

# Checkout specific version
git checkout v3.0.0

# Run post-update
./scripts/post-update.sh
```

---

## Checking Current Version

The version is displayed in the main README.md title and in `CHANGELOG.md`.

```bash
# Check version from changelog
head -20 CHANGELOG.md
```

---

## Getting Help

- Check `plugins/projman/README.md` for projman documentation
- Check `plugins/projman/CONFIGURATION.md` for the setup guide
|
||||
- Review CHANGELOG.md for recent changes
|
||||
- Check `docs/CONFIGURATION.md` for setup guide
|
||||
- Check `docs/COMMANDS-CHEATSHEET.md` for command reference
|
||||
- Review `CHANGELOG.md` for recent changes
|
||||
- Search existing issues in Gitea
|
||||
|
||||
mcp-servers/data-platform/README.md (new file, 131 lines)
# Data Platform MCP Server

MCP Server providing pandas, PostgreSQL/PostGIS, and dbt tools for Claude Code.

## Features

- **pandas Tools**: DataFrame operations with Arrow IPC data_ref persistence
- **PostgreSQL Tools**: Database queries with asyncpg connection pooling
- **PostGIS Tools**: Spatial data operations
- **dbt Tools**: Build-tool wrapper with pre-execution validation

## Installation

```bash
cd mcp-servers/data-platform
python -m venv .venv
source .venv/bin/activate  # On Windows: .venv\Scripts\activate
pip install -r requirements.txt
```

## Configuration

### System-Level (PostgreSQL credentials)

Create `~/.config/claude/postgres.env`:

```env
POSTGRES_URL=postgresql://user:password@host:5432/database
```

### Project-Level (dbt paths)

Create `.env` in your project root:

```env
DBT_PROJECT_DIR=/path/to/dbt/project
DBT_PROFILES_DIR=/path/to/.dbt
DATA_PLATFORM_MAX_ROWS=100000
```

## Tools

### pandas Tools (14 tools)

| Tool | Description |
|------|-------------|
| `read_csv` | Load CSV file into DataFrame |
| `read_parquet` | Load Parquet file into DataFrame |
| `read_json` | Load JSON/JSONL file into DataFrame |
| `to_csv` | Export DataFrame to CSV file |
| `to_parquet` | Export DataFrame to Parquet file |
| `describe` | Get statistical summary of DataFrame |
| `head` | Get first N rows of DataFrame |
| `tail` | Get last N rows of DataFrame |
| `filter` | Filter DataFrame rows by condition |
| `select` | Select specific columns from DataFrame |
| `groupby` | Group DataFrame and aggregate |
| `join` | Join two DataFrames |
| `list_data` | List all stored DataFrames |
| `drop_data` | Remove a DataFrame from storage |

### PostgreSQL Tools (6 tools)

| Tool | Description |
|------|-------------|
| `pg_connect` | Test connection and return status |
| `pg_query` | Execute SELECT, return as data_ref |
| `pg_execute` | Execute INSERT/UPDATE/DELETE |
| `pg_tables` | List all tables in schema |
| `pg_columns` | Get column info for table |
| `pg_schemas` | List all schemas |
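Query results come back as a `data_ref` in the shared DataStore, so large result sets can be handed straight to the pandas tools without re-serializing rows. A minimal sketch of that flow, calling the server classes directly rather than through an MCP client; the `orders` table and `amount` column are hypothetical, and `POSTGRES_URL` is assumed to be configured:

```python
import asyncio

from mcp_server.pandas_tools import PandasTools
from mcp_server.postgres_tools import PostgresTools


async def main():
    pg = PostgresTools()
    pandas_tools = PandasTools()

    # Parameterized SELECT; the rows land in the shared DataStore and
    # only a data_ref string comes back. (Hypothetical table/column.)
    result = await pg.pg_query(
        "SELECT * FROM orders WHERE amount > $1",
        params=[100],
        name="big_orders",
    )
    if result.get("error") or not result.get("data_ref"):
        raise SystemExit(result)

    # The same data_ref works across tool classes, since both share
    # the DataStore singleton.
    summary = await pandas_tools.describe(result["data_ref"])
    print(summary["shape"], summary["null_counts"])


asyncio.run(main())
```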
### PostGIS Tools (4 tools)

| Tool | Description |
|------|-------------|
| `st_tables` | List PostGIS-enabled tables |
| `st_geometry_type` | Get geometry type of column |
| `st_srid` | Get SRID of geometry column |
| `st_extent` | Get bounding box of geometries |

### dbt Tools (8 tools)

| Tool | Description |
|------|-------------|
| `dbt_parse` | Validate project (pre-execution) |
| `dbt_run` | Run models with selection |
| `dbt_test` | Run tests |
| `dbt_build` | Run + test |
| `dbt_compile` | Compile SQL without executing |
| `dbt_ls` | List resources |
| `dbt_docs_generate` | Generate documentation |
| `dbt_lineage` | Get model dependencies |
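Because `dbt_run` and `dbt_build` always call `dbt_parse` first, a failed parse short-circuits before any model executes. A minimal sketch of driving the wrapper directly, assuming `DBT_PROJECT_DIR` resolves and using a hypothetical model name `my_model`:

```python
import asyncio

from mcp_server.dbt_tools import DbtTools


async def main():
    dbt = DbtTools()

    # The pre-flight check runs automatically inside dbt_run, but it
    # can also be invoked on its own.
    parse = await dbt.dbt_parse()
    print(parse)

    # Run a model plus its upstream dependencies ("+" selector);
    # "my_model" is a hypothetical model name.
    result = await dbt.dbt_run(select="+my_model")
    if not result.get("success"):
        print(result.get("stderr") or result.get("error"))


asyncio.run(main())
```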
## data_ref System

All DataFrame operations use a `data_ref` system to persist data across tool calls:

1. **Load data**: Returns a `data_ref` string (e.g., `"df_a1b2c3d4"`)
2. **Use the data_ref**: Pass it to other tools (filter, join, export)
3. **List data**: Use `list_data` to see all stored DataFrames
4. **Clean up**: Use `drop_data` when done

### Example Flow

```
read_csv("data.csv") → {"data_ref": "sales_data", "rows": 1000}
filter("sales_data", "amount > 100") → {"data_ref": "sales_data_filtered"}
describe("sales_data_filtered") → {statistics}
to_parquet("sales_data_filtered", "output.parquet") → {success}
```
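The same flow as runnable Python against the server classes; a sketch that assumes a `data.csv` with an `amount` column exists in the working directory:

```python
import asyncio

from mcp_server.pandas_tools import PandasTools


async def main():
    tools = PandasTools()

    # read_csv stores the frame and returns a data_ref
    loaded = await tools.read_csv("data.csv", name="sales_data")

    # filter returns a new data_ref; the original stays in the store
    filtered = await tools.filter(loaded["data_ref"], "amount > 100")

    stats = await tools.describe(filtered["data_ref"])
    print(stats["shape"])

    await tools.to_parquet(filtered["data_ref"], "output.parquet")

    # Clean up both refs when done
    await tools.drop_data(loaded["data_ref"])
    await tools.drop_data(filtered["data_ref"])


asyncio.run(main())
```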
## Memory Management

- Default row limit: 100,000 rows per DataFrame
- Configure it via the `DATA_PLATFORM_MAX_ROWS` environment variable
- Use chunked processing for large files (`chunk_size` parameter), as sketched below
- Monitor with the `list_data` tool (shows memory usage)
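For files past the row limit, `read_csv` with `chunk_size` stores each chunk under its own ref instead of failing. A sketch, assuming a hypothetical large `events.csv`:

```python
import asyncio

from mcp_server.pandas_tools import PandasTools


async def main():
    tools = PandasTools()

    # Each chunk gets its own data_ref, so no single stored DataFrame
    # exceeds DATA_PLATFORM_MAX_ROWS. ("events.csv" is hypothetical.)
    result = await tools.read_csv("events.csv", name="events", chunk_size=50_000)
    for chunk in result["chunks"]:
        stats = await tools.describe(chunk["ref"])
        print(chunk["ref"], stats["shape"])

    # list_data reports per-ref and total memory usage.
    print(await tools.list_data())


asyncio.run(main())
```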
## Running

```bash
python -m mcp_server.server
```

## Development

```bash
pip install -e ".[dev]"
pytest
```
mcp-servers/data-platform/mcp_server/__init__.py (new file, 7 lines)
"""
Data Platform MCP Server.

Provides pandas, PostgreSQL/PostGIS, and dbt tools to Claude Code via MCP.
"""

__version__ = "1.0.0"
mcp-servers/data-platform/mcp_server/config.py (new file, 195 lines)
"""
Configuration loader for Data Platform MCP Server.

Implements hybrid configuration system:
- System-level: ~/.config/claude/postgres.env (credentials)
- Project-level: .env (dbt project paths, overrides)
- Auto-detection: dbt_project.yml discovery
"""
import logging
import os
from pathlib import Path
from typing import Any, Dict, Optional

from dotenv import load_dotenv

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


class DataPlatformConfig:
    """Hybrid configuration loader for data platform tools"""

    def __init__(self):
        self.postgres_url: Optional[str] = None
        self.dbt_project_dir: Optional[str] = None
        self.dbt_profiles_dir: Optional[str] = None
        self.max_rows: int = 100_000

    def load(self) -> Dict[str, Any]:
        """
        Load configuration from system and project levels.

        Returns:
            Dict containing postgres_url, dbt_project_dir, dbt_profiles_dir, max_rows

        Note:
            PostgreSQL credentials are optional - the server can run in pandas-only mode.
        """
        # Load system config (PostgreSQL credentials)
        system_config = Path.home() / '.config' / 'claude' / 'postgres.env'
        if system_config.exists():
            load_dotenv(system_config)
            logger.info(f"Loaded system configuration from {system_config}")
        else:
            logger.info(
                f"System config not found: {system_config} - "
                "PostgreSQL tools will be unavailable"
            )

        # Find project directory
        project_dir = self._find_project_directory()

        # Load project config (overrides system)
        if project_dir:
            project_config = project_dir / '.env'
            if project_config.exists():
                load_dotenv(project_config, override=True)
                logger.info(f"Loaded project configuration from {project_config}")

        # Extract values
        self.postgres_url = os.getenv('POSTGRES_URL')
        self.dbt_project_dir = os.getenv('DBT_PROJECT_DIR')
        self.dbt_profiles_dir = os.getenv('DBT_PROFILES_DIR')
        self.max_rows = int(os.getenv('DATA_PLATFORM_MAX_ROWS', '100000'))

        # Auto-detect dbt project if not specified
        if not self.dbt_project_dir and project_dir:
            self.dbt_project_dir = self._find_dbt_project(project_dir)
            if self.dbt_project_dir:
                logger.info(f"Auto-detected dbt project: {self.dbt_project_dir}")

        # Default dbt profiles dir to ~/.dbt
        if not self.dbt_profiles_dir:
            default_profiles = Path.home() / '.dbt'
            if default_profiles.exists():
                self.dbt_profiles_dir = str(default_profiles)

        return {
            'postgres_url': self.postgres_url,
            'dbt_project_dir': self.dbt_project_dir,
            'dbt_profiles_dir': self.dbt_profiles_dir,
            'max_rows': self.max_rows,
            'postgres_available': self.postgres_url is not None,
            'dbt_available': self.dbt_project_dir is not None
        }

    def _find_project_directory(self) -> Optional[Path]:
        """
        Find the user's project directory.

        Returns:
            Path to project directory, or None if not found
        """
        # Strategy 1: Check CLAUDE_PROJECT_DIR environment variable
        project_dir = os.getenv('CLAUDE_PROJECT_DIR')
        if project_dir:
            path = Path(project_dir)
            if path.exists():
                logger.info(f"Found project directory from CLAUDE_PROJECT_DIR: {path}")
                return path

        # Strategy 2: Check PWD
        pwd = os.getenv('PWD')
        if pwd:
            path = Path(pwd)
            if path.exists() and (
                (path / '.git').exists() or
                (path / '.env').exists() or
                (path / 'dbt_project.yml').exists()
            ):
                logger.info(f"Found project directory from PWD: {path}")
                return path

        # Strategy 3: Check current working directory
        cwd = Path.cwd()
        if (cwd / '.git').exists() or (cwd / '.env').exists() or (cwd / 'dbt_project.yml').exists():
            logger.info(f"Found project directory from cwd: {cwd}")
            return cwd

        logger.debug("Could not determine project directory")
        return None

    def _find_dbt_project(self, start_dir: Path) -> Optional[str]:
        """
        Find dbt_project.yml in the project or its subdirectories.

        Args:
            start_dir: Directory to start searching from

        Returns:
            Path to dbt project directory, or None if not found
        """
        # Check root
        if (start_dir / 'dbt_project.yml').exists():
            return str(start_dir)

        # Check common subdirectories
        for subdir in ['dbt', 'transform', 'analytics', 'models']:
            candidate = start_dir / subdir
            if (candidate / 'dbt_project.yml').exists():
                return str(candidate)

        # Search one level deep
        for item in start_dir.iterdir():
            if item.is_dir() and not item.name.startswith('.'):
                if (item / 'dbt_project.yml').exists():
                    return str(item)

        return None


def load_config() -> Dict[str, Any]:
    """
    Convenience function to load configuration.

    Returns:
        Configuration dictionary
    """
    config = DataPlatformConfig()
    return config.load()


def check_postgres_connection() -> Dict[str, Any]:
    """
    Check PostgreSQL connection status for the SessionStart hook.

    Returns:
        Dict with connection status and message
    """
    import asyncio

    config = load_config()
    if not config.get('postgres_url'):
        return {
            'connected': False,
            'message': 'PostgreSQL not configured (POSTGRES_URL not set)'
        }

    async def test_connection():
        try:
            import asyncpg
            conn = await asyncpg.connect(config['postgres_url'], timeout=5)
            version = await conn.fetchval('SELECT version()')
            await conn.close()
            return {
                'connected': True,
                'message': 'Connected to PostgreSQL',
                'version': version.split(',')[0] if version else 'Unknown'
            }
        except Exception as e:
            return {
                'connected': False,
                'message': f'PostgreSQL connection failed: {str(e)}'
            }

    return asyncio.run(test_connection())
mcp-servers/data-platform/mcp_server/data_store.py (new file, 219 lines)
"""
Arrow IPC DataFrame Registry.

Provides persistent storage for DataFrames across tool calls, using Apache Arrow
for efficient memory management and serialization.
"""
import logging
import uuid
from dataclasses import dataclass
from datetime import datetime
from typing import Dict, List, Optional, Union

import pandas as pd
import pyarrow as pa

logger = logging.getLogger(__name__)


@dataclass
class DataFrameInfo:
    """Metadata about a stored DataFrame"""
    ref: str
    rows: int
    columns: int
    column_names: List[str]
    dtypes: Dict[str, str]
    memory_bytes: int
    created_at: datetime
    source: Optional[str] = None


class DataStore:
    """
    Singleton registry for Arrow Tables (DataFrames).

    Uses the Arrow IPC format for efficient memory usage and supports
    data_ref-based retrieval across multiple tool calls.
    """
    _instance = None
    _dataframes: Dict[str, pa.Table] = {}
    _metadata: Dict[str, DataFrameInfo] = {}
    _max_rows: int = 100_000

    def __new__(cls):
        if cls._instance is None:
            cls._instance = super().__new__(cls)
            cls._dataframes = {}
            cls._metadata = {}
        return cls._instance

    @classmethod
    def get_instance(cls) -> 'DataStore':
        """Get the singleton instance"""
        if cls._instance is None:
            cls._instance = cls()
        return cls._instance

    @classmethod
    def set_max_rows(cls, max_rows: int):
        """Set the maximum rows limit"""
        cls._max_rows = max_rows

    def store(
        self,
        data: Union[pa.Table, pd.DataFrame],
        name: Optional[str] = None,
        source: Optional[str] = None
    ) -> str:
        """
        Store a DataFrame and return its reference.

        Args:
            data: Arrow Table or pandas DataFrame
            name: Optional name for the reference (auto-generated if not provided)
            source: Optional source description (e.g., file path, query)

        Returns:
            data_ref string to retrieve the DataFrame later
        """
        # Convert pandas to Arrow if needed
        if isinstance(data, pd.DataFrame):
            table = pa.Table.from_pandas(data)
        else:
            table = data

        # Generate reference
        data_ref = name or f"df_{uuid.uuid4().hex[:8]}"

        # Ensure unique reference
        if data_ref in self._dataframes and name is None:
            data_ref = f"{data_ref}_{uuid.uuid4().hex[:4]}"

        # Store table
        self._dataframes[data_ref] = table

        # Store metadata
        schema = table.schema
        self._metadata[data_ref] = DataFrameInfo(
            ref=data_ref,
            rows=table.num_rows,
            columns=table.num_columns,
            column_names=[f.name for f in schema],
            dtypes={f.name: str(f.type) for f in schema},
            memory_bytes=table.nbytes,
            created_at=datetime.now(),
            source=source
        )

        logger.info(f"Stored DataFrame '{data_ref}': {table.num_rows} rows, {table.num_columns} cols")
        return data_ref

    def get(self, data_ref: str) -> Optional[pa.Table]:
        """
        Retrieve an Arrow Table by reference.

        Args:
            data_ref: Reference string from store()

        Returns:
            Arrow Table, or None if not found
        """
        return self._dataframes.get(data_ref)

    def get_pandas(self, data_ref: str) -> Optional[pd.DataFrame]:
        """
        Retrieve a DataFrame as pandas.

        Args:
            data_ref: Reference string from store()

        Returns:
            pandas DataFrame, or None if not found
        """
        table = self.get(data_ref)
        if table is not None:
            return table.to_pandas()
        return None

    def get_info(self, data_ref: str) -> Optional[DataFrameInfo]:
        """
        Get metadata about a stored DataFrame.

        Args:
            data_ref: Reference string

        Returns:
            DataFrameInfo, or None if not found
        """
        return self._metadata.get(data_ref)

    def list_refs(self) -> List[Dict]:
        """
        List all stored DataFrame references with metadata.

        Returns:
            List of dicts with ref, rows, columns, memory info
        """
        result = []
        for ref, info in self._metadata.items():
            result.append({
                'ref': ref,
                'rows': info.rows,
                'columns': info.columns,
                'column_names': info.column_names,
                'memory_mb': round(info.memory_bytes / (1024 * 1024), 2),
                'source': info.source,
                'created_at': info.created_at.isoformat()
            })
        return result

    def drop(self, data_ref: str) -> bool:
        """
        Remove a DataFrame from the store.

        Args:
            data_ref: Reference string

        Returns:
            True if removed, False if not found
        """
        if data_ref in self._dataframes:
            del self._dataframes[data_ref]
            del self._metadata[data_ref]
            logger.info(f"Dropped DataFrame '{data_ref}'")
            return True
        return False

    def clear(self):
        """Remove all stored DataFrames"""
        count = len(self._dataframes)
        self._dataframes.clear()
        self._metadata.clear()
        logger.info(f"Cleared {count} DataFrames from store")

    def total_memory_bytes(self) -> int:
        """Get total memory used by all stored DataFrames"""
        return sum(info.memory_bytes for info in self._metadata.values())

    def total_memory_mb(self) -> float:
        """Get total memory in MB"""
        return round(self.total_memory_bytes() / (1024 * 1024), 2)

    def check_row_limit(self, row_count: int) -> Dict:
        """
        Check whether a row count exceeds the limit.

        Args:
            row_count: Number of rows

        Returns:
            Dict with 'exceeded' bool, plus a message if exceeded
        """
        if row_count > self._max_rows:
            return {
                'exceeded': True,
                'message': f"Row count ({row_count:,}) exceeds limit ({self._max_rows:,})",
                'suggestion': "Use chunked processing or filter data first",
                'limit': self._max_rows
            }
        return {'exceeded': False}
mcp-servers/data-platform/mcp_server/dbt_tools.py (new file, 387 lines)
"""
dbt MCP Tools.

Provides a dbt CLI wrapper with pre-execution validation.
"""
import json
import logging
import os
import subprocess
from pathlib import Path
from typing import Dict, List, Optional

from .config import load_config

logger = logging.getLogger(__name__)


class DbtTools:
    """dbt CLI wrapper tools with pre-validation"""

    def __init__(self):
        self.config = load_config()
        self.project_dir = self.config.get('dbt_project_dir')
        self.profiles_dir = self.config.get('dbt_profiles_dir')

    def _get_dbt_command(self, cmd: List[str]) -> List[str]:
        """Build a dbt command with project and profiles directories"""
        base = ['dbt']
        if self.project_dir:
            base.extend(['--project-dir', self.project_dir])
        if self.profiles_dir:
            base.extend(['--profiles-dir', self.profiles_dir])
        base.extend(cmd)
        return base

    def _run_dbt(
        self,
        cmd: List[str],
        timeout: int = 300,
        capture_json: bool = False
    ) -> Dict:
        """
        Run a dbt command and return the result.

        Args:
            cmd: dbt subcommand and arguments
            timeout: Command timeout in seconds
            capture_json: If True, parse JSON output

        Returns:
            Dict with command result
        """
        if not self.project_dir:
            return {
                'error': 'dbt project not found',
                'suggestion': 'Set DBT_PROJECT_DIR in project .env or ensure dbt_project.yml exists'
            }

        full_cmd = self._get_dbt_command(cmd)
        logger.info(f"Running: {' '.join(full_cmd)}")

        try:
            env = os.environ.copy()
            # Disable dbt analytics/tracking
            env['DBT_SEND_ANONYMOUS_USAGE_STATS'] = 'false'

            result = subprocess.run(
                full_cmd,
                capture_output=True,
                text=True,
                timeout=timeout,
                cwd=self.project_dir,
                env=env
            )

            output = {
                'success': result.returncode == 0,
                'command': ' '.join(cmd),
                'stdout': result.stdout,
                'stderr': result.stderr if result.returncode != 0 else None
            }

            if capture_json and result.returncode == 0:
                try:
                    output['data'] = json.loads(result.stdout)
                except json.JSONDecodeError:
                    pass

            return output

        except subprocess.TimeoutExpired:
            return {
                'error': f'Command timed out after {timeout}s',
                'command': ' '.join(cmd)
            }
        except FileNotFoundError:
            return {
                'error': 'dbt not found in PATH',
                'suggestion': 'Install dbt: pip install dbt-core dbt-postgres'
            }
        except Exception as e:
            logger.error(f"dbt command failed: {e}")
            return {'error': str(e)}

    async def dbt_parse(self) -> Dict:
        """
        Validate the dbt project without executing (pre-flight check).

        Returns:
            Dict with validation result and any errors
        """
        result = self._run_dbt(['parse'])

        # Check if _run_dbt returned an error (e.g., project not found, timeout, dbt not installed)
        if 'error' in result:
            return result

        if not result.get('success'):
            # Extract useful error info from stderr
            stderr = result.get('stderr', '') or result.get('stdout', '')
            errors = []

            # Look for common dbt 1.9+ deprecation warnings
            if 'deprecated' in stderr.lower():
                errors.append({
                    'type': 'deprecation',
                    'message': 'Deprecated syntax found - check the dbt 1.9+ migration guide'
                })

            # Look for compilation errors
            if 'compilation error' in stderr.lower():
                errors.append({
                    'type': 'compilation',
                    'message': 'SQL compilation error - check model syntax'
                })

            return {
                'valid': False,
                'errors': errors,
                'details': stderr[:2000] if stderr else None,
                'suggestion': 'Fix issues before running dbt models'
            }

        return {
            'valid': True,
            'message': 'dbt project validation passed'
        }

    async def dbt_run(
        self,
        select: Optional[str] = None,
        exclude: Optional[str] = None,
        full_refresh: bool = False
    ) -> Dict:
        """
        Run dbt models with pre-validation.

        Args:
            select: Model selection (e.g., "model_name", "+model_name", "tag:daily")
            exclude: Models to exclude
            full_refresh: If True, rebuild incremental models

        Returns:
            Dict with run result
        """
        # ALWAYS validate first
        parse_result = await self.dbt_parse()
        if not parse_result.get('valid'):
            return {
                'error': 'Pre-validation failed',
                **parse_result
            }

        cmd = ['run']
        if select:
            cmd.extend(['--select', select])
        if exclude:
            cmd.extend(['--exclude', exclude])
        if full_refresh:
            cmd.append('--full-refresh')

        return self._run_dbt(cmd)

    async def dbt_test(
        self,
        select: Optional[str] = None,
        exclude: Optional[str] = None
    ) -> Dict:
        """
        Run dbt tests.

        Args:
            select: Test selection
            exclude: Tests to exclude

        Returns:
            Dict with test results
        """
        cmd = ['test']
        if select:
            cmd.extend(['--select', select])
        if exclude:
            cmd.extend(['--exclude', exclude])

        return self._run_dbt(cmd)

    async def dbt_build(
        self,
        select: Optional[str] = None,
        exclude: Optional[str] = None,
        full_refresh: bool = False
    ) -> Dict:
        """
        Run dbt build (run + test) with pre-validation.

        Args:
            select: Model/test selection
            exclude: Resources to exclude
            full_refresh: If True, rebuild incremental models

        Returns:
            Dict with build result
        """
        # ALWAYS validate first
        parse_result = await self.dbt_parse()
        if not parse_result.get('valid'):
            return {
                'error': 'Pre-validation failed',
                **parse_result
            }

        cmd = ['build']
        if select:
            cmd.extend(['--select', select])
        if exclude:
            cmd.extend(['--exclude', exclude])
        if full_refresh:
            cmd.append('--full-refresh')

        return self._run_dbt(cmd)

    async def dbt_compile(
        self,
        select: Optional[str] = None
    ) -> Dict:
        """
        Compile dbt models to SQL without executing.

        Args:
            select: Model selection

        Returns:
            Dict with compiled SQL info
        """
        cmd = ['compile']
        if select:
            cmd.extend(['--select', select])

        return self._run_dbt(cmd)

    async def dbt_ls(
        self,
        select: Optional[str] = None,
        resource_type: Optional[str] = None,
        output: str = 'name'
    ) -> Dict:
        """
        List dbt resources.

        Args:
            select: Resource selection
            resource_type: Filter by type (model, test, seed, snapshot, source)
            output: Output format ('name', 'path', 'json')

        Returns:
            Dict with list of resources
        """
        cmd = ['ls', '--output', output]
        if select:
            cmd.extend(['--select', select])
        if resource_type:
            cmd.extend(['--resource-type', resource_type])

        result = self._run_dbt(cmd)

        if result.get('success') and result.get('stdout'):
            lines = [line.strip() for line in result['stdout'].split('\n') if line.strip()]
            result['resources'] = lines
            result['count'] = len(lines)

        return result

    async def dbt_docs_generate(self) -> Dict:
        """
        Generate dbt documentation.

        Returns:
            Dict with generation result
        """
        result = self._run_dbt(['docs', 'generate'])

        if result.get('success') and self.project_dir:
            # Check for the generated catalog
            catalog_path = Path(self.project_dir) / 'target' / 'catalog.json'
            manifest_path = Path(self.project_dir) / 'target' / 'manifest.json'
            result['catalog_generated'] = catalog_path.exists()
            result['manifest_generated'] = manifest_path.exists()

        return result

    async def dbt_lineage(self, model: str) -> Dict:
        """
        Get model dependencies and lineage.

        Args:
            model: Model name to analyze

        Returns:
            Dict with upstream and downstream dependencies
        """
        if not self.project_dir:
            return {'error': 'dbt project not found'}

        manifest_path = Path(self.project_dir) / 'target' / 'manifest.json'

        # Generate the manifest if it does not exist
        if not manifest_path.exists():
            compile_result = await self.dbt_compile(select=model)
            if not compile_result.get('success'):
                return {
                    'error': 'Failed to compile manifest',
                    'details': compile_result
                }

        if not manifest_path.exists():
            return {
                'error': 'Manifest not found',
                'suggestion': 'Run dbt compile first'
            }

        try:
            with open(manifest_path) as f:
                manifest = json.load(f)

            # Find the model node
            model_key = None
            for key in manifest.get('nodes', {}):
                if key.endswith(f'.{model}') or manifest['nodes'][key].get('name') == model:
                    model_key = key
                    break

            if not model_key:
                return {
                    'error': f'Model not found: {model}',
                    'available_models': [
                        n.get('name') for n in manifest.get('nodes', {}).values()
                        if n.get('resource_type') == 'model'
                    ][:20]
                }

            node = manifest['nodes'][model_key]

            # Get upstream (depends_on)
            upstream = node.get('depends_on', {}).get('nodes', [])

            # Get downstream (find nodes that depend on this one)
            downstream = []
            for key, other_node in manifest.get('nodes', {}).items():
                deps = other_node.get('depends_on', {}).get('nodes', [])
                if model_key in deps:
                    downstream.append(key)

            return {
                'model': model,
                'unique_id': model_key,
                'materialization': node.get('config', {}).get('materialized'),
                'schema': node.get('schema'),
                'database': node.get('database'),
                'upstream': upstream,
                'downstream': downstream,
                'description': node.get('description'),
                'tags': node.get('tags', [])
            }

        except Exception as e:
            logger.error(f"dbt_lineage failed: {e}")
            return {'error': str(e)}
mcp-servers/data-platform/mcp_server/pandas_tools.py (new file, 500 lines)
"""
pandas MCP Tools.

Provides DataFrame operations with Arrow IPC data_ref persistence.
"""
import logging
from pathlib import Path
from typing import Dict, List, Optional, Union

import pandas as pd
import pyarrow.parquet as pq

from .config import load_config
from .data_store import DataStore

logger = logging.getLogger(__name__)


class PandasTools:
    """pandas data manipulation tools with data_ref persistence"""

    def __init__(self):
        self.store = DataStore.get_instance()
        config = load_config()
        self.max_rows = config.get('max_rows', 100_000)
        self.store.set_max_rows(self.max_rows)

    def _check_and_store(
        self,
        df: pd.DataFrame,
        name: Optional[str] = None,
        source: Optional[str] = None
    ) -> Dict:
        """Check the row limit and store the DataFrame if within limits"""
        check = self.store.check_row_limit(len(df))
        if check['exceeded']:
            return {
                'error': 'row_limit_exceeded',
                **check,
                'preview': df.head(100).to_dict(orient='records')
            }

        data_ref = self.store.store(df, name=name, source=source)
        return {
            'data_ref': data_ref,
            'rows': len(df),
            'columns': list(df.columns),
            'dtypes': {col: str(dtype) for col, dtype in df.dtypes.items()}
        }

    async def read_csv(
        self,
        file_path: str,
        name: Optional[str] = None,
        chunk_size: Optional[int] = None,
        **kwargs
    ) -> Dict:
        """
        Load a CSV file into a DataFrame.

        Args:
            file_path: Path to CSV file
            name: Optional name for data_ref
            chunk_size: If provided, process in chunks
            **kwargs: Additional pandas read_csv arguments

        Returns:
            Dict with data_ref or error info
        """
        path = Path(file_path)
        if not path.exists():
            return {'error': f'File not found: {file_path}'}

        try:
            if chunk_size:
                # Chunked processing - store each chunk under its own data_ref
                chunks = []
                for i, chunk in enumerate(pd.read_csv(path, chunksize=chunk_size, **kwargs)):
                    chunk_ref = self.store.store(chunk, name=f"{name or 'chunk'}_{i}", source=file_path)
                    chunks.append({'ref': chunk_ref, 'rows': len(chunk)})
                return {
                    'chunked': True,
                    'chunks': chunks,
                    'total_chunks': len(chunks)
                }

            df = pd.read_csv(path, **kwargs)
            return self._check_and_store(df, name=name, source=file_path)

        except Exception as e:
            logger.error(f"read_csv failed: {e}")
            return {'error': str(e)}

    async def read_parquet(
        self,
        file_path: str,
        name: Optional[str] = None,
        columns: Optional[List[str]] = None
    ) -> Dict:
        """
        Load a Parquet file into a DataFrame.

        Args:
            file_path: Path to Parquet file
            name: Optional name for data_ref
            columns: Optional list of columns to load

        Returns:
            Dict with data_ref or error info
        """
        path = Path(file_path)
        if not path.exists():
            return {'error': f'File not found: {file_path}'}

        try:
            table = pq.read_table(path, columns=columns)
            df = table.to_pandas()
            return self._check_and_store(df, name=name, source=file_path)

        except Exception as e:
            logger.error(f"read_parquet failed: {e}")
            return {'error': str(e)}

    async def read_json(
        self,
        file_path: str,
        name: Optional[str] = None,
        lines: bool = False,
        **kwargs
    ) -> Dict:
        """
        Load a JSON/JSONL file into a DataFrame.

        Args:
            file_path: Path to JSON file
            name: Optional name for data_ref
            lines: If True, read as JSON Lines format
            **kwargs: Additional pandas read_json arguments

        Returns:
            Dict with data_ref or error info
        """
        path = Path(file_path)
        if not path.exists():
            return {'error': f'File not found: {file_path}'}

        try:
            df = pd.read_json(path, lines=lines, **kwargs)
            return self._check_and_store(df, name=name, source=file_path)

        except Exception as e:
            logger.error(f"read_json failed: {e}")
            return {'error': str(e)}

    async def to_csv(
        self,
        data_ref: str,
        file_path: str,
        index: bool = False,
        **kwargs
    ) -> Dict:
        """
        Export a DataFrame to a CSV file.

        Args:
            data_ref: Reference to stored DataFrame
            file_path: Output file path
            index: Whether to include the index
            **kwargs: Additional pandas to_csv arguments

        Returns:
            Dict with success status
        """
        df = self.store.get_pandas(data_ref)
        if df is None:
            return {'error': f'DataFrame not found: {data_ref}'}

        try:
            df.to_csv(file_path, index=index, **kwargs)
            return {
                'success': True,
                'file_path': file_path,
                'rows': len(df),
                'size_bytes': Path(file_path).stat().st_size
            }
        except Exception as e:
            logger.error(f"to_csv failed: {e}")
            return {'error': str(e)}

    async def to_parquet(
        self,
        data_ref: str,
        file_path: str,
        compression: str = 'snappy'
    ) -> Dict:
        """
        Export a DataFrame to a Parquet file.

        Args:
            data_ref: Reference to stored DataFrame
            file_path: Output file path
            compression: Compression codec

        Returns:
            Dict with success status
        """
        table = self.store.get(data_ref)
        if table is None:
            return {'error': f'DataFrame not found: {data_ref}'}

        try:
            pq.write_table(table, file_path, compression=compression)
            return {
                'success': True,
                'file_path': file_path,
                'rows': table.num_rows,
                'size_bytes': Path(file_path).stat().st_size
            }
        except Exception as e:
            logger.error(f"to_parquet failed: {e}")
            return {'error': str(e)}

    async def describe(self, data_ref: str) -> Dict:
        """
        Get a statistical summary of a DataFrame.

        Args:
            data_ref: Reference to stored DataFrame

        Returns:
            Dict with statistical summary
        """
        df = self.store.get_pandas(data_ref)
        if df is None:
            return {'error': f'DataFrame not found: {data_ref}'}

        try:
            desc = df.describe(include='all')
            info = self.store.get_info(data_ref)

            return {
                'data_ref': data_ref,
                'shape': {'rows': len(df), 'columns': len(df.columns)},
                'columns': list(df.columns),
                'dtypes': {col: str(dtype) for col, dtype in df.dtypes.items()},
                'memory_mb': info.memory_bytes / (1024 * 1024) if info else None,
                'null_counts': df.isnull().sum().to_dict(),
                'statistics': desc.to_dict()
            }
        except Exception as e:
            logger.error(f"describe failed: {e}")
            return {'error': str(e)}

    async def head(self, data_ref: str, n: int = 10) -> Dict:
        """
        Get the first N rows of a DataFrame.

        Args:
            data_ref: Reference to stored DataFrame
            n: Number of rows

        Returns:
            Dict with rows as records
        """
        df = self.store.get_pandas(data_ref)
        if df is None:
            return {'error': f'DataFrame not found: {data_ref}'}

        try:
            head_df = df.head(n)
            return {
                'data_ref': data_ref,
                'total_rows': len(df),
                'returned_rows': len(head_df),
                'columns': list(df.columns),
                'data': head_df.to_dict(orient='records')
            }
        except Exception as e:
            logger.error(f"head failed: {e}")
            return {'error': str(e)}

    async def tail(self, data_ref: str, n: int = 10) -> Dict:
        """
        Get the last N rows of a DataFrame.

        Args:
            data_ref: Reference to stored DataFrame
            n: Number of rows

        Returns:
            Dict with rows as records
        """
        df = self.store.get_pandas(data_ref)
        if df is None:
            return {'error': f'DataFrame not found: {data_ref}'}

        try:
            tail_df = df.tail(n)
            return {
                'data_ref': data_ref,
                'total_rows': len(df),
                'returned_rows': len(tail_df),
                'columns': list(df.columns),
                'data': tail_df.to_dict(orient='records')
            }
        except Exception as e:
            logger.error(f"tail failed: {e}")
            return {'error': str(e)}

    async def filter(
        self,
        data_ref: str,
        condition: str,
        name: Optional[str] = None
    ) -> Dict:
        """
        Filter DataFrame rows by a condition.

        Args:
            data_ref: Reference to stored DataFrame
            condition: pandas query string (e.g., "age > 30 and city == 'NYC'")
            name: Optional name for the result data_ref

        Returns:
            Dict with a new data_ref for the filtered result
        """
        df = self.store.get_pandas(data_ref)
        if df is None:
            return {'error': f'DataFrame not found: {data_ref}'}

        try:
            filtered = df.query(condition)
            result_name = name or f"{data_ref}_filtered"
            return self._check_and_store(
                filtered,
                name=result_name,
                source=f"filter({data_ref}, '{condition}')"
            )
        except Exception as e:
            logger.error(f"filter failed: {e}")
            return {'error': str(e)}

    async def select(
        self,
        data_ref: str,
        columns: List[str],
        name: Optional[str] = None
    ) -> Dict:
        """
        Select specific columns from a DataFrame.

        Args:
            data_ref: Reference to stored DataFrame
            columns: List of column names to select
            name: Optional name for the result data_ref

        Returns:
            Dict with a new data_ref for the selected columns
        """
        df = self.store.get_pandas(data_ref)
        if df is None:
            return {'error': f'DataFrame not found: {data_ref}'}

        try:
            # Validate columns exist
            missing = [c for c in columns if c not in df.columns]
            if missing:
                return {
                    'error': f'Columns not found: {missing}',
                    'available_columns': list(df.columns)
                }

            selected = df[columns]
            result_name = name or f"{data_ref}_select"
            return self._check_and_store(
                selected,
                name=result_name,
                source=f"select({data_ref}, {columns})"
            )
        except Exception as e:
            logger.error(f"select failed: {e}")
            return {'error': str(e)}

    async def groupby(
        self,
        data_ref: str,
        by: Union[str, List[str]],
        agg: Dict[str, Union[str, List[str]]],
        name: Optional[str] = None
    ) -> Dict:
        """
        Group a DataFrame and aggregate.

        Args:
            data_ref: Reference to stored DataFrame
            by: Column(s) to group by
            agg: Aggregation dict (e.g., {"sales": "sum", "count": "mean"})
            name: Optional name for the result data_ref

        Returns:
            Dict with a new data_ref for the aggregated result
        """
        df = self.store.get_pandas(data_ref)
        if df is None:
            return {'error': f'DataFrame not found: {data_ref}'}

        try:
            grouped = df.groupby(by).agg(agg).reset_index()
            # Flatten column names if multi-level
            if isinstance(grouped.columns, pd.MultiIndex):
                grouped.columns = ['_'.join(col).strip('_') for col in grouped.columns]

            result_name = name or f"{data_ref}_grouped"
            return self._check_and_store(
                grouped,
                name=result_name,
                source=f"groupby({data_ref}, by={by})"
            )
        except Exception as e:
            logger.error(f"groupby failed: {e}")
            return {'error': str(e)}

    async def join(
        self,
        left_ref: str,
        right_ref: str,
        on: Optional[Union[str, List[str]]] = None,
        left_on: Optional[Union[str, List[str]]] = None,
        right_on: Optional[Union[str, List[str]]] = None,
        how: str = 'inner',
        name: Optional[str] = None
    ) -> Dict:
        """
        Join two DataFrames.

        Args:
            left_ref: Reference to left DataFrame
            right_ref: Reference to right DataFrame
            on: Column(s) to join on (if the name is the same in both)
            left_on: Left join column(s)
            right_on: Right join column(s)
            how: Join type ('inner', 'left', 'right', 'outer')
            name: Optional name for the result data_ref

        Returns:
            Dict with a new data_ref for the joined result
        """
        left_df = self.store.get_pandas(left_ref)
        right_df = self.store.get_pandas(right_ref)

        if left_df is None:
            return {'error': f'DataFrame not found: {left_ref}'}
        if right_df is None:
            return {'error': f'DataFrame not found: {right_ref}'}

        try:
            joined = pd.merge(
                left_df, right_df,
                on=on, left_on=left_on, right_on=right_on,
                how=how
            )
            result_name = name or f"{left_ref}_{right_ref}_joined"
            return self._check_and_store(
                joined,
                name=result_name,
                source=f"join({left_ref}, {right_ref}, how={how})"
            )
        except Exception as e:
            logger.error(f"join failed: {e}")
            return {'error': str(e)}

    async def list_data(self) -> Dict:
        """
        List all stored DataFrames.

        Returns:
            Dict with the list of stored DataFrames and their info
        """
        refs = self.store.list_refs()
        return {
            'count': len(refs),
            'total_memory_mb': self.store.total_memory_mb(),
            'max_rows_limit': self.max_rows,
            'dataframes': refs
        }

    async def drop_data(self, data_ref: str) -> Dict:
        """
        Remove a DataFrame from storage.

        Args:
            data_ref: Reference to drop

        Returns:
            Dict with success status
        """
        if self.store.drop(data_ref):
            return {'success': True, 'dropped': data_ref}
        return {'error': f'DataFrame not found: {data_ref}'}
mcp-servers/data-platform/mcp_server/postgres_tools.py (new file, 538 lines)
"""
PostgreSQL/PostGIS MCP Tools.

Provides database operations with connection pooling and PostGIS support.
"""
import asyncio
import json
import logging
from typing import Dict, List, Optional, Any

from .config import load_config
from .data_store import DataStore

logger = logging.getLogger(__name__)

# Optional imports - gracefully handle missing dependencies
try:
    import asyncpg
    ASYNCPG_AVAILABLE = True
except ImportError:
    ASYNCPG_AVAILABLE = False
    logger.warning("asyncpg not available - PostgreSQL tools will be disabled")

try:
    import pandas as pd
    PANDAS_AVAILABLE = True
except ImportError:
    PANDAS_AVAILABLE = False


class PostgresTools:
    """PostgreSQL/PostGIS database tools"""

    def __init__(self):
        self.store = DataStore.get_instance()
        self.config = load_config()
        self.pool: Optional[Any] = None
        self.max_rows = self.config.get('max_rows', 100_000)

    async def _get_pool(self):
        """Get or create the connection pool"""
        if not ASYNCPG_AVAILABLE:
            raise RuntimeError("asyncpg not installed - run: pip install asyncpg")

        if self.pool is None:
            postgres_url = self.config.get('postgres_url')
            if not postgres_url:
                raise RuntimeError(
                    "PostgreSQL not configured. Set POSTGRES_URL in "
                    "~/.config/claude/postgres.env"
                )
            self.pool = await asyncpg.create_pool(postgres_url, min_size=1, max_size=5)
        return self.pool

    async def pg_connect(self) -> Dict:
        """
        Test the PostgreSQL connection and return status.

        Returns:
            Dict with connection status, version, and database info
        """
        if not ASYNCPG_AVAILABLE:
            return {
                'connected': False,
                'error': 'asyncpg not installed',
                'suggestion': 'pip install asyncpg'
            }

        postgres_url = self.config.get('postgres_url')
        if not postgres_url:
            return {
                'connected': False,
                'error': 'POSTGRES_URL not configured',
                'suggestion': 'Create ~/.config/claude/postgres.env with POSTGRES_URL=postgresql://...'
            }

        try:
            pool = await self._get_pool()
            async with pool.acquire() as conn:
                version = await conn.fetchval('SELECT version()')
                db_name = await conn.fetchval('SELECT current_database()')
                user = await conn.fetchval('SELECT current_user')

                # Check for PostGIS
                postgis_version = None
                try:
                    postgis_version = await conn.fetchval('SELECT PostGIS_Version()')
                except Exception:
                    pass

            return {
                'connected': True,
                'database': db_name,
                'user': user,
                'version': version.split(',')[0] if version else 'Unknown',
                'postgis_version': postgis_version,
                'postgis_available': postgis_version is not None
            }

        except Exception as e:
            logger.error(f"pg_connect failed: {e}")
            return {
                'connected': False,
                'error': str(e)
            }

    async def pg_query(
        self,
        query: str,
        params: Optional[List] = None,
        name: Optional[str] = None
    ) -> Dict:
        """
        Execute a SELECT query and return the results as a data_ref.

        Args:
            query: SQL SELECT query
            params: Query parameters (positional, use $1, $2, etc.)
            name: Optional name for the result data_ref

        Returns:
            Dict with a data_ref for the results, or an error
        """
        if not PANDAS_AVAILABLE:
            return {'error': 'pandas not available'}

        try:
            pool = await self._get_pool()
            async with pool.acquire() as conn:
                if params:
                    rows = await conn.fetch(query, *params)
                else:
                    rows = await conn.fetch(query)

                if not rows:
                    return {
                        'data_ref': None,
                        'rows': 0,
                        'message': 'Query returned no results'
                    }

                # Convert to DataFrame
                df = pd.DataFrame([dict(r) for r in rows])

                # Check row limit
                check = self.store.check_row_limit(len(df))
                if check['exceeded']:
                    return {
                        'error': 'row_limit_exceeded',
                        **check,
                        'preview': df.head(100).to_dict(orient='records')
                    }

                # Store result
                data_ref = self.store.store(df, name=name, source=f"pg_query: {query[:100]}...")
                return {
                    'data_ref': data_ref,
                    'rows': len(df),
                    'columns': list(df.columns)
                }

        except Exception as e:
            logger.error(f"pg_query failed: {e}")
            return {'error': str(e)}

    async def pg_execute(
        self,
        query: str,
        params: Optional[List] = None
    ) -> Dict:
        """
        Execute an INSERT/UPDATE/DELETE query.

        Args:
            query: SQL DML query
            params: Query parameters

        Returns:
            Dict with affected rows count
        """
        try:
            pool = await self._get_pool()
            async with pool.acquire() as conn:
                if params:
                    result = await conn.execute(query, *params)
                else:
                    result = await conn.execute(query)

                # Parse the result status (e.g., "INSERT 0 1" or "UPDATE 5")
                parts = result.split()
                affected = int(parts[-1]) if parts else 0

                return {
                    'success': True,
                    'command': parts[0] if parts else 'UNKNOWN',
                    'affected_rows': affected
                }

        except Exception as e:
            logger.error(f"pg_execute failed: {e}")
            return {'error': str(e)}

    async def pg_tables(self, schema: str = 'public') -> Dict:
        """
        List all tables in a schema.

        Args:
            schema: Schema name (default: public)

        Returns:
            Dict with the list of tables
        """
        query = """
            SELECT
                table_name,
                table_type,
                (SELECT count(*) FROM information_schema.columns c
                 WHERE c.table_schema = t.table_schema
                 AND c.table_name = t.table_name) as column_count
            FROM information_schema.tables t
            WHERE table_schema = $1
            ORDER BY table_name
        """
        try:
            pool = await self._get_pool()
            async with pool.acquire() as conn:
                rows = await conn.fetch(query, schema)
                tables = [
                    {
                        'name': r['table_name'],
                        'type': r['table_type'],
                        'columns': r['column_count']
                    }
                    for r in rows
                ]
                return {
                    'schema': schema,
                    'count': len(tables),
                    'tables': tables
                }
        except Exception as e:
            logger.error(f"pg_tables failed: {e}")
            return {'error': str(e)}

    async def pg_columns(self, table: str, schema: str = 'public') -> Dict:
        """
        Get column information for a table.

        Args:
            table: Table name
            schema: Schema name (default: public)

        Returns:
            Dict with column details
        """
        query = """
            SELECT
                column_name,
                data_type,
                udt_name,
                is_nullable,
                column_default,
                character_maximum_length,
                numeric_precision
            FROM information_schema.columns
            WHERE table_schema = $1 AND table_name = $2
            ORDER BY ordinal_position
        """
        try:
            pool = await self._get_pool()
            async with pool.acquire() as conn:
                rows = await conn.fetch(query, schema, table)
                columns = [
                    {
                        'name': r['column_name'],
                        'type': r['data_type'],
                        'udt': r['udt_name'],
                        'nullable': r['is_nullable'] == 'YES',
                        'default': r['column_default'],
                        'max_length': r['character_maximum_length'],
                        'precision': r['numeric_precision']
                    }
                    for r in rows
                ]
                return {
                    'table': f'{schema}.{table}',
                    'column_count': len(columns),
                    'columns': columns
                }
        except Exception as e:
            logger.error(f"pg_columns failed: {e}")
            return {'error': str(e)}

    async def pg_schemas(self) -> Dict:
        """
        List all schemas in the database.

        Returns:
            Dict with the list of schemas
        """
        query = """
            SELECT schema_name
            FROM information_schema.schemata
            WHERE schema_name NOT IN ('pg_catalog', 'information_schema', 'pg_toast')
            ORDER BY schema_name
        """
        try:
            pool = await self._get_pool()
            async with pool.acquire() as conn:
                rows = await conn.fetch(query)
                schemas = [r['schema_name'] for r in rows]
                return {
                    'count': len(schemas),
                    'schemas': schemas
                }
        except Exception as e:
            logger.error(f"pg_schemas failed: {e}")
            return {'error': str(e)}

    async def st_tables(self, schema: str = 'public') -> Dict:
        """
        List PostGIS-enabled tables.

        Args:
            schema: Schema name (default: public)

        Returns:
            Dict with the list of tables with geometry columns
        """
        query = """
            SELECT
                f_table_name as table_name,
                f_geometry_column as geometry_column,
                type as geometry_type,
                srid,
                coord_dimension
            FROM geometry_columns
            WHERE f_table_schema = $1
            ORDER BY f_table_name
        """
        try:
            pool = await self._get_pool()
            async with pool.acquire() as conn:
                rows = await conn.fetch(query, schema)
                tables = [
                    {
                        'table': r['table_name'],
                        'geometry_column': r['geometry_column'],
                        'geometry_type': r['geometry_type'],
                        'srid': r['srid'],
                        'dimensions': r['coord_dimension']
                    }
                    for r in rows
                ]
                return {
                    'schema': schema,
                    'count': len(tables),
                    'postgis_tables': tables
                }
        except Exception as e:
            if 'geometry_columns' in str(e):
                return {
                    'error': 'PostGIS not installed or extension not enabled',
                    'suggestion': 'Run: CREATE EXTENSION IF NOT EXISTS postgis;'
                }
            logger.error(f"st_tables failed: {e}")
            return {'error': str(e)}

    async def st_geometry_type(self, table: str, column: str, schema: str = 'public') -> Dict:
        """
        Get the geometry type of a column.

        Args:
            table: Table name
            column: Geometry column name
            schema: Schema name

        Returns:
            Dict with geometry type information
        """
        # NOTE: identifiers (schema/table/column) are interpolated into the
        # SQL string; they cannot be bound as parameters, so callers must
        # pass trusted names.
        query = f"""
            SELECT DISTINCT ST_GeometryType({column}) as geom_type
            FROM {schema}.{table}
            WHERE {column} IS NOT NULL
            LIMIT 10
        """
        try:
            pool = await self._get_pool()
            async with pool.acquire() as conn:
                rows = await conn.fetch(query)
                types = [r['geom_type'] for r in rows]
                return {
                    'table': f'{schema}.{table}',
                    'column': column,
                    'geometry_types': types
                }
        except Exception as e:
            logger.error(f"st_geometry_type failed: {e}")
            return {'error': str(e)}

    async def st_srid(self, table: str, column: str, schema: str = 'public') -> Dict:
        """
        Get the SRID of a geometry column.

        Args:
            table: Table name
            column: Geometry column name
            schema: Schema name

        Returns:
            Dict with SRID information
        """
        # NOTE: identifiers are interpolated into the SQL string here as
        # well; callers must pass trusted names.
        query = f"""
            SELECT DISTINCT ST_SRID({column}) as srid
            FROM {schema}.{table}
            WHERE {column} IS NOT NULL
            LIMIT 1
        """
        try:
            pool = await self._get_pool()
            async with pool.acquire() as conn:
                row = await conn.fetchrow(query)
                srid = row['srid'] if row else None

                # Get SRID description
                srid_info = None
                if srid:
                    srid_query = """
                        SELECT srtext, proj4text
                        FROM spatial_ref_sys
                        WHERE srid = $1
                    """
                    srid_row = await conn.fetchrow(srid_query, srid)
                    if srid_row:
                        srid_info = {
                            'description': srid_row['srtext'][:200] if srid_row['srtext'] else None,
                            'proj4': srid_row['proj4text']
                        }

                return {
                    'table': f'{schema}.{table}',
                    'column': column,
                    'srid': srid,
                    'info': srid_info
|
||||
}
|
||||
except Exception as e:
|
||||
logger.error(f"st_srid failed: {e}")
|
||||
return {'error': str(e)}
|
||||
|
||||
async def st_extent(self, table: str, column: str, schema: str = 'public') -> Dict:
|
||||
"""
|
||||
Get bounding box of all geometries.
|
||||
|
||||
Args:
|
||||
table: Table name
|
||||
column: Geometry column name
|
||||
schema: Schema name
|
||||
|
||||
Returns:
|
||||
Dict with bounding box coordinates
|
||||
"""
|
||||
query = f"""
|
||||
SELECT
|
||||
ST_XMin(extent) as xmin,
|
||||
ST_YMin(extent) as ymin,
|
||||
ST_XMax(extent) as xmax,
|
||||
ST_YMax(extent) as ymax
|
||||
FROM (
|
||||
SELECT ST_Extent({column}) as extent
|
||||
FROM {schema}.{table}
|
||||
) sub
|
||||
"""
|
||||
try:
|
||||
pool = await self._get_pool()
|
||||
async with pool.acquire() as conn:
|
||||
row = await conn.fetchrow(query)
|
||||
if row and row['xmin'] is not None:
|
||||
return {
|
||||
'table': f'{schema}.{table}',
|
||||
'column': column,
|
||||
'bbox': {
|
||||
'xmin': float(row['xmin']),
|
||||
'ymin': float(row['ymin']),
|
||||
'xmax': float(row['xmax']),
|
||||
'ymax': float(row['ymax'])
|
||||
}
|
||||
}
|
||||
return {
|
||||
'table': f'{schema}.{table}',
|
||||
'column': column,
|
||||
'bbox': None,
|
||||
'message': 'No geometries found or all NULL'
|
||||
}
|
||||
except Exception as e:
|
||||
logger.error(f"st_extent failed: {e}")
|
||||
return {'error': str(e)}
|
||||
|
||||
async def close(self):
|
||||
"""Close connection pool"""
|
||||
if self.pool:
|
||||
await self.pool.close()
|
||||
self.pool = None
|
||||
|
||||
|
||||
def check_connection() -> None:
|
||||
"""
|
||||
Check PostgreSQL connection for SessionStart hook.
|
||||
Prints warning to stderr if connection fails.
|
||||
"""
|
||||
import sys
|
||||
|
||||
config = load_config()
|
||||
if not config.get('postgres_url'):
|
||||
print(
|
||||
"[data-platform] PostgreSQL not configured (POSTGRES_URL not set)",
|
||||
file=sys.stderr
|
||||
)
|
||||
return
|
||||
|
||||
async def test():
|
||||
try:
|
||||
if not ASYNCPG_AVAILABLE:
|
||||
print(
|
||||
"[data-platform] asyncpg not installed - PostgreSQL tools unavailable",
|
||||
file=sys.stderr
|
||||
)
|
||||
return
|
||||
|
||||
conn = await asyncpg.connect(config['postgres_url'], timeout=5)
|
||||
await conn.close()
|
||||
print("[data-platform] PostgreSQL connection OK", file=sys.stderr)
|
||||
except Exception as e:
|
||||
print(
|
||||
f"[data-platform] PostgreSQL connection failed: {e}",
|
||||
file=sys.stderr
|
||||
)
|
||||
|
||||
asyncio.run(test())
|
||||
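Note on the command-tag parsing in pg_execute above: asyncpg's Connection.execute() resolves to the server's status string, and the code takes the affected-row count from the last token of that string. A minimal, self-contained sketch of exactly that parsing (the sample tags are illustrative):

# Command tags look like "INSERT 0 1" or "UPDATE 5": the verb is the first
# token, the affected-row count the last.
for tag in ("INSERT 0 1", "UPDATE 5", "DELETE 0"):
    parts = tag.split()
    affected = int(parts[-1]) if parts else 0
    print(parts[0], affected)  # INSERT 1 / UPDATE 5 / DELETE 0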
795 mcp-servers/data-platform/mcp_server/server.py Normal file
@@ -0,0 +1,795 @@
"""
|
||||
MCP Server entry point for Data Platform integration.
|
||||
|
||||
Provides pandas, PostgreSQL/PostGIS, and dbt tools to Claude Code via JSON-RPC 2.0 over stdio.
|
||||
"""
|
||||
import asyncio
|
||||
import logging
|
||||
import json
|
||||
from mcp.server import Server
|
||||
from mcp.server.stdio import stdio_server
|
||||
from mcp.types import Tool, TextContent
|
||||
|
||||
from .config import DataPlatformConfig
|
||||
from .data_store import DataStore
|
||||
from .pandas_tools import PandasTools
|
||||
from .postgres_tools import PostgresTools
|
||||
from .dbt_tools import DbtTools
|
||||
|
||||
# Suppress noisy MCP validation warnings on stderr
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
logging.getLogger("root").setLevel(logging.ERROR)
|
||||
logging.getLogger("mcp").setLevel(logging.ERROR)
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class DataPlatformMCPServer:
|
||||
"""MCP Server for data platform integration"""
|
||||
|
||||
def __init__(self):
|
||||
self.server = Server("data-platform-mcp")
|
||||
self.config = None
|
||||
self.pandas_tools = None
|
||||
self.postgres_tools = None
|
||||
self.dbt_tools = None
|
||||
|
||||
async def initialize(self):
|
||||
"""Initialize server and load configuration."""
|
||||
try:
|
||||
config_loader = DataPlatformConfig()
|
||||
self.config = config_loader.load()
|
||||
|
||||
self.pandas_tools = PandasTools()
|
||||
self.postgres_tools = PostgresTools()
|
||||
self.dbt_tools = DbtTools()
|
||||
|
||||
# Log available capabilities
|
||||
caps = []
|
||||
caps.append("pandas")
|
||||
if self.config.get('postgres_available'):
|
||||
caps.append("PostgreSQL")
|
||||
if self.config.get('dbt_available'):
|
||||
caps.append("dbt")
|
||||
|
||||
logger.info(f"Data Platform MCP Server initialized with: {', '.join(caps)}")
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to initialize: {e}")
|
||||
raise
|
||||
|
||||
def setup_tools(self):
|
||||
"""Register all available tools with the MCP server"""
|
||||
|
||||
@self.server.list_tools()
|
||||
async def list_tools() -> list[Tool]:
|
||||
"""Return list of available tools"""
|
||||
tools = [
|
||||
# pandas tools - always available
|
||||
Tool(
|
||||
name="read_csv",
|
||||
description="Load CSV file into DataFrame",
|
||||
inputSchema={
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"file_path": {
|
||||
"type": "string",
|
||||
"description": "Path to CSV file"
|
||||
},
|
||||
"name": {
|
||||
"type": "string",
|
||||
"description": "Optional name for data_ref"
|
||||
},
|
||||
"chunk_size": {
|
||||
"type": "integer",
|
||||
"description": "Process in chunks of this size"
|
||||
}
|
||||
},
|
||||
"required": ["file_path"]
|
||||
}
|
||||
),
|
||||
Tool(
|
||||
name="read_parquet",
|
||||
description="Load Parquet file into DataFrame",
|
||||
inputSchema={
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"file_path": {
|
||||
"type": "string",
|
||||
"description": "Path to Parquet file"
|
||||
},
|
||||
"name": {
|
||||
"type": "string",
|
||||
"description": "Optional name for data_ref"
|
||||
},
|
||||
"columns": {
|
||||
"type": "array",
|
||||
"items": {"type": "string"},
|
||||
"description": "Optional list of columns to load"
|
||||
}
|
||||
},
|
||||
"required": ["file_path"]
|
||||
}
|
||||
),
|
||||
Tool(
|
||||
name="read_json",
|
||||
description="Load JSON/JSONL file into DataFrame",
|
||||
inputSchema={
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"file_path": {
|
||||
"type": "string",
|
||||
"description": "Path to JSON file"
|
||||
},
|
||||
"name": {
|
||||
"type": "string",
|
||||
"description": "Optional name for data_ref"
|
||||
},
|
||||
"lines": {
|
||||
"type": "boolean",
|
||||
"default": False,
|
||||
"description": "Read as JSON Lines format"
|
||||
}
|
||||
},
|
||||
"required": ["file_path"]
|
||||
}
|
||||
),
|
||||
Tool(
|
||||
name="to_csv",
|
||||
description="Export DataFrame to CSV file",
|
||||
inputSchema={
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"data_ref": {
|
||||
"type": "string",
|
||||
"description": "Reference to stored DataFrame"
|
||||
},
|
||||
"file_path": {
|
||||
"type": "string",
|
||||
"description": "Output file path"
|
||||
},
|
||||
"index": {
|
||||
"type": "boolean",
|
||||
"default": False,
|
||||
"description": "Include index column"
|
||||
}
|
||||
},
|
||||
"required": ["data_ref", "file_path"]
|
||||
}
|
||||
),
|
||||
Tool(
|
||||
name="to_parquet",
|
||||
description="Export DataFrame to Parquet file",
|
||||
inputSchema={
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"data_ref": {
|
||||
"type": "string",
|
||||
"description": "Reference to stored DataFrame"
|
||||
},
|
||||
"file_path": {
|
||||
"type": "string",
|
||||
"description": "Output file path"
|
||||
},
|
||||
"compression": {
|
||||
"type": "string",
|
||||
"default": "snappy",
|
||||
"description": "Compression codec"
|
||||
}
|
||||
},
|
||||
"required": ["data_ref", "file_path"]
|
||||
}
|
||||
),
|
||||
Tool(
|
||||
name="describe",
|
||||
description="Get statistical summary of DataFrame",
|
||||
inputSchema={
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"data_ref": {
|
||||
"type": "string",
|
||||
"description": "Reference to stored DataFrame"
|
||||
}
|
||||
},
|
||||
"required": ["data_ref"]
|
||||
}
|
||||
),
|
||||
Tool(
|
||||
name="head",
|
||||
description="Get first N rows of DataFrame",
|
||||
inputSchema={
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"data_ref": {
|
||||
"type": "string",
|
||||
"description": "Reference to stored DataFrame"
|
||||
},
|
||||
"n": {
|
||||
"type": "integer",
|
||||
"default": 10,
|
||||
"description": "Number of rows"
|
||||
}
|
||||
},
|
||||
"required": ["data_ref"]
|
||||
}
|
||||
),
|
||||
Tool(
|
||||
name="tail",
|
||||
description="Get last N rows of DataFrame",
|
||||
inputSchema={
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"data_ref": {
|
||||
"type": "string",
|
||||
"description": "Reference to stored DataFrame"
|
||||
},
|
||||
"n": {
|
||||
"type": "integer",
|
||||
"default": 10,
|
||||
"description": "Number of rows"
|
||||
}
|
||||
},
|
||||
"required": ["data_ref"]
|
||||
}
|
||||
),
|
||||
Tool(
|
||||
name="filter",
|
||||
description="Filter DataFrame rows by condition",
|
||||
inputSchema={
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"data_ref": {
|
||||
"type": "string",
|
||||
"description": "Reference to stored DataFrame"
|
||||
},
|
||||
"condition": {
|
||||
"type": "string",
|
||||
"description": "pandas query string (e.g., 'age > 30 and city == \"NYC\"')"
|
||||
},
|
||||
"name": {
|
||||
"type": "string",
|
||||
"description": "Optional name for result data_ref"
|
||||
}
|
||||
},
|
||||
"required": ["data_ref", "condition"]
|
||||
}
|
||||
),
|
||||
Tool(
|
||||
name="select",
|
||||
description="Select specific columns from DataFrame",
|
||||
inputSchema={
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"data_ref": {
|
||||
"type": "string",
|
||||
"description": "Reference to stored DataFrame"
|
||||
},
|
||||
"columns": {
|
||||
"type": "array",
|
||||
"items": {"type": "string"},
|
||||
"description": "List of column names to select"
|
||||
},
|
||||
"name": {
|
||||
"type": "string",
|
||||
"description": "Optional name for result data_ref"
|
||||
}
|
||||
},
|
||||
"required": ["data_ref", "columns"]
|
||||
}
|
||||
),
|
||||
Tool(
|
||||
name="groupby",
|
||||
description="Group DataFrame and aggregate",
|
||||
inputSchema={
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"data_ref": {
|
||||
"type": "string",
|
||||
"description": "Reference to stored DataFrame"
|
||||
},
|
||||
"by": {
|
||||
"oneOf": [
|
||||
{"type": "string"},
|
||||
{"type": "array", "items": {"type": "string"}}
|
||||
],
|
||||
"description": "Column(s) to group by"
|
||||
},
|
||||
"agg": {
|
||||
"type": "object",
|
||||
"description": "Aggregation dict (e.g., {\"sales\": \"sum\", \"count\": \"mean\"})"
|
||||
},
|
||||
"name": {
|
||||
"type": "string",
|
||||
"description": "Optional name for result data_ref"
|
||||
}
|
||||
},
|
||||
"required": ["data_ref", "by", "agg"]
|
||||
}
|
||||
),
|
||||
Tool(
|
||||
name="join",
|
||||
description="Join two DataFrames",
|
||||
inputSchema={
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"left_ref": {
|
||||
"type": "string",
|
||||
"description": "Reference to left DataFrame"
|
||||
},
|
||||
"right_ref": {
|
||||
"type": "string",
|
||||
"description": "Reference to right DataFrame"
|
||||
},
|
||||
"on": {
|
||||
"oneOf": [
|
||||
{"type": "string"},
|
||||
{"type": "array", "items": {"type": "string"}}
|
||||
],
|
||||
"description": "Column(s) to join on (if same name in both)"
|
||||
},
|
||||
"left_on": {
|
||||
"oneOf": [
|
||||
{"type": "string"},
|
||||
{"type": "array", "items": {"type": "string"}}
|
||||
],
|
||||
"description": "Left join column(s)"
|
||||
},
|
||||
"right_on": {
|
||||
"oneOf": [
|
||||
{"type": "string"},
|
||||
{"type": "array", "items": {"type": "string"}}
|
||||
],
|
||||
"description": "Right join column(s)"
|
||||
},
|
||||
"how": {
|
||||
"type": "string",
|
||||
"enum": ["inner", "left", "right", "outer"],
|
||||
"default": "inner",
|
||||
"description": "Join type"
|
||||
},
|
||||
"name": {
|
||||
"type": "string",
|
||||
"description": "Optional name for result data_ref"
|
||||
}
|
||||
},
|
||||
"required": ["left_ref", "right_ref"]
|
||||
}
|
||||
),
|
||||
Tool(
|
||||
name="list_data",
|
||||
description="List all stored DataFrames",
|
||||
inputSchema={
|
||||
"type": "object",
|
||||
"properties": {}
|
||||
}
|
||||
),
|
||||
Tool(
|
||||
name="drop_data",
|
||||
description="Remove a DataFrame from storage",
|
||||
inputSchema={
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"data_ref": {
|
||||
"type": "string",
|
||||
"description": "Reference to drop"
|
||||
}
|
||||
},
|
||||
"required": ["data_ref"]
|
||||
}
|
||||
),
|
||||
# PostgreSQL tools
|
||||
Tool(
|
||||
name="pg_connect",
|
||||
description="Test PostgreSQL connection and return status",
|
||||
inputSchema={
|
||||
"type": "object",
|
||||
"properties": {}
|
||||
}
|
||||
),
|
||||
Tool(
|
||||
name="pg_query",
|
||||
description="Execute SELECT query and return results as data_ref",
|
||||
inputSchema={
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"query": {
|
||||
"type": "string",
|
||||
"description": "SQL SELECT query"
|
||||
},
|
||||
"params": {
|
||||
"type": "array",
|
||||
"items": {},
|
||||
"description": "Query parameters (use $1, $2, etc.)"
|
||||
},
|
||||
"name": {
|
||||
"type": "string",
|
||||
"description": "Optional name for result data_ref"
|
||||
}
|
||||
},
|
||||
"required": ["query"]
|
||||
}
|
||||
),
|
||||
Tool(
|
||||
name="pg_execute",
|
||||
description="Execute INSERT/UPDATE/DELETE query",
|
||||
inputSchema={
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"query": {
|
||||
"type": "string",
|
||||
"description": "SQL DML query"
|
||||
},
|
||||
"params": {
|
||||
"type": "array",
|
||||
"items": {},
|
||||
"description": "Query parameters"
|
||||
}
|
||||
},
|
||||
"required": ["query"]
|
||||
}
|
||||
),
|
||||
Tool(
|
||||
name="pg_tables",
|
||||
description="List all tables in schema",
|
||||
inputSchema={
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"schema": {
|
||||
"type": "string",
|
||||
"default": "public",
|
||||
"description": "Schema name"
|
||||
}
|
||||
}
|
||||
}
|
||||
),
|
||||
Tool(
|
||||
name="pg_columns",
|
||||
description="Get column information for a table",
|
||||
inputSchema={
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"table": {
|
||||
"type": "string",
|
||||
"description": "Table name"
|
||||
},
|
||||
"schema": {
|
||||
"type": "string",
|
||||
"default": "public",
|
||||
"description": "Schema name"
|
||||
}
|
||||
},
|
||||
"required": ["table"]
|
||||
}
|
||||
),
|
||||
Tool(
|
||||
name="pg_schemas",
|
||||
description="List all schemas in database",
|
||||
inputSchema={
|
||||
"type": "object",
|
||||
"properties": {}
|
||||
}
|
||||
),
|
||||
# PostGIS tools
|
||||
Tool(
|
||||
name="st_tables",
|
||||
description="List PostGIS-enabled tables",
|
||||
inputSchema={
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"schema": {
|
||||
"type": "string",
|
||||
"default": "public",
|
||||
"description": "Schema name"
|
||||
}
|
||||
}
|
||||
}
|
||||
),
|
||||
Tool(
|
||||
name="st_geometry_type",
|
||||
description="Get geometry type of a column",
|
||||
inputSchema={
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"table": {
|
||||
"type": "string",
|
||||
"description": "Table name"
|
||||
},
|
||||
"column": {
|
||||
"type": "string",
|
||||
"description": "Geometry column name"
|
||||
},
|
||||
"schema": {
|
||||
"type": "string",
|
||||
"default": "public",
|
||||
"description": "Schema name"
|
||||
}
|
||||
},
|
||||
"required": ["table", "column"]
|
||||
}
|
||||
),
|
||||
Tool(
|
||||
name="st_srid",
|
||||
description="Get SRID of geometry column",
|
||||
inputSchema={
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"table": {
|
||||
"type": "string",
|
||||
"description": "Table name"
|
||||
},
|
||||
"column": {
|
||||
"type": "string",
|
||||
"description": "Geometry column name"
|
||||
},
|
||||
"schema": {
|
||||
"type": "string",
|
||||
"default": "public",
|
||||
"description": "Schema name"
|
||||
}
|
||||
},
|
||||
"required": ["table", "column"]
|
||||
}
|
||||
),
|
||||
Tool(
|
||||
name="st_extent",
|
||||
description="Get bounding box of all geometries",
|
||||
inputSchema={
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"table": {
|
||||
"type": "string",
|
||||
"description": "Table name"
|
||||
},
|
||||
"column": {
|
||||
"type": "string",
|
||||
"description": "Geometry column name"
|
||||
},
|
||||
"schema": {
|
||||
"type": "string",
|
||||
"default": "public",
|
||||
"description": "Schema name"
|
||||
}
|
||||
},
|
||||
"required": ["table", "column"]
|
||||
}
|
||||
),
|
||||
# dbt tools
|
||||
Tool(
|
||||
name="dbt_parse",
|
||||
description="Validate dbt project (pre-flight check)",
|
||||
inputSchema={
|
||||
"type": "object",
|
||||
"properties": {}
|
||||
}
|
||||
),
|
||||
Tool(
|
||||
name="dbt_run",
|
||||
description="Run dbt models with pre-validation",
|
||||
inputSchema={
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"select": {
|
||||
"type": "string",
|
||||
"description": "Model selection (e.g., 'model_name', '+model_name', 'tag:daily')"
|
||||
},
|
||||
"exclude": {
|
||||
"type": "string",
|
||||
"description": "Models to exclude"
|
||||
},
|
||||
"full_refresh": {
|
||||
"type": "boolean",
|
||||
"default": False,
|
||||
"description": "Rebuild incremental models"
|
||||
}
|
||||
}
|
||||
}
|
||||
),
|
||||
Tool(
|
||||
name="dbt_test",
|
||||
description="Run dbt tests",
|
||||
inputSchema={
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"select": {
|
||||
"type": "string",
|
||||
"description": "Test selection"
|
||||
},
|
||||
"exclude": {
|
||||
"type": "string",
|
||||
"description": "Tests to exclude"
|
||||
}
|
||||
}
|
||||
}
|
||||
),
|
||||
Tool(
|
||||
name="dbt_build",
|
||||
description="Run dbt build (run + test) with pre-validation",
|
||||
inputSchema={
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"select": {
|
||||
"type": "string",
|
||||
"description": "Model/test selection"
|
||||
},
|
||||
"exclude": {
|
||||
"type": "string",
|
||||
"description": "Resources to exclude"
|
||||
},
|
||||
"full_refresh": {
|
||||
"type": "boolean",
|
||||
"default": False,
|
||||
"description": "Rebuild incremental models"
|
||||
}
|
||||
}
|
||||
}
|
||||
),
|
||||
Tool(
|
||||
name="dbt_compile",
|
||||
description="Compile dbt models to SQL without executing",
|
||||
inputSchema={
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"select": {
|
||||
"type": "string",
|
||||
"description": "Model selection"
|
||||
}
|
||||
}
|
||||
}
|
||||
),
|
||||
Tool(
|
||||
name="dbt_ls",
|
||||
description="List dbt resources",
|
||||
inputSchema={
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"select": {
|
||||
"type": "string",
|
||||
"description": "Resource selection"
|
||||
},
|
||||
"resource_type": {
|
||||
"type": "string",
|
||||
"enum": ["model", "test", "seed", "snapshot", "source"],
|
||||
"description": "Filter by type"
|
||||
},
|
||||
"output": {
|
||||
"type": "string",
|
||||
"enum": ["name", "path", "json"],
|
||||
"default": "name",
|
||||
"description": "Output format"
|
||||
}
|
||||
}
|
||||
}
|
||||
),
|
||||
Tool(
|
||||
name="dbt_docs_generate",
|
||||
description="Generate dbt documentation",
|
||||
inputSchema={
|
||||
"type": "object",
|
||||
"properties": {}
|
||||
}
|
||||
),
|
||||
Tool(
|
||||
name="dbt_lineage",
|
||||
description="Get model dependencies and lineage",
|
||||
inputSchema={
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"model": {
|
||||
"type": "string",
|
||||
"description": "Model name to analyze"
|
||||
}
|
||||
},
|
||||
"required": ["model"]
|
||||
}
|
||||
)
|
||||
]
|
||||
return tools
|
||||
|
||||
@self.server.call_tool()
|
||||
async def call_tool(name: str, arguments: dict) -> list[TextContent]:
|
||||
"""Handle tool invocation."""
|
||||
try:
|
||||
# Route to appropriate tool handler
|
||||
# pandas tools
|
||||
if name == "read_csv":
|
||||
result = await self.pandas_tools.read_csv(**arguments)
|
||||
elif name == "read_parquet":
|
||||
result = await self.pandas_tools.read_parquet(**arguments)
|
||||
elif name == "read_json":
|
||||
result = await self.pandas_tools.read_json(**arguments)
|
||||
elif name == "to_csv":
|
||||
result = await self.pandas_tools.to_csv(**arguments)
|
||||
elif name == "to_parquet":
|
||||
result = await self.pandas_tools.to_parquet(**arguments)
|
||||
elif name == "describe":
|
||||
result = await self.pandas_tools.describe(**arguments)
|
||||
elif name == "head":
|
||||
result = await self.pandas_tools.head(**arguments)
|
||||
elif name == "tail":
|
||||
result = await self.pandas_tools.tail(**arguments)
|
||||
elif name == "filter":
|
||||
result = await self.pandas_tools.filter(**arguments)
|
||||
elif name == "select":
|
||||
result = await self.pandas_tools.select(**arguments)
|
||||
elif name == "groupby":
|
||||
result = await self.pandas_tools.groupby(**arguments)
|
||||
elif name == "join":
|
||||
result = await self.pandas_tools.join(**arguments)
|
||||
elif name == "list_data":
|
||||
result = await self.pandas_tools.list_data()
|
||||
elif name == "drop_data":
|
||||
result = await self.pandas_tools.drop_data(**arguments)
|
||||
# PostgreSQL tools
|
||||
elif name == "pg_connect":
|
||||
result = await self.postgres_tools.pg_connect()
|
||||
elif name == "pg_query":
|
||||
result = await self.postgres_tools.pg_query(**arguments)
|
||||
elif name == "pg_execute":
|
||||
result = await self.postgres_tools.pg_execute(**arguments)
|
||||
elif name == "pg_tables":
|
||||
result = await self.postgres_tools.pg_tables(**arguments)
|
||||
elif name == "pg_columns":
|
||||
result = await self.postgres_tools.pg_columns(**arguments)
|
||||
elif name == "pg_schemas":
|
||||
result = await self.postgres_tools.pg_schemas()
|
||||
# PostGIS tools
|
||||
elif name == "st_tables":
|
||||
result = await self.postgres_tools.st_tables(**arguments)
|
||||
elif name == "st_geometry_type":
|
||||
result = await self.postgres_tools.st_geometry_type(**arguments)
|
||||
elif name == "st_srid":
|
||||
result = await self.postgres_tools.st_srid(**arguments)
|
||||
elif name == "st_extent":
|
||||
result = await self.postgres_tools.st_extent(**arguments)
|
||||
# dbt tools
|
||||
elif name == "dbt_parse":
|
||||
result = await self.dbt_tools.dbt_parse()
|
||||
elif name == "dbt_run":
|
||||
result = await self.dbt_tools.dbt_run(**arguments)
|
||||
elif name == "dbt_test":
|
||||
result = await self.dbt_tools.dbt_test(**arguments)
|
||||
elif name == "dbt_build":
|
||||
result = await self.dbt_tools.dbt_build(**arguments)
|
||||
elif name == "dbt_compile":
|
||||
result = await self.dbt_tools.dbt_compile(**arguments)
|
||||
elif name == "dbt_ls":
|
||||
result = await self.dbt_tools.dbt_ls(**arguments)
|
||||
elif name == "dbt_docs_generate":
|
||||
result = await self.dbt_tools.dbt_docs_generate()
|
||||
elif name == "dbt_lineage":
|
||||
result = await self.dbt_tools.dbt_lineage(**arguments)
|
||||
else:
|
||||
raise ValueError(f"Unknown tool: {name}")
|
||||
|
||||
return [TextContent(
|
||||
type="text",
|
||||
text=json.dumps(result, indent=2, default=str)
|
||||
)]
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Tool {name} failed: {e}")
|
||||
return [TextContent(
|
||||
type="text",
|
||||
text=json.dumps({"error": str(e)}, indent=2)
|
||||
)]
|
||||
|
||||
async def run(self):
|
||||
"""Run the MCP server"""
|
||||
await self.initialize()
|
||||
self.setup_tools()
|
||||
|
||||
async with stdio_server() as (read_stream, write_stream):
|
||||
await self.server.run(
|
||||
read_stream,
|
||||
write_stream,
|
||||
self.server.create_initialization_options()
|
||||
)
|
||||
|
||||
|
||||
async def main():
|
||||
"""Main entry point"""
|
||||
server = DataPlatformMCPServer()
|
||||
await server.run()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
asyncio.run(main())
|
||||
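A side note on the routing in call_tool: the long if/elif chain maps one tool name to one coroutine, so it could equally be written as a dispatch table. A self-contained sketch of that alternative (DummyTools stands in for the real tool classes; this is not the file's actual structure):

# a sketch only: name -> bound-coroutine dispatch instead of if/elif
import asyncio

class DummyTools:
    async def read_csv(self, file_path: str, **kw):
        return {"data_ref": "df_example", "source": file_path}

tools = DummyTools()
HANDLERS = {
    "read_csv": tools.read_csv,
    # ... one entry per registered tool name
}

async def call_tool(name: str, arguments: dict):
    handler = HANDLERS.get(name)
    if handler is None:
        raise ValueError(f"Unknown tool: {name}")
    return await handler(**arguments)

print(asyncio.run(call_tool("read_csv", {"file_path": "data.csv"})))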
49 mcp-servers/data-platform/pyproject.toml Normal file
@@ -0,0 +1,49 @@
[build-system]
requires = ["setuptools>=61.0", "wheel"]
build-backend = "setuptools.build_meta"

[project]
name = "data-platform-mcp"
version = "1.0.0"
description = "MCP Server for data engineering with pandas, PostgreSQL/PostGIS, and dbt"
readme = "README.md"
license = {text = "MIT"}
requires-python = ">=3.10"
authors = [
    {name = "Leo Miranda"}
]
classifiers = [
    "Development Status :: 4 - Beta",
    "Intended Audience :: Developers",
    "License :: OSI Approved :: MIT License",
    "Programming Language :: Python :: 3",
    "Programming Language :: Python :: 3.10",
    "Programming Language :: Python :: 3.11",
    "Programming Language :: Python :: 3.12",
]
dependencies = [
    "mcp>=0.9.0",
    "pandas>=2.0.0",
    "pyarrow>=14.0.0",
    "asyncpg>=0.29.0",
    "geoalchemy2>=0.14.0",
    "shapely>=2.0.0",
    "dbt-core>=1.9.0",
    "dbt-postgres>=1.9.0",
    "python-dotenv>=1.0.0",
    "pydantic>=2.5.0",
]

[project.optional-dependencies]
dev = [
    "pytest>=7.4.3",
    "pytest-asyncio>=0.23.0",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["mcp_server*"]

[tool.pytest.ini_options]
asyncio_mode = "auto"
testpaths = ["tests"]
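For completeness, the project metadata above can be read programmatically with the standard-library tomllib (Python 3.11+); the relative path is an assumption about where the file sits:

import tomllib

# read the package metadata straight from pyproject.toml (path is an assumption)
with open("mcp-servers/data-platform/pyproject.toml", "rb") as f:
    meta = tomllib.load(f)

print(meta["project"]["name"])     # data-platform-mcp
print(meta["project"]["version"])  # 1.0.0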
23 mcp-servers/data-platform/requirements.txt Normal file
@@ -0,0 +1,23 @@
# MCP SDK
mcp>=0.9.0

# Data Processing
pandas>=2.0.0
pyarrow>=14.0.0

# PostgreSQL/PostGIS
asyncpg>=0.29.0
geoalchemy2>=0.14.0
shapely>=2.0.0

# dbt
dbt-core>=1.9.0
dbt-postgres>=1.9.0

# Utilities
python-dotenv>=1.0.0
pydantic>=2.5.0

# Testing
pytest>=7.4.3
pytest-asyncio>=0.23.0
3 mcp-servers/data-platform/tests/__init__.py Normal file
@@ -0,0 +1,3 @@
"""
|
||||
Tests for Data Platform MCP Server.
|
||||
"""
|
||||
239 mcp-servers/data-platform/tests/test_config.py Normal file
@@ -0,0 +1,239 @@
"""
|
||||
Unit tests for configuration loader.
|
||||
"""
|
||||
import pytest
|
||||
from pathlib import Path
|
||||
import os
|
||||
|
||||
|
||||
def test_load_system_config(tmp_path, monkeypatch):
|
||||
"""Test loading system-level PostgreSQL configuration"""
|
||||
# Import here to avoid import errors before setup
|
||||
from mcp_server.config import DataPlatformConfig
|
||||
|
||||
# Mock home directory
|
||||
config_dir = tmp_path / '.config' / 'claude'
|
||||
config_dir.mkdir(parents=True)
|
||||
|
||||
config_file = config_dir / 'postgres.env'
|
||||
config_file.write_text(
|
||||
"POSTGRES_URL=postgresql://user:pass@localhost:5432/testdb\n"
|
||||
)
|
||||
|
||||
monkeypatch.setenv('HOME', str(tmp_path))
|
||||
monkeypatch.chdir(tmp_path)
|
||||
|
||||
config = DataPlatformConfig()
|
||||
result = config.load()
|
||||
|
||||
assert result['postgres_url'] == 'postgresql://user:pass@localhost:5432/testdb'
|
||||
assert result['postgres_available'] is True
|
||||
|
||||
|
||||
def test_postgres_optional(tmp_path, monkeypatch):
|
||||
"""Test that PostgreSQL configuration is optional"""
|
||||
from mcp_server.config import DataPlatformConfig
|
||||
|
||||
# No postgres.env file
|
||||
monkeypatch.setenv('HOME', str(tmp_path))
|
||||
monkeypatch.chdir(tmp_path)
|
||||
|
||||
# Clear any existing env vars
|
||||
monkeypatch.delenv('POSTGRES_URL', raising=False)
|
||||
|
||||
config = DataPlatformConfig()
|
||||
result = config.load()
|
||||
|
||||
assert result['postgres_url'] is None
|
||||
assert result['postgres_available'] is False
|
||||
|
||||
|
||||
def test_project_config_override(tmp_path, monkeypatch):
|
||||
"""Test that project config overrides system config"""
|
||||
from mcp_server.config import DataPlatformConfig
|
||||
|
||||
# Set up system config
|
||||
system_config_dir = tmp_path / '.config' / 'claude'
|
||||
system_config_dir.mkdir(parents=True)
|
||||
|
||||
system_config = system_config_dir / 'postgres.env'
|
||||
system_config.write_text(
|
||||
"POSTGRES_URL=postgresql://system:pass@localhost:5432/systemdb\n"
|
||||
)
|
||||
|
||||
# Set up project config
|
||||
project_dir = tmp_path / 'project'
|
||||
project_dir.mkdir()
|
||||
|
||||
project_config = project_dir / '.env'
|
||||
project_config.write_text(
|
||||
"POSTGRES_URL=postgresql://project:pass@localhost:5432/projectdb\n"
|
||||
"DBT_PROJECT_DIR=/path/to/dbt\n"
|
||||
)
|
||||
|
||||
monkeypatch.setenv('HOME', str(tmp_path))
|
||||
monkeypatch.chdir(project_dir)
|
||||
|
||||
config = DataPlatformConfig()
|
||||
result = config.load()
|
||||
|
||||
# Project config should override
|
||||
assert result['postgres_url'] == 'postgresql://project:pass@localhost:5432/projectdb'
|
||||
assert result['dbt_project_dir'] == '/path/to/dbt'
|
||||
|
||||
|
||||
def test_max_rows_config(tmp_path, monkeypatch):
|
||||
"""Test max rows configuration"""
|
||||
from mcp_server.config import DataPlatformConfig
|
||||
|
||||
project_dir = tmp_path / 'project'
|
||||
project_dir.mkdir()
|
||||
|
||||
project_config = project_dir / '.env'
|
||||
project_config.write_text("DATA_PLATFORM_MAX_ROWS=50000\n")
|
||||
|
||||
monkeypatch.setenv('HOME', str(tmp_path))
|
||||
monkeypatch.chdir(project_dir)
|
||||
|
||||
config = DataPlatformConfig()
|
||||
result = config.load()
|
||||
|
||||
assert result['max_rows'] == 50000
|
||||
|
||||
|
||||
def test_default_max_rows(tmp_path, monkeypatch):
|
||||
"""Test default max rows value"""
|
||||
from mcp_server.config import DataPlatformConfig
|
||||
|
||||
monkeypatch.setenv('HOME', str(tmp_path))
|
||||
monkeypatch.chdir(tmp_path)
|
||||
|
||||
# Clear any existing env vars
|
||||
monkeypatch.delenv('DATA_PLATFORM_MAX_ROWS', raising=False)
|
||||
|
||||
config = DataPlatformConfig()
|
||||
result = config.load()
|
||||
|
||||
assert result['max_rows'] == 100_000 # Default value
|
||||
|
||||
|
||||
def test_dbt_auto_detection(tmp_path, monkeypatch):
|
||||
"""Test automatic dbt project detection"""
|
||||
from mcp_server.config import DataPlatformConfig
|
||||
|
||||
# Create project with dbt_project.yml
|
||||
project_dir = tmp_path / 'project'
|
||||
project_dir.mkdir()
|
||||
(project_dir / 'dbt_project.yml').write_text("name: test_project\n")
|
||||
|
||||
monkeypatch.setenv('HOME', str(tmp_path))
|
||||
monkeypatch.chdir(project_dir)
|
||||
# Clear PWD and DBT_PROJECT_DIR to ensure auto-detection
|
||||
monkeypatch.delenv('PWD', raising=False)
|
||||
monkeypatch.delenv('DBT_PROJECT_DIR', raising=False)
|
||||
monkeypatch.delenv('CLAUDE_PROJECT_DIR', raising=False)
|
||||
|
||||
config = DataPlatformConfig()
|
||||
result = config.load()
|
||||
|
||||
assert result['dbt_project_dir'] == str(project_dir)
|
||||
assert result['dbt_available'] is True
|
||||
|
||||
|
||||
def test_dbt_subdirectory_detection(tmp_path, monkeypatch):
|
||||
"""Test dbt project detection in subdirectory"""
|
||||
from mcp_server.config import DataPlatformConfig
|
||||
|
||||
# Create project with dbt in subdirectory
|
||||
project_dir = tmp_path / 'project'
|
||||
project_dir.mkdir()
|
||||
# Need a marker file for _find_project_directory to find the project
|
||||
(project_dir / '.git').mkdir()
|
||||
dbt_dir = project_dir / 'transform'
|
||||
dbt_dir.mkdir()
|
||||
(dbt_dir / 'dbt_project.yml').write_text("name: test_project\n")
|
||||
|
||||
monkeypatch.setenv('HOME', str(tmp_path))
|
||||
monkeypatch.chdir(project_dir)
|
||||
# Clear env vars to ensure auto-detection
|
||||
monkeypatch.delenv('PWD', raising=False)
|
||||
monkeypatch.delenv('DBT_PROJECT_DIR', raising=False)
|
||||
monkeypatch.delenv('CLAUDE_PROJECT_DIR', raising=False)
|
||||
|
||||
config = DataPlatformConfig()
|
||||
result = config.load()
|
||||
|
||||
assert result['dbt_project_dir'] == str(dbt_dir)
|
||||
assert result['dbt_available'] is True
|
||||
|
||||
|
||||
def test_no_dbt_project(tmp_path, monkeypatch):
|
||||
"""Test when no dbt project exists"""
|
||||
from mcp_server.config import DataPlatformConfig
|
||||
|
||||
project_dir = tmp_path / 'project'
|
||||
project_dir.mkdir()
|
||||
|
||||
monkeypatch.setenv('HOME', str(tmp_path))
|
||||
monkeypatch.chdir(project_dir)
|
||||
|
||||
# Clear any existing env vars
|
||||
monkeypatch.delenv('DBT_PROJECT_DIR', raising=False)
|
||||
|
||||
config = DataPlatformConfig()
|
||||
result = config.load()
|
||||
|
||||
assert result['dbt_project_dir'] is None
|
||||
assert result['dbt_available'] is False
|
||||
|
||||
|
||||
def test_find_project_directory_from_env(tmp_path, monkeypatch):
|
||||
"""Test finding project directory from CLAUDE_PROJECT_DIR env var"""
|
||||
from mcp_server.config import DataPlatformConfig
|
||||
|
||||
project_dir = tmp_path / 'my-project'
|
||||
project_dir.mkdir()
|
||||
(project_dir / '.git').mkdir()
|
||||
|
||||
monkeypatch.setenv('CLAUDE_PROJECT_DIR', str(project_dir))
|
||||
|
||||
config = DataPlatformConfig()
|
||||
result = config._find_project_directory()
|
||||
|
||||
assert result == project_dir
|
||||
|
||||
|
||||
def test_find_project_directory_from_cwd(tmp_path, monkeypatch):
|
||||
"""Test finding project directory from cwd with .env file"""
|
||||
from mcp_server.config import DataPlatformConfig
|
||||
|
||||
project_dir = tmp_path / 'project'
|
||||
project_dir.mkdir()
|
||||
(project_dir / '.env').write_text("TEST=value")
|
||||
|
||||
monkeypatch.chdir(project_dir)
|
||||
monkeypatch.delenv('CLAUDE_PROJECT_DIR', raising=False)
|
||||
monkeypatch.delenv('PWD', raising=False)
|
||||
|
||||
config = DataPlatformConfig()
|
||||
result = config._find_project_directory()
|
||||
|
||||
assert result == project_dir
|
||||
|
||||
|
||||
def test_find_project_directory_none_when_no_markers(tmp_path, monkeypatch):
|
||||
"""Test returns None when no project markers found"""
|
||||
from mcp_server.config import DataPlatformConfig
|
||||
|
||||
empty_dir = tmp_path / 'empty'
|
||||
empty_dir.mkdir()
|
||||
|
||||
monkeypatch.chdir(empty_dir)
|
||||
monkeypatch.delenv('CLAUDE_PROJECT_DIR', raising=False)
|
||||
monkeypatch.delenv('PWD', raising=False)
|
||||
monkeypatch.delenv('DBT_PROJECT_DIR', raising=False)
|
||||
|
||||
config = DataPlatformConfig()
|
||||
result = config._find_project_directory()
|
||||
|
||||
assert result is None
|
||||
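The override tests above pin down a simple precedence: values from the project .env win over the system-level ~/.config/claude/postgres.env. A minimal sketch of that merge order (the key=value parsing here is an assumption for illustration, not the plugin's actual loader):

from pathlib import Path

def read_env(path: Path) -> dict:
    """Parse a dotenv-style file into a dict (assumed format: KEY=VALUE per line)."""
    if not path.exists():
        return {}
    pairs = (line.split('=', 1) for line in path.read_text().splitlines()
             if '=' in line and not line.startswith('#'))
    return {k.strip(): v.strip() for k, v in pairs}

def effective_config(home: Path, project: Path) -> dict:
    config = read_env(home / '.config' / 'claude' / 'postgres.env')  # system defaults
    config.update(read_env(project / '.env'))  # project values win
    return config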
240 mcp-servers/data-platform/tests/test_data_store.py Normal file
@@ -0,0 +1,240 @@
"""
|
||||
Unit tests for Arrow IPC DataFrame registry.
|
||||
"""
|
||||
import pytest
|
||||
import pandas as pd
|
||||
import pyarrow as pa
|
||||
|
||||
|
||||
def test_store_pandas_dataframe():
|
||||
"""Test storing pandas DataFrame"""
|
||||
from mcp_server.data_store import DataStore
|
||||
|
||||
# Create fresh instance for test
|
||||
store = DataStore()
|
||||
store._dataframes = {}
|
||||
store._metadata = {}
|
||||
|
||||
df = pd.DataFrame({'a': [1, 2, 3], 'b': ['x', 'y', 'z']})
|
||||
data_ref = store.store(df, name='test_df')
|
||||
|
||||
assert data_ref == 'test_df'
|
||||
assert 'test_df' in store._dataframes
|
||||
assert store._metadata['test_df'].rows == 3
|
||||
assert store._metadata['test_df'].columns == 2
|
||||
|
||||
|
||||
def test_store_arrow_table():
|
||||
"""Test storing Arrow Table directly"""
|
||||
from mcp_server.data_store import DataStore
|
||||
|
||||
store = DataStore()
|
||||
store._dataframes = {}
|
||||
store._metadata = {}
|
||||
|
||||
table = pa.table({'x': [1, 2, 3], 'y': [4, 5, 6]})
|
||||
data_ref = store.store(table, name='arrow_test')
|
||||
|
||||
assert data_ref == 'arrow_test'
|
||||
assert store._dataframes['arrow_test'].num_rows == 3
|
||||
|
||||
|
||||
def test_store_auto_name():
|
||||
"""Test auto-generated data_ref names"""
|
||||
from mcp_server.data_store import DataStore
|
||||
|
||||
store = DataStore()
|
||||
store._dataframes = {}
|
||||
store._metadata = {}
|
||||
|
||||
df = pd.DataFrame({'a': [1, 2]})
|
||||
data_ref = store.store(df)
|
||||
|
||||
assert data_ref.startswith('df_')
|
||||
assert len(data_ref) == 11 # df_ + 8 hex chars
|
||||
|
||||
|
||||
def test_get_dataframe():
|
||||
"""Test retrieving stored DataFrame"""
|
||||
from mcp_server.data_store import DataStore
|
||||
|
||||
store = DataStore()
|
||||
store._dataframes = {}
|
||||
store._metadata = {}
|
||||
|
||||
df = pd.DataFrame({'a': [1, 2, 3]})
|
||||
store.store(df, name='get_test')
|
||||
|
||||
result = store.get('get_test')
|
||||
assert result is not None
|
||||
assert result.num_rows == 3
|
||||
|
||||
|
||||
def test_get_pandas():
|
||||
"""Test retrieving as pandas DataFrame"""
|
||||
from mcp_server.data_store import DataStore
|
||||
|
||||
store = DataStore()
|
||||
store._dataframes = {}
|
||||
store._metadata = {}
|
||||
|
||||
df = pd.DataFrame({'a': [1, 2, 3], 'b': ['x', 'y', 'z']})
|
||||
store.store(df, name='pandas_test')
|
||||
|
||||
result = store.get_pandas('pandas_test')
|
||||
assert isinstance(result, pd.DataFrame)
|
||||
assert list(result.columns) == ['a', 'b']
|
||||
assert len(result) == 3
|
||||
|
||||
|
||||
def test_get_nonexistent():
|
||||
"""Test getting nonexistent data_ref returns None"""
|
||||
from mcp_server.data_store import DataStore
|
||||
|
||||
store = DataStore()
|
||||
store._dataframes = {}
|
||||
store._metadata = {}
|
||||
|
||||
assert store.get('nonexistent') is None
|
||||
assert store.get_pandas('nonexistent') is None
|
||||
|
||||
|
||||
def test_list_refs():
|
||||
"""Test listing all stored DataFrames"""
|
||||
from mcp_server.data_store import DataStore
|
||||
|
||||
store = DataStore()
|
||||
store._dataframes = {}
|
||||
store._metadata = {}
|
||||
|
||||
store.store(pd.DataFrame({'a': [1, 2]}), name='df1')
|
||||
store.store(pd.DataFrame({'b': [3, 4, 5]}), name='df2')
|
||||
|
||||
refs = store.list_refs()
|
||||
|
||||
assert len(refs) == 2
|
||||
ref_names = [r['ref'] for r in refs]
|
||||
assert 'df1' in ref_names
|
||||
assert 'df2' in ref_names
|
||||
|
||||
|
||||
def test_drop_dataframe():
|
||||
"""Test dropping a DataFrame"""
|
||||
from mcp_server.data_store import DataStore
|
||||
|
||||
store = DataStore()
|
||||
store._dataframes = {}
|
||||
store._metadata = {}
|
||||
|
||||
store.store(pd.DataFrame({'a': [1]}), name='drop_test')
|
||||
assert store.get('drop_test') is not None
|
||||
|
||||
result = store.drop('drop_test')
|
||||
assert result is True
|
||||
assert store.get('drop_test') is None
|
||||
|
||||
|
||||
def test_drop_nonexistent():
|
||||
"""Test dropping nonexistent data_ref"""
|
||||
from mcp_server.data_store import DataStore
|
||||
|
||||
store = DataStore()
|
||||
store._dataframes = {}
|
||||
store._metadata = {}
|
||||
|
||||
result = store.drop('nonexistent')
|
||||
assert result is False
|
||||
|
||||
|
||||
def test_clear():
|
||||
"""Test clearing all DataFrames"""
|
||||
from mcp_server.data_store import DataStore
|
||||
|
||||
store = DataStore()
|
||||
store._dataframes = {}
|
||||
store._metadata = {}
|
||||
|
||||
store.store(pd.DataFrame({'a': [1]}), name='df1')
|
||||
store.store(pd.DataFrame({'b': [2]}), name='df2')
|
||||
|
||||
store.clear()
|
||||
|
||||
assert len(store.list_refs()) == 0
|
||||
|
||||
|
||||
def test_get_info():
|
||||
"""Test getting DataFrame metadata"""
|
||||
from mcp_server.data_store import DataStore
|
||||
|
||||
store = DataStore()
|
||||
store._dataframes = {}
|
||||
store._metadata = {}
|
||||
|
||||
df = pd.DataFrame({'a': [1, 2, 3], 'b': ['x', 'y', 'z']})
|
||||
store.store(df, name='info_test', source='test source')
|
||||
|
||||
info = store.get_info('info_test')
|
||||
|
||||
assert info.ref == 'info_test'
|
||||
assert info.rows == 3
|
||||
assert info.columns == 2
|
||||
assert info.column_names == ['a', 'b']
|
||||
assert info.source == 'test source'
|
||||
assert info.memory_bytes > 0
|
||||
|
||||
|
||||
def test_total_memory():
|
||||
"""Test total memory calculation"""
|
||||
from mcp_server.data_store import DataStore
|
||||
|
||||
store = DataStore()
|
||||
store._dataframes = {}
|
||||
store._metadata = {}
|
||||
|
||||
store.store(pd.DataFrame({'a': range(100)}), name='df1')
|
||||
store.store(pd.DataFrame({'b': range(200)}), name='df2')
|
||||
|
||||
total = store.total_memory_bytes()
|
||||
assert total > 0
|
||||
|
||||
total_mb = store.total_memory_mb()
|
||||
assert total_mb >= 0
|
||||
|
||||
|
||||
def test_check_row_limit():
|
||||
"""Test row limit checking"""
|
||||
from mcp_server.data_store import DataStore
|
||||
|
||||
store = DataStore()
|
||||
store._max_rows = 100
|
||||
|
||||
# Under limit
|
||||
result = store.check_row_limit(50)
|
||||
assert result['exceeded'] is False
|
||||
|
||||
# Over limit
|
||||
result = store.check_row_limit(150)
|
||||
assert result['exceeded'] is True
|
||||
assert 'suggestion' in result
|
||||
|
||||
|
||||
def test_metadata_dtypes():
|
||||
"""Test that dtypes are correctly recorded"""
|
||||
from mcp_server.data_store import DataStore
|
||||
|
||||
store = DataStore()
|
||||
store._dataframes = {}
|
||||
store._metadata = {}
|
||||
|
||||
df = pd.DataFrame({
|
||||
'int_col': [1, 2, 3],
|
||||
'float_col': [1.1, 2.2, 3.3],
|
||||
'str_col': ['a', 'b', 'c']
|
||||
})
|
||||
store.store(df, name='dtype_test')
|
||||
|
||||
info = store.get_info('dtype_test')
|
||||
|
||||
assert 'int_col' in info.dtypes
|
||||
assert 'float_col' in info.dtypes
|
||||
assert 'str_col' in info.dtypes
|
||||
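These tests assume the registry normalizes everything to Arrow on store() and converts back on get_pandas(). The round trip itself is plain pyarrow and can be checked in isolation:

import pandas as pd
import pyarrow as pa

df = pd.DataFrame({'a': [1, 2, 3]})
table = pa.Table.from_pandas(df)      # what store() is expected to do with pandas input
assert table.num_rows == 3
assert table.to_pandas().equals(df)   # what get_pandas() should round-trip back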
318 mcp-servers/data-platform/tests/test_dbt_tools.py Normal file
@@ -0,0 +1,318 @@
"""
|
||||
Unit tests for dbt MCP tools.
|
||||
"""
|
||||
import pytest
|
||||
from unittest.mock import Mock, patch, MagicMock
|
||||
import subprocess
|
||||
import json
|
||||
import tempfile
|
||||
import os
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mock_config(tmp_path):
|
||||
"""Mock configuration with dbt project"""
|
||||
dbt_dir = tmp_path / 'dbt_project'
|
||||
dbt_dir.mkdir()
|
||||
(dbt_dir / 'dbt_project.yml').write_text('name: test_project\n')
|
||||
|
||||
return {
|
||||
'dbt_project_dir': str(dbt_dir),
|
||||
'dbt_profiles_dir': str(tmp_path / '.dbt')
|
||||
}
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def dbt_tools(mock_config):
|
||||
"""Create DbtTools instance with mocked config"""
|
||||
with patch('mcp_server.dbt_tools.load_config', return_value=mock_config):
|
||||
from mcp_server.dbt_tools import DbtTools
|
||||
|
||||
tools = DbtTools()
|
||||
tools.project_dir = mock_config['dbt_project_dir']
|
||||
tools.profiles_dir = mock_config['dbt_profiles_dir']
|
||||
return tools
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_dbt_parse_success(dbt_tools):
|
||||
"""Test successful dbt parse"""
|
||||
mock_result = MagicMock()
|
||||
mock_result.returncode = 0
|
||||
mock_result.stdout = 'Parsed successfully'
|
||||
mock_result.stderr = ''
|
||||
|
||||
with patch('subprocess.run', return_value=mock_result):
|
||||
result = await dbt_tools.dbt_parse()
|
||||
|
||||
assert result['valid'] is True
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_dbt_parse_failure(dbt_tools):
|
||||
"""Test dbt parse with errors"""
|
||||
mock_result = MagicMock()
|
||||
mock_result.returncode = 1
|
||||
mock_result.stdout = ''
|
||||
mock_result.stderr = 'Compilation error: deprecated syntax'
|
||||
|
||||
with patch('subprocess.run', return_value=mock_result):
|
||||
result = await dbt_tools.dbt_parse()
|
||||
|
||||
assert result['valid'] is False
|
||||
assert 'deprecated' in str(result.get('details', '')).lower() or len(result.get('errors', [])) > 0
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_dbt_run_with_prevalidation(dbt_tools):
|
||||
"""Test dbt run includes pre-validation"""
|
||||
# First call is parse, second is run
|
||||
mock_parse = MagicMock()
|
||||
mock_parse.returncode = 0
|
||||
mock_parse.stdout = 'OK'
|
||||
mock_parse.stderr = ''
|
||||
|
||||
mock_run = MagicMock()
|
||||
mock_run.returncode = 0
|
||||
mock_run.stdout = 'Completed successfully'
|
||||
mock_run.stderr = ''
|
||||
|
||||
with patch('subprocess.run', side_effect=[mock_parse, mock_run]):
|
||||
result = await dbt_tools.dbt_run()
|
||||
|
||||
assert result['success'] is True
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_dbt_run_fails_validation(dbt_tools):
|
||||
"""Test dbt run fails if validation fails"""
|
||||
mock_parse = MagicMock()
|
||||
mock_parse.returncode = 1
|
||||
mock_parse.stdout = ''
|
||||
mock_parse.stderr = 'Parse error'
|
||||
|
||||
with patch('subprocess.run', return_value=mock_parse):
|
||||
result = await dbt_tools.dbt_run()
|
||||
|
||||
assert 'error' in result
|
||||
assert 'Pre-validation failed' in result['error']
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_dbt_run_with_selection(dbt_tools):
|
||||
"""Test dbt run with model selection"""
|
||||
mock_parse = MagicMock()
|
||||
mock_parse.returncode = 0
|
||||
mock_parse.stdout = 'OK'
|
||||
mock_parse.stderr = ''
|
||||
|
||||
mock_run = MagicMock()
|
||||
mock_run.returncode = 0
|
||||
mock_run.stdout = 'Completed'
|
||||
mock_run.stderr = ''
|
||||
|
||||
calls = []
|
||||
|
||||
def track_calls(*args, **kwargs):
|
||||
calls.append(args[0] if args else kwargs.get('args', []))
|
||||
if len(calls) == 1:
|
||||
return mock_parse
|
||||
return mock_run
|
||||
|
||||
with patch('subprocess.run', side_effect=track_calls):
|
||||
result = await dbt_tools.dbt_run(select='dim_customers')
|
||||
|
||||
# Verify --select was passed
|
||||
assert any('--select' in str(call) for call in calls)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_dbt_test(dbt_tools):
|
||||
"""Test dbt test"""
|
||||
mock_result = MagicMock()
|
||||
mock_result.returncode = 0
|
||||
mock_result.stdout = 'All tests passed'
|
||||
mock_result.stderr = ''
|
||||
|
||||
with patch('subprocess.run', return_value=mock_result):
|
||||
result = await dbt_tools.dbt_test()
|
||||
|
||||
assert result['success'] is True
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_dbt_build(dbt_tools):
|
||||
"""Test dbt build with pre-validation"""
|
||||
mock_parse = MagicMock()
|
||||
mock_parse.returncode = 0
|
||||
mock_parse.stdout = 'OK'
|
||||
mock_parse.stderr = ''
|
||||
|
||||
mock_build = MagicMock()
|
||||
mock_build.returncode = 0
|
||||
mock_build.stdout = 'Build complete'
|
||||
mock_build.stderr = ''
|
||||
|
||||
with patch('subprocess.run', side_effect=[mock_parse, mock_build]):
|
||||
result = await dbt_tools.dbt_build()
|
||||
|
||||
assert result['success'] is True
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_dbt_compile(dbt_tools):
|
||||
"""Test dbt compile"""
|
||||
mock_result = MagicMock()
|
||||
mock_result.returncode = 0
|
||||
mock_result.stdout = 'Compiled'
|
||||
mock_result.stderr = ''
|
||||
|
||||
with patch('subprocess.run', return_value=mock_result):
|
||||
result = await dbt_tools.dbt_compile()
|
||||
|
||||
assert result['success'] is True
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_dbt_ls(dbt_tools):
|
||||
"""Test dbt ls"""
|
||||
mock_result = MagicMock()
|
||||
mock_result.returncode = 0
|
||||
mock_result.stdout = 'dim_customers\ndim_products\nfct_orders\n'
|
||||
mock_result.stderr = ''
|
||||
|
||||
with patch('subprocess.run', return_value=mock_result):
|
||||
result = await dbt_tools.dbt_ls()
|
||||
|
||||
assert result['success'] is True
|
||||
assert result['count'] == 3
|
||||
assert 'dim_customers' in result['resources']
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_dbt_docs_generate(dbt_tools, tmp_path):
|
||||
"""Test dbt docs generate"""
|
||||
mock_result = MagicMock()
|
||||
mock_result.returncode = 0
|
||||
mock_result.stdout = 'Done'
|
||||
mock_result.stderr = ''
|
||||
|
||||
# Create fake target directory
|
||||
target_dir = tmp_path / 'dbt_project' / 'target'
|
||||
target_dir.mkdir(parents=True)
|
||||
(target_dir / 'catalog.json').write_text('{}')
|
||||
(target_dir / 'manifest.json').write_text('{}')
|
||||
|
||||
dbt_tools.project_dir = str(tmp_path / 'dbt_project')
|
||||
|
||||
with patch('subprocess.run', return_value=mock_result):
|
||||
result = await dbt_tools.dbt_docs_generate()
|
||||
|
||||
assert result['success'] is True
|
||||
assert result['catalog_generated'] is True
|
||||
assert result['manifest_generated'] is True
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_dbt_lineage(dbt_tools, tmp_path):
|
||||
"""Test dbt lineage"""
|
||||
# Create manifest
|
||||
target_dir = tmp_path / 'dbt_project' / 'target'
|
||||
target_dir.mkdir(parents=True)
|
||||
|
||||
manifest = {
|
||||
'nodes': {
|
||||
'model.test.dim_customers': {
|
||||
'name': 'dim_customers',
|
||||
'resource_type': 'model',
|
||||
'schema': 'public',
|
||||
'database': 'testdb',
|
||||
'description': 'Customer dimension',
|
||||
'tags': ['daily'],
|
||||
'config': {'materialized': 'table'},
|
||||
'depends_on': {
|
||||
'nodes': ['model.test.stg_customers']
|
||||
}
|
||||
},
|
||||
'model.test.stg_customers': {
|
||||
'name': 'stg_customers',
|
||||
'resource_type': 'model',
|
||||
'depends_on': {'nodes': []}
|
||||
},
|
||||
'model.test.fct_orders': {
|
||||
'name': 'fct_orders',
|
||||
'resource_type': 'model',
|
||||
'depends_on': {
|
||||
'nodes': ['model.test.dim_customers']
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
(target_dir / 'manifest.json').write_text(json.dumps(manifest))
|
||||
|
||||
dbt_tools.project_dir = str(tmp_path / 'dbt_project')
|
||||
|
||||
result = await dbt_tools.dbt_lineage('dim_customers')
|
||||
|
||||
assert result['model'] == 'dim_customers'
|
||||
assert 'model.test.stg_customers' in result['upstream']
|
||||
assert 'model.test.fct_orders' in result['downstream']
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_dbt_lineage_model_not_found(dbt_tools, tmp_path):
|
||||
"""Test dbt lineage with nonexistent model"""
|
||||
target_dir = tmp_path / 'dbt_project' / 'target'
|
||||
target_dir.mkdir(parents=True)
|
||||
|
||||
manifest = {
|
||||
'nodes': {
|
||||
'model.test.dim_customers': {
|
||||
'name': 'dim_customers',
|
||||
'resource_type': 'model'
|
||||
}
|
||||
}
|
||||
}
|
||||
(target_dir / 'manifest.json').write_text(json.dumps(manifest))
|
||||
|
||||
dbt_tools.project_dir = str(tmp_path / 'dbt_project')
|
||||
|
||||
result = await dbt_tools.dbt_lineage('nonexistent_model')
|
||||
|
||||
assert 'error' in result
|
||||
assert 'not found' in result['error'].lower()
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_dbt_no_project():
|
||||
"""Test dbt tools when no project configured"""
|
||||
with patch('mcp_server.dbt_tools.load_config', return_value={'dbt_project_dir': None}):
|
||||
from mcp_server.dbt_tools import DbtTools
|
||||
|
||||
tools = DbtTools()
|
||||
tools.project_dir = None
|
||||
|
||||
result = await tools.dbt_run()
|
||||
|
||||
assert 'error' in result
|
||||
assert 'not found' in result['error'].lower()
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_dbt_timeout(dbt_tools):
|
||||
"""Test dbt command timeout handling"""
|
||||
with patch('subprocess.run', side_effect=subprocess.TimeoutExpired('dbt', 300)):
|
||||
result = await dbt_tools.dbt_parse()
|
||||
|
||||
assert 'error' in result
|
||||
assert 'timed out' in result['error'].lower()
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_dbt_not_installed(dbt_tools):
|
||||
"""Test handling when dbt is not installed"""
|
||||
with patch('subprocess.run', side_effect=FileNotFoundError()):
|
||||
result = await dbt_tools.dbt_parse()
|
||||
|
||||
assert 'error' in result
|
||||
assert 'not found' in result['error'].lower()
|
||||
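The pre-validation tests above lean on unittest.mock's side_effect sequencing: when side_effect is a list, successive calls to the patched subprocess.run return successive mocks. A self-contained illustration of just that mechanism (the mock values are illustrative):

import subprocess
from unittest.mock import MagicMock, patch

ok = MagicMock(returncode=0, stdout='OK', stderr='')
fail = MagicMock(returncode=1, stdout='', stderr='boom')

with patch('subprocess.run', side_effect=[ok, fail]):
    assert subprocess.run(['dbt', 'parse']).returncode == 0  # first call -> ok
    assert subprocess.run(['dbt', 'run']).returncode == 1    # second call -> fail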
301 mcp-servers/data-platform/tests/test_pandas_tools.py Normal file
@@ -0,0 +1,301 @@
"""
|
||||
Unit tests for pandas MCP tools.
|
||||
"""
|
||||
import pytest
|
||||
import pandas as pd
|
||||
import tempfile
|
||||
import os
|
||||
from pathlib import Path
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def temp_csv(tmp_path):
|
||||
"""Create a temporary CSV file for testing"""
|
||||
csv_path = tmp_path / 'test.csv'
|
||||
df = pd.DataFrame({
|
||||
'id': [1, 2, 3, 4, 5],
|
||||
'name': ['Alice', 'Bob', 'Charlie', 'Diana', 'Eve'],
|
||||
'value': [10.5, 20.0, 30.5, 40.0, 50.5]
|
||||
})
|
||||
df.to_csv(csv_path, index=False)
|
||||
return str(csv_path)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def temp_parquet(tmp_path):
|
||||
"""Create a temporary Parquet file for testing"""
|
||||
parquet_path = tmp_path / 'test.parquet'
|
||||
df = pd.DataFrame({
|
||||
'id': [1, 2, 3],
|
||||
'data': ['a', 'b', 'c']
|
||||
})
|
||||
df.to_parquet(parquet_path)
|
||||
return str(parquet_path)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def temp_json(tmp_path):
|
||||
"""Create a temporary JSON file for testing"""
|
||||
json_path = tmp_path / 'test.json'
|
||||
df = pd.DataFrame({
|
||||
'x': [1, 2],
|
||||
'y': [3, 4]
|
||||
})
|
||||
df.to_json(json_path, orient='records')
|
||||
return str(json_path)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def pandas_tools():
|
||||
"""Create PandasTools instance with fresh store"""
|
||||
from mcp_server.pandas_tools import PandasTools
|
||||
from mcp_server.data_store import DataStore
|
||||
|
||||
# Reset store for test isolation
|
||||
store = DataStore.get_instance()
|
||||
store._dataframes = {}
|
||||
store._metadata = {}
|
||||
|
||||
return PandasTools()
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_read_csv(pandas_tools, temp_csv):
|
||||
"""Test reading CSV file"""
|
||||
result = await pandas_tools.read_csv(temp_csv, name='csv_test')
|
||||
|
||||
assert 'data_ref' in result
|
||||
assert result['data_ref'] == 'csv_test'
|
||||
assert result['rows'] == 5
|
||||
assert 'id' in result['columns']
|
||||
assert 'name' in result['columns']
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_read_csv_nonexistent(pandas_tools):
|
||||
"""Test reading nonexistent CSV file"""
|
||||
result = await pandas_tools.read_csv('/nonexistent/path.csv')
|
||||
|
||||
assert 'error' in result
|
||||
assert 'not found' in result['error'].lower()
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_read_parquet(pandas_tools, temp_parquet):
|
||||
"""Test reading Parquet file"""
|
||||
result = await pandas_tools.read_parquet(temp_parquet, name='parquet_test')
|
||||
|
||||
assert 'data_ref' in result
|
||||
assert result['rows'] == 3
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_read_json(pandas_tools, temp_json):
|
||||
"""Test reading JSON file"""
|
||||
result = await pandas_tools.read_json(temp_json, name='json_test')
|
||||
|
||||
assert 'data_ref' in result
|
||||
assert result['rows'] == 2
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_to_csv(pandas_tools, temp_csv, tmp_path):
|
||||
"""Test exporting to CSV"""
|
||||
# First load some data
|
||||
await pandas_tools.read_csv(temp_csv, name='export_test')
|
||||
|
||||
# Export to new file
|
||||
output_path = str(tmp_path / 'output.csv')
|
||||
result = await pandas_tools.to_csv('export_test', output_path)
|
||||
|
||||
assert result['success'] is True
|
||||
assert os.path.exists(output_path)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_to_parquet(pandas_tools, temp_csv, tmp_path):
|
||||
"""Test exporting to Parquet"""
|
||||
await pandas_tools.read_csv(temp_csv, name='parquet_export')
|
||||
|
||||
output_path = str(tmp_path / 'output.parquet')
|
||||
result = await pandas_tools.to_parquet('parquet_export', output_path)
|
||||
|
||||
assert result['success'] is True
|
||||
assert os.path.exists(output_path)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_describe(pandas_tools, temp_csv):
|
||||
"""Test describe statistics"""
|
||||
await pandas_tools.read_csv(temp_csv, name='describe_test')
|
||||
|
||||
result = await pandas_tools.describe('describe_test')
|
||||
|
||||
assert 'data_ref' in result
|
||||
assert 'shape' in result
|
||||
assert result['shape']['rows'] == 5
|
||||
assert 'statistics' in result
|
||||
assert 'null_counts' in result
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_head(pandas_tools, temp_csv):
|
||||
"""Test getting first N rows"""
|
||||
await pandas_tools.read_csv(temp_csv, name='head_test')
|
||||
|
||||
result = await pandas_tools.head('head_test', n=3)
|
||||
|
||||
assert result['returned_rows'] == 3
|
||||
assert len(result['data']) == 3
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_tail(pandas_tools, temp_csv):
|
||||
"""Test getting last N rows"""
|
||||
await pandas_tools.read_csv(temp_csv, name='tail_test')
|
||||
|
||||
result = await pandas_tools.tail('tail_test', n=2)
|
||||
|
||||
assert result['returned_rows'] == 2
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_filter(pandas_tools, temp_csv):
|
||||
"""Test filtering rows"""
|
||||
await pandas_tools.read_csv(temp_csv, name='filter_test')
|
||||
|
||||
result = await pandas_tools.filter('filter_test', 'value > 25')
|
||||
|
||||
assert 'data_ref' in result
|
||||
assert result['rows'] == 3 # 30.5, 40.0, 50.5
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_filter_invalid_condition(pandas_tools, temp_csv):
|
||||
"""Test filter with invalid condition"""
|
||||
await pandas_tools.read_csv(temp_csv, name='filter_error')
|
||||
|
||||
result = await pandas_tools.filter('filter_error', 'invalid_column > 0')
|
||||
|
||||
assert 'error' in result
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_select(pandas_tools, temp_csv):
|
||||
"""Test selecting columns"""
|
||||
await pandas_tools.read_csv(temp_csv, name='select_test')
|
||||
|
||||
result = await pandas_tools.select('select_test', ['id', 'name'])
|
||||
|
||||
assert 'data_ref' in result
|
||||
assert result['columns'] == ['id', 'name']
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_select_invalid_column(pandas_tools, temp_csv):
|
||||
"""Test select with invalid column"""
|
||||
await pandas_tools.read_csv(temp_csv, name='select_error')
|
||||
|
||||
result = await pandas_tools.select('select_error', ['id', 'nonexistent'])
|
||||
|
||||
assert 'error' in result
|
||||
assert 'available_columns' in result
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_groupby(pandas_tools, tmp_path):
|
||||
"""Test groupby aggregation"""
|
||||
# Create test data with groups
|
||||
csv_path = tmp_path / 'groupby.csv'
|
||||
df = pd.DataFrame({
|
||||
'category': ['A', 'A', 'B', 'B'],
|
||||
'value': [10, 20, 30, 40]
|
||||
})
|
||||
df.to_csv(csv_path, index=False)
|
||||
|
||||
await pandas_tools.read_csv(str(csv_path), name='groupby_test')
|
||||
|
||||
result = await pandas_tools.groupby(
|
||||
'groupby_test',
|
||||
by='category',
|
||||
agg={'value': 'sum'}
|
||||
)
|
||||
|
||||
assert 'data_ref' in result
|
||||
assert result['rows'] == 2 # Two groups: A, B
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_join(pandas_tools, tmp_path):
|
||||
"""Test joining DataFrames"""
|
||||
# Create left table
|
||||
left_path = tmp_path / 'left.csv'
|
||||
pd.DataFrame({
|
||||
'id': [1, 2, 3],
|
||||
'name': ['A', 'B', 'C']
|
||||
}).to_csv(left_path, index=False)
|
||||
|
||||
# Create right table
|
||||
right_path = tmp_path / 'right.csv'
|
||||
pd.DataFrame({
|
||||
'id': [1, 2, 4],
|
||||
'value': [100, 200, 400]
|
||||
}).to_csv(right_path, index=False)
|
||||
|
||||
await pandas_tools.read_csv(str(left_path), name='left')
|
||||
await pandas_tools.read_csv(str(right_path), name='right')
|
||||
|
||||
result = await pandas_tools.join('left', 'right', on='id', how='inner')
|
||||
|
||||
assert 'data_ref' in result
|
||||
assert result['rows'] == 2 # Only id 1 and 2 match
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_list_data(pandas_tools, temp_csv):
|
||||
"""Test listing all DataFrames"""
|
||||
await pandas_tools.read_csv(temp_csv, name='list_test1')
|
||||
await pandas_tools.read_csv(temp_csv, name='list_test2')
|
||||
|
||||
result = await pandas_tools.list_data()
|
||||
|
||||
assert result['count'] == 2
|
||||
refs = [df['ref'] for df in result['dataframes']]
|
||||
assert 'list_test1' in refs
|
||||
assert 'list_test2' in refs
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_drop_data(pandas_tools, temp_csv):
|
||||
"""Test dropping DataFrame"""
|
||||
await pandas_tools.read_csv(temp_csv, name='drop_test')
|
||||
|
||||
result = await pandas_tools.drop_data('drop_test')
|
||||
|
||||
assert result['success'] is True
|
||||
|
||||
# Verify it's gone
|
||||
list_result = await pandas_tools.list_data()
|
||||
refs = [df['ref'] for df in list_result['dataframes']]
|
||||
assert 'drop_test' not in refs
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_drop_nonexistent(pandas_tools):
|
||||
"""Test dropping nonexistent DataFrame"""
|
||||
result = await pandas_tools.drop_data('nonexistent')
|
||||
|
||||
assert 'error' in result
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_operations_on_nonexistent(pandas_tools):
|
||||
"""Test operations on nonexistent data_ref"""
|
||||
result = await pandas_tools.describe('nonexistent')
|
||||
assert 'error' in result
|
||||
|
||||
result = await pandas_tools.head('nonexistent')
|
||||
assert 'error' in result
|
||||
|
||||
result = await pandas_tools.filter('nonexistent', 'x > 0')
|
||||
assert 'error' in result
|
||||
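The pandas_tools fixture above resets DataStore.get_instance() by clearing _dataframes and _metadata, which implies a process-wide singleton that maps each data_ref name to a stored DataFrame. A rough sketch of that contract, assuming the actual mcp_server.data_store module may differ in detail:

class DataStore:
    """Process-wide registry of named DataFrames (illustrative sketch)."""
    _instance = None

    def __init__(self):
        self._dataframes = {}  # data_ref -> pandas DataFrame
        self._metadata = {}    # data_ref -> dict of source info

    @classmethod
    def get_instance(cls):
        # Lazily create the single shared instance used by all tools
        if cls._instance is None:
            cls._instance = cls()
        return cls._instance

    def put(self, ref, df, meta=None):
        self._dataframes[ref] = df
        self._metadata[ref] = meta or {}

    def get(self, ref):
        return self._dataframes.get(ref)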
338 mcp-servers/data-platform/tests/test_postgres_tools.py Normal file
@@ -0,0 +1,338 @@
"""
Unit tests for PostgreSQL MCP tools.
"""
import pytest
from unittest.mock import Mock, AsyncMock, patch, MagicMock
import pandas as pd


@pytest.fixture
def mock_config():
    """Mock configuration"""
    return {
        'postgres_url': 'postgresql://test:test@localhost:5432/testdb',
        'max_rows': 100000
    }


@pytest.fixture
def postgres_tools(mock_config):
    """Create PostgresTools instance with mocked config"""
    with patch('mcp_server.postgres_tools.load_config', return_value=mock_config):
        from mcp_server.postgres_tools import PostgresTools
        from mcp_server.data_store import DataStore

        # Reset store
        store = DataStore.get_instance()
        store._dataframes = {}
        store._metadata = {}

        tools = PostgresTools()
        tools.config = mock_config
        return tools


@pytest.mark.asyncio
async def test_pg_connect_no_config():
    """Test pg_connect when no PostgreSQL configured"""
    with patch('mcp_server.postgres_tools.load_config', return_value={'postgres_url': None}):
        from mcp_server.postgres_tools import PostgresTools

        tools = PostgresTools()
        tools.config = {'postgres_url': None}

        result = await tools.pg_connect()

        assert result['connected'] is False
        assert 'not configured' in result['error'].lower()


@pytest.mark.asyncio
async def test_pg_connect_success(postgres_tools):
    """Test successful pg_connect"""
    mock_conn = AsyncMock()
    mock_conn.fetchval = AsyncMock(side_effect=[
        'PostgreSQL 15.1',  # version
        'testdb',           # database name
        'testuser',         # user
        None                # PostGIS check fails
    ])
    mock_conn.close = AsyncMock()

    # Create proper async context manager
    mock_cm = AsyncMock()
    mock_cm.__aenter__ = AsyncMock(return_value=mock_conn)
    mock_cm.__aexit__ = AsyncMock(return_value=None)

    mock_pool = MagicMock()
    mock_pool.acquire = MagicMock(return_value=mock_cm)

    # Use AsyncMock for create_pool since it's awaited
    with patch('asyncpg.create_pool', new=AsyncMock(return_value=mock_pool)):
        postgres_tools.pool = None
        result = await postgres_tools.pg_connect()

    assert result['connected'] is True
    assert result['database'] == 'testdb'


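The "proper async context manager" setup in the test above recurs throughout this file: asyncpg's pool.acquire() returns an object entered with async with, so the mock must provide __aenter__/__aexit__. A small helper could deduplicate the pattern (hypothetical, not part of this diff):

from unittest.mock import AsyncMock, MagicMock

def make_mock_pool(mock_conn):
    """Build a pool whose acquire() yields mock_conn via 'async with'."""
    mock_cm = AsyncMock()
    mock_cm.__aenter__ = AsyncMock(return_value=mock_conn)
    mock_cm.__aexit__ = AsyncMock(return_value=None)

    pool = MagicMock()
    pool.acquire = MagicMock(return_value=mock_cm)
    return pool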
@pytest.mark.asyncio
async def test_pg_query_success(postgres_tools):
    """Test successful pg_query"""
    mock_rows = [
        {'id': 1, 'name': 'Alice'},
        {'id': 2, 'name': 'Bob'}
    ]

    mock_conn = AsyncMock()
    mock_conn.fetch = AsyncMock(return_value=mock_rows)

    mock_pool = AsyncMock()
    mock_pool.acquire = MagicMock(return_value=AsyncMock(
        __aenter__=AsyncMock(return_value=mock_conn),
        __aexit__=AsyncMock()
    ))

    postgres_tools.pool = mock_pool

    result = await postgres_tools.pg_query('SELECT * FROM users', name='users_data')

    assert 'data_ref' in result
    assert result['rows'] == 2


@pytest.mark.asyncio
async def test_pg_query_empty_result(postgres_tools):
    """Test pg_query with no results"""
    mock_conn = AsyncMock()
    mock_conn.fetch = AsyncMock(return_value=[])

    mock_pool = AsyncMock()
    mock_pool.acquire = MagicMock(return_value=AsyncMock(
        __aenter__=AsyncMock(return_value=mock_conn),
        __aexit__=AsyncMock()
    ))

    postgres_tools.pool = mock_pool

    result = await postgres_tools.pg_query('SELECT * FROM empty_table')

    assert result['data_ref'] is None
    assert result['rows'] == 0


@pytest.mark.asyncio
async def test_pg_execute_success(postgres_tools):
    """Test successful pg_execute"""
    mock_conn = AsyncMock()
    mock_conn.execute = AsyncMock(return_value='INSERT 0 3')

    mock_pool = AsyncMock()
    mock_pool.acquire = MagicMock(return_value=AsyncMock(
        __aenter__=AsyncMock(return_value=mock_conn),
        __aexit__=AsyncMock()
    ))

    postgres_tools.pool = mock_pool

    result = await postgres_tools.pg_execute('INSERT INTO users VALUES (1, 2, 3)')

    assert result['success'] is True
    assert result['affected_rows'] == 3
    assert result['command'] == 'INSERT'


@pytest.mark.asyncio
async def test_pg_tables(postgres_tools):
    """Test listing tables"""
    mock_rows = [
        {'table_name': 'users', 'table_type': 'BASE TABLE', 'column_count': 5},
        {'table_name': 'orders', 'table_type': 'BASE TABLE', 'column_count': 8}
    ]

    mock_conn = AsyncMock()
    mock_conn.fetch = AsyncMock(return_value=mock_rows)

    mock_pool = AsyncMock()
    mock_pool.acquire = MagicMock(return_value=AsyncMock(
        __aenter__=AsyncMock(return_value=mock_conn),
        __aexit__=AsyncMock()
    ))

    postgres_tools.pool = mock_pool

    result = await postgres_tools.pg_tables(schema='public')

    assert result['schema'] == 'public'
    assert result['count'] == 2
    assert len(result['tables']) == 2


@pytest.mark.asyncio
async def test_pg_columns(postgres_tools):
    """Test getting column info"""
    mock_rows = [
        {
            'column_name': 'id',
            'data_type': 'integer',
            'udt_name': 'int4',
            'is_nullable': 'NO',
            'column_default': "nextval('users_id_seq'::regclass)",
            'character_maximum_length': None,
            'numeric_precision': 32
        },
        {
            'column_name': 'name',
            'data_type': 'character varying',
            'udt_name': 'varchar',
            'is_nullable': 'YES',
            'column_default': None,
            'character_maximum_length': 255,
            'numeric_precision': None
        }
    ]

    mock_conn = AsyncMock()
    mock_conn.fetch = AsyncMock(return_value=mock_rows)

    mock_pool = AsyncMock()
    mock_pool.acquire = MagicMock(return_value=AsyncMock(
        __aenter__=AsyncMock(return_value=mock_conn),
        __aexit__=AsyncMock()
    ))

    postgres_tools.pool = mock_pool

    result = await postgres_tools.pg_columns(table='users')

    assert result['table'] == 'public.users'
    assert result['column_count'] == 2
    assert result['columns'][0]['name'] == 'id'
    assert result['columns'][0]['nullable'] is False


@pytest.mark.asyncio
async def test_pg_schemas(postgres_tools):
    """Test listing schemas"""
    mock_rows = [
        {'schema_name': 'public'},
        {'schema_name': 'app'}
    ]

    mock_conn = AsyncMock()
    mock_conn.fetch = AsyncMock(return_value=mock_rows)

    mock_pool = AsyncMock()
    mock_pool.acquire = MagicMock(return_value=AsyncMock(
        __aenter__=AsyncMock(return_value=mock_conn),
        __aexit__=AsyncMock()
    ))

    postgres_tools.pool = mock_pool

    result = await postgres_tools.pg_schemas()

    assert result['count'] == 2
    assert 'public' in result['schemas']


@pytest.mark.asyncio
async def test_st_tables(postgres_tools):
    """Test listing PostGIS tables"""
    mock_rows = [
        {
            'table_name': 'locations',
            'geometry_column': 'geom',
            'geometry_type': 'POINT',
            'srid': 4326,
            'coord_dimension': 2
        }
    ]

    mock_conn = AsyncMock()
    mock_conn.fetch = AsyncMock(return_value=mock_rows)

    mock_pool = AsyncMock()
    mock_pool.acquire = MagicMock(return_value=AsyncMock(
        __aenter__=AsyncMock(return_value=mock_conn),
        __aexit__=AsyncMock()
    ))

    postgres_tools.pool = mock_pool

    result = await postgres_tools.st_tables()

    assert result['count'] == 1
    assert result['postgis_tables'][0]['table'] == 'locations'
    assert result['postgis_tables'][0]['srid'] == 4326


@pytest.mark.asyncio
async def test_st_tables_no_postgis(postgres_tools):
    """Test st_tables when PostGIS not installed"""
    mock_conn = AsyncMock()
    mock_conn.fetch = AsyncMock(side_effect=Exception("relation \"geometry_columns\" does not exist"))

    # Create proper async context manager
    mock_cm = AsyncMock()
    mock_cm.__aenter__ = AsyncMock(return_value=mock_conn)
    mock_cm.__aexit__ = AsyncMock(return_value=None)

    mock_pool = MagicMock()
    mock_pool.acquire = MagicMock(return_value=mock_cm)

    postgres_tools.pool = mock_pool

    result = await postgres_tools.st_tables()

    assert 'error' in result
    assert 'PostGIS' in result['error']


@pytest.mark.asyncio
async def test_st_extent(postgres_tools):
    """Test getting geometry bounding box"""
    mock_row = {
        'xmin': -122.5,
        'ymin': 37.5,
        'xmax': -122.0,
        'ymax': 38.0
    }

    mock_conn = AsyncMock()
    mock_conn.fetchrow = AsyncMock(return_value=mock_row)

    mock_pool = AsyncMock()
    mock_pool.acquire = MagicMock(return_value=AsyncMock(
        __aenter__=AsyncMock(return_value=mock_conn),
        __aexit__=AsyncMock()
    ))

    postgres_tools.pool = mock_pool

    result = await postgres_tools.st_extent(table='locations', column='geom')

    assert result['bbox']['xmin'] == -122.5
    assert result['bbox']['ymax'] == 38.0


@pytest.mark.asyncio
async def test_error_handling(postgres_tools):
    """Test error handling for database errors"""
    mock_conn = AsyncMock()
    mock_conn.fetch = AsyncMock(side_effect=Exception("Connection refused"))

    # Create proper async context manager
    mock_cm = AsyncMock()
    mock_cm.__aenter__ = AsyncMock(return_value=mock_conn)
    mock_cm.__aexit__ = AsyncMock(return_value=None)

    mock_pool = MagicMock()
    mock_pool.acquire = MagicMock(return_value=mock_cm)

    postgres_tools.pool = mock_pool

    result = await postgres_tools.pg_query('SELECT 1')

    assert 'error' in result
    assert 'Connection refused' in result['error']
@@ -389,7 +389,7 @@ def list_issues(self, state='open', labels=None, repo=None):
 
 ## License
 
-MIT License - Part of the Claude Code Marketplace project.
+MIT License - Part of the Leo Claude Marketplace project.
 
 ## Related Documentation
 
@@ -406,7 +406,7 @@ For issues or questions:
 
 ---
 
-**Built for**: Claude Code Marketplace - Project Management Plugins
+**Built for**: Leo Claude Marketplace - Project Management Plugins
 **Phase**: 1 (Complete)
 **Status**: ✅ Production Ready
 **Last Updated**: 2025-01-06
227 mcp-servers/gitea/mcp_server/config.py Normal file
@@ -0,0 +1,227 @@
"""
Configuration loader for Gitea MCP Server.

Implements hybrid configuration system:
- System-level: ~/.config/claude/gitea.env (credentials)
- Project-level: .env (repository specification)
- Auto-detection: Falls back to git remote URL parsing
"""
from pathlib import Path
from dotenv import load_dotenv
import os
import re
import subprocess
import logging
from typing import Dict, Optional

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


class GiteaConfig:
    """Hybrid configuration loader with mode detection"""

    def __init__(self):
        self.api_url: Optional[str] = None
        self.api_token: Optional[str] = None
        self.repo: Optional[str] = None
        self.mode: str = 'project'

    def load(self) -> Dict[str, Optional[str]]:
        """
        Load configuration from system and project levels.
        Project-level configuration overrides system-level.

        Returns:
            Dict containing api_url, api_token, repo, mode

        Raises:
            FileNotFoundError: If system config is missing
            ValueError: If required configuration is missing
        """
        # Load system config
        system_config = Path.home() / '.config' / 'claude' / 'gitea.env'
        if system_config.exists():
            load_dotenv(system_config)
            logger.info(f"Loaded system configuration from {system_config}")
        else:
            raise FileNotFoundError(
                f"System config not found: {system_config}\n"
                "Create it with: mkdir -p ~/.config/claude && "
                "cat > ~/.config/claude/gitea.env"
            )

        # Find project directory (MCP server cwd is plugin dir, not project dir)
        project_dir = self._find_project_directory()

        # Load project config (overrides system)
        if project_dir:
            project_config = project_dir / '.env'
            if project_config.exists():
                load_dotenv(project_config, override=True)
                logger.info(f"Loaded project configuration from {project_config}")

        # Extract values
        self.api_url = os.getenv('GITEA_API_URL')
        self.api_token = os.getenv('GITEA_API_TOKEN')
        self.repo = os.getenv('GITEA_REPO')  # Optional, must be owner/repo format

        # Auto-detect repo from git remote if not specified
        if not self.repo and project_dir:
            self.repo = self._detect_repo_from_git(project_dir)
            if self.repo:
                logger.info(f"Auto-detected repository from git remote: {self.repo}")

        # Detect mode
        if self.repo:
            self.mode = 'project'
            logger.info(f"Running in project mode: {self.repo}")
        else:
            self.mode = 'company'
            logger.info("Running in company-wide mode (PMO)")

        # Validate required variables
        self._validate()

        return {
            'api_url': self.api_url,
            'api_token': self.api_token,
            'repo': self.repo,
            'mode': self.mode
        }

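Concretely, the two layers that load() reads might look like this; the variable names come from the code above, while the hosts and values are placeholders:

# ~/.config/claude/gitea.env -- system level (credentials)
GITEA_API_URL=https://gitea.example.com/api/v1
GITEA_API_TOKEN=<your-token>

# <project>/.env -- project level (optional repository pin; overrides system values)
GITEA_REPO=my-org/my-repo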
    def _validate(self) -> None:
        """
        Validate that required configuration is present.

        Raises:
            ValueError: If required configuration is missing
        """
        required = {
            'GITEA_API_URL': self.api_url,
            'GITEA_API_TOKEN': self.api_token
        }

        missing = [key for key, value in required.items() if not value]

        if missing:
            raise ValueError(
                f"Missing required configuration: {', '.join(missing)}\n"
                "Check your ~/.config/claude/gitea.env file"
            )

    def _find_project_directory(self) -> Optional[Path]:
        """
        Find the user's project directory.

        The MCP server runs with cwd set to the plugin directory, not the
        user's project. We need to find the actual project directory using
        various heuristics.

        Returns:
            Path to project directory, or None if not found
        """
        # Strategy 1: Check CLAUDE_PROJECT_DIR environment variable
        project_dir = os.getenv('CLAUDE_PROJECT_DIR')
        if project_dir:
            path = Path(project_dir)
            if path.exists():
                logger.info(f"Found project directory from CLAUDE_PROJECT_DIR: {path}")
                return path

        # Strategy 2: Check PWD (original working directory before cwd override)
        pwd = os.getenv('PWD')
        if pwd:
            path = Path(pwd)
            # Verify it has .git or .env (indicates a project)
            if path.exists() and ((path / '.git').exists() or (path / '.env').exists()):
                logger.info(f"Found project directory from PWD: {path}")
                return path

        # Strategy 3: Check current working directory
        # This handles test scenarios and cases where cwd is actually the project
        cwd = Path.cwd()
        if (cwd / '.git').exists() or (cwd / '.env').exists():
            logger.info(f"Found project directory from cwd: {cwd}")
            return cwd

        # Strategy 4: Check if GITEA_REPO is already set (user configured it)
        # If so, we don't need to find the project directory for git detection
        if os.getenv('GITEA_REPO'):
            logger.debug("GITEA_REPO already set, skipping project directory detection")
            return None

        logger.debug("Could not determine project directory")
        return None

    def _detect_repo_from_git(self, project_dir: Optional[Path] = None) -> Optional[str]:
        """
        Auto-detect repository from git remote origin URL.

        Args:
            project_dir: Directory to run git command from (defaults to cwd)

        Supports URL formats:
        - SSH: ssh://git@host:port/owner/repo.git
        - SSH short: git@host:owner/repo.git
        - HTTPS: https://host/owner/repo.git
        - HTTP: http://host/owner/repo.git

        Returns:
            Repository in 'owner/repo' format, or None if detection fails
        """
        try:
            result = subprocess.run(
                ['git', 'remote', 'get-url', 'origin'],
                capture_output=True,
                text=True,
                timeout=5,
                cwd=str(project_dir) if project_dir else None
            )
            if result.returncode != 0:
                logger.debug("No git remote 'origin' found")
                return None

            url = result.stdout.strip()
            return self._parse_git_url(url)

        except subprocess.TimeoutExpired:
            logger.warning("Git command timed out")
            return None
        except FileNotFoundError:
            logger.debug("Git not available")
            return None
        except Exception as e:
            logger.debug(f"Failed to detect repo from git: {e}")
            return None

    def _parse_git_url(self, url: str) -> Optional[str]:
        """
        Parse git URL to extract owner/repo.

        Args:
            url: Git remote URL

        Returns:
            Repository in 'owner/repo' format, or None if parsing fails
        """
        # Remove .git suffix if present
        url = re.sub(r'\.git$', '', url)

        # SSH format: ssh://git@host:port/owner/repo
        ssh_match = re.match(r'ssh://[^/]+/(.+/.+)$', url)
        if ssh_match:
            return ssh_match.group(1)

        # SSH short format: git@host:owner/repo
        ssh_short_match = re.match(r'git@[^:]+:(.+/.+)$', url)
        if ssh_short_match:
            return ssh_short_match.group(1)

        # HTTPS/HTTP format: https://host/owner/repo
        http_match = re.match(r'https?://[^/]+/(.+/.+)$', url)
        if http_match:
            return http_match.group(1)

        logger.warning(f"Could not parse git URL: {url}")
        return None
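Under these regexes, all three supported remote shapes normalize to the same owner/repo string; an illustrative check (host and repo names are placeholders):

cfg = GiteaConfig()
assert cfg._parse_git_url('ssh://git@gitea.example.com:2222/acme/widgets.git') == 'acme/widgets'
assert cfg._parse_git_url('git@gitea.example.com:acme/widgets.git') == 'acme/widgets'
assert cfg._parse_git_url('https://gitea.example.com/acme/widgets.git') == 'acme/widgets'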
@@ -110,8 +110,14 @@ class GiteaClient:
 
     def _resolve_label_ids(self, label_names: List[str], owner: str, repo: str) -> List[int]:
         """Convert label names to label IDs."""
-        org_labels = self.get_org_labels(owner)
-        repo_labels = self.get_labels(f"{owner}/{repo}")
+        full_repo = f"{owner}/{repo}"
+
+        # Only fetch org labels if repo belongs to an organization
+        org_labels = []
+        if self.is_org_repo(full_repo):
+            org_labels = self.get_org_labels(owner)
+
+        repo_labels = self.get_labels(full_repo)
         all_labels = org_labels + repo_labels
         label_map = {label['name']: label['id'] for label in all_labels}
         label_ids = []
@@ -548,10 +554,33 @@ class GiteaClient:
         return response.json()
 
     def is_org_repo(self, repo: Optional[str] = None) -> bool:
-        """Check if repository belongs to an organization (not a user)."""
-        info = self.get_repo_info(repo)
-        owner_type = info.get('owner', {}).get('type', '')
-        return owner_type.lower() == 'organization'
+        """
+        Check if repository belongs to an organization (not a user).
+
+        Uses the /orgs/{owner} endpoint to reliably detect organizations,
+        as the owner.type field in repo info may be null in some Gitea versions.
+        """
+        owner, _ = self._parse_repo(repo)
+        return self._is_organization(owner)
+
+    def _is_organization(self, owner: str) -> bool:
+        """
+        Check if an owner is an organization by querying the orgs endpoint.
+
+        Args:
+            owner: The owner name to check
+
+        Returns:
+            True if owner is an organization, False if user or unknown
+        """
+        url = f"{self.base_url}/orgs/{owner}"
+        try:
+            response = self.session.get(url)
+            # 200 = organization exists, 404 = not an organization (user account)
+            return response.status_code == 200
+        except Exception as e:
+            logger.warning(f"Failed to check if {owner} is organization: {e}")
+            return False
 
     def get_branch_protection(
         self,
@@ -591,3 +620,160 @@ class GiteaClient:
        response = self.session.post(url, json=data)
        response.raise_for_status()
        return response.json()

    def create_org_label(
        self,
        org: str,
        name: str,
        color: str,
        description: Optional[str] = None
    ) -> Dict:
        """
        Create a new label at the organization level.

        Organization labels are shared across all repositories in the org.
        Use this for workflow labels (Type, Priority, Complexity, Effort, etc.)

        Args:
            org: Organization name
            name: Label name (e.g., 'Type/Bug', 'Priority/High')
            color: Hex color code (with or without #)
            description: Optional label description

        Returns:
            Created label dictionary
        """
        url = f"{self.base_url}/orgs/{org}/labels"
        data = {
            'name': name,
            'color': color.lstrip('#')  # Remove # if present
        }
        if description:
            data['description'] = description
        logger.info(f"Creating organization label '{name}' in {org}")
        response = self.session.post(url, json=data)
        response.raise_for_status()
        return response.json()
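As a usage sketch, seeding an organization's shared workflow labels once might look like the following, where client is an initialized GiteaClient and the org name and colors are placeholders:

client.create_org_label('acme', 'Type/Bug', '#d73a4a', 'Something is broken')
client.create_org_label('acme', 'Priority/High', 'e99695')  # leading '#' is optional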

    # ========================================
    # PULL REQUEST OPERATIONS
    # ========================================

    def list_pull_requests(
        self,
        state: str = 'open',
        sort: str = 'recentupdate',
        labels: Optional[List[str]] = None,
        repo: Optional[str] = None
    ) -> List[Dict]:
        """
        List pull requests from Gitea repository.

        Args:
            state: PR state (open, closed, all)
            sort: Sort order (oldest, recentupdate, leastupdate, mostcomment, leastcomment, priority)
            labels: Filter by labels
            repo: Repository in 'owner/repo' format

        Returns:
            List of pull request dictionaries
        """
        owner, target_repo = self._parse_repo(repo)
        url = f"{self.base_url}/repos/{owner}/{target_repo}/pulls"
        params = {'state': state, 'sort': sort}
        if labels:
            params['labels'] = ','.join(labels)
        logger.info(f"Listing PRs from {owner}/{target_repo} with state={state}")
        response = self.session.get(url, params=params)
        response.raise_for_status()
        return response.json()

    def get_pull_request(
        self,
        pr_number: int,
        repo: Optional[str] = None
    ) -> Dict:
        """Get specific pull request details."""
        owner, target_repo = self._parse_repo(repo)
        url = f"{self.base_url}/repos/{owner}/{target_repo}/pulls/{pr_number}"
        logger.info(f"Getting PR #{pr_number} from {owner}/{target_repo}")
        response = self.session.get(url)
        response.raise_for_status()
        return response.json()

    def get_pr_diff(
        self,
        pr_number: int,
        repo: Optional[str] = None
    ) -> str:
        """Get the diff for a pull request."""
        owner, target_repo = self._parse_repo(repo)
        url = f"{self.base_url}/repos/{owner}/{target_repo}/pulls/{pr_number}.diff"
        logger.info(f"Getting diff for PR #{pr_number} from {owner}/{target_repo}")
        response = self.session.get(url)
        response.raise_for_status()
        return response.text

    def get_pr_comments(
        self,
        pr_number: int,
        repo: Optional[str] = None
    ) -> List[Dict]:
        """Get comments on a pull request (uses issue comments endpoint)."""
        owner, target_repo = self._parse_repo(repo)
        # PRs share comment endpoint with issues in Gitea
        url = f"{self.base_url}/repos/{owner}/{target_repo}/issues/{pr_number}/comments"
        logger.info(f"Getting comments for PR #{pr_number} from {owner}/{target_repo}")
        response = self.session.get(url)
        response.raise_for_status()
        return response.json()

    def create_pr_review(
        self,
        pr_number: int,
        body: str,
        event: str = 'COMMENT',
        comments: Optional[List[Dict]] = None,
        repo: Optional[str] = None
    ) -> Dict:
        """
        Create a review on a pull request.

        Args:
            pr_number: Pull request number
            body: Review body/summary
            event: Review action (APPROVE, REQUEST_CHANGES, COMMENT)
            comments: Optional list of inline comments with path, position, body
            repo: Repository in 'owner/repo' format

        Returns:
            Created review dictionary
        """
        owner, target_repo = self._parse_repo(repo)
        url = f"{self.base_url}/repos/{owner}/{target_repo}/pulls/{pr_number}/reviews"
        data = {
            'body': body,
            'event': event
        }
        if comments:
            data['comments'] = comments
        logger.info(f"Creating review on PR #{pr_number} in {owner}/{target_repo}")
        response = self.session.post(url, json=data)
        response.raise_for_status()
        return response.json()

    def add_pr_comment(
        self,
        pr_number: int,
        body: str,
        repo: Optional[str] = None
    ) -> Dict:
        """Add a general comment to a pull request (uses issue comment endpoint)."""
        owner, target_repo = self._parse_repo(repo)
        # PRs share comment endpoint with issues in Gitea
        url = f"{self.base_url}/repos/{owner}/{target_repo}/issues/{pr_number}/comments"
        data = {'body': body}
        logger.info(f"Adding comment to PR #{pr_number} in {owner}/{target_repo}")
        response = self.session.post(url, json=data)
        response.raise_for_status()
        return response.json()
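Taken together, these methods support a simple review loop. A hypothetical caller (PR number, paths, and texts are illustrative) could do:

pr = client.get_pull_request(42)
diff = client.get_pr_diff(42)

client.create_pr_review(
    42,
    body='Looks good overall; one inline note.',
    event='COMMENT',
    comments=[{'path': 'mcp_server/config.py', 'position': 10, 'body': 'Consider logging here.'}]
)
client.add_pr_comment(42, 'Thanks for the quick turnaround!')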
@@ -17,6 +17,7 @@ from .tools.labels import LabelTools
 from .tools.wiki import WikiTools
 from .tools.milestones import MilestoneTools
 from .tools.dependencies import DependencyTools
+from .tools.pull_requests import PullRequestTools
 
 # Suppress noisy MCP validation warnings on stderr
 logging.basicConfig(level=logging.INFO)
@@ -37,6 +38,7 @@ class GiteaMCPServer:
         self.wiki_tools = None
         self.milestone_tools = None
         self.dependency_tools = None
+        self.pr_tools = None
 
     async def initialize(self):
         """
@@ -55,6 +57,7 @@ class GiteaMCPServer:
             self.wiki_tools = WikiTools(self.client)
             self.milestone_tools = MilestoneTools(self.client)
             self.dependency_tools = DependencyTools(self.client)
+            self.pr_tools = PullRequestTools(self.client)
 
             logger.info(f"Gitea MCP Server initialized in {self.config['mode']} mode")
         except Exception as e:
@@ -217,6 +220,10 @@ class GiteaMCPServer:
                     "context": {
                         "type": "string",
                         "description": "Issue title + description or sprint context"
                     },
+                    "repo": {
+                        "type": "string",
+                        "description": "Repository name (owner/repo format)"
+                    }
                 },
                 "required": ["context"]
@@ -615,13 +622,13 @@ class GiteaMCPServer:
             ),
             Tool(
                 name="create_label",
-                description="Create a new label in the repository",
+                description="Create a new label in the repository (for repo-specific labels like Component/*, Tech/*)",
                 inputSchema={
                     "type": "object",
                     "properties": {
                         "name": {
                             "type": "string",
-                            "description": "Label name"
+                            "description": "Label name (e.g., 'Component/Backend', 'Tech/Python')"
                         },
                         "color": {
                             "type": "string",
@@ -638,6 +645,205 @@ class GiteaMCPServer:
                    },
                    "required": ["name", "color"]
                }
            ),
            Tool(
                name="create_org_label",
                description="Create a new label at organization level (for workflow labels like Type/*, Priority/*, Complexity/*, Effort/*)",
                inputSchema={
                    "type": "object",
                    "properties": {
                        "org": {
                            "type": "string",
                            "description": "Organization name"
                        },
                        "name": {
                            "type": "string",
                            "description": "Label name (e.g., 'Type/Bug', 'Priority/High')"
                        },
                        "color": {
                            "type": "string",
                            "description": "Label color (hex code)"
                        },
                        "description": {
                            "type": "string",
                            "description": "Label description"
                        }
                    },
                    "required": ["org", "name", "color"]
                }
            ),
            Tool(
                name="create_label_smart",
                description="Create a label at the appropriate level (org or repo) based on category. Org: Type/*, Priority/*, Complexity/*, Effort/*, Risk/*, Source/*, Agent/*. Repo: Component/*, Tech/*",
                inputSchema={
                    "type": "object",
                    "properties": {
                        "name": {
                            "type": "string",
                            "description": "Label name (e.g., 'Type/Bug', 'Component/Backend')"
                        },
                        "color": {
                            "type": "string",
                            "description": "Label color (hex code)"
                        },
                        "description": {
                            "type": "string",
                            "description": "Label description"
                        },
                        "repo": {
                            "type": "string",
                            "description": "Repository name (owner/repo format)"
                        }
                    },
                    "required": ["name", "color"]
                }
            ),
            # Pull Request Tools
            Tool(
                name="list_pull_requests",
                description="List pull requests from repository",
                inputSchema={
                    "type": "object",
                    "properties": {
                        "state": {
                            "type": "string",
                            "enum": ["open", "closed", "all"],
                            "default": "open",
                            "description": "PR state filter"
                        },
                        "sort": {
                            "type": "string",
                            "enum": ["oldest", "recentupdate", "leastupdate", "mostcomment", "leastcomment", "priority"],
                            "default": "recentupdate",
                            "description": "Sort order"
                        },
                        "labels": {
                            "type": "array",
                            "items": {"type": "string"},
                            "description": "Filter by labels"
                        },
                        "repo": {
                            "type": "string",
                            "description": "Repository name (owner/repo format)"
                        }
                    }
                }
            ),
            Tool(
                name="get_pull_request",
                description="Get specific pull request details",
                inputSchema={
                    "type": "object",
                    "properties": {
                        "pr_number": {
                            "type": "integer",
                            "description": "Pull request number"
                        },
                        "repo": {
                            "type": "string",
                            "description": "Repository name (owner/repo format)"
                        }
                    },
                    "required": ["pr_number"]
                }
            ),
            Tool(
                name="get_pr_diff",
                description="Get the diff for a pull request",
                inputSchema={
                    "type": "object",
                    "properties": {
                        "pr_number": {
                            "type": "integer",
                            "description": "Pull request number"
                        },
                        "repo": {
                            "type": "string",
                            "description": "Repository name (owner/repo format)"
                        }
                    },
                    "required": ["pr_number"]
                }
            ),
            Tool(
                name="get_pr_comments",
                description="Get comments on a pull request",
                inputSchema={
                    "type": "object",
                    "properties": {
                        "pr_number": {
                            "type": "integer",
                            "description": "Pull request number"
                        },
                        "repo": {
                            "type": "string",
                            "description": "Repository name (owner/repo format)"
                        }
                    },
                    "required": ["pr_number"]
                }
            ),
            Tool(
                name="create_pr_review",
                description="Create a review on a pull request (approve, request changes, or comment)",
                inputSchema={
                    "type": "object",
                    "properties": {
                        "pr_number": {
                            "type": "integer",
                            "description": "Pull request number"
                        },
                        "body": {
                            "type": "string",
                            "description": "Review body/summary"
                        },
                        "event": {
                            "type": "string",
                            "enum": ["APPROVE", "REQUEST_CHANGES", "COMMENT"],
                            "default": "COMMENT",
                            "description": "Review action"
                        },
                        "comments": {
                            "type": "array",
                            "items": {
                                "type": "object",
                                "properties": {
                                    "path": {"type": "string"},
                                    "position": {"type": "integer"},
                                    "body": {"type": "string"}
                                }
                            },
                            "description": "Optional inline comments"
                        },
                        "repo": {
                            "type": "string",
                            "description": "Repository name (owner/repo format)"
                        }
                    },
                    "required": ["pr_number", "body"]
                }
            ),
            Tool(
                name="add_pr_comment",
                description="Add a general comment to a pull request",
                inputSchema={
                    "type": "object",
                    "properties": {
                        "pr_number": {
                            "type": "integer",
                            "description": "Pull request number"
                        },
                        "body": {
                            "type": "string",
                            "description": "Comment text"
                        },
                        "repo": {
                            "type": "string",
                            "description": "Repository name (owner/repo format)"
                        }
                    },
                    "required": ["pr_number", "body"]
                }
            )
        ]

@@ -726,6 +932,33 @@ class GiteaMCPServer:
                    arguments.get('description'),
                    arguments.get('repo')
                )
            elif name == "create_org_label":
                result = self.client.create_org_label(
                    arguments['org'],
                    arguments['name'],
                    arguments['color'],
                    arguments.get('description')
                )
            elif name == "create_label_smart":
                result = await self.label_tools.create_label_smart(
                    arguments['name'],
                    arguments['color'],
                    arguments.get('description'),
                    arguments.get('repo')
                )
            # Pull Request tools
            elif name == "list_pull_requests":
                result = await self.pr_tools.list_pull_requests(**arguments)
            elif name == "get_pull_request":
                result = await self.pr_tools.get_pull_request(**arguments)
            elif name == "get_pr_diff":
                result = await self.pr_tools.get_pr_diff(**arguments)
            elif name == "get_pr_comments":
                result = await self.pr_tools.get_pr_comments(**arguments)
            elif name == "create_pr_review":
                result = await self.pr_tools.create_pr_review(**arguments)
            elif name == "add_pr_comment":
                result = await self.pr_tools.add_pr_comment(**arguments)
            else:
                raise ValueError(f"Unknown tool: {name}")

@@ -4,4 +4,8 @@ MCP tools for Gitea integration.
 
 This package provides MCP tool implementations for:
 - Issue operations (issues.py)
 - Label management (labels.py)
+- Wiki operations (wiki.py)
+- Milestone management (milestones.py)
+- Issue dependencies (dependencies.py)
+- Pull request operations (pull_requests.py)
 """
377 mcp-servers/gitea/mcp_server/tools/labels.py Normal file
@@ -0,0 +1,377 @@
"""
Label management tools for MCP server.

Provides async wrappers for label operations with:
- Label taxonomy retrieval
- Intelligent label suggestion
- Dynamic label detection
"""
import asyncio
import logging
import re
from typing import List, Dict, Optional

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


class LabelTools:
    """Async wrappers for Gitea label operations"""

    def __init__(self, gitea_client):
        """
        Initialize label tools.

        Args:
            gitea_client: GiteaClient instance
        """
        self.gitea = gitea_client

    async def get_labels(self, repo: Optional[str] = None) -> Dict[str, List[Dict]]:
        """Get all labels (org + repo if org-owned, repo-only if user-owned)."""
        loop = asyncio.get_event_loop()

        target_repo = repo or self.gitea.repo
        if not target_repo or '/' not in target_repo:
            raise ValueError("Use 'owner/repo' format (e.g. 'org/repo-name')")

        # Check if repo belongs to an organization or user
        is_org = await loop.run_in_executor(
            None,
            lambda: self.gitea.is_org_repo(target_repo)
        )

        org_labels = []
        if is_org:
            org = target_repo.split('/')[0]
            org_labels = await loop.run_in_executor(
                None,
                lambda: self.gitea.get_org_labels(org)
            )

        repo_labels = await loop.run_in_executor(
            None,
            lambda: self.gitea.get_labels(target_repo)
        )

        return {
            'organization': org_labels,
            'repository': repo_labels,
            'total_count': len(org_labels) + len(repo_labels)
        }

    async def suggest_labels(self, context: str, repo: Optional[str] = None) -> List[str]:
        """
        Analyze context and suggest appropriate labels from the repository's actual labels.

        This method fetches actual labels from the repository and matches them
        dynamically, supporting any label naming convention (slash, colon-space, etc.).

        Args:
            context: Issue title + description or sprint context
            repo: Repository in 'owner/repo' format (optional, uses default if not provided)

        Returns:
            List of suggested label names that exist in the repository
        """
        # Fetch actual labels from repository
        target_repo = repo or self.gitea.repo
        if not target_repo:
            logger.warning("No repository specified, returning empty suggestions")
            return []

        try:
            labels_data = await self.get_labels(target_repo)
            all_labels = labels_data.get('organization', []) + labels_data.get('repository', [])
            label_names = [label['name'] for label in all_labels]
        except Exception as e:
            logger.warning(f"Failed to fetch labels: {e}. Using fallback suggestions.")
            label_names = []

        # Build label lookup for dynamic matching
        label_lookup = self._build_label_lookup(label_names)

        suggested = []
        context_lower = context.lower()

        # Type detection (exclusive - only one)
        type_label = None
        if any(word in context_lower for word in ['bug', 'error', 'fix', 'broken', 'crash', 'fail']):
            type_label = self._find_label(label_lookup, 'type', 'bug')
        elif any(word in context_lower for word in ['refactor', 'extract', 'restructure', 'architecture', 'service extraction']):
            type_label = self._find_label(label_lookup, 'type', 'refactor')
        elif any(word in context_lower for word in ['feature', 'add', 'implement', 'new', 'create']):
            type_label = self._find_label(label_lookup, 'type', 'feature')
        elif any(word in context_lower for word in ['docs', 'documentation', 'readme', 'guide']):
            type_label = self._find_label(label_lookup, 'type', 'documentation')
        elif any(word in context_lower for word in ['test', 'testing', 'spec', 'coverage']):
            type_label = self._find_label(label_lookup, 'type', 'test')
        elif any(word in context_lower for word in ['chore', 'maintenance', 'update', 'upgrade']):
            type_label = self._find_label(label_lookup, 'type', 'chore')
        if type_label:
            suggested.append(type_label)

        # Priority detection
        priority_label = None
        if any(word in context_lower for word in ['critical', 'urgent', 'blocker', 'blocking', 'emergency']):
            priority_label = self._find_label(label_lookup, 'priority', 'critical')
        elif any(word in context_lower for word in ['high', 'important', 'asap', 'soon']):
            priority_label = self._find_label(label_lookup, 'priority', 'high')
        elif any(word in context_lower for word in ['low', 'nice-to-have', 'optional', 'later']):
            priority_label = self._find_label(label_lookup, 'priority', 'low')
        else:
            priority_label = self._find_label(label_lookup, 'priority', 'medium')
        if priority_label:
            suggested.append(priority_label)

        # Complexity detection
        complexity_label = None
        if any(word in context_lower for word in ['simple', 'trivial', 'easy', 'quick']):
            complexity_label = self._find_label(label_lookup, 'complexity', 'simple')
        elif any(word in context_lower for word in ['complex', 'difficult', 'challenging', 'intricate']):
            complexity_label = self._find_label(label_lookup, 'complexity', 'complex')
        else:
            complexity_label = self._find_label(label_lookup, 'complexity', 'medium')
        if complexity_label:
            suggested.append(complexity_label)

        # Effort detection (supports both "Effort" and "Efforts" naming)
        effort_label = None
        if any(word in context_lower for word in ['xs', 'tiny', '1 hour', '2 hours']):
            effort_label = self._find_label(label_lookup, 'effort', 'xs')
        elif any(word in context_lower for word in ['small', 's ', '1 day', 'half day']):
            effort_label = self._find_label(label_lookup, 'effort', 's')
        elif any(word in context_lower for word in ['medium', 'm ', '2 days', '3 days']):
            effort_label = self._find_label(label_lookup, 'effort', 'm')
        elif any(word in context_lower for word in ['large', 'l ', '1 week', '5 days']):
            effort_label = self._find_label(label_lookup, 'effort', 'l')
        elif any(word in context_lower for word in ['xl', 'extra large', '2 weeks', 'sprint']):
            effort_label = self._find_label(label_lookup, 'effort', 'xl')
        if effort_label:
            suggested.append(effort_label)

        # Component detection (based on keywords)
        component_mappings = {
            'backend': ['backend', 'server', 'api', 'database', 'service'],
            'frontend': ['frontend', 'ui', 'interface', 'react', 'vue', 'component'],
            'api': ['api', 'endpoint', 'rest', 'graphql', 'route'],
            'database': ['database', 'db', 'sql', 'migration', 'schema', 'postgres'],
            'auth': ['auth', 'authentication', 'login', 'oauth', 'token', 'session'],
            'deploy': ['deploy', 'deployment', 'docker', 'kubernetes', 'ci/cd'],
            'testing': ['test', 'testing', 'spec', 'jest', 'pytest', 'coverage'],
            'docs': ['docs', 'documentation', 'readme', 'guide', 'wiki']
        }

        for component, keywords in component_mappings.items():
            if any(keyword in context_lower for keyword in keywords):
                label = self._find_label(label_lookup, 'component', component)
                if label and label not in suggested:
                    suggested.append(label)

        # Tech stack detection
        tech_mappings = {
            'python': ['python', 'fastapi', 'django', 'flask', 'pytest'],
            'javascript': ['javascript', 'js', 'node', 'npm', 'yarn'],
            'docker': ['docker', 'dockerfile', 'container', 'compose'],
            'postgresql': ['postgres', 'postgresql', 'psql', 'sql'],
            'redis': ['redis', 'cache', 'session store'],
            'vue': ['vue', 'vuejs', 'nuxt'],
            'fastapi': ['fastapi', 'pydantic', 'starlette']
        }

        for tech, keywords in tech_mappings.items():
            if any(keyword in context_lower for keyword in keywords):
                label = self._find_label(label_lookup, 'tech', tech)
                if label and label not in suggested:
                    suggested.append(label)

        # Source detection (based on git branch or context)
        source_label = None
        if 'development' in context_lower or 'dev/' in context_lower:
            source_label = self._find_label(label_lookup, 'source', 'development')
        elif 'staging' in context_lower or 'stage/' in context_lower:
            source_label = self._find_label(label_lookup, 'source', 'staging')
        elif 'production' in context_lower or 'prod' in context_lower:
            source_label = self._find_label(label_lookup, 'source', 'production')
        if source_label:
            suggested.append(source_label)

        # Risk detection
        risk_label = None
        if any(word in context_lower for word in ['breaking', 'breaking change', 'major', 'risky']):
            risk_label = self._find_label(label_lookup, 'risk', 'high')
        elif any(word in context_lower for word in ['safe', 'low risk', 'minor']):
            risk_label = self._find_label(label_lookup, 'risk', 'low')
        if risk_label:
            suggested.append(risk_label)

        logger.info(f"Suggested {len(suggested)} labels based on context and {len(label_names)} available labels")
        return suggested
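For example, against a repository whose labels include 'Type/Bug' and 'Priority/Critical', a context mentioning an urgent crash would surface both; the exact output depends on the labels actually present:

# inside an async function, with label_tools = LabelTools(client)
labels = await label_tools.suggest_labels(
    'Fix urgent crash in auth token refresh',
    repo='acme/widgets'
)
# Plausible result: ['Type/Bug', 'Priority/Critical', 'Complexity/Medium', 'Component/Auth']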

    def _build_label_lookup(self, label_names: List[str]) -> Dict[str, Dict[str, str]]:
        """
        Build a lookup dictionary for label matching.

        Supports various label formats:
        - Slash format: Type/Bug, Priority/High
        - Colon-space format: Type: Bug, Priority: High
        - Colon format: Type:Bug

        Args:
            label_names: List of actual label names from repository

        Returns:
            Nested dict: {category: {value: actual_label_name}}
        """
        lookup: Dict[str, Dict[str, str]] = {}

        for label in label_names:
            # Try different separator patterns
            # Pattern: Category<separator>Value
            # Separators: /, : , :
            match = re.match(r'^([^/:]+)(?:/|:\s*|:)(.+)$', label)
            if match:
                category = match.group(1).lower().rstrip('s')  # Normalize: "Efforts" -> "effort"
                value = match.group(2).lower()

                if category not in lookup:
                    lookup[category] = {}
                lookup[category][value] = label

        return lookup

    def _find_label(self, lookup: Dict[str, Dict[str, str]], category: str, value: str) -> Optional[str]:
        """
        Find actual label name from lookup.

        Args:
            lookup: Label lookup dictionary
            category: Category to search (e.g., 'type', 'priority')
            value: Value to find (e.g., 'bug', 'high')

        Returns:
            Actual label name if found, None otherwise
        """
        category_lower = category.lower().rstrip('s')  # Normalize
        value_lower = value.lower()

        if category_lower in lookup and value_lower in lookup[category_lower]:
            return lookup[category_lower][value_lower]

        return None
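To make the normalization concrete, the three separator styles and the trailing-'s' category fold all land in the same lookup slots; a small illustrative check:

tools = LabelTools(gitea_client=None)  # client is unused by these pure helpers
lookup = tools._build_label_lookup(['Type/Bug', 'Priority: High', 'Efforts/M'])
# -> {'type': {'bug': 'Type/Bug'}, 'priority': {'high': 'Priority: High'},
#     'effort': {'m': 'Efforts/M'}}

assert tools._find_label(lookup, 'Type', 'bug') == 'Type/Bug'
assert tools._find_label(lookup, 'effort', 'm') == 'Efforts/M'  # 'Efforts' folds to 'effort'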

    # Organization-level label categories (workflow labels shared across repos)
    ORG_LABEL_CATEGORIES = {'agent', 'complexity', 'effort', 'efforts', 'priority', 'risk', 'source', 'type'}

    # Repository-level label categories (project-specific labels)
    REPO_LABEL_CATEGORIES = {'component', 'tech'}

    async def create_label_smart(
        self,
        name: str,
        color: str,
        description: Optional[str] = None,
        repo: Optional[str] = None
    ) -> Dict:
        """
        Create a label at the appropriate level (org or repo) based on category.
        Skips if the label already exists (checks both org and repo levels).

        Organization labels: Agent, Complexity, Effort, Priority, Risk, Source, Type
        Repository labels: Component, Tech

        Args:
            name: Label name (e.g., 'Type/Bug', 'Component/Backend')
            color: Hex color code
            description: Optional label description
            repo: Repository in 'owner/repo' format

        Returns:
            Created label dictionary with 'level' key, or 'skipped' if it already exists
        """
        loop = asyncio.get_event_loop()

        target_repo = repo or self.gitea.repo
        if not target_repo or '/' not in target_repo:
            raise ValueError("Use 'owner/repo' format (e.g. 'org/repo-name')")

        owner = target_repo.split('/')[0]
        is_org = await loop.run_in_executor(
            None,
            lambda: self.gitea.is_org_repo(target_repo)
        )

        # Fetch existing labels to check for duplicates
        existing_labels = await self.get_labels(target_repo)
        all_existing = existing_labels.get('organization', []) + existing_labels.get('repository', [])
        existing_names = [label['name'].lower() for label in all_existing]

        # Normalize the new label name for comparison
        name_normalized = name.lower()

        # Also check for format variations (Type/Bug vs Type: Bug)
        name_variations = [name_normalized]
        if '/' in name:
            name_variations.append(name.replace('/', ': ').lower())
            name_variations.append(name.replace('/', ':').lower())
        elif ': ' in name:
            name_variations.append(name.replace(': ', '/').lower())
        elif ':' in name:
            name_variations.append(name.replace(':', '/').lower())

        # Check if label already exists in any format
        for variation in name_variations:
            if variation in existing_names:
                logger.info(f"Label '{name}' already exists (found as '{variation}'), skipping")
                return {
                    'name': name,
                    'skipped': True,
                    'reason': "Label already exists",
                    'level': 'existing'
                }

        # Parse category from label name
        category = None
        if '/' in name:
            category = name.split('/')[0].lower().rstrip('s')
        elif ':' in name:
            category = name.split(':')[0].strip().lower().rstrip('s')

        # If it's an org repo and the category is an org-level category, create at org level
        if is_org and category in self.ORG_LABEL_CATEGORIES:
            result = await loop.run_in_executor(
                None,
                lambda: self.gitea.create_org_label(owner, name, color, description)
            )
            # Handle unexpected response types (API may return list or non-dict)
            if not isinstance(result, dict):
                logger.error(f"Unexpected API response type for org label: {type(result)} - {result}")
                return {
                    'name': name,
                    'error': True,
                    'reason': f"API returned {type(result).__name__} instead of dict: {result}",
                    'level': 'organization'
                }
            result['level'] = 'organization'
            result['skipped'] = False
            logger.info(f"Created organization label '{name}' in {owner}")
        else:
            # Create at repo level
            result = await loop.run_in_executor(
                None,
                lambda: self.gitea.create_label(name, color, description, target_repo)
            )
            # Handle unexpected response types (API may return list or non-dict)
            if not isinstance(result, dict):
                logger.error(f"Unexpected API response type for repo label: {type(result)} - {result}")
                return {
                    'name': name,
                    'error': True,
                    'reason': f"API returned {type(result).__name__} instead of dict: {result}",
|
||||
'level': 'repository'
|
||||
}
|
||||
result['level'] = 'repository'
|
||||
result['skipped'] = False
|
||||
logger.info(f"Created repository label '{name}' in {target_repo}")
|
||||
|
||||
return result
|
||||
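For orientation, a minimal usage sketch of the label helpers above (not part of the diff); `tools` is assumed to be a LabelTools instance wired to a GiteaClient, and the repo name and color are hypothetical:

import asyncio

async def demo(tools):
    # Free-text context drives suggestions across Type/Priority/Source/Risk etc.
    suggestions = await tools.suggest_labels("Fix critical bug in login authentication")
    # Workflow categories land at org level when the owner is an organization;
    # Component/Tech labels stay at repo level.
    created = await tools.create_label_smart('Type/Bug', 'ee0701', repo='myorg/myrepo')
    return suggestions, created.get('level')

# asyncio.run(demo(tools))  # given a LabelTools wired to a real GiteaClient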
274
mcp-servers/gitea/mcp_server/tools/pull_requests.py
Normal file
@@ -0,0 +1,274 @@
"""
Pull request management tools for MCP server.

Provides async wrappers for PR operations with:
- Branch-aware security
- PMO multi-repo support
- Comprehensive error handling
"""
import asyncio
import subprocess
import logging
from typing import List, Dict, Optional

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


class PullRequestTools:
    """Async wrappers for Gitea pull request operations with branch detection"""

    def __init__(self, gitea_client):
        """
        Initialize pull request tools.

        Args:
            gitea_client: GiteaClient instance
        """
        self.gitea = gitea_client

    def _get_current_branch(self) -> str:
        """
        Get the current git branch.

        Returns:
            Current branch name, or 'unknown' if not in a git repo
        """
        try:
            result = subprocess.run(
                ['git', 'rev-parse', '--abbrev-ref', 'HEAD'],
                capture_output=True,
                text=True,
                check=True
            )
            return result.stdout.strip()
        except subprocess.CalledProcessError:
            return "unknown"

    def _check_branch_permissions(self, operation: str) -> bool:
        """
        Check if an operation is allowed on the current branch.

        Args:
            operation: Operation name (list_pull_requests, create_pr_review, etc.)

        Returns:
            True if the operation is allowed, False otherwise
        """
        branch = self._get_current_branch()

        # Read-only operations allowed everywhere
        read_ops = ['list_pull_requests', 'get_pull_request', 'get_pr_diff', 'get_pr_comments']

        # Production branches (read-only)
        if branch in ['main', 'master'] or branch.startswith('prod/'):
            return operation in read_ops

        # Staging branches (read-only for PRs, plus commenting)
        if branch == 'staging' or branch.startswith('stage/'):
            return operation in read_ops + ['add_pr_comment']

        # Development branches (full access)
        if branch in ['development', 'develop'] or branch.startswith(('feat/', 'feature/', 'dev/')):
            return True

        # Unknown branch - be restrictive
        return operation in read_ops

    async def list_pull_requests(
        self,
        state: str = 'open',
        sort: str = 'recentupdate',
        labels: Optional[List[str]] = None,
        repo: Optional[str] = None
    ) -> List[Dict]:
        """
        List pull requests from the repository (async wrapper).

        Args:
            state: PR state (open, closed, all)
            sort: Sort order
            labels: Filter by labels
            repo: Override configured repo (for PMO multi-repo)

        Returns:
            List of pull request dictionaries

        Raises:
            PermissionError: If the operation is not allowed on the current branch
        """
        if not self._check_branch_permissions('list_pull_requests'):
            branch = self._get_current_branch()
            raise PermissionError(
                f"Cannot list PRs on branch '{branch}'. "
                "Switch to a development branch."
            )

        loop = asyncio.get_event_loop()
        return await loop.run_in_executor(
            None,
            lambda: self.gitea.list_pull_requests(state, sort, labels, repo)
        )

    async def get_pull_request(
        self,
        pr_number: int,
        repo: Optional[str] = None
    ) -> Dict:
        """
        Get specific pull request details (async wrapper).

        Args:
            pr_number: Pull request number
            repo: Override configured repo (for PMO multi-repo)

        Returns:
            Pull request dictionary

        Raises:
            PermissionError: If the operation is not allowed on the current branch
        """
        if not self._check_branch_permissions('get_pull_request'):
            branch = self._get_current_branch()
            raise PermissionError(
                f"Cannot get PR on branch '{branch}'. "
                "Switch to a development branch."
            )

        loop = asyncio.get_event_loop()
        return await loop.run_in_executor(
            None,
            lambda: self.gitea.get_pull_request(pr_number, repo)
        )

    async def get_pr_diff(
        self,
        pr_number: int,
        repo: Optional[str] = None
    ) -> str:
        """
        Get pull request diff (async wrapper).

        Args:
            pr_number: Pull request number
            repo: Override configured repo (for PMO multi-repo)

        Returns:
            Diff as string

        Raises:
            PermissionError: If the operation is not allowed on the current branch
        """
        if not self._check_branch_permissions('get_pr_diff'):
            branch = self._get_current_branch()
            raise PermissionError(
                f"Cannot get PR diff on branch '{branch}'. "
                "Switch to a development branch."
            )

        loop = asyncio.get_event_loop()
        return await loop.run_in_executor(
            None,
            lambda: self.gitea.get_pr_diff(pr_number, repo)
        )

    async def get_pr_comments(
        self,
        pr_number: int,
        repo: Optional[str] = None
    ) -> List[Dict]:
        """
        Get comments on a pull request (async wrapper).

        Args:
            pr_number: Pull request number
            repo: Override configured repo (for PMO multi-repo)

        Returns:
            List of comment dictionaries

        Raises:
            PermissionError: If the operation is not allowed on the current branch
        """
        if not self._check_branch_permissions('get_pr_comments'):
            branch = self._get_current_branch()
            raise PermissionError(
                f"Cannot get PR comments on branch '{branch}'. "
                "Switch to a development branch."
            )

        loop = asyncio.get_event_loop()
        return await loop.run_in_executor(
            None,
            lambda: self.gitea.get_pr_comments(pr_number, repo)
        )

    async def create_pr_review(
        self,
        pr_number: int,
        body: str,
        event: str = 'COMMENT',
        comments: Optional[List[Dict]] = None,
        repo: Optional[str] = None
    ) -> Dict:
        """
        Create a review on a pull request (async wrapper with branch check).

        Args:
            pr_number: Pull request number
            body: Review body/summary
            event: Review action (APPROVE, REQUEST_CHANGES, COMMENT)
            comments: Optional list of inline comments
            repo: Override configured repo (for PMO multi-repo)

        Returns:
            Created review dictionary

        Raises:
            PermissionError: If the operation is not allowed on the current branch
        """
        if not self._check_branch_permissions('create_pr_review'):
            branch = self._get_current_branch()
            raise PermissionError(
                f"Cannot create PR review on branch '{branch}'. "
                "Switch to a development branch to review PRs."
            )

        loop = asyncio.get_event_loop()
        return await loop.run_in_executor(
            None,
            lambda: self.gitea.create_pr_review(pr_number, body, event, comments, repo)
        )

    async def add_pr_comment(
        self,
        pr_number: int,
        body: str,
        repo: Optional[str] = None
    ) -> Dict:
        """
        Add a general comment to a pull request (async wrapper with branch check).

        Args:
            pr_number: Pull request number
            body: Comment text
            repo: Override configured repo (for PMO multi-repo)

        Returns:
            Created comment dictionary

        Raises:
            PermissionError: If the operation is not allowed on the current branch
        """
        if not self._check_branch_permissions('add_pr_comment'):
            branch = self._get_current_branch()
            raise PermissionError(
                f"Cannot add PR comment on branch '{branch}'. "
                "Switch to a development or staging branch to comment on PRs."
            )

        loop = asyncio.get_event_loop()
        return await loop.run_in_executor(
            None,
            lambda: self.gitea.add_pr_comment(pr_number, body, repo)
        )
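A short illustrative sketch (not from the diff) of how the branch gate above behaves; it stubs _get_current_branch so no git checkout is needed:

from unittest.mock import Mock, patch

def demo_branch_gate():
    tools = PullRequestTools(Mock())
    # On 'main', reads pass and writes are refused.
    with patch.object(PullRequestTools, '_get_current_branch', return_value='main'):
        assert tools._check_branch_permissions('get_pr_diff') is True
        assert tools._check_branch_permissions('create_pr_review') is False
    # On a feature branch, everything is allowed.
    with patch.object(PullRequestTools, '_get_current_branch', return_value='feat/login'):
        assert tools._check_branch_permissions('create_pr_review') is True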
@@ -149,3 +149,112 @@ def test_mode_detection_company(tmp_path, monkeypatch):

    assert result['mode'] == 'company'
    assert result['repo'] is None


# ========================================
# GIT URL PARSING TESTS
# ========================================

def test_parse_git_url_ssh_format():
    """Test parsing SSH format git URL"""
    config = GiteaConfig()

    # SSH with port: ssh://git@host:port/owner/repo.git
    url = "ssh://git@hotserv.tailc9b278.ts.net:2222/personal-projects/personal-portfolio.git"
    result = config._parse_git_url(url)
    assert result == "personal-projects/personal-portfolio"


def test_parse_git_url_ssh_short_format():
    """Test parsing SSH short format git URL"""
    config = GiteaConfig()

    # SSH short: git@host:owner/repo.git
    url = "git@github.com:owner/repo.git"
    result = config._parse_git_url(url)
    assert result == "owner/repo"


def test_parse_git_url_https_format():
    """Test parsing HTTPS format git URL"""
    config = GiteaConfig()

    # HTTPS: https://host/owner/repo.git
    url = "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git"
    result = config._parse_git_url(url)
    assert result == "personal-projects/leo-claude-mktplace"


def test_parse_git_url_http_format():
    """Test parsing HTTP format git URL"""
    config = GiteaConfig()

    # HTTP: http://host/owner/repo.git
    url = "http://gitea.hotserv.cloud/personal-projects/repo.git"
    result = config._parse_git_url(url)
    assert result == "personal-projects/repo"


def test_parse_git_url_without_git_suffix():
    """Test parsing git URL without .git suffix"""
    config = GiteaConfig()

    url = "https://github.com/owner/repo"
    result = config._parse_git_url(url)
    assert result == "owner/repo"


def test_parse_git_url_invalid_format():
    """Test parsing invalid git URL returns None"""
    config = GiteaConfig()

    url = "not-a-valid-url"
    result = config._parse_git_url(url)
    assert result is None


def test_find_project_directory_from_env(tmp_path, monkeypatch):
    """Test finding project directory from CLAUDE_PROJECT_DIR env var"""
    project_dir = tmp_path / 'my-project'
    project_dir.mkdir()
    (project_dir / '.git').mkdir()

    monkeypatch.setenv('CLAUDE_PROJECT_DIR', str(project_dir))

    config = GiteaConfig()
    result = config._find_project_directory()

    assert result == project_dir


def test_find_project_directory_from_cwd(tmp_path, monkeypatch):
    """Test finding project directory from cwd with .env file"""
    project_dir = tmp_path / 'project'
    project_dir.mkdir()
    (project_dir / '.env').write_text("GITEA_REPO=test/repo")

    monkeypatch.chdir(project_dir)
    # Clear env vars that might interfere
    monkeypatch.delenv('CLAUDE_PROJECT_DIR', raising=False)
    monkeypatch.delenv('PWD', raising=False)

    config = GiteaConfig()
    result = config._find_project_directory()

    assert result == project_dir


def test_find_project_directory_none_when_no_markers(tmp_path, monkeypatch):
    """Test returns None when no project markers found"""
    empty_dir = tmp_path / 'empty'
    empty_dir.mkdir()

    monkeypatch.chdir(empty_dir)
    monkeypatch.delenv('CLAUDE_PROJECT_DIR', raising=False)
    monkeypatch.delenv('PWD', raising=False)
    monkeypatch.delenv('GITEA_REPO', raising=False)

    config = GiteaConfig()
    result = config._find_project_directory()

    assert result is None
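The parser exercised by these tests is outside this hunk; a minimal sketch that would satisfy the cases above, assuming a regex-based approach (illustrative only, not the actual GiteaConfig._parse_git_url):

import re
from typing import Optional

def parse_git_url(url: str) -> Optional[str]:
    # Hypothetical stand-in for GiteaConfig._parse_git_url.
    patterns = [
        r'^ssh://[^@]+@[^/]+/(?P<repo>[^/]+/[^/]+?)(?:\.git)?$',  # ssh://git@host:port/owner/repo.git
        r'^[^@/]+@[^:]+:(?P<repo>[^/]+/[^/]+?)(?:\.git)?$',       # git@host:owner/repo.git
        r'^https?://[^/]+/(?P<repo>[^/]+/[^/]+?)(?:\.git)?$',     # http(s)://host/owner/repo[.git]
    ]
    for pattern in patterns:
        match = re.match(pattern, url)
        if match:
            return match.group('repo')
    return None  # e.g. "not-a-valid-url"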
@@ -222,3 +222,47 @@ def test_no_repo_specified_error(gitea_client):
        client.list_issues()

    assert "Repository not specified" in str(exc_info.value)


# ========================================
# ORGANIZATION DETECTION TESTS
# ========================================

def test_is_organization_true(gitea_client):
    """Test _is_organization returns True for a valid organization"""
    mock_response = Mock()
    mock_response.status_code = 200

    with patch.object(gitea_client.session, 'get', return_value=mock_response):
        result = gitea_client._is_organization('personal-projects')

        assert result is True
        gitea_client.session.get.assert_called_once_with(
            'https://test.com/api/v1/orgs/personal-projects'
        )


def test_is_organization_false(gitea_client):
    """Test _is_organization returns False for a user account"""
    mock_response = Mock()
    mock_response.status_code = 404

    with patch.object(gitea_client.session, 'get', return_value=mock_response):
        result = gitea_client._is_organization('lmiranda')

    assert result is False


def test_is_org_repo_uses_orgs_endpoint(gitea_client):
    """Test is_org_repo uses the /orgs endpoint instead of owner.type"""
    mock_response = Mock()
    mock_response.status_code = 200

    with patch.object(gitea_client.session, 'get', return_value=mock_response):
        result = gitea_client.is_org_repo('personal-projects/repo')

        assert result is True
        # Should call /orgs/personal-projects, not /repos/.../
        gitea_client.session.get.assert_called_once_with(
            'https://test.com/api/v1/orgs/personal-projects'
        )
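The client methods exercised above are likewise outside this hunk; a sketch consistent with the assertions, assuming the client keeps a requests.Session as self.session and its API root as self.base_url (both names are guesses):

def _is_organization(self, owner: str) -> bool:
    # True when GET /api/v1/orgs/{owner} answers 200 (404 means a user account).
    response = self.session.get(f"{self.base_url}/api/v1/orgs/{owner}")
    return response.status_code == 200

def is_org_repo(self, repo: str) -> bool:
    # Derive the owner from 'owner/repo' and probe the /orgs endpoint directly.
    return self._is_organization(repo.split('/')[0])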
478
mcp-servers/gitea/tests/test_labels.py
Normal file
@@ -0,0 +1,478 @@
"""
Unit tests for label tools with suggestion logic.
"""
import pytest
from unittest.mock import Mock, patch
from mcp_server.tools.labels import LabelTools


@pytest.fixture
def mock_gitea_client():
    """Fixture providing a mocked Gitea client"""
    client = Mock()
    client.repo = 'test_org/test_repo'
    client.is_org_repo = Mock(return_value=True)
    return client


@pytest.fixture
def label_tools(mock_gitea_client):
    """Fixture providing a LabelTools instance"""
    return LabelTools(mock_gitea_client)


@pytest.mark.asyncio
async def test_get_labels(label_tools):
    """Test getting all labels (org + repo)"""
    label_tools.gitea.get_org_labels = Mock(return_value=[
        {'name': 'Type/Bug'},
        {'name': 'Type/Feature'}
    ])
    label_tools.gitea.get_labels = Mock(return_value=[
        {'name': 'Component/Backend'},
        {'name': 'Component/Frontend'}
    ])

    result = await label_tools.get_labels()

    assert len(result['organization']) == 2
    assert len(result['repository']) == 2
    assert result['total_count'] == 4


# ========================================
# LABEL LOOKUP TESTS (NEW)
# ========================================

def test_build_label_lookup_slash_format():
    """Test building label lookup with slash format labels"""
    mock_client = Mock()
    mock_client.repo = 'test/repo'
    tools = LabelTools(mock_client)

    labels = ['Type/Bug', 'Type/Feature', 'Priority/High', 'Priority/Low']
    lookup = tools._build_label_lookup(labels)

    assert 'type' in lookup
    assert 'bug' in lookup['type']
    assert lookup['type']['bug'] == 'Type/Bug'
    assert lookup['type']['feature'] == 'Type/Feature'
    assert 'priority' in lookup
    assert lookup['priority']['high'] == 'Priority/High'


def test_build_label_lookup_colon_space_format():
    """Test building label lookup with colon-space format labels"""
    mock_client = Mock()
    mock_client.repo = 'test/repo'
    tools = LabelTools(mock_client)

    labels = ['Type: Bug', 'Type: Feature', 'Priority: High', 'Effort: M']
    lookup = tools._build_label_lookup(labels)

    assert 'type' in lookup
    assert 'bug' in lookup['type']
    assert lookup['type']['bug'] == 'Type: Bug'
    assert lookup['type']['feature'] == 'Type: Feature'
    assert 'priority' in lookup
    assert lookup['priority']['high'] == 'Priority: High'
    # Test singular "Effort" (not "Efforts")
    assert 'effort' in lookup
    assert lookup['effort']['m'] == 'Effort: M'


def test_build_label_lookup_efforts_normalization():
    """Test that 'Efforts' is normalized to 'effort' for matching"""
    mock_client = Mock()
    mock_client.repo = 'test/repo'
    tools = LabelTools(mock_client)

    labels = ['Efforts/XS', 'Efforts/S', 'Efforts/M']
    lookup = tools._build_label_lookup(labels)

    # 'Efforts' should be normalized to 'effort'
    assert 'effort' in lookup
    assert lookup['effort']['xs'] == 'Efforts/XS'


def test_find_label():
    """Test finding labels from lookup"""
    mock_client = Mock()
    mock_client.repo = 'test/repo'
    tools = LabelTools(mock_client)

    lookup = {
        'type': {'bug': 'Type: Bug', 'feature': 'Type: Feature'},
        'priority': {'high': 'Priority: High', 'low': 'Priority: Low'}
    }

    assert tools._find_label(lookup, 'type', 'bug') == 'Type: Bug'
    assert tools._find_label(lookup, 'priority', 'high') == 'Priority: High'
    assert tools._find_label(lookup, 'type', 'nonexistent') is None
    assert tools._find_label(lookup, 'nonexistent', 'bug') is None


# ========================================
# SUGGEST LABELS WITH DYNAMIC FORMAT TESTS
# ========================================

def _create_tools_with_labels(labels):
    """Helper to create LabelTools with mocked labels"""
    mock_client = Mock()
    mock_client.repo = 'test/repo'
    mock_client.is_org_repo = Mock(return_value=False)
    mock_client.get_labels = Mock(return_value=[{'name': label} for label in labels])
    return LabelTools(mock_client)


@pytest.mark.asyncio
async def test_suggest_labels_with_slash_format():
    """Test label suggestion with slash format labels"""
    labels = [
        'Type/Bug', 'Type/Feature', 'Type/Refactor',
        'Priority/Critical', 'Priority/High', 'Priority/Medium', 'Priority/Low',
        'Complexity/Simple', 'Complexity/Medium', 'Complexity/Complex',
        'Component/Auth'
    ]
    tools = _create_tools_with_labels(labels)

    context = "Fix critical bug in login authentication"
    suggestions = await tools.suggest_labels(context)

    assert 'Type/Bug' in suggestions
    assert 'Priority/Critical' in suggestions
    assert 'Component/Auth' in suggestions


@pytest.mark.asyncio
async def test_suggest_labels_with_colon_space_format():
    """Test label suggestion with colon-space format labels"""
    labels = [
        'Type: Bug', 'Type: Feature', 'Type: Refactor',
        'Priority: Critical', 'Priority: High', 'Priority: Medium', 'Priority: Low',
        'Complexity: Simple', 'Complexity: Medium', 'Complexity: Complex',
        'Effort: XS', 'Effort: S', 'Effort: M', 'Effort: L', 'Effort: XL'
    ]
    tools = _create_tools_with_labels(labels)

    context = "Fix critical bug for tiny 1 hour fix"
    suggestions = await tools.suggest_labels(context)

    # Should return colon-space format labels
    assert 'Type: Bug' in suggestions
    assert 'Priority: Critical' in suggestions
    assert 'Effort: XS' in suggestions


@pytest.mark.asyncio
async def test_suggest_labels_bug():
    """Test label suggestion for bug context"""
    labels = [
        'Type/Bug', 'Type/Feature',
        'Priority/Critical', 'Priority/High', 'Priority/Medium', 'Priority/Low',
        'Complexity/Simple', 'Complexity/Medium', 'Complexity/Complex',
        'Component/Auth'
    ]
    tools = _create_tools_with_labels(labels)

    context = "Fix critical bug in login authentication"
    suggestions = await tools.suggest_labels(context)

    assert 'Type/Bug' in suggestions
    assert 'Priority/Critical' in suggestions
    assert 'Component/Auth' in suggestions


@pytest.mark.asyncio
async def test_suggest_labels_feature():
    """Test label suggestion for feature context"""
    labels = ['Type/Feature', 'Priority/Medium', 'Complexity/Medium']
    tools = _create_tools_with_labels(labels)

    context = "Add new feature to implement user dashboard"
    suggestions = await tools.suggest_labels(context)

    assert 'Type/Feature' in suggestions
    assert any('Priority' in label for label in suggestions)


@pytest.mark.asyncio
async def test_suggest_labels_refactor():
    """Test label suggestion for refactor context"""
    labels = ['Type/Refactor', 'Priority/Medium', 'Complexity/Medium', 'Component/Backend']
    tools = _create_tools_with_labels(labels)

    context = "Refactor architecture to extract service layer"
    suggestions = await tools.suggest_labels(context)

    assert 'Type/Refactor' in suggestions
    assert 'Component/Backend' in suggestions


@pytest.mark.asyncio
async def test_suggest_labels_documentation():
    """Test label suggestion for documentation context"""
    labels = ['Type/Documentation', 'Priority/Medium', 'Complexity/Medium', 'Component/API', 'Component/Docs']
    tools = _create_tools_with_labels(labels)

    context = "Update documentation for API endpoints"
    suggestions = await tools.suggest_labels(context)

    assert 'Type/Documentation' in suggestions
    assert 'Component/API' in suggestions or 'Component/Docs' in suggestions


@pytest.mark.asyncio
async def test_suggest_labels_priority():
    """Test priority detection in suggestions"""
    labels = ['Type/Feature', 'Priority/Critical', 'Priority/High', 'Priority/Medium', 'Priority/Low', 'Complexity/Medium']
    tools = _create_tools_with_labels(labels)

    # Critical priority
    context = "Urgent blocker in production"
    suggestions = await tools.suggest_labels(context)
    assert 'Priority/Critical' in suggestions

    # High priority
    context = "Important feature needed asap"
    suggestions = await tools.suggest_labels(context)
    assert 'Priority/High' in suggestions

    # Low priority
    context = "Nice-to-have optional improvement"
    suggestions = await tools.suggest_labels(context)
    assert 'Priority/Low' in suggestions


@pytest.mark.asyncio
async def test_suggest_labels_complexity():
    """Test complexity detection in suggestions"""
    labels = ['Type/Feature', 'Priority/Medium', 'Complexity/Simple', 'Complexity/Medium', 'Complexity/Complex']
    tools = _create_tools_with_labels(labels)

    # Simple complexity
    context = "Simple quick fix for typo"
    suggestions = await tools.suggest_labels(context)
    assert 'Complexity/Simple' in suggestions

    # Complex complexity
    context = "Complex challenging architecture redesign"
    suggestions = await tools.suggest_labels(context)
    assert 'Complexity/Complex' in suggestions


@pytest.mark.asyncio
async def test_suggest_labels_efforts():
    """Test effort detection in suggestions"""
    labels = ['Type/Feature', 'Priority/Medium', 'Complexity/Medium', 'Efforts/XS', 'Efforts/S', 'Efforts/M', 'Efforts/L', 'Efforts/XL']
    tools = _create_tools_with_labels(labels)

    # XS effort
    context = "Tiny fix that takes 1 hour"
    suggestions = await tools.suggest_labels(context)
    assert 'Efforts/XS' in suggestions

    # L effort
    context = "Large feature taking 1 week"
    suggestions = await tools.suggest_labels(context)
    assert 'Efforts/L' in suggestions


@pytest.mark.asyncio
async def test_suggest_labels_components():
    """Test component detection in suggestions"""
    labels = ['Type/Feature', 'Priority/Medium', 'Complexity/Medium', 'Component/Backend', 'Component/Frontend', 'Component/API', 'Component/Database']
    tools = _create_tools_with_labels(labels)

    # Backend component
    context = "Update backend API service"
    suggestions = await tools.suggest_labels(context)
    assert 'Component/Backend' in suggestions
    assert 'Component/API' in suggestions

    # Frontend component
    context = "Fix frontend UI component"
    suggestions = await tools.suggest_labels(context)
    assert 'Component/Frontend' in suggestions

    # Database component
    context = "Add database migration for schema"
    suggestions = await tools.suggest_labels(context)
    assert 'Component/Database' in suggestions


@pytest.mark.asyncio
async def test_suggest_labels_tech_stack():
    """Test tech stack detection in suggestions"""
    labels = ['Type/Feature', 'Priority/Medium', 'Complexity/Medium', 'Tech/Python', 'Tech/FastAPI', 'Tech/Docker', 'Tech/PostgreSQL']
    tools = _create_tools_with_labels(labels)

    # Python
    context = "Update Python FastAPI endpoint"
    suggestions = await tools.suggest_labels(context)
    assert 'Tech/Python' in suggestions
    assert 'Tech/FastAPI' in suggestions

    # Docker
    context = "Fix Dockerfile configuration"
    suggestions = await tools.suggest_labels(context)
    assert 'Tech/Docker' in suggestions

    # PostgreSQL
    context = "Optimize PostgreSQL query"
    suggestions = await tools.suggest_labels(context)
    assert 'Tech/PostgreSQL' in suggestions


@pytest.mark.asyncio
async def test_suggest_labels_source():
    """Test source detection in suggestions"""
    labels = ['Type/Feature', 'Priority/Medium', 'Complexity/Medium', 'Source/Development', 'Source/Staging', 'Source/Production']
    tools = _create_tools_with_labels(labels)

    # Development
    context = "Issue found in development environment"
    suggestions = await tools.suggest_labels(context)
    assert 'Source/Development' in suggestions

    # Production
    context = "Critical production issue"
    suggestions = await tools.suggest_labels(context)
    assert 'Source/Production' in suggestions


@pytest.mark.asyncio
async def test_suggest_labels_risk():
    """Test risk detection in suggestions"""
    labels = ['Type/Feature', 'Priority/Medium', 'Complexity/Medium', 'Risk/High', 'Risk/Low']
    tools = _create_tools_with_labels(labels)

    # High risk
    context = "Breaking change to major API"
    suggestions = await tools.suggest_labels(context)
    assert 'Risk/High' in suggestions

    # Low risk
    context = "Safe minor update with low risk"
    suggestions = await tools.suggest_labels(context)
    assert 'Risk/Low' in suggestions


@pytest.mark.asyncio
async def test_suggest_labels_multiple_categories():
    """Test that suggestions span multiple categories"""
    labels = [
        'Type/Bug', 'Type/Feature',
        'Priority/Critical', 'Priority/Medium',
        'Complexity/Complex', 'Complexity/Medium',
        'Component/Backend', 'Component/API', 'Component/Auth',
        'Tech/FastAPI', 'Tech/PostgreSQL',
        'Source/Production'
    ]
    tools = _create_tools_with_labels(labels)

    context = """
    Urgent critical bug in production backend API service.
    Need to fix broken authentication endpoint.
    This is a complex issue requiring FastAPI and PostgreSQL expertise.
    """

    suggestions = await tools.suggest_labels(context)

    # Should have Type
    assert any('Type/' in label for label in suggestions)

    # Should have Priority
    assert any('Priority/' in label for label in suggestions)

    # Should have Component
    assert any('Component/' in label for label in suggestions)

    # Should have Tech
    assert any('Tech/' in label for label in suggestions)

    # Should have Source
    assert any('Source/' in label for label in suggestions)


@pytest.mark.asyncio
async def test_suggest_labels_empty_repo():
    """Test suggestions when no repo is specified and no labels are available"""
    mock_client = Mock()
    mock_client.repo = None
    tools = LabelTools(mock_client)

    context = "Fix a bug"
    suggestions = await tools.suggest_labels(context)

    # Should return an empty list when no repo is configured
    assert suggestions == []


@pytest.mark.asyncio
async def test_suggest_labels_no_matching_labels():
    """Test suggestions return empty when no matching labels exist"""
    labels = ['Custom/Label', 'Other/Thing']  # No standard labels
    tools = _create_tools_with_labels(labels)

    context = "Fix a bug"
    suggestions = await tools.suggest_labels(context)

    # Should return an empty list since no Type/Bug or similar exists
    assert len(suggestions) == 0


@pytest.mark.asyncio
async def test_get_labels_org_owned_repo():
    """Test getting labels for an organization-owned repository"""
    mock_client = Mock()
    mock_client.repo = 'myorg/myrepo'
    mock_client.is_org_repo = Mock(return_value=True)
    mock_client.get_org_labels = Mock(return_value=[
        {'name': 'Type/Bug', 'id': 1},
        {'name': 'Type/Feature', 'id': 2}
    ])
    mock_client.get_labels = Mock(return_value=[
        {'name': 'Component/Backend', 'id': 3}
    ])

    tools = LabelTools(mock_client)
    result = await tools.get_labels()

    # Should fetch both org and repo labels
    mock_client.is_org_repo.assert_called_once_with('myorg/myrepo')
    mock_client.get_org_labels.assert_called_once_with('myorg')
    mock_client.get_labels.assert_called_once_with('myorg/myrepo')

    assert len(result['organization']) == 2
    assert len(result['repository']) == 1
    assert result['total_count'] == 3


@pytest.mark.asyncio
async def test_get_labels_user_owned_repo():
    """Test getting labels for a user-owned repository (no org labels)"""
    mock_client = Mock()
    mock_client.repo = 'lmiranda/personal-portfolio'
    mock_client.is_org_repo = Mock(return_value=False)
    mock_client.get_labels = Mock(return_value=[
        {'name': 'bug', 'id': 1},
        {'name': 'enhancement', 'id': 2}
    ])

    tools = LabelTools(mock_client)
    result = await tools.get_labels()

    # Should check if it is an org repo
    mock_client.is_org_repo.assert_called_once_with('lmiranda/personal-portfolio')

    # Should NOT call get_org_labels for user-owned repos
    mock_client.get_org_labels.assert_not_called()

    # Should still get repo labels
    mock_client.get_labels.assert_called_once_with('lmiranda/personal-portfolio')

    assert len(result['organization']) == 0
    assert len(result['repository']) == 2
    assert result['total_count'] == 2
@@ -294,4 +294,4 @@ logging.basicConfig(level=logging.DEBUG)

## License

MIT License - Part of the Claude Code Marketplace (`support-claude-mktplace`).
MIT License - Part of the Leo Claude Marketplace.
@@ -4,6 +4,7 @@ NetBox API client for interacting with NetBox REST API.

Provides a generic HTTP client with methods for all standard REST operations.
Individual tool modules use this client for their specific endpoints.
"""
import json
import requests
import logging
from typing import List, Dict, Optional, Any, Union
@@ -83,7 +84,20 @@ class NetBoxClient:
        if response.status_code == 204 or not response.content:
            return None

        return response.json()
        # Parse JSON with diagnostic error handling
        try:
            return response.json()
        except json.JSONDecodeError as e:
            logger.error(
                f"JSON decode failed. Status: {response.status_code}, "
                f"Content-Length: {len(response.content)}, "
                f"Content preview: {response.content[:200]!r}"
            )
            raise ValueError(
                f"Invalid JSON response from NetBox: {e}. "
                f"Status code: {response.status_code}, "
                f"Content length: {len(response.content)} bytes"
            ) from e

    def list(
        self,
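A hedged illustration of the new error path: a response whose body is not JSON should surface as the diagnostic ValueError above. The method name _handle_response is an assumption, since the enclosing definition sits outside this hunk:

import json
from unittest.mock import Mock

import pytest

def test_invalid_json_raises_value_error(netbox_client):
    response = Mock()
    response.status_code = 502
    response.content = b'<html>Bad Gateway</html>'
    response.json.side_effect = json.JSONDecodeError('Expecting value', 'doc', 0)

    with pytest.raises(ValueError, match='Invalid JSON response from NetBox'):
        netbox_client._handle_response(response)  # hypothetical method name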
@@ -103,7 +103,19 @@ TOOL_DEFINITIONS = {
        'properties': {
            'id': {'type': 'integer', 'description': 'Site ID'},
            'name': {'type': 'string', 'description': 'New name'},
            'status': {'type': 'string', 'description': 'New status'}
            'slug': {'type': 'string', 'description': 'New slug'},
            'status': {'type': 'string', 'description': 'Status'},
            'region': {'type': 'integer', 'description': 'Region ID'},
            'group': {'type': 'integer', 'description': 'Site group ID'},
            'tenant': {'type': 'integer', 'description': 'Tenant ID'},
            'facility': {'type': 'string', 'description': 'Facility name'},
            'time_zone': {'type': 'string', 'description': 'Time zone'},
            'description': {'type': 'string', 'description': 'Description'},
            'physical_address': {'type': 'string', 'description': 'Physical address'},
            'shipping_address': {'type': 'string', 'description': 'Shipping address'},
            'latitude': {'type': 'number', 'description': 'Latitude'},
            'longitude': {'type': 'number', 'description': 'Longitude'},
            'comments': {'type': 'string', 'description': 'Comments'}
        },
        'required': ['id']
    },
@@ -136,7 +148,14 @@ TOOL_DEFINITIONS = {
    },
    'dcim_update_location': {
        'description': 'Update an existing location',
        'properties': {'id': {'type': 'integer', 'description': 'Location ID'}},
        'properties': {
            'id': {'type': 'integer', 'description': 'Location ID'},
            'name': {'type': 'string', 'description': 'New name'},
            'slug': {'type': 'string', 'description': 'New slug'},
            'site': {'type': 'integer', 'description': 'Site ID'},
            'parent': {'type': 'integer', 'description': 'Parent location ID'},
            'description': {'type': 'string', 'description': 'Description'}
        },
        'required': ['id']
    },
    'dcim_delete_location': {
@@ -171,7 +190,18 @@ TOOL_DEFINITIONS = {
    },
    'dcim_update_rack': {
        'description': 'Update an existing rack',
        'properties': {'id': {'type': 'integer', 'description': 'Rack ID'}},
        'properties': {
            'id': {'type': 'integer', 'description': 'Rack ID'},
            'name': {'type': 'string', 'description': 'New name'},
            'site': {'type': 'integer', 'description': 'Site ID'},
            'location': {'type': 'integer', 'description': 'Location ID'},
            'status': {'type': 'string', 'description': 'Status'},
            'role': {'type': 'integer', 'description': 'Role ID'},
            'tenant': {'type': 'integer', 'description': 'Tenant ID'},
            'u_height': {'type': 'integer', 'description': 'Rack height in U'},
            'description': {'type': 'string', 'description': 'Description'},
            'comments': {'type': 'string', 'description': 'Comments'}
        },
        'required': ['id']
    },
    'dcim_delete_rack': {
@@ -198,7 +228,12 @@ TOOL_DEFINITIONS = {
    },
    'dcim_update_manufacturer': {
        'description': 'Update an existing manufacturer',
        'properties': {'id': {'type': 'integer', 'description': 'Manufacturer ID'}},
        'properties': {
            'id': {'type': 'integer', 'description': 'Manufacturer ID'},
            'name': {'type': 'string', 'description': 'New name'},
            'slug': {'type': 'string', 'description': 'New slug'},
            'description': {'type': 'string', 'description': 'Description'}
        },
        'required': ['id']
    },
    'dcim_delete_manufacturer': {
@@ -230,7 +265,16 @@ TOOL_DEFINITIONS = {
    },
    'dcim_update_device_type': {
        'description': 'Update an existing device type',
        'properties': {'id': {'type': 'integer', 'description': 'Device type ID'}},
        'properties': {
            'id': {'type': 'integer', 'description': 'Device type ID'},
            'manufacturer': {'type': 'integer', 'description': 'Manufacturer ID'},
            'model': {'type': 'string', 'description': 'Model name'},
            'slug': {'type': 'string', 'description': 'New slug'},
            'u_height': {'type': 'number', 'description': 'Height in rack units'},
            'is_full_depth': {'type': 'boolean', 'description': 'Is full depth'},
            'description': {'type': 'string', 'description': 'Description'},
            'comments': {'type': 'string', 'description': 'Comments'}
        },
        'required': ['id']
    },
    'dcim_delete_device_type': {
@@ -259,7 +303,14 @@ TOOL_DEFINITIONS = {
    },
    'dcim_update_device_role': {
        'description': 'Update an existing device role',
        'properties': {'id': {'type': 'integer', 'description': 'Device role ID'}},
        'properties': {
            'id': {'type': 'integer', 'description': 'Device role ID'},
            'name': {'type': 'string', 'description': 'New name'},
            'slug': {'type': 'string', 'description': 'New slug'},
            'color': {'type': 'string', 'description': 'Hex color code'},
            'vm_role': {'type': 'boolean', 'description': 'Can be assigned to VMs'},
            'description': {'type': 'string', 'description': 'Description'}
        },
        'required': ['id']
    },
    'dcim_delete_device_role': {
@@ -290,7 +341,13 @@ TOOL_DEFINITIONS = {
    },
    'dcim_update_platform': {
        'description': 'Update an existing platform',
        'properties': {'id': {'type': 'integer', 'description': 'Platform ID'}},
        'properties': {
            'id': {'type': 'integer', 'description': 'Platform ID'},
            'name': {'type': 'string', 'description': 'New name'},
            'slug': {'type': 'string', 'description': 'New slug'},
            'manufacturer': {'type': 'integer', 'description': 'Manufacturer ID'},
            'description': {'type': 'string', 'description': 'Description'}
        },
        'required': ['id']
    },
    'dcim_delete_platform': {
@@ -326,7 +383,13 @@ TOOL_DEFINITIONS = {
            'status': {'type': 'string', 'description': 'Device status'},
            'rack': {'type': 'integer', 'description': 'Rack ID'},
            'position': {'type': 'number', 'description': 'Position in rack'},
            'serial': {'type': 'string', 'description': 'Serial number'}
            'serial': {'type': 'string', 'description': 'Serial number'},
            'platform': {'type': 'integer', 'description': 'Platform ID'},
            'primary_ip4': {'type': 'integer', 'description': 'Primary IPv4 address ID'},
            'primary_ip6': {'type': 'integer', 'description': 'Primary IPv6 address ID'},
            'asset_tag': {'type': 'string', 'description': 'Asset tag'},
            'description': {'type': 'string', 'description': 'Description'},
            'comments': {'type': 'string', 'description': 'Comments'}
        },
        'required': ['name', 'device_type', 'role', 'site']
    },
@@ -335,7 +398,17 @@ TOOL_DEFINITIONS = {
        'properties': {
            'id': {'type': 'integer', 'description': 'Device ID'},
            'name': {'type': 'string', 'description': 'New name'},
            'status': {'type': 'string', 'description': 'New status'}
            'status': {'type': 'string', 'description': 'New status'},
            'platform': {'type': 'integer', 'description': 'Platform ID'},
            'primary_ip4': {'type': 'integer', 'description': 'Primary IPv4 address ID'},
            'primary_ip6': {'type': 'integer', 'description': 'Primary IPv6 address ID'},
            'serial': {'type': 'string', 'description': 'Serial number'},
            'asset_tag': {'type': 'string', 'description': 'Asset tag'},
            'site': {'type': 'integer', 'description': 'Site ID'},
            'rack': {'type': 'integer', 'description': 'Rack ID'},
            'position': {'type': 'number', 'description': 'Position in rack'},
            'description': {'type': 'string', 'description': 'Description'},
            'comments': {'type': 'string', 'description': 'Comments'}
        },
        'required': ['id']
    },
@@ -370,7 +443,18 @@ TOOL_DEFINITIONS = {
    },
    'dcim_update_interface': {
        'description': 'Update an existing interface',
        'properties': {'id': {'type': 'integer', 'description': 'Interface ID'}},
        'properties': {
            'id': {'type': 'integer', 'description': 'Interface ID'},
            'name': {'type': 'string', 'description': 'New name'},
            'type': {'type': 'string', 'description': 'Interface type'},
            'enabled': {'type': 'boolean', 'description': 'Interface enabled'},
            'mtu': {'type': 'integer', 'description': 'MTU'},
            'mac_address': {'type': 'string', 'description': 'MAC address'},
            'description': {'type': 'string', 'description': 'Description'},
            'mode': {'type': 'string', 'description': 'VLAN mode'},
            'untagged_vlan': {'type': 'integer', 'description': 'Untagged VLAN ID'},
            'tagged_vlans': {'type': 'array', 'description': 'Tagged VLAN IDs'}
        },
        'required': ['id']
    },
    'dcim_delete_interface': {
@@ -404,7 +488,15 @@ TOOL_DEFINITIONS = {
    },
    'dcim_update_cable': {
        'description': 'Update an existing cable',
        'properties': {'id': {'type': 'integer', 'description': 'Cable ID'}},
        'properties': {
            'id': {'type': 'integer', 'description': 'Cable ID'},
            'type': {'type': 'string', 'description': 'Cable type'},
            'status': {'type': 'string', 'description': 'Cable status'},
            'label': {'type': 'string', 'description': 'Cable label'},
            'color': {'type': 'string', 'description': 'Cable color'},
            'length': {'type': 'number', 'description': 'Cable length'},
            'length_unit': {'type': 'string', 'description': 'Length unit'}
        },
        'required': ['id']
    },
    'dcim_delete_cable': {
@@ -492,7 +584,15 @@ TOOL_DEFINITIONS = {
    },
    'ipam_update_vrf': {
        'description': 'Update an existing VRF',
        'properties': {'id': {'type': 'integer', 'description': 'VRF ID'}},
        'properties': {
            'id': {'type': 'integer', 'description': 'VRF ID'},
            'name': {'type': 'string', 'description': 'New name'},
            'rd': {'type': 'string', 'description': 'Route distinguisher'},
            'tenant': {'type': 'integer', 'description': 'Tenant ID'},
            'enforce_unique': {'type': 'boolean', 'description': 'Enforce unique IPs'},
            'description': {'type': 'string', 'description': 'Description'},
            'comments': {'type': 'string', 'description': 'Comments'}
        },
        'required': ['id']
    },
    'ipam_delete_vrf': {
@@ -531,7 +631,19 @@ TOOL_DEFINITIONS = {
    },
    'ipam_update_prefix': {
        'description': 'Update an existing prefix',
        'properties': {'id': {'type': 'integer', 'description': 'Prefix ID'}},
        'properties': {
            'id': {'type': 'integer', 'description': 'Prefix ID'},
            'prefix': {'type': 'string', 'description': 'Prefix in CIDR notation'},
            'status': {'type': 'string', 'description': 'Status'},
            'site': {'type': 'integer', 'description': 'Site ID'},
            'vrf': {'type': 'integer', 'description': 'VRF ID'},
            'vlan': {'type': 'integer', 'description': 'VLAN ID'},
            'role': {'type': 'integer', 'description': 'Role ID'},
            'tenant': {'type': 'integer', 'description': 'Tenant ID'},
            'is_pool': {'type': 'boolean', 'description': 'Is a pool'},
            'description': {'type': 'string', 'description': 'Description'},
            'comments': {'type': 'string', 'description': 'Comments'}
        },
        'required': ['id']
    },
    'ipam_delete_prefix': {
@@ -582,7 +694,18 @@ TOOL_DEFINITIONS = {
    },
    'ipam_update_ip_address': {
        'description': 'Update an existing IP address',
        'properties': {'id': {'type': 'integer', 'description': 'IP address ID'}},
        'properties': {
            'id': {'type': 'integer', 'description': 'IP address ID'},
            'address': {'type': 'string', 'description': 'IP address with prefix length'},
            'status': {'type': 'string', 'description': 'Status'},
            'vrf': {'type': 'integer', 'description': 'VRF ID'},
            'tenant': {'type': 'integer', 'description': 'Tenant ID'},
            'dns_name': {'type': 'string', 'description': 'DNS name'},
            'description': {'type': 'string', 'description': 'Description'},
            'comments': {'type': 'string', 'description': 'Comments'},
            'assigned_object_type': {'type': 'string', 'description': 'Object type to assign to'},
            'assigned_object_id': {'type': 'integer', 'description': 'Object ID to assign to'}
        },
        'required': ['id']
    },
    'ipam_delete_ip_address': {
@@ -647,7 +770,18 @@ TOOL_DEFINITIONS = {
    },
    'ipam_update_vlan': {
        'description': 'Update an existing VLAN',
        'properties': {'id': {'type': 'integer', 'description': 'VLAN ID'}},
        'properties': {
            'id': {'type': 'integer', 'description': 'VLAN ID'},
            'vid': {'type': 'integer', 'description': 'VLAN ID number'},
            'name': {'type': 'string', 'description': 'VLAN name'},
            'status': {'type': 'string', 'description': 'Status'},
            'site': {'type': 'integer', 'description': 'Site ID'},
            'group': {'type': 'integer', 'description': 'VLAN group ID'},
            'tenant': {'type': 'integer', 'description': 'Tenant ID'},
            'role': {'type': 'integer', 'description': 'Role ID'},
            'description': {'type': 'string', 'description': 'Description'},
            'comments': {'type': 'string', 'description': 'Comments'}
        },
        'required': ['id']
    },
    'ipam_delete_vlan': {
@@ -757,16 +891,17 @@ TOOL_DEFINITIONS = {
        'properties': {'id': {'type': 'integer', 'description': 'Provider ID'}},
        'required': ['id']
    },
    'circuits_list_circuit_types': {
    # NOTE: circuit_types tools shortened to meet 28-char limit
    'circ_list_types': {
        'description': 'List all circuit types in NetBox',
        'properties': {'name': {'type': 'string', 'description': 'Filter by name'}}
    },
    'circuits_get_circuit_type': {
    'circ_get_type': {
        'description': 'Get a specific circuit type by ID',
        'properties': {'id': {'type': 'integer', 'description': 'Circuit type ID'}},
        'required': ['id']
    },
    'circuits_create_circuit_type': {
    'circ_create_type': {
        'description': 'Create a new circuit type',
        'properties': {
            'name': {'type': 'string', 'description': 'Type name'},
@@ -809,19 +944,20 @@ TOOL_DEFINITIONS = {
        'properties': {'id': {'type': 'integer', 'description': 'Circuit ID'}},
        'required': ['id']
    },
    'circuits_list_circuit_terminations': {
    # NOTE: circuit_terminations tools shortened to meet 28-char limit
    'circ_list_terminations': {
        'description': 'List all circuit terminations in NetBox',
        'properties': {
            'circuit_id': {'type': 'integer', 'description': 'Filter by circuit ID'},
            'site_id': {'type': 'integer', 'description': 'Filter by site ID'}
        }
    },
    'circuits_get_circuit_termination': {
    'circ_get_termination': {
        'description': 'Get a specific circuit termination by ID',
        'properties': {'id': {'type': 'integer', 'description': 'Termination ID'}},
        'required': ['id']
    },
    'circuits_create_circuit_termination': {
    'circ_create_termination': {
        'description': 'Create a new circuit termination',
        'properties': {
            'circuit': {'type': 'integer', 'description': 'Circuit ID'},
@@ -832,16 +968,18 @@ TOOL_DEFINITIONS = {
    },

    # ==================== Virtualization Tools ====================
    'virtualization_list_cluster_types': {
    # NOTE: Tool names shortened from 'virtualization_' to 'virt_' to meet
    # 28-char limit (Claude API 64-char limit minus 36-char prefix)
    'virt_list_cluster_types': {
        'description': 'List all cluster types in NetBox',
        'properties': {'name': {'type': 'string', 'description': 'Filter by name'}}
    },
    'virtualization_get_cluster_type': {
    'virt_get_cluster_type': {
        'description': 'Get a specific cluster type by ID',
        'properties': {'id': {'type': 'integer', 'description': 'Cluster type ID'}},
        'required': ['id']
    },
    'virtualization_create_cluster_type': {
    'virt_create_cluster_type': {
        'description': 'Create a new cluster type',
        'properties': {
            'name': {'type': 'string', 'description': 'Type name'},
@@ -849,16 +987,16 @@ TOOL_DEFINITIONS = {
        },
        'required': ['name', 'slug']
    },
    'virtualization_list_cluster_groups': {
    'virt_list_cluster_groups': {
        'description': 'List all cluster groups in NetBox',
        'properties': {'name': {'type': 'string', 'description': 'Filter by name'}}
    },
    'virtualization_get_cluster_group': {
    'virt_get_cluster_group': {
        'description': 'Get a specific cluster group by ID',
        'properties': {'id': {'type': 'integer', 'description': 'Cluster group ID'}},
        'required': ['id']
    },
    'virtualization_create_cluster_group': {
    'virt_create_cluster_group': {
        'description': 'Create a new cluster group',
        'properties': {
            'name': {'type': 'string', 'description': 'Group name'},
@@ -866,7 +1004,7 @@ TOOL_DEFINITIONS = {
        },
        'required': ['name', 'slug']
    },
    'virtualization_list_clusters': {
    'virt_list_clusters': {
        'description': 'List all clusters in NetBox',
        'properties': {
            'name': {'type': 'string', 'description': 'Filter by name'},
@@ -875,12 +1013,12 @@ TOOL_DEFINITIONS = {
            'site_id': {'type': 'integer', 'description': 'Filter by site ID'}
        }
    },
    'virtualization_get_cluster': {
    'virt_get_cluster': {
        'description': 'Get a specific cluster by ID',
        'properties': {'id': {'type': 'integer', 'description': 'Cluster ID'}},
        'required': ['id']
    },
    'virtualization_create_cluster': {
    'virt_create_cluster': {
        'description': 'Create a new cluster',
        'properties': {
            'name': {'type': 'string', 'description': 'Cluster name'},
@@ -891,17 +1029,27 @@ TOOL_DEFINITIONS = {
        },
        'required': ['name', 'type']
    },
    'virtualization_update_cluster': {
    'virt_update_cluster': {
        'description': 'Update an existing cluster',
        'properties': {'id': {'type': 'integer', 'description': 'Cluster ID'}},
        'properties': {
            'id': {'type': 'integer', 'description': 'Cluster ID'},
            'name': {'type': 'string', 'description': 'New name'},
            'type': {'type': 'integer', 'description': 'Cluster type ID'},
            'group': {'type': 'integer', 'description': 'Cluster group ID'},
            'site': {'type': 'integer', 'description': 'Site ID'},
            'status': {'type': 'string', 'description': 'Status'},
            'tenant': {'type': 'integer', 'description': 'Tenant ID'},
            'description': {'type': 'string', 'description': 'Description'},
            'comments': {'type': 'string', 'description': 'Comments'}
        },
        'required': ['id']
    },
    'virtualization_delete_cluster': {
    'virt_delete_cluster': {
        'description': 'Delete a cluster',
        'properties': {'id': {'type': 'integer', 'description': 'Cluster ID'}},
        'required': ['id']
    },
    'virtualization_list_virtual_machines': {
    'virt_list_vms': {
        'description': 'List all virtual machines in NetBox',
        'properties': {
            'name': {'type': 'string', 'description': 'Filter by name'},
@@ -910,12 +1058,12 @@ TOOL_DEFINITIONS = {
            'status': {'type': 'string', 'description': 'Filter by status'}
        }
    },
    'virtualization_get_virtual_machine': {
    'virt_get_vm': {
        'description': 'Get a specific virtual machine by ID',
        'properties': {'id': {'type': 'integer', 'description': 'VM ID'}},
        'required': ['id']
    },
    'virtualization_create_virtual_machine': {
    'virt_create_vm': {
        'description': 'Create a new virtual machine',
        'properties': {
            'name': {'type': 'string', 'description': 'VM name'},
@@ -928,29 +1076,45 @@ TOOL_DEFINITIONS = {
        },
        'required': ['name']
    },
    'virtualization_update_virtual_machine': {
    'virt_update_vm': {
        'description': 'Update an existing virtual machine',
        'properties': {'id': {'type': 'integer', 'description': 'VM ID'}},
        'properties': {
            'id': {'type': 'integer', 'description': 'VM ID'},
            'name': {'type': 'string', 'description': 'New name'},
            'status': {'type': 'string', 'description': 'Status'},
            'cluster': {'type': 'integer', 'description': 'Cluster ID'},
            'site': {'type': 'integer', 'description': 'Site ID'},
            'role': {'type': 'integer', 'description': 'Role ID'},
            'tenant': {'type': 'integer', 'description': 'Tenant ID'},
            'platform': {'type': 'integer', 'description': 'Platform ID'},
            'vcpus': {'type': 'number', 'description': 'Number of vCPUs'},
            'memory': {'type': 'integer', 'description': 'Memory in MB'},
            'disk': {'type': 'integer', 'description': 'Disk in GB'},
            'primary_ip4': {'type': 'integer', 'description': 'Primary IPv4 address ID'},
            'primary_ip6': {'type': 'integer', 'description': 'Primary IPv6 address ID'},
            'description': {'type': 'string', 'description': 'Description'},
            'comments': {'type': 'string', 'description': 'Comments'}
        },
        'required': ['id']
    },
    'virtualization_delete_virtual_machine': {
    'virt_delete_vm': {
        'description': 'Delete a virtual machine',
        'properties': {'id': {'type': 'integer', 'description': 'VM ID'}},
        'required': ['id']
    },
    'virtualization_list_vm_interfaces': {
    'virt_list_vm_ifaces': {
        'description': 'List all VM interfaces in NetBox',
        'properties': {
            'virtual_machine_id': {'type': 'integer', 'description': 'Filter by VM ID'},
            'name': {'type': 'string', 'description': 'Filter by name'}
        }
    },
    'virtualization_get_vm_interface': {
    'virt_get_vm_iface': {
        'description': 'Get a specific VM interface by ID',
        'properties': {'id': {'type': 'integer', 'description': 'Interface ID'}},
        'required': ['id']
    },
    'virtualization_create_vm_interface': {
    'virt_create_vm_iface': {
        'description': 'Create a new VM interface',
        'properties': {
            'virtual_machine': {'type': 'integer', 'description': 'VM ID'},
@@ -1088,16 +1252,18 @@ TOOL_DEFINITIONS = {
    },

    # ==================== Wireless Tools ====================
    'wireless_list_wireless_lan_groups': {
    # NOTE: Tool names shortened from 'wireless_' to 'wlan_' to meet
    # 28-char limit (Claude API 64-char limit minus 36-char prefix)
|
||||
'wlan_list_groups': {
|
||||
'description': 'List all wireless LAN groups in NetBox',
|
||||
'properties': {'name': {'type': 'string', 'description': 'Filter by name'}}
|
||||
},
|
||||
'wireless_get_wireless_lan_group': {
|
||||
'wlan_get_group': {
|
||||
'description': 'Get a specific wireless LAN group by ID',
|
||||
'properties': {'id': {'type': 'integer', 'description': 'WLAN group ID'}},
|
||||
'required': ['id']
|
||||
},
|
||||
'wireless_create_wireless_lan_group': {
|
||||
'wlan_create_group': {
|
||||
'description': 'Create a new wireless LAN group',
|
||||
'properties': {
|
||||
'name': {'type': 'string', 'description': 'Group name'},
|
||||
@@ -1105,7 +1271,7 @@ TOOL_DEFINITIONS = {
|
||||
},
|
||||
'required': ['name', 'slug']
|
||||
},
|
||||
'wireless_list_wireless_lans': {
|
||||
'wlan_list_lans': {
|
||||
'description': 'List all wireless LANs in NetBox',
|
||||
'properties': {
|
||||
'ssid': {'type': 'string', 'description': 'Filter by SSID'},
|
||||
@@ -1113,12 +1279,12 @@ TOOL_DEFINITIONS = {
|
||||
'status': {'type': 'string', 'description': 'Filter by status'}
|
||||
}
|
||||
},
|
||||
'wireless_get_wireless_lan': {
|
||||
'wlan_get_lan': {
|
||||
'description': 'Get a specific wireless LAN by ID',
|
||||
'properties': {'id': {'type': 'integer', 'description': 'WLAN ID'}},
|
||||
'required': ['id']
|
||||
},
|
||||
'wireless_create_wireless_lan': {
|
||||
'wlan_create_lan': {
|
||||
'description': 'Create a new wireless LAN',
|
||||
'properties': {
|
||||
'ssid': {'type': 'string', 'description': 'SSID'},
|
||||
@@ -1128,14 +1294,14 @@ TOOL_DEFINITIONS = {
|
||||
},
|
||||
'required': ['ssid']
|
||||
},
|
||||
'wireless_list_wireless_links': {
|
||||
'wlan_list_links': {
|
||||
'description': 'List all wireless links in NetBox',
|
||||
'properties': {
|
||||
'ssid': {'type': 'string', 'description': 'Filter by SSID'},
|
||||
'status': {'type': 'string', 'description': 'Filter by status'}
|
||||
}
|
||||
},
|
||||
'wireless_get_wireless_link': {
|
||||
'wlan_get_link': {
|
||||
'description': 'Get a specific wireless link by ID',
|
||||
'properties': {'id': {'type': 'integer', 'description': 'Link ID'}},
|
||||
'required': ['id']
|
||||
@@ -1241,6 +1407,52 @@ TOOL_DEFINITIONS = {
|
||||
}
|
||||
|
||||
|
||||
# Map shortened tool names to (category, method_name) for routing.
|
||||
# This is necessary because tool names were shortened to meet the 28-character
|
||||
# limit imposed by Claude API's 64-character tool name limit minus the
|
||||
# 36-character prefix used by Claude Code for MCP tools.
|
||||
TOOL_NAME_MAP = {
|
||||
# Virtualization tools (virt_ -> virtualization category)
|
||||
'virt_list_cluster_types': ('virtualization', 'list_cluster_types'),
|
||||
'virt_get_cluster_type': ('virtualization', 'get_cluster_type'),
|
||||
'virt_create_cluster_type': ('virtualization', 'create_cluster_type'),
|
||||
'virt_list_cluster_groups': ('virtualization', 'list_cluster_groups'),
|
||||
'virt_get_cluster_group': ('virtualization', 'get_cluster_group'),
|
||||
'virt_create_cluster_group': ('virtualization', 'create_cluster_group'),
|
||||
'virt_list_clusters': ('virtualization', 'list_clusters'),
|
||||
'virt_get_cluster': ('virtualization', 'get_cluster'),
|
||||
'virt_create_cluster': ('virtualization', 'create_cluster'),
|
||||
'virt_update_cluster': ('virtualization', 'update_cluster'),
|
||||
'virt_delete_cluster': ('virtualization', 'delete_cluster'),
|
||||
'virt_list_vms': ('virtualization', 'list_virtual_machines'),
|
||||
'virt_get_vm': ('virtualization', 'get_virtual_machine'),
|
||||
'virt_create_vm': ('virtualization', 'create_virtual_machine'),
|
||||
'virt_update_vm': ('virtualization', 'update_virtual_machine'),
|
||||
'virt_delete_vm': ('virtualization', 'delete_virtual_machine'),
|
||||
'virt_list_vm_ifaces': ('virtualization', 'list_vm_interfaces'),
|
||||
'virt_get_vm_iface': ('virtualization', 'get_vm_interface'),
|
||||
'virt_create_vm_iface': ('virtualization', 'create_vm_interface'),
|
||||
|
||||
# Circuits tools (circ_ -> circuits category, for shortened names only)
|
||||
'circ_list_types': ('circuits', 'list_circuit_types'),
|
||||
'circ_get_type': ('circuits', 'get_circuit_type'),
|
||||
'circ_create_type': ('circuits', 'create_circuit_type'),
|
||||
'circ_list_terminations': ('circuits', 'list_circuit_terminations'),
|
||||
'circ_get_termination': ('circuits', 'get_circuit_termination'),
|
||||
'circ_create_termination': ('circuits', 'create_circuit_termination'),
|
||||
|
||||
# Wireless tools (wlan_ -> wireless category)
|
||||
'wlan_list_groups': ('wireless', 'list_wireless_lan_groups'),
|
||||
'wlan_get_group': ('wireless', 'get_wireless_lan_group'),
|
||||
'wlan_create_group': ('wireless', 'create_wireless_lan_group'),
|
||||
'wlan_list_lans': ('wireless', 'list_wireless_lans'),
|
||||
'wlan_get_lan': ('wireless', 'get_wireless_lan'),
|
||||
'wlan_create_lan': ('wireless', 'create_wireless_lan'),
|
||||
'wlan_list_links': ('wireless', 'list_wireless_links'),
|
||||
'wlan_get_link': ('wireless', 'get_wireless_link'),
|
||||
}
|
||||
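The comments above give the arithmetic behind the shortened names: Claude's 64-character tool-name limit minus Claude Code's 36-character MCP prefix leaves 28 characters. A minimal sketch of how the map could be sanity-checked and resolved, assuming TOOL_NAME_MAP as defined above (the `resolve_tool` helper is illustrative, not part of this diff):

```python
# Sketch: verify every shortened name fits the 28-char budget and resolve
# names the same way _route_tool does. Assumes TOOL_NAME_MAP from above.
MAX_TOOL_NAME_LEN = 64 - 36  # Claude API limit minus Claude Code's MCP prefix

def resolve_tool(name: str) -> tuple[str, str]:
    """Return (category, method_name) for a possibly-shortened tool name."""
    if name in TOOL_NAME_MAP:
        return TOOL_NAME_MAP[name]
    category, _, method = name.partition('_')
    if not method:
        raise ValueError(f"Invalid tool name format: {name}")
    return category, method

assert all(len(n) <= MAX_TOOL_NAME_LEN for n in TOOL_NAME_MAP)
assert resolve_tool('virt_list_vms') == ('virtualization', 'list_virtual_machines')
assert resolve_tool('foo_bar_baz') == ('foo', 'bar_baz')  # unmapped names fall back to a first-underscore split
```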


class NetBoxMCPServer:
"""MCP Server for NetBox integration"""

@@ -1314,12 +1526,21 @@ class NetBoxMCPServer:
)]

async def _route_tool(self, name: str, arguments: dict):
"""Route tool call to appropriate handler."""
parts = name.split('_', 1)
if len(parts) != 2:
raise ValueError(f"Invalid tool name format: {name}")
"""Route tool call to appropriate handler.

category, method_name = parts[0], parts[1]
Tool names may be shortened (e.g., 'virt_list_vms' instead of
'virtualization_list_virtual_machines') to meet the 28-character
limit. TOOL_NAME_MAP handles the translation to actual method names.
"""
# Check if this is a mapped short name
if name in TOOL_NAME_MAP:
category, method_name = TOOL_NAME_MAP[name]
else:
# Fall back to original logic for unchanged tools
parts = name.split('_', 1)
if len(parts) != 2:
raise ValueError(f"Invalid tool name format: {name}")
category, method_name = parts[0], parts[1]

# Map category to tool class
tool_map = {
20  plugins/clarity-assist/.claude-plugin/plugin.json  Normal file
@@ -0,0 +1,20 @@
{
  "name": "clarity-assist",
  "version": "1.0.0",
  "description": "Prompt optimization and requirement clarification with ND-friendly accommodations",
  "author": {
    "name": "Leo Miranda",
    "email": "leobmiranda@gmail.com"
  },
  "homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/clarity-assist/README.md",
  "repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
  "license": "MIT",
  "keywords": [
    "prompt-optimization",
    "clarification",
    "neurodivergent",
    "requirements",
    "methodology"
  ],
  "commands": ["./commands/"]
}
99  plugins/clarity-assist/README.md  Normal file
@@ -0,0 +1,99 @@
# clarity-assist

Prompt optimization and requirement clarification plugin with neurodivergent-friendly accommodations.

## Overview

clarity-assist helps transform vague, incomplete, or ambiguous requests into clear, actionable specifications. It uses a structured 4-D methodology (Deconstruct, Diagnose, Develop, Deliver) and ND-friendly communication patterns.

## Commands

| Command | Description |
|---------|-------------|
| `/clarify` | Full 4-D prompt optimization for complex requests |
| `/quick-clarify` | Rapid single-pass clarification for simple requests |

## Features

### 4-D Methodology

1. **Deconstruct** - Break down the request into components
2. **Diagnose** - Analyze gaps and potential issues
3. **Develop** - Gather clarifications through structured questions
4. **Deliver** - Produce refined specification

### ND-Friendly Design

- **Option-based questioning** - Always provide 2-4 concrete choices
- **Chunked questions** - Ask 1-2 questions at a time
- **Context for questions** - Explain why you're asking
- **Conflict detection** - Check previous answers before new questions
- **Progress acknowledgment** - Summarize frequently

### Escalation Protocol

When requests are complex or users seem overwhelmed:
- Acknowledge complexity
- Offer to focus on one aspect at a time
- Build incrementally

## Installation

Add to your project's `.claude/settings.json`:

```json
{
  "plugins": ["clarity-assist"]
}
```

## Usage

### Full Clarification

```
/clarify

[Your vague or complex request here]
```

### Quick Clarification

```
/quick-clarify

[Your mostly-clear request here]
```

## Configuration

No configuration required. The plugin uses sensible defaults.

## Output Format

After clarification, you receive a structured specification:

```markdown
## Clarified Request

### Summary
[Description of what will be built]

### Scope
**In Scope:** [items]
**Out of Scope:** [items]

### Requirements
[Prioritized table]

### Assumptions
[List of assumptions]
```

## Integration

For CLAUDE.md integration instructions, see `claude-md-integration.md`.

## License

MIT
140  plugins/clarity-assist/agents/clarity-coach.md  Normal file
@@ -0,0 +1,140 @@
# Clarity Coach Agent

## Role

You are a patient, structured coach specializing in helping users articulate their requirements clearly. You are trained in neurodivergent-friendly communication patterns and use evidence-based techniques for effective requirement gathering.

## Core Principles

### 1. Never Open-Ended Questions Alone

Bad: "What do you want the button to do?"
Good: "What should happen when the button is clicked?
1. Navigate to another page
2. Submit a form
3. Open a modal/popup
4. Other (please describe)"

### 2. Chunked Questions (1-2 at a Time)

Bad: "What color, size, position, and behavior should the button have?"
Good: "Let's start with the basics. Where should this button appear?
1. In the header
2. In the main content area
3. In a sidebar
4. Floating/fixed position"

Then, after the answer: "Now for the appearance - should it match your existing button style or stand out?"

### 3. Provide Context for Questions

Always explain why you're asking:

"I'm asking about error handling because it affects whether we need to build a retry mechanism."

### 4. Conflict Detection

Before each new question, mentally review:
- What has the user already said?
- Does this question potentially contradict earlier answers?
- If yes, acknowledge it: "Earlier you mentioned X, so when thinking about Y..."

### 5. Progress Acknowledgment

After every 2-3 questions, summarize progress:

"Great, so far we've established:
- The feature is for [X]
- It should [Y]
- And integrate with [Z]

A couple more questions and we'll have everything we need."

## Communication Style

### Tone
- Patient, never rushing
- Validating ("That makes sense", "Good point")
- Non-judgmental about vague initial requests

### Pacing
- Respect the user's energy levels
- Offer breaks: "This is good progress. Want to continue or pick this up later?"
- Notice signs of overwhelm and simplify

### Clarity
- Use concrete examples, not abstract descriptions
- Prefer "For example..." over "Consider..."
- Visual when possible: "Picture a button in the top-right corner that..."

## Handling Uncertainty

When the user says "I don't know":

```
"That's totally fine - let's approach it differently.

If I showed you two versions:
- Version A: [concrete description]
- Version B: [concrete description]

Which feels closer to what you're imagining? Or neither?"
```

When the user is overwhelmed:

```
"There's a lot to decide here. Let me suggest we focus on just
the [most critical aspect] for now. The other details can be
figured out as we build. What matters most to you right now?"
```

## Output Artifacts

### During Clarification

Track gathered information in a mental model:
- Requirements (explicit)
- Preferences (implicit)
- Constraints (technical/business)
- Open questions

### After Clarification

Produce a clear specification (see the /clarify command for the format).

## Example Session

**User:** I need a dashboard

**Coach:**

```
A dashboard can mean many things - let me help narrow it down.

**What's the primary purpose of this dashboard?**
1. Monitor system health/metrics
2. View business KPIs and analytics
3. Manage tasks or workflows
4. Combination / Other

[Knowing this helps me understand what data and visualizations you'll need]
```

**User:** Option 2, business analytics

**Coach:**

```
Business analytics dashboard - got it.

**Who will be using this dashboard?**
1. Just you / personal use
2. Your team (5-20 people)
3. Company-wide (many departments)
4. External clients

[This affects permissions, performance requirements, and how customizable it needs to be]
```

And so on, building toward a complete picture.
58  plugins/clarity-assist/claude-md-integration.md  Normal file
@@ -0,0 +1,58 @@
# clarity-assist - CLAUDE.md Integration

Add the following section to your project's CLAUDE.md file to enable clarity-assist.

---

## Prompt Clarification

This project uses the clarity-assist plugin for requirement gathering.

### When to Use

- Complex or vague requests
- Multi-step implementations
- When requirements seem incomplete

### Commands

| Command | Use Case |
|---------|----------|
| `/clarify` | Full 4-D methodology for complex requests |
| `/quick-clarify` | Rapid mode for simple disambiguation |

### Communication Style

When gathering requirements:
- Present 2-4 concrete options (never open-ended alone)
- Ask 1-2 questions at a time
- Explain why you're asking each question
- Check for conflicts with previous answers
- Summarize progress frequently

### Output Format

After clarification, produce a structured specification:

```markdown
## Clarified Request

### Summary
[1-2 sentence description]

### Scope
**In Scope:** [items]
**Out of Scope:** [items]

### Requirements
| # | Requirement | Priority | Notes |
|---|-------------|----------|-------|
| 1 | ... | Must | ... |

### Assumptions
[List made during conversation]
```

---

Copy the section between the horizontal rules into your CLAUDE.md.
137  plugins/clarity-assist/commands/clarify.md  Normal file
@@ -0,0 +1,137 @@
# /clarify - Full Prompt Optimization

## Purpose

Transform vague, incomplete, or ambiguous requests into clear, actionable specifications using the 4-D methodology with neurodivergent-friendly accommodations.

## When to Use

- Complex multi-step requests
- Requirements with multiple possible interpretations
- Tasks requiring significant context gathering
- When the user seems uncertain about what they want

## 4-D Methodology

### Phase 1: Deconstruct

Break down the user's request into components:

1. **Extract explicit requirements** - What was directly stated
2. **Identify implicit assumptions** - What seems assumed but not stated
3. **Note ambiguities** - Points that could go multiple ways
4. **List dependencies** - External factors that might affect implementation

### Phase 2: Diagnose

Analyze gaps and potential issues:

1. **Missing information** - What do we need to know?
2. **Conflicting requirements** - Do any stated goals contradict?
3. **Scope boundaries** - What's in/out of scope?
4. **Technical constraints** - Platform, language, architecture limits

### Phase 3: Develop

Gather clarifications through structured questioning:

**ND-Friendly Question Rules:**
- Present 2-4 concrete options (never open-ended alone)
- Include "Other" for custom responses
- Ask 1-2 questions at a time maximum
- Provide brief context for why you're asking
- Check for conflicts with previous answers

**Example Format:**
```
To help me understand the scope better:

**How should errors be handled?**
1. Silent logging (user sees nothing)
2. Toast notifications (brief, dismissible)
3. Modal dialogs (requires user action)
4. Other

[Context: This affects both UX and how much error-handling code we need]
```

### Phase 4: Deliver

Produce the refined specification:

```markdown
## Clarified Request

### Summary
[1-2 sentence description of what will be built]

### Scope
**In Scope:**
- [Item 1]
- [Item 2]

**Out of Scope:**
- [Item 1]

### Requirements

| # | Requirement | Priority | Notes |
|---|-------------|----------|-------|
| 1 | ... | Must | ... |
| 2 | ... | Should | ... |

### Assumptions
- [Assumption made based on conversation]

### Open Questions
- [Any remaining ambiguities, if any]
```

## Conflict Detection

Before each question, review previous answers for conflicts:

```
[Internal check]
Previous: User said "keep it simple"
Current question about: Adding configuration options
Potential conflict: More options = more complexity
→ Acknowledge: "Earlier you mentioned keeping it simple. With that in mind..."
```

## Escalation Protocol

If the request is particularly messy or the user seems overwhelmed:

1. Acknowledge the complexity
2. Offer to start with just ONE aspect
3. Build incrementally

```
"This touches a lot of areas. Rather than tackle everything at once,
let's start with [most critical piece]. Once that's clear, we can
add the other parts. Sound good?"
```

## Example Interaction

**User:** Make the app faster

**Clarify Response:**

```
Let me help clarify what kind of performance improvements you're looking for.

**What type of slowness are you experiencing?**
1. Initial page load takes too long
2. Interactions feel sluggish (clicking, typing)
3. API calls are slow
4. All of the above
5. Other

[I'm asking because each has very different solutions]
```

## Output Format

After gathering all necessary information, use the Deliver phase format to present the clarified specification for user confirmation.
84  plugins/clarity-assist/commands/quick-clarify.md  Normal file
@@ -0,0 +1,84 @@
# /quick-clarify - Rapid Clarification Mode

## Purpose

Single-pass clarification for requests that are mostly clear but need minor disambiguation.

## When to Use

- Request is fairly clear, just one or two ambiguities
- User is in a hurry
- Follow-up to an already-clarified request
- Simple feature additions or bug fixes

## Process

### Step 1: Echo Understanding

Restate what you understood in a single sentence:

```
"I understand you want [X] that does [Y]."
```

### Step 2: Quick Disambiguation

If needed, ask ONE multiple-choice question:

```
**Just to confirm:** Should this [A] or [B]?
1. [Option A with brief description]
2. [Option B with brief description]
3. Both
4. Neither / Other approach
```

### Step 3: Proceed or Confirm

If confident (>90%), proceed with a brief confirmation:

```
"Got it. I'll implement [X with chosen option]. Starting now."
```

If less confident, offer a micro-summary:

```
"Quick summary before I start:
- [Key point 1]
- [Key point 2]
- [Assumption made]

Proceed? (Or clarify anything)"
```

## Example Interaction

**User:** Add a delete button to the user list

**Quick-Clarify Response:**

```
I'll add a delete button to each row in the user list.

**Should deletion require confirmation?**
1. Yes - show confirmation dialog (Recommended)
2. No - delete immediately
3. Soft delete - mark as deleted but keep data

[Asking because accidental deletions can be problematic]
```

## Escalation to Full /clarify

If quick-clarify reveals complexity:

```
"This is more involved than it first appeared - there are
several decisions to make. Want me to switch to a more
thorough clarification process? (Just say 'yes' or 'clarify')"
```

## Output Format

For quick-clarify, no formal specification document is needed. Just proceed with the task after brief confirmation, documenting assumptions inline with the work.
@@ -0,0 +1,134 @@
# Prompt Optimization Rules

## Core Rules

### Rule 1: Specificity Over Generality

| Instead of | Use |
|------------|-----|
| "Make it better" | "Reduce load time to under 2 seconds" |
| "Add some validation" | "Validate email format and require 8+ char password" |
| "Handle errors" | "Show toast notification on API failure, log to console" |

### Rule 2: Include Context

Every good prompt includes:
- **What**: The action/feature/fix needed
- **Where**: Location in codebase or UI
- **Why**: Purpose or problem being solved
- **Constraints**: Technical limits, compatibility, standards

### Rule 3: Define Success

Specify how to know when the task is done:
- Acceptance criteria
- Test cases to pass
- Behavior to verify

### Rule 4: Scope Boundaries

Explicitly state:
- What IS in scope
- What is NOT in scope
- What MIGHT be in scope (user's call)

## Anti-Patterns to Detect

### Vague Requests

Triggers: "improve", "fix", "update", "change", "better", "faster", "cleaner"

Response: Ask for specific metrics or outcomes

### Scope Creep Signals

Triggers: "while you're at it", "also", "might as well", "and another thing"

Response: Acknowledge, then isolate: "I'll note that for after the main task"

### Assumption Gaps

Triggers: References to "the" thing (which thing?), "it" (what's it?), "there" (where?)

Response: Echo back specific understanding

### Conflicting Requirements

Triggers: "Simple but comprehensive", "Fast but thorough", "Minimal but complete"

Response: Prioritize: "Which matters more: simplicity or completeness?"

## Question Templates

### For Unclear Purpose

```
**What problem does this solve?**
1. [Specific problem A]
2. [Specific problem B]
3. Combination
4. Different problem: ____
```

### For Missing Scope

```
**What should this include?**
- [ ] Feature A
- [ ] Feature B
- [ ] Feature C
- [ ] Other: ____
```

### For Ambiguous Behavior

```
**When [trigger event], what should happen?**
1. [Behavior option A]
2. [Behavior option B]
3. Nothing (ignore)
4. Depends on: ____
```

### For Technical Decisions

```
**Implementation approach:**
1. [Approach A] - pros: X, cons: Y
2. [Approach B] - pros: X, cons: Y
3. Let me decide based on codebase
4. Need more info about: ____
```

## Optimization Checklist

Before proceeding with any task, verify:

- [ ] **Specific outcome** - Can measure success
- [ ] **Clear location** - Know where changes go
- [ ] **Defined scope** - Know what's in/out
- [ ] **Error handling** - Know what happens on failure
- [ ] **Edge cases** - Major scenarios covered
- [ ] **Dependencies** - Know what this affects/relies on

## ND-Friendly Adaptations

### Reduce Cognitive Load
- Maximum 4 options per question
- Always include "Other" escape hatch
- Provide examples, not just descriptions

### Support Working Memory
- Summarize frequently
- Reference earlier decisions explicitly
- Don't assume user remembers context

### Allow Processing Time
- Don't rapid-fire questions
- Validate answers before moving on
- Offer to revisit/change earlier answers

### Manage Overwhelm
- Offer to break into smaller sessions
- Prioritize must-haves vs nice-to-haves
- Provide "good enough for now" options
@@ -6,8 +6,8 @@
"name": "Leo Miranda",
"email": "leobmiranda@gmail.com"
},
"homepage": "https://gitea.hotserv.cloud/personal-projects/support-claude-mktplace/src/branch/main/plugins/claude-config-maintainer/README.md",
"repository": "https://gitea.hotserv.cloud/personal-projects/support-claude-mktplace.git",
"homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/claude-config-maintainer/README.md",
"repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
"license": "MIT",
"keywords": [
"claude-code",
@@ -16,7 +16,5 @@
"claude-md",
"developer-tools"
],
"entryPoint": "agents/maintainer.md",
"commands": ["./commands/"],
"agents": ["./agents/"]
"commands": ["./commands/"]
}

@@ -12,7 +12,7 @@ CLAUDE.md files provide instructions to Claude Code when working with a project.

## Installation

This plugin is part of the support-claude-mktplace collection. Install the marketplace and the plugin will be available.
This plugin is part of the Leo Claude Marketplace. Install the marketplace and the plugin will be available.

## Commands

@@ -96,4 +96,4 @@ Target score: **70+** for effective Claude Code usage.

## Contributing

This plugin is part of the personal-projects/support-claude-mktplace repository.
This plugin is part of the personal-projects/leo-claude-mktplace repository.

68  plugins/claude-config-maintainer/hooks/enforce-rules.sh  Executable file
@@ -0,0 +1,68 @@
#!/bin/bash
# claude-config-maintainer: enforce mandatory behavior rules
# Checks if CLAUDE.md has the rules, adds them if missing

PREFIX="[claude-config-maintainer]"

# Find CLAUDE.md in current directory or parent
CLAUDE_MD=""
if [ -f "./CLAUDE.md" ]; then
    CLAUDE_MD="./CLAUDE.md"
elif [ -f "../CLAUDE.md" ]; then
    CLAUDE_MD="../CLAUDE.md"
fi

# If no CLAUDE.md found, exit silently
if [ -z "$CLAUDE_MD" ]; then
    exit 0
fi

# Check if mandatory rules exist
if grep -q "MANDATORY BEHAVIOR RULES" "$CLAUDE_MD" 2>/dev/null; then
    # Rules exist, all good
    exit 0
fi

# Rules missing - add them
RULES='## ⛔ MANDATORY BEHAVIOR RULES - READ FIRST

**These rules are NON-NEGOTIABLE. Violating them wastes the user'\''s time and money.**

### 1. WHEN USER ASKS YOU TO CHECK SOMETHING - CHECK EVERYTHING
- Search ALL locations, not just where you think it is
- Check cache directories: `~/.claude/plugins/cache/`
- Check installed: `~/.claude/plugins/marketplaces/`
- Check source directories
- **NEVER say "no" or "that'\''s not the issue" without exhaustive verification**

### 2. WHEN USER SAYS SOMETHING IS WRONG - BELIEVE THEM
- The user knows their system better than you
- Investigate thoroughly before disagreeing
- **Your confidence is often wrong. User'\''s instincts are often right.**

### 3. NEVER SAY "DONE" WITHOUT VERIFICATION
- Run the actual command/script to verify
- Show the output to the user
- **"Done" means VERIFIED WORKING, not "I made changes"**

### 4. SHOW EXACTLY WHAT USER ASKS FOR
- If user asks for messages, show the MESSAGES
- If user asks for code, show the CODE
- **Do not interpret or summarize unless asked**

**FAILURE TO FOLLOW THESE RULES = WASTED USER TIME = UNACCEPTABLE**

---

'

# Create temp file with rules + existing content
{
    head -1 "$CLAUDE_MD"
    echo ""
    echo "$RULES"
    tail -n +2 "$CLAUDE_MD"
} > "${CLAUDE_MD}.tmp"

mv "${CLAUDE_MD}.tmp" "$CLAUDE_MD"
echo "$PREFIX Added mandatory behavior rules to CLAUDE.md"
10  plugins/claude-config-maintainer/hooks/hooks.json  Normal file
@@ -0,0 +1,10 @@
{
  "hooks": {
    "SessionStart": [
      {
        "type": "command",
        "command": "${CLAUDE_PLUGIN_ROOT}/hooks/enforce-rules.sh"
      }
    ]
  }
}
@@ -6,8 +6,8 @@
"name": "Leo Miranda",
"email": "leobmiranda@gmail.com"
},
"homepage": "https://gitea.hotserv.cloud/personal-projects/support-claude-mktplace/src/branch/main/plugins/cmdb-assistant/README.md",
"repository": "https://gitea.hotserv.cloud/personal-projects/support-claude-mktplace.git",
"homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/cmdb-assistant/README.md",
"repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
"license": "MIT",
"keywords": [
"netbox",
@@ -18,6 +18,5 @@
"dcim"
],
"commands": ["./commands/"],
"agents": ["./agents/"],
"mcpServers": "./.mcp.json"
"mcpServers": ["./.mcp.json"]
}

@@ -111,6 +111,7 @@ cmdb-assistant/
│   └── plugin.json        # Plugin manifest
├── .mcp.json              # MCP server configuration
├── commands/
│   ├── initial-setup.md   # Setup wizard
│   ├── cmdb-search.md     # Search command
│   ├── cmdb-device.md     # Device management
│   ├── cmdb-ip.md         # IP management
@@ -167,4 +168,4 @@ The plugin uses the shared NetBox MCP server at `../mcp-servers/netbox/`.

## License

MIT License - Part of the Claude Code Marketplace.
MIT License - Part of the Leo Claude Marketplace.

164  plugins/cmdb-assistant/commands/initial-setup.md  Normal file
@@ -0,0 +1,164 @@
---
description: Interactive setup wizard for cmdb-assistant plugin - configures NetBox MCP server
---

# CMDB Assistant Setup Wizard

This command sets up the cmdb-assistant plugin with NetBox integration.

## Important Context

- **This command uses Bash, Read, Write, and AskUserQuestion tools** - NOT MCP tools
- **MCP tools won't work until after setup + session restart**
- **Uses NetBox MCP server (separate from Gitea MCP)**

---

## Phase 1: Environment Validation

### Step 1.1: Check Python Version

```bash
python3 --version
```

If below 3.10, stop setup and inform the user.

---

## Phase 2: MCP Server Setup

### Step 2.1: Locate NetBox MCP Server

```bash
find ~/.claude ~/.config/claude -name "mcp_server" -path "*netbox*" 2>/dev/null | head -5
```

If not found, ask the user for the marketplace location.

### Step 2.2: Check Virtual Environment

```bash
ls -la /path/to/mcp-servers/netbox/.venv/bin/python 2>/dev/null && echo "VENV_EXISTS" || echo "VENV_MISSING"
```

### Step 2.3: Create Virtual Environment (if missing)

```bash
cd /path/to/mcp-servers/netbox && python3 -m venv .venv && source .venv/bin/activate && pip install --upgrade pip && pip install -r requirements.txt && deactivate
```

---

## Phase 3: System Configuration

### Step 3.1: Create Config Directory

```bash
mkdir -p ~/.config/claude
```

### Step 3.2: Check NetBox Configuration

```bash
cat ~/.config/claude/netbox.env 2>/dev/null || echo "FILE_NOT_FOUND"
```

**If file exists with valid values:** Skip to Phase 4.
**If missing or has placeholders:** Continue.

### Step 3.3: Gather NetBox Information

Use AskUserQuestion:
- Question: "What is your NetBox API URL? (e.g., https://netbox.company.com/api)"
- Header: "NetBox URL"
- Options:
  - "Other (I'll provide the URL)"

Ask the user to provide the URL.

**Important:** The URL must include `/api` at the end. If the user provides a URL without `/api`, append it automatically.

### Step 3.4: Create Configuration File

```bash
cat > ~/.config/claude/netbox.env << 'EOF'
# NetBox API Configuration
# Generated by cmdb-assistant /initial-setup

NETBOX_API_URL=<USER_PROVIDED_URL>
NETBOX_API_TOKEN=PASTE_YOUR_TOKEN_HERE
EOF
chmod 600 ~/.config/claude/netbox.env
```

### Step 3.5: Token Instructions

---

**Action Required: Add Your NetBox API Token**

I've created `~/.config/claude/netbox.env` but you need to add your API token manually.

**Steps:**
1. Open: `nano ~/.config/claude/netbox.env`
2. Generate a token in NetBox: Admin → API Tokens → Add Token
3. Replace `PASTE_YOUR_TOKEN_HERE` with your token
4. Save the file

---

Use AskUserQuestion:
- Question: "Have you added your NetBox token?"
- Header: "Token"
- Options:
  - "Yes, I've added the token"
  - "Skip for now"

---

## Phase 4: Validation

### Step 4.1: Test Configuration (if token was added)

```bash
source ~/.config/claude/netbox.env && curl -s -o /dev/null -w "%{http_code}" -H "Authorization: Token $NETBOX_API_TOKEN" "$NETBOX_API_URL/"
```

**Note:** The URL already includes `/api`, so we just append `/` for the root API endpoint.

Report the result:
- 200: Success
- 403: Invalid token
- Other: Connection issue

### Step 4.2: Summary

```
╔════════════════════════════════════════════════════════════╗
║              CMDB-ASSISTANT SETUP COMPLETE                 ║
╠════════════════════════════════════════════════════════════╣
║ MCP Server (NetBox):  ✓ Ready                              ║
║ System Config:        ✓ ~/.config/claude/netbox.env        ║
╚════════════════════════════════════════════════════════════╝
```

### Step 4.3: Session Restart Notice

---

**⚠️ Session Restart Required**

Restart your Claude Code session for MCP tools to become available.

**After restart, you can:**
- Run `/cmdb-device <hostname>` to look up a device
- Run `/cmdb-ip <address>` to look up an IP address
- Run `/cmdb-site <name>` to look up a site
- Run `/cmdb-search <query>` for general search

---

## Note on Project Configuration

cmdb-assistant does not require project-level configuration. The NetBox connection is system-wide and not tied to specific repositories.
1  plugins/cmdb-assistant/mcp-servers/netbox  Symbolic link
@@ -0,0 +1 @@
../../../mcp-servers/netbox
@@ -6,8 +6,9 @@
"name": "Leo Miranda",
"email": "leobmiranda@gmail.com"
},
"homepage": "https://gitea.hotserv.cloud/personal-projects/support-claude-mktplace/src/branch/main/plugins/code-sentinel/README.md",
"repository": "https://gitea.hotserv.cloud/personal-projects/support-claude-mktplace.git",
"homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/code-sentinel/README.md",
"repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
"license": "MIT",
"keywords": ["security", "refactoring", "code-quality", "static-analysis", "hooks"]
"keywords": ["security", "refactoring", "code-quality", "static-analysis", "hooks"],
"commands": ["./commands/"]
}

@@ -38,7 +38,7 @@ Security scanning and code refactoring tools for Claude Code projects.
## Installation

```bash
/plugin marketplace add https://gitea.hotserv.cloud/personal-projects/support-claude-mktplace.git
/plugin marketplace add https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git
/plugin install code-sentinel
```

@@ -5,8 +5,8 @@
"matcher": "Write|Edit|MultiEdit",
"hooks": [
{
"type": "prompt",
"prompt": "SECURITY CHECK - Before writing this code, scan for these patterns:\n\n**Critical (BLOCK if found):**\n- eval(), exec() with user input\n- SQL string concatenation (SQL injection)\n- shell=True with user input (command injection)\n- Hardcoded secrets (API keys, passwords, tokens)\n- Pickle/marshal deserialization of untrusted data\n- innerHTML/dangerouslySetInnerHTML with user content (XSS)\n\n**Warning (WARN but allow):**\n- subprocess without input validation\n- File operations without path sanitization\n- HTTP requests without timeout\n- Broad exception catches (except:)\n- Debug/print statements with sensitive data\n\n**Response:**\n- If CRITICAL found: STOP, explain the issue, suggest safe alternative\n- If WARNING found: Note it briefly, proceed with suggestion\n- If clean: Proceed silently (say nothing)\n\nDo NOT announce clean scans. Only speak if issues found."
"type": "command",
"command": "${CLAUDE_PLUGIN_ROOT}/hooks/security-check.sh"
}
]
}

62  plugins/code-sentinel/hooks/security-check.sh  Executable file
@@ -0,0 +1,62 @@
#!/bin/bash
# code-sentinel security check hook
# Checks for obvious security issues in code files, skips config/docs
# Command hook - guaranteed predictable behavior

# Read tool input from stdin
INPUT=$(cat)

# Extract file_path from JSON input
FILE_PATH=$(echo "$INPUT" | grep -o '"file_path"[[:space:]]*:[[:space:]]*"[^"]*"' | head -1 | sed 's/.*"file_path"[[:space:]]*:[[:space:]]*"\([^"]*\)".*/\1/')

# If no file_path, exit silently
if [ -z "$FILE_PATH" ]; then
    exit 0
fi

# SKIP config/doc files entirely - exit silently
case "$FILE_PATH" in
    *.md|*.json|*.yml|*.yaml|*.txt|*.toml|*.ini|*.cfg|*.conf)
        exit 0
        ;;
    */docs/*|*/README*|*/CHANGELOG*|*/LICENSE*)
        exit 0
        ;;
    */.claude/*|*/.github/*|*/.vscode/*)
        exit 0
        ;;
esac

# For code files, extract content to check
# For Edit tool: check new_string
# For Write tool: check content
CONTENT=$(echo "$INPUT" | grep -o '"new_string"[[:space:]]*:[[:space:]]*"[^"]*"' | head -1 | sed 's/.*"new_string"[[:space:]]*:[[:space:]]*"\([^"]*\)".*/\1/')
if [ -z "$CONTENT" ]; then
    CONTENT=$(echo "$INPUT" | grep -o '"content"[[:space:]]*:[[:space:]]*"[^"]*"' | head -1 | sed 's/.*"content"[[:space:]]*:[[:space:]]*"\([^"]*\)".*/\1/')
fi

# If no content to check, exit silently
if [ -z "$CONTENT" ]; then
    exit 0
fi

# Check for hardcoded secrets patterns (obvious cases only)
if echo "$CONTENT" | grep -qiE '(api[_-]?key|api[_-]?secret|password|passwd|secret[_-]?key|auth[_-]?token)[[:space:]]*[=:][[:space:]]*["\x27][A-Za-z0-9+/=_-]{20,}["\x27]'; then
    echo "[code-sentinel] BLOCKED: Hardcoded secret detected"
    exit 1
fi

# Check for AWS keys pattern
if echo "$CONTENT" | grep -qE 'AKIA[A-Z0-9]{16}'; then
    echo "[code-sentinel] BLOCKED: AWS access key detected"
    exit 1
fi

# Check for private key headers
if echo "$CONTENT" | grep -qE '\-\-\-\-\-BEGIN (RSA |DSA |EC |OPENSSH )?PRIVATE KEY\-\-\-\-\-'; then
    echo "[code-sentinel] BLOCKED: Private key detected"
    exit 1
fi

# All other cases: exit silently (allow the edit)
exit 0
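One caveat about the grep/sed extraction above: it reads only up to the first unescaped quote, so escaped quotes or multi-line strings in `new_string`/`content` can slip past it. A sturdier variant would parse the stdin JSON properly. A sketch of that idea in Python (field names mirror the shell script; the flat top-level input shape is an assumption, so adjust the lookups if the fields arrive nested):

```python
#!/usr/bin/env python3
# Sketch: JSON-aware replacement for the grep/sed extraction above.
# Field names ("file_path", "new_string", "content") mirror the shell script;
# the flat top-level shape is an assumption, not a documented contract.
import json
import re
import sys

SECRET_RE = re.compile(
    r'(api[_-]?key|password|secret[_-]?key|auth[_-]?token)\s*[=:]\s*["\'][A-Za-z0-9+/=_-]{20,}["\']',
    re.IGNORECASE,
)

def main() -> int:
    try:
        data = json.load(sys.stdin)
    except json.JSONDecodeError:
        return 0  # unparseable input: allow, matching the script's behavior

    if not data.get("file_path"):
        return 0
    content = data.get("new_string") or data.get("content") or ""
    if SECRET_RE.search(content):
        print("[code-sentinel] BLOCKED: Hardcoded secret detected")
        return 1
    return 0

if __name__ == "__main__":
    sys.exit(main())
```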
25  plugins/data-platform/.claude-plugin/plugin.json  Normal file
@@ -0,0 +1,25 @@
{
  "name": "data-platform",
  "version": "1.0.0",
  "description": "Data engineering tools with pandas, PostgreSQL/PostGIS, and dbt integration",
  "author": {
    "name": "Leo Miranda",
    "email": "leobmiranda@gmail.com"
  },
  "homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/data-platform/README.md",
  "repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
  "license": "MIT",
  "keywords": [
    "pandas",
    "postgresql",
    "postgis",
    "dbt",
    "data-engineering",
    "etl",
    "dataframe"
  ],
  "hooks": "hooks/hooks.json",
  "commands": ["./commands/"],
  "agents": ["./agents/"],
  "mcpServers": ["./.mcp.json"]
}
10  plugins/data-platform/.mcp.json  Normal file
@@ -0,0 +1,10 @@
{
  "mcpServers": {
    "data-platform": {
      "type": "stdio",
      "command": "${CLAUDE_PLUGIN_ROOT}/mcp-servers/data-platform/.venv/bin/python",
      "args": ["-m", "mcp_server.server"],
      "cwd": "${CLAUDE_PLUGIN_ROOT}/mcp-servers/data-platform"
    }
  }
}
119  plugins/data-platform/README.md  Normal file
@@ -0,0 +1,119 @@
# data-platform Plugin

Data engineering tools with pandas, PostgreSQL/PostGIS, and dbt integration for Claude Code.

## Features

- **pandas Operations**: Load, transform, and export DataFrames with a persistent data_ref system
- **PostgreSQL/PostGIS**: Database queries with connection pooling and spatial data support
- **dbt Integration**: Build tool wrapper with pre-execution validation

## Installation

This plugin is part of the leo-claude-mktplace. Install via:

```bash
# From marketplace
claude plugins install leo-claude-mktplace/data-platform

# Setup MCP server venv
cd ~/.claude/plugins/marketplaces/leo-claude-mktplace/mcp-servers/data-platform
python -m venv .venv
source .venv/bin/activate
pip install -r requirements.txt
```

## Configuration

### PostgreSQL (Optional)

Create `~/.config/claude/postgres.env`:

```env
POSTGRES_URL=postgresql://user:password@host:5432/database
```

### dbt (Optional)

Add to project `.env`:

```env
DBT_PROJECT_DIR=/path/to/dbt/project
DBT_PROFILES_DIR=~/.dbt
```

## Commands

| Command | Description |
|---------|-------------|
| `/initial-setup` | Interactive setup wizard for PostgreSQL and dbt configuration |
| `/ingest` | Load data from files or database |
| `/profile` | Generate data profile and statistics |
| `/schema` | Show database/DataFrame schema |
| `/explain` | Explain dbt model lineage |
| `/lineage` | Visualize data dependencies |
| `/run` | Execute dbt models |

## Agents

| Agent | Description |
|-------|-------------|
| `data-ingestion` | Data loading and transformation specialist |
| `data-analysis` | Exploration and profiling specialist |

## data_ref System

All DataFrame operations use a `data_ref` system for persistence:

```
# Load returns a reference
read_csv("data.csv") → {"data_ref": "sales_data"}

# Use reference in subsequent operations
filter("sales_data", "amount > 100") → {"data_ref": "sales_data_filtered"}
describe("sales_data_filtered") → {statistics}
```
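Conceptually, the `data_ref` contract above amounts to a server-side dictionary of named DataFrames that survives between tool calls. A rough sketch of the idea (names like `DATA_STORE` are illustrative, not the plugin's actual internals):

```python
# Illustrative data_ref store: named DataFrames that persist across tool calls.
# DATA_STORE and the helper names are hypothetical, not the server's real code.
import pandas as pd

DATA_STORE: dict[str, pd.DataFrame] = {}

def read_csv(path: str, data_ref: str) -> dict:
    DATA_STORE[data_ref] = pd.read_csv(path)
    return {"data_ref": data_ref, "rows": len(DATA_STORE[data_ref])}

def filter_data(data_ref: str, condition: str) -> dict:  # mirrors the 'filter' tool
    out_ref = f"{data_ref}_filtered"
    DATA_STORE[out_ref] = DATA_STORE[data_ref].query(condition)
    return {"data_ref": out_ref, "rows": len(DATA_STORE[out_ref])}
```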

## Example Workflow

```
/ingest data/sales.csv
# → Loaded 50,000 rows as "sales_data"

/profile sales_data
# → Statistical summary, null counts, quality assessment

/schema orders
# → Column names, types, constraints

/lineage fct_orders
# → Dependency graph showing upstream/downstream models

/run dim_customers
# → Pre-validates then executes dbt model
```

## Tools Summary

### pandas (14 tools)
`read_csv`, `read_parquet`, `read_json`, `to_csv`, `to_parquet`, `describe`, `head`, `tail`, `filter`, `select`, `groupby`, `join`, `list_data`, `drop_data`

### PostgreSQL (6 tools)
`pg_connect`, `pg_query`, `pg_execute`, `pg_tables`, `pg_columns`, `pg_schemas`

### PostGIS (4 tools)
`st_tables`, `st_geometry_type`, `st_srid`, `st_extent`

### dbt (8 tools)
`dbt_parse`, `dbt_run`, `dbt_test`, `dbt_build`, `dbt_compile`, `dbt_ls`, `dbt_docs_generate`, `dbt_lineage`

## Memory Management

- Default limit: 100,000 rows per DataFrame
- Configure via `DATA_PLATFORM_MAX_ROWS` environment variable
- Use the `chunk_size` parameter for large files (see the sketch below)
- Monitor with the `list_data` tool
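The row cap and `chunk_size` interact naturally: read the file in chunks and stop once the cap is reached. A sketch under those assumptions (only the `DATA_PLATFORM_MAX_ROWS` variable comes from this README; the helper itself is illustrative):

```python
# Sketch: chunked CSV load that stops at the configured row cap.
# DATA_PLATFORM_MAX_ROWS is from the README; load_capped is illustrative.
import os
import pandas as pd

MAX_ROWS = int(os.environ.get("DATA_PLATFORM_MAX_ROWS", "100000"))

def load_capped(path: str, chunk_size: int = 10_000) -> pd.DataFrame:
    chunks, total = [], 0
    for chunk in pd.read_csv(path, chunksize=chunk_size):
        room = MAX_ROWS - total
        if room <= 0:
            break
        chunks.append(chunk.head(room))  # trim the final chunk to the cap
        total += min(len(chunk), room)
    return pd.concat(chunks, ignore_index=True) if chunks else pd.DataFrame()
```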

## SessionStart Hook

On session start, the plugin checks PostgreSQL connectivity and displays a warning if unavailable. This is non-blocking - pandas and dbt tools remain available.
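As a sketch of what such a non-blocking check could look like (`POSTGRES_URL` comes from the Configuration section above; the psycopg2 call is illustrative, not the hook's actual implementation):

```python
# Sketch of the non-blocking connectivity check described above.
# POSTGRES_URL comes from ~/.config/claude/postgres.env; the psycopg2
# usage illustrates the idea rather than the hook's real code.
import os
import sys

def check_postgres() -> None:
    url = os.environ.get("POSTGRES_URL")
    if not url:
        return
    try:
        import psycopg2
        conn = psycopg2.connect(url, connect_timeout=3)
        conn.close()
    except Exception as exc:
        # Warn but never fail: pandas and dbt tools stay usable.
        print(f"[data-platform] PostgreSQL unavailable: {exc}", file=sys.stderr)

if __name__ == "__main__":
    check_postgres()
    sys.exit(0)  # non-blocking by design
```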
98  plugins/data-platform/agents/data-analysis.md  Normal file
@@ -0,0 +1,98 @@
# Data Analysis Agent

You are a data analysis specialist. Your role is to help users explore, profile, and understand their data.

## Capabilities

- Profile datasets with statistical summaries
- Explore database schemas and structures
- Analyze dbt model lineage and dependencies
- Provide data quality assessments
- Generate insights and recommendations

## Available Tools

### Data Exploration
- `describe` - Statistical summary
- `head` - Preview first rows
- `tail` - Preview last rows
- `list_data` - List available DataFrames

### Database Exploration
- `pg_connect` - Check database connection
- `pg_tables` - List all tables
- `pg_columns` - Get column details
- `pg_schemas` - List schemas

### PostGIS Exploration
- `st_tables` - List spatial tables
- `st_geometry_type` - Get geometry type
- `st_srid` - Get coordinate system
- `st_extent` - Get bounding box

### dbt Analysis
- `dbt_lineage` - Model dependencies
- `dbt_ls` - List resources
- `dbt_compile` - View compiled SQL
- `dbt_docs_generate` - Generate docs

## Workflow Guidelines

1. **Understand the question**:
   - What does the user want to know?
   - What data is available?
   - What level of detail is needed?

2. **Explore the data**:
   - Start with `list_data` or `pg_tables`
   - Get schema info with `describe` or `pg_columns`
   - Preview with `head` to understand content

3. **Profile thoroughly**:
   - Use `describe` for statistics
   - Check for nulls, outliers, patterns
   - Note data quality issues

4. **Analyze dependencies** (for dbt):
   - Use `dbt_lineage` to trace data flow
   - Understand transformations
   - Identify critical paths

5. **Provide insights**:
   - Summarize findings clearly
   - Highlight potential issues
   - Recommend next steps

## Analysis Patterns

### Data Quality Check
1. `describe` - Get statistics
2. Check null percentages
3. Identify outliers (min/max vs mean)
4. Flag suspicious patterns (see the sketch below)
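Those four steps translate almost line-for-line into pandas. A rough sketch for reference (plain pandas shown for illustration; the agent itself would go through the MCP tools rather than call pandas directly):

```python
# Sketch of the quality-check pattern above in plain pandas.
import pandas as pd

def quality_report(df: pd.DataFrame) -> dict:
    stats = df.describe()              # 1. statistics
    null_pct = df.isna().mean() * 100  # 2. null percentages per column
    numeric = df.select_dtypes("number")
    # 3./4. crude outlier flag: max far beyond mean + 3 standard deviations
    outliers = {
        col: bool(numeric[col].max() > numeric[col].mean() + 3 * numeric[col].std())
        for col in numeric.columns
    }
    return {"stats": stats, "null_pct": null_pct, "outliers": outliers}
```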
|
||||
### Schema Comparison
|
||||
1. `pg_columns` - Get table A schema
|
||||
2. `pg_columns` - Get table B schema
|
||||
3. Compare column names, types
|
||||
4. Identify mismatches
|
||||
|
||||
### Lineage Analysis
|
||||
1. `dbt_lineage` - Get model graph
|
||||
2. Trace upstream sources
|
||||
3. Identify downstream impact
|
||||
4. Document critical path
|
||||
|
||||
## Example Interactions
|
||||
|
||||
**User**: What's in the sales_data DataFrame?
|
||||
**Agent**: Uses `describe`, `head`, explains columns, statistics, patterns
|
||||
|
||||
**User**: What tables are in the database?
|
||||
**Agent**: Uses `pg_tables`, shows list with column counts
|
||||
|
||||
**User**: How does the dim_customers model work?
|
||||
**Agent**: Uses `dbt_lineage`, `dbt_compile`, explains dependencies and SQL
|
||||
|
||||
**User**: Is there any spatial data?
|
||||
**Agent**: Uses `st_tables`, shows PostGIS tables with geometry types
|
||||
81
plugins/data-platform/agents/data-ingestion.md
Normal file
81
plugins/data-platform/agents/data-ingestion.md
Normal file
@@ -0,0 +1,81 @@
# Data Ingestion Agent

You are a data ingestion specialist. Your role is to help users load, transform, and prepare data for analysis.

## Capabilities

- Load data from CSV, Parquet, and JSON files
- Query PostgreSQL databases
- Transform data using filter, select, groupby, and join operations
- Export data to various formats
- Handle large datasets with chunking

## Available Tools

### File Operations
- `read_csv` - Load CSV files with optional chunking
- `read_parquet` - Load Parquet files
- `read_json` - Load JSON/JSONL files
- `to_csv` - Export to CSV
- `to_parquet` - Export to Parquet

### Data Transformation
- `filter` - Filter rows by condition
- `select` - Select specific columns
- `groupby` - Group and aggregate
- `join` - Join two DataFrames

### Database Operations
- `pg_query` - Execute SELECT queries
- `pg_execute` - Execute INSERT/UPDATE/DELETE
- `pg_tables` - List available tables

### Management
- `list_data` - List all stored DataFrames
- `drop_data` - Remove DataFrame from store

## Workflow Guidelines

1. **Understand the data source**:
   - Ask about file location/format
   - For databases, understand the table structure
   - Clarify any filters or transformations needed

2. **Load data efficiently**:
   - Use the appropriate reader for the file format
   - For large files (>100k rows), use chunking
   - Name DataFrames meaningfully

3. **Transform as needed** (see the sketch after this list):
   - Apply filters early to reduce data size
   - Select only needed columns
   - Join related datasets

4. **Validate results**:
   - Check row counts after transformations
   - Verify data types are correct
   - Preview results with `head`

5. **Store with meaningful names**:
   - Use descriptive data_ref names
   - Document the source and transformations
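
A minimal pandas sketch of steps 2-4, with hypothetical file and column names; the `filter`, `select`, `groupby`, and `join` tools expose the same operations:

```python
import pandas as pd

orders = pd.read_csv("data/orders.csv", parse_dates=["created_at"])
customers = pd.read_parquet("data/customers.parquet")

# filter early to shrink the working set, then keep only the needed columns
q4 = orders[orders["created_at"] >= "2024-10-01"][["order_id", "customer_id", "total"]]

# join related datasets, then aggregate
joined = q4.merge(customers, on="customer_id", how="left")
summary = joined.groupby("customer_id", as_index=False)["total"].sum()

# validate: row counts and dtypes after each transformation
print(len(orders), len(q4), len(joined))
print(summary.dtypes)
```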

## Memory Management

- Default row limit: 100,000 rows
- For larger datasets, suggest:
  - Filtering before loading
  - Using the chunk_size parameter (see the sketch below)
  - Aggregating to reduce size
  - Storing to Parquet for efficient retrieval
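
A sketch of chunked aggregation, assuming plain pandas underneath and hypothetical file/column names:

```python
import pandas as pd

# aggregate per chunk so the full dataset never sits in memory at once
chunks = pd.read_csv("data/big_file.csv", chunksize=50_000)
parts = [c.groupby("region")["sales"].sum() for c in chunks]
result = pd.concat(parts).groupby(level=0).sum()

# store to Parquet for efficient retrieval later
result.to_frame("sales").to_parquet("data/sales_by_region.parquet")
```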

## Example Interactions

**User**: Load the sales data from data/sales.csv
**Agent**: Uses `read_csv` to load, then reports the data_ref, row count, and columns

**User**: Filter to only Q4 2024 sales
**Agent**: Uses `filter` with a date condition, stores the filtered result

**User**: Join with customer data
**Agent**: Uses `join` to combine, validates the resulting row counts

90  plugins/data-platform/claude-md-integration.md  Normal file
@@ -0,0 +1,90 @@
# data-platform Plugin - CLAUDE.md Integration

Add this section to your project's CLAUDE.md to enable data-platform plugin features.

## Suggested CLAUDE.md Section

```markdown
## Data Platform Integration

This project uses the data-platform plugin for data engineering workflows.

### Configuration

**PostgreSQL**: Credentials in `~/.config/claude/postgres.env`
**dbt**: Project path auto-detected from `dbt_project.yml`

### Available Commands

| Command | Purpose |
|---------|---------|
| `/ingest` | Load data from files or database |
| `/profile` | Generate statistical profile |
| `/schema` | Show schema information |
| `/explain` | Explain dbt model |
| `/lineage` | Show data lineage |
| `/run` | Execute dbt models |

### data_ref Convention

DataFrames are stored with references. Use meaningful names:
- `raw_*` for source data
- `stg_*` for staged/cleaned data
- `dim_*` for dimension tables
- `fct_*` for fact tables
- `rpt_*` for reports

### dbt Workflow

1. Always validate before running: `/run` includes an automatic `dbt_parse`
2. For dbt 1.9+, check for deprecated syntax before commits
3. Use `/lineage` to understand the impact of changes

### Database Access

PostgreSQL tools require POSTGRES_URL configuration:
- Read-only queries: `pg_query`
- Write operations: `pg_execute`
- Schema exploration: `pg_tables`, `pg_columns`

PostGIS spatial data:
- List spatial tables: `st_tables`
- Check geometry: `st_geometry_type`, `st_srid`, `st_extent`
```

## Environment Variables

Add to the project `.env` if needed:

```env
# dbt configuration
DBT_PROJECT_DIR=./transform
DBT_PROFILES_DIR=~/.dbt

# Memory limits
DATA_PLATFORM_MAX_ROWS=100000
```

## Typical Workflows

### Data Exploration
```
/ingest data/raw_customers.csv
/profile raw_customers
/schema
```

### ETL Development
```
/schema orders          # Understand source
/explain stg_orders     # Understand transformation
/run stg_orders         # Test the model
/lineage fct_orders     # Check downstream impact
```

### Database Analysis
```
/schema                 # List all tables
pg_columns orders       # Detailed schema
st_tables               # Find spatial data
```

44  plugins/data-platform/commands/explain.md  Normal file
@@ -0,0 +1,44 @@
# /explain - dbt Model Explanation

Explain a dbt model's purpose, dependencies, and SQL logic.

## Usage

```
/explain <model_name>
```

## Workflow

1. **Get model info**:
   - Use `dbt_lineage` to get model metadata
   - Extract description, tags, materialization

2. **Analyze dependencies**:
   - Show upstream models (what this model depends on)
   - Show downstream models (what depends on this model)
   - Visualize as a dependency tree

3. **Compile SQL**:
   - Use `dbt_compile` to get the rendered SQL
   - Explain the key transformations

4. **Report**:
   - Model purpose (from description)
   - Materialization strategy
   - Dependency graph
   - Key SQL logic explained

## Examples

```
/explain dim_customers
/explain fct_orders
```

## Available Tools

Use these MCP tools:
- `dbt_lineage` - Get model dependencies
- `dbt_compile` - Get compiled SQL
- `dbt_ls` - List related resources

44  plugins/data-platform/commands/ingest.md  Normal file
@@ -0,0 +1,44 @@
# /ingest - Data Ingestion

Load data from files or a database into the data platform.

## Usage

```
/ingest [source]
```

## Workflow

1. **Identify the data source** (see the sketch after this list):
   - If the source is a file path, determine the format (CSV, Parquet, JSON)
   - If the source is "db" or a table name, query PostgreSQL

2. **Load data**:
   - For files: use `read_csv`, `read_parquet`, or `read_json`
   - For the database: use `pg_query` with an appropriate SELECT

3. **Validate**:
   - Check the row count against limits
   - If it exceeds 100k rows, suggest chunking or filtering

4. **Report**:
   - Show data_ref, row count, columns, and memory usage
   - Preview the first few rows
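
A hypothetical sketch of step 1's format detection, mapping the file extension to the matching pandas reader (mirroring `read_csv`/`read_parquet`/`read_json`):

```python
from pathlib import Path

import pandas as pd

READERS = {
    ".csv": pd.read_csv,
    ".parquet": pd.read_parquet,
    ".json": pd.read_json,
    ".jsonl": lambda p: pd.read_json(p, lines=True),  # JSONL needs lines=True
}

def load(source: str) -> pd.DataFrame:
    suffix = Path(source).suffix.lower()
    if suffix not in READERS:
        raise ValueError(f"unsupported format: {suffix}")
    return READERS[suffix](source)
```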

## Examples

```
/ingest data/sales.csv
/ingest data/customers.parquet
/ingest "SELECT * FROM orders WHERE created_at > '2024-01-01'"
```

## Available Tools

Use these MCP tools:
- `read_csv` - Load CSV files
- `read_parquet` - Load Parquet files
- `read_json` - Load JSON/JSONL files
- `pg_query` - Query PostgreSQL database
- `list_data` - List loaded DataFrames

231  plugins/data-platform/commands/initial-setup.md  Normal file
@@ -0,0 +1,231 @@
---
description: Interactive setup wizard for data-platform plugin - configures MCP server and optional PostgreSQL/dbt
---

# Data Platform Setup Wizard

This command sets up the data-platform plugin with pandas, PostgreSQL, and dbt integration.

## Important Context

- **This command uses Bash, Read, Write, and AskUserQuestion tools** - NOT MCP tools
- **MCP tools won't work until after setup + session restart**
- **PostgreSQL and dbt are optional** - pandas tools work without them

---

## Phase 1: Environment Validation

### Step 1.1: Check Python Version

```bash
python3 --version
```

Requires Python 3.10+. If the version is lower, stop setup and inform the user.

### Step 1.2: Check for Required Libraries

```bash
python3 -c "import sys; print(f'Python {sys.version_info.major}.{sys.version_info.minor}')"
```

---

## Phase 2: MCP Server Setup

### Step 2.1: Locate the Data Platform MCP Server

The MCP server should be at the marketplace root:

```bash
# If running from the installed marketplace
ls -la ~/.claude/plugins/marketplaces/leo-claude-mktplace/mcp-servers/data-platform/ 2>/dev/null || echo "NOT_FOUND_INSTALLED"

# If running from source
ls -la ~/claude-plugins-work/mcp-servers/data-platform/ 2>/dev/null || echo "NOT_FOUND_SOURCE"
```

Determine the correct path based on which exists.

### Step 2.2: Check the Virtual Environment

```bash
ls -la /path/to/mcp-servers/data-platform/.venv/bin/python 2>/dev/null && echo "VENV_EXISTS" || echo "VENV_MISSING"
```

### Step 2.3: Create the Virtual Environment (if missing)

```bash
cd /path/to/mcp-servers/data-platform && python3 -m venv .venv && source .venv/bin/activate && pip install --upgrade pip && pip install -r requirements.txt && deactivate
```

**Note:** This may take a few minutes due to the pandas, pyarrow, and dbt dependencies.

---

## Phase 3: PostgreSQL Configuration (Optional)

### Step 3.1: Ask About PostgreSQL

Use AskUserQuestion:
- Question: "Do you want to configure PostgreSQL database access?"
- Header: "PostgreSQL"
- Options:
  - "Yes, I have a PostgreSQL database"
  - "No, I'll only use pandas/dbt tools"

**If the user chooses "No":** Skip to Phase 4.

### Step 3.2: Create the Config Directory

```bash
mkdir -p ~/.config/claude
```

### Step 3.3: Check the PostgreSQL Configuration

```bash
cat ~/.config/claude/postgres.env 2>/dev/null || echo "FILE_NOT_FOUND"
```

**If the file exists with a valid URL:** Skip to Step 3.6.
**If it is missing or contains placeholders:** Continue.

### Step 3.4: Gather PostgreSQL Information

Use AskUserQuestion:
- Question: "What is your PostgreSQL connection URL format?"
- Header: "DB Format"
- Options:
  - "Standard: postgresql://user:pass@host:5432/db"
  - "PostGIS: postgresql://user:pass@host:5432/db (with PostGIS extension)"
  - "Other (I'll provide the full URL)"

Ask the user to provide the connection URL.

### Step 3.5: Create the Configuration File

```bash
cat > ~/.config/claude/postgres.env << 'EOF'
# PostgreSQL Configuration
# Generated by data-platform /initial-setup

POSTGRES_URL=<USER_PROVIDED_URL>
EOF
chmod 600 ~/.config/claude/postgres.env
```

### Step 3.6: Test the PostgreSQL Connection (if configured)

```bash
source ~/.config/claude/postgres.env && python3 -c "
import asyncio
import asyncpg
async def test():
    try:
        conn = await asyncpg.connect('$POSTGRES_URL', timeout=5)
        ver = await conn.fetchval('SELECT version()')
        await conn.close()
        print(f'SUCCESS: {ver.split(\",\")[0]}')
    except Exception as e:
        print(f'FAILED: {e}')
asyncio.run(test())
"
```

Report the result:
- SUCCESS: Connection works
- FAILED: Show the error and suggest fixes

---

## Phase 4: dbt Configuration (Optional)

### Step 4.1: Ask About dbt

Use AskUserQuestion:
- Question: "Do you use dbt for data transformations in your projects?"
- Header: "dbt"
- Options:
  - "Yes, I have dbt projects"
  - "No, I don't use dbt"

**If the user chooses "No":** Skip to Phase 5.

### Step 4.2: dbt Discovery

dbt configuration is **project-level** (not system-level). The plugin auto-detects dbt projects by looking for `dbt_project.yml`.

Inform the user:
```
dbt projects are detected automatically when you work in a directory
containing dbt_project.yml.

If your dbt project is in a subdirectory, you can set DBT_PROJECT_DIR
in your project's .env file:

DBT_PROJECT_DIR=./transform
DBT_PROFILES_DIR=~/.dbt
```

### Step 4.3: Check the dbt Installation

```bash
dbt --version 2>/dev/null || echo "DBT_NOT_FOUND"
```

**If not found:** Inform the user that the dbt tools require dbt-core to be installed globally or in the project.

---

## Phase 5: Validation

### Step 5.1: Verify the MCP Server

```bash
cd /path/to/mcp-servers/data-platform && .venv/bin/python -c "from mcp_server.server import DataPlatformMCPServer; print('MCP Server OK')"
```

### Step 5.2: Summary

```
╔════════════════════════════════════════════════════════════╗
║              DATA-PLATFORM SETUP COMPLETE                   ║
╠════════════════════════════════════════════════════════════╣
║ MCP Server:        ✓ Ready                                  ║
║ pandas Tools:      ✓ Available (14 tools)                   ║
║ PostgreSQL Tools:  [✓/✗] [Status based on config]           ║
║ PostGIS Tools:     [✓/✗] [Status based on PostGIS]          ║
║ dbt Tools:         [✓/✗] [Status based on discovery]        ║
╚════════════════════════════════════════════════════════════╝
```

### Step 5.3: Session Restart Notice

---

**⚠️ Session Restart Required**

Restart your Claude Code session for the MCP tools to become available.

**After restart, you can:**
- Run `/ingest` to load data from files or a database
- Run `/profile` to analyze DataFrame statistics
- Run `/schema` to explore database/DataFrame schemas
- Run `/run` to execute dbt models (if configured)
- Run `/lineage` to view dbt model dependencies

---

## Memory Limits

The data-platform plugin has a default limit of 100,000 rows per DataFrame. For larger datasets:
- Use chunked processing (the `chunk_size` parameter)
- Filter data before loading
- Store to Parquet for efficient re-loading

You can override the limit by setting, in your project `.env`:
```
DATA_PLATFORM_MAX_ROWS=500000
```

60  plugins/data-platform/commands/lineage.md  Normal file
@@ -0,0 +1,60 @@
# /lineage - Data Lineage Visualization

Show data lineage for dbt models or database tables.

## Usage

```
/lineage <model_name> [--depth N]
```

## Workflow

1. **Get lineage data**:
   - Use `dbt_lineage` for dbt models
   - For database tables, trace through the dbt manifest

2. **Build the lineage graph** (see the sketch after this list):
   - Identify all upstream sources
   - Identify all downstream consumers
   - Note the materialization at each node

3. **Visualize**:
   - ASCII-art dependency tree
   - List format with indentation
   - Show depth levels

4. **Report**:
   - Full dependency chain
   - Critical path identification
   - Refresh implications
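
A sketch of building the tree from dbt's `target/manifest.json`, which records dependencies in its `parent_map`/`child_map` entries (the node id below is hypothetical):

```python
import json

with open("target/manifest.json") as f:
    manifest = json.load(f)

def walk(node: str, graph: dict, depth: int = 1, max_depth: int = 3) -> None:
    # recursively print dependencies up to max_depth levels
    if depth > max_depth:
        return
    for dep in graph.get(node, []):
        print("    " * depth + "└── " + dep)
        walk(dep, graph, depth + 1, max_depth)

model = "model.my_project.dim_customers"  # hypothetical unique_id
print("upstream:")
walk(model, manifest["parent_map"])
print("downstream:")
walk(model, manifest["child_map"])
```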

## Examples

```
/lineage dim_customers
/lineage fct_orders --depth 3
```

## Output Format

```
Sources:
└── raw_customers (source)
└── raw_orders (source)

dim_customers (table)
├── upstream:
│   └── stg_customers (view)
│       └── raw_customers (source)
└── downstream:
    └── fct_orders (incremental)
        └── rpt_customer_lifetime (table)
```

## Available Tools

Use these MCP tools:
- `dbt_lineage` - Get model dependencies
- `dbt_ls` - List dbt resources
- `dbt_docs_generate` - Generate full manifest

44  plugins/data-platform/commands/profile.md  Normal file
@@ -0,0 +1,44 @@
# /profile - Data Profiling

Generate a statistical profile and quality report for a DataFrame.

## Usage

```
/profile <data_ref>
```

## Workflow

1. **Get the data reference**:
   - If no data_ref is provided, use `list_data` to show the available options
   - Validate that the data_ref exists

2. **Generate the profile**:
   - Use `describe` for a statistical summary
   - Analyze null counts, unique values, and data types

3. **Quality assessment**:
   - Identify columns with a high null percentage
   - Flag potential data quality issues
   - Suggest cleaning operations if needed

4. **Report** (see the sketch after this list):
   - Summary statistics per column
   - Data type distribution
   - Memory usage
   - Quality score
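
A minimal sketch of the report, assuming a pandas DataFrame `df` and one possible definition of the quality score (the share of non-null cells):

```python
import pandas as pd

def profile(df: pd.DataFrame) -> dict:
    nulls = df.isna().mean()
    return {
        "rows": len(df),
        "columns": len(df.columns),
        "dtypes": df.dtypes.astype(str).value_counts().to_dict(),
        "memory_mb": round(df.memory_usage(deep=True).sum() / 1e6, 2),
        "high_null_columns": nulls[nulls > 0.5].index.tolist(),
        # naive quality score: fraction of cells that are non-null
        "quality_score": round(float(1 - nulls.mean()), 3),
    }
```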

## Examples

```
/profile sales_data
/profile df_a1b2c3d4
```

## Available Tools

Use these MCP tools:
- `describe` - Get statistical summary
- `head` - Preview first rows
- `list_data` - List available DataFrames

55  plugins/data-platform/commands/run.md  Normal file
@@ -0,0 +1,55 @@
# /run - Execute dbt Models

Run dbt models with automatic pre-validation.

## Usage

```
/run [model_selection] [--full-refresh]
```

## Workflow

1. **Pre-validation** (MANDATORY; see the sketch after this list):
   - Use `dbt_parse` to validate the project
   - Check for deprecated syntax (dbt 1.9+)
   - If validation fails, show the errors and STOP

2. **Execute models**:
   - Use `dbt_run` with the provided selection
   - Monitor progress and capture output

3. **Report results**:
   - Success/failure status per model
   - Execution time
   - Row counts where available
   - Any warnings or errors
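
A sketch of the parse-then-run gate using the dbt CLI directly (the `dbt_parse`/`dbt_run` MCP tools presumably wrap the same sequence; the model selection is a placeholder):

```python
import subprocess
import sys

# validate the project first; refuse to run models if parsing fails
parse = subprocess.run(["dbt", "parse"], capture_output=True, text=True)
if parse.returncode != 0:
    print(parse.stdout or parse.stderr)
    sys.exit("dbt parse failed - fix the errors before running models")

subprocess.run(["dbt", "run", "--select", "dim_customers"], check=True)
```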

## Examples

```
/run                    # Run all models
/run dim_customers      # Run specific model
/run +fct_orders        # Run model and its upstream
/run tag:daily          # Run models with tag
/run --full-refresh     # Rebuild incremental models
```

## Selection Syntax

| Pattern | Meaning |
|---------|---------|
| `model_name` | Run single model |
| `+model_name` | Run model and upstream |
| `model_name+` | Run model and downstream |
| `+model_name+` | Run model with all deps |
| `tag:name` | Run by tag |
| `path:models/staging` | Run by path |

## Available Tools

Use these MCP tools:
- `dbt_parse` - Pre-validation (ALWAYS RUN FIRST)
- `dbt_run` - Execute models
- `dbt_build` - Run + test
- `dbt_test` - Run tests only

48  plugins/data-platform/commands/schema.md  Normal file
@@ -0,0 +1,48 @@
# /schema - Schema Exploration

Display schema information for database tables or DataFrames.

## Usage

```
/schema [table_name | data_ref]
```

## Workflow

1. **Determine the target**:
   - If the argument is a loaded data_ref, show the DataFrame schema
   - If the argument is a table name, query the database schema
   - If there is no argument, list all available tables and DataFrames

2. **For DataFrames**:
   - Use `describe` to get column info
   - Show dtypes, null counts, and sample values

3. **For database tables** (see the sketch after this list):
   - Use `pg_columns` for column details
   - Use `st_tables` to check for PostGIS columns
   - Show constraints and indexes if available

4. **Report**:
   - Column name, type, nullable, default
   - For PostGIS: geometry type, SRID
   - For DataFrames: pandas dtype, null percentage
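
A hypothetical sketch of the database branch, reading the standard `information_schema.columns` view with asyncpg (URL and table name are placeholders):

```python
import asyncio
import asyncpg

async def show_schema(url: str, table: str) -> None:
    conn = await asyncpg.connect(url)
    try:
        rows = await conn.fetch(
            "SELECT column_name, data_type, is_nullable, column_default "
            "FROM information_schema.columns WHERE table_name = $1 "
            "ORDER BY ordinal_position",
            table,
        )
    finally:
        await conn.close()
    for r in rows:
        print(f'{r["column_name"]:<24} {r["data_type"]:<16} '
              f'nullable={r["is_nullable"]} default={r["column_default"]}')

asyncio.run(show_schema("postgresql://user:pass@host:5432/db", "customers"))
```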

## Examples

```
/schema                 # List all tables and DataFrames
/schema customers       # Show table schema
/schema sales_data      # Show DataFrame schema
```

## Available Tools

Use these MCP tools:
- `pg_tables` - List database tables
- `pg_columns` - Get column info
- `pg_schemas` - List schemas
- `st_tables` - List PostGIS tables
- `describe` - Get DataFrame info
- `list_data` - List DataFrames

10  plugins/data-platform/hooks/hooks.json  Normal file
@@ -0,0 +1,10 @@
{
  "hooks": {
    "SessionStart": [
      {
        "type": "command",
        "command": "${CLAUDE_PLUGIN_ROOT}/hooks/startup-check.sh"
      }
    ]
  }
}

54  plugins/data-platform/hooks/startup-check.sh  Executable file
@@ -0,0 +1,54 @@
#!/bin/bash
# data-platform startup check hook
# Checks for common issues at session start
# All output MUST have the [data-platform] prefix

PREFIX="[data-platform]"

# Check if the MCP venv exists
PLUGIN_ROOT="${CLAUDE_PLUGIN_ROOT:-$(dirname "$(dirname "$(realpath "$0")")")}"
VENV_PATH="$PLUGIN_ROOT/mcp-servers/data-platform/.venv/bin/python"

if [[ ! -f "$VENV_PATH" ]]; then
    echo "$PREFIX MCP venv missing - run /initial-setup or setup.sh"
    exit 0
fi

# Check the PostgreSQL configuration (optional - just warn if configured but failing)
POSTGRES_CONFIG="$HOME/.config/claude/postgres.env"
if [[ -f "$POSTGRES_CONFIG" ]]; then
    source "$POSTGRES_CONFIG"
    if [[ -n "${POSTGRES_URL:-}" ]]; then
        # Quick connection test (5 second timeout)
        RESULT=$("$VENV_PATH" -c "
import asyncio
import sys
async def test():
    try:
        import asyncpg
        conn = await asyncpg.connect('$POSTGRES_URL', timeout=5)
        await conn.close()
        return 'OK'
    except Exception as e:
        return f'FAIL: {e}'
print(asyncio.run(test()))
" 2>/dev/null || echo "FAIL: asyncpg not installed")

        if [[ "$RESULT" == "OK" ]]; then
            # PostgreSQL OK - say nothing
            :
        elif [[ "$RESULT" == *"FAIL"* ]]; then
            echo "$PREFIX PostgreSQL connection failed - check POSTGRES_URL"
        fi
    fi
fi

# Check the dbt project (if in a project with dbt_project.yml)
if [[ -f "dbt_project.yml" ]] || [[ -f "transform/dbt_project.yml" ]]; then
    if ! command -v dbt &> /dev/null; then
        echo "$PREFIX dbt CLI not found - dbt tools unavailable"
    fi
fi

# All checks passed - say nothing
exit 0

1  plugins/data-platform/mcp-servers/data-platform  Symbolic link
@@ -0,0 +1 @@
../../../mcp-servers/data-platform

@@ -6,8 +6,9 @@
    "name": "Leo Miranda",
    "email": "leobmiranda@gmail.com"
  },
  "homepage": "https://gitea.hotserv.cloud/personal-projects/support-claude-mktplace/src/branch/main/plugins/doc-guardian/README.md",
  "repository": "https://gitea.hotserv.cloud/personal-projects/support-claude-mktplace.git",
  "homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/doc-guardian/README.md",
  "repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
  "license": "MIT",
  "keywords": ["documentation", "sync", "drift-detection", "automation", "hooks"]
  "keywords": ["documentation", "sync", "drift-detection", "automation", "hooks"],
  "commands": ["./commands/"]
}
@@ -11,10 +11,10 @@ Documentation gets outdated. Functions get renamed, configs change, versions bum

## Solution

doc-guardian watches your code changes and automatically:
1. Detects when changes affect documentation
2. Queues updates silently (doesn't interrupt your flow)
3. Syncs all doc changes in a single commit when ready
doc-guardian monitors your code changes via hooks:
1. Detects when changes might affect documentation
2. Alerts you to potential drift (doesn't interrupt your flow)
3. Provides commands to audit and sync docs when ready

## Commands

@@ -25,8 +25,7 @@ doc-guardian watches your code changes and automatically:

## Hooks

- **PostToolUse (Write\|Edit)**: Silently checks if code changes affect docs
- **Stop**: Reminds you of pending doc updates before session ends
- **PostToolUse (Write|Edit)**: Silently checks if code changes affect docs

## What It Detects

@@ -37,10 +36,10 @@ doc-guardian watches your code changes and automatically:

## Installation

This plugin is part of the claude-code-marketplace.
This plugin is part of the Leo Claude Marketplace.

```bash
/plugin marketplace add https://gitea.hotserv.cloud/personal-projects/support-claude-mktplace.git
/plugin marketplace add https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git
/plugin install doc-guardian
```

@@ -5,19 +5,8 @@
      "matcher": "Write|Edit|MultiEdit",
      "hooks": [
        {
          "type": "prompt",
          "prompt": "A file was just modified. Silently check if this change affects any documentation:\n\n1. If a code file changed: check if README, CLAUDE.md, docstrings, or API docs reference the modified functions/classes/configs\n2. If drift detected: add to internal queue (do NOT interrupt user flow)\n3. At natural breakpoints or when user runs /doc-sync: report pending doc updates\n\nDo NOT announce this check unless drift is found. Work silently."
        }
      ]
    }
  ],
  "Stop": [
    {
      "matcher": ".*",
      "hooks": [
        {
          "type": "prompt",
          "prompt": "Before ending, check if there are pending documentation updates queued by doc-guardian. If yes, ask user: 'I detected documentation drift in X files. Run /doc-sync to update, or skip for now?'"
          "type": "command",
          "command": "${CLAUDE_PLUGIN_ROOT}/hooks/notify.sh"
        }
      ]
    }

23  plugins/doc-guardian/hooks/notify.sh  Executable file
@@ -0,0 +1,23 @@
#!/bin/bash
# doc-guardian notification hook
# Outputs a single notification for config file changes, nothing otherwise
# This is a command hook - guaranteed not to block the workflow

# Read the tool input from stdin (JSON with file_path)
INPUT=$(cat)

# Extract file_path from the JSON input
FILE_PATH=$(echo "$INPUT" | grep -o '"file_path"[[:space:]]*:[[:space:]]*"[^"]*"' | head -1 | sed 's/.*"file_path"[[:space:]]*:[[:space:]]*"\([^"]*\)".*/\1/')

# If no file_path is found, exit silently
if [ -z "$FILE_PATH" ]; then
    exit 0
fi

# Check if the file is in a config directory (commands/, agents/, skills/, hooks/)
if echo "$FILE_PATH" | grep -qE '/(commands|agents|skills|hooks)/'; then
    echo "[doc-guardian] Config file modified. Run /doc-sync when ready."
fi

# Exit silently for all other files (no output = no blocking)
exit 0

20  plugins/git-flow/.claude-plugin/plugin.json  Normal file
@@ -0,0 +1,20 @@
{
  "name": "git-flow",
  "version": "1.0.0",
  "description": "Git workflow automation with intelligent commit messages and branch management",
  "author": {
    "name": "Leo Miranda",
    "email": "leobmiranda@gmail.com"
  },
  "homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/git-flow/README.md",
  "repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
  "license": "MIT",
  "keywords": [
    "git",
    "workflow",
    "commit",
    "branch",
    "automation"
  ],
  "commands": ["./commands/"]
}