Compare commits: v2.3.0 ... c0d62f4957 (410 commits)
(Commit list omitted: the compare view rendered 410 SHA-only rows with empty Author and Date columns.)
.claude-plugin/marketplace.json
@@ -1,57 +1,61 @@
 {
-  "name": "claude-code-marketplace",
+  "name": "leo-claude-mktplace",
   "owner": {
     "name": "Leo Miranda",
     "email": "leobmiranda@gmail.com"
   },
   "metadata": {
     "description": "Project management plugins with Gitea and NetBox integrations",
-    "version": "2.3.0"
+    "version": "5.8.0"
   },
   "plugins": [
     {
       "name": "projman",
-      "version": "2.3.0",
+      "version": "3.4.0",
       "description": "Sprint planning and project management with Gitea integration",
       "source": "./plugins/projman",
       "author": {
         "name": "Leo Miranda",
         "email": "leobmiranda@gmail.com"
       },
-      "homepage": "https://gitea.hotserv.cloud/personal-projects/support-claude-mktplace/src/branch/main/plugins/projman/README.md",
-      "repository": "https://gitea.hotserv.cloud/personal-projects/support-claude-mktplace.git",
-      "mcpServers": ["gitea"],
-      "integrationFile": "claude-md-integration.md"
+      "homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/projman/README.md",
+      "repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
+      "hooks": ["./hooks/hooks.json"],
+      "category": "development",
+      "tags": ["sprint", "agile", "gitea", "project-management"],
+      "license": "MIT"
     },
     {
       "name": "doc-guardian",
-      "version": "1.0.0",
+      "version": "1.1.0",
       "description": "Automatic documentation drift detection and synchronization",
       "source": "./plugins/doc-guardian",
       "author": {
         "name": "Leo Miranda",
         "email": "leobmiranda@gmail.com"
       },
-      "homepage": "https://gitea.hotserv.cloud/personal-projects/support-claude-mktplace/src/branch/main/plugins/doc-guardian/README.md",
-      "repository": "https://gitea.hotserv.cloud/personal-projects/support-claude-mktplace.git",
-      "mcpServers": [],
-      "integrationFile": "claude-md-integration.md",
-      "hooks": ["PostToolUse", "Stop"]
+      "homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/doc-guardian/README.md",
+      "repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
+      "hooks": ["./hooks/hooks.json"],
+      "category": "productivity",
+      "tags": ["documentation", "drift-detection", "sync"],
+      "license": "MIT"
     },
     {
       "name": "code-sentinel",
-      "version": "1.0.0",
+      "version": "1.0.1",
       "description": "Security scanning and code refactoring tools",
       "source": "./plugins/code-sentinel",
       "author": {
         "name": "Leo Miranda",
         "email": "leobmiranda@gmail.com"
       },
-      "homepage": "https://gitea.hotserv.cloud/personal-projects/support-claude-mktplace/src/branch/main/plugins/code-sentinel/README.md",
-      "repository": "https://gitea.hotserv.cloud/personal-projects/support-claude-mktplace.git",
-      "mcpServers": [],
-      "integrationFile": "claude-md-integration.md",
-      "hooks": ["PreToolUse"]
+      "homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/code-sentinel/README.md",
+      "repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
+      "hooks": ["./hooks/hooks.json"],
+      "category": "security",
+      "tags": ["security-scan", "refactoring", "vulnerabilities"],
+      "license": "MIT"
     },
     {
       "name": "project-hygiene",
@@ -62,50 +66,140 @@
         "name": "Leo Miranda",
         "email": "leobmiranda@gmail.com"
       },
-      "homepage": "https://gitea.hotserv.cloud/personal-projects/support-claude-mktplace/src/branch/main/plugins/project-hygiene/README.md",
-      "repository": "https://gitea.hotserv.cloud/personal-projects/support-claude-mktplace.git",
-      "mcpServers": [],
-      "integrationFile": "claude-md-integration.md",
-      "hooks": ["PostToolUse"]
+      "homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/project-hygiene/README.md",
+      "repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
+      "hooks": ["./hooks/hooks.json"],
+      "category": "productivity",
+      "tags": ["cleanup", "automation", "hygiene"],
+      "license": "MIT"
     },
     {
       "name": "cmdb-assistant",
-      "version": "1.0.0",
-      "description": "NetBox CMDB integration for infrastructure management",
+      "version": "1.2.0",
+      "description": "NetBox CMDB integration with data quality validation and machine registration",
       "source": "./plugins/cmdb-assistant",
       "author": {
         "name": "Leo Miranda",
         "email": "leobmiranda@gmail.com"
       },
-      "homepage": "https://gitea.hotserv.cloud/personal-projects/support-claude-mktplace/src/branch/main/plugins/cmdb-assistant/README.md",
-      "repository": "https://gitea.hotserv.cloud/personal-projects/support-claude-mktplace.git",
-      "mcpServers": ["netbox"],
-      "integrationFile": "claude-md-integration.md"
+      "homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/cmdb-assistant/README.md",
+      "repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
+      "hooks": ["./hooks/hooks.json"],
+      "category": "infrastructure",
+      "tags": ["cmdb", "netbox", "dcim", "ipam", "data-quality", "validation"],
+      "license": "MIT"
     },
     {
       "name": "claude-config-maintainer",
-      "version": "1.0.0",
-      "description": "CLAUDE.md optimization and maintenance for Claude Code projects",
+      "version": "1.2.0",
+      "description": "CLAUDE.md and settings.local.json optimization for Claude Code projects",
       "source": "./plugins/claude-config-maintainer",
       "author": {
         "name": "Leo Miranda",
         "email": "leobmiranda@gmail.com"
       },
-      "homepage": "https://gitea.hotserv.cloud/personal-projects/support-claude-mktplace/src/branch/main/plugins/claude-config-maintainer/README.md",
-      "repository": "https://gitea.hotserv.cloud/personal-projects/support-claude-mktplace.git",
-      "mcpServers": [],
-      "integrationFile": "claude-md-integration.md"
-    }
-  ],
-  "pluginDetection": {
-    "mcpServerMapping": {
-      "gitea": "projman",
-      "netbox": "cmdb-assistant"
-    },
-    "hookMapping": {
-      "PostToolUse:Write|Edit": "project-hygiene",
-      "PostToolUse:Write|Edit|MultiEdit": "doc-guardian",
-      "PreToolUse:Write|Edit|MultiEdit": "code-sentinel"
-    }
-  }
-}
+      "homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/claude-config-maintainer/README.md",
+      "repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
+      "hooks": ["./hooks/hooks.json"],
+      "category": "development",
+      "tags": ["claude-md", "configuration", "optimization"],
+      "license": "MIT"
+    },
+    {
+      "name": "clarity-assist",
+      "version": "1.2.0",
+      "description": "Prompt optimization and requirement clarification with ND-friendly accommodations",
+      "source": "./plugins/clarity-assist",
+      "author": {
+        "name": "Leo Miranda",
+        "email": "leobmiranda@gmail.com"
+      },
+      "homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/clarity-assist/README.md",
+      "repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
+      "hooks": ["./hooks/hooks.json"],
+      "category": "productivity",
+      "tags": ["prompts", "requirements", "clarification", "nd-friendly"],
+      "license": "MIT"
+    },
+    {
+      "name": "git-flow",
+      "version": "1.2.0",
+      "description": "Git workflow automation with intelligent commit messages and branch management",
+      "source": "./plugins/git-flow",
+      "author": {
+        "name": "Leo Miranda",
+        "email": "leobmiranda@gmail.com"
+      },
+      "homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/git-flow/README.md",
+      "repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
+      "hooks": ["./hooks/hooks.json"],
+      "category": "development",
+      "tags": ["git", "workflow", "commits", "branching"],
+      "license": "MIT"
+    },
+    {
+      "name": "pr-review",
+      "version": "1.1.0",
+      "description": "Multi-agent pull request review with confidence scoring and actionable feedback",
+      "source": "./plugins/pr-review",
+      "author": {
+        "name": "Leo Miranda",
+        "email": "leobmiranda@gmail.com"
+      },
+      "homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/pr-review/README.md",
+      "repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
+      "hooks": ["./hooks/hooks.json"],
+      "category": "development",
+      "tags": ["code-review", "pull-requests", "security", "quality"],
+      "license": "MIT"
+    },
+    {
+      "name": "data-platform",
+      "version": "1.3.0",
+      "description": "Data engineering tools with pandas, PostgreSQL/PostGIS, and dbt integration",
+      "source": "./plugins/data-platform",
+      "author": {
+        "name": "Leo Miranda",
+        "email": "leobmiranda@gmail.com"
+      },
+      "homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/data-platform/README.md",
+      "repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
+      "hooks": ["./hooks/hooks.json"],
+      "category": "data",
+      "tags": ["pandas", "postgresql", "postgis", "dbt", "data-engineering", "etl"],
+      "license": "MIT"
+    },
+    {
+      "name": "viz-platform",
+      "version": "1.1.0",
+      "description": "Visualization tools with Dash Mantine Components validation, Plotly charts, and theming",
+      "source": "./plugins/viz-platform",
+      "author": {
+        "name": "Leo Miranda",
+        "email": "leobmiranda@gmail.com"
+      },
+      "homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/viz-platform/README.md",
+      "repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
+      "hooks": ["./hooks/hooks.json"],
+      "category": "visualization",
+      "tags": ["dash", "plotly", "mantine", "charts", "dashboards", "theming", "dmc"],
+      "license": "MIT"
+    },
+    {
+      "name": "contract-validator",
+      "version": "1.2.0",
+      "description": "Cross-plugin compatibility validation and Claude.md agent verification",
+      "source": "./plugins/contract-validator",
+      "author": {
+        "name": "Leo Miranda",
+        "email": "leobmiranda@gmail.com"
+      },
+      "homepage": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/src/branch/main/plugins/contract-validator/README.md",
+      "repository": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git",
+      "hooks": ["./hooks/hooks.json"],
+      "category": "development",
+      "tags": ["validation", "contracts", "compatibility", "agents", "interfaces", "cross-plugin"],
+      "license": "MIT"
+    }
+  ]
+}
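With `pluginDetection` gone, the manifest is now a flat plugin list. A minimal sketch for inspecting it from the repository root (assumes `jq` is available; the path is the one shown in the repository structure below):

```bash
# List every plugin with its version and category.
jq -r '.plugins[] | "\(.name)\t\(.version)\t\(.category)"' .claude-plugin/marketplace.json

# The marketplace-wide version, which must stay in sync with README.md and CHANGELOG.md.
jq -r '.metadata.version' .claude-plugin/marketplace.json
```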
.claude/backups/CLAUDE.md.2026-01-22_132037 (new file, +249 lines)
# CLAUDE.md

This file provides guidance to Claude Code when working with code in this repository.

## Project Overview

**Repository:** leo-claude-mktplace
**Version:** 3.0.1
**Status:** Production Ready

A plugin marketplace for Claude Code containing:

| Plugin | Description | Version |
|--------|-------------|---------|
| `projman` | Sprint planning and project management with Gitea integration | 3.0.0 |
| `git-flow` | Git workflow automation with smart commits and branch management | 1.0.0 |
| `pr-review` | Multi-agent PR review with confidence scoring | 1.0.0 |
| `clarity-assist` | Prompt optimization with ND-friendly accommodations | 1.0.0 |
| `doc-guardian` | Automatic documentation drift detection and synchronization | 1.0.0 |
| `code-sentinel` | Security scanning and code refactoring tools | 1.0.0 |
| `claude-config-maintainer` | CLAUDE.md optimization and maintenance | 1.0.0 |
| `cmdb-assistant` | NetBox CMDB integration for infrastructure management | 1.0.0 |
| `project-hygiene` | Post-task cleanup automation via hooks | 0.1.0 |

## Quick Start

```bash
# Validate marketplace compliance
./scripts/validate-marketplace.sh

# Setup commands (in a target project with plugin installed)
/initial-setup    # First time: full setup wizard
/project-init     # New project: quick config
/project-sync     # After repo move: sync config

# Run projman commands
/sprint-plan      # Start sprint planning
/sprint-status    # Check progress
/review           # Pre-close code quality review
/test-check       # Verify tests before close
/sprint-close     # Complete sprint
```

## Repository Structure

```
leo-claude-mktplace/
├── .claude-plugin/
│   └── marketplace.json            # Marketplace manifest
├── mcp-servers/                    # SHARED MCP servers (v3.0.0+)
│   ├── gitea/                      # Gitea MCP (issues, PRs, wiki)
│   └── netbox/                     # NetBox MCP (CMDB)
├── plugins/
│   ├── projman/                    # Sprint management
│   │   ├── .claude-plugin/plugin.json
│   │   ├── .mcp.json
│   │   ├── mcp-servers/gitea -> ../../../mcp-servers/gitea  # SYMLINK
│   │   ├── commands/               # 12 commands (incl. setup)
│   │   ├── hooks/                  # SessionStart mismatch detection
│   │   ├── agents/                 # 4 agents
│   │   └── skills/label-taxonomy/
│   ├── git-flow/                   # Git workflow automation
│   │   ├── .claude-plugin/plugin.json
│   │   ├── commands/               # 8 commands
│   │   └── agents/
│   ├── pr-review/                  # Multi-agent PR review
│   │   ├── .claude-plugin/plugin.json
│   │   ├── .mcp.json
│   │   ├── mcp-servers/gitea -> ../../../mcp-servers/gitea  # SYMLINK
│   │   ├── commands/               # 6 commands (incl. setup)
│   │   ├── hooks/                  # SessionStart mismatch detection
│   │   └── agents/                 # 5 agents
│   ├── clarity-assist/             # Prompt optimization (NEW v3.0.0)
│   │   ├── .claude-plugin/plugin.json
│   │   ├── commands/               # 2 commands
│   │   └── agents/
│   ├── doc-guardian/               # Documentation drift detection
│   ├── code-sentinel/              # Security scanning & refactoring
│   ├── claude-config-maintainer/
│   ├── cmdb-assistant/
│   └── project-hygiene/
├── scripts/
│   ├── setup.sh, post-update.sh
│   └── validate-marketplace.sh     # Marketplace compliance validation
└── docs/
    ├── CANONICAL-PATHS.md          # Single source of truth for paths
    └── CONFIGURATION.md            # Centralized configuration guide
```

## CRITICAL: Rules You MUST Follow

### File Operations
- **NEVER** create files in repository root unless listed in "Allowed Root Files"
- **NEVER** modify `.gitignore` without explicit permission
- **ALWAYS** use `.scratch/` for temporary/exploratory work
- **ALWAYS** verify paths against `docs/CANONICAL-PATHS.md` before creating files

### Plugin Development
- **plugin.json MUST be in `.claude-plugin/` directory** (not plugin root)
- **Every plugin MUST be listed in marketplace.json**
- **MCP servers are SHARED at root** with symlinks from plugins
- **MCP server venv path**: `${CLAUDE_PLUGIN_ROOT}/mcp-servers/{name}/.venv/bin/python`
- **CLI tools forbidden** - Use MCP tools exclusively (never `tea`, `gh`, etc.)
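A quick way to check this wiring from the repository root, sketched with `projman` and the shared `gitea` server as the example:

```bash
# The plugin-side MCP server must be a symlink to the shared copy at repo root.
test -L plugins/projman/mcp-servers/gitea && readlink plugins/projman/mcp-servers/gitea
# Expected output: ../../../mcp-servers/gitea

# The server's venv interpreter must exist at the documented path.
test -x mcp-servers/gitea/.venv/bin/python && echo "gitea venv OK"
```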
### Hooks (Valid Events Only)
`PreToolUse`, `PostToolUse`, `UserPromptSubmit`, `SessionStart`, `SessionEnd`, `Notification`, `Stop`, `SubagentStop`, `PreCompact`

**INVALID:** `task-completed`, `file-changed`, `git-commit-msg-needed`

### Allowed Root Files
`CLAUDE.md`, `README.md`, `LICENSE`, `CHANGELOG.md`, `.gitignore`, `.env.example`

### Allowed Root Directories
`.claude/`, `.claude-plugin/`, `.claude-plugins/`, `.scratch/`, `docs/`, `hooks/`, `mcp-servers/`, `plugins/`, `scripts/`

## Architecture

### Four-Agent Model (projman)

| Agent | Personality | Responsibilities |
|-------|-------------|------------------|
| **Planner** | Thoughtful, methodical | Sprint planning, architecture analysis, issue creation, lesson search |
| **Orchestrator** | Concise, action-oriented | Sprint execution, parallel batching, Git operations, lesson capture |
| **Executor** | Implementation-focused | Code implementation, branch management, MR creation |
| **Code Reviewer** | Thorough, practical | Pre-close quality review, security scan, test verification |

### MCP Server Tools (Gitea)

| Category | Tools |
|----------|-------|
| Issues | `list_issues`, `get_issue`, `create_issue`, `update_issue`, `add_comment` |
| Labels | `get_labels`, `suggest_labels`, `create_label` |
| Milestones | `list_milestones`, `get_milestone`, `create_milestone`, `update_milestone` |
| Dependencies | `list_issue_dependencies`, `create_issue_dependency`, `get_execution_order` |
| Wiki | `list_wiki_pages`, `get_wiki_page`, `create_wiki_page`, `create_lesson`, `search_lessons` |
| **Pull Requests** | `list_pull_requests`, `get_pull_request`, `get_pr_diff`, `get_pr_comments`, `create_pr_review`, `add_pr_comment` *(NEW v3.0.0)* |
| Validation | `validate_repo_org`, `get_branch_protection` |

### Hybrid Configuration

| Level | Location | Purpose |
|-------|----------|---------|
| System | `~/.config/claude/gitea.env` | Credentials (GITEA_API_URL, GITEA_API_TOKEN) |
| Project | `.env` in project root | Repository specification (GITEA_ORG, GITEA_REPO) |

**Note:** `GITEA_ORG` is at project level since different projects may belong to different organizations.
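A sketch of the two layers (variable names as in the table; all values are placeholders):

```bash
# System level: credentials shared by every project on this machine.
mkdir -p ~/.config/claude
cat > ~/.config/claude/gitea.env <<'EOF'
GITEA_API_URL=https://gitea.example.com
GITEA_API_TOKEN=replace-with-your-token
EOF

# Project level: which org/repo this project maps to.
cat > .env <<'EOF'
GITEA_ORG=personal-projects
GITEA_REPO=my-project
EOF
```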
### Branch-Aware Security

| Branch Pattern | Mode | Capabilities |
|----------------|------|--------------|
| `development`, `feat/*` | Development | Full access |
| `staging` | Staging | Read-only code, can create issues |
| `main`, `master` | Production | Read-only, emergency only |

## Label Taxonomy

43 labels total: 27 organization + 16 repository

**Organization:** Agent/2, Complexity/3, Efforts/5, Priority/4, Risk/3, Source/4, Type/6
**Repository:** Component/9, Tech/7

Sync with `/labels-sync` command.

## Lessons Learned System

Stored in Gitea Wiki under `lessons-learned/sprints/`.

**Workflow:**
1. Orchestrator captures at sprint close via MCP tools
2. Planner searches at sprint start using `search_lessons`
3. Tags enable cross-project discovery

## Common Operations

### Adding a New Plugin

1. Create `plugins/{name}/.claude-plugin/plugin.json`
2. Add entry to `.claude-plugin/marketplace.json` with category, tags, license
3. Create `README.md` and `claude-md-integration.md`
4. If using MCP server, create symlink: `ln -s ../../../mcp-servers/{server} plugins/{name}/mcp-servers/{server}`
5. Run `./scripts/validate-marketplace.sh`
6. Update `CHANGELOG.md` (a consolidated sketch of these steps follows)
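Taken together, steps 1-6 look roughly like this sketch (the plugin name `my-plugin` is hypothetical, and the manifest skeleton omits the author, homepage, category, tags, and license fields a real entry carries):

```bash
# 1. Scaffold the plugin; plugin.json lives in .claude-plugin/, not the plugin root.
mkdir -p plugins/my-plugin/.claude-plugin
cat > plugins/my-plugin/.claude-plugin/plugin.json <<'EOF'
{
  "name": "my-plugin",
  "version": "0.1.0",
  "description": "Hypothetical example plugin"
}
EOF

# 3. Documentation files.
touch plugins/my-plugin/README.md plugins/my-plugin/claude-md-integration.md

# 4. Shared MCP server symlink (only if the plugin uses one).
mkdir -p plugins/my-plugin/mcp-servers
ln -s ../../../mcp-servers/gitea plugins/my-plugin/mcp-servers/gitea

# 5. Validate (steps 2 and 6 are edits to marketplace.json and CHANGELOG.md, not commands).
./scripts/validate-marketplace.sh
```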
### Adding a Command to projman

1. Create `plugins/projman/commands/{name}.md`
2. Update `plugins/projman/README.md`
3. Update marketplace description if significant

### Validation

```bash
./scripts/validate-marketplace.sh   # Validates all manifests
```

## Path Verification Protocol

**Before creating any file:**

1. Read `docs/CANONICAL-PATHS.md`
2. List all paths to be created/modified
3. Verify each against canonical paths
4. If not in canonical paths, STOP and ask

## Documentation Index

| Document | Purpose |
|----------|---------|
| `docs/CANONICAL-PATHS.md` | **Single source of truth** for paths |
| `docs/COMMANDS-CHEATSHEET.md` | All commands quick reference with workflow examples |
| `docs/CONFIGURATION.md` | Centralized setup guide |
| `docs/UPDATING.md` | Update guide for the marketplace |
| `plugins/projman/CONFIGURATION.md` | Quick reference (links to central) |
| `plugins/projman/README.md` | Projman full documentation |

## Versioning and Changelog Rules

### Version Display
**The marketplace version is displayed ONLY in the main `README.md` title.**

- Format: `# Leo Claude Marketplace - vX.Y.Z`
- Do NOT add version numbers to individual plugin documentation titles
- Do NOT add version numbers to configuration guides
- Do NOT add version numbers to CLAUDE.md or other docs

### Changelog Maintenance (MANDATORY)
**`CHANGELOG.md` is the authoritative source for version history.**

When releasing a new version:
1. Update main `README.md` title with new version
2. Update `CHANGELOG.md` with:
   - Version number and date: `## [X.Y.Z] - YYYY-MM-DD`
   - **Added**: New features, commands, files
   - **Changed**: Modifications to existing functionality
   - **Fixed**: Bug fixes
   - **Removed**: Deleted features, files, deprecated items
3. Update `marketplace.json` metadata version
4. Update plugin `plugin.json` versions if plugin-specific changes

### Version Format
- Follow [Semantic Versioning](https://semver.org/): MAJOR.MINOR.PATCH
- MAJOR: Breaking changes
- MINOR: New features, backward compatible
- PATCH: Bug fixes, minor improvements

---

**Last Updated:** 2026-01-20
.doc-guardian-queue (new file, +27 lines)

# Doc Guardian Queue - cleared after sync on 2026-02-02
2026-02-02T11:41:00 | .claude-plugin | /home/lmiranda/claude-plugins-work/.claude-plugin/marketplace.json | CLAUDE.md .claude-plugin/marketplace.json
2026-02-02T13:35:48 | skills | /home/lmiranda/claude-plugins-work/plugins/projman/skills/sprint-approval.md | README.md
2026-02-02T13:36:03 | commands | /home/lmiranda/claude-plugins-work/plugins/projman/commands/sprint-start.md | docs/COMMANDS-CHEATSHEET.md README.md
2026-02-02T13:36:16 | agents | /home/lmiranda/claude-plugins-work/plugins/projman/agents/orchestrator.md | README.md CLAUDE.md
2026-02-02T13:39:07 | commands | /home/lmiranda/claude-plugins-work/plugins/projman/commands/rfc.md | docs/COMMANDS-CHEATSHEET.md README.md
2026-02-02T13:39:15 | commands | /home/lmiranda/claude-plugins-work/plugins/projman/commands/setup.md | docs/COMMANDS-CHEATSHEET.md README.md
2026-02-02T13:39:32 | skills | /home/lmiranda/claude-plugins-work/plugins/projman/skills/rfc-workflow.md | README.md
2026-02-02T13:43:14 | skills | /home/lmiranda/claude-plugins-work/plugins/projman/skills/rfc-templates.md | README.md
2026-02-02T13:44:55 | skills | /home/lmiranda/claude-plugins-work/plugins/projman/skills/sprint-lifecycle.md | README.md
2026-02-02T13:45:04 | skills | /home/lmiranda/claude-plugins-work/plugins/projman/skills/label-taxonomy/labels-reference.md | README.md
2026-02-02T13:45:14 | commands | /home/lmiranda/claude-plugins-work/plugins/projman/commands/sprint-plan.md | docs/COMMANDS-CHEATSHEET.md README.md
2026-02-02T13:45:48 | commands | /home/lmiranda/claude-plugins-work/plugins/projman/commands/review.md | docs/COMMANDS-CHEATSHEET.md README.md
2026-02-02T13:46:07 | commands | /home/lmiranda/claude-plugins-work/plugins/projman/commands/sprint-close.md | docs/COMMANDS-CHEATSHEET.md README.md
2026-02-02T13:46:21 | commands | /home/lmiranda/claude-plugins-work/plugins/projman/commands/sprint-status.md | docs/COMMANDS-CHEATSHEET.md README.md
2026-02-02T13:46:38 | agents | /home/lmiranda/claude-plugins-work/plugins/projman/agents/planner.md | README.md CLAUDE.md
2026-02-02T13:46:57 | agents | /home/lmiranda/claude-plugins-work/plugins/projman/agents/code-reviewer.md | README.md CLAUDE.md
2026-02-02T13:49:13 | commands | /home/lmiranda/claude-plugins-work/plugins/viz-platform/commands/design-gate.md | docs/COMMANDS-CHEATSHEET.md README.md
2026-02-02T13:49:24 | commands | /home/lmiranda/claude-plugins-work/plugins/data-platform/commands/data-gate.md | docs/COMMANDS-CHEATSHEET.md README.md
2026-02-02T13:49:35 | skills | /home/lmiranda/claude-plugins-work/plugins/projman/skills/domain-consultation.md | README.md
2026-02-02T13:50:04 | mcp-servers | /home/lmiranda/claude-plugins-work/mcp-servers/contract-validator/mcp_server/validation_tools.py | docs/COMMANDS-CHEATSHEET.md CLAUDE.md
2026-02-02T13:50:59 | mcp-servers | /home/lmiranda/claude-plugins-work/mcp-servers/contract-validator/mcp_server/server.py | docs/COMMANDS-CHEATSHEET.md CLAUDE.md
2026-02-02T13:51:32 | mcp-servers | /home/lmiranda/claude-plugins-work/mcp-servers/contract-validator/tests/test_validation_tools.py | docs/COMMANDS-CHEATSHEET.md CLAUDE.md
2026-02-02T13:51:49 | skills | /home/lmiranda/claude-plugins-work/plugins/contract-validator/skills/validation-rules.md | README.md
2026-02-02T13:52:07 | skills | /home/lmiranda/claude-plugins-work/plugins/contract-validator/skills/mcp-tools-reference.md | README.md
2026-02-02T13:59:09 | skills | /home/lmiranda/claude-plugins-work/plugins/projman/skills/progress-tracking.md | README.md
2026-02-02T14:01:34 | commands | /home/lmiranda/claude-plugins-work/plugins/projman/commands/test.md | docs/COMMANDS-CHEATSHEET.md README.md
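Each entry is pipe-delimited: timestamp, change category, source file, and the docs flagged for sync. A quick tally by category, sketched with standard tools:

```bash
# Count queued drift events per category (field 2 of the pipe-delimited log).
grep -v '^#' .doc-guardian-queue | cut -d'|' -f2 | sort | uniq -c
```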
.gitignore (vendored, +2 lines)
@@ -31,6 +31,8 @@ venv/
 ENV/
 env/
 .venv/
+.venv
+**/.venv

 # PyCharm
 .idea/
.mcp.json (new file, +24 lines)

{
  "mcpServers": {
    "gitea": {
      "command": "./mcp-servers/gitea/run.sh",
      "args": []
    },
    "netbox": {
      "command": "./mcp-servers/netbox/run.sh",
      "args": []
    },
    "viz-platform": {
      "command": "./mcp-servers/viz-platform/run.sh",
      "args": []
    },
    "data-platform": {
      "command": "./mcp-servers/data-platform/run.sh",
      "args": []
    },
    "contract-validator": {
      "command": "./mcp-servers/contract-validator/run.sh",
      "args": []
    }
  }
}
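Each entry points at a `run.sh` launcher, so a quick sanity check (assumes `jq`) is to confirm every configured launcher exists and is executable:

```bash
# For every MCP server configured in .mcp.json, report whether its launcher is runnable.
jq -r '.mcpServers[].command' .mcp.json | while read -r cmd; do
  if [ -x "$cmd" ]; then echo "OK      $cmd"; else echo "MISSING $cmd"; fi
done
```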
CHANGELOG.md
@@ -1,9 +1,760 @@
 # Changelog

-All notable changes to support-claude-mktplace will be documented in this file.
+All notable changes to the Leo Claude Marketplace will be documented in this file.

 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).

The rest of the hunk adds the version history below.

## [Unreleased]

### Added

#### Plugin Installation Scripts
New scripts for installing marketplace plugins into consumer projects:

- **`scripts/install-plugin.sh`** — Install a plugin to a consumer project
  - Adds MCP server entry to target's `.mcp.json` (if plugin has MCP server)
  - Appends integration snippet to target's `CLAUDE.md`
  - Idempotent: safe to run multiple times
  - Validates plugin exists and target path is valid

- **`scripts/uninstall-plugin.sh`** — Remove a plugin from a consumer project
  - Removes MCP server entry from `.mcp.json`
  - Removes integration section from `CLAUDE.md`

- **`scripts/list-installed.sh`** — Show installed plugins in a project
  - Lists fully installed, partially installed, and available plugins
  - Shows plugin versions and descriptions

**Usage:**
```bash
./scripts/install-plugin.sh data-platform ~/projects/personal-portfolio
./scripts/list-installed.sh ~/projects/personal-portfolio
./scripts/uninstall-plugin.sh data-platform ~/projects/personal-portfolio
```

**Documentation:** `docs/CONFIGURATION.md` updated with "Installing Plugins to Consumer Projects" section.

### Fixed

#### Plugin Installation Scripts — MCP Mapping & Section Markers

**MCP Server Mapping:**
- Added `mcp_servers` field to plugin.json for plugins that use shared MCP servers
- `projman` and `pr-review` now correctly install `gitea` MCP server
- `cmdb-assistant` now correctly installs `netbox` MCP server
- Scripts read MCP server names from plugin.json instead of assuming plugin name = server name

**CLAUDE.md Section Markers:**
- Install script now wraps integration content with HTML comment markers:
  `<!-- BEGIN marketplace-plugin: {name} -->` and `<!-- END marketplace-plugin: {name} -->`
- Uninstall script uses markers for precise section removal (no more code block false positives)
- Backward compatible: falls back to legacy header detection for pre-marker installations
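With those markers, an installed section in a consumer `CLAUDE.md` would look roughly like this sketch (the snippet body is hypothetical; only the marker format is from the scripts):

```bash
# Hypothetical consumer CLAUDE.md section after installing data-platform;
# uninstall-plugin.sh locates the section by these BEGIN/END markers.
cat >> CLAUDE.md <<'EOF'
<!-- BEGIN marketplace-plugin: data-platform -->
## data-platform
Data engineering tools with pandas, PostgreSQL/PostGIS, and dbt integration.
<!-- END marketplace-plugin: data-platform -->
EOF
```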
**Plugins updated with `mcp_servers` field:**
- `projman` → `["gitea"]`
- `pr-review` → `["gitea"]`
- `cmdb-assistant` → `["netbox"]`
- `data-platform` → `["data-platform"]`
- `viz-platform` → `["viz-platform"]`
- `contract-validator` → `["contract-validator"]`

#### Agent Model Selection

Per-agent model selection using Claude Code's now-supported `model` frontmatter field.

- All 25 marketplace agents assigned appropriate model (`sonnet`, `haiku`, or `inherit`)
- Model assignment based on reasoning depth, tool complexity, and latency requirements
- Documentation added to `CLAUDE.md` and `docs/CONFIGURATION.md`

**Supported values:** `sonnet` (default), `opus`, `haiku`, `inherit`

**Model assignments:**
| Model | Agent Types |
|-------|-------------|
| sonnet | Planner, Orchestrator, Executor, Code Reviewer, Coordinator, Security Reviewers, Data Advisor, Design Reviewer, etc. |
| haiku | Maintainability Auditor, Test Validator, Component Check, Theme Setup, Git Assistant, Data Ingestion, Agent Check |
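A sketch of the resulting frontmatter (the agent file name is hypothetical; `name`, `description`, and `model` are the standardized fields noted below):

```bash
# Hypothetical agent file showing the standardized frontmatter fields.
cat > plugins/projman/agents/example-auditor.md <<'EOF'
---
name: example-auditor
description: Illustrative low-latency audit agent
model: haiku
---
EOF
```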
### Fixed

#### Agent Frontmatter Standardization

- Fixed viz-platform and data-platform agents using non-standard `agent:` field (now `name:`)
- Removed non-standard `triggers:` field from domain agents (trigger info already in agent body)
- Added missing frontmatter to 13 agents across pr-review, viz-platform, contract-validator, clarity-assist, git-flow, doc-guardian, code-sentinel, cmdb-assistant, and data-platform
- All 25 agents now have consistent `name`, `description`, and `model` fields

---

## [5.8.0] - 2026-02-02

### Added

#### claude-config-maintainer v1.2.0 - Settings Audit Feature

New commands for auditing and optimizing `settings.local.json` permission configurations:

- **`/config-audit-settings`** — Audit `settings.local.json` permissions with 100-point scoring across redundancy, coverage, safety alignment, and profile fit
- **`/config-optimize-settings`** — Apply permission optimizations with dry-run, named profiles (`conservative`, `reviewed`, `autonomous`), and consolidation modes
- **`/config-permissions-map`** — Generate Mermaid diagram of review layer coverage and permission gaps
- **`skills/settings-optimization.md`** — Comprehensive skill for permission pattern analysis, consolidation rules, review-layer-aware recommendations, and named profiles

**Key Features:**
- Settings Efficiency Score (100 points) alongside existing CLAUDE.md score
- Review layer verification — agent reads `hooks/hooks.json` from installed plugins before recommending auto-allow patterns
- Three named profiles: `conservative` (prompts for most writes), `reviewed` (for projects with ≥2 review layers), `autonomous` (sandboxed environments)
- Pattern consolidation detection: duplicates, subsets, merge candidates, stale entries, conflicts

#### Projman Hardening Sprint
Targeted improvements to safety gates, command structure, lifecycle tracking, and cross-plugin contracts.

**Sprint Lifecycle State Machine:**
- New `skills/sprint-lifecycle.md` - defines valid states and transitions via milestone metadata
- States: idle -> Sprint/Planning -> Sprint/Executing -> Sprint/Reviewing -> idle
- All sprint commands check and set lifecycle state on entry/exit
- Out-of-order calls produce warnings with guidance, `--force` override available

**Sprint Dispatch Log:**
- Orchestrator now maintains a structured dispatch log during execution
- Records task dispatch, completion, failure, gate checks, and resume events
- Enables timeline reconstruction after interrupted sessions

**Gate Contract Versioning:**
- Gate commands (`/design-gate`, `/data-gate`) declare `gate_contract: v1` in frontmatter
- `domain-consultation.md` Gate Command Reference includes expected contract version
- `validate_workflow_integration` now checks contract version compatibility
- Mismatch produces WARNING, missing contract produces INFO suggestion

**Shared Visual Output Skill:**
- New `skills/visual-output.md` - single source of truth for projman visual headers
- All 4 agent files reference the skill instead of inline templates
- Phase Registry maps agents to emoji and phase names

### Changed

**Sprint Approval Gate Hardened:**
- Approval is now a hard block, not a warning (was "recommended", now required)
- `--force` flag added to bypass in emergencies (logged to milestone)
- Consistent language across sprint-approval.md, sprint-start.md, and orchestrator.md

**RFC Commands Normalized:**
- 5 individual commands (`/rfc-create`, `/rfc-list`, `/rfc-review`, `/rfc-approve`, `/rfc-reject`) consolidated into `/rfc create|list|review|approve|reject`
- `/clear-cache` absorbed into `/setup --clear-cache`
- Command count reduced from 17 to 12

**`/test` Command Documentation Expanded:**
- Sprint integration section (pre-close verification workflow)
- Concrete usage examples for all modes
- Edge cases table
- DO NOT rules for both modes

### Removed

- `plugins/projman/commands/rfc-create.md` (replaced by `/rfc create`)
- `plugins/projman/commands/rfc-list.md` (replaced by `/rfc list`)
- `plugins/projman/commands/rfc-review.md` (replaced by `/rfc review`)
- `plugins/projman/commands/rfc-approve.md` (replaced by `/rfc approve`)
- `plugins/projman/commands/rfc-reject.md` (replaced by `/rfc reject`)
- `plugins/projman/commands/clear-cache.md` (replaced by `/setup --clear-cache`)

---

## [5.7.1] - 2026-02-02

### Added
- **contract-validator**: New `validate_workflow_integration` MCP tool — validates domain plugins expose required advisory interfaces (gate command, review command, advisory agent)
- **contract-validator**: New `MISSING_INTEGRATION` issue type for workflow integration validation

### Fixed
- `scripts/setup.sh` banner version updated from v5.1.0 to v5.7.1

### Reverted
- **marketplace.json**: Removed `integrates_with` field — Claude Code schema does not support custom plugin fields (causes marketplace load failure)

---

## [5.7.0] - 2026-02-02

### Added
- **data-platform**: New `data-advisor` agent for data integrity, schema, and dbt compliance validation
- **data-platform**: New `data-integrity-audit.md` skill defining audit rules, severity levels, and scanning strategies
- **data-platform**: New `/data-gate` command for binary pass/fail data integrity gates (projman integration)
- **data-platform**: New `/data-review` command for comprehensive data integrity audits

### Changed
- Domain Advisory Pattern now fully operational for both Viz and Data domains
- projman orchestrator `Domain/Data` gates now resolve to live `/data-gate` command (previously fell through to "gate unavailable" warning)

---

## [5.6.0] - 2026-02-01

### Added
- **Domain Advisory Pattern**: Cross-plugin integration enabling projman to consult domain-specific plugins during sprint lifecycle
- **projman**: New `domain-consultation.md` skill for domain detection and gate protocols
- **viz-platform**: New `design-reviewer` agent for design system compliance auditing
- **viz-platform**: New `design-system-audit.md` skill defining audit rules and severity levels
- **viz-platform**: New `/design-review` command for detailed design system audits
- **viz-platform**: New `/design-gate` command for binary pass/fail validation gates
- **Labels**: New `Domain/Viz` and `Domain/Data` labels for domain routing

### Changed
- **projman planner**: Now loads domain-consultation skill and performs domain detection during planning
- **projman orchestrator**: Now runs domain gates before marking Domain/* labeled issues as complete

---

## [5.5.0] - 2026-02-01

### Added

#### RFC System for Feature Tracking
Wiki-based Request for Comments (RFC) system for capturing, reviewing, and tracking feature ideas through their lifecycle.

**New Commands (projman):**
- `/rfc-create` - Create new RFC from conversation or clarified specification
- `/rfc-list` - List all RFCs grouped by status (Draft, Review, Approved, Implementing, Implemented, Rejected, Stale)
- `/rfc-review` - Submit Draft RFC for maintainer review
- `/rfc-approve` - Approve RFC, making it available for sprint planning
- `/rfc-reject` - Reject RFC with documented reason

**RFC Lifecycle:**
- Draft → Review → Approved → Implementing → Implemented
- Terminal states: Rejected, Superseded
- Stale: Drafts with no activity >90 days

**Sprint Integration:**
- `/sprint-plan` now detects approved RFCs and offers selection
- `/sprint-close` updates RFC status to Implemented on completion
- RFC-Index wiki page auto-maintained with status sections

**Clarity-Assist Integration:**
- Vagueness hook now detects feature request patterns
- Suggests `/rfc-create` for feature ideas
- `/clarify` offers RFC creation after delivering clarified spec

**New MCP Tool:**
- `allocate_rfc_number` - Allocates next sequential RFC number

**New Skills:**
- `skills/rfc-workflow.md` - RFC lifecycle and state transitions
- `skills/rfc-templates.md` - RFC page template specifications

### Changed

#### Sprint 8: Hook Efficiency Quick Wins
Performance optimizations for plugin hooks to reduce overhead on every command.

**Changes:**
- **viz-platform:** Remove SessionStart hook that only echoed "loaded" (zero value)
- **git-flow:** Add early exit to `branch-check.sh` for non-git commands (skip JSON parsing)
- **git-flow:** Add early exit to `commit-msg-check.sh` for non-git commands (skip Python spawn)
- **project-hygiene:** Add 60-second cooldown to `cleanup.sh` (reduce find operations)
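The guard is a cheap string test ahead of any expensive work; a sketch of the pattern, not the actual `branch-check.sh` source:

```bash
#!/usr/bin/env bash
# Sketch of the early-exit pattern: read the hook payload once and bail out
# before any JSON parsing unless it plausibly contains a git command.
input="$(cat)"
case "$input" in
  *git*) ;;      # might be a git command: fall through to full processing
  *) exit 0 ;;   # definitely not git: exit immediately
esac
# ...full JSON parsing and branch checks would follow here...
```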
|
||||||
|
|
||||||
|
**Impact:** Hooks now exit immediately for 90%+ of Bash commands that don't need processing.
|
||||||
|
|
||||||
|
**Issues:** #321, #322, #323, #324
|
||||||
|
**PR:** #334
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## [5.4.1] - 2026-01-30
|
||||||
|
|
||||||
|
### Removed
|
||||||
|
|
||||||
|
#### Multi-Model Agent Support (REVERTED)
|
||||||
|
|
||||||
|
**Reason:** Claude Code does not support `defaultModel` in plugin.json or `model` in agent frontmatter. The schema validation rejects these as "Unrecognized key".
|
||||||
|
|
||||||
|
**Removed:**
|
||||||
|
- `defaultModel` field from all plugin.json files (6 plugins)
|
||||||
|
- `model` field references from agent frontmatter
|
||||||
|
- `docs/MODEL-RECOMMENDATIONS.md` - Deleted entirely
|
||||||
|
- Model configuration sections from `docs/CONFIGURATION.md` and `CLAUDE.md`
|
||||||
|
|
||||||
|
**Lesson:** Do not implement features without verifying they are supported by Claude Code's plugin schema.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## [5.4.0] - 2026-01-28 [REVERTED]
|
||||||
|
|
||||||
|
### Added (NOW REMOVED - See 5.4.1)
|
||||||
|
|
||||||
|
#### Sprint 7: Multi-Model Agent Support
|
||||||
|
~~Configurable model selection for agents with inheritance chain.~~
|
||||||
|
|
||||||
|
**This feature was reverted in 5.4.1 - Claude Code does not support these fields.**
|
||||||
|
|
||||||
|
Original sprint work:
|
||||||
|
- Issues: #302, #303, #304, #305, #306
|
||||||
|
- PRs: #307, #308
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## [5.3.0] - 2026-01-28
|
||||||
|
|
||||||
|
### Added
|
||||||
|
|
||||||
|
#### Sprint 6: Visual Branding Overhaul
|
||||||
|
Consistent visual headers and progress tracking across all plugins.
|
||||||
|
|
||||||
|
**Visual Output Headers (109 files):**
|
||||||
|
- **Projman**: Double-line headers (╔═╗) with phase indicators (🎯 PLANNING, ⚡ EXECUTION, 🏁 CLOSING)
|
||||||
|
- **Other Plugins**: Single-line headers (┌─┐) with plugin icons
|
||||||
|
- **All 23 agents** updated with Visual Output Requirements section
|
||||||
|
- **All 86 commands** updated with Visual Output section and header templates
|
||||||
|
|
||||||
|
**Plugin Icon Registry:**
|
||||||
|
| Plugin | Icon |
|
||||||
|
|--------|------|
|
||||||
|
| projman | 📋 |
|
||||||
|
| code-sentinel | 🔒 |
|
||||||
|
| doc-guardian | 📝 |
|
||||||
|
| pr-review | 🔍 |
|
||||||
|
| clarity-assist | 💬 |
|
||||||
|
| git-flow | 🔀 |
|
||||||
|
| cmdb-assistant | 🖥️ |
|
||||||
|
| data-platform | 📊 |
|
||||||
|
| viz-platform | 🎨 |
|
||||||
|
| contract-validator | ✅ |
|
||||||
|
| claude-config-maintainer | ⚙️ |
|
||||||
|
|
||||||
|
**Wiki Branding Specification (4 pages):**
|
||||||
|
- [branding/visual-spec](https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/wiki/branding%2Fvisual-spec) - Central specification
|
||||||
|
- [branding/plugin-registry](https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/wiki/branding%2Fplugin-registry) - Icons and styles
|
||||||
|
- [branding/header-templates](https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/wiki/branding%2Fheader-templates) - Copy-paste templates
|
||||||
|
- [branding/progress-templates](https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/wiki/branding%2Fprogress-templates) - Sprint progress blocks
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
- **Docs:** Version sync - CLAUDE.md, marketplace.json, README.md now consistent
|
||||||
|
- **Docs:** Added 18 missing commands from Sprint 4 & 5 to README.md and COMMANDS-CHEATSHEET.md
|
||||||
|
- **MCP:** Registered `/sprint-diagram` as invokable skill
|
||||||
|
|
||||||
|
**Sprint Completed:**
|
||||||
|
- Milestone: Sprint 6 - Visual Branding Overhaul (closed 2026-01-28)
|
||||||
|
- Issues: #272, #273, #274, #275, #276, #277, #278
|
||||||
|
- PRs: #284, #285
|
||||||
|
- Wiki: [Sprint 6 Lessons](https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/wiki/lessons/sprints/sprint-6---visual-branding-and-documentation-maintenance)
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## [5.2.0] - 2026-01-28

### Added

#### Sprint 5: Documentation (V5.2.0 Plugin Enhancements)

Documentation and guides for the plugin enhancements initiative.

**git-flow v1.2.0:**

- **Branching Strategy Guide** (`docs/BRANCHING-STRATEGY.md`) - Complete documentation of the `development -> staging -> main` promotion flow, with Mermaid diagrams

**clarity-assist v1.2.0:**

- **ND Support Guide** (`docs/ND-SUPPORT.md`) - Documentation of neurodivergent accommodations, features, and usage examples

**Gitea MCP Server:**

- **`update_issue` milestone parameter** - Milestones can now be assigned or changed programmatically

**Sprint Completed:**

- Milestone: Sprint 5 - Documentation (closed 2026-01-28)
- Issues: #266, #267, #268, #269
- Wiki: [Change V5.2.0: Plugin Enhancements (Sprint 5 Documentation)](https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/wiki/Change-V5.2.0%3A-Plugin-Enhancements-%28Sprint-5-Documentation%29)

---
#### Sprint 4: Commands (V5.2.0 Plugin Enhancements)

Implementation of 18 new user-facing commands across 8 plugins.

**projman v3.3.0:**

- **`/sprint-diagram`** - Generate a Mermaid diagram of sprint issues with dependencies and status

**pr-review v1.1.0:**

- **`/pr-diff`** - Formatted diff with inline review comments and annotations
- **Confidence threshold config** - `PR_REVIEW_CONFIDENCE_THRESHOLD` env var (default: 0.7); see the sketch after this list
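A minimal sketch of tuning that threshold. Only the variable name and the 0.7 default are documented above; that it is read from the project's `.env` file is an assumption here:

```bash
# Hypothetical placement - only the variable name and default (0.7) are documented above
echo 'PR_REVIEW_CONFIDENCE_THRESHOLD=0.85' >> .env   # report only higher-confidence findings
```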
**data-platform v1.2.0:**

- **`/data-quality`** - DataFrame quality checks (nulls, duplicates, types, outliers) with pass/warn/fail scoring
- **`/lineage-viz`** - dbt lineage visualization as Mermaid diagrams
- **`/dbt-test`** - Formatted dbt test runner with summary and failure details

**viz-platform v1.1.0:**

- **`/chart-export`** - Export charts to PNG, SVG, PDF via kaleido
- **`/accessibility-check`** - Color-blind validation (WCAG contrast ratios)
- **`/breakpoints`** - Responsive layout breakpoint configuration
- **New MCP tools**: `chart_export`, `accessibility_validate_colors`, `accessibility_validate_theme`, `accessibility_suggest_alternative`, `layout_set_breakpoints`
- **New dependency**: kaleido>=0.2.1 for chart rendering (install sketch after this list)
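For reference, kaleido is Plotly's standard static-image backend, which is why it appears as the export dependency above. A minimal local setup looks like this (the plugin's own venv bootstrap may differ):

```bash
pip install 'kaleido>=0.2.1' plotly
# Plotly's write_image() renders PNG/SVG/PDF through kaleido
python -c "import plotly.express as px; px.line(y=[1, 3, 2]).write_image('chart.png')"
```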
**contract-validator v1.2.0:**

- **`/dependency-graph`** - Mermaid visualization of plugin dependencies with data flow

**doc-guardian v1.1.0:**

- **`/changelog-gen`** - Generate a changelog from conventional commits
- **`/doc-coverage`** - Documentation coverage metrics by function/class
- **`/stale-docs`** - Flag documentation that is behind code changes

**claude-config-maintainer v1.1.0:**

- **`/config-diff`** - Track CLAUDE.md changes over time with behavioral impact analysis
- **`/config-lint`** - 31 lint rules for CLAUDE.md (security, structure, content, format, best practices)

**cmdb-assistant v1.2.0:**

- **`/cmdb-topology`** - Infrastructure topology diagrams (rack, network, site views)
- **`/change-audit`** - NetBox audit trail queries with filtering
- **`/ip-conflicts`** - Detect IP conflicts and overlapping prefixes

**Sprint Completed:**

- Milestone: Sprint 4 - Commands (closed 2026-01-28)
- Issues: #241-#258 (18/18 closed)
- Wiki: [Change V5.2.0: Plugin Enhancements (Sprint 4 Commands)](https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/wiki/Change-V5.2.0%3A-Plugin-Enhancements-%28Sprint-4-Commands%29)
- Lessons: [Sprint 4 - Plugin Commands Implementation](https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/wiki/lessons/sprints/sprint-4---plugin-commands-implementation)
### Fixed

- **MCP:** Project directory detection - all run.sh scripts now capture `CLAUDE_PROJECT_DIR` from PWD before changing directories
- **Docs:** Added Gitea auto-close behavior and MCP session restart notes to DEBUGGING-CHECKLIST.md

---
#### Sprint 3: Hooks (V5.2.0 Plugin Enhancements)

Implementation of 6 foundational hooks across 4 plugins.

**git-flow v1.1.0:**

- **Commit message enforcement hook** - PreToolUse hook validates conventional commit format on all `git commit` commands (not just `/commit`). Blocks invalid commits with format guidance.
- **Branch name validation hook** - PreToolUse hook validates branch naming on `git checkout -b` and `git switch -c`. Enforces `type/description` format, lowercase, max 50 chars. (A sketch of both checks follows this list.)
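A minimal sketch of the validation logic those two hooks perform. The exact regexes, the accepted type list, and the blocking exit code are assumptions here; the shipped hooks may accept more types or emit different guidance:

```bash
#!/usr/bin/env bash
# Hypothetical simplification of the git-flow PreToolUse checks

msg="$1"      # candidate commit message
branch="$2"   # candidate branch name

# Conventional commit shape: type(optional-scope): description
if ! grep -qE '^(feat|fix|docs|style|refactor|test|chore)(\([a-z0-9-]+\))?: .+' <<<"$msg"; then
  echo "[git-flow] Commit message must follow conventional commit format" >&2
  exit 2  # non-zero to block the tool call
fi

# Branch shape: type/description, lowercase, max 50 chars
if ! grep -qE '^[a-z]+/[a-z0-9][a-z0-9-]*$' <<<"$branch" || [ "${#branch}" -gt 50 ]; then
  echo "[git-flow] Branch name must be type/description, lowercase, max 50 chars" >&2
  exit 2
fi
```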
**clarity-assist v1.1.0:**

- **Vagueness detection hook** - UserPromptSubmit hook detects vague prompts and suggests `/clarify` when ambiguity, missing context, or unclear scope is detected.

**data-platform v1.1.0:**

- **Schema diff detection hook** - PostToolUse hook monitors edits to schema files (dbt models, SQL migrations). Warns on breaking changes: column removal, type narrowing, constraint addition. (A simplified sketch follows this list.)
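To illustrate the idea only: one cheap way to surface candidate breaking changes is to look at lines removed from schema files in the working tree. The real hook inspects the edited file directly and classifies the change; this grep-level sketch just flags removed lines for review:

```bash
# Hedged sketch: list removed lines in SQL schema files as potential breaking changes
git diff -U0 -- '*.sql' \
  | grep -E '^-[^-]' \
  && echo "[data-platform] Removed schema lines detected - check for dropped columns or narrowed types" >&2
```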
**contract-validator v1.1.0:**

- **SessionStart auto-validate hook** - Smart validation that runs only when plugin files have changed since the last check. Detects interface compatibility issues at session start.
- **Breaking change detection hook** - PostToolUse hook monitors plugin interface files (README.md, plugin.json). Warns when changes would break consumers.

**Sprint Completed:**

- Milestone: Sprint 3 - Hooks (closed 2026-01-28)
- Issues: #225, #226, #227, #228, #229, #230
- Wiki: [Change V5.2.0: Plugin Enhancements Proposal](https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/wiki/Change-V5.2.0:-Plugin-Enhancements-Proposal)
- Lessons: Background agent permissions, agent runaway detection, MCP branch detection bug

### Known Issues

- **MCP Bug #231:** Branch detection in the Gitea MCP runs from the installed plugin directory, not the user's project directory. Workaround: close issues via the Gitea web UI.

---
#### Gitea MCP Server - create_pull_request Tool

- **`create_pull_request`**: Create new pull requests via MCP
  - Parameters: title, body, head (source branch), base (target branch), labels
  - Branch-aware security: only allowed on development/feature branches
  - Completes the PR lifecycle (previously only list/get/review/comment existed)
#### cmdb-assistant v1.1.0 - Data Quality Validation

- **SessionStart Hook**: Tests NetBox API connectivity at session start
  - Warns if VMs exist without a site assignment
  - Warns if devices exist without a platform
  - Non-blocking: displays a warning, doesn't prevent work
- **PreToolUse Hook**: Validates input parameters before VM/device operations
  - Warns about missing site, tenant, platform
  - Non-blocking: suggests best practices without blocking
- **`/cmdb-audit` Command**: Comprehensive data quality analysis
  - Scopes: all, vms, devices, naming, roles
  - Identifies Critical/High/Medium/Low issues
  - Provides prioritized remediation recommendations
- **`/cmdb-register` Command**: Register the current machine in NetBox
  - Discovers system info: hostname, platform, hardware, network interfaces
  - Discovers running apps: Docker containers, systemd services
  - Creates the device with interfaces and IPs, and sets the primary IP
  - Creates a cluster and VMs for Docker containers
- **`/cmdb-sync` Command**: Sync machine state with NetBox
  - Compares current state with the NetBox record
  - Shows a diff of changes (interfaces, IPs, containers)
  - Updates with user confirmation
  - Supports --full and --dry-run flags
- **NetBox Best Practices Skill**: Reference documentation
  - Dependency order for object creation
  - Naming conventions (`{role}-{site}-{number}`, `{env}-{app}-{number}`)
  - Role consolidation guidance
  - Site/tenant/platform assignment requirements
- **Agent Enhancement**: Updated the cmdb-assistant agent with validation requirements
  - Proactive suggestions for missing fields
  - Naming convention checks
  - Dependency order enforcement
  - Duplicate prevention

---
## [5.0.0] - 2026-01-26

### Added

#### Sprint 1: viz-platform Plugin ✅ Completed

- **viz-platform** v1.0.0 - Visualization tools with Dash Mantine Components validation and theming
  - **DMC Tools** (3 tools): `list_components`, `get_component_props`, `validate_component`
    - Version-locked component registry prevents Claude from hallucinating invalid props
    - Static JSON registry approach for fast, deterministic validation
  - **Chart Tools** (2 tools): `chart_create`, `chart_configure_interaction`
    - Plotly-based visualization with theme token support
  - **Layout Tools** (5 tools): `layout_create`, `layout_add_filter`, `layout_set_grid`, `layout_get`, `layout_add_section`
    - Dashboard composition with a responsive grid system
  - **Theme Tools** (6 tools): `theme_create`, `theme_extend`, `theme_validate`, `theme_export_css`, `theme_list`, `theme_activate`
    - Design-token-based theming system
    - Dual storage: user-level (`~/.config/claude/themes/`) and project-level
  - **Page Tools** (5 tools): `page_create`, `page_add_navbar`, `page_set_auth`, `page_list`, `page_get_app_config`
    - Multi-page Dash app structure generation
  - **Commands**: `/chart`, `/dashboard`, `/theme`, `/theme-new`, `/theme-css`, `/component`, `/initial-setup`
  - **Agents**: `theme-setup`, `layout-builder`, `component-check`
  - **SessionStart Hook**: DMC version check (non-blocking)
  - **Tests**: 94 tests passing
    - config.py: 82% coverage
    - component_registry.py: 92% coverage
    - dmc_tools.py: 88% coverage
    - chart_tools.py: 68% coverage
    - theme_tools.py: 99% coverage

**Sprint Completed:**

- Milestone: Sprint 1 - viz-platform Plugin (closed 2026-01-26)
- Issues: #170-#182 (13/13 closed)
- Wiki: [Sprint-1-viz-platform-Implementation-Plan](https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/wiki/Sprint-1-viz-platform-Implementation-Plan)
- Lessons: [sprint-1---viz-platform-plugin-implementation](https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/wiki/lessons/sprints/sprint-1---viz-platform-plugin-implementation)
- Reference: `docs/changes/CHANGE_V04_0_0_PROPOSAL_ORIGINAL.md` (Phases 4-5)

---
## [4.1.0] - 2026-01-26

### Added

- **projman:** Wiki-based planning workflow enhancement (V04.1.0)
  - Flexible input source detection in `/sprint-plan` (file, wiki, or conversation)
  - Wiki proposal and implementation page creation during sprint planning
  - Wiki reference linking in created issues
  - Wiki status updates in `/sprint-close` (Implemented/Partial/Failed)
  - Metadata section in lessons learned with an implementation link for traceability
  - New `/proposal-status` command for viewing the proposal/implementation tree
- **projman:** `/suggest-version` command - Analyzes the CHANGELOG and recommends a semantic version bump
- **projman:** SessionStart hook now suggests sprint planning when open issues exist without a milestone
- **projman:** SessionStart hook now warns about unreleased CHANGELOG entries

### Changed

- **doc-guardian:** Hook now tracks documentation dependencies and queues the specific files needing updates
  - Outputs which specific docs need updating (e.g., "commands changed → update needed: docs/COMMANDS-CHEATSHEET.md README.md")
  - Maintains a queue file (`.doc-guardian-queue`) for batch processing (see the sketch after this list)
- **docs:** COMMANDS-CHEATSHEET.md updated with the data-platform plugin (7 commands + hook)
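A plausible way to consume that queue file in batch, assuming one doc path per line (the exact file format is not documented here):

```bash
# Hypothetical batch read of the doc-guardian queue - assumes one doc path per line
sort -u .doc-guardian-queue | while read -r doc; do
  echo "needs update: $doc"
done
```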
### Fixed

- Documentation drift: COMMANDS-CHEATSHEET.md was missing the data-platform plugin added in v4.0.0
- Proactive sprint planning: projman now suggests `/sprint-plan` at session start when unplanned issues exist

### Known Issues

- **MCP Bug #160:** The `update_wiki_page` tool renames pages to "unnamed" when page_name contains URL-encoded characters (`:` → `%3A`). Workaround: use `create_wiki_page` to overwrite instead.

---
## [4.0.0] - 2026-01-25

### Added

#### New Plugin: data-platform v1.0.0

- **pandas MCP Tools** (14 tools): DataFrame operations with Arrow IPC data_ref persistence
  - `read_csv`, `read_parquet`, `read_json` - Load data with chunking support
  - `to_csv`, `to_parquet` - Export to various formats
  - `describe`, `head`, `tail` - Data exploration
  - `filter`, `select`, `groupby`, `join` - Data transformation
  - `list_data`, `drop_data` - Memory management

- **PostgreSQL MCP Tools** (10 tools): Database operations with asyncpg connection pooling
  - `pg_connect`, `pg_query`, `pg_execute` - Core database operations
  - `pg_tables`, `pg_columns`, `pg_schemas` - Schema exploration
  - `st_tables`, `st_geometry_type`, `st_srid`, `st_extent` - PostGIS spatial support

- **dbt MCP Tools** (8 tools): Build tool wrapper with pre-execution validation
  - `dbt_parse` - Pre-flight validation (catches dbt 1.9+ deprecations)
  - `dbt_run`, `dbt_test`, `dbt_build` - Execution with auto-validation
  - `dbt_compile`, `dbt_ls`, `dbt_docs_generate`, `dbt_lineage` - Analysis tools

- **Commands**: `/ingest`, `/profile`, `/schema`, `/explain`, `/lineage`, `/run`
- **Agents**: `data-ingestion` (loading/transformation), `data-analysis` (exploration/profiling)
- **SessionStart Hook**: Graceful PostgreSQL connection check (non-blocking warning)
- **Key Features**:
  - data_ref system for DataFrame persistence across tool calls
  - 100k row limit with chunking support for large datasets
  - Hybrid configuration (system: `~/.config/claude/postgres.env`, project: `.env`); see the sketch after this list
  - Auto-detection of dbt projects
  - Arrow IPC format for efficient memory management
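As an illustration of the hybrid configuration split, a system-level `postgres.env` might carry connection settings while the project `.env` stays repo-specific. The variable names below are hypothetical - only the two file locations are documented above:

```bash
# ~/.config/claude/postgres.env - system-level credentials (variable names are illustrative)
cat > ~/.config/claude/postgres.env <<'EOF'
POSTGRES_HOST=db.internal
POSTGRES_PORT=5432
POSTGRES_USER=analyst
POSTGRES_PASSWORD=change-me
EOF
chmod 600 ~/.config/claude/postgres.env   # credentials should not be world-readable
```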
---
## [3.2.0] - 2026-01-24

### Added

- **git-flow:** `/commit` now detects protected branches before committing
  - Warns when on a protected branch (main, master, development, staging, production)
  - Offers to create a feature branch automatically instead of committing directly
  - Configurable via the `GIT_PROTECTED_BRANCHES` environment variable (see the sketch after this list)
- **netbox:** Platform and primary_ip parameters added to device update tools
- **claude-config-maintainer:** Auto-enforce mandatory behavior rules via SessionStart hook
- **scripts:** `release.sh` - Versioning workflow script for consistent releases
- **scripts:** `verify-hooks.sh` - Verify all hooks are command type
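Overriding the protection list might look like this. The variable name and default branch set come from the entry above, but the list separator is an assumption:

```bash
# Hypothetical override in the project .env - comma separation is assumed
GIT_PROTECTED_BRANCHES="main,master,development,staging,production,release"
```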
### Changed

- **doc-guardian:** Hook switched from `prompt` type to `command` type
  - Prompt hooks proved unreliable - Claude ignores explicit instructions
  - New `notify.sh` bash script guarantees exact output behavior
  - Only notifies for config file changes (commands/, agents/, skills/, hooks/)
  - Silent exit for all other files - no blocking possible
- **All hooks:** Converted to command type with stricter plugin prefix enforcement (a check sketch follows this list)
  - All hooks now mandate the `[plugin-name]` prefix with a "NO EXCEPTIONS" rule
  - Simplified output formats with word limits
  - Consistent structure across projman, pr-review, code-sentinel, doc-guardian
- **CLAUDE.md:** Replaced the destructive "ALWAYS CLEAR CACHE" rule with "VERIFY AND RESTART"
  - Cache clearing mid-session breaks MCP tools
  - Added guidance for a proper plugin development workflow
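A hedged sketch of the kind of check `verify-hooks.sh` performs - flagging any hook definition that is still prompt type (the real script's logic may be more thorough):

```bash
# Fail if any hooks.json still declares a prompt-type hook
if grep -rn '"type": "prompt"' plugins/*/hooks/hooks.json 2>/dev/null; then
  echo "FAIL: prompt-type hooks found - convert them to command type" >&2
  exit 1
fi
echo "OK: all hooks are command type"
```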
### Fixed

- **cmdb-assistant:** Complete MCP tool schemas for update operations (#138)
- **netbox:** Shortened tool names to meet the 64-char API limit (#134)
- **cmdb-assistant:** Corrected the NetBox API URL format in the setup wizard (#132)
- **gitea/projman:** Type safety for `create_label_smart`, curl-based debug-report (#124)
- **netbox:** Added diagnostic logging for JSON parse errors (#121)
- **labels:** Added a duplicate check before creating labels (#116)
- **hooks:** Converted ALL hooks to command type with proper prefixes (#114)
- Protected branch workflow: Claude no longer commits directly to protected branches (fixes #109)
- doc-guardian hook no longer blocks the workflow (fixes #110)

---
## [3.1.1] - 2026-01-22

### Added

- **git-flow:** `/commit-sync` now prunes stale remote-tracking branches with `git fetch --prune`
- **git-flow:** `/commit-sync` detects and reports local branches with deleted upstreams (see the sketch after this list)
- **git-flow:** `/branch-cleanup` now handles stale branches (upstream gone) separately from merged branches
- **git-flow:** New `GIT_CLEANUP_STALE` environment variable for stale branch cleanup control
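For reference, the standard git idiom behind this detection - the commands themselves are well established, though whether the plugin parses `git branch -vv` exactly this way is an assumption:

```bash
git fetch --prune                             # drop remote-tracking refs that no longer exist
git branch -vv | awk '/: gone]/ {print $1}'   # local branches whose upstream was deleted
```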
### Changed

- **All hooks:** Added a `[plugin-name]` prefix to all hook messages for better identification
  - `[projman]`, `[pr-review]`, `[code-sentinel]`, `[doc-guardian]` prefixes
- **doc-guardian:** Hook is now notification-only (no file reads or blocking operations)
  - Suggests running `/doc-sync` instead of performing inline checks
  - Significantly reduces workflow interruption

### Fixed

- doc-guardian hook no longer stalls the workflow with deep file analysis

---
## [3.1.0] - 2026-01-21

### Added

#### Debug Workflow Commands (projman)

- **`/debug-report`** - Run diagnostics in test projects, create structured issues in the marketplace
  - Runs 5 diagnostic MCP tool tests with an explicit repo parameter
  - Captures full project context (git remote, cwd, branch)
  - Generates a structured issue with a hypothesis and investigation steps
  - Creates the issue in the configured marketplace repository automatically

- **`/debug-review`** - Investigate diagnostic issues with human approval gates
  - Lists open diagnostic issues for triage
  - Maps errors to relevant code files using an error-to-file mapping
  - MANDATORY: Reads the relevant files before proposing any fix
  - Three approval gates: investigation summary, fix approach, PR creation
  - Creates a feature branch, commits, and a PR with proper linking

#### MCP Server Improvements

- Dynamic label format detection in `suggest_labels`
  - Supports slash format (`Type/Bug`) and colon-space format (`Type: Bug`)
  - Fetches the actual labels from the repo and matches suggestions to the real format
  - Handles Effort/Efforts singular/plural normalization
### Changed

- **`/labels-sync`** completely rewritten with explicit execution steps
  - Step 1 now explicitly requires running `git remote get-url origin` via Bash
  - All MCP tool calls show the required `repo` parameter
  - Added a "DO NOT" section preventing common mistakes
  - Removed the confusing "Label Reference" section that caused file creation prompts
### Fixed

- MCP tools no longer fail with the "Use 'owner/repo' format" error
  - Root cause: the MCP server is sandboxed and cannot auto-detect the project directory
  - Solution: command documentation now instructs Claude to detect the repo via Bash first (see the sketch after this list)
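That detection step amounts to deriving `owner/repo` from the origin URL before any MCP call. A minimal sketch - the sed expression is illustrative and handles the common ssh and https URL shapes:

```bash
url="$(git remote get-url origin)"
repo="$(printf '%s\n' "$url" | sed -E 's#^(git@[^:]+:|https?://[^/]+/)##; s#\.git$##')"
echo "$repo"   # e.g. personal-projects/leo-claude-mktplace
```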
---
## [3.0.1] - 2026-01-21

### Added

- `/project-init` command for quick project setup when the system is already configured
- `/project-sync` command to sync .env with the git remote after a repository move/rename
- SessionStart hooks for automatic mismatch detection between the git remote and .env
- Interactive setup wizard (`/initial-setup`) redesigned to use Claude tools instead of a bash script
### Changed

- `GITEA_ORG` moved from system-level to project-level configuration (different projects may belong to different organizations)
- Environment variables renamed to match MCP server expectations (see the example after this list):
  - `GITEA_URL` → `GITEA_API_URL` (must include `/api/v1`)
  - `GITEA_TOKEN` → `GITEA_API_TOKEN`
  - `NETBOX_URL` → `NETBOX_API_URL` (must include `/api`)
  - `NETBOX_TOKEN` → `NETBOX_API_TOKEN`
- Setup commands now validate the repository via the Gitea API before saving configuration
- README.md simplified to show only the wizard setup path (manual setup moved to CONFIGURATION.md)
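Putting the renames together, a working configuration would look roughly like this (hostnames and values are placeholders):

```bash
# ~/.config/claude/gitea.env - system level
GITEA_API_URL="https://gitea.example.com/api/v1"   # must include /api/v1
GITEA_API_TOKEN="<token>"

# .env in the project root - project level
GITEA_ORG="my-org"
GITEA_REPO="my-repo"
```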
### Fixed

- API URL paths in curl commands (removed the redundant `/api/v1`, since it is now part of the URL variable)
- Documentation now correctly references the environment variable names

---
## [3.0.0] - 2026-01-20

### Added

#### New Plugins

- **clarity-assist** v1.0.0 - Prompt optimization with ND accommodations
  - `/clarify` command for full 4-D methodology optimization
  - `/quick-clarify` command for rapid single-pass clarification
  - clarity-coach agent with ND-friendly questioning patterns
  - prompt-patterns skill with optimization rules

- **git-flow** v1.0.0 - Git workflow automation
  - `/commit` command with smart conventional commit messages
  - `/commit-push`, `/commit-merge`, `/commit-sync` workflow commands
  - `/branch-start`, `/branch-cleanup` branch management commands
  - `/git-status` enhanced status with recommendations
  - `/git-config` interactive configuration
  - git-assistant agent for complex operations
  - workflow-patterns skill with branching strategies

- **pr-review** v1.0.0 - Multi-agent pull request review
  - `/pr-review` command for comprehensive multi-agent review
  - `/pr-summary` command for a quick PR overview
  - `/pr-findings` command for filtering review findings
  - coordinator agent for orchestrating reviews
  - security-reviewer, performance-analyst, maintainability-auditor, test-validator agents
  - review-patterns skill with confidence scoring rules

#### Gitea MCP Server Enhancements

- 6 new Pull Request tools:
  - `list_pull_requests` - List PRs with filters
  - `get_pull_request` - Get PR details
  - `get_pr_diff` - Get the PR diff
  - `get_pr_comments` - Get PR comments
  - `create_pr_review` - Create a review (approve, request changes, comment)
  - `add_pr_comment` - Add a comment to a PR

#### Documentation

- `docs/CONFIGURATION.md` - Centralized configuration guide for all plugins

### Changed

- **BREAKING:** Marketplace renamed from `claude-code-marketplace` to `leo-claude-mktplace`
- **BREAKING:** MCP servers moved from plugin directories to a shared `mcp-servers/` directory at the repository root
- All plugins now have `category`, `tags`, and `license` fields in marketplace.json
- Plugin MCP dependencies now use symlinks to the shared servers
- projman version bumped to 3.0.0 (includes PR tools integration)
- projman CONFIGURATION.md slimmed down; links to the central docs

### Removed

- Standalone MCP server directories inside plugins (replaced with symlinks)

---
## [2.3.0] - 2026-01-20

### Added

---

**File diff: CLAUDE.md** (460 changed lines)

@@ -1,22 +1,169 @@
 # CLAUDE.md
 
+## ⛔ MANDATORY BEHAVIOR RULES - READ FIRST
+
+**These rules are NON-NEGOTIABLE. Violating them wastes the user's time and money.**
+
+### 1. WHEN USER ASKS YOU TO CHECK SOMETHING - CHECK EVERYTHING
+- Search ALL locations, not just where you think it is
+- Check cache directories: `~/.claude/plugins/cache/`
+- Check installed: `~/.claude/plugins/marketplaces/`
+- Check source directories
+- **NEVER say "no" or "that's not the issue" without exhaustive verification**
+
+### 2. WHEN USER SAYS SOMETHING IS WRONG - BELIEVE THEM
+- The user knows their system better than you
+- Investigate thoroughly before disagreeing
+- **Your confidence is often wrong. User's instincts are often right.**
+
+### 3. NEVER SAY "DONE" WITHOUT VERIFICATION
+- Run the actual command/script to verify
+- Show the output to the user
+- **"Done" means VERIFIED WORKING, not "I made changes"**
+
+### 4. SHOW EXACTLY WHAT USER ASKS FOR
+- If user asks for messages, show the MESSAGES
+- If user asks for code, show the CODE
+- **Do not interpret or summarize unless asked**
+
+**FAILURE TO FOLLOW THESE RULES = WASTED USER TIME = UNACCEPTABLE**
+
+---
+
 This file provides guidance to Claude Code when working with code in this repository.
 
+## ⛔ RULES - READ FIRST
+
+### Behavioral Rules
+
+| Rule | Summary |
+|------|---------|
+| **Check everything** | Search cache (`~/.claude/plugins/cache/`), installed (`~/.claude/plugins/marketplaces/`), and source (`~/claude-plugins-work/`) |
+| **Believe the user** | User knows their system. Investigate before disagreeing. |
+| **Verify before "done"** | Run commands, show output, check all locations. "Done" = verified working. |
+| **Show what's asked** | Don't interpret or summarize unless asked. |
+
+### After Plugin Updates
+
+Run `./scripts/verify-hooks.sh`. If changes affect MCP servers or hooks, inform user to restart session.
+**DO NOT clear cache mid-session** - breaks loaded MCP tools.
+
+### NEVER USE CLI TOOLS FOR EXTERNAL SERVICES
+- **FORBIDDEN:** `gh`, `tea`, `curl` to APIs, any CLI that talks to Gitea/GitHub/external services
+- **REQUIRED:** Use MCP tools exclusively (`mcp__plugin_projman_gitea__*`, `mcp__plugin_pr-review_gitea__*`)
+- **NO EXCEPTIONS.** Don't try CLI first. Don't fall back to CLI. MCP ONLY.
+
+### NEVER PUSH DIRECTLY TO PROTECTED BRANCHES
+- **FORBIDDEN:** `git push origin development`, `git push origin main`, `git push origin master`
+- **REQUIRED:** Create feature branch → push feature branch → create PR via MCP
+- If you accidentally commit to a protected branch locally: `git checkout -b fix/branch-name` then reset the protected branch
+
+### Repository Rules
+
+| Rule | Details |
+|------|---------|
+| **File creation** | Only in allowed paths. Use `.scratch/` for temp work. Verify against `docs/CANONICAL-PATHS.md` |
+| **plugin.json location** | Must be in `.claude-plugin/` directory |
+| **Hooks** | Use `hooks/hooks.json` (auto-discovered). Never inline in plugin.json |
+| **MCP servers** | Defined in root `.mcp.json`. Use MCP tools, never CLI (`tea`, `gh`) |
+| **Allowed root files** | `CLAUDE.md`, `README.md`, `LICENSE`, `CHANGELOG.md`, `.gitignore`, `.env.example` |
+
+**Valid hook events:** `PreToolUse`, `PostToolUse`, `UserPromptSubmit`, `SessionStart`, `SessionEnd`, `Notification`, `Stop`, `SubagentStop`, `PreCompact`
+
+### ⛔ MANDATORY: Before Any Code Change
+
+**Claude MUST show this checklist BEFORE editing any file:**
+
+#### 1. Impact Search Results
+Run and show output of:
+```bash
+grep -rn "PATTERN" --include="*.sh" --include="*.md" --include="*.json" --include="*.py" | grep -v ".git"
+```
+
+#### 2. Files That Will Be Affected
+Numbered list of every file to be modified, with the specific change for each.
+
+#### 3. Files Searched But Not Changed (and why)
+Proof that related files were checked and determined unchanged.
+
+#### 4. Documentation That References This
+List of docs that mention this feature/script/function.
+
+**User verifies this list before Claude proceeds. If Claude skips this, STOP IMMEDIATELY.**
+
+#### After Changes
+Run the same grep and show results proving no references remain unaddressed.
+
+---
+
+## ⚠️ Development Context: We Build AND Use These Plugins
+
+**This is a self-referential project.** We are:
+1. **BUILDING** a plugin marketplace (source code in `plugins/`)
+2. **USING** the installed marketplace to build it (dogfooding)
+
+### Plugins ACTIVELY USED in This Project
+
+These plugins are installed and should be used during development:
+
+| Plugin | Used For |
+|--------|----------|
+| **projman** | Sprint planning, issue management, lessons learned |
+| **git-flow** | Commits, branch management |
+| **pr-review** | Pull request reviews |
+| **doc-guardian** | Documentation drift detection |
+| **code-sentinel** | Security scanning, refactoring |
+| **clarity-assist** | Prompt clarification |
+| **claude-config-maintainer** | CLAUDE.md optimization |
+| **contract-validator** | Cross-plugin compatibility |
+
+### Plugins NOT Used Here (Development Only)
+
+These plugins exist in source but are **NOT relevant** to this project's workflow:
+
+| Plugin | Why Not Used |
+|--------|--------------|
+| **data-platform** | For data engineering projects (pandas, PostgreSQL, dbt) |
+| **viz-platform** | For dashboard projects (Dash, Plotly) |
+| **cmdb-assistant** | For infrastructure projects (NetBox) |
+
+**Do NOT suggest** `/ingest`, `/profile`, `/chart`, `/cmdb-*` commands - they don't apply here.
+
+### Key Distinction
+
+| Context | Path | What To Do |
+|---------|------|------------|
+| **Editing plugin source** | `~/claude-plugins-work/plugins/` | Modify code, add features |
+| **Using installed plugins** | `~/.claude/plugins/marketplaces/` | Run commands like `/sprint-plan` |
+
+When user says "run /sprint-plan", use the INSTALLED plugin.
+When user says "fix the sprint-plan command", edit the SOURCE code.
+
+---
+
 ## Project Overview
 
-**Repository:** support-claude-mktplace
+**Repository:** leo-claude-mktplace
-**Version:** 2.3.0
+**Version:** 5.8.0
 **Status:** Production Ready
 
-A Claude Code plugin marketplace containing:
+A plugin marketplace for Claude Code containing:
 
 | Plugin | Description | Version |
 |--------|-------------|---------|
-| `projman` | Sprint planning and project management with Gitea integration | 2.3.0 |
+| `projman` | Sprint planning and project management with Gitea integration | 3.3.0 |
+| `git-flow` | Git workflow automation with smart commits and branch management | 1.0.0 |
+| `pr-review` | Multi-agent PR review with confidence scoring | 1.1.0 |
+| `clarity-assist` | Prompt optimization with ND-friendly accommodations | 1.0.0 |
 | `doc-guardian` | Automatic documentation drift detection and synchronization | 1.0.0 |
-| `code-sentinel` | Security scanning and code refactoring tools | 1.0.0 |
+| `code-sentinel` | Security scanning and code refactoring tools | 1.0.1 |
 | `claude-config-maintainer` | CLAUDE.md optimization and maintenance | 1.0.0 |
-| `cmdb-assistant` | NetBox CMDB integration for infrastructure management | 1.0.0 |
+| `cmdb-assistant` | NetBox CMDB integration for infrastructure management | 1.2.0 |
+| `data-platform` | pandas, PostgreSQL, and dbt integration for data engineering | 1.3.0 |
+| `viz-platform` | DMC validation, Plotly charts, and theming for dashboards | 1.1.0 |
+| `contract-validator` | Cross-plugin compatibility validation and agent verification | 1.1.0 |
 | `project-hygiene` | Post-task cleanup automation via hooks | 0.1.0 |
 
 ## Quick Start
 
@@ -25,84 +172,97 @@ A Claude Code plugin marketplace containing:
 # Validate marketplace compliance
 ./scripts/validate-marketplace.sh
 
-# Run projman commands (in a target project with plugin installed)
+# After updates
-/sprint-plan # Start sprint planning
+./scripts/post-update.sh # Rebuild venvs
-/sprint-status # Check progress
-/review # Pre-close code quality review
-/test-check # Verify tests before close
-/sprint-close # Complete sprint
 ```
 
+### Plugin Commands - USE THESE in This Project
+
+| Category | Commands |
+|----------|----------|
+| **Setup** | `/setup` (modes: `--full`, `--quick`, `--sync`) |
+| **Sprint** | `/sprint-plan`, `/sprint-start`, `/sprint-status` (with `--diagram`), `/sprint-close` |
+| **Quality** | `/review`, `/test` (modes: `run`, `gen`) |
+| **Versioning** | `/suggest-version` |
+| **PR Review** | `/pr-review`, `/pr-summary`, `/pr-findings`, `/pr-diff` |
+| **Docs** | `/doc-audit`, `/doc-sync`, `/changelog-gen`, `/doc-coverage`, `/stale-docs` |
+| **Security** | `/security-scan`, `/refactor`, `/refactor-dry` |
+| **Config** | `/config-analyze`, `/config-optimize`, `/config-diff`, `/config-lint` |
+| **Validation** | `/validate-contracts`, `/check-agent`, `/list-interfaces`, `/dependency-graph` |
+| **Debug** | `/debug` (modes: `report`, `review`) |
+
+### Plugin Commands - NOT RELEVANT to This Project
+
+These commands are being developed but don't apply to this project's workflow:
+
+| Category | Commands | For Projects Using |
+|----------|----------|-------------------|
+| **Data** | `/ingest`, `/profile`, `/schema`, `/lineage`, `/dbt-test` | pandas, PostgreSQL, dbt |
+| **Visualization** | `/component`, `/chart`, `/dashboard`, `/theme` | Dash, Plotly dashboards |
+| **CMDB** | `/cmdb-search`, `/cmdb-device`, `/cmdb-sync` | NetBox infrastructure |
+
 ## Repository Structure
 
 ```
-support-claude-mktplace/
+leo-claude-mktplace/
 ├── .claude-plugin/
 │   └── marketplace.json # Marketplace manifest
+├── .mcp.json # MCP server configuration (all servers)
+├── mcp-servers/ # SHARED MCP servers
+│   ├── gitea/ # Gitea MCP (issues, PRs, wiki)
+│   ├── netbox/ # NetBox MCP (CMDB)
+│   ├── data-platform/ # pandas, PostgreSQL, dbt
+│   ├── viz-platform/ # DMC validation, charts, themes
+│   └── contract-validator/ # Plugin compatibility validation
 ├── plugins/
 │   ├── projman/ # Sprint management
 │   │   ├── .claude-plugin/plugin.json
-│   │   ├── .mcp.json
+│   │   ├── commands/ # 12 commands
-│   │   ├── mcp-servers/gitea/ # Bundled MCP server
+│   │   ├── hooks/ # SessionStart: mismatch detection
-│   │   ├── commands/ # 9 commands
-│   │   │   ├── sprint-plan.md, sprint-start.md, sprint-status.md
-│   │   │   ├── sprint-close.md, labels-sync.md, initial-setup.md
-│   │   │   └── review.md, test-check.md, test-gen.md
 │   │   ├── agents/ # 4 agents
-│   │   │   ├── planner.md, orchestrator.md, executor.md
+│   │   └── skills/ # 17 reusable skill files
-│   │   │   └── code-reviewer.md
+│   ├── git-flow/ # Git workflow automation
-│   │   └── skills/label-taxonomy/
+│   │   ├── .claude-plugin/plugin.json
+│   │   ├── commands/ # 8 commands
+│   │   └── agents/
+│   ├── pr-review/ # Multi-agent PR review
+│   │   ├── .claude-plugin/plugin.json
+│   │   ├── commands/ # 6 commands
+│   │   ├── hooks/ # SessionStart mismatch detection
+│   │   └── agents/ # 5 agents
+│   ├── clarity-assist/ # Prompt optimization
+│   │   ├── .claude-plugin/plugin.json
+│   │   ├── commands/ # 2 commands
+│   │   └── agents/
+│   ├── data-platform/ # Data engineering
+│   │   ├── .claude-plugin/plugin.json
+│   │   ├── commands/ # 7 commands
+│   │   ├── hooks/ # SessionStart PostgreSQL check
+│   │   └── agents/ # 2 agents
+│   ├── viz-platform/ # Visualization
+│   │   ├── .claude-plugin/plugin.json
+│   │   ├── commands/ # 7 commands
+│   │   ├── hooks/ # SessionStart DMC check
+│   │   └── agents/ # 3 agents
 │   ├── doc-guardian/ # Documentation drift detection
-│   │   ├── .claude-plugin/plugin.json
-│   │   ├── hooks/hooks.json # PostToolUse, Stop hooks
-│   │   ├── commands/ # doc-audit.md, doc-sync.md
-│   │   ├── agents/ # doc-analyzer.md
-│   │   └── skills/doc-patterns/
 │   ├── code-sentinel/ # Security scanning & refactoring
-│   │   ├── .claude-plugin/plugin.json
-│   │   ├── hooks/hooks.json # PreToolUse hook
-│   │   ├── commands/ # security-scan.md, refactor.md, refactor-dry.md
-│   │   ├── agents/ # security-reviewer.md, refactor-advisor.md
-│   │   └── skills/security-patterns/
 │   ├── claude-config-maintainer/
 │   ├── cmdb-assistant/
+│   ├── contract-validator/
 │   └── project-hygiene/
 ├── scripts/
 │   ├── setup.sh, post-update.sh
-│   └── validate-marketplace.sh # Marketplace compliance validation
+│   ├── validate-marketplace.sh # Marketplace compliance validation
+│   ├── verify-hooks.sh # Verify all hooks are command type
+│   └── check-venv.sh # Check MCP server venvs exist
 └── docs/
     ├── CANONICAL-PATHS.md # Single source of truth for paths
-    └── references/
+    └── CONFIGURATION.md # Centralized configuration guide
 ```
 
-## CRITICAL: Rules You MUST Follow
-
-### File Operations
-- **NEVER** create files in repository root unless listed in "Allowed Root Files"
-- **NEVER** modify `.gitignore` without explicit permission
-- **ALWAYS** use `.scratch/` for temporary/exploratory work
-- **ALWAYS** verify paths against `docs/CANONICAL-PATHS.md` before creating files
-
-### Plugin Development
-- **plugin.json MUST be in `.claude-plugin/` directory** (not plugin root)
-- **Every plugin MUST be listed in marketplace.json**
-- **MCP servers MUST use venv python path**: `${CLAUDE_PLUGIN_ROOT}/mcp-servers/{name}/.venv/bin/python`
-- **CLI tools forbidden** - Use MCP tools exclusively (never `tea`, `gh`, etc.)
-
-### Hooks (Valid Events Only)
-`PreToolUse`, `PostToolUse`, `UserPromptSubmit`, `SessionStart`, `SessionEnd`, `Notification`, `Stop`, `SubagentStop`, `PreCompact`
-
-**INVALID:** `task-completed`, `file-changed`, `git-commit-msg-needed`
-
-### Allowed Root Files
-`CLAUDE.md`, `README.md`, `LICENSE`, `CHANGELOG.md`, `.gitignore`, `.env.example`
-
-### Allowed Root Directories
-`.claude/`, `.claude-plugin/`, `.claude-plugins/`, `.scratch/`, `docs/`, `hooks/`, `plugins/`, `scripts/`
-
 ## Architecture
 
-### Four-Agent Model
+### Four-Agent Model (projman)
 
 | Agent | Personality | Responsibilities |
 |-------|-------------|------------------|
@@ -111,23 +271,60 @@ support-claude-mktplace/
 | **Executor** | Implementation-focused | Code implementation, branch management, MR creation |
 | **Code Reviewer** | Thorough, practical | Pre-close quality review, security scan, test verification |
 
+### Agent Model Selection
+
+Agents specify their model in frontmatter using Claude Code's `model` field. Supported values: `sonnet` (default), `opus`, `haiku`, `inherit`.
+
+| Plugin | Agent | Model | Rationale |
+|--------|-------|-------|-----------|
+| projman | Planner | sonnet | Architectural analysis, sprint planning |
+| projman | Orchestrator | sonnet | Coordination and tool dispatch |
+| projman | Executor | sonnet | Code generation and implementation |
+| projman | Code Reviewer | sonnet | Quality gate, pattern detection |
+| pr-review | Coordinator | sonnet | Orchestrates sub-agents, aggregates findings |
+| pr-review | Security Reviewer | sonnet | Security analysis |
+| pr-review | Performance Analyst | sonnet | Performance pattern detection |
+| pr-review | Maintainability Auditor | haiku | Pattern matching (complexity, duplication) |
+| pr-review | Test Validator | haiku | Coverage gap detection |
+| data-platform | Data Advisor | sonnet | Schema validation, dbt orchestration |
+| data-platform | Data Analysis | sonnet | Data exploration and profiling |
+| data-platform | Data Ingestion | haiku | Data loading operations |
+| viz-platform | Design Reviewer | sonnet | DMC validation + accessibility |
+| viz-platform | Layout Builder | sonnet | Dashboard design guidance |
+| viz-platform | Component Check | haiku | Quick component validation |
+| viz-platform | Theme Setup | haiku | Theme configuration |
+| contract-validator | Agent Check | haiku | Reference checking |
+| contract-validator | Full Validation | sonnet | Marketplace sweep |
+| code-sentinel | Security Reviewer | sonnet | Security analysis |
+| code-sentinel | Refactor Advisor | sonnet | Code refactoring advice |
+| doc-guardian | Doc Analyzer | sonnet | Documentation drift detection |
+| clarity-assist | Clarity Coach | sonnet | Conversational coaching |
+| git-flow | Git Assistant | haiku | Git operations |
+| claude-config-maintainer | Maintainer | sonnet | CLAUDE.md optimization |
+| cmdb-assistant | CMDB Assistant | sonnet | NetBox operations |
+
+Override by editing the `model:` field in `plugins/{plugin}/agents/{agent}.md`.
+
 ### MCP Server Tools (Gitea)
 
 | Category | Tools |
 |----------|-------|
-| Issues | `list_issues`, `get_issue`, `create_issue`, `update_issue`, `add_comment` |
+| Issues | `list_issues`, `get_issue`, `create_issue`, `update_issue`, `add_comment`, `aggregate_issues` |
-| Labels | `get_labels`, `suggest_labels`, `create_label` |
+| Labels | `get_labels`, `suggest_labels`, `create_label`, `create_label_smart` |
-| Milestones | `list_milestones`, `get_milestone`, `create_milestone`, `update_milestone` |
+| Milestones | `list_milestones`, `get_milestone`, `create_milestone`, `update_milestone`, `delete_milestone` |
-| Dependencies | `list_issue_dependencies`, `create_issue_dependency`, `get_execution_order` |
+| Dependencies | `list_issue_dependencies`, `create_issue_dependency`, `remove_issue_dependency`, `get_execution_order` |
-| Wiki | `list_wiki_pages`, `get_wiki_page`, `create_wiki_page`, `create_lesson`, `search_lessons` |
+| Wiki | `list_wiki_pages`, `get_wiki_page`, `create_wiki_page`, `update_wiki_page`, `create_lesson`, `search_lessons`, `allocate_rfc_number` |
+| **Pull Requests** | `list_pull_requests`, `get_pull_request`, `get_pr_diff`, `get_pr_comments`, `create_pr_review`, `add_pr_comment` |
 | Validation | `validate_repo_org`, `get_branch_protection` |
 
 ### Hybrid Configuration
 
 | Level | Location | Purpose |
 |-------|----------|---------|
-| System | `~/.config/claude/gitea.env` | Credentials (GITEA_URL, GITEA_TOKEN, GITEA_ORG) |
+| System | `~/.config/claude/gitea.env` | Credentials (GITEA_API_URL, GITEA_API_TOKEN) |
-| Project | `.env` in project root | Repository specification (GITEA_REPO) |
+| Project | `.env` in project root | Repository specification (GITEA_ORG, GITEA_REPO) |
 
+**Note:** `GITEA_ORG` is at project level since different projects may belong to different organizations.
+
 ### Branch-Aware Security
 
@@ -137,6 +334,20 @@ support-claude-mktplace/
 | `staging` | Staging | Read-only code, can create issues |
 | `main`, `master` | Production | Read-only, emergency only |
 
+### RFC System
+
+Wiki-based Request for Comments system for tracking feature ideas from proposal through implementation.
+
+**RFC Wiki Naming:**
+- RFC pages: `RFC-NNNN: Short Title` (4-digit zero-padded)
+- Index page: `RFC-Index` (auto-maintained)
+
+**Lifecycle:** Draft → Review → Approved → Implementing → Implemented
+
+**Integration with Sprint Planning:**
+- `/sprint-plan` detects approved RFCs and offers selection
+- `/sprint-close` updates RFC status on completion
+
 ## Label Taxonomy
 
 43 labels total: 27 organization + 16 repository
@@ -160,16 +371,16 @@ Stored in Gitea Wiki under `lessons-learned/sprints/`.
 ### Adding a New Plugin
 
 1. Create `plugins/{name}/.claude-plugin/plugin.json`
-2. Add entry to `.claude-plugin/marketplace.json`
+2. Add entry to `.claude-plugin/marketplace.json` with category, tags, license
-3. Create `README.md` and `claude-md-integration.md`
+3. Create `claude-md-integration.md`
-4. Run `./scripts/validate-marketplace.sh`
+4. If using new MCP server, add to root `mcp-servers/` and update `.mcp.json`
-5. Update `CHANGELOG.md`
+5. Run `./scripts/validate-marketplace.sh`
+6. Update `CHANGELOG.md`
 
 ### Adding a Command to projman
 
 1. Create `plugins/projman/commands/{name}.md`
-2. Update `plugins/projman/README.md`
+2. Update marketplace description if significant
-3. Update marketplace description if significant
 
 ### Validation
 
@@ -191,40 +402,89 @@ Stored in Gitea Wiki under `lessons-learned/sprints/`.
 | Document | Purpose |
 |----------|---------|
 | `docs/CANONICAL-PATHS.md` | **Single source of truth** for paths |
+| `docs/COMMANDS-CHEATSHEET.md` | All commands quick reference |
+| `docs/CONFIGURATION.md` | Centralized setup guide |
+| `docs/DEBUGGING-CHECKLIST.md` | Systematic troubleshooting guide |
 | `docs/UPDATING.md` | Update guide for the marketplace |
-| `plugins/projman/CONFIGURATION.md` | Projman setup guide |
+| `plugins/projman/CONFIGURATION.md` | Projman quick reference (links to central) |
-| `plugins/projman/README.md` | Projman full documentation |
 
-## Versioning and Changelog Rules
+## Installation Paths
 
-### Version Display
+Understanding where files live is critical for debugging:
-**The marketplace version is displayed ONLY in the main `README.md` title.**
 
-- Format: `# Claude Code Marketplace - vX.Y.Z`
-- Do NOT add version numbers to individual plugin documentation titles
-- Do NOT add version numbers to configuration guides
-- Do NOT add version numbers to CLAUDE.md or other docs
+| Context | Path | Purpose |
+|---------|------|---------|
+| **Source** | `~/claude-plugins-work/` | Development - edit here |
+| **Installed** | `~/.claude/plugins/marketplaces/leo-claude-mktplace/` | Runtime - Claude uses this |
+| **Cache** | `~/.claude/` | Plugin metadata and settings |
 
-### Changelog Maintenance (MANDATORY)
-**`CHANGELOG.md` is the authoritative source for version history.**
+**Key insight:** Edits to source require reinstall/update to take effect at runtime.
 
-When releasing a new version:
-1. Update main `README.md` title with new version
-2. Update `CHANGELOG.md` with:
-   - Version number and date: `## [X.Y.Z] - YYYY-MM-DD`
-   - **Added**: New features, commands, files
-   - **Changed**: Modifications to existing functionality
-   - **Fixed**: Bug fixes
-   - **Removed**: Deleted features, files, deprecated items
-3. Update `marketplace.json` metadata version
-4. Update plugin `plugin.json` versions if plugin-specific changes
+## Debugging & Troubleshooting
 
-### Version Format
-- Follow [Semantic Versioning](https://semver.org/): MAJOR.MINOR.PATCH
-- MAJOR: Breaking changes
-- MINOR: New features, backward compatible
-- PATCH: Bug fixes, minor improvements
+See `docs/DEBUGGING-CHECKLIST.md` for systematic troubleshooting.
 
+**Common Issues:**
+| Symptom | Likely Cause | Fix |
+|---------|--------------|-----|
+| "X MCP servers failed" | Missing venv in installed path | `cd ~/.claude/plugins/marketplaces/leo-claude-mktplace && ./scripts/setup.sh` |
+| MCP tools not available | Venv missing or .mcp.json misconfigured | Run `/debug report` to diagnose |
+| Changes not taking effect | Editing source, not installed | Reinstall plugin or edit installed path |
+
+**Debug Commands:**
+- `/debug report` - Run full diagnostics, create issue if needed
+- `/debug review` - Investigate and propose fixes
+
+## Versioning Workflow
+
+This project follows [SemVer](https://semver.org/) and [Keep a Changelog](https://keepachangelog.com).
+
+### Version Locations (must stay in sync)
+
+| Location | Format | Example |
+|----------|--------|---------|
+| Git tags | `vX.Y.Z` | `v3.2.0` |
+| README.md title | `# Leo Claude Marketplace - vX.Y.Z` | `v3.2.0` |
+| marketplace.json | `"version": "X.Y.Z"` | `3.2.0` |
+| CHANGELOG.md | `## [X.Y.Z] - YYYY-MM-DD` | `[3.2.0] - 2026-01-24` |
+
+### During Development
+
+**All changes go under `[Unreleased]` in CHANGELOG.md.** Never create a versioned section until release time.
+
+```markdown
+## [Unreleased]
+
+### Added
+- New feature description
+
+### Fixed
+- Bug fix description
+```
+
+### Creating a Release
+
+Use the release script to ensure consistency:
+
+```bash
+./scripts/release.sh 3.2.0
+```
+
+The script will:
+1. Validate `[Unreleased]` section has content
+2. Replace `[Unreleased]` with `[3.2.0] - YYYY-MM-DD`
+3. Update README.md title
+4. Update marketplace.json version
+5. Commit and create git tag
+
+### SemVer Guidelines
+
+| Change Type | Version Bump | Example |
+|-------------|--------------|---------|
+| Bug fixes only | PATCH (x.y.**Z**) | 3.1.1 → 3.1.2 |
+| New features (backwards compatible) | MINOR (x.**Y**.0) | 3.1.2 → 3.2.0 |
+| Breaking changes | MAJOR (**X**.0.0) | 3.2.0 → 4.0.0 |
+
 ---
 
-**Last Updated:** 2026-01-20
+**Last Updated:** 2026-02-02
|||||||
400
README.md
400
README.md
@@ -1,15 +1,17 @@
|
|||||||
# Leo Claude Marketplace - v5.8.0

A collection of Claude Code plugins for project management, infrastructure automation, and development workflows.

## Plugins

### Development & Project Management

#### [projman](./plugins/projman)

**Sprint Planning and Project Management**

AI-guided sprint planning with full Gitea integration. Transforms a proven 15-sprint workflow into a distributable plugin.

- Four-agent model: Planner, Orchestrator, Executor, Code Reviewer
- Intelligent label suggestions from 43-label taxonomy
- Lessons learned capture via Gitea Wiki
- Native issue dependencies with parallel execution
- Branch-aware security (development/staging/production)
- Pre-sprint-close code quality review and test verification

**Commands:** `/sprint-plan`, `/sprint-start`, `/sprint-status`, `/sprint-close`, `/labels-sync`, `/setup`, `/review`, `/test`, `/debug`, `/suggest-version`, `/proposal-status`, `/rfc`

#### [git-flow](./plugins/git-flow) *NEW in v3.0.0*

**Git Workflow Automation**

Smart git operations with intelligent commit messages and branch management.

- Auto-generated conventional commit messages
- Multiple workflow styles (simple, feature-branch, pr-required, trunk-based)
- Branch naming enforcement
- Merge and cleanup automation
- Protected branch awareness

**Commands:** `/commit`, `/commit-push`, `/commit-merge`, `/commit-sync`, `/branch-start`, `/branch-cleanup`, `/git-status`, `/git-config`

#### [pr-review](./plugins/pr-review) *NEW in v3.0.0*

**Multi-Agent PR Review**

Comprehensive pull request review using specialized agents.

- Multi-agent review: Security, Performance, Maintainability, Tests
- Confidence scoring (only reports HIGH/MEDIUM confidence findings)
- Actionable feedback with suggested fixes
- Gitea integration for automated review submission

**Commands:** `/pr-review`, `/pr-summary`, `/pr-findings`, `/pr-diff`, `/initial-setup`, `/project-init`, `/project-sync`

#### [claude-config-maintainer](./plugins/claude-config-maintainer)

**CLAUDE.md and Settings Optimization**

Analyze, optimize, and create CLAUDE.md configuration files. Audit and optimize settings.local.json permissions.

**Commands:** `/analyze`, `/optimize`, `/init`, `/config-diff`, `/config-lint`, `/config-audit-settings`, `/config-optimize-settings`, `/config-permissions-map`

#### [contract-validator](./plugins/contract-validator) *NEW in v5.0.0*

**Cross-Plugin Compatibility Validation**

Validate plugin marketplaces for command conflicts, tool overlaps, and broken agent references.

- Interface parsing from plugin README.md files
- Agent extraction from CLAUDE.md definitions
- Pairwise compatibility checks between all plugins
- Data flow validation for agent sequences
- Markdown or JSON reports with actionable suggestions

**Commands:** `/validate-contracts`, `/check-agent`, `/list-interfaces`, `/dependency-graph`, `/initial-setup`

### Productivity

#### [clarity-assist](./plugins/clarity-assist) *NEW in v3.0.0*

**Prompt Optimization with ND Accommodations**

Transform vague requests into clear specifications using structured methodology.

- 4-D methodology: Deconstruct, Diagnose, Develop, Deliver
- ND-friendly question patterns (option-based, chunked)
- Conflict detection and escalation protocols

**Commands:** `/clarify`, `/quick-clarify`

#### [doc-guardian](./plugins/doc-guardian)

**Documentation Lifecycle Management**

Automatic documentation drift detection and synchronization.

**Commands:** `/doc-audit`, `/doc-sync`, `/changelog-gen`, `/doc-coverage`, `/stale-docs`

#### [project-hygiene](./plugins/project-hygiene)

**Post-Task Cleanup Automation**

Hook-based cleanup that runs after Claude completes work.

### Security

#### [code-sentinel](./plugins/code-sentinel)

**Security Scanning & Refactoring**

Security vulnerability detection and code refactoring tools.

**Commands:** `/security-scan`, `/refactor`, `/refactor-dry`

### Infrastructure

#### [cmdb-assistant](./plugins/cmdb-assistant)

**NetBox CMDB Integration**

Full CRUD operations for network infrastructure management directly from Claude Code.

**Commands:** `/initial-setup`, `/cmdb-search`, `/cmdb-device`, `/cmdb-ip`, `/cmdb-site`, `/cmdb-audit`, `/cmdb-register`, `/cmdb-sync`, `/cmdb-topology`, `/change-audit`, `/ip-conflicts`

### Data Engineering

#### [data-platform](./plugins/data-platform) *NEW in v4.0.0*

**pandas, PostgreSQL/PostGIS, and dbt Integration**

Comprehensive data engineering toolkit with persistent DataFrame storage.

- 14 pandas tools with Arrow IPC data_ref system
- 10 PostgreSQL/PostGIS tools with connection pooling
- 8 dbt tools with automatic pre-validation
- 100k row limit with chunking support
- Auto-detection of dbt projects

**Commands:** `/ingest`, `/profile`, `/schema`, `/explain`, `/lineage`, `/lineage-viz`, `/run`, `/dbt-test`, `/data-quality`, `/data-review`, `/data-gate`, `/initial-setup`

### Visualization

#### [viz-platform](./plugins/viz-platform) *NEW in v4.0.0*

**Dash Mantine Components Validation and Theming**

Visualization toolkit with version-locked component validation and design token theming.

- 3 DMC tools with static JSON registry (prevents prop hallucination)
- 2 Chart tools with Plotly and theme integration
- 5 Layout tools for dashboard composition
- 6 Theme tools with design token system
- 5 Page tools for multi-page app structure
- Dual theme storage: user-level and project-level

**Commands:** `/chart`, `/chart-export`, `/dashboard`, `/theme`, `/theme-new`, `/theme-css`, `/component`, `/accessibility-check`, `/breakpoints`, `/design-review`, `/design-gate`, `/initial-setup`

## Domain Advisory Pattern

The marketplace supports cross-plugin domain advisory integration:

- **Domain Detection**: projman automatically detects when issues involve specialized domains (frontend/viz, data engineering)
- **Acceptance Criteria**: Domain-specific acceptance criteria are added to issues during planning
- **Execution Gates**: Domain validation gates (`/design-gate`, `/data-gate`) run before issue completion
- **Extensible**: New domains can be added by creating advisory agents and gate commands

**Current Domains:**

| Domain | Plugin | Gate Command |
|--------|--------|--------------|
| Visualization | viz-platform | `/design-gate` |
| Data | data-platform | `/data-gate` |

## MCP Servers

MCP servers are **shared at repository root** and configured in `.mcp.json`.

### Gitea MCP Server (shared)

Full Gitea API integration for project management.

| Category | Tools |
|----------|-------|
| Issues | `list_issues`, `get_issue`, `create_issue`, `update_issue`, `add_comment`, `aggregate_issues` |
| Labels | `get_labels`, `suggest_labels`, `create_label`, `create_label_smart` |
| Wiki | `list_wiki_pages`, `get_wiki_page`, `create_wiki_page`, `update_wiki_page`, `create_lesson`, `search_lessons` |
| Milestones | `list_milestones`, `get_milestone`, `create_milestone`, `update_milestone`, `delete_milestone` |
| Dependencies | `list_issue_dependencies`, `create_issue_dependency`, `remove_issue_dependency`, `get_execution_order` |
| **Pull Requests** | `list_pull_requests`, `get_pull_request`, `get_pr_diff`, `get_pr_comments`, `create_pr_review`, `add_pr_comment` *(NEW in v3.0.0)* |
| Validation | `validate_repo_org`, `get_branch_protection` |

### NetBox MCP Server (shared)

Comprehensive NetBox REST API integration for infrastructure management.

| Category | Objects |
|----------|---------|
| Virtualization | Clusters, VMs, Interfaces |
| Extras | Tags, Custom Fields, Audit Log |

### Data Platform MCP Server (shared) *NEW in v4.0.0*

pandas, PostgreSQL/PostGIS, and dbt integration for data engineering.

| Category | Tools |
|----------|-------|
| pandas | `read_csv`, `read_parquet`, `read_json`, `to_csv`, `to_parquet`, `describe`, `head`, `tail`, `filter`, `select`, `groupby`, `join`, `list_data`, `drop_data` |
| PostgreSQL | `pg_connect`, `pg_query`, `pg_execute`, `pg_tables`, `pg_columns`, `pg_schemas` |
| PostGIS | `st_tables`, `st_geometry_type`, `st_srid`, `st_extent` |
| dbt | `dbt_parse`, `dbt_run`, `dbt_test`, `dbt_build`, `dbt_compile`, `dbt_ls`, `dbt_docs_generate`, `dbt_lineage` |

### Viz Platform MCP Server (shared) *NEW in v4.0.0*

Dash Mantine Components validation and visualization tools.

| Category | Tools |
|----------|-------|
| DMC | `list_components`, `get_component_props`, `validate_component` |
| Chart | `chart_create`, `chart_configure_interaction` |
| Layout | `layout_create`, `layout_add_filter`, `layout_set_grid`, `layout_get`, `layout_add_section` |
| Theme | `theme_create`, `theme_extend`, `theme_validate`, `theme_export_css`, `theme_list`, `theme_activate` |
| Page | `page_create`, `page_add_navbar`, `page_set_auth`, `page_list`, `page_get_app_config` |

### Contract Validator MCP Server (shared) *NEW in v5.0.0*

Cross-plugin compatibility validation tools.

| Category | Tools |
|----------|-------|
| Parse | `parse_plugin_interface`, `parse_claude_md_agents` |
| Validation | `validate_compatibility`, `validate_agent_refs`, `validate_data_flow`, `validate_workflow_integration` |
| Report | `generate_compatibility_report`, `list_issues` |

## Installation

### Prerequisites

**Option 1 - CLI command (recommended):**

```bash
/plugin marketplace add https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git
```

**Option 2 - Settings file (for team distribution):**

Add to `.claude/settings.json` in your target project:

```json
{
  "extraKnownMarketplaces": {
    "leo-claude-mktplace": {
      "source": {
        "source": "git",
        "url": "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git"
      }
    }
  }
}
```

### Run Interactive Setup

After installing plugins, run the setup wizard:

```
/initial-setup
```

The wizard handles everything:

- Sets up MCP server (Python venv + dependencies)
- Creates system config (`~/.config/claude/gitea.env`)
- Guides you through adding your API token
- Detects and validates your repository via API
- Creates project config (`.env`)
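For reference, the config files the wizard writes look roughly like this, using the same variable names the pre-wizard manual setup used (all values are placeholders):

```bash
# ~/.config/claude/gitea.env  (system-level, chmod 600)
GITEA_URL=https://gitea.example.com
GITEA_TOKEN=your_token
GITEA_ORG=your_org

# .env  (project-level, in the target project root)
GITEA_REPO=your-repository-name
```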
**For new projects** (when system is already configured):

```
/project-init
```

**After moving a repository:**

```
/project-sync
```

See [docs/CONFIGURATION.md](./docs/CONFIGURATION.md) for manual setup and advanced options.

## Verifying Plugin Installation

After installing plugins, the `/plugin` command may show `(no content)` - this is normal Claude Code behavior and doesn't indicate an error.

**To verify a plugin is installed correctly:**

1. **Check installed plugins list:**

   ```
   /plugin list
   ```

   Look for `✔ plugin-name · Installed`

2. **Test a plugin command directly:**

   ```
   /git-flow:git-status
   /projman:sprint-status
   /clarity-assist:clarify
   ```

   If the command executes and shows output, the plugin is working.

3. **Check for loading errors:**

   ```
   /plugin list
   ```

   Look for any `Plugin Loading Errors` section - this indicates manifest issues.

**Command format:** All plugin commands use the format `/plugin-name:command-name`

| Plugin | Test Command |
|--------|--------------|
| git-flow | `/git-flow:git-status` |
| projman | `/projman:sprint-status` |
| pr-review | `/pr-review:pr-summary` |
| clarity-assist | `/clarity-assist:clarify` |
| doc-guardian | `/doc-guardian:doc-audit` |
| code-sentinel | `/code-sentinel:security-scan` |
| claude-config-maintainer | `/claude-config-maintainer:analyze` |
| cmdb-assistant | `/cmdb-assistant:cmdb-search` |
| data-platform | `/data-platform:ingest` |
| viz-platform | `/viz-platform:chart` |
| contract-validator | `/contract-validator:validate-contracts` |

## Repository Structure

```
leo-claude-mktplace/
├── .claude-plugin/               # Marketplace manifest
│   └── marketplace.json
├── mcp-servers/                  # SHARED MCP servers (v3.0.0+)
│   ├── gitea/                    # Gitea MCP (issues, PRs, wiki)
│   ├── netbox/                   # NetBox MCP (CMDB)
│   ├── data-platform/            # Data engineering (pandas, PostgreSQL, dbt)
│   ├── viz-platform/             # Visualization (DMC, Plotly, theming)
│   └── contract-validator/       # Cross-plugin validation (v5.0.0)
├── plugins/                      # All plugins
│   ├── projman/                  # Sprint management
│   ├── git-flow/                 # Git workflow automation
│   ├── pr-review/                # PR review
│   ├── clarity-assist/           # Prompt optimization
│   ├── data-platform/            # Data engineering
│   ├── viz-platform/             # Visualization
│   ├── contract-validator/       # Cross-plugin validation (NEW)
│   ├── claude-config-maintainer/ # CLAUDE.md optimization
│   ├── cmdb-assistant/           # NetBox CMDB integration
│   ├── doc-guardian/             # Documentation drift detection
│   ├── code-sentinel/            # Security scanning
│   └── project-hygiene/          # Cleanup automation
├── docs/                         # Documentation
│   ├── CANONICAL-PATHS.md        # Path reference
│   └── CONFIGURATION.md          # Setup guide
├── scripts/                      # Setup scripts
└── CHANGELOG.md                  # Version history
```

## Documentation

| Document | Description |
|----------|-------------|
| [CLAUDE.md](./CLAUDE.md) | Main project instructions |
| [CONFIGURATION.md](./docs/CONFIGURATION.md) | Centralized setup guide |
| [COMMANDS-CHEATSHEET.md](./docs/COMMANDS-CHEATSHEET.md) | All commands quick reference |
| [UPDATING.md](./docs/UPDATING.md) | Update guide for the marketplace |
| [CANONICAL-PATHS.md](./docs/CANONICAL-PATHS.md) | Authoritative path reference |
| [DEBUGGING-CHECKLIST.md](./docs/DEBUGGING-CHECKLIST.md) | Systematic troubleshooting guide |
| [CHANGELOG.md](./CHANGELOG.md) | Version history |

## License

MIT License

## Support

- **Issues**: Contact repository maintainer
- **Repository**: `https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git`
---

**docs/CANONICAL-PATHS.md**
**This file defines ALL valid paths in this repository. No exceptions. No inference. No assumptions.**

Last Updated: 2026-01-30 (v5.4.1)

---

## Repository Root Structure

```
leo-claude-mktplace/
├── .claude/                          # Claude Code local settings
├── .claude-plugin/                   # Marketplace manifest
│   └── marketplace.json
├── .scratch/                         # Transient work (auto-cleaned)
├── docs/                             # All documentation
│   ├── architecture/                 # Draw.io diagrams and specs
│   ├── CANONICAL-PATHS.md            # This file - single source of truth
│   ├── COMMANDS-CHEATSHEET.md        # All commands quick reference
│   ├── CONFIGURATION.md              # Centralized configuration guide
│   ├── DEBUGGING-CHECKLIST.md        # Systematic troubleshooting guide
│   └── UPDATING.md                   # Update guide
├── hooks/                            # Shared hooks (if any)
├── mcp-servers/                      # SHARED MCP servers (v3.0.0+)
│   ├── gitea/                        # Gitea MCP server
│   │   ├── mcp_server/
│   │   │   ├── server.py
│   │   │   ├── gitea_client.py
│   │   │   ├── config.py
│   │   │   └── tools/
│   │   │       ├── issues.py
│   │   │       ├── labels.py
│   │   │       ├── wiki.py
│   │   │       ├── milestones.py
│   │   │       ├── dependencies.py
│   │   │       └── pull_requests.py  # NEW in v3.0.0
│   │   ├── requirements.txt
│   │   └── .venv/
│   ├── netbox/                       # NetBox MCP server
│   │   ├── mcp_server/
│   │   ├── requirements.txt
│   │   └── .venv/
│   ├── data-platform/                # Data engineering MCP (NEW v4.0.0)
│   │   ├── mcp_server/
│   │   │   ├── server.py
│   │   │   ├── pandas_tools.py
│   │   │   ├── postgres_tools.py
│   │   │   └── dbt_tools.py
│   │   ├── requirements.txt
│   │   └── .venv/
│   ├── contract-validator/           # Contract validation MCP (NEW v5.0.0)
│   │   ├── mcp_server/
│   │   │   ├── server.py
│   │   │   ├── parse_tools.py
│   │   │   ├── validation_tools.py
│   │   │   └── report_tools.py
│   │   ├── tests/
│   │   ├── requirements.txt
│   │   └── .venv/
│   └── viz-platform/                 # Visualization MCP (NEW v4.1.0)
│       ├── mcp_server/
│       │   ├── server.py
│       │   ├── config.py
│       │   ├── component_registry.py
│       │   ├── dmc_tools.py
│       │   ├── chart_tools.py
│       │   ├── layout_tools.py
│       │   ├── theme_tools.py
│       │   ├── theme_store.py
│       │   └── page_tools.py
│       ├── registry/                 # DMC component JSON registries
│       ├── tests/                    # 94 tests
│       ├── requirements.txt
│       └── .venv/
├── plugins/                          # ALL plugins
│   ├── projman/                      # Sprint management
│   │   ├── .claude-plugin/
│   │   ├── commands/
│   │   ├── agents/
│   │   ├── skills/
│   │   └── claude-md-integration.md
│   ├── doc-guardian/                 # Documentation drift detection
│   │   ├── .claude-plugin/
│   │   ├── hooks/
│   │   ├── commands/
│   │   ├── agents/
│   │   ├── skills/
│   │   └── claude-md-integration.md
│   ├── code-sentinel/                # Security scanning & refactoring
│   │   ├── .claude-plugin/
│   │   ├── hooks/
│   │   ├── commands/
│   │   ├── agents/
│   │   ├── skills/
│   │   └── claude-md-integration.md
│   ├── cmdb-assistant/               # NetBox CMDB integration
│   │   ├── .claude-plugin/
│   │   ├── commands/
│   │   ├── agents/
│   │   └── claude-md-integration.md
│   ├── claude-config-maintainer/
│   │   ├── .claude-plugin/
│   │   ├── commands/
│   │   ├── agents/
│   │   └── claude-md-integration.md
│   ├── project-hygiene/
│   │   ├── .claude-plugin/
│   │   ├── hooks/
│   │   └── claude-md-integration.md
│   ├── clarity-assist/
│   │   ├── .claude-plugin/
│   │   ├── commands/
│   │   ├── agents/
│   │   ├── skills/
│   │   └── claude-md-integration.md
│   ├── git-flow/
│   │   ├── .claude-plugin/
│   │   ├── commands/
│   │   ├── agents/
│   │   ├── skills/
│   │   └── claude-md-integration.md
│   ├── pr-review/
│   │   ├── .claude-plugin/
│   │   ├── commands/
│   │   ├── agents/
│   │   ├── skills/
│   │   └── claude-md-integration.md
│   ├── data-platform/
│   │   ├── .claude-plugin/
│   │   ├── commands/
│   │   ├── agents/
│   │   ├── hooks/
│   │   └── claude-md-integration.md
│   ├── contract-validator/
│   │   ├── .claude-plugin/
│   │   ├── commands/
│   │   ├── agents/
│   │   └── claude-md-integration.md
│   └── viz-platform/
│       ├── .claude-plugin/
│       ├── commands/
│       ├── agents/
│       ├── hooks/
│       └── claude-md-integration.md
├── scripts/                          # Setup and maintenance scripts
│   ├── setup.sh                      # Initial setup (create venvs, config templates)
│   ├── post-update.sh                # Post-update (clear cache, show changelog)
│   ├── check-venv.sh                 # Check if venvs exist (read-only)
│   ├── validate-marketplace.sh       # Marketplace compliance validation
│   ├── verify-hooks.sh               # Verify all hooks use correct event types
│   ├── setup-venvs.sh                # Setup MCP server venvs (create only, never delete)
│   └── release.sh                    # Release automation with version bumping
├── CLAUDE.md
├── README.md
├── LICENSE
```
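The maintenance scripts listed in the tree above are not reproduced in this file. As an illustration only, a read-only venv check in the spirit of `check-venv.sh` might amount to something like this (a hypothetical sketch under the shared-MCP layout, not the actual script):

```bash
#!/usr/bin/env bash
# Hypothetical sketch of scripts/check-venv.sh (read-only; creates nothing).
set -euo pipefail

REPO_ROOT="$(cd "$(dirname "$0")/.." && pwd)"
MISSING=0

# Every shared MCP server is expected to carry its own .venv/
for server in "$REPO_ROOT"/mcp-servers/*/; do
  if [ ! -x "${server}.venv/bin/python" ]; then
    echo "MISSING venv: ${server}" >&2
    MISSING=1
  fi
done

# Non-zero exit signals that scripts/setup.sh (or setup-venvs.sh) should be run
exit "$MISSING"
```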
---

| Type | Pattern | Example |
|------|---------|---------|
| Plugin manifest | `plugins/{plugin-name}/.claude-plugin/plugin.json` | `plugins/projman/.claude-plugin/plugin.json` |
| Plugin commands | `plugins/{plugin-name}/commands/` | `plugins/projman/commands/` |
| Plugin agents | `plugins/{plugin-name}/agents/` | `plugins/projman/agents/` |
| Plugin skills | `plugins/{plugin-name}/skills/` | `plugins/projman/skills/` |
| Plugin integration snippet | `plugins/{plugin-name}/claude-md-integration.md` | `plugins/projman/claude-md-integration.md` |

### MCP Server Paths

MCP servers are **shared at repository root** and configured in `.mcp.json`.

| Context | Pattern | Example |
|---------|---------|---------|
| MCP configuration | `.mcp.json` | `.mcp.json` (at repo root) |
| Shared MCP server | `mcp-servers/{server}/` | `mcp-servers/gitea/` |
| MCP server code | `mcp-servers/{server}/mcp_server/` | `mcp-servers/gitea/mcp_server/` |
| MCP venv | `mcp-servers/{server}/.venv/` | `mcp-servers/gitea/.venv/` |

**Note:** Plugins do NOT have their own `mcp-servers/` directories. All MCP servers are shared at root and configured via `.mcp.json`.

### Documentation Paths

| Type | Location |
|------|----------|
| Architecture diagrams | `docs/architecture/` |
| This file | `docs/CANONICAL-PATHS.md` |
| Update guide | `docs/UPDATING.md` |
| Configuration guide | `docs/CONFIGURATION.md` |
| Commands cheat sheet | `docs/COMMANDS-CHEATSHEET.md` |
| Debugging checklist | `docs/DEBUGGING-CHECKLIST.md` |

---

### Relative Path Calculation

From `.mcp.json` (at root) to `mcp-servers/gitea/`:

```
.mcp.json (at repository root)
→ Uses absolute installed path: ~/.claude/plugins/marketplaces/.../mcp-servers/gitea/run.sh
```

From `.claude-plugin/marketplace.json` to `plugins/projman/`:

```
Result: ./plugins/projman
```
| Wrong | Why | Correct |
|-------|-----|---------|
| `projman/` at root | Plugins go in `plugins/` | `plugins/projman/` |
| `mcp-servers/` inside plugins | MCP servers are shared at root | Use root `mcp-servers/` |
| Plugin-level `.mcp.json` | MCP config is at root | Use root `.mcp.json` |
| Hardcoding absolute paths in source | Breaks portability | Use relative paths or `${CLAUDE_PLUGIN_ROOT}` |

---

## Architecture Note

MCP servers are **shared at repository root** and configured in a single `.mcp.json` file.

**Benefits:**

- Single source of truth for each MCP server
- Updates apply to all plugins automatically
- No duplication - clean plugin structure
- Simple configuration in one place

**Configuration:**

All MCP servers are defined in `.mcp.json` at repository root:

```json
{
  "mcpServers": {
    "gitea": { "command": ".../mcp-servers/gitea/run.sh" },
    "netbox": { "command": ".../mcp-servers/netbox/run.sh" },
    "data-platform": { "command": ".../mcp-servers/data-platform/run.sh" },
    "viz-platform": { "command": ".../mcp-servers/viz-platform/run.sh" },
    "contract-validator": { "command": ".../mcp-servers/contract-validator/run.sh" }
  }
}
```
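The `run.sh` wrappers referenced above are not reproduced in this file. A typical wrapper of this kind (a hypothetical sketch, not the bundled script) just activates the server's own venv and launches the server module:

```bash
#!/usr/bin/env bash
# Hypothetical sketch of mcp-servers/gitea/run.sh -- illustration only.
set -euo pipefail

# Resolve the server directory so the wrapper works from any cwd
HERE="$(cd "$(dirname "$0")" && pwd)"
cd "$HERE"

# Use the venv created by scripts/setup.sh, then start the MCP server module
exec "$HERE/.venv/bin/python" -m mcp_server.server
```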
---

| Date | Change | By |
|------|--------|-----|
| 2026-01-30 | v5.5.0: Removed plugin-level mcp-servers symlinks - all MCP config now in root .mcp.json | Claude Code |
| 2026-01-26 | v5.0.0: Added contract-validator plugin and MCP server | Claude Code |
| 2026-01-26 | v4.1.0: Added viz-platform plugin and MCP server | Claude Code |
| 2026-01-25 | v4.0.0: Added data-platform plugin and MCP server | Claude Code |
| 2026-01-20 | v3.0.0: MCP servers moved to root with symlinks | Claude Code |
| 2026-01-20 | v3.0.0: Added clarity-assist, git-flow, pr-review plugins | Claude Code |
| 2026-01-20 | v3.0.0: Added docs/CONFIGURATION.md | Claude Code |
| 2026-01-20 | v3.0.0: Renamed marketplace to leo-claude-mktplace | Claude Code |
| 2026-01-20 | Removed docs/references/ (obsolete planning docs) | Claude Code |
| 2026-01-19 | Added claude-md-integration.md path pattern | Claude Code |
| 2025-12-15 | Restructured: MCP servers bundled in plugins | Claude Code |
| 2025-12-12 | Initial creation | Claude Code |

---

**docs/COMMANDS-CHEATSHEET.md** (new file)
|
# Plugin Commands Cheat Sheet
|
||||||
|
|
||||||
|
Quick reference for all commands in the Leo Claude Marketplace.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Command Reference Table
|
||||||
|
|
||||||
|
| Plugin | Command | Auto | Manual | Description |
|
||||||
|
|--------|---------|:----:|:------:|-------------|
|
||||||
|
| **projman** | `/sprint-plan` | | X | Start sprint planning with AI-guided architecture analysis and issue creation |
|
||||||
|
| **projman** | `/sprint-start` | | X | Begin sprint execution with dependency analysis and parallel task coordination (requires approval or `--force`) |
|
||||||
|
| **projman** | `/sprint-status` | | X | Check current sprint progress (add `--diagram` for Mermaid visualization) |
|
||||||
|
| **projman** | `/review` | | X | Pre-sprint-close code quality review (debug artifacts, security, error handling) |
|
||||||
|
| **projman** | `/test` | | X | Run tests (`/test run`) or generate tests (`/test gen <target>`) |
|
||||||
|
| **projman** | `/sprint-close` | | X | Complete sprint and capture lessons learned to Gitea Wiki |
|
||||||
|
| **projman** | `/labels-sync` | | X | Synchronize label taxonomy from Gitea |
|
||||||
|
| **projman** | `/setup` | | X | Auto-detect mode or use `--full`, `--quick`, `--sync`, `--clear-cache` |
|
||||||
|
| **projman** | *SessionStart hook* | X | | Detects git remote vs .env mismatch, warns to run `/setup --sync` |
|
||||||
|
| **projman** | `/debug` | | X | Diagnostics (`/debug report`) or investigate (`/debug review`) |
|
||||||
|
| **projman** | `/suggest-version` | | X | Analyze CHANGELOG and recommend semantic version bump |
|
||||||
|
| **projman** | `/proposal-status` | | X | View proposal and implementation hierarchy with status |
|
||||||
|
| **projman** | `/rfc` | | X | RFC lifecycle management (`/rfc create\|list\|review\|approve\|reject`) |
|
||||||
|
| **git-flow** | `/commit` | | X | Create commit with auto-generated conventional message |
|
||||||
|
| **git-flow** | `/commit-push` | | X | Commit and push to remote in one operation |
|
||||||
|
| **git-flow** | `/commit-merge` | | X | Commit current changes, then merge into target branch |
|
||||||
|
| **git-flow** | `/commit-sync` | | X | Full sync: commit, push, and sync with upstream/base branch |
|
||||||
|
| **git-flow** | `/branch-start` | | X | Create new feature/fix/chore branch with naming conventions |
|
||||||
|
| **git-flow** | `/branch-cleanup` | | X | Remove merged branches locally and optionally on remote |
|
||||||
|
| **git-flow** | `/git-status` | | X | Enhanced git status with recommendations |
|
||||||
|
| **git-flow** | `/git-config` | | X | Configure git-flow settings for the project |
|
||||||
|
| **pr-review** | `/initial-setup` | | X | Setup wizard for pr-review (shares Gitea MCP with projman) |
|
||||||
|
| **pr-review** | `/project-init` | | X | Quick project setup for PR reviews |
|
||||||
|
| **pr-review** | `/project-sync` | | X | Sync config with git remote after repo move/rename |
|
||||||
|
| **pr-review** | *SessionStart hook* | X | | Detects git remote vs .env mismatch |
|
||||||
|
| **pr-review** | `/pr-review` | | X | Full multi-agent PR review with confidence scoring |
|
||||||
|
| **pr-review** | `/pr-summary` | | X | Quick summary of PR changes |
|
||||||
|
| **pr-review** | `/pr-findings` | | X | List and filter review findings by category/severity |
|
||||||
|
| **pr-review** | `/pr-diff` | | X | Formatted diff with inline review comments and annotations |
|
||||||
|
| **clarity-assist** | `/clarify` | | X | Full 4-D prompt optimization with ND accommodations |
|
||||||
|
| **clarity-assist** | `/quick-clarify` | | X | Rapid single-pass clarification for simple requests |
|
||||||
|
| **doc-guardian** | `/doc-audit` | | X | Full documentation audit - scans for doc drift |
|
||||||
|
| **doc-guardian** | `/doc-sync` | | X | Synchronize pending documentation updates |
|
||||||
|
| **doc-guardian** | `/changelog-gen` | | X | Generate changelog from conventional commits |
|
||||||
|
| **doc-guardian** | `/doc-coverage` | | X | Documentation coverage metrics by function/class |
|
||||||
|
| **doc-guardian** | `/stale-docs` | | X | Flag documentation behind code changes |
|
||||||
|
| **doc-guardian** | *PostToolUse hook* | X | | Silently detects doc drift on Write/Edit |
|
||||||
|
| **code-sentinel** | `/security-scan` | | X | Full security audit (SQL injection, XSS, secrets, etc.) |
|
||||||
|
| **code-sentinel** | `/refactor` | | X | Apply refactoring patterns to improve code |
|
||||||
|
| **code-sentinel** | `/refactor-dry` | | X | Preview refactoring without applying changes |
|
||||||
|
| **code-sentinel** | *PreToolUse hook* | X | | Scans code before writing; blocks critical issues |
|
||||||
|
| **claude-config-maintainer** | `/config-analyze` | | X | Analyze CLAUDE.md for optimization opportunities |
|
||||||
|
| **claude-config-maintainer** | `/config-optimize` | | X | Optimize CLAUDE.md structure with preview/backup |
|
||||||
|
| **claude-config-maintainer** | `/config-init` | | X | Initialize new CLAUDE.md for a project |
|
||||||
|
| **claude-config-maintainer** | `/config-diff` | | X | Track CLAUDE.md changes over time with behavioral impact |
|
||||||
|
| **claude-config-maintainer** | `/config-lint` | | X | Lint CLAUDE.md for anti-patterns and best practices |
|
||||||
|
| **claude-config-maintainer** | `/config-audit-settings` | | X | Audit settings.local.json permissions (100-point score) |
|
||||||
|
| **claude-config-maintainer** | `/config-optimize-settings` | | X | Optimize permissions (profiles, consolidation, dry-run) |
|
||||||
|
| **claude-config-maintainer** | `/config-permissions-map` | | X | Visual review layer + permission coverage map |
|
||||||
|
| **cmdb-assistant** | `/initial-setup` | | X | Setup wizard for NetBox MCP server |
|
||||||
|
| **cmdb-assistant** | `/cmdb-search` | | X | Search NetBox for devices, IPs, sites |
|
||||||
|
| **cmdb-assistant** | `/cmdb-device` | | X | Manage network devices (create, view, update, delete) |
|
||||||
|
| **cmdb-assistant** | `/cmdb-ip` | | X | Manage IP addresses and prefixes |
|
||||||
|
| **cmdb-assistant** | `/cmdb-site` | | X | Manage sites, locations, racks, and regions |
|
||||||
|
| **cmdb-assistant** | `/cmdb-audit` | | X | Data quality analysis (VMs, devices, naming, roles) |
|
||||||
|
| **cmdb-assistant** | `/cmdb-register` | | X | Register current machine into NetBox with running apps |
|
||||||
|
| **cmdb-assistant** | `/cmdb-sync` | | X | Sync machine state with NetBox (detect drift, update) |
|
||||||
|
| **cmdb-assistant** | `/cmdb-topology` | | X | Infrastructure topology diagrams (rack, network, site views) |
|
||||||
|
| **cmdb-assistant** | `/change-audit` | | X | NetBox audit trail queries with filtering |
|
||||||
|
| **cmdb-assistant** | `/ip-conflicts` | | X | Detect IP conflicts and overlapping prefixes |
|
||||||
|
| **project-hygiene** | *PostToolUse hook* | X | | Removes temp files, warns about unexpected root files |
|
||||||
|
| **data-platform** | `/ingest` | | X | Load data from CSV, Parquet, JSON into DataFrame |
|
||||||
|
| **data-platform** | `/profile` | | X | Generate data profiling report with statistics |
|
||||||
|
| **data-platform** | `/schema` | | X | Explore database schemas, tables, columns |
|
||||||
|
| **data-platform** | `/explain` | | X | Explain query execution plan |
|
||||||
|
| **data-platform** | `/lineage` | | X | Show dbt model lineage and dependencies |
|
||||||
|
| **data-platform** | `/run` | | X | Run dbt models with validation |
|
||||||
|
| **data-platform** | `/lineage-viz` | | X | dbt lineage visualization as Mermaid diagrams |
|
||||||
|
| **data-platform** | `/dbt-test` | | X | Formatted dbt test runner with summary and failure details |
|
||||||
|
| **data-platform** | `/data-quality` | | X | DataFrame quality checks (nulls, duplicates, types, outliers) |
|
||||||
|
| **data-platform** | `/initial-setup` | | X | Setup wizard for data-platform MCP servers |
|
||||||
|
| **data-platform** | *SessionStart hook* | X | | Checks PostgreSQL connection (non-blocking warning) |
|
||||||
|
| **viz-platform** | `/initial-setup` | | X | Setup wizard for viz-platform MCP server |
|
||||||
|
| **viz-platform** | `/chart` | | X | Create Plotly charts with theme integration |
|
||||||
|
| **viz-platform** | `/dashboard` | | X | Create dashboard layouts with filters and grids |
|
||||||
|
| **viz-platform** | `/theme` | | X | Apply existing theme to visualizations |
|
||||||
|
| **viz-platform** | `/theme-new` | | X | Create new custom theme with design tokens |
|
||||||
|
| **viz-platform** | `/theme-css` | | X | Export theme as CSS custom properties |
|
||||||
|
| **viz-platform** | `/component` | | X | Inspect DMC component props and validation |
|
||||||
|
| **viz-platform** | `/chart-export` | | X | Export charts to PNG, SVG, PDF via kaleido |
|
||||||
|
| **viz-platform** | `/accessibility-check` | | X | Color blind validation (WCAG contrast ratios) |
|
||||||
|
| **viz-platform** | `/breakpoints` | | X | Configure responsive layout breakpoints |
|
||||||
|
| **viz-platform** | `/design-review` | | X | Detailed design system audits |
|
||||||
|
| **viz-platform** | `/design-gate` | | X | Binary pass/fail design system validation gates |
|
||||||
|
| **viz-platform** | *SessionStart hook* | X | | Checks DMC version (non-blocking warning) |
|
||||||
|
| **data-platform** | `/data-review` | | X | Comprehensive data integrity audits |
|
||||||
|
| **data-platform** | `/data-gate` | | X | Binary pass/fail data integrity gates |
|
||||||
|
| **contract-validator** | `/validate-contracts` | | X | Full marketplace compatibility validation |
|
||||||
|
| **contract-validator** | `/check-agent` | | X | Validate single agent definition |
|
||||||
|
| **contract-validator** | `/list-interfaces` | | X | Show all plugin interfaces |
|
||||||
|
| **contract-validator** | `/dependency-graph` | | X | Mermaid visualization of plugin dependencies |
|
||||||
|
| **contract-validator** | `/initial-setup` | | X | Setup wizard for contract-validator MCP |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Plugins by Category
|
||||||
|
|
||||||
|
| Category | Plugins | Primary Use |
|
||||||
|
|----------|---------|-------------|
|
||||||
|
| **Setup** | projman, pr-review, cmdb-assistant, data-platform | `/setup`, `/initial-setup` |
|
||||||
|
| **Task Planning** | projman, clarity-assist | Sprint management, requirement clarification |
|
||||||
|
| **Code Quality** | code-sentinel, pr-review | Security scanning, PR reviews |
|
||||||
|
| **Documentation** | doc-guardian, claude-config-maintainer | Doc sync, CLAUDE.md maintenance |
|
||||||
|
| **Git Operations** | git-flow | Commits, branches, workflow automation |
|
||||||
|
| **Infrastructure** | cmdb-assistant | NetBox CMDB management |
|
||||||
|
| **Data Engineering** | data-platform | pandas, PostgreSQL, dbt operations |
|
||||||
|
| **Visualization** | viz-platform | DMC validation, Plotly charts, theming |
|
||||||
|
| **Validation** | contract-validator | Cross-plugin compatibility checks |
|
||||||
|
| **Maintenance** | project-hygiene | Automatic cleanup |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Hook-Based Automation Summary
|
||||||
|
|
||||||
|
| Plugin | Hook Event | Behavior |
|
||||||
|
|--------|------------|----------|
|
||||||
|
| **projman** | SessionStart | Checks git remote vs .env; warns if mismatch detected; suggests sprint planning if issues exist |
|
||||||
|
| **pr-review** | SessionStart | Checks git remote vs .env; warns if mismatch detected |
|
||||||
|
| **doc-guardian** | PostToolUse (Write/Edit) | Tracks documentation drift; auto-updates dependent docs |
|
||||||
|
| **code-sentinel** | PreToolUse (Write/Edit) | Scans for security issues; blocks critical vulnerabilities |
|
||||||
|
| **project-hygiene** | PostToolUse (Write/Edit) | Cleans temp files, warns about misplaced files |
|
||||||
|
| **data-platform** | SessionStart | Checks PostgreSQL connection; non-blocking warning if unavailable |
|
||||||
|
| **viz-platform** | SessionStart | Checks DMC version; non-blocking warning if mismatch detected |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Dev Workflow Examples
|
||||||
|
|
||||||
|
### Example 0: RFC-Driven Feature Development
|
||||||
|
|
||||||
|
Full workflow from idea to implementation using RFCs:
|
||||||
|
|
||||||
|
```
|
||||||
|
1. /clarify # Clarify the feature idea
|
||||||
|
2. /rfc create # Create RFC from clarified spec
|
||||||
|
... refine RFC content ...
|
||||||
|
3. /rfc review 0001 # Submit RFC for review
|
||||||
|
... review discussion ...
|
||||||
|
4. /rfc approve 0001 # Approve RFC for implementation
|
||||||
|
5. /sprint-plan # Select approved RFC for sprint
|
||||||
|
... implement feature ...
|
||||||
|
6. /sprint-close # Complete sprint, RFC marked Implemented
|
||||||
|
```
|
||||||
|
|
||||||
|
### Example 1: Starting a New Feature Sprint
|
||||||
|
|
||||||
|
A typical workflow for planning and executing a feature sprint:
|
||||||
|
|
||||||
|
```
|
||||||
|
1. /clarify # Clarify requirements if vague
|
||||||
|
2. /sprint-plan # Plan the sprint with architecture analysis
|
||||||
|
3. /labels-sync # Ensure labels are up-to-date
|
||||||
|
4. /sprint-start # Begin execution with dependency ordering
|
||||||
|
5. /branch-start feat/... # Create feature branch
|
||||||
|
... implement features ...
|
||||||
|
6. /commit # Commit with conventional message
|
||||||
|
7. /sprint-status --diagram # Check progress with visualization
|
||||||
|
8. /review # Pre-close quality review
|
||||||
|
9. /test run # Verify test coverage
|
||||||
|
10. /sprint-close # Capture lessons learned
|
||||||
|
```
|
||||||
|
|
||||||
|
### Example 2: Daily Development Cycle
|
||||||
|
|
||||||
|
Quick daily workflow with git-flow:
|
||||||
|
|
||||||
|
```
|
||||||
|
1. /git-status # Check current state
|
||||||
|
2. /branch-start fix/... # Start bugfix branch
|
||||||
|
... make changes ...
|
||||||
|
3. /commit # Auto-generate commit message
|
||||||
|
4. /commit-push # Push to remote
|
||||||
|
5. /branch-cleanup # Clean merged branches
|
||||||
|
```

### Example 3: Pull Request Review Workflow

Reviewing a PR before merge:

```
1. /pr-summary             # Quick overview of changes
2. /pr-review              # Full multi-agent review
3. /pr-findings            # Filter findings by severity
4. /security-scan          # Deep security audit if needed
```

### Example 4: Documentation Maintenance

Keeping docs in sync:

```
1. /doc-audit              # Scan for documentation drift
2. /doc-sync               # Apply pending updates
3. /config-analyze         # Check CLAUDE.md health
4. /config-optimize        # Optimize if needed
```

### Example 5: Code Refactoring Session

Safe refactoring with preview:

```
1. /refactor-dry           # Preview opportunities
2. /security-scan          # Baseline security check
3. /refactor               # Apply improvements
4. /test run               # Verify nothing broke
5. /commit                 # Commit with descriptive message
```

### Example 6: Infrastructure Documentation

Managing infrastructure with CMDB:

```
1. /cmdb-search "server"   # Find existing devices
2. /cmdb-device view X     # Check device details
3. /cmdb-ip list           # List available IPs
4. /cmdb-site view Y       # Check site info
```

### Example 6b: Data Engineering Workflow

Working with data pipelines:

```
1. /ingest file.csv        # Load data into DataFrame
2. /profile                # Generate data profiling report
3. /schema                 # Explore database schemas
4. /lineage model_name     # View dbt model dependencies
5. /run model_name         # Execute dbt models
6. /explain "SELECT ..."   # Analyze query execution plan
```

### Example 7: First-Time Setup (New Machine)

Setting up the marketplace for the first time:

```
1. /setup --full           # Full setup: MCP + system config + project
   # → Follow prompts for Gitea URL, org
   # → Add token manually when prompted
   # → Confirm repository name
2. # Restart Claude Code session
3. /labels-sync            # Sync Gitea labels
4. /sprint-plan            # Plan first sprint
```

### Example 8: New Project Setup (System Already Configured)

Adding a new project when system config exists:

```
1. /setup --quick          # Quick project setup (auto-detected)
   # → Confirms detected repo name
   # → Creates .env
2. /labels-sync            # Sync Gitea labels
3. /sprint-plan            # Plan first sprint
```

---

## Quick Tips

- **Hooks run automatically** - doc-guardian and code-sentinel protect you without manual invocation
- **Use `/commit` over `git commit`** - generates better commit messages following conventions
- **Run `/review` before `/sprint-close`** - catches issues before closing the sprint
- **Use `/clarify` for vague requests** - especially helpful for complex requirements
- **`/refactor-dry` is safe** - always preview before applying refactoring changes

---

## MCP Server Requirements

Some plugins require MCP server connectivity:

| Plugin | MCP Server | Purpose |
|--------|------------|---------|
| projman | Gitea | Issues, PRs, wiki, labels, milestones |
| pr-review | Gitea | PR operations and reviews |
| cmdb-assistant | NetBox | Infrastructure CMDB |
| data-platform | pandas, PostgreSQL, dbt | DataFrames, database queries, dbt builds |
| viz-platform | viz-platform | DMC validation, charts, layouts, themes, pages |
| contract-validator | contract-validator | Plugin interface parsing, compatibility validation |

Ensure credentials are configured in `~/.config/claude/gitea.env`, `~/.config/claude/netbox.env`, or `~/.config/claude/postgres.env`.

---

*Last Updated: 2026-02-02*

docs/CONFIGURATION.md (new file, 678 lines)

# Configuration Guide

Centralized configuration documentation for all plugins and MCP servers in the Leo Claude Marketplace.

---

## Quick Start

**After installing the marketplace and plugins via Claude Code:**

```
/setup
```

The interactive wizard auto-detects what's needed and handles everything except manually adding your API tokens.

---

## Setup Flow Diagram

```
┌─────────────────────────────────────────────────────────────────────────────┐
│                              FIRST TIME SETUP                               │
│                             (once per machine)                              │
└─────────────────────────────────────────────────────────────────────────────┘
                                      │
                                      ▼
                               /setup --full
                          (or /setup auto-detects)
                                      │
       ┌──────────────────────────────┼──────────────────────────────┐
       ▼                              ▼                              ▼
┌─────────────┐            ┌─────────────────┐            ┌─────────────────┐
│   PHASE 1   │            │     PHASE 2     │            │     PHASE 3     │
│  Automated  │───────────▶│    Automated    │───────────▶│   Interactive   │
│             │            │                 │            │                 │
│ • Check     │            │ • Find MCP path │            │ • Ask Gitea URL │
│   Python    │            │ • Create venv   │            │ • Ask Org name  │
│   version   │            │ • Install deps  │            │ • Create config │
└─────────────┘            └─────────────────┘            └─────────────────┘
                                      │
                                      ▼
                        ┌───────────────────────────┐
                        │         PHASE 4           │
                        │       USER ACTION         │
                        │                           │
                        │ Edit config file to add   │
                        │ API token (for security)  │
                        │                           │
                        │ nano ~/.config/claude/    │
                        │   gitea.env               │
                        └───────────────────────────┘
                                      │
                                      ▼
       ┌──────────────────────────────┬──────────────────────────────┐
       ▼                              ▼                              ▼
┌─────────────┐            ┌─────────────────┐            ┌─────────────────┐
│   PHASE 5   │            │     PHASE 6     │            │     PHASE 7     │
│ Interactive │            │    Automated    │            │    Automated    │
│             │            │                 │            │                 │
│ • Confirm   │            │ • Create .env   │            │ • Test API      │
│   repo name │            │ • Check         │            │ • Show summary  │
│   from git  │            │   .gitignore    │            │ • Restart note  │
└─────────────┘            └─────────────────┘            └─────────────────┘
                                      │
                                      ▼
                        ┌───────────────────────────┐
                        │     RESTART SESSION       │
                        │                           │
                        │   MCP tools available     │
                        │      after restart        │
                        └───────────────────────────┘


┌─────────────────────────────────────────────────────────────────────────────┐
│                             NEW PROJECT SETUP                               │
│                             (once per project)                              │
└─────────────────────────────────────────────────────────────────────────────┘
                                      │
                      ┌───────────────┴───────────────┐
                      ▼                               ▼
               /setup --quick                      /setup
              (explicit mode)              (auto-detects mode)
                      │                               │
                      │                    ┌──────────┴──────────┐
                      │                    ▼                     ▼
                      │              "Quick setup"         "Full setup"
                      │               (skips to              (re-runs
                      │             project config)         everything)
                      │                    │                     │
                      └────────────────────┴─────────────────────┘
                                           │
                                           ▼
                               ┌─────────────────────┐
                               │   PROJECT CONFIG    │
                               │                     │
                               │ • Detect repo from  │
                               │   git remote        │
                               │ • Confirm with user │
                               │ • Create .env       │
                               │ • Check .gitignore  │
                               └─────────────────────┘
                                           │
                                           ▼
                                         Done!
```

---

## What Runs Automatically vs User Interaction

### `/setup --full` - Full Setup

| Phase | Type | What Happens |
|-------|------|--------------|
| **1. Environment Check** | Automated | Verifies Python 3.10+ is installed |
| **2. MCP Server Setup** | Automated | Finds plugin path, creates venv, installs dependencies |
| **3. System Config Creation** | Interactive | Asks for Gitea URL and organization name |
| **4. Token Entry** | **User Action** | User manually edits config file to add API token |
| **5. Project Detection** | Interactive | Shows detected repo name, asks for confirmation |
| **6. Project Config** | Automated | Creates `.env` file, checks `.gitignore` |
| **7. Validation** | Automated | Tests API connectivity, shows summary |

### `/setup --quick` - Quick Project Setup

| Phase | Type | What Happens |
|-------|------|--------------|
| **1. Pre-flight Check** | Automated | Verifies system config exists |
| **2. Project Detection** | Interactive | Shows detected repo name, asks for confirmation |
| **3. Project Config** | Automated | Creates/updates `.env` file |
| **4. Gitignore Check** | Interactive | Asks to add `.env` to `.gitignore` if missing |

---

## One Command, Three Modes

| Mode | When to Use | What It Does |
|------|-------------|--------------|
| `/setup` | Any time | Auto-detects: runs full, quick, or sync as needed |
| `/setup --full` | First time on a machine | Full setup: MCP server + system config + project config |
| `/setup --quick` | Starting a new project | Quick setup: project config only (assumes system is ready) |
| `/setup --sync` | After repo move/rename | Updates `.env` to match current git remote |

**Auto-detection logic** (a bash sketch follows this list):

1. No system config → **full** mode
2. System config exists, no project config → **quick** mode
3. Both exist, git remote differs → **sync** mode
4. Both exist and match → already configured, offer to reconfigure
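
A minimal sketch of that decision logic in bash, assuming the config paths documented below (`~/.config/claude/gitea.env` for system config, `./.env` for project config); the shipped wizard may differ in detail:

```bash
#!/usr/bin/env bash
# Sketch of /setup mode auto-detection (illustrative, not the shipped wizard).
SYSTEM_CONFIG="$HOME/.config/claude/gitea.env"
PROJECT_CONFIG=".env"

if [ ! -f "$SYSTEM_CONFIG" ]; then
    echo "full"            # no system config yet -> full setup
elif [ ! -f "$PROJECT_CONFIG" ]; then
    echo "quick"           # system ready, project not configured -> quick setup
else
    configured=$(grep '^GITEA_REPO=' "$PROJECT_CONFIG" | cut -d= -f2)
    # Reduce e.g. https://host/owner/repo.git or git@host:owner/repo to owner/repo
    remote=$(git remote get-url origin 2>/dev/null |
             sed -E 's#\.git$##; s#.*[:/]([^/]+/[^/]+)$#\1#')
    if [ "$configured" != "$remote" ]; then
        echo "sync"        # .env out of date with git remote -> sync
    else
        echo "configured"  # everything matches -> offer reconfigure
    fi
fi
```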

**Typical workflow:**

1. Install plugin → run `/setup` (auto-runs full mode)
2. Start new project → run `/setup` (auto-runs quick mode)
3. Repository moved? → run `/setup` (auto-runs sync mode)

---

## Configuration Architecture

This marketplace uses a **hybrid configuration** approach:

```
┌─────────────────────────────────────────────────────────────────┐
│                 SYSTEM-LEVEL (once per machine)                 │
│                       ~/.config/claude/                         │
├─────────────────────────────────────────────────────────────────┤
│ gitea.env     │ GITEA_API_URL, GITEA_API_TOKEN                  │
│ netbox.env    │ NETBOX_API_URL, NETBOX_API_TOKEN                │
│ git-flow.env  │ GIT_WORKFLOW_STYLE, GIT_DEFAULT_BASE, etc.      │
└─────────────────────────────────────────────────────────────────┘
                                │
                                │ Shared across all projects
                                ▼
┌─────────────────────────────────────────────────────────────────┐
│                 PROJECT-LEVEL (once per project)                │
│                      <project-root>/.env                        │
├─────────────────────────────────────────────────────────────────┤
│ GITEA_REPO          │ Repository as owner/repo format           │
│ GIT_WORKFLOW_STYLE  │ (optional) Override system default        │
│ PR_REVIEW_*         │ (optional) PR review settings             │
└─────────────────────────────────────────────────────────────────┘
```

**Benefits** (a short sketch of the layering follows this list):

- Single token per service (update once, use everywhere)
- Easy multi-project setup (just run `/setup` in each project)
- Security (tokens never committed to git, never typed into AI chat)
- Project isolation (each project can override defaults)
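
A minimal bash sketch of how a tool might resolve this layered configuration, assuming the file locations above (system credentials first, then project overrides on top):

```bash
# Load system-level credentials, then overlay project-level settings.
set -a                               # export everything we source
source ~/.config/claude/gitea.env    # system level: shared API credentials
[ -f ./.env ] && source ./.env       # project level: GITEA_REPO + overrides
set +a
echo "Using $GITEA_REPO on $GITEA_API_URL"
```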

---

## Prerequisites

Before running `/setup`:

1. **Python 3.10+** installed
   ```bash
   python3 --version  # Should be 3.10.0 or higher
   ```

2. **Git repository** initialized (for project setup)
   ```bash
   git status  # Should show an initialized repository
   ```

3. **Claude Code** installed and working with the marketplace

---

## Setup Methods

### Method 1: Interactive Wizard (Recommended)

Run the setup wizard in Claude Code:

```
/setup
```

The wizard guides you through each step interactively and auto-detects the appropriate mode.

**Note:** After first-time setup, you'll need to restart your Claude Code session for MCP tools to become available.

### Method 2: Manual Setup

If you prefer to set up manually or need to troubleshoot:

#### Step 1: MCP Server Setup

```bash
# Navigate to marketplace directory
cd /path/to/leo-claude-mktplace

# Set up Gitea MCP server
cd mcp-servers/gitea
python3 -m venv .venv
source .venv/bin/activate
pip install -r requirements.txt
deactivate

# (Optional) Set up NetBox MCP server
cd ../netbox
python3 -m venv .venv
source .venv/bin/activate
pip install -r requirements.txt
deactivate
```

#### Step 2: System Configuration

```bash
mkdir -p ~/.config/claude

# Gitea configuration (credentials only; note the /api/v1 suffix)
cat > ~/.config/claude/gitea.env << 'EOF'
GITEA_API_URL=https://gitea.example.com/api/v1
GITEA_API_TOKEN=your_token_here
EOF
chmod 600 ~/.config/claude/gitea.env
```

#### Step 3: Project Configuration

In each project root:

```bash
cat > .env << 'EOF'
GITEA_REPO=your-organization/your-repo-name
EOF
```

Add `.env` to `.gitignore` if it is not already there.

### Method 3: Automation Script (CI/Scripting)

For automated setups or CI environments:

```bash
cd /path/to/leo-claude-mktplace
./scripts/setup.sh
```

This script is useful for CI/CD pipelines and bulk provisioning.

---

## Configuration Reference

### System-Level Files

Located in `~/.config/claude/`:

| File | Required By | Purpose |
|------|-------------|---------|
| `gitea.env` | projman, pr-review | Gitea API credentials |
| `netbox.env` | cmdb-assistant | NetBox API credentials |
| `git-flow.env` | git-flow | Default git workflow settings |

### Gitea Configuration

```bash
# ~/.config/claude/gitea.env
GITEA_API_URL=https://gitea.example.com/api/v1
GITEA_API_TOKEN=your_gitea_token_here
```

| Variable | Description | Example |
|----------|-------------|---------|
| `GITEA_API_URL` | Gitea API endpoint (with `/api/v1`) | `https://gitea.example.com/api/v1` |
| `GITEA_API_TOKEN` | Personal access token | `abc123...` |

**Note:** `GITEA_REPO` is configured at the project level in `owner/repo` format, since different projects may belong to different organizations.

**Generating a Gitea Token:**

1. Log into Gitea → **User Icon** → **Settings**
2. **Applications** tab → **Manage Access Tokens**
3. **Generate New Token** with permissions:
   - `repo` (all sub-permissions)
   - `read:org`
   - `read:user`
   - `write:repo` (for wiki access)
4. Copy the token immediately (it is shown only once)

### NetBox Configuration

```bash
# ~/.config/claude/netbox.env
NETBOX_API_URL=https://netbox.example.com
NETBOX_API_TOKEN=your_netbox_token_here
```

| Variable | Description | Example |
|----------|-------------|---------|
| `NETBOX_API_URL` | NetBox base URL | `https://netbox.example.com` |
| `NETBOX_API_TOKEN` | API token | `abc123...` |

### Git-Flow Configuration

```bash
# ~/.config/claude/git-flow.env
GIT_WORKFLOW_STYLE=feature-branch
GIT_DEFAULT_BASE=development
GIT_AUTO_DELETE_MERGED=true
GIT_AUTO_PUSH=false
GIT_PROTECTED_BRANCHES=main,master,development,staging,production
GIT_COMMIT_STYLE=conventional
GIT_CO_AUTHOR=true
```

| Variable | Default | Description |
|----------|---------|-------------|
| `GIT_WORKFLOW_STYLE` | `feature-branch` | Branching strategy |
| `GIT_DEFAULT_BASE` | `development` | Default base branch |
| `GIT_AUTO_DELETE_MERGED` | `true` | Delete merged branches |
| `GIT_AUTO_PUSH` | `false` | Auto-push after commit |
| `GIT_PROTECTED_BRANCHES` | `main,master,...` | Protected branches |
| `GIT_COMMIT_STYLE` | `conventional` | Commit message style |
| `GIT_CO_AUTHOR` | `true` | Include Claude co-author |

---

## Project-Level Configuration

Create `.env` in each project root:

```bash
# Required for projman, pr-review (use owner/repo format)
GITEA_REPO=your-organization/your-repo-name

# Optional: Override git-flow defaults
GIT_WORKFLOW_STYLE=pr-required
GIT_DEFAULT_BASE=main

# Optional: PR review settings
PR_REVIEW_CONFIDENCE_THRESHOLD=0.5
PR_REVIEW_AUTO_SUBMIT=false
```

| Variable | Required | Description |
|----------|----------|-------------|
| `GITEA_REPO` | Yes | Repository in `owner/repo` format (e.g., `my-org/my-repo`) |
| `GIT_WORKFLOW_STYLE` | No | Override system default |
| `PR_REVIEW_*` | No | PR review settings |

---

## Plugin Configuration Summary

| Plugin | System Config | Project Config | Setup Command |
|--------|---------------|----------------|---------------|
| **projman** | gitea.env | .env (GITEA_REPO=owner/repo) | `/setup` |
| **pr-review** | gitea.env | .env (GITEA_REPO=owner/repo) | `/initial-setup` |
| **git-flow** | git-flow.env (optional) | .env (optional) | None needed |
| **clarity-assist** | None | None | None needed |
| **cmdb-assistant** | netbox.env | None | `/initial-setup` |
| **data-platform** | postgres.env | .env (optional) | `/initial-setup` |
| **viz-platform** | None | .env (optional DMC_VERSION) | `/initial-setup` |
| **doc-guardian** | None | None | None needed |
| **code-sentinel** | None | None | None needed |
| **project-hygiene** | None | None | None needed |
| **claude-config-maintainer** | None | None | None needed |
| **contract-validator** | None | None | `/initial-setup` |

---

## Multi-Project Workflow

Once system-level config is set up, adding new projects is simple:

```
cd ~/projects/new-project
/setup
```

The command auto-detects that system config exists and runs quick project setup.

---

## Installing Plugins to Consumer Projects

The marketplace provides scripts to install plugins into consumer projects. This sets up the MCP server connections and adds CLAUDE.md integration snippets.

### Install a Plugin

```bash
cd /path/to/leo-claude-mktplace
./scripts/install-plugin.sh <plugin-name> <target-project-path>
```

**Examples:**

```bash
# Install data-platform to a portfolio project
./scripts/install-plugin.sh data-platform ~/projects/personal-portfolio

# Install multiple plugins
./scripts/install-plugin.sh viz-platform ~/projects/personal-portfolio
./scripts/install-plugin.sh projman ~/projects/personal-portfolio
```

**What it does** (a sketch of step 2 follows this list):

1. Validates the plugin exists in the marketplace
2. Adds an MCP server entry to the target's `.mcp.json` (if the plugin has an MCP server)
3. Appends an integration snippet to the target's `CLAUDE.md`
4. Reports changes and lists available commands
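
A minimal sketch of how step 2 might merge an entry into a consumer project's `.mcp.json` with jq (the server name, command, and paths are illustrative; the shipped `install-plugin.sh` may do this differently):

```bash
# Merge an MCP server entry into the target project's .mcp.json (illustrative).
TARGET=~/projects/personal-portfolio
ENTRY='{"command": "/path/to/leo-claude-mktplace/mcp-servers/gitea/.venv/bin/python",
        "args": ["-m", "mcp_server.server"]}'

jq --argjson entry "$ENTRY" \
   '.mcpServers.gitea = $entry' \
   "$TARGET/.mcp.json" > "$TARGET/.mcp.json.tmp" \
  && mv "$TARGET/.mcp.json.tmp" "$TARGET/.mcp.json"
```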

**After installation:** Restart your Claude Code session for MCP tools to become available.

### Uninstall a Plugin

```bash
./scripts/uninstall-plugin.sh <plugin-name> <target-project-path>
```

Removes the MCP server entry and the CLAUDE.md integration section.

### List Installed Plugins

```bash
./scripts/list-installed.sh <target-project-path>
```

Shows which marketplace plugins are installed, partially installed, or available.

**Output example:**

```
✓ Fully Installed:
  PLUGIN          VERSION   DESCRIPTION
  ------          -------   -----------
  data-platform   1.3.0     pandas, PostgreSQL, and dbt integration...
  viz-platform    1.1.0     DMC validation, Plotly charts, and theming...

○ Available (not installed):
  projman         3.4.0     Sprint planning and project management...
```

### Plugins with MCP Servers

Not all plugins have MCP servers. The install script handles this automatically:

| Plugin | Has MCP Server | Notes |
|--------|---------------|-------|
| data-platform | ✓ | pandas, PostgreSQL, dbt tools |
| viz-platform | ✓ | DMC validation, chart, theme tools |
| contract-validator | ✓ | Plugin compatibility validation |
| cmdb-assistant | ✓ (via netbox) | NetBox CMDB tools |
| projman | ✓ (via gitea) | Issue, wiki, PR tools |
| pr-review | ✓ (via gitea) | PR review tools |
| git-flow | ✗ | Commands only |
| doc-guardian | ✗ | Commands and hooks only |
| code-sentinel | ✗ | Commands and hooks only |
| clarity-assist | ✗ | Commands only |

### Script Requirements

- **jq** must be installed (`sudo apt install jq`)
- Scripts are idempotent (safe to run multiple times)

---

## Agent Model Selection

Marketplace agents specify their preferred model using Claude Code's `model` frontmatter field. This allows cost/performance optimization per agent.

### Supported Values

| Value | Description |
|-------|-------------|
| `sonnet` | Default. Balanced performance and cost. |
| `opus` | Higher reasoning depth. Use for complex analysis. |
| `haiku` | Faster, lower cost. Use for mechanical tasks. |
| `inherit` | Use the session's current model setting. |

### How It Works

Each agent in `plugins/{plugin}/agents/{agent}.md` has frontmatter like:

```yaml
---
name: planner
description: Sprint planning agent - thoughtful architecture analysis
model: sonnet
---
```

Claude Code reads this field when invoking the agent as a subagent.

### Model Assignments

Agents are assigned models based on their task complexity:

| Model | Agents | Rationale |
|-------|--------|-----------|
| **sonnet** | Planner, Orchestrator, Executor, Code Reviewer, Coordinator, Security Reviewers, Performance Analyst, Data Advisor, Data Analysis, Design Reviewer, Layout Builder, Full Validation, Doc Analyzer, Clarity Coach, Maintainer, CMDB Assistant, Refactor Advisor | Standard reasoning, tool orchestration, code generation |
| **haiku** | Maintainability Auditor, Test Validator, Component Check, Theme Setup, Agent Check, Data Ingestion, Git Assistant | Pattern matching, quick validation, mechanical tasks |

### Overriding Model Selection

**Per-agent override:** Edit the `model:` field in the agent file:

```bash
# Change executor to use opus for heavy implementation work
nano plugins/projman/agents/executor.md
# Change "model: sonnet" to "model: opus"
```
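
The same edit can be scripted; a one-line sketch, assuming the frontmatter line is exactly `model: sonnet`:

```bash
# Flip one agent's model in place (illustrative; check the file afterwards).
sed -i 's/^model: sonnet$/model: opus/' plugins/projman/agents/executor.md
```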

**Session-level:** Users on an Opus subscription can set an agent's model to `inherit` so it uses whatever model the session is using.

### Best Practices

1. **Default to sonnet** - good balance for most tasks
2. **Use haiku for speed-sensitive agents** - sub-agents dispatched in parallel, read-only tasks
3. **Reserve opus for heavy analysis** - only when sonnet's reasoning isn't sufficient
4. **Use inherit sparingly** - only when you want session-level control

---

## Automatic Validation Features

### API Validation

When running `/setup`, the command:

1. **Detects** the organization and repository from the git remote URL
2. **Validates** via the Gitea API: `GET /api/v1/repos/{org}/{repo}`
3. **Auto-fills** if the repository exists and is accessible (no confirmation needed)
4. **Asks for confirmation** only if validation fails (404 or permission error)

This catches typos and permission issues before the configuration is saved.
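
The same check can be reproduced by hand; a sketch using the credentials file documented above (the org and repo values are placeholders):

```bash
source ~/.config/claude/gitea.env
# 200 = repo exists and the token can see it; 404 = typo or no permission
curl -s -o /dev/null -w "%{http_code}\n" \
  -H "Authorization: token $GITEA_API_TOKEN" \
  "$GITEA_API_URL/repos/my-org/my-repo"
```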

### Mismatch Detection (SessionStart Hook)

When you start a Claude Code session, a hook automatically:

1. Reads `GITEA_REPO` (in `owner/repo` format) from `.env`
2. Compares it with the current `git remote get-url origin`
3. **Warns** if a mismatch is detected: "Repository location mismatch. Run `/setup --sync` to update."

This helps when you:

- Move a repository to a different organization
- Rename a repository
- Clone a repo but forget to update `.env`
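
A minimal sketch of such a SessionStart check in bash (the URL normalization here is simplified; the shipped hook may handle more remote shapes):

```bash
#!/usr/bin/env bash
# Compare GITEA_REPO in .env against the git remote (illustrative sketch).
configured=$(grep '^GITEA_REPO=' .env 2>/dev/null | cut -d= -f2)
remote=$(git remote get-url origin 2>/dev/null)
# Reduce e.g. https://host/owner/repo.git or git@host:owner/repo to owner/repo
actual=$(echo "$remote" | sed -E 's#\.git$##; s#.*[:/]([^/]+/[^/]+)$#\1#')

if [ -n "$configured" ] && [ "$configured" != "$actual" ]; then
    echo "Repository location mismatch ($configured vs $actual)."
    echo "Run /setup --sync to update."
fi
```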

---

## Verification

### Test Gitea Connection

```bash
source ~/.config/claude/gitea.env
curl -H "Authorization: token $GITEA_API_TOKEN" "$GITEA_API_URL/user"
```

### Verify Project Setup

In Claude Code, after restarting your session:

```
/labels-sync
```

If this works, your setup is complete.

---

## Troubleshooting

### MCP tools not available

**Cause:** Session wasn't restarted after setup.
**Solution:** Exit Claude Code and start a new session.

### "Configuration not found" error

```bash
# Check system config exists
ls -la ~/.config/claude/gitea.env

# Check permissions (should be 600)
stat ~/.config/claude/gitea.env
```

### Authentication failed

```bash
# Test token directly
source ~/.config/claude/gitea.env
curl -H "Authorization: token $GITEA_API_TOKEN" "$GITEA_API_URL/user"
```

If you get a 401, regenerate your token in Gitea.

### MCP server won't start

```bash
# Check venv exists
ls /path/to/mcp-servers/gitea/.venv

# If missing, create the venv (do NOT delete existing venvs)
cd /path/to/mcp-servers/gitea
python3 -m venv .venv
source .venv/bin/activate
pip install -r requirements.txt
deactivate
```

### Wrong repository

```bash
# Check project .env
cat .env

# Verify GITEA_REPO is in owner/repo format and matches Gitea exactly
# Example: GITEA_REPO=my-org/my-repo
```

---

## Security Best Practices

1. **Never commit tokens**
   - Keep credentials in `~/.config/claude/` only
   - Add `.env` to `.gitignore`

2. **Secure configuration files**
   ```bash
   chmod 600 ~/.config/claude/*.env
   ```

3. **Never type tokens into AI chat**
   - Always edit config files directly in your editor
   - The `/setup` wizard respects this

4. **Rotate tokens periodically**
   - Every 6-12 months
   - Immediately if compromised

5. **Minimum permissions**
   - Only grant required token permissions
   - Use separate tokens for different environments

docs/DEBUGGING-CHECKLIST.md (new file, 291 lines)

# Debugging Checklist for Marketplace Troubleshooting

**Purpose:** Systematic approach to diagnosing and fixing plugin loading issues.

Last Updated: 2026-01-28

---

## Step 1: Identify the Loading Path

Claude Code loads plugins from different locations depending on context:

| Location | Path | When Used |
|----------|------|-----------|
| **Source** | `~/claude-plugins-work/` | When developing in this directory |
| **Installed** | `~/.claude/plugins/marketplaces/leo-claude-mktplace/` | After marketplace install |
| **Cache** | `~/.claude/` | Plugin metadata, settings |

**Determine which path Claude is using:**

```bash
# Check if the installed marketplace exists
ls -la ~/.claude/plugins/marketplaces/leo-claude-mktplace/

# Check Claude's current plugin loading
cat ~/.claude/settings.local.json | grep -A5 "mcpServers"
```

**Key insight:** If you're editing the source but Claude loads the installed copy, your changes won't take effect.

---

## Step 2: Verify Files Exist at Runtime Location

Check the files Claude will actually load:

```bash
# For the installed marketplace
RUNTIME=~/.claude/plugins/marketplaces/leo-claude-mktplace

# Check MCP servers exist
ls -la $RUNTIME/mcp-servers/gitea/
ls -la $RUNTIME/mcp-servers/netbox/

# Check plugin manifests
ls -la $RUNTIME/plugins/projman/.claude-plugin/plugin.json
ls -la $RUNTIME/plugins/pr-review/.claude-plugin/plugin.json

# Check .mcp.json files
cat $RUNTIME/plugins/projman/.mcp.json
```

---

## Step 3: Verify Virtual Environments Exist

**This is the most common failure point after installation.**

MCP servers require Python venvs to exist at the INSTALLED location:

```bash
RUNTIME=~/.claude/plugins/marketplaces/leo-claude-mktplace

# Check venvs exist
ls -la $RUNTIME/mcp-servers/gitea/.venv/bin/python
ls -la $RUNTIME/mcp-servers/netbox/.venv/bin/python

# If missing, create them:
cd $RUNTIME && ./scripts/setup.sh
```

**Common error:** "X MCP servers failed to start" means the venvs don't exist in the installed path.

---

## Step 4: Verify MCP Configuration

Check that `.mcp.json` at the marketplace root is correctly configured:

```bash
RUNTIME=~/.claude/plugins/marketplaces/leo-claude-mktplace

# Check .mcp.json exists and has valid content
cat $RUNTIME/.mcp.json | jq '.mcpServers | keys'

# Should list: gitea, netbox, data-platform, viz-platform, contract-validator
```

---

## Step 5: Test MCP Server Startup

Manually test whether an MCP server can start:

```bash
RUNTIME=~/.claude/plugins/marketplaces/leo-claude-mktplace

# Test Gitea MCP
cd $RUNTIME/mcp-servers/gitea
PYTHONPATH=. .venv/bin/python -c "from mcp_server.server import main; print('OK')"

# Test NetBox MCP
cd $RUNTIME/mcp-servers/netbox
PYTHONPATH=. .venv/bin/python -c "from mcp_server.server import main; print('OK')"
```

**If the import fails:** Check that requirements.txt was installed, and check Python version compatibility.
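
One quick way to spot missing packages is to diff the pins against what the venv actually has; a sketch that only lines up cleanly when requirements.txt pins exact `==` versions:

```bash
# Lines printed are requirements entries with no exact match installed.
.venv/bin/pip freeze | sort > /tmp/installed.txt
sort requirements.txt > /tmp/required.txt
comm -13 /tmp/installed.txt /tmp/required.txt
```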

---

## Step 6: Verify Configuration Files

Check environment variables are set:

```bash
# System-level credentials (should exist)
cat ~/.config/claude/gitea.env
# Should contain: GITEA_API_URL, GITEA_API_TOKEN

cat ~/.config/claude/netbox.env
# Should contain: NETBOX_API_URL, NETBOX_API_TOKEN

# Project-level config (in target project)
cat /path/to/project/.env
# Should contain: GITEA_REPO=owner/repo (e.g., my-org/my-repo)
```

---

## Step 7: Verify Hooks Configuration

Check hooks are valid:

```bash
RUNTIME=~/.claude/plugins/marketplaces/leo-claude-mktplace

# List all hooks.json files
find $RUNTIME/plugins -name "hooks.json" -exec echo "=== {} ===" \; -exec cat {} \;

# Verify hook events are valid
# Valid: PreToolUse, PostToolUse, UserPromptSubmit, SessionStart, SessionEnd,
#        Notification, Stop, SubagentStop, PreCompact
# INVALID: task-completed, file-changed, git-commit-msg-needed
```
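
To automate that check, a small jq sketch that prints any event name outside the valid set (this assumes each hooks.json keys events under a top-level `hooks` object, as in the standard schema; adjust if the shipped files nest differently):

```bash
# Requires $RUNTIME from the block above.
VALID='["PreToolUse","PostToolUse","UserPromptSubmit","SessionStart","SessionEnd","Notification","Stop","SubagentStop","PreCompact"]'
find $RUNTIME/plugins -name "hooks.json" | while read -r f; do
  jq -r --argjson valid "$VALID" \
    '.hooks // {} | keys[] | select(. as $k | $valid | index($k) | not)
     | "INVALID event: \(.)"' "$f" | sed "s|^|$f: |"
done
```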

---

## Quick Diagnostic Commands

Run these to quickly identify issues:

```bash
RUNTIME=~/.claude/plugins/marketplaces/leo-claude-mktplace

echo "=== Installation Status ==="
[ -d "$RUNTIME" ] && echo "Installed: YES" || echo "Installed: NO"

echo -e "\n=== Virtual Environments ==="
[ -f "$RUNTIME/mcp-servers/gitea/.venv/bin/python" ] && echo "Gitea venv: OK" || echo "Gitea venv: MISSING"
[ -f "$RUNTIME/mcp-servers/netbox/.venv/bin/python" ] && echo "NetBox venv: OK" || echo "NetBox venv: MISSING"

echo -e "\n=== MCP Configuration ==="
[ -f "$RUNTIME/.mcp.json" ] && echo ".mcp.json: OK" || echo ".mcp.json: MISSING"

echo -e "\n=== Config Files ==="
[ -f ~/.config/claude/gitea.env ] && echo "gitea.env: OK" || echo "gitea.env: MISSING"
[ -f ~/.config/claude/netbox.env ] && echo "netbox.env: OK" || echo "netbox.env: MISSING"
```

---

## Common Issues and Fixes

| Issue | Symptom | Fix |
|-------|---------|-----|
| Missing venvs | "X MCP servers failed" | `cd ~/.claude/plugins/marketplaces/leo-claude-mktplace && ./scripts/setup.sh` |
| Missing .mcp.json | MCP tools not available | Check `.mcp.json` exists at marketplace root |
| Wrong path edits | Changes don't take effect | Edit installed path or reinstall after source changes |
| Missing credentials | MCP connection errors | Create `~/.config/claude/gitea.env` with API credentials |
| Invalid hook events | Hooks don't fire | Use only valid event names (see Step 7) |
| Gitea issues not closing | Merged to non-default branch | Manually close issues (see below) |
| MCP changes not taking effect | Session caching | Restart Claude Code session (see below) |

### Gitea Auto-Close Behavior

**Issue:** Using `Closes #XX` or `Fixes #XX` in commit/PR messages does NOT auto-close issues when merging to `development`.

**Root Cause:** Gitea only auto-closes issues when merging to the **default branch** (typically `main` or `master`). Merging to `development`, `staging`, or any other branch will NOT trigger auto-close.

**Workaround:**

1. Use the Gitea MCP tool to manually close issues after merging to development:
   ```
   mcp__plugin_projman_gitea__update_issue(issue_number=XX, state="closed")
   ```
2. Or close issues via the Gitea web UI
3. The auto-close keywords will still work when the changes are eventually merged to `main`

**Recommendation:** Include the `Closes #XX` keywords in commits anyway - they'll work when the final merge to `main` happens.

### MCP Session Restart Requirement

**Issue:** Changes to MCP servers, hooks, or plugin configuration don't take effect immediately.

**Root Cause:** Claude Code loads MCP tools and plugin configuration at session start. These are cached in session memory and not reloaded dynamically.

**What requires a session restart:**

- MCP server code changes (Python files in `mcp-servers/`)
- Changes to `.mcp.json` files
- Changes to `hooks/hooks.json`
- Changes to `plugin.json`
- Adding new MCP tools or modifying tool signatures

**What does NOT require a restart:**

- Command/skill markdown files (`.md`) - these are read on invocation
- Agent markdown files - read when the agent is invoked

**Correct workflow after plugin changes:**

1. Make changes to source files
2. Run `./scripts/verify-hooks.sh` to validate
3. Inform the user: "Please restart Claude Code for changes to take effect"
4. **Do NOT clear cache mid-session** - see the "Cache Clearing" section

---

## After Fixing Issues

1. **Restart Claude Code** - plugins are loaded at startup
2. **Verify the fix works** - run a simple command that uses the MCP
3. **Document the issue** - if it's a new failure mode, add it to this checklist

---

## Cache Clearing: When It's Safe vs Destructive

**⚠️ CRITICAL: Never clear plugin cache mid-session.**

### Why Cache Clearing Breaks MCP Tools

When Claude Code starts a session:

1. MCP tools are loaded from the cache directory
2. Tool definitions include **absolute paths** to the venv (e.g., `~/.claude/plugins/cache/.../venv/`)
3. These paths are cached in session memory
4. Deleting the cache removes the venv, but the session still references the old paths
5. Any MCP tool making HTTP requests fails with TLS certificate errors

### When Cache Clearing is SAFE

| Scenario | Safe? | Action |
|----------|-------|--------|
| Before starting Claude Code | ✅ Yes | Clear cache, then start session |
| Between sessions | ✅ Yes | Clear cache after `/exit`, before next session |
| During a session | ❌ NO | Never - will break MCP tools |
| After plugin source edits | ❌ NO | Restart session instead |

### Recovery: MCP Tools Broken Mid-Session

If you accidentally cleared the cache during a session and MCP tools fail:

```
Error: Could not find a suitable TLS CA certificate bundle, invalid path:
/home/.../.claude/plugins/cache/.../certifi/cacert.pem
```

**Fix:**

1. Exit the current session (`/exit` or Ctrl+C)
2. Start a new Claude Code session
3. MCP tools will reload from the reinstalled cache

### Correct Workflow for Plugin Development

1. Make changes to plugin source files
2. Run `./scripts/verify-hooks.sh` (verifies hook types)
3. Tell the user: "Please restart Claude Code for changes to take effect"
4. **Do NOT clear cache** - session restart handles reloading

---

## Automated Diagnostics

Use these commands for automated checking:

- `/debug report` - Run full diagnostics, create an issue if problems are found
- `/debug review` - Investigate existing diagnostic issues and propose fixes

---

## Related Documentation

- `CLAUDE.md` - Installation Paths and Troubleshooting sections
- `docs/CONFIGURATION.md` - Setup and configuration guide
- `docs/UPDATING.md` - Update procedures

docs/UPDATING.md (modified, 173 lines)

# Updating Leo Claude Marketplace

This guide covers how to update your local installation when new versions are released.

---

## ⚠️ CRITICAL: Run Setup in Installed Location

When Claude Code installs a marketplace, it copies files to `~/.claude/plugins/marketplaces/` but **does NOT create Python virtual environments**. You must run setup manually after installation or update.

**After installing or updating the marketplace:**

```bash
cd ~/.claude/plugins/marketplaces/leo-claude-mktplace && ./scripts/setup.sh
```

This creates the required `.venv` directories for MCP servers. Without this step, **all MCP servers will fail to start**.

---

## Quick Update (Source Repository)

```bash
# 1. Pull latest changes to source
cd /path/to/leo-claude-mktplace
git pull origin main

# 2. Run post-update script (updates source repo venvs)
./scripts/post-update.sh

# 3. CRITICAL: Run setup in installed marketplace location
cd ~/.claude/plugins/marketplaces/leo-claude-mktplace && ./scripts/setup.sh
```

**Then restart your Claude Code session** to load any changes.

---

## What the Post-Update Script Does

1. **Updates Python dependencies** for MCP servers (gitea, netbox)
2. **Shows recent changelog entries** so you know what changed
3. **Validates your configuration** is still compatible

---

## After Updating: Re-run Setup if Needed

### When to Re-run `/initial-setup`

You typically **don't need** to re-run setup after updates. However, re-run if:

- The changelog mentions **new required environment variables**
- The changelog mentions **breaking changes** to configuration
- MCP tools stop working after an update

### For Existing Projects

If an update requires new project-level configuration:

```
/project-init
```

This will detect existing settings and only add what's missing.

---

## Manual Steps After Update

Some updates may require manual configuration changes:

If the changelog mentions new environment variables:

1. Check the variable name and purpose in the changelog
2. Add it to the appropriate config file:
   - System variables → `~/.config/claude/gitea.env` or `netbox.env`
   - Project variables → `.env` in your project root

### New MCP Server Features

If a new MCP server tool is added:

1. The post-update script handles dependency installation
2. Check plugin documentation for usage
3. New tools are available immediately after session restart

### Breaking Changes

Breaking changes will be clearly marked in CHANGELOG.md with migration instructions.

### Setup Script and Configuration Workflow Changes

When updating, review whether changes affect the setup workflow:

1. **Check for setup command changes:**
   ```bash
   git diff HEAD~1 plugins/*/commands/initial-setup.md
   git diff HEAD~1 plugins/*/commands/project-init.md
   git diff HEAD~1 plugins/*/commands/project-sync.md
   ```

2. **Check for hook changes:**
   ```bash
   git diff HEAD~1 plugins/*/hooks/hooks.json
   ```

3. **Check for configuration structure changes:**
   ```bash
   git diff HEAD~1 docs/CONFIGURATION.md
   ```

**If setup commands changed:**

- Review what's new (new validation steps, new prompts, etc.)
- Consider re-running `/initial-setup` or `/project-init` to benefit from improvements
- Existing configurations remain valid unless the changelog notes breaking changes

**If hooks changed:**

- Restart your Claude Code session to load new hooks
- New hooks (like SessionStart validation) activate automatically

**If configuration structure changed:**

- Check if new variables are required
- Run `/project-sync` if repository detection logic improved

---

## Troubleshooting Updates

### Dependencies fail to install

```bash
# Install missing dependencies (do NOT delete .venv)
cd mcp-servers/gitea
source .venv/bin/activate
pip install -r requirements.txt
deactivate
```

### Configuration no longer works

1. Check CHANGELOG.md for breaking changes
2. Run `/initial-setup` to re-validate and fix configuration
3. Compare your config files with the documentation in `docs/CONFIGURATION.md`

### MCP server won't start after update

**Most common cause:** Virtual environments don't exist in the installed marketplace.

```bash
# Fix: Run setup in installed location
cd ~/.claude/plugins/marketplaces/leo-claude-mktplace && ./scripts/setup.sh
```

If that doesn't work:

1. Check Python version: `python3 --version` (requires 3.10+)
2. Verify venv exists in the INSTALLED location:
   ```bash
   ls ~/.claude/plugins/marketplaces/leo-claude-mktplace/mcp-servers/gitea/.venv
   ls ~/.claude/plugins/marketplaces/leo-claude-mktplace/mcp-servers/netbox/.venv
   ```
3. If missing, run setup.sh as shown above
4. Restart your Claude Code session
5. Check logs for specific errors

### "X MCP servers failed" on startup

This almost always means the venvs don't exist in the installed marketplace:

```bash
cd ~/.claude/plugins/marketplaces/leo-claude-mktplace && ./scripts/setup.sh
```

Then restart Claude Code.

### New commands not available

1. Restart your Claude Code session
2. Verify the plugin is still installed
3. Check if the command requires additional setup

---

## Version Pinning

To stay on a specific version:

```bash
git tag

# Checkout specific version
git checkout v3.0.0

# Run post-update
./scripts/post-update.sh
```

---

## Checking Current Version

The version is displayed in the main README.md title and in `CHANGELOG.md`.

```bash
# Check version from changelog
head -20 CHANGELOG.md
```

---

## Getting Help

- Check `docs/CONFIGURATION.md` for the setup guide
- Check `docs/COMMANDS-CHEATSHEET.md` for the command reference
- Review `CHANGELOG.md` for recent changes
- Search existing issues in Gitea

mcp-servers/contract-validator/.doc-guardian-queue (new file, 20 lines)
2026-01-26T14:36:42 | mcp-servers | /home/lmiranda/claude-plugins-work/mcp-servers/contract-validator/mcp_server/parse_tools.py | docs/COMMANDS-CHEATSHEET.md CLAUDE.md
|
||||||
|
2026-01-26T14:37:38 | mcp-servers | /home/lmiranda/claude-plugins-work/mcp-servers/contract-validator/mcp_server/parse_tools.py | docs/COMMANDS-CHEATSHEET.md CLAUDE.md
|
||||||
|
2026-01-26T14:37:48 | mcp-servers | /home/lmiranda/claude-plugins-work/mcp-servers/contract-validator/mcp_server/parse_tools.py | docs/COMMANDS-CHEATSHEET.md CLAUDE.md
|
||||||
|
2026-01-26T14:38:05 | mcp-servers | /home/lmiranda/claude-plugins-work/mcp-servers/contract-validator/mcp_server/parse_tools.py | docs/COMMANDS-CHEATSHEET.md CLAUDE.md
|
||||||
|
2026-01-26T14:38:55 | mcp-servers | /home/lmiranda/claude-plugins-work/mcp-servers/contract-validator/mcp_server/parse_tools.py | docs/COMMANDS-CHEATSHEET.md CLAUDE.md
|
||||||
|
2026-01-26T14:39:35 | mcp-servers | /home/lmiranda/claude-plugins-work/mcp-servers/contract-validator/mcp_server/parse_tools.py | docs/COMMANDS-CHEATSHEET.md CLAUDE.md
|
||||||
|
2026-01-26T14:40:19 | mcp-servers | /home/lmiranda/claude-plugins-work/mcp-servers/contract-validator/mcp_server/parse_tools.py | docs/COMMANDS-CHEATSHEET.md CLAUDE.md
|
||||||
|
2026-01-26T15:02:30 | mcp-servers | /home/lmiranda/claude-plugins-work/mcp-servers/contract-validator/tests/test_parse_tools.py | docs/COMMANDS-CHEATSHEET.md CLAUDE.md
|
||||||
|
2026-01-26T15:02:37 | mcp-servers | /home/lmiranda/claude-plugins-work/mcp-servers/contract-validator/tests/test_parse_tools.py | docs/COMMANDS-CHEATSHEET.md CLAUDE.md
|
||||||
|
2026-01-26T15:03:41 | mcp-servers | /home/lmiranda/claude-plugins-work/mcp-servers/contract-validator/tests/test_report_tools.py | docs/COMMANDS-CHEATSHEET.md CLAUDE.md
|
||||||
|
2026-02-02T10:56:19 | mcp-servers | /home/lmiranda/claude-plugins-work/mcp-servers/contract-validator/mcp_server/validation_tools.py | docs/COMMANDS-CHEATSHEET.md CLAUDE.md
|
||||||
|
2026-02-02T10:57:49 | mcp-servers | /home/lmiranda/claude-plugins-work/mcp-servers/contract-validator/tests/test_validation_tools.py | docs/COMMANDS-CHEATSHEET.md CLAUDE.md
|
||||||
|
2026-02-02T10:58:22 | skills | /home/lmiranda/claude-plugins-work/plugins/contract-validator/skills/mcp-tools-reference.md | README.md
|
||||||
|
2026-02-02T10:58:38 | skills | /home/lmiranda/claude-plugins-work/plugins/contract-validator/skills/validation-rules.md | README.md
|
||||||
|
2026-02-02T10:59:13 | .claude-plugin | /home/lmiranda/claude-plugins-work/.claude-plugin/marketplace.json | CLAUDE.md .claude-plugin/marketplace.json
|
||||||
|
2026-02-02T13:55:33 | skills | /home/lmiranda/claude-plugins-work/plugins/projman/skills/visual-output.md | README.md
|
||||||
|
2026-02-02T13:55:41 | agents | /home/lmiranda/claude-plugins-work/plugins/projman/agents/planner.md | README.md CLAUDE.md
|
||||||
|
2026-02-02T13:55:55 | agents | /home/lmiranda/claude-plugins-work/plugins/projman/agents/orchestrator.md | README.md CLAUDE.md
|
||||||
|
2026-02-02T13:56:14 | agents | /home/lmiranda/claude-plugins-work/plugins/projman/agents/executor.md | README.md CLAUDE.md
|
||||||
|
2026-02-02T13:56:34 | agents | /home/lmiranda/claude-plugins-work/plugins/projman/agents/code-reviewer.md | README.md CLAUDE.md
|
||||||
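Each entry appears to be a pipe-delimited record: an ISO-8601 timestamp, a change category, the absolute path of the source file that changed, then a space-separated list of docs flagged for update. A minimal parsing sketch; the field names are illustrative, inferred from the entries above rather than from any spec:

# Minimal sketch: split one .doc-guardian-queue line into its four fields.
def parse_queue_line(line: str) -> dict:
    timestamp, category, source, docs = [part.strip() for part in line.split("|", 3)]
    return {
        "timestamp": timestamp,   # e.g. 2026-01-26T14:36:42
        "category": category,     # e.g. mcp-servers, skills, agents
        "source": source,         # changed file that triggered the entry
        "docs": docs.split(),     # docs flagged for update
    }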
3
mcp-servers/contract-validator/mcp_server/__init__.py
Normal file
@@ -0,0 +1,3 @@
"""Contract Validator MCP Server - Cross-plugin compatibility validation."""

__version__ = "1.0.0"
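A quick sanity check of the package metadata, assuming the contract-validator directory is the working directory so the package imports as mcp_server:

# Minimal sketch: confirm the package exposes its version string.
from mcp_server import __version__

print(__version__)  # expected: 1.0.0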
415
mcp-servers/contract-validator/mcp_server/parse_tools.py
Normal file
@@ -0,0 +1,415 @@
"""
Parse tools for extracting interfaces from plugin documentation.

Provides structured extraction of:
- Plugin interfaces from README.md (commands, agents, tools)
- Agent definitions from CLAUDE.md (tool sequences, workflows)
"""
import re
import os
from pathlib import Path
from typing import Optional
from pydantic import BaseModel


class ToolInfo(BaseModel):
    """Information about a single tool"""
    name: str
    category: Optional[str] = None
    description: Optional[str] = None


class CommandInfo(BaseModel):
    """Information about a plugin command"""
    name: str
    description: Optional[str] = None


class AgentInfo(BaseModel):
    """Information about a plugin agent"""
    name: str
    description: Optional[str] = None
    tools: list[str] = []


class PluginInterface(BaseModel):
    """Structured plugin interface extracted from README"""
    plugin_name: str
    description: Optional[str] = None
    commands: list[CommandInfo] = []
    agents: list[AgentInfo] = []
    tools: list[ToolInfo] = []
    tool_categories: dict[str, list[str]] = {}
    features: list[str] = []


class ClaudeMdAgent(BaseModel):
    """Agent definition extracted from CLAUDE.md"""
    name: str
    personality: Optional[str] = None
    responsibilities: list[str] = []
    tool_refs: list[str] = []
    workflow_steps: list[str] = []


class ParseTools:
    """Tools for parsing plugin documentation"""

    async def parse_plugin_interface(self, plugin_path: str) -> dict:
        """
        Parse plugin README.md to extract interface declarations.

        Args:
            plugin_path: Path to plugin directory or README.md file

        Returns:
            Structured interface with commands, agents, tools, etc.
        """
        # Resolve path to README
        path = Path(plugin_path)
        if path.is_dir():
            readme_path = path / "README.md"
        else:
            readme_path = path

        if not readme_path.exists():
            return {
                "error": f"README.md not found at {readme_path}",
                "plugin_path": plugin_path
            }

        content = readme_path.read_text()
        plugin_name = self._extract_plugin_name(content, path)

        interface = PluginInterface(
            plugin_name=plugin_name,
            description=self._extract_description(content),
            commands=self._extract_commands(content),
            agents=self._extract_agents_from_readme(content),
            tools=self._extract_tools(content),
            tool_categories=self._extract_tool_categories(content),
            features=self._extract_features(content)
        )

        return interface.model_dump()

    async def parse_claude_md_agents(self, claude_md_path: str) -> dict:
        """
        Parse CLAUDE.md to extract agent definitions and tool sequences.

        Args:
            claude_md_path: Path to CLAUDE.md file

        Returns:
            List of agents with their tool sequences
        """
        path = Path(claude_md_path)

        if not path.exists():
            return {
                "error": f"CLAUDE.md not found at {path}",
                "claude_md_path": claude_md_path
            }

        content = path.read_text()
        agents = self._extract_agents_from_claude_md(content)

        return {
            "file": str(path),
            "agents": [a.model_dump() for a in agents],
            "agent_count": len(agents)
        }

    def _extract_plugin_name(self, content: str, path: Path) -> str:
        """Extract plugin name from content or path"""
        # Try to get from H1 header
        match = re.search(r'^#\s+(.+?)(?:\s+Plugin|\s*$)', content, re.MULTILINE)
        if match:
            name = match.group(1).strip()
            # Handle cases like "# data-platform Plugin"
            name = re.sub(r'\s*Plugin\s*$', '', name, flags=re.IGNORECASE)
            return name

        # Fall back to directory name
        if path.is_dir():
            return path.name
        return path.parent.name

    def _extract_description(self, content: str) -> Optional[str]:
        """Extract plugin description from first paragraph after title"""
        # Get content after H1, before first H2
        match = re.search(r'^#\s+.+?\n\n(.+?)(?=\n##|\n\n##|\Z)', content, re.MULTILINE | re.DOTALL)
        if match:
            desc = match.group(1).strip()
            # Take first paragraph only
            desc = desc.split('\n\n')[0].strip()
            return desc
        return None

    def _extract_commands(self, content: str) -> list[CommandInfo]:
        """Extract commands from Commands section"""
        commands = []

        # Find Commands section
        commands_section = self._extract_section(content, "Commands")
        if not commands_section:
            return commands

        # Parse table format: | Command | Description |
        # Only match actual command names (start with / or alphanumeric)
        table_pattern = r'\|\s*`?(/[a-z][-a-z0-9]*)`?\s*\|\s*([^|]+)\s*\|'
        for match in re.finditer(table_pattern, commands_section):
            cmd_name = match.group(1).strip()
            desc = match.group(2).strip()

            # Skip header row and separators
            if cmd_name.lower() in ('command', 'commands') or cmd_name.startswith('-'):
                continue

            commands.append(CommandInfo(
                name=cmd_name,
                description=desc
            ))

        # Also look for ### `/command-name` format (with backticks)
        cmd_header_pattern = r'^###\s+`(/[a-z][-a-z0-9]*)`\s*\n(.+?)(?=\n###|\n##|\Z)'
        for match in re.finditer(cmd_header_pattern, commands_section, re.MULTILINE | re.DOTALL):
            cmd_name = match.group(1).strip()
            desc_block = match.group(2).strip()
            # Get first line or paragraph as description
            desc = desc_block.split('\n')[0].strip()

            # Don't duplicate if already found in table
            if not any(c.name == cmd_name for c in commands):
                commands.append(CommandInfo(name=cmd_name, description=desc))

        # Also look for ### /command-name format (without backticks)
        cmd_header_pattern2 = r'^###\s+(/[a-z][-a-z0-9]*)\s*\n(.+?)(?=\n###|\n##|\Z)'
        for match in re.finditer(cmd_header_pattern2, commands_section, re.MULTILINE | re.DOTALL):
            cmd_name = match.group(1).strip()
            desc_block = match.group(2).strip()
            # Get first line or paragraph as description
            desc = desc_block.split('\n')[0].strip()

            # Don't duplicate if already found in table
            if not any(c.name == cmd_name for c in commands):
                commands.append(CommandInfo(name=cmd_name, description=desc))

        return commands

    def _extract_agents_from_readme(self, content: str) -> list[AgentInfo]:
        """Extract agents from Agents section in README"""
        agents = []

        # Find Agents section
        agents_section = self._extract_section(content, "Agents")
        if not agents_section:
            return agents

        # Parse table format: | Agent | Description |
        # Only match actual agent names (alphanumeric with dashes/underscores)
        table_pattern = r'\|\s*`?([a-z][-a-z0-9_]*)`?\s*\|\s*([^|]+)\s*\|'
        for match in re.finditer(table_pattern, agents_section):
            agent_name = match.group(1).strip()
            desc = match.group(2).strip()

            # Skip header row and separators
            if agent_name.lower() in ('agent', 'agents') or agent_name.startswith('-'):
                continue

            agents.append(AgentInfo(name=agent_name, description=desc))

        return agents

    def _extract_tools(self, content: str) -> list[ToolInfo]:
        """Extract tool list from Tools Summary or similar section"""
        tools = []

        # Find Tools Summary section
        tools_section = self._extract_section(content, "Tools Summary")
        if not tools_section:
            tools_section = self._extract_section(content, "Tools")
        if not tools_section:
            tools_section = self._extract_section(content, "MCP Server Tools")

        if not tools_section:
            return tools

        # Parse category headers: ### category (N tools)
        category_pattern = r'###\s*(.+?)\s*(?:\((\d+)\s*tools?\))?\s*\n([^#]+)'
        for match in re.finditer(category_pattern, tools_section):
            category = match.group(1).strip()
            tool_list_text = match.group(3).strip()

            # Extract tool names from backtick lists
            tool_names = re.findall(r'`([a-z_]+)`', tool_list_text)
            for name in tool_names:
                tools.append(ToolInfo(name=name, category=category))

        # Also look for inline tool lists without categories
        inline_pattern = r'`([a-z_]+)`'
        all_tool_names = set(t.name for t in tools)
        for match in re.finditer(inline_pattern, tools_section):
            name = match.group(1)
            if name not in all_tool_names:
                tools.append(ToolInfo(name=name))
                all_tool_names.add(name)

        return tools

    def _extract_tool_categories(self, content: str) -> dict[str, list[str]]:
        """Extract tool categories with their tool lists"""
        categories = {}

        tools_section = self._extract_section(content, "Tools Summary")
        if not tools_section:
            tools_section = self._extract_section(content, "Tools")
        if not tools_section:
            return categories

        # Parse category headers: ### category (N tools)
        category_pattern = r'###\s*(.+?)\s*(?:\((\d+)\s*tools?\))?\s*\n([^#]+)'
        for match in re.finditer(category_pattern, tools_section):
            category = match.group(1).strip()
            tool_list_text = match.group(3).strip()

            # Extract tool names from backtick lists
            tool_names = re.findall(r'`([a-z_]+)`', tool_list_text)
            if tool_names:
                categories[category] = tool_names

        return categories

    def _extract_features(self, content: str) -> list[str]:
        """Extract features from Features section"""
        features = []

        features_section = self._extract_section(content, "Features")
        if not features_section:
            return features

        # Parse bullet points
        bullet_pattern = r'^[-*]\s+\*\*(.+?)\*\*'
        for match in re.finditer(bullet_pattern, features_section, re.MULTILINE):
            features.append(match.group(1).strip())

        return features

    def _extract_section(self, content: str, section_name: str) -> Optional[str]:
        """Extract content of a markdown section by header name"""
        # Match ## Section Name - include all content until next ## (same level or higher)
        pattern = rf'^##\s+{re.escape(section_name)}(?:\s*\([^)]*\))?\s*\n(.*?)(?=\n##[^#]|\Z)'
        match = re.search(pattern, content, re.MULTILINE | re.DOTALL | re.IGNORECASE)
        if match:
            return match.group(1).strip()

        # Try ### level - include content until next ## or ###
        pattern = rf'^###\s+{re.escape(section_name)}(?:\s*\([^)]*\))?\s*\n(.*?)(?=\n##|\n###[^#]|\Z)'
        match = re.search(pattern, content, re.MULTILINE | re.DOTALL | re.IGNORECASE)
        if match:
            return match.group(1).strip()

        return None

    def _extract_agents_from_claude_md(self, content: str) -> list[ClaudeMdAgent]:
        """Extract agent definitions from CLAUDE.md"""
        agents = []

        # Look for Four-Agent Model section specifically
        # Match section headers like "### Four-Agent Model (projman)" or "## Four-Agent Model"
        agent_model_match = re.search(
            r'^##[#]?\s+Four-Agent Model.*?\n(.*?)(?=\n##[^#]|\Z)',
            content, re.MULTILINE | re.DOTALL
        )
        agent_model_section = agent_model_match.group(1) if agent_model_match else None

        if agent_model_section:
            # Parse agent table within this section
            # | **Planner** | Thoughtful, methodical | Sprint planning, ... |
            # Match rows where first cell starts with ** (bold) and contains a capitalized word
            agent_table_pattern = r'\|\s*\*\*([A-Z][a-zA-Z\s]+?)\*\*\s*\|\s*([^|]+)\s*\|\s*([^|]+)\s*\|'

            for match in re.finditer(agent_table_pattern, agent_model_section):
                agent_name = match.group(1).strip()
                personality = match.group(2).strip()
                responsibilities = match.group(3).strip()

                # Skip header rows and separator rows
                if agent_name.lower() in ('agent', 'agents', '---', '-', ''):
                    continue
                if 'personality' in personality.lower() or '---' in personality:
                    continue

                # Skip if personality looks like tool names (contains backticks)
                if '`' in personality:
                    continue

                # Extract tool references from responsibilities
                tool_refs = re.findall(r'`([a-z_]+)`', responsibilities)

                # Split responsibilities by comma
                resp_list = [r.strip() for r in responsibilities.split(',')]

                agents.append(ClaudeMdAgent(
                    name=agent_name,
                    personality=personality,
                    responsibilities=resp_list,
                    tool_refs=tool_refs
                ))

        # Also look for agents table in ## Agents section
        agents_section = self._extract_section(content, "Agents")
        if agents_section:
            # Parse table: | Agent | Description |
            table_pattern = r'\|\s*`?([a-z][-a-z0-9_]+)`?\s*\|\s*([^|]+)\s*\|'
            for match in re.finditer(table_pattern, agents_section):
                agent_name = match.group(1).strip()
                desc = match.group(2).strip()

                # Skip header rows
                if agent_name.lower() in ('agent', 'agents', '---', '-'):
                    continue

                # Check if agent already exists
                if not any(a.name.lower() == agent_name.lower() for a in agents):
                    agents.append(ClaudeMdAgent(
                        name=agent_name,
                        responsibilities=[desc] if desc else []
                    ))

        # Look for workflow sections to enrich agent data
        workflow_section = self._extract_section(content, "Workflow")
        if workflow_section:
            # Parse numbered steps
            step_pattern = r'^\d+\.\s+(.+?)$'
            workflow_steps = re.findall(step_pattern, workflow_section, re.MULTILINE)

            # Associate workflow steps with agents mentioned
            for agent in agents:
                for step in workflow_steps:
                    if agent.name.lower() in step.lower():
                        agent.workflow_steps.append(step)
                        # Extract any tool references in the step
                        step_tools = re.findall(r'`([a-z_]+)`', step)
                        agent.tool_refs.extend(t for t in step_tools if t not in agent.tool_refs)

        # Look for agent-specific sections (### Planner Agent)
        agent_section_pattern = r'^###?\s+([A-Z][a-z]+(?:\s+[A-Z][a-z]+)?)\s+Agent\s*\n(.*?)(?=\n##|\n###|\Z)'
        for match in re.finditer(agent_section_pattern, content, re.MULTILINE | re.DOTALL):
            agent_name = match.group(1).strip()
            section_content = match.group(2).strip()

            # Check if agent already exists
            existing = next((a for a in agents if a.name.lower() == agent_name.lower()), None)
            if existing:
                # Add tool refs from this section
                tool_refs = re.findall(r'`([a-z_]+)`', section_content)
                existing.tool_refs.extend(t for t in tool_refs if t not in existing.tool_refs)
            else:
                tool_refs = re.findall(r'`([a-z_]+)`', section_content)
                agents.append(ClaudeMdAgent(
                    name=agent_name,
                    tool_refs=tool_refs
                ))

        return agents
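A minimal standalone usage sketch for ParseTools, assuming the package imports as mcp_server; the plugin path is illustrative:

# Minimal sketch: run the README parser directly and print what it extracts.
import asyncio
from mcp_server.parse_tools import ParseTools

async def demo():
    tools = ParseTools()
    interface = await tools.parse_plugin_interface("plugins/projman")  # illustrative path
    if "error" in interface:
        print(interface["error"])
    else:
        print(interface["plugin_name"], [c["name"] for c in interface["commands"]])

asyncio.run(demo())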
337
mcp-servers/contract-validator/mcp_server/report_tools.py
Normal file
@@ -0,0 +1,337 @@
"""
Report tools for generating compatibility reports and listing issues.

Provides:
- generate_compatibility_report: Full marketplace validation report
- list_issues: Filtered issue listing
"""
import os
from pathlib import Path
from datetime import datetime
from typing import Optional
from pydantic import BaseModel

from .parse_tools import ParseTools
from .validation_tools import ValidationTools, IssueSeverity, IssueType, ValidationIssue


class ReportSummary(BaseModel):
    """Summary statistics for a report"""
    total_plugins: int = 0
    total_commands: int = 0
    total_agents: int = 0
    total_tools: int = 0
    total_issues: int = 0
    errors: int = 0
    warnings: int = 0
    info: int = 0


class ReportTools:
    """Tools for generating reports and listing issues"""

    def __init__(self):
        self.parse_tools = ParseTools()
        self.validation_tools = ValidationTools()

    async def generate_compatibility_report(
        self,
        marketplace_path: str,
        format: str = "markdown"
    ) -> dict:
        """
        Generate a comprehensive compatibility report for all plugins.

        Args:
            marketplace_path: Path to marketplace root directory
            format: Output format ("markdown" or "json")

        Returns:
            Full compatibility report with all findings
        """
        marketplace = Path(marketplace_path)
        plugins_dir = marketplace / "plugins"

        if not plugins_dir.exists():
            return {
                "error": f"Plugins directory not found at {plugins_dir}",
                "marketplace_path": marketplace_path
            }

        # Discover all plugins
        plugins = []
        for item in plugins_dir.iterdir():
            if item.is_dir() and (item / ".claude-plugin").exists():
                plugins.append(item)

        if not plugins:
            return {
                "error": "No plugins found in marketplace",
                "marketplace_path": marketplace_path
            }

        # Parse all plugin interfaces
        interfaces = {}
        all_issues = []
        summary = ReportSummary(total_plugins=len(plugins))

        for plugin_path in plugins:
            interface = await self.parse_tools.parse_plugin_interface(str(plugin_path))
            if "error" not in interface:
                interfaces[interface["plugin_name"]] = interface
                summary.total_commands += len(interface.get("commands", []))
                summary.total_agents += len(interface.get("agents", []))
                summary.total_tools += len(interface.get("tools", []))

        # Run pairwise compatibility checks
        plugin_names = list(interfaces.keys())
        compatibility_results = []

        for i, name_a in enumerate(plugin_names):
            for name_b in plugin_names[i+1:]:
                path_a = plugins_dir / self._find_plugin_dir(plugins_dir, name_a)
                path_b = plugins_dir / self._find_plugin_dir(plugins_dir, name_b)

                result = await self.validation_tools.validate_compatibility(
                    str(path_a), str(path_b)
                )

                if "error" not in result:
                    compatibility_results.append(result)
                    all_issues.extend(result.get("issues", []))

        # Parse CLAUDE.md if it exists
        claude_md = marketplace / "CLAUDE.md"
        agents_from_claude = []
        if claude_md.exists():
            agents_result = await self.parse_tools.parse_claude_md_agents(str(claude_md))
            if "error" not in agents_result:
                agents_from_claude = agents_result.get("agents", [])

        # Validate each agent
        for agent in agents_from_claude:
            agent_result = await self.validation_tools.validate_agent_refs(
                agent["name"],
                str(claude_md),
                [str(p) for p in plugins]
            )
            if "error" not in agent_result:
                all_issues.extend(agent_result.get("issues", []))

        # Count issues by severity
        for issue in all_issues:
            severity = issue.get("severity", "info")
            if isinstance(severity, str):
                severity_str = severity.lower()
            else:
                severity_str = severity.value if hasattr(severity, 'value') else str(severity).lower()

            if "error" in severity_str:
                summary.errors += 1
            elif "warning" in severity_str:
                summary.warnings += 1
            else:
                summary.info += 1

        summary.total_issues = len(all_issues)

        # Generate report
        if format == "json":
            return {
                "generated_at": datetime.now().isoformat(),
                "marketplace_path": marketplace_path,
                "summary": summary.model_dump(),
                "plugins": interfaces,
                "compatibility_checks": compatibility_results,
                "claude_md_agents": agents_from_claude,
                "all_issues": all_issues
            }
        else:
            # Generate markdown report
            report = self._generate_markdown_report(
                marketplace_path,
                summary,
                interfaces,
                compatibility_results,
                agents_from_claude,
                all_issues
            )
            return {
                "generated_at": datetime.now().isoformat(),
                "marketplace_path": marketplace_path,
                "summary": summary.model_dump(),
                "report": report
            }

    def _find_plugin_dir(self, plugins_dir: Path, plugin_name: str) -> str:
        """Find plugin directory by name (handles naming variations)"""
        # Try exact match first
        for item in plugins_dir.iterdir():
            if item.is_dir():
                if item.name.lower() == plugin_name.lower():
                    return item.name
                # Check plugin.json for name
                plugin_json = item / ".claude-plugin" / "plugin.json"
                if plugin_json.exists():
                    import json
                    try:
                        data = json.loads(plugin_json.read_text())
                        if data.get("name", "").lower() == plugin_name.lower():
                            return item.name
                    except Exception:
                        # Malformed plugin.json; skip and keep looking
                        pass
        return plugin_name

    def _generate_markdown_report(
        self,
        marketplace_path: str,
        summary: ReportSummary,
        interfaces: dict,
        compatibility_results: list,
        agents: list,
        issues: list
    ) -> str:
        """Generate markdown formatted report"""
        lines = [
            "# Contract Validation Report",
            "",
            f"**Generated:** {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}",
            f"**Marketplace:** `{marketplace_path}`",
            "",
            "## Summary",
            "",
            "| Metric | Count |",
            "|--------|-------|",
            f"| Plugins | {summary.total_plugins} |",
            f"| Commands | {summary.total_commands} |",
            f"| Agents | {summary.total_agents} |",
            f"| Tools | {summary.total_tools} |",
            f"| **Issues** | **{summary.total_issues}** |",
            f"| - Errors | {summary.errors} |",
            f"| - Warnings | {summary.warnings} |",
            f"| - Info | {summary.info} |",
            "",
        ]

        # Plugin details
        lines.extend([
            "## Plugins",
            "",
        ])

        for name, interface in interfaces.items():
            cmds = len(interface.get("commands", []))
            agents_count = len(interface.get("agents", []))
            tools = len(interface.get("tools", []))
            lines.append(f"### {name}")
            lines.append("")
            lines.append(f"- Commands: {cmds}")
            lines.append(f"- Agents: {agents_count}")
            lines.append(f"- Tools: {tools}")
            lines.append("")

        # Compatibility results
        if compatibility_results:
            lines.extend([
                "## Compatibility Checks",
                "",
            ])

            for result in compatibility_results:
                status = "✓" if result.get("compatible", True) else "✗"
                lines.append(f"### {result['plugin_a']} ↔ {result['plugin_b']} {status}")
                lines.append("")

                if result.get("shared_tools"):
                    lines.append(f"- Shared tools: `{', '.join(result['shared_tools'])}`")
                if result.get("issues"):
                    for issue in result["issues"]:
                        sev = issue.get("severity", "info")
                        if hasattr(sev, 'value'):
                            sev = sev.value
                        lines.append(f"- [{sev.upper()}] {issue['message']}")
                lines.append("")

        # Issues section
        if issues:
            lines.extend([
                "## All Issues",
                "",
                "| Severity | Type | Message |",
                "|----------|------|---------|",
            ])

            for issue in issues:
                sev = issue.get("severity", "info")
                itype = issue.get("issue_type", "unknown")
                msg = issue.get("message", "")

                if hasattr(sev, 'value'):
                    sev = sev.value
                if hasattr(itype, 'value'):
                    itype = itype.value

                # Truncate message for table
                msg_short = msg[:60] + "..." if len(msg) > 60 else msg
                lines.append(f"| {sev} | {itype} | {msg_short} |")

            lines.append("")

        return "\n".join(lines)

    async def list_issues(
        self,
        marketplace_path: str,
        severity: str = "all",
        issue_type: str = "all"
    ) -> dict:
        """
        List validation issues with optional filtering.

        Args:
            marketplace_path: Path to marketplace root directory
            severity: Filter by severity ("error", "warning", "info", "all")
            issue_type: Filter by type ("missing_tool", "interface_mismatch", etc., "all")

        Returns:
            Filtered list of issues
        """
        # Generate full report first
        report = await self.generate_compatibility_report(marketplace_path, format="json")

        if "error" in report:
            return report

        all_issues = report.get("all_issues", [])

        # Filter by severity
        if severity != "all":
            filtered = []
            for issue in all_issues:
                issue_sev = issue.get("severity", "info")
                if hasattr(issue_sev, 'value'):
                    issue_sev = issue_sev.value
                if isinstance(issue_sev, str) and severity.lower() in issue_sev.lower():
                    filtered.append(issue)
            all_issues = filtered

        # Filter by type
        if issue_type != "all":
            filtered = []
            for issue in all_issues:
                itype = issue.get("issue_type", "unknown")
                if hasattr(itype, 'value'):
                    itype = itype.value
                if isinstance(itype, str) and issue_type.lower() in itype.lower():
                    filtered.append(issue)
            all_issues = filtered

        return {
            "marketplace_path": marketplace_path,
            "filters": {
                "severity": severity,
                "issue_type": issue_type
            },
            "total_issues": len(all_issues),
            "issues": all_issues
        }
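Note that list_issues rebuilds the full report on every call, so callers needing several views should prefer one JSON report and filter locally. A minimal usage sketch; the marketplace path is illustrative:

# Minimal sketch: list only the errors for one marketplace checkout.
import asyncio
from mcp_server.report_tools import ReportTools

async def demo():
    report_tools = ReportTools()
    result = await report_tools.list_issues(
        "/home/lmiranda/claude-plugins-work", severity="error"  # illustrative path
    )
    for issue in result.get("issues", []):
        print(issue.get("issue_type"), "-", issue.get("message"))

asyncio.run(demo())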
309
mcp-servers/contract-validator/mcp_server/server.py
Normal file
@@ -0,0 +1,309 @@
"""
MCP Server entry point for Contract Validator.

Provides cross-plugin compatibility validation and Claude.md agent verification
tools to Claude Code via JSON-RPC 2.0 over stdio.
"""
import asyncio
import logging
import json
from mcp.server import Server
from mcp.server.stdio import stdio_server
from mcp.types import Tool, TextContent

from .parse_tools import ParseTools
from .validation_tools import ValidationTools
from .report_tools import ReportTools

# Suppress noisy MCP validation warnings on stderr
logging.basicConfig(level=logging.INFO)
logging.getLogger("root").setLevel(logging.ERROR)
logging.getLogger("mcp").setLevel(logging.ERROR)
logger = logging.getLogger(__name__)


class ContractValidatorMCPServer:
    """MCP Server for cross-plugin compatibility validation"""

    def __init__(self):
        self.server = Server("contract-validator-mcp")
        self.parse_tools = ParseTools()
        self.validation_tools = ValidationTools()
        self.report_tools = ReportTools()

    async def initialize(self):
        """Initialize server."""
        logger.info("Contract Validator MCP Server initialized")

    def setup_tools(self):
        """Register all available tools with the MCP server"""

        @self.server.list_tools()
        async def list_tools() -> list[Tool]:
            """Return list of available tools"""
            tools = [
                # Parse tools (issue #186)
                Tool(
                    name="parse_plugin_interface",
                    description="Parse plugin README.md to extract interface declarations (inputs, outputs, tools)",
                    inputSchema={
                        "type": "object",
                        "properties": {
                            "plugin_path": {
                                "type": "string",
                                "description": "Path to plugin directory or README.md"
                            }
                        },
                        "required": ["plugin_path"]
                    }
                ),
                Tool(
                    name="parse_claude_md_agents",
                    description="Parse Claude.md to extract agent definitions and their tool sequences",
                    inputSchema={
                        "type": "object",
                        "properties": {
                            "claude_md_path": {
                                "type": "string",
                                "description": "Path to CLAUDE.md file"
                            }
                        },
                        "required": ["claude_md_path"]
                    }
                ),
                # Validation tools (issue #187)
                Tool(
                    name="validate_compatibility",
                    description="Validate compatibility between two plugin interfaces",
                    inputSchema={
                        "type": "object",
                        "properties": {
                            "plugin_a": {
                                "type": "string",
                                "description": "Path to first plugin"
                            },
                            "plugin_b": {
                                "type": "string",
                                "description": "Path to second plugin"
                            }
                        },
                        "required": ["plugin_a", "plugin_b"]
                    }
                ),
                Tool(
                    name="validate_agent_refs",
                    description="Validate that all tool references in an agent definition exist",
                    inputSchema={
                        "type": "object",
                        "properties": {
                            "agent_name": {
                                "type": "string",
                                "description": "Name of agent to validate"
                            },
                            "claude_md_path": {
                                "type": "string",
                                "description": "Path to CLAUDE.md containing agent"
                            },
                            "plugin_paths": {
                                "type": "array",
                                "items": {"type": "string"},
                                "description": "Paths to available plugins"
                            }
                        },
                        "required": ["agent_name", "claude_md_path"]
                    }
                ),
                Tool(
                    name="validate_data_flow",
                    description="Validate data flow through an agent's tool sequence",
                    inputSchema={
                        "type": "object",
                        "properties": {
                            "agent_name": {
                                "type": "string",
                                "description": "Name of agent to validate"
                            },
                            "claude_md_path": {
                                "type": "string",
                                "description": "Path to CLAUDE.md containing agent"
                            }
                        },
                        "required": ["agent_name", "claude_md_path"]
                    }
                ),
                Tool(
                    name="validate_workflow_integration",
                    description="Validate that a domain plugin exposes the required advisory interfaces (gate command, review command, advisory agent) expected by projman's domain-consultation skill. Also checks gate contract version compatibility.",
                    inputSchema={
                        "type": "object",
                        "properties": {
                            "plugin_path": {
                                "type": "string",
                                "description": "Path to the domain plugin directory"
                            },
                            "domain_label": {
                                "type": "string",
                                "description": "The Domain/* label it claims to handle, e.g. Domain/Viz"
                            },
                            "expected_contract": {
                                "type": "string",
                                "description": "Expected contract version (e.g., 'v1'). If provided, validates the gate command's contract matches."
                            }
                        },
                        "required": ["plugin_path", "domain_label"]
                    }
                ),
                # Report tools (issue #188)
                Tool(
                    name="generate_compatibility_report",
                    description="Generate a comprehensive compatibility report for all plugins",
                    inputSchema={
                        "type": "object",
                        "properties": {
                            "marketplace_path": {
                                "type": "string",
                                "description": "Path to marketplace root directory"
                            },
                            "format": {
                                "type": "string",
                                "enum": ["markdown", "json"],
                                "default": "markdown",
                                "description": "Output format"
                            }
                        },
                        "required": ["marketplace_path"]
                    }
                ),
                Tool(
                    name="list_issues",
                    description="List validation issues with optional filtering",
                    inputSchema={
                        "type": "object",
                        "properties": {
                            "marketplace_path": {
                                "type": "string",
                                "description": "Path to marketplace root directory"
                            },
                            "severity": {
                                "type": "string",
                                "enum": ["error", "warning", "info", "all"],
                                "default": "all",
                                "description": "Filter by severity"
                            },
                            "issue_type": {
                                "type": "string",
                                "enum": ["missing_tool", "interface_mismatch", "optional_dependency", "undeclared_output", "all"],
                                "default": "all",
                                "description": "Filter by issue type"
                            }
                        },
                        "required": ["marketplace_path"]
                    }
                )
            ]
            return tools

        @self.server.call_tool()
        async def call_tool(name: str, arguments: dict) -> list[TextContent]:
            """Handle tool invocation."""
            try:
                # Dispatch to the matching tool implementation
                if name == "parse_plugin_interface":
                    result = await self._parse_plugin_interface(**arguments)
                elif name == "parse_claude_md_agents":
                    result = await self._parse_claude_md_agents(**arguments)
                elif name == "validate_compatibility":
                    result = await self._validate_compatibility(**arguments)
                elif name == "validate_agent_refs":
                    result = await self._validate_agent_refs(**arguments)
                elif name == "validate_data_flow":
                    result = await self._validate_data_flow(**arguments)
                elif name == "validate_workflow_integration":
                    result = await self._validate_workflow_integration(**arguments)
                elif name == "generate_compatibility_report":
                    result = await self._generate_compatibility_report(**arguments)
                elif name == "list_issues":
                    result = await self._list_issues(**arguments)
                else:
                    raise ValueError(f"Unknown tool: {name}")

                return [TextContent(
                    type="text",
                    text=json.dumps(result, indent=2, default=str)
                )]

            except Exception as e:
                logger.error(f"Tool {name} failed: {e}")
                return [TextContent(
                    type="text",
                    text=json.dumps({"error": str(e)}, indent=2)
                )]

    # Parse tool implementations (Issue #186)

    async def _parse_plugin_interface(self, plugin_path: str) -> dict:
        """Parse plugin interface from README.md"""
        return await self.parse_tools.parse_plugin_interface(plugin_path)

    async def _parse_claude_md_agents(self, claude_md_path: str) -> dict:
        """Parse agents from CLAUDE.md"""
        return await self.parse_tools.parse_claude_md_agents(claude_md_path)

    # Validation tool implementations (Issue #187)

    async def _validate_compatibility(self, plugin_a: str, plugin_b: str) -> dict:
        """Validate compatibility between plugins"""
        return await self.validation_tools.validate_compatibility(plugin_a, plugin_b)

    async def _validate_agent_refs(self, agent_name: str, claude_md_path: str, plugin_paths: list = None) -> dict:
        """Validate agent tool references"""
        return await self.validation_tools.validate_agent_refs(agent_name, claude_md_path, plugin_paths)

    async def _validate_data_flow(self, agent_name: str, claude_md_path: str) -> dict:
        """Validate agent data flow"""
        return await self.validation_tools.validate_data_flow(agent_name, claude_md_path)

    async def _validate_workflow_integration(
        self,
        plugin_path: str,
        domain_label: str,
        expected_contract: str = None
    ) -> dict:
        """Validate domain plugin exposes required advisory interfaces"""
        return await self.validation_tools.validate_workflow_integration(
            plugin_path, domain_label, expected_contract
        )

    # Report tool implementations (Issue #188)

    async def _generate_compatibility_report(self, marketplace_path: str, format: str = "markdown") -> dict:
        """Generate comprehensive compatibility report"""
        return await self.report_tools.generate_compatibility_report(marketplace_path, format)

    async def _list_issues(self, marketplace_path: str, severity: str = "all", issue_type: str = "all") -> dict:
        """List validation issues with filtering"""
        return await self.report_tools.list_issues(marketplace_path, severity, issue_type)

    async def run(self):
        """Run the MCP server"""
        await self.initialize()
        self.setup_tools()

        async with stdio_server() as (read_stream, write_stream):
            await self.server.run(
                read_stream,
                write_stream,
                self.server.create_initialization_options()
            )


async def main():
    """Main entry point"""
    server = ContractValidatorMCPServer()
    await server.run()


if __name__ == "__main__":
    asyncio.run(main())
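Since the stdio transport is only a thin layer over the underscore-prefixed handlers, the tools can be exercised in-process without a client. A minimal smoke-test sketch; the marketplace path is illustrative:

# Minimal sketch: call a tool handler in-process, bypassing the stdio transport.
import asyncio
from mcp_server.server import ContractValidatorMCPServer

async def smoke_test():
    server = ContractValidatorMCPServer()
    result = await server._generate_compatibility_report(
        "/home/lmiranda/claude-plugins-work", format="json"  # illustrative path
    )
    print(result.get("summary", result.get("error")))

asyncio.run(smoke_test())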
493
mcp-servers/contract-validator/mcp_server/validation_tools.py
Normal file
493
mcp-servers/contract-validator/mcp_server/validation_tools.py
Normal file
@@ -0,0 +1,493 @@
|
|||||||
|
"""
|
||||||
|
Validation tools for checking cross-plugin compatibility and agent references.
|
||||||
|
|
||||||
|
Provides:
|
||||||
|
- validate_compatibility: Compare two plugin interfaces
|
||||||
|
- validate_agent_refs: Check agent tool references exist
|
||||||
|
- validate_data_flow: Verify data flow through agent sequences
|
||||||
|
"""
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Optional
|
||||||
|
from pydantic import BaseModel
|
||||||
|
from enum import Enum
|
||||||
|
|
||||||
|
from .parse_tools import ParseTools, PluginInterface, ClaudeMdAgent
|
||||||
|
|
||||||
|
|
||||||
|
class IssueSeverity(str, Enum):
|
||||||
|
ERROR = "error"
|
||||||
|
WARNING = "warning"
|
||||||
|
INFO = "info"
|
||||||
|
|
||||||
|
|
||||||
|
class IssueType(str, Enum):
|
||||||
|
MISSING_TOOL = "missing_tool"
|
||||||
|
INTERFACE_MISMATCH = "interface_mismatch"
|
||||||
|
OPTIONAL_DEPENDENCY = "optional_dependency"
|
||||||
|
UNDECLARED_OUTPUT = "undeclared_output"
|
||||||
|
INVALID_SEQUENCE = "invalid_sequence"
|
||||||
|
MISSING_INTEGRATION = "missing_integration"
|
||||||
|
|
||||||
|
|
||||||
|
class ValidationIssue(BaseModel):
|
||||||
|
"""A single validation issue"""
|
||||||
|
severity: IssueSeverity
|
||||||
|
issue_type: IssueType
|
||||||
|
message: str
|
||||||
|
location: Optional[str] = None
|
||||||
|
suggestion: Optional[str] = None
|
||||||
|
|
||||||
|
|
||||||
|
class CompatibilityResult(BaseModel):
|
||||||
|
"""Result of compatibility check between two plugins"""
|
||||||
|
plugin_a: str
|
||||||
|
plugin_b: str
|
||||||
|
compatible: bool
|
||||||
|
shared_tools: list[str] = []
|
||||||
|
a_only_tools: list[str] = []
|
||||||
|
b_only_tools: list[str] = []
|
||||||
|
issues: list[ValidationIssue] = []
|
||||||
|
|
||||||
|
|
||||||
|
class AgentValidationResult(BaseModel):
|
||||||
|
"""Result of agent reference validation"""
|
||||||
|
agent_name: str
|
||||||
|
valid: bool
|
||||||
|
tool_refs_found: list[str] = []
|
||||||
|
tool_refs_missing: list[str] = []
|
||||||
|
issues: list[ValidationIssue] = []
|
||||||
|
|
||||||
|
|
||||||
|
class DataFlowResult(BaseModel):
|
||||||
|
"""Result of data flow validation"""
|
||||||
|
agent_name: str
|
||||||
|
valid: bool
|
||||||
|
flow_steps: list[str] = []
|
||||||
|
issues: list[ValidationIssue] = []
|
||||||
|
|
||||||
|
|
||||||
|
class WorkflowIntegrationResult(BaseModel):
|
||||||
|
"""Result of workflow integration validation for domain plugins"""
|
||||||
|
plugin_name: str
|
||||||
|
domain_label: str
|
||||||
|
valid: bool
|
||||||
|
gate_command_found: bool
|
||||||
|
gate_contract: Optional[str] = None # Contract version declared by gate command
|
||||||
|
review_command_found: bool
|
||||||
|
advisory_agent_found: bool
|
||||||
|
issues: list[ValidationIssue] = []
|
||||||
|
|
||||||
|
|
||||||
|
class ValidationTools:
|
||||||
|
"""Tools for validating plugin compatibility and agent references"""
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
self.parse_tools = ParseTools()
|
||||||
|
|
||||||
|
async def validate_compatibility(self, plugin_a: str, plugin_b: str) -> dict:
|
||||||
|
"""
|
||||||
|
Validate compatibility between two plugin interfaces.
|
||||||
|
|
||||||
|
Compares tools, commands, and agents to identify overlaps and gaps.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
plugin_a: Path to first plugin directory
|
||||||
|
plugin_b: Path to second plugin directory
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Compatibility report with shared tools, unique tools, and issues
|
||||||
|
"""
|
||||||
|
# Parse both plugins
|
||||||
|
interface_a = await self.parse_tools.parse_plugin_interface(plugin_a)
|
||||||
|
interface_b = await self.parse_tools.parse_plugin_interface(plugin_b)
|
||||||
|
|
||||||
|
# Check for parse errors
|
||||||
|
if "error" in interface_a:
|
||||||
|
return {
|
||||||
|
"error": f"Failed to parse plugin A: {interface_a['error']}",
|
||||||
|
"plugin_a": plugin_a,
|
||||||
|
"plugin_b": plugin_b
|
||||||
|
}
|
||||||
|
if "error" in interface_b:
|
||||||
|
return {
|
||||||
|
"error": f"Failed to parse plugin B: {interface_b['error']}",
|
||||||
|
"plugin_a": plugin_a,
|
||||||
|
"plugin_b": plugin_b
|
||||||
|
}
|
||||||
|
|
||||||
|
# Extract tool names
|
||||||
|
tools_a = set(t["name"] for t in interface_a.get("tools", []))
|
||||||
|
tools_b = set(t["name"] for t in interface_b.get("tools", []))
|
||||||
|
|
||||||
|
# Find overlaps and differences
|
||||||
|
shared = tools_a & tools_b
|
||||||
|
a_only = tools_a - tools_b
|
||||||
|
b_only = tools_b - tools_a
|
||||||
|
|
||||||
|
issues = []
|
||||||
|
|
||||||
|
# Check for potential naming conflicts
|
||||||
|
if shared:
|
||||||
|
issues.append(ValidationIssue(
|
||||||
|
severity=IssueSeverity.WARNING,
|
||||||
|
issue_type=IssueType.INTERFACE_MISMATCH,
|
||||||
|
message=f"Both plugins define tools with same names: {list(shared)}",
|
||||||
|
location=f"{interface_a['plugin_name']} and {interface_b['plugin_name']}",
|
||||||
|
suggestion="Ensure tools with same names have compatible interfaces"
|
||||||
|
))
|
||||||
|
|
||||||
|
# Check command overlaps
|
||||||
|
cmds_a = set(c["name"] for c in interface_a.get("commands", []))
|
||||||
|
cmds_b = set(c["name"] for c in interface_b.get("commands", []))
|
||||||
|
shared_cmds = cmds_a & cmds_b
|
||||||
|
|
||||||
|
if shared_cmds:
|
||||||
|
issues.append(ValidationIssue(
|
||||||
|
severity=IssueSeverity.ERROR,
|
||||||
|
issue_type=IssueType.INTERFACE_MISMATCH,
|
||||||
|
message=f"Command name conflict: {list(shared_cmds)}",
|
||||||
|
location=f"{interface_a['plugin_name']} and {interface_b['plugin_name']}",
|
||||||
|
suggestion="Rename conflicting commands to avoid ambiguity"
|
||||||
|
))
|
||||||
|
|
||||||
|
result = CompatibilityResult(
|
||||||
|
plugin_a=interface_a["plugin_name"],
|
||||||
|
plugin_b=interface_b["plugin_name"],
|
||||||
|
compatible=len([i for i in issues if i.severity == IssueSeverity.ERROR]) == 0,
|
||||||
|
shared_tools=list(shared),
|
||||||
|
a_only_tools=list(a_only),
|
||||||
|
b_only_tools=list(b_only),
|
||||||
|
issues=issues
|
||||||
|
)
|
||||||
|
|
||||||
|
return result.model_dump()
|
||||||
|
|
||||||
|
async def validate_agent_refs(
|
||||||
|
self,
|
||||||
|
agent_name: str,
|
||||||
|
claude_md_path: str,
|
||||||
|
plugin_paths: list[str] = None
|
||||||
|
) -> dict:
|
||||||
|
"""
|
||||||
|
Validate that all tool references in an agent definition exist.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
agent_name: Name of the agent to validate
|
||||||
|
claude_md_path: Path to CLAUDE.md containing the agent
|
||||||
|
plugin_paths: Optional list of plugin paths to check for tools
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Validation result with found/missing tools and issues
|
||||||
|
"""
|
||||||
|
# Parse CLAUDE.md for agents
|
||||||
|
agents_result = await self.parse_tools.parse_claude_md_agents(claude_md_path)
|
||||||
|
|
||||||
|
if "error" in agents_result:
|
||||||
|
return {
|
||||||
|
"error": agents_result["error"],
|
||||||
|
"agent_name": agent_name
|
||||||
|
}
|
||||||
|
|
||||||
|
# Find the specific agent
|
||||||
|
agent = None
|
||||||
|
for a in agents_result.get("agents", []):
|
||||||
|
if a["name"].lower() == agent_name.lower():
|
||||||
|
agent = a
|
||||||
|
break
|
||||||
|
|
||||||
|
if not agent:
|
||||||
|
return {
|
||||||
|
"error": f"Agent '{agent_name}' not found in {claude_md_path}",
|
||||||
|
"agent_name": agent_name,
|
||||||
|
"available_agents": [a["name"] for a in agents_result.get("agents", [])]
|
||||||
|
}
|
||||||
|
|
||||||
|
# Collect all available tools from plugins
|
||||||
|
available_tools = set()
|
||||||
|
if plugin_paths:
|
||||||
|
for plugin_path in plugin_paths:
|
||||||
|
interface = await self.parse_tools.parse_plugin_interface(plugin_path)
|
||||||
|
if "error" not in interface:
|
||||||
|
for tool in interface.get("tools", []):
|
||||||
|
available_tools.add(tool["name"])
|
||||||
|
|
||||||
|
# Check agent tool references
|
||||||
|
tool_refs = set(agent.get("tool_refs", []))
|
||||||
|
found = tool_refs & available_tools if available_tools else tool_refs
|
||||||
|
missing = tool_refs - available_tools if available_tools else set()
|
||||||
|
|
||||||
|
issues = []
|
||||||
|
|
||||||
|
# Report missing tools
|
||||||
|
for tool in missing:
|
||||||
|
issues.append(ValidationIssue(
|
||||||
|
severity=IssueSeverity.ERROR,
|
||||||
|
issue_type=IssueType.MISSING_TOOL,
|
||||||
|
message=f"Agent '{agent_name}' references tool '{tool}' which is not found",
|
||||||
|
location=claude_md_path,
|
||||||
|
suggestion=f"Check if tool '{tool}' exists or fix the reference"
|
||||||
|
))
|
||||||
|
|
||||||
|
# Check if agent has no tool refs (might be incomplete)
|
||||||
|
if not tool_refs:
|
||||||
|
issues.append(ValidationIssue(
|
||||||
|
severity=IssueSeverity.INFO,
|
||||||
|
issue_type=IssueType.UNDECLARED_OUTPUT,
|
||||||
|
message=f"Agent '{agent_name}' has no documented tool references",
|
||||||
|
location=claude_md_path,
|
||||||
|
suggestion="Consider documenting which tools this agent uses"
|
||||||
|
))
|
||||||
|
|
||||||
|
result = AgentValidationResult(
|
||||||
|
agent_name=agent_name,
|
||||||
|
valid=len([i for i in issues if i.severity == IssueSeverity.ERROR]) == 0,
|
||||||
|
tool_refs_found=list(found),
|
||||||
|
tool_refs_missing=list(missing),
|
||||||
|
issues=issues
|
||||||
|
)
|
||||||
|
|
||||||
|
return result.model_dump()
|
||||||
|
|
||||||
|
async def validate_data_flow(self, agent_name: str, claude_md_path: str) -> dict:
|
||||||
|
"""
|
||||||
|
Validate data flow through an agent's tool sequence.
|
||||||
|
|
||||||
|
Checks that each step's expected output can be used by the next step.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
agent_name: Name of the agent to validate
|
||||||
|
claude_md_path: Path to CLAUDE.md containing the agent
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Data flow validation result with steps and issues
|
||||||
|
"""
|
||||||
|
# Parse CLAUDE.md for agents
|
||||||
|
agents_result = await self.parse_tools.parse_claude_md_agents(claude_md_path)
|
||||||
|
|
||||||
|
if "error" in agents_result:
|
||||||
|
return {
|
||||||
|
"error": agents_result["error"],
|
||||||
|
"agent_name": agent_name
|
||||||
|
}
|
||||||
|
|
||||||
|
# Find the specific agent
|
||||||
|
agent = None
|
||||||
|
for a in agents_result.get("agents", []):
|
||||||
|
if a["name"].lower() == agent_name.lower():
|
||||||
|
agent = a
|
||||||
|
break
|
||||||
|
|
||||||
|
if not agent:
|
||||||
|
return {
|
||||||
|
"error": f"Agent '{agent_name}' not found in {claude_md_path}",
|
||||||
|
"agent_name": agent_name,
|
||||||
|
"available_agents": [a["name"] for a in agents_result.get("agents", [])]
|
||||||
|
}
|
||||||
|
|
||||||
|
issues = []
|
||||||
|
flow_steps = []
|
||||||
|
|
||||||
|
# Extract workflow steps
|
||||||
|
workflow_steps = agent.get("workflow_steps", [])
|
||||||
|
responsibilities = agent.get("responsibilities", [])
|
||||||
|
|
||||||
|
# Build flow from workflow steps or responsibilities
|
||||||
|
steps = workflow_steps if workflow_steps else responsibilities
|
||||||
|
|
||||||
|
for i, step in enumerate(steps):
|
||||||
|
flow_steps.append(f"Step {i+1}: {step}")
|
||||||
|
|
||||||
|
# Check for data flow patterns
|
||||||
|
tool_refs = agent.get("tool_refs", [])
|
||||||
|
|
||||||
|
# Known data flow patterns
|
||||||
|
# e.g., data-platform produces data_ref, viz-platform consumes it
|
||||||
|
known_producers = {
|
||||||
|
"read_csv": "data_ref",
|
||||||
|
"read_parquet": "data_ref",
|
||||||
|
"pg_query": "data_ref",
|
||||||
|
"filter": "data_ref",
|
||||||
|
"groupby": "data_ref",
|
||||||
|
}
|
||||||
|
|
||||||
|
known_consumers = {
|
||||||
|
"describe": "data_ref",
|
||||||
|
"head": "data_ref",
|
||||||
|
"tail": "data_ref",
|
||||||
|
"to_csv": "data_ref",
|
||||||
|
"to_parquet": "data_ref",
|
||||||
|
}
|
||||||
|
|
||||||
|
# Check if agent uses tools that require data_ref
|
||||||
|
has_producer = any(t in known_producers for t in tool_refs)
|
||||||
|
has_consumer = any(t in known_consumers for t in tool_refs)
|
||||||
|
|
||||||
|
if has_consumer and not has_producer:
|
||||||
|
issues.append(ValidationIssue(
|
||||||
|
severity=IssueSeverity.WARNING,
|
||||||
|
issue_type=IssueType.INTERFACE_MISMATCH,
|
||||||
|
message=f"Agent '{agent_name}' uses tools that consume data_ref but no producer found",
|
||||||
|
location=claude_md_path,
|
||||||
|
suggestion="Ensure a data loading tool (read_csv, pg_query, etc.) is used before data consumers"
|
||||||
|
))
|
||||||
|
|
||||||
|
# Check for empty workflow
|
||||||
|
if not steps and not tool_refs:
|
||||||
|
issues.append(ValidationIssue(
|
||||||
|
severity=IssueSeverity.INFO,
|
||||||
|
issue_type=IssueType.UNDECLARED_OUTPUT,
|
||||||
|
message=f"Agent '{agent_name}' has no documented workflow or tool sequence",
|
||||||
|
location=claude_md_path,
|
||||||
|
suggestion="Consider documenting the agent's workflow steps"
|
||||||
|
))
|
||||||
|
|
||||||
|
result = DataFlowResult(
|
||||||
|
agent_name=agent_name,
|
||||||
|
valid=len([i for i in issues if i.severity == IssueSeverity.ERROR]) == 0,
|
||||||
|
flow_steps=flow_steps,
|
||||||
|
issues=issues
|
||||||
|
)
|
||||||
|
|
||||||
|
return result.model_dump()
|
||||||
|
|
||||||
|
    async def validate_workflow_integration(
        self,
        plugin_path: str,
        domain_label: str,
        expected_contract: Optional[str] = None
    ) -> dict:
        """
        Validate that a domain plugin exposes required advisory interfaces.

        Checks for:
        - Gate command (e.g., /design-gate, /data-gate) - REQUIRED
        - Gate contract version (gate_contract in frontmatter) - INFO if missing
        - Review command (e.g., /design-review, /data-review) - recommended
        - Advisory agent referencing the domain label - recommended

        Args:
            plugin_path: Path to the domain plugin directory
            domain_label: The Domain/* label it claims to handle (e.g., Domain/Viz)
            expected_contract: Expected contract version (e.g., 'v1'). If provided,
                validates the gate command's contract matches.

        Returns:
            Validation result with found interfaces and issues
        """
        import re

        plugin_path_obj = Path(plugin_path)
        issues = []

        # Extract plugin name from path
        plugin_name = plugin_path_obj.name
        if not plugin_path_obj.exists():
            return {
                "error": f"Plugin directory not found: {plugin_path}",
                "plugin_path": plugin_path,
                "domain_label": domain_label
            }

        # Extract domain short name from label (e.g., "Domain/Viz" -> "viz", "Domain/Data" -> "data")
        domain_short = domain_label.split("/")[-1].lower() if "/" in domain_label else domain_label.lower()

        # Check for gate command
        commands_dir = plugin_path_obj / "commands"
        gate_command_found = False
        gate_contract = None
        gate_patterns = ["pass", "fail", "PASS", "FAIL", "Binary pass/fail", "gate"]

        if commands_dir.exists():
            for cmd_file in commands_dir.glob("*.md"):
                if "gate" in cmd_file.name.lower():
                    # Verify it's actually a gate command by checking content
                    content = cmd_file.read_text()
                    if any(pattern in content for pattern in gate_patterns):
                        gate_command_found = True
                        # Parse frontmatter for gate_contract
                        frontmatter_match = re.match(r'^---\n(.*?)\n---', content, re.DOTALL)
                        if frontmatter_match:
                            frontmatter = frontmatter_match.group(1)
                            contract_match = re.search(r'gate_contract:\s*(\S+)', frontmatter)
                            if contract_match:
                                gate_contract = contract_match.group(1)
                        break

        if not gate_command_found:
            issues.append(ValidationIssue(
                severity=IssueSeverity.ERROR,
                issue_type=IssueType.MISSING_INTEGRATION,
                message=f"Plugin '{plugin_name}' lacks a gate command for domain '{domain_label}'",
                location=str(commands_dir),
                suggestion=f"Create commands/{domain_short}-gate.md with binary PASS/FAIL output"
            ))

        # Check for review command
        review_command_found = False
        if commands_dir.exists():
            for cmd_file in commands_dir.glob("*.md"):
                if "review" in cmd_file.name.lower() and "gate" not in cmd_file.name.lower():
                    review_command_found = True
                    break

        if not review_command_found:
            issues.append(ValidationIssue(
                severity=IssueSeverity.WARNING,
                issue_type=IssueType.MISSING_INTEGRATION,
                message=f"Plugin '{plugin_name}' lacks a review command for domain '{domain_label}'",
                location=str(commands_dir),
                suggestion=f"Create commands/{domain_short}-review.md for detailed audits"
            ))

        # Check for advisory agent
        agents_dir = plugin_path_obj / "agents"
        advisory_agent_found = False

        if agents_dir.exists():
            for agent_file in agents_dir.glob("*.md"):
                content = agent_file.read_text()
                # Check if agent references the domain label or gate command
                if domain_label in content or f"{domain_short}-gate" in content.lower() or "advisor" in agent_file.name.lower() or "reviewer" in agent_file.name.lower():
                    advisory_agent_found = True
                    break

        if not advisory_agent_found:
            issues.append(ValidationIssue(
                severity=IssueSeverity.WARNING,
                issue_type=IssueType.MISSING_INTEGRATION,
                message=f"Plugin '{plugin_name}' lacks an advisory agent for domain '{domain_label}'",
                location=str(agents_dir) if agents_dir.exists() else str(plugin_path_obj),
                suggestion=f"Create agents/{domain_short}-advisor.md referencing '{domain_label}'"
            ))

        # Check gate contract version
        if gate_command_found:
            if not gate_contract:
                issues.append(ValidationIssue(
                    severity=IssueSeverity.INFO,
                    issue_type=IssueType.MISSING_INTEGRATION,
                    message="Gate command does not declare a contract version",
                    location=str(commands_dir),
                    suggestion="Consider adding `gate_contract: v1` to frontmatter for version tracking"
                ))
            elif expected_contract and gate_contract != expected_contract:
                issues.append(ValidationIssue(
                    severity=IssueSeverity.WARNING,
                    issue_type=IssueType.INTERFACE_MISMATCH,
                    message=f"Contract version mismatch: gate declares {gate_contract}, projman expects {expected_contract}",
                    location=str(commands_dir),
                    suggestion=f"Update domain-consultation.md Gate Command Reference table to {gate_contract}, or update gate command to {expected_contract}"
                ))

        result = WorkflowIntegrationResult(
            plugin_name=plugin_name,
            domain_label=domain_label,
            valid=gate_command_found,  # Only gate is required for validity
            gate_command_found=gate_command_found,
            gate_contract=gate_contract,
            review_command_found=review_command_found,
            advisory_agent_found=advisory_agent_found,
            issues=issues
        )

        return result.model_dump()
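The frontmatter contract that `validate_workflow_integration` checks can be exercised in isolation. Below is a minimal sketch using the same two regexes; the sample gate command document is illustrative, not taken from a real plugin:

```python
# Standalone sketch of the gate_contract extraction performed above.
import re

sample_gate_cmd = """---
description: Design system compliance gate (pass/fail)
gate_contract: v1
---

# /design-gate
- **PASS**: all checks passed
- **FAIL**: violations detected
"""

# Same patterns as validate_workflow_integration: frontmatter block first,
# then the gate_contract key inside it.
frontmatter = re.match(r'^---\n(.*?)\n---', sample_gate_cmd, re.DOTALL)
assert frontmatter is not None
contract = re.search(r'gate_contract:\s*(\S+)', frontmatter.group(1))
print(contract.group(1))  # -> v1
```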
41 mcp-servers/contract-validator/pyproject.toml (new file)
@@ -0,0 +1,41 @@
[build-system]
requires = ["setuptools>=61.0", "wheel"]
build-backend = "setuptools.build_meta"

[project]
name = "contract-validator-mcp"
version = "1.0.0"
description = "MCP Server for cross-plugin compatibility validation and agent verification"
readme = "README.md"
license = {text = "MIT"}
requires-python = ">=3.10"
authors = [
    {name = "Leo Miranda"}
]
classifiers = [
    "Development Status :: 4 - Beta",
    "Intended Audience :: Developers",
    "License :: OSI Approved :: MIT License",
    "Programming Language :: Python :: 3",
    "Programming Language :: Python :: 3.10",
    "Programming Language :: Python :: 3.11",
    "Programming Language :: Python :: 3.12",
]
dependencies = [
    "mcp>=0.9.0",
    "pydantic>=2.5.0",
]

[project.optional-dependencies]
dev = [
    "pytest>=7.4.3",
    "pytest-asyncio>=0.23.0",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["mcp_server*"]

[tool.pytest.ini_options]
asyncio_mode = "auto"
testpaths = ["tests"]
9 mcp-servers/contract-validator/requirements.txt (new file)
@@ -0,0 +1,9 @@
# MCP SDK
mcp>=0.9.0

# Utilities
pydantic>=2.5.0

# Testing
pytest>=7.4.3
pytest-asyncio>=0.23.0
21 mcp-servers/contract-validator/run.sh (new executable file)
@@ -0,0 +1,21 @@
#!/bin/bash
# Capture original working directory before any cd operations
# This should be the user's project directory when launched by Claude Code
export CLAUDE_PROJECT_DIR="${CLAUDE_PROJECT_DIR:-$PWD}"

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
CACHE_VENV="$HOME/.cache/claude-mcp-venvs/leo-claude-mktplace/contract-validator/.venv"
LOCAL_VENV="$SCRIPT_DIR/.venv"

if [[ -f "$CACHE_VENV/bin/python" ]]; then
    PYTHON="$CACHE_VENV/bin/python"
elif [[ -f "$LOCAL_VENV/bin/python" ]]; then
    PYTHON="$LOCAL_VENV/bin/python"
else
    echo "ERROR: No venv found. Run: ./scripts/setup-venvs.sh" >&2
    exit 1
fi

cd "$SCRIPT_DIR"
export PYTHONPATH="$SCRIPT_DIR"
exec "$PYTHON" -m mcp_server.server "$@"
1 mcp-servers/contract-validator/tests/__init__.py (new file)
@@ -0,0 +1 @@
# Tests for contract-validator MCP server
193 mcp-servers/contract-validator/tests/test_parse_tools.py (new file)
@@ -0,0 +1,193 @@
"""
Unit tests for parse tools.
"""
import pytest
from pathlib import Path


@pytest.fixture
def parse_tools():
    """Create ParseTools instance"""
    from mcp_server.parse_tools import ParseTools
    return ParseTools()


@pytest.fixture
def sample_readme(tmp_path):
    """Create a sample README.md for testing"""
    readme = tmp_path / "README.md"
    readme.write_text("""# Test Plugin

A test plugin for validation.

## Features

- **Feature One**: Does something
- **Feature Two**: Does something else

## Commands

| Command | Description |
|---------|-------------|
| `/test-cmd` | Test command |
| `/another-cmd` | Another test command |

## Agents

| Agent | Description |
|-------|-------------|
| `test-agent` | A test agent |

## Tools Summary

### Category A (3 tools)
`tool_a`, `tool_b`, `tool_c`

### Category B (2 tools)
`tool_d`, `tool_e`
""")
    return str(tmp_path)


@pytest.fixture
def sample_claude_md(tmp_path):
    """Create a sample CLAUDE.md for testing"""
    claude_md = tmp_path / "CLAUDE.md"
    claude_md.write_text("""# CLAUDE.md

## Project Overview

### Four-Agent Model (test)

| Agent | Personality | Responsibilities |
|-------|-------------|------------------|
| **Planner** | Thoughtful | Planning via `create_issue`, `search_lessons` |
| **Executor** | Focused | Implementation via `write`, `edit` |

## Workflow

1. Planner creates issues
2. Executor implements code
""")
    return str(claude_md)


@pytest.mark.asyncio
async def test_parse_plugin_interface_basic(parse_tools, sample_readme):
    """Test basic plugin interface parsing"""
    result = await parse_tools.parse_plugin_interface(sample_readme)

    assert "error" not in result
    # Plugin name extraction strips "Plugin" suffix
    assert result["plugin_name"] == "Test"
    assert "A test plugin" in result["description"]


@pytest.mark.asyncio
async def test_parse_plugin_interface_commands(parse_tools, sample_readme):
    """Test command extraction from README"""
    result = await parse_tools.parse_plugin_interface(sample_readme)

    commands = result["commands"]
    assert len(commands) == 2
    assert commands[0]["name"] == "/test-cmd"
    assert commands[1]["name"] == "/another-cmd"


@pytest.mark.asyncio
async def test_parse_plugin_interface_agents(parse_tools, sample_readme):
    """Test agent extraction from README"""
    result = await parse_tools.parse_plugin_interface(sample_readme)

    agents = result["agents"]
    assert len(agents) == 1
    assert agents[0]["name"] == "test-agent"


@pytest.mark.asyncio
async def test_parse_plugin_interface_tools(parse_tools, sample_readme):
    """Test tool extraction from README"""
    result = await parse_tools.parse_plugin_interface(sample_readme)

    tools = result["tools"]
    tool_names = [t["name"] for t in tools]
    assert "tool_a" in tool_names
    assert "tool_b" in tool_names
    assert "tool_e" in tool_names
    assert len(tools) >= 5


@pytest.mark.asyncio
async def test_parse_plugin_interface_categories(parse_tools, sample_readme):
    """Test tool category extraction"""
    result = await parse_tools.parse_plugin_interface(sample_readme)

    categories = result["tool_categories"]
    assert "Category A" in categories
    assert "Category B" in categories
    assert "tool_a" in categories["Category A"]


@pytest.mark.asyncio
async def test_parse_plugin_interface_features(parse_tools, sample_readme):
    """Test feature extraction"""
    result = await parse_tools.parse_plugin_interface(sample_readme)

    features = result["features"]
    assert "Feature One" in features
    assert "Feature Two" in features


@pytest.mark.asyncio
async def test_parse_plugin_interface_not_found(parse_tools, tmp_path):
    """Test error when README not found"""
    result = await parse_tools.parse_plugin_interface(str(tmp_path / "nonexistent"))

    assert "error" in result
    assert "not found" in result["error"].lower()


@pytest.mark.asyncio
async def test_parse_claude_md_agents(parse_tools, sample_claude_md):
    """Test agent extraction from CLAUDE.md"""
    result = await parse_tools.parse_claude_md_agents(sample_claude_md)

    assert "error" not in result
    assert result["agent_count"] == 2

    agents = result["agents"]
    agent_names = [a["name"] for a in agents]
    assert "Planner" in agent_names
    assert "Executor" in agent_names


@pytest.mark.asyncio
async def test_parse_claude_md_tool_refs(parse_tools, sample_claude_md):
    """Test tool reference extraction from agents"""
    result = await parse_tools.parse_claude_md_agents(sample_claude_md)

    agents = {a["name"]: a for a in result["agents"]}
    planner = agents["Planner"]

    assert "create_issue" in planner["tool_refs"]
    assert "search_lessons" in planner["tool_refs"]


@pytest.mark.asyncio
async def test_parse_claude_md_not_found(parse_tools, tmp_path):
    """Test error when CLAUDE.md not found"""
    result = await parse_tools.parse_claude_md_agents(str(tmp_path / "CLAUDE.md"))

    assert "error" in result
    assert "not found" in result["error"].lower()


@pytest.mark.asyncio
async def test_parse_plugin_with_direct_file(parse_tools, sample_readme):
    """Test parsing with direct file path instead of directory"""
    readme_path = Path(sample_readme) / "README.md"
    result = await parse_tools.parse_plugin_interface(str(readme_path))

    assert "error" not in result
    # Plugin name extraction strips "Plugin" suffix
    assert result["plugin_name"] == "Test"
261 mcp-servers/contract-validator/tests/test_report_tools.py (new file)
@@ -0,0 +1,261 @@
"""
Unit tests for report tools.
"""
import pytest
from pathlib import Path


@pytest.fixture
def report_tools():
    """Create ReportTools instance"""
    from mcp_server.report_tools import ReportTools
    return ReportTools()


@pytest.fixture
def sample_marketplace(tmp_path):
    """Create a sample marketplace structure"""
    import json

    plugins_dir = tmp_path / "plugins"
    plugins_dir.mkdir()

    # Plugin 1
    plugin1 = plugins_dir / "plugin-one"
    plugin1.mkdir()
    plugin1_meta = plugin1 / ".claude-plugin"
    plugin1_meta.mkdir()
    (plugin1_meta / "plugin.json").write_text(json.dumps({"name": "plugin-one"}))
    (plugin1 / "README.md").write_text("""# plugin-one

First test plugin.

## Commands

| Command | Description |
|---------|-------------|
| `/cmd-one` | Command one |

## Tools Summary

### Tools (2 tools)
`tool_a`, `tool_b`
""")

    # Plugin 2
    plugin2 = plugins_dir / "plugin-two"
    plugin2.mkdir()
    plugin2_meta = plugin2 / ".claude-plugin"
    plugin2_meta.mkdir()
    (plugin2_meta / "plugin.json").write_text(json.dumps({"name": "plugin-two"}))
    (plugin2 / "README.md").write_text("""# plugin-two

Second test plugin.

## Commands

| Command | Description |
|---------|-------------|
| `/cmd-two` | Command two |

## Tools Summary

### Tools (2 tools)
`tool_c`, `tool_d`
""")

    # Plugin 3 (with conflict)
    plugin3 = plugins_dir / "plugin-three"
    plugin3.mkdir()
    plugin3_meta = plugin3 / ".claude-plugin"
    plugin3_meta.mkdir()
    (plugin3_meta / "plugin.json").write_text(json.dumps({"name": "plugin-three"}))
    (plugin3 / "README.md").write_text("""# plugin-three

Third test plugin with conflict.

## Commands

| Command | Description |
|---------|-------------|
| `/cmd-one` | Conflicting command |

## Tools Summary

### Tools (1 tool)
`tool_e`
""")

    return str(tmp_path)


@pytest.fixture
def marketplace_no_plugins(tmp_path):
    """Create marketplace with no plugins"""
    plugins_dir = tmp_path / "plugins"
    plugins_dir.mkdir()
    return str(tmp_path)


@pytest.fixture
def marketplace_no_dir(tmp_path):
    """Create path without plugins directory"""
    return str(tmp_path)


@pytest.mark.asyncio
async def test_generate_report_json_format(report_tools, sample_marketplace):
    """Test JSON format report generation"""
    result = await report_tools.generate_compatibility_report(
        sample_marketplace, "json"
    )

    assert "error" not in result
    assert "generated_at" in result
    assert "summary" in result
    assert "plugins" in result
    assert result["summary"]["total_plugins"] == 3


@pytest.mark.asyncio
async def test_generate_report_markdown_format(report_tools, sample_marketplace):
    """Test markdown format report generation"""
    result = await report_tools.generate_compatibility_report(
        sample_marketplace, "markdown"
    )

    assert "error" not in result
    assert "report" in result
    assert "# Contract Validation Report" in result["report"]
    assert "## Summary" in result["report"]


@pytest.mark.asyncio
async def test_generate_report_finds_conflicts(report_tools, sample_marketplace):
    """Test that report finds command conflicts"""
    result = await report_tools.generate_compatibility_report(
        sample_marketplace, "json"
    )

    assert "error" not in result
    assert result["summary"]["errors"] > 0
    assert result["summary"]["total_issues"] > 0


@pytest.mark.asyncio
async def test_generate_report_counts_correctly(report_tools, sample_marketplace):
    """Test summary counts are correct"""
    result = await report_tools.generate_compatibility_report(
        sample_marketplace, "json"
    )

    summary = result["summary"]
    assert summary["total_plugins"] == 3
    assert summary["total_commands"] == 3  # 3 commands total
    assert summary["total_tools"] == 5  # a, b, c, d, e


@pytest.mark.asyncio
async def test_generate_report_no_plugins(report_tools, marketplace_no_plugins):
    """Test error when no plugins found"""
    result = await report_tools.generate_compatibility_report(
        marketplace_no_plugins, "json"
    )

    assert "error" in result
    assert "no plugins" in result["error"].lower()


@pytest.mark.asyncio
async def test_generate_report_no_plugins_dir(report_tools, marketplace_no_dir):
    """Test error when plugins directory doesn't exist"""
    result = await report_tools.generate_compatibility_report(
        marketplace_no_dir, "json"
    )

    assert "error" in result
    assert "not found" in result["error"].lower()


@pytest.mark.asyncio
async def test_list_issues_all(report_tools, sample_marketplace):
    """Test listing all issues"""
    result = await report_tools.list_issues(sample_marketplace, "all", "all")

    assert "error" not in result
    assert "issues" in result
    assert result["total_issues"] > 0


@pytest.mark.asyncio
async def test_list_issues_filter_by_severity(report_tools, sample_marketplace):
    """Test filtering issues by severity"""
    all_result = await report_tools.list_issues(sample_marketplace, "all", "all")
    error_result = await report_tools.list_issues(sample_marketplace, "error", "all")

    # Error count should be less than or equal to all
    assert error_result["total_issues"] <= all_result["total_issues"]

    # All issues should have error severity
    for issue in error_result["issues"]:
        sev = issue.get("severity", "")
        if hasattr(sev, 'value'):
            sev = sev.value
        assert "error" in str(sev).lower()


@pytest.mark.asyncio
async def test_list_issues_filter_by_type(report_tools, sample_marketplace):
    """Test filtering issues by type"""
    result = await report_tools.list_issues(
        sample_marketplace, "all", "interface_mismatch"
    )

    # All issues should have matching type
    for issue in result["issues"]:
        itype = issue.get("issue_type", "")
        if hasattr(itype, 'value'):
            itype = itype.value
        assert "interface_mismatch" in str(itype).lower()


@pytest.mark.asyncio
async def test_list_issues_combined_filters(report_tools, sample_marketplace):
    """Test combined severity and type filters"""
    result = await report_tools.list_issues(
        sample_marketplace, "error", "interface_mismatch"
    )

    assert "error" not in result
    # Should have command conflict errors
    assert result["total_issues"] > 0


@pytest.mark.asyncio
async def test_report_markdown_has_all_sections(report_tools, sample_marketplace):
    """Test markdown report contains all expected sections"""
    result = await report_tools.generate_compatibility_report(
        sample_marketplace, "markdown"
    )

    report = result["report"]
    assert "## Summary" in report
    assert "## Plugins" in report
    # Compatibility section only if there are checks
    assert "Plugin One" in report or "plugin-one" in report.lower()


@pytest.mark.asyncio
async def test_report_includes_suggestions(report_tools, sample_marketplace):
    """Test that issues include suggestions"""
    result = await report_tools.generate_compatibility_report(
        sample_marketplace, "json"
    )

    issues = result.get("all_issues", [])
    # Find an issue with a suggestion
    issues_with_suggestions = [
        i for i in issues
        if i.get("suggestion")
    ]
    assert len(issues_with_suggestions) > 0
514 mcp-servers/contract-validator/tests/test_validation_tools.py (new file)
@@ -0,0 +1,514 @@
"""
Unit tests for validation tools.
"""
import pytest
from pathlib import Path


@pytest.fixture
def validation_tools():
    """Create ValidationTools instance"""
    from mcp_server.validation_tools import ValidationTools
    return ValidationTools()


@pytest.fixture
def plugin_a(tmp_path):
    """Create first test plugin"""
    plugin_dir = tmp_path / "plugin-a"
    plugin_dir.mkdir()
    (plugin_dir / ".claude-plugin").mkdir()

    readme = plugin_dir / "README.md"
    readme.write_text("""# Plugin A

Test plugin A.

## Commands

| Command | Description |
|---------|-------------|
| `/setup-a` | Setup A |
| `/shared-cmd` | Shared command |

## Tools Summary

### Core (2 tools)
`tool_one`, `tool_two`
""")
    return str(plugin_dir)


@pytest.fixture
def plugin_b(tmp_path):
    """Create second test plugin"""
    plugin_dir = tmp_path / "plugin-b"
    plugin_dir.mkdir()
    (plugin_dir / ".claude-plugin").mkdir()

    readme = plugin_dir / "README.md"
    readme.write_text("""# Plugin B

Test plugin B.

## Commands

| Command | Description |
|---------|-------------|
| `/setup-b` | Setup B |
| `/shared-cmd` | Shared command (conflict!) |

## Tools Summary

### Core (2 tools)
`tool_two`, `tool_three`
""")
    return str(plugin_dir)


@pytest.fixture
def plugin_no_conflict(tmp_path):
    """Create plugin with no conflicts"""
    plugin_dir = tmp_path / "plugin-c"
    plugin_dir.mkdir()
    (plugin_dir / ".claude-plugin").mkdir()

    readme = plugin_dir / "README.md"
    readme.write_text("""# Plugin C

Test plugin C.

## Commands

| Command | Description |
|---------|-------------|
| `/unique-cmd` | Unique command |

## Tools Summary

### Core (1 tool)
`unique_tool`
""")
    return str(plugin_dir)


@pytest.fixture
def claude_md_with_agents(tmp_path):
    """Create CLAUDE.md with agent definitions"""
    claude_md = tmp_path / "CLAUDE.md"
    claude_md.write_text("""# CLAUDE.md

### Four-Agent Model

| Agent | Personality | Responsibilities |
|-------|-------------|------------------|
| **TestAgent** | Careful | Uses `tool_one`, `tool_two`, `missing_tool` |
| **ValidAgent** | Thorough | Uses `tool_one` only |
| **EmptyAgent** | Unknown | General tasks |
""")
    return str(claude_md)


@pytest.mark.asyncio
async def test_validate_compatibility_command_conflict(validation_tools, plugin_a, plugin_b):
    """Test detection of command name conflicts"""
    result = await validation_tools.validate_compatibility(plugin_a, plugin_b)

    assert "error" not in result
    assert result["compatible"] is False

    # Find the command conflict issue
    error_issues = [i for i in result["issues"] if i["severity"].value == "error"]
    assert len(error_issues) > 0
    assert any("/shared-cmd" in str(i["message"]) for i in error_issues)


@pytest.mark.asyncio
async def test_validate_compatibility_tool_overlap(validation_tools, plugin_a, plugin_b):
    """Test detection of tool name overlaps"""
    result = await validation_tools.validate_compatibility(plugin_a, plugin_b)

    assert "tool_two" in result["shared_tools"]


@pytest.mark.asyncio
async def test_validate_compatibility_unique_tools(validation_tools, plugin_a, plugin_b):
    """Test identification of unique tools per plugin"""
    result = await validation_tools.validate_compatibility(plugin_a, plugin_b)

    assert "tool_one" in result["a_only_tools"]
    assert "tool_three" in result["b_only_tools"]


@pytest.mark.asyncio
async def test_validate_compatibility_no_conflict(validation_tools, plugin_a, plugin_no_conflict):
    """Test compatible plugins"""
    result = await validation_tools.validate_compatibility(plugin_a, plugin_no_conflict)

    assert "error" not in result
    assert result["compatible"] is True


@pytest.mark.asyncio
async def test_validate_compatibility_missing_plugin(validation_tools, plugin_a, tmp_path):
    """Test error when plugin not found"""
    result = await validation_tools.validate_compatibility(
        plugin_a,
        str(tmp_path / "nonexistent")
    )

    assert "error" in result


@pytest.mark.asyncio
async def test_validate_agent_refs_with_missing_tools(validation_tools, claude_md_with_agents, plugin_a):
    """Test detection of missing tool references"""
    result = await validation_tools.validate_agent_refs(
        "TestAgent",
        claude_md_with_agents,
        [plugin_a]
    )

    assert "error" not in result
    assert result["valid"] is False
    assert "missing_tool" in result["tool_refs_missing"]


@pytest.mark.asyncio
async def test_validate_agent_refs_valid_agent(validation_tools, claude_md_with_agents, plugin_a):
    """Test valid agent with all tools found"""
    result = await validation_tools.validate_agent_refs(
        "ValidAgent",
        claude_md_with_agents,
        [plugin_a]
    )

    assert "error" not in result
    assert result["valid"] is True
    assert "tool_one" in result["tool_refs_found"]


@pytest.mark.asyncio
async def test_validate_agent_refs_empty_agent(validation_tools, claude_md_with_agents, plugin_a):
    """Test agent with no tool references"""
    result = await validation_tools.validate_agent_refs(
        "EmptyAgent",
        claude_md_with_agents,
        [plugin_a]
    )

    assert "error" not in result
    # Should have info issue about undocumented references
    info_issues = [i for i in result["issues"] if i["severity"].value == "info"]
    assert len(info_issues) > 0


@pytest.mark.asyncio
async def test_validate_agent_refs_agent_not_found(validation_tools, claude_md_with_agents, plugin_a):
    """Test error when agent not found"""
    result = await validation_tools.validate_agent_refs(
        "NonexistentAgent",
        claude_md_with_agents,
        [plugin_a]
    )

    assert "error" in result
    assert "not found" in result["error"].lower()


@pytest.mark.asyncio
async def test_validate_data_flow_valid(validation_tools, tmp_path):
    """Test data flow validation with valid flow"""
    claude_md = tmp_path / "CLAUDE.md"
    claude_md.write_text("""# CLAUDE.md

### Four-Agent Model

| Agent | Personality | Responsibilities |
|-------|-------------|------------------|
| **DataAgent** | Analytical | Load with `read_csv`, analyze with `describe`, export with `to_csv` |
""")

    result = await validation_tools.validate_data_flow("DataAgent", str(claude_md))

    assert "error" not in result
    assert result["valid"] is True


@pytest.mark.asyncio
async def test_validate_data_flow_missing_producer(validation_tools, tmp_path):
    """Test data flow with consumer but no producer"""
    claude_md = tmp_path / "CLAUDE.md"
    claude_md.write_text("""# CLAUDE.md

### Four-Agent Model

| Agent | Personality | Responsibilities |
|-------|-------------|------------------|
| **BadAgent** | Careless | Just runs `describe`, `head`, `tail` without loading |
""")

    result = await validation_tools.validate_data_flow("BadAgent", str(claude_md))

    assert "error" not in result
    # Should have warning about missing producer
    warning_issues = [i for i in result["issues"] if i["severity"].value == "warning"]
    assert len(warning_issues) > 0


# --- Workflow Integration Tests ---

@pytest.fixture
def domain_plugin_complete(tmp_path):
    """Create a complete domain plugin with gate, review, and advisory agent"""
    plugin_dir = tmp_path / "viz-platform"
    plugin_dir.mkdir()
    (plugin_dir / ".claude-plugin").mkdir()
    (plugin_dir / "commands").mkdir()
    (plugin_dir / "agents").mkdir()

    # Gate command with PASS/FAIL pattern
    gate_cmd = plugin_dir / "commands" / "design-gate.md"
    gate_cmd.write_text("""# /design-gate

Binary pass/fail validation gate for design system compliance.

## Output

- **PASS**: All design system checks passed
- **FAIL**: Design system violations detected
""")

    # Review command
    review_cmd = plugin_dir / "commands" / "design-review.md"
    review_cmd.write_text("""# /design-review

Comprehensive design system audit.
""")

    # Advisory agent
    agent = plugin_dir / "agents" / "design-reviewer.md"
    agent.write_text("""# design-reviewer

Design system compliance auditor.

Handles issues with `Domain/Viz` label.
""")

    return str(plugin_dir)


@pytest.fixture
def domain_plugin_missing_gate(tmp_path):
    """Create domain plugin with review and agent but no gate command"""
    plugin_dir = tmp_path / "data-platform"
    plugin_dir.mkdir()
    (plugin_dir / ".claude-plugin").mkdir()
    (plugin_dir / "commands").mkdir()
    (plugin_dir / "agents").mkdir()

    # Review command (but no gate)
    review_cmd = plugin_dir / "commands" / "data-review.md"
    review_cmd.write_text("""# /data-review

Data integrity audit.
""")

    # Advisory agent
    agent = plugin_dir / "agents" / "data-advisor.md"
    agent.write_text("""# data-advisor

Data integrity advisor for Domain/Data issues.
""")

    return str(plugin_dir)


@pytest.fixture
def domain_plugin_minimal(tmp_path):
    """Create minimal plugin with no commands or agents"""
    plugin_dir = tmp_path / "minimal-plugin"
    plugin_dir.mkdir()
    (plugin_dir / ".claude-plugin").mkdir()

    readme = plugin_dir / "README.md"
    readme.write_text("# Minimal Plugin\n\nNo commands or agents.")

    return str(plugin_dir)


@pytest.mark.asyncio
async def test_validate_workflow_integration_complete(validation_tools, domain_plugin_complete):
    """Test complete domain plugin returns valid with all interfaces found"""
    result = await validation_tools.validate_workflow_integration(
        domain_plugin_complete,
        "Domain/Viz"
    )

    assert "error" not in result
    assert result["valid"] is True
    assert result["gate_command_found"] is True
    assert result["review_command_found"] is True
    assert result["advisory_agent_found"] is True
    # May have INFO issue about missing contract version (not an error/warning)
    error_or_warning = [i for i in result["issues"]
                        if i["severity"].value in ("error", "warning")]
    assert len(error_or_warning) == 0


@pytest.mark.asyncio
async def test_validate_workflow_integration_missing_gate(validation_tools, domain_plugin_missing_gate):
    """Test plugin missing gate command returns invalid with ERROR"""
    result = await validation_tools.validate_workflow_integration(
        domain_plugin_missing_gate,
        "Domain/Data"
    )

    assert "error" not in result
    assert result["valid"] is False
    assert result["gate_command_found"] is False
    assert result["review_command_found"] is True
    assert result["advisory_agent_found"] is True

    # Should have one ERROR for missing gate
    error_issues = [i for i in result["issues"] if i["severity"].value == "error"]
    assert len(error_issues) == 1
    assert "gate" in error_issues[0]["message"].lower()


@pytest.mark.asyncio
async def test_validate_workflow_integration_minimal(validation_tools, domain_plugin_minimal):
    """Test minimal plugin returns invalid with multiple issues"""
    result = await validation_tools.validate_workflow_integration(
        domain_plugin_minimal,
        "Domain/Test"
    )

    assert "error" not in result
    assert result["valid"] is False
    assert result["gate_command_found"] is False
    assert result["review_command_found"] is False
    assert result["advisory_agent_found"] is False

    # Should have one ERROR (gate) and two WARNINGs (review, agent)
    error_issues = [i for i in result["issues"] if i["severity"].value == "error"]
    warning_issues = [i for i in result["issues"] if i["severity"].value == "warning"]
    assert len(error_issues) == 1
    assert len(warning_issues) == 2


@pytest.mark.asyncio
async def test_validate_workflow_integration_nonexistent_plugin(validation_tools, tmp_path):
    """Test error when plugin directory doesn't exist"""
    result = await validation_tools.validate_workflow_integration(
        str(tmp_path / "nonexistent"),
        "Domain/Test"
    )

    assert "error" in result
    assert "not found" in result["error"].lower()


# --- Gate Contract Version Tests ---

@pytest.fixture
def domain_plugin_with_contract(tmp_path):
    """Create domain plugin with gate_contract: v1 in frontmatter"""
    plugin_dir = tmp_path / "viz-platform-versioned"
    plugin_dir.mkdir()
    (plugin_dir / ".claude-plugin").mkdir()
    (plugin_dir / "commands").mkdir()
    (plugin_dir / "agents").mkdir()

    # Gate command with gate_contract in frontmatter
    gate_cmd = plugin_dir / "commands" / "design-gate.md"
    gate_cmd.write_text("""---
description: Design system compliance gate (pass/fail)
gate_contract: v1
---

# /design-gate

Binary pass/fail validation gate for design system compliance.

## Output

- **PASS**: All design system checks passed
- **FAIL**: Design system violations detected
""")

    # Review command
    review_cmd = plugin_dir / "commands" / "design-review.md"
    review_cmd.write_text("""# /design-review

Comprehensive design system audit.
""")

    # Advisory agent
    agent = plugin_dir / "agents" / "design-reviewer.md"
    agent.write_text("""# design-reviewer

Design system compliance auditor for Domain/Viz issues.
""")

    return str(plugin_dir)


@pytest.mark.asyncio
async def test_validate_workflow_contract_match(validation_tools, domain_plugin_with_contract):
    """Test that matching expected_contract produces no warning"""
    result = await validation_tools.validate_workflow_integration(
        domain_plugin_with_contract,
        "Domain/Viz",
        expected_contract="v1"
    )

    assert "error" not in result
    assert result["valid"] is True
    assert result["gate_contract"] == "v1"

    # Should have no warnings about contract mismatch
    warning_issues = [i for i in result["issues"] if i["severity"].value == "warning"]
    contract_warnings = [i for i in warning_issues if "contract" in i["message"].lower()]
    assert len(contract_warnings) == 0


@pytest.mark.asyncio
async def test_validate_workflow_contract_mismatch(validation_tools, domain_plugin_with_contract):
    """Test that mismatched expected_contract produces WARNING"""
    result = await validation_tools.validate_workflow_integration(
        domain_plugin_with_contract,
        "Domain/Viz",
        expected_contract="v2"  # Gate has v1
    )

    assert "error" not in result
    assert result["valid"] is True  # Contract mismatch doesn't affect validity
    assert result["gate_contract"] == "v1"

    # Should have warning about contract mismatch
    warning_issues = [i for i in result["issues"] if i["severity"].value == "warning"]
    contract_warnings = [i for i in warning_issues if "contract" in i["message"].lower()]
    assert len(contract_warnings) == 1
    assert "mismatch" in contract_warnings[0]["message"].lower()
    assert "v1" in contract_warnings[0]["message"]
    assert "v2" in contract_warnings[0]["message"]


@pytest.mark.asyncio
async def test_validate_workflow_no_contract(validation_tools, domain_plugin_complete):
    """Test that missing gate_contract produces INFO suggestion"""
    result = await validation_tools.validate_workflow_integration(
        domain_plugin_complete,
        "Domain/Viz"
    )

    assert "error" not in result
    assert result["valid"] is True
    assert result["gate_contract"] is None

    # Should have info issue about missing contract
    info_issues = [i for i in result["issues"] if i["severity"].value == "info"]
    contract_info = [i for i in info_issues if "contract" in i["message"].lower()]
    assert len(contract_info) == 1
    assert "does not declare" in contract_info[0]["message"].lower()
131 mcp-servers/data-platform/README.md (new file)
@@ -0,0 +1,131 @@
# Data Platform MCP Server

MCP Server providing pandas, PostgreSQL/PostGIS, and dbt tools for Claude Code.

## Features

- **pandas Tools**: DataFrame operations with Arrow IPC data_ref persistence
- **PostgreSQL Tools**: Database queries with asyncpg connection pooling
- **PostGIS Tools**: Spatial data operations
- **dbt Tools**: Build tool wrapper with pre-execution validation

## Installation

```bash
cd mcp-servers/data-platform
python -m venv .venv
source .venv/bin/activate  # On Windows: .venv\Scripts\activate
pip install -r requirements.txt
```

## Configuration

### System-Level (PostgreSQL credentials)

Create `~/.config/claude/postgres.env`:

```env
POSTGRES_URL=postgresql://user:password@host:5432/database
```

### Project-Level (dbt paths)

Create `.env` in your project root:

```env
DBT_PROJECT_DIR=/path/to/dbt/project
DBT_PROFILES_DIR=/path/to/.dbt
DATA_PLATFORM_MAX_ROWS=100000
```

## Tools

### pandas Tools (14 tools)

| Tool | Description |
|------|-------------|
| `read_csv` | Load CSV file into DataFrame |
| `read_parquet` | Load Parquet file into DataFrame |
| `read_json` | Load JSON/JSONL file into DataFrame |
| `to_csv` | Export DataFrame to CSV file |
| `to_parquet` | Export DataFrame to Parquet file |
| `describe` | Get statistical summary of DataFrame |
| `head` | Get first N rows of DataFrame |
| `tail` | Get last N rows of DataFrame |
| `filter` | Filter DataFrame rows by condition |
| `select` | Select specific columns from DataFrame |
| `groupby` | Group DataFrame and aggregate |
| `join` | Join two DataFrames |
| `list_data` | List all stored DataFrames |
| `drop_data` | Remove a DataFrame from storage |

### PostgreSQL Tools (6 tools)

| Tool | Description |
|------|-------------|
| `pg_connect` | Test connection and return status |
| `pg_query` | Execute SELECT, return as data_ref |
| `pg_execute` | Execute INSERT/UPDATE/DELETE |
| `pg_tables` | List all tables in schema |
| `pg_columns` | Get column info for table |
| `pg_schemas` | List all schemas |

### PostGIS Tools (4 tools)

| Tool | Description |
|------|-------------|
| `st_tables` | List PostGIS-enabled tables |
| `st_geometry_type` | Get geometry type of column |
| `st_srid` | Get SRID of geometry column |
| `st_extent` | Get bounding box of geometries |

### dbt Tools (8 tools)

| Tool | Description |
|------|-------------|
| `dbt_parse` | Validate project (pre-execution) |
| `dbt_run` | Run models with selection |
| `dbt_test` | Run tests |
| `dbt_build` | Run + test |
| `dbt_compile` | Compile SQL without executing |
| `dbt_ls` | List resources |
| `dbt_docs_generate` | Generate documentation |
| `dbt_lineage` | Get model dependencies |

## data_ref System

All DataFrame operations use a `data_ref` system to persist data across tool calls:

1. **Load data**: Returns a `data_ref` string (e.g., `"df_a1b2c3d4"`)
2. **Use data_ref**: Pass to other tools (filter, join, export)
3. **List data**: Use `list_data` to see all stored DataFrames
4. **Clean up**: Use `drop_data` when done

### Example Flow

```
read_csv("data.csv") → {"data_ref": "sales_data", "rows": 1000}
filter("sales_data", "amount > 100") → {"data_ref": "sales_data_filtered"}
describe("sales_data_filtered") → {statistics}
to_parquet("sales_data_filtered", "output.parquet") → {success}
```
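Under the hood, `data_ref` strings are keys into an in-process Arrow registry (see `mcp_server/data_store.py` later in this diff). A minimal sketch of that pattern, with simplified helper names (`put`/`get` are illustrative, not the server's API):

```python
import uuid

import pandas as pd
import pyarrow as pa

# Simplified stand-in for the DataStore registry: Arrow Tables keyed by ref.
_registry: dict[str, pa.Table] = {}

def put(df: pd.DataFrame) -> str:
    """Store a DataFrame and hand back a data_ref-style key."""
    ref = f"df_{uuid.uuid4().hex[:8]}"
    _registry[ref] = pa.Table.from_pandas(df)
    return ref

def get(ref: str) -> pd.DataFrame:
    """Materialize the stored Arrow Table back into pandas."""
    return _registry[ref].to_pandas()

ref = put(pd.DataFrame({"amount": [50, 150]}))
print(get(ref).query("amount > 100"))  # one row: 150
```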

## Memory Management

- Default row limit: 100,000 rows per DataFrame
- Configure via `DATA_PLATFORM_MAX_ROWS` environment variable
- Use chunked processing for large files (`chunk_size` parameter)
- Monitor with `list_data` tool (shows memory usage)

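A minimal sketch of how the row limit can be enforced; `check_row_limit` is an illustrative helper name, and the real cap lives inside the server's DataStore:

```python
import os

# Same variable name and default the server uses.
MAX_ROWS = int(os.getenv("DATA_PLATFORM_MAX_ROWS", "100000"))

def check_row_limit(n_rows: int) -> None:
    """Reject loads that exceed the configured cap (illustrative)."""
    if n_rows > MAX_ROWS:
        raise ValueError(
            f"{n_rows} rows exceeds DATA_PLATFORM_MAX_ROWS={MAX_ROWS}; "
            "use chunked processing instead"
        )
```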
## Running

```bash
python -m mcp_server.server
```

## Development

```bash
pip install -e ".[dev]"
pytest
```
7 mcp-servers/data-platform/mcp_server/__init__.py (new file)
@@ -0,0 +1,7 @@
"""
Data Platform MCP Server.

Provides pandas, PostgreSQL/PostGIS, and dbt tools to Claude Code via MCP.
"""

__version__ = "1.0.0"
195
mcp-servers/data-platform/mcp_server/config.py
Normal file
195
mcp-servers/data-platform/mcp_server/config.py
Normal file
@@ -0,0 +1,195 @@
|
|||||||
|
"""
|
||||||
|
Configuration loader for Data Platform MCP Server.
|
||||||
|
|
||||||
|
Implements hybrid configuration system:
|
||||||
|
- System-level: ~/.config/claude/postgres.env (credentials)
|
||||||
|
- Project-level: .env (dbt project paths, overrides)
|
||||||
|
- Auto-detection: dbt_project.yml discovery
|
||||||
|
"""
|
||||||
|
from pathlib import Path
|
||||||
|
from dotenv import load_dotenv
|
||||||
|
import os
|
||||||
|
import logging
|
||||||
|
from typing import Dict, Optional
|
||||||
|
|
||||||
|
logging.basicConfig(level=logging.INFO)
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class DataPlatformConfig:
|
||||||
|
"""Hybrid configuration loader for data platform tools"""
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
self.postgres_url: Optional[str] = None
|
||||||
|
self.dbt_project_dir: Optional[str] = None
|
||||||
|
self.dbt_profiles_dir: Optional[str] = None
|
||||||
|
self.max_rows: int = 100_000
|
||||||
|
|
||||||
|
def load(self) -> Dict[str, Optional[str]]:
|
||||||
|
"""
|
||||||
|
Load configuration from system and project levels.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Dict containing postgres_url, dbt_project_dir, dbt_profiles_dir, max_rows
|
||||||
|
|
||||||
|
Note:
|
||||||
|
PostgreSQL credentials are optional - server can run in pandas-only mode.
|
||||||
|
"""
|
||||||
|
# Load system config (PostgreSQL credentials)
|
||||||
|
system_config = Path.home() / '.config' / 'claude' / 'postgres.env'
|
||||||
|
if system_config.exists():
|
||||||
|
load_dotenv(system_config)
|
||||||
|
logger.info(f"Loaded system configuration from {system_config}")
|
||||||
|
else:
|
||||||
|
logger.info(
|
||||||
|
f"System config not found: {system_config} - "
|
||||||
|
"PostgreSQL tools will be unavailable"
|
||||||
|
)
|
||||||
|
|
||||||
|
        # Find project directory
        project_dir = self._find_project_directory()

        # Load project config (overrides system)
        if project_dir:
            project_config = project_dir / '.env'
            if project_config.exists():
                load_dotenv(project_config, override=True)
                logger.info(f"Loaded project configuration from {project_config}")

        # Extract values
        self.postgres_url = os.getenv('POSTGRES_URL')
        self.dbt_project_dir = os.getenv('DBT_PROJECT_DIR')
        self.dbt_profiles_dir = os.getenv('DBT_PROFILES_DIR')
        self.max_rows = int(os.getenv('DATA_PLATFORM_MAX_ROWS', '100000'))

        # Auto-detect dbt project if not specified
        if not self.dbt_project_dir and project_dir:
            self.dbt_project_dir = self._find_dbt_project(project_dir)
            if self.dbt_project_dir:
                logger.info(f"Auto-detected dbt project: {self.dbt_project_dir}")

        # Default dbt profiles dir to ~/.dbt
        if not self.dbt_profiles_dir:
            default_profiles = Path.home() / '.dbt'
            if default_profiles.exists():
                self.dbt_profiles_dir = str(default_profiles)

        return {
            'postgres_url': self.postgres_url,
            'dbt_project_dir': self.dbt_project_dir,
            'dbt_profiles_dir': self.dbt_profiles_dir,
            'max_rows': self.max_rows,
            'postgres_available': self.postgres_url is not None,
            'dbt_available': self.dbt_project_dir is not None
        }

    def _find_project_directory(self) -> Optional[Path]:
        """
        Find the user's project directory.

        Returns:
            Path to project directory, or None if not found
        """
        # Strategy 1: Check CLAUDE_PROJECT_DIR environment variable
        project_dir = os.getenv('CLAUDE_PROJECT_DIR')
        if project_dir:
            path = Path(project_dir)
            if path.exists():
                logger.info(f"Found project directory from CLAUDE_PROJECT_DIR: {path}")
                return path

        # Strategy 2: Check PWD
        pwd = os.getenv('PWD')
        if pwd:
            path = Path(pwd)
            if path.exists() and (
                (path / '.git').exists() or
                (path / '.env').exists() or
                (path / 'dbt_project.yml').exists()
            ):
                logger.info(f"Found project directory from PWD: {path}")
                return path

        # Strategy 3: Check current working directory
        cwd = Path.cwd()
        if (cwd / '.git').exists() or (cwd / '.env').exists() or (cwd / 'dbt_project.yml').exists():
            logger.info(f"Found project directory from cwd: {cwd}")
            return cwd

        logger.debug("Could not determine project directory")
        return None

    def _find_dbt_project(self, start_dir: Path) -> Optional[str]:
        """
        Find dbt_project.yml in the project or its subdirectories.

        Args:
            start_dir: Directory to start searching from

        Returns:
            Path to dbt project directory, or None if not found
        """
        # Check root
        if (start_dir / 'dbt_project.yml').exists():
            return str(start_dir)

        # Check common subdirectories
        for subdir in ['dbt', 'transform', 'analytics', 'models']:
            candidate = start_dir / subdir
            if (candidate / 'dbt_project.yml').exists():
                return str(candidate)

        # Search one level deep
        for item in start_dir.iterdir():
            if item.is_dir() and not item.name.startswith('.'):
                if (item / 'dbt_project.yml').exists():
                    return str(item)

        return None


def load_config() -> Dict[str, Any]:
    """
    Convenience function to load configuration.

    Returns:
        Configuration dictionary
    """
    config = DataPlatformConfig()
    return config.load()


def check_postgres_connection() -> Dict[str, Any]:
    """
    Check PostgreSQL connection status for SessionStart hook.

    Returns:
        Dict with connection status and message
    """
    import asyncio

    config = load_config()
    if not config.get('postgres_url'):
        return {
            'connected': False,
            'message': 'PostgreSQL not configured (POSTGRES_URL not set)'
        }

    async def test_connection():
        try:
            import asyncpg
            conn = await asyncpg.connect(config['postgres_url'], timeout=5)
            version = await conn.fetchval('SELECT version()')
            await conn.close()
            return {
                'connected': True,
                'message': 'Connected to PostgreSQL',
                'version': version.split(',')[0] if version else 'Unknown'
            }
        except Exception as e:
            return {
                'connected': False,
                'message': f'PostgreSQL connection failed: {str(e)}'
            }

    return asyncio.run(test_connection())
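For orientation, a minimal sketch of how this configuration module might be exercised from a hook script. The `mcp_server.config` import path is inferred from the file layout in this diff, not confirmed by it; everything else uses names defined above.

# Hedged usage sketch for the configuration module above.
# Assumption: the package is importable as `mcp_server` (per the paths in this diff).
from mcp_server.config import load_config, check_postgres_connection

config = load_config()
print(f"max_rows={config['max_rows']}, dbt_available={config['dbt_available']}")

if config['postgres_available']:
    status = check_postgres_connection()
    print(status['message'])   # e.g. "Connected to PostgreSQL"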
mcp-servers/data-platform/mcp_server/data_store.py (new file, 219 lines)
@@ -0,0 +1,219 @@
"""
Arrow IPC DataFrame Registry.

Provides persistent storage for DataFrames across tool calls using Apache Arrow
for efficient memory management and serialization.
"""
import pyarrow as pa
import pandas as pd
import uuid
import logging
from typing import Dict, Optional, List, Union
from dataclasses import dataclass
from datetime import datetime

logger = logging.getLogger(__name__)


@dataclass
class DataFrameInfo:
    """Metadata about a stored DataFrame"""
    ref: str
    rows: int
    columns: int
    column_names: List[str]
    dtypes: Dict[str, str]
    memory_bytes: int
    created_at: datetime
    source: Optional[str] = None


class DataStore:
    """
    Singleton registry for Arrow Tables (DataFrames).

    Uses Arrow IPC format for efficient memory usage and supports
    data_ref based retrieval across multiple tool calls.
    """
    _instance = None
    _dataframes: Dict[str, pa.Table] = {}
    _metadata: Dict[str, DataFrameInfo] = {}
    _max_rows: int = 100_000

    def __new__(cls):
        if cls._instance is None:
            cls._instance = super().__new__(cls)
            cls._dataframes = {}
            cls._metadata = {}
        return cls._instance

    @classmethod
    def get_instance(cls) -> 'DataStore':
        """Get the singleton instance"""
        if cls._instance is None:
            cls._instance = cls()
        return cls._instance

    @classmethod
    def set_max_rows(cls, max_rows: int):
        """Set the maximum rows limit"""
        cls._max_rows = max_rows

    def store(
        self,
        data: Union[pa.Table, pd.DataFrame],
        name: Optional[str] = None,
        source: Optional[str] = None
    ) -> str:
        """
        Store a DataFrame and return its reference.

        Args:
            data: Arrow Table or pandas DataFrame
            name: Optional name for the reference (auto-generated if not provided)
            source: Optional source description (e.g., file path, query)

        Returns:
            data_ref string to retrieve the DataFrame later
        """
        # Convert pandas to Arrow if needed
        if isinstance(data, pd.DataFrame):
            table = pa.Table.from_pandas(data)
        else:
            table = data

        # Generate reference
        data_ref = name or f"df_{uuid.uuid4().hex[:8]}"

        # Ensure unique reference
        if data_ref in self._dataframes and name is None:
            data_ref = f"{data_ref}_{uuid.uuid4().hex[:4]}"

        # Store table
        self._dataframes[data_ref] = table

        # Store metadata
        schema = table.schema
        self._metadata[data_ref] = DataFrameInfo(
            ref=data_ref,
            rows=table.num_rows,
            columns=table.num_columns,
            column_names=[f.name for f in schema],
            dtypes={f.name: str(f.type) for f in schema},
            memory_bytes=table.nbytes,
            created_at=datetime.now(),
            source=source
        )

        logger.info(f"Stored DataFrame '{data_ref}': {table.num_rows} rows, {table.num_columns} cols")
        return data_ref

    def get(self, data_ref: str) -> Optional[pa.Table]:
        """
        Retrieve an Arrow Table by reference.

        Args:
            data_ref: Reference string from store()

        Returns:
            Arrow Table or None if not found
        """
        return self._dataframes.get(data_ref)

    def get_pandas(self, data_ref: str) -> Optional[pd.DataFrame]:
        """
        Retrieve a DataFrame as pandas.

        Args:
            data_ref: Reference string from store()

        Returns:
            pandas DataFrame or None if not found
        """
        table = self.get(data_ref)
        if table is not None:
            return table.to_pandas()
        return None

    def get_info(self, data_ref: str) -> Optional[DataFrameInfo]:
        """
        Get metadata about a stored DataFrame.

        Args:
            data_ref: Reference string

        Returns:
            DataFrameInfo or None if not found
        """
        return self._metadata.get(data_ref)

    def list_refs(self) -> List[Dict]:
        """
        List all stored DataFrame references with metadata.

        Returns:
            List of dicts with ref, rows, columns, memory info
        """
        result = []
        for ref, info in self._metadata.items():
            result.append({
                'ref': ref,
                'rows': info.rows,
                'columns': info.columns,
                'column_names': info.column_names,
                'memory_mb': round(info.memory_bytes / (1024 * 1024), 2),
                'source': info.source,
                'created_at': info.created_at.isoformat()
            })
        return result

    def drop(self, data_ref: str) -> bool:
        """
        Remove a DataFrame from the store.

        Args:
            data_ref: Reference string

        Returns:
            True if removed, False if not found
        """
        if data_ref in self._dataframes:
            del self._dataframes[data_ref]
            del self._metadata[data_ref]
            logger.info(f"Dropped DataFrame '{data_ref}'")
            return True
        return False

    def clear(self):
        """Remove all stored DataFrames"""
        count = len(self._dataframes)
        self._dataframes.clear()
        self._metadata.clear()
        logger.info(f"Cleared {count} DataFrames from store")

    def total_memory_bytes(self) -> int:
        """Get total memory used by all stored DataFrames"""
        return sum(info.memory_bytes for info in self._metadata.values())

    def total_memory_mb(self) -> float:
        """Get total memory in MB"""
        return round(self.total_memory_bytes() / (1024 * 1024), 2)

    def check_row_limit(self, row_count: int) -> Dict:
        """
        Check if row count exceeds limit.

        Args:
            row_count: Number of rows

        Returns:
            Dict with 'exceeded' bool and 'message' if exceeded
        """
        if row_count > self._max_rows:
            return {
                'exceeded': True,
                'message': f"Row count ({row_count:,}) exceeds limit ({self._max_rows:,})",
                'suggestion': "Use chunked processing or filter data first",
                'limit': self._max_rows
            }
        return {'exceeded': False}
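A round-trip sketch of the registry above, using only names defined in this file; the `mcp_server` import path is again assumed from the diff's layout. Because `_dataframes` and `_metadata` live on the class, state survives repeated instantiation within one process, which is what lets separate tool calls share refs.

# Store a pandas DataFrame, read it back as Arrow and as pandas, then drop it.
import pandas as pd
from mcp_server.data_store import DataStore   # import path assumed from this diff

store = DataStore.get_instance()
ref = store.store(pd.DataFrame({'x': [1, 2, 3]}), name='demo', source='inline')

table = store.get(ref)            # pyarrow.Table, no copy
df = store.get_pandas(ref)        # converted back to pandas
print(store.get_info(ref).rows)   # 3
print(store.total_memory_mb())
store.drop(ref)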
mcp-servers/data-platform/mcp_server/dbt_tools.py (new file, 387 lines)
@@ -0,0 +1,387 @@
"""
dbt MCP Tools.

Provides dbt CLI wrapper with pre-execution validation.
"""
import subprocess
import json
import logging
import os
from pathlib import Path
from typing import Dict, List, Optional, Any

from .config import load_config

logger = logging.getLogger(__name__)


class DbtTools:
    """dbt CLI wrapper tools with pre-validation"""

    def __init__(self):
        self.config = load_config()
        self.project_dir = self.config.get('dbt_project_dir')
        self.profiles_dir = self.config.get('dbt_profiles_dir')

    def _get_dbt_command(self, cmd: List[str]) -> List[str]:
        """Build dbt command with project and profiles directories"""
        base = ['dbt']
        if self.project_dir:
            base.extend(['--project-dir', self.project_dir])
        if self.profiles_dir:
            base.extend(['--profiles-dir', self.profiles_dir])
        base.extend(cmd)
        return base

    def _run_dbt(
        self,
        cmd: List[str],
        timeout: int = 300,
        capture_json: bool = False
    ) -> Dict:
        """
        Run dbt command and return result.

        Args:
            cmd: dbt subcommand and arguments
            timeout: Command timeout in seconds
            capture_json: If True, parse JSON output

        Returns:
            Dict with command result
        """
        if not self.project_dir:
            return {
                'error': 'dbt project not found',
                'suggestion': 'Set DBT_PROJECT_DIR in project .env or ensure dbt_project.yml exists'
            }

        full_cmd = self._get_dbt_command(cmd)
        logger.info(f"Running: {' '.join(full_cmd)}")

        try:
            env = os.environ.copy()
            # Disable dbt analytics/tracking
            env['DBT_SEND_ANONYMOUS_USAGE_STATS'] = 'false'

            result = subprocess.run(
                full_cmd,
                capture_output=True,
                text=True,
                timeout=timeout,
                cwd=self.project_dir,
                env=env
            )

            output = {
                'success': result.returncode == 0,
                'command': ' '.join(cmd),
                'stdout': result.stdout,
                'stderr': result.stderr if result.returncode != 0 else None
            }

            if capture_json and result.returncode == 0:
                try:
                    output['data'] = json.loads(result.stdout)
                except json.JSONDecodeError:
                    pass

            return output

        except subprocess.TimeoutExpired:
            return {
                'error': f'Command timed out after {timeout}s',
                'command': ' '.join(cmd)
            }
        except FileNotFoundError:
            return {
                'error': 'dbt not found in PATH',
                'suggestion': 'Install dbt: pip install dbt-core dbt-postgres'
            }
        except Exception as e:
            logger.error(f"dbt command failed: {e}")
            return {'error': str(e)}

    async def dbt_parse(self) -> Dict:
        """
        Validate dbt project without executing (pre-flight check).

        Returns:
            Dict with validation result and any errors
        """
        result = self._run_dbt(['parse'])

        # Check if _run_dbt returned an error (e.g., project not found, timeout, dbt not installed)
        if 'error' in result:
            return result

        if not result.get('success'):
            # Extract useful error info from stderr
            stderr = result.get('stderr', '') or result.get('stdout', '')
            errors = []

            # Look for common dbt 1.9+ deprecation warnings
            if 'deprecated' in stderr.lower():
                errors.append({
                    'type': 'deprecation',
                    'message': 'Deprecated syntax found - check dbt 1.9+ migration guide'
                })

            # Look for compilation errors
            if 'compilation error' in stderr.lower():
                errors.append({
                    'type': 'compilation',
                    'message': 'SQL compilation error - check model syntax'
                })

            return {
                'valid': False,
                'errors': errors,
                'details': stderr[:2000] if stderr else None,
                'suggestion': 'Fix issues before running dbt models'
            }

        return {
            'valid': True,
            'message': 'dbt project validation passed'
        }

    async def dbt_run(
        self,
        select: Optional[str] = None,
        exclude: Optional[str] = None,
        full_refresh: bool = False
    ) -> Dict:
        """
        Run dbt models with pre-validation.

        Args:
            select: Model selection (e.g., "model_name", "+model_name", "tag:daily")
            exclude: Models to exclude
            full_refresh: If True, rebuild incremental models

        Returns:
            Dict with run result
        """
        # ALWAYS validate first
        parse_result = await self.dbt_parse()
        if not parse_result.get('valid'):
            return {
                'error': 'Pre-validation failed',
                **parse_result
            }

        cmd = ['run']
        if select:
            cmd.extend(['--select', select])
        if exclude:
            cmd.extend(['--exclude', exclude])
        if full_refresh:
            cmd.append('--full-refresh')

        return self._run_dbt(cmd)

    async def dbt_test(
        self,
        select: Optional[str] = None,
        exclude: Optional[str] = None
    ) -> Dict:
        """
        Run dbt tests.

        Args:
            select: Test selection
            exclude: Tests to exclude

        Returns:
            Dict with test results
        """
        cmd = ['test']
        if select:
            cmd.extend(['--select', select])
        if exclude:
            cmd.extend(['--exclude', exclude])

        return self._run_dbt(cmd)

    async def dbt_build(
        self,
        select: Optional[str] = None,
        exclude: Optional[str] = None,
        full_refresh: bool = False
    ) -> Dict:
        """
        Run dbt build (run + test) with pre-validation.

        Args:
            select: Model/test selection
            exclude: Resources to exclude
            full_refresh: If True, rebuild incremental models

        Returns:
            Dict with build result
        """
        # ALWAYS validate first
        parse_result = await self.dbt_parse()
        if not parse_result.get('valid'):
            return {
                'error': 'Pre-validation failed',
                **parse_result
            }

        cmd = ['build']
        if select:
            cmd.extend(['--select', select])
        if exclude:
            cmd.extend(['--exclude', exclude])
        if full_refresh:
            cmd.append('--full-refresh')

        return self._run_dbt(cmd)

    async def dbt_compile(
        self,
        select: Optional[str] = None
    ) -> Dict:
        """
        Compile dbt models to SQL without executing.

        Args:
            select: Model selection

        Returns:
            Dict with compiled SQL info
        """
        cmd = ['compile']
        if select:
            cmd.extend(['--select', select])

        return self._run_dbt(cmd)

    async def dbt_ls(
        self,
        select: Optional[str] = None,
        resource_type: Optional[str] = None,
        output: str = 'name'
    ) -> Dict:
        """
        List dbt resources.

        Args:
            select: Resource selection
            resource_type: Filter by type (model, test, seed, snapshot, source)
            output: Output format ('name', 'path', 'json')

        Returns:
            Dict with list of resources
        """
        cmd = ['ls', '--output', output]
        if select:
            cmd.extend(['--select', select])
        if resource_type:
            cmd.extend(['--resource-type', resource_type])

        result = self._run_dbt(cmd)

        if result.get('success') and result.get('stdout'):
            lines = [line.strip() for line in result['stdout'].split('\n') if line.strip()]
            result['resources'] = lines
            result['count'] = len(lines)

        return result

    async def dbt_docs_generate(self) -> Dict:
        """
        Generate dbt documentation.

        Returns:
            Dict with generation result
        """
        result = self._run_dbt(['docs', 'generate'])

        if result.get('success') and self.project_dir:
            # Check for generated catalog
            catalog_path = Path(self.project_dir) / 'target' / 'catalog.json'
            manifest_path = Path(self.project_dir) / 'target' / 'manifest.json'
            result['catalog_generated'] = catalog_path.exists()
            result['manifest_generated'] = manifest_path.exists()

        return result

    async def dbt_lineage(self, model: str) -> Dict:
        """
        Get model dependencies and lineage.

        Args:
            model: Model name to analyze

        Returns:
            Dict with upstream and downstream dependencies
        """
        if not self.project_dir:
            return {'error': 'dbt project not found'}

        manifest_path = Path(self.project_dir) / 'target' / 'manifest.json'

        # Generate manifest if not exists
        if not manifest_path.exists():
            compile_result = await self.dbt_compile(select=model)
            if not compile_result.get('success'):
                return {
                    'error': 'Failed to compile manifest',
                    'details': compile_result
                }

        if not manifest_path.exists():
            return {
                'error': 'Manifest not found',
                'suggestion': 'Run dbt compile first'
            }

        try:
            with open(manifest_path) as f:
                manifest = json.load(f)

            # Find the model node
            model_key = None
            for key in manifest.get('nodes', {}):
                if key.endswith(f'.{model}') or manifest['nodes'][key].get('name') == model:
                    model_key = key
                    break

            if not model_key:
                return {
                    'error': f'Model not found: {model}',
                    'available_models': [
                        n.get('name') for n in manifest.get('nodes', {}).values()
                        if n.get('resource_type') == 'model'
                    ][:20]
                }

            node = manifest['nodes'][model_key]

            # Get upstream (depends_on)
            upstream = node.get('depends_on', {}).get('nodes', [])

            # Get downstream (find nodes that depend on this one)
            downstream = []
            for key, other_node in manifest.get('nodes', {}).items():
                deps = other_node.get('depends_on', {}).get('nodes', [])
                if model_key in deps:
                    downstream.append(key)

            return {
                'model': model,
                'unique_id': model_key,
                'materialization': node.get('config', {}).get('materialized'),
                'schema': node.get('schema'),
                'database': node.get('database'),
                'upstream': upstream,
                'downstream': downstream,
                'description': node.get('description'),
                'tags': node.get('tags', [])
            }

        except Exception as e:
            logger.error(f"dbt_lineage failed: {e}")
            return {'error': str(e)}
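A sketch of the validate-then-run flow these tools enforce. The async entry point is hypothetical glue and the `tag:daily` selector is illustrative; everything else uses methods defined above.

import asyncio
from mcp_server.dbt_tools import DbtTools   # import path assumed from this diff

async def main():
    dbt = DbtTools()
    parse = await dbt.dbt_parse()            # same pre-flight check dbt_run does internally
    if not parse.get('valid'):
        print(parse.get('details'))
        return
    result = await dbt.dbt_run(select='tag:daily')   # selector value is illustrative
    print(result.get('success'))

asyncio.run(main())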
mcp-servers/data-platform/mcp_server/pandas_tools.py (new file, 500 lines)
@@ -0,0 +1,500 @@
"""
pandas MCP Tools.

Provides DataFrame operations with Arrow IPC data_ref persistence.
"""
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
import json
import logging
from pathlib import Path
from typing import Dict, List, Optional, Any, Union

from .data_store import DataStore
from .config import load_config

logger = logging.getLogger(__name__)


class PandasTools:
    """pandas data manipulation tools with data_ref persistence"""

    def __init__(self):
        self.store = DataStore.get_instance()
        config = load_config()
        self.max_rows = config.get('max_rows', 100_000)
        self.store.set_max_rows(self.max_rows)

    def _check_and_store(
        self,
        df: pd.DataFrame,
        name: Optional[str] = None,
        source: Optional[str] = None
    ) -> Dict:
        """Check row limit and store DataFrame if within limits"""
        check = self.store.check_row_limit(len(df))
        if check['exceeded']:
            return {
                'error': 'row_limit_exceeded',
                **check,
                'preview': df.head(100).to_dict(orient='records')
            }

        data_ref = self.store.store(df, name=name, source=source)
        return {
            'data_ref': data_ref,
            'rows': len(df),
            'columns': list(df.columns),
            'dtypes': {col: str(dtype) for col, dtype in df.dtypes.items()}
        }

    async def read_csv(
        self,
        file_path: str,
        name: Optional[str] = None,
        chunk_size: Optional[int] = None,
        **kwargs
    ) -> Dict:
        """
        Load CSV file into DataFrame.

        Args:
            file_path: Path to CSV file
            name: Optional name for data_ref
            chunk_size: If provided, process in chunks
            **kwargs: Additional pandas read_csv arguments

        Returns:
            Dict with data_ref or error info
        """
        path = Path(file_path)
        if not path.exists():
            return {'error': f'File not found: {file_path}'}

        try:
            if chunk_size:
                # Chunked processing - return iterator info
                chunks = []
                for i, chunk in enumerate(pd.read_csv(path, chunksize=chunk_size, **kwargs)):
                    chunk_ref = self.store.store(chunk, name=f"{name or 'chunk'}_{i}", source=file_path)
                    chunks.append({'ref': chunk_ref, 'rows': len(chunk)})
                return {
                    'chunked': True,
                    'chunks': chunks,
                    'total_chunks': len(chunks)
                }

            df = pd.read_csv(path, **kwargs)
            return self._check_and_store(df, name=name, source=file_path)

        except Exception as e:
            logger.error(f"read_csv failed: {e}")
            return {'error': str(e)}

    async def read_parquet(
        self,
        file_path: str,
        name: Optional[str] = None,
        columns: Optional[List[str]] = None
    ) -> Dict:
        """
        Load Parquet file into DataFrame.

        Args:
            file_path: Path to Parquet file
            name: Optional name for data_ref
            columns: Optional list of columns to load

        Returns:
            Dict with data_ref or error info
        """
        path = Path(file_path)
        if not path.exists():
            return {'error': f'File not found: {file_path}'}

        try:
            table = pq.read_table(path, columns=columns)
            df = table.to_pandas()
            return self._check_and_store(df, name=name, source=file_path)

        except Exception as e:
            logger.error(f"read_parquet failed: {e}")
            return {'error': str(e)}

    async def read_json(
        self,
        file_path: str,
        name: Optional[str] = None,
        lines: bool = False,
        **kwargs
    ) -> Dict:
        """
        Load JSON/JSONL file into DataFrame.

        Args:
            file_path: Path to JSON file
            name: Optional name for data_ref
            lines: If True, read as JSON Lines format
            **kwargs: Additional pandas read_json arguments

        Returns:
            Dict with data_ref or error info
        """
        path = Path(file_path)
        if not path.exists():
            return {'error': f'File not found: {file_path}'}

        try:
            df = pd.read_json(path, lines=lines, **kwargs)
            return self._check_and_store(df, name=name, source=file_path)

        except Exception as e:
            logger.error(f"read_json failed: {e}")
            return {'error': str(e)}

    async def to_csv(
        self,
        data_ref: str,
        file_path: str,
        index: bool = False,
        **kwargs
    ) -> Dict:
        """
        Export DataFrame to CSV file.

        Args:
            data_ref: Reference to stored DataFrame
            file_path: Output file path
            index: Whether to include index
            **kwargs: Additional pandas to_csv arguments

        Returns:
            Dict with success status
        """
        df = self.store.get_pandas(data_ref)
        if df is None:
            return {'error': f'DataFrame not found: {data_ref}'}

        try:
            df.to_csv(file_path, index=index, **kwargs)
            return {
                'success': True,
                'file_path': file_path,
                'rows': len(df),
                'size_bytes': Path(file_path).stat().st_size
            }
        except Exception as e:
            logger.error(f"to_csv failed: {e}")
            return {'error': str(e)}

    async def to_parquet(
        self,
        data_ref: str,
        file_path: str,
        compression: str = 'snappy'
    ) -> Dict:
        """
        Export DataFrame to Parquet file.

        Args:
            data_ref: Reference to stored DataFrame
            file_path: Output file path
            compression: Compression codec

        Returns:
            Dict with success status
        """
        table = self.store.get(data_ref)
        if table is None:
            return {'error': f'DataFrame not found: {data_ref}'}

        try:
            pq.write_table(table, file_path, compression=compression)
            return {
                'success': True,
                'file_path': file_path,
                'rows': table.num_rows,
                'size_bytes': Path(file_path).stat().st_size
            }
        except Exception as e:
            logger.error(f"to_parquet failed: {e}")
            return {'error': str(e)}

    async def describe(self, data_ref: str) -> Dict:
        """
        Get statistical summary of DataFrame.

        Args:
            data_ref: Reference to stored DataFrame

        Returns:
            Dict with statistical summary
        """
        df = self.store.get_pandas(data_ref)
        if df is None:
            return {'error': f'DataFrame not found: {data_ref}'}

        try:
            desc = df.describe(include='all')
            info = self.store.get_info(data_ref)

            return {
                'data_ref': data_ref,
                'shape': {'rows': len(df), 'columns': len(df.columns)},
                'columns': list(df.columns),
                'dtypes': {col: str(dtype) for col, dtype in df.dtypes.items()},
                'memory_mb': info.memory_bytes / (1024 * 1024) if info else None,
                'null_counts': df.isnull().sum().to_dict(),
                'statistics': desc.to_dict()
            }
        except Exception as e:
            logger.error(f"describe failed: {e}")
            return {'error': str(e)}

    async def head(self, data_ref: str, n: int = 10) -> Dict:
        """
        Get first N rows of DataFrame.

        Args:
            data_ref: Reference to stored DataFrame
            n: Number of rows

        Returns:
            Dict with rows as records
        """
        df = self.store.get_pandas(data_ref)
        if df is None:
            return {'error': f'DataFrame not found: {data_ref}'}

        try:
            head_df = df.head(n)
            return {
                'data_ref': data_ref,
                'total_rows': len(df),
                'returned_rows': len(head_df),
                'columns': list(df.columns),
                'data': head_df.to_dict(orient='records')
            }
        except Exception as e:
            logger.error(f"head failed: {e}")
            return {'error': str(e)}

    async def tail(self, data_ref: str, n: int = 10) -> Dict:
        """
        Get last N rows of DataFrame.

        Args:
            data_ref: Reference to stored DataFrame
            n: Number of rows

        Returns:
            Dict with rows as records
        """
        df = self.store.get_pandas(data_ref)
        if df is None:
            return {'error': f'DataFrame not found: {data_ref}'}

        try:
            tail_df = df.tail(n)
            return {
                'data_ref': data_ref,
                'total_rows': len(df),
                'returned_rows': len(tail_df),
                'columns': list(df.columns),
                'data': tail_df.to_dict(orient='records')
            }
        except Exception as e:
            logger.error(f"tail failed: {e}")
            return {'error': str(e)}

    async def filter(
        self,
        data_ref: str,
        condition: str,
        name: Optional[str] = None
    ) -> Dict:
        """
        Filter DataFrame rows by condition.

        Args:
            data_ref: Reference to stored DataFrame
            condition: pandas query string (e.g., "age > 30 and city == 'NYC'")
            name: Optional name for result data_ref

        Returns:
            Dict with new data_ref for filtered result
        """
        df = self.store.get_pandas(data_ref)
        if df is None:
            return {'error': f'DataFrame not found: {data_ref}'}

        try:
            filtered = df.query(condition).reset_index(drop=True)
            result_name = name or f"{data_ref}_filtered"
            return self._check_and_store(
                filtered,
                name=result_name,
                source=f"filter({data_ref}, '{condition}')"
            )
        except Exception as e:
            logger.error(f"filter failed: {e}")
            return {'error': str(e)}

    async def select(
        self,
        data_ref: str,
        columns: List[str],
        name: Optional[str] = None
    ) -> Dict:
        """
        Select specific columns from DataFrame.

        Args:
            data_ref: Reference to stored DataFrame
            columns: List of column names to select
            name: Optional name for result data_ref

        Returns:
            Dict with new data_ref for selected columns
        """
        df = self.store.get_pandas(data_ref)
        if df is None:
            return {'error': f'DataFrame not found: {data_ref}'}

        try:
            # Validate columns exist
            missing = [c for c in columns if c not in df.columns]
            if missing:
                return {
                    'error': f'Columns not found: {missing}',
                    'available_columns': list(df.columns)
                }

            selected = df[columns]
            result_name = name or f"{data_ref}_select"
            return self._check_and_store(
                selected,
                name=result_name,
                source=f"select({data_ref}, {columns})"
            )
        except Exception as e:
            logger.error(f"select failed: {e}")
            return {'error': str(e)}

    async def groupby(
        self,
        data_ref: str,
        by: Union[str, List[str]],
        agg: Dict[str, Union[str, List[str]]],
        name: Optional[str] = None
    ) -> Dict:
        """
        Group DataFrame and aggregate.

        Args:
            data_ref: Reference to stored DataFrame
            by: Column(s) to group by
            agg: Aggregation dict (e.g., {"sales": "sum", "count": "mean"})
            name: Optional name for result data_ref

        Returns:
            Dict with new data_ref for aggregated result
        """
        df = self.store.get_pandas(data_ref)
        if df is None:
            return {'error': f'DataFrame not found: {data_ref}'}

        try:
            grouped = df.groupby(by).agg(agg).reset_index()
            # Flatten column names if multi-level
            if isinstance(grouped.columns, pd.MultiIndex):
                grouped.columns = ['_'.join(col).strip('_') for col in grouped.columns]

            result_name = name or f"{data_ref}_grouped"
            return self._check_and_store(
                grouped,
                name=result_name,
                source=f"groupby({data_ref}, by={by})"
            )
        except Exception as e:
            logger.error(f"groupby failed: {e}")
            return {'error': str(e)}

    async def join(
        self,
        left_ref: str,
        right_ref: str,
        on: Optional[Union[str, List[str]]] = None,
        left_on: Optional[Union[str, List[str]]] = None,
        right_on: Optional[Union[str, List[str]]] = None,
        how: str = 'inner',
        name: Optional[str] = None
    ) -> Dict:
        """
        Join two DataFrames.

        Args:
            left_ref: Reference to left DataFrame
            right_ref: Reference to right DataFrame
            on: Column(s) to join on (if same name in both)
            left_on: Left join column(s)
            right_on: Right join column(s)
            how: Join type ('inner', 'left', 'right', 'outer')
            name: Optional name for result data_ref

        Returns:
            Dict with new data_ref for joined result
        """
        left_df = self.store.get_pandas(left_ref)
        right_df = self.store.get_pandas(right_ref)

        if left_df is None:
            return {'error': f'DataFrame not found: {left_ref}'}
        if right_df is None:
            return {'error': f'DataFrame not found: {right_ref}'}

        try:
            joined = pd.merge(
                left_df, right_df,
                on=on, left_on=left_on, right_on=right_on,
                how=how
            )
            result_name = name or f"{left_ref}_{right_ref}_joined"
            return self._check_and_store(
                joined,
                name=result_name,
                source=f"join({left_ref}, {right_ref}, how={how})"
            )
        except Exception as e:
            logger.error(f"join failed: {e}")
            return {'error': str(e)}

    async def list_data(self) -> Dict:
        """
        List all stored DataFrames.

        Returns:
            Dict with list of stored DataFrames and their info
        """
        refs = self.store.list_refs()
        return {
            'count': len(refs),
            'total_memory_mb': self.store.total_memory_mb(),
            'max_rows_limit': self.max_rows,
            'dataframes': refs
        }

    async def drop_data(self, data_ref: str) -> Dict:
        """
        Remove a DataFrame from storage.

        Args:
            data_ref: Reference to drop

        Returns:
            Dict with success status
        """
        if self.store.drop(data_ref):
            return {'success': True, 'dropped': data_ref}
        return {'error': f'DataFrame not found: {data_ref}'}
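A sketch of chaining operations through data_refs; `sales.csv` and its `amount`/`region` columns are illustrative, not part of the diff, and error dicts are handled only at the first step for brevity.

import asyncio
from mcp_server.pandas_tools import PandasTools   # import path assumed from this diff

async def main():
    tools = PandasTools()
    loaded = await tools.read_csv('sales.csv', name='sales')   # hypothetical file
    if 'error' in loaded:
        print(loaded)
        return
    big = await tools.filter('sales', 'amount > 100', name='big_sales')
    grouped = await tools.groupby('big_sales', by='region', agg={'amount': 'sum'})
    print(await tools.head(grouped['data_ref'], n=5))

asyncio.run(main())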
mcp-servers/data-platform/mcp_server/postgres_tools.py (new file, 538 lines)
@@ -0,0 +1,538 @@
"""
PostgreSQL/PostGIS MCP Tools.

Provides database operations with connection pooling and PostGIS support.
"""
import asyncio
import logging
from typing import Dict, List, Optional, Any
import json

from .data_store import DataStore
from .config import load_config

logger = logging.getLogger(__name__)

# Optional imports - gracefully handle missing dependencies
try:
    import asyncpg
    ASYNCPG_AVAILABLE = True
except ImportError:
    ASYNCPG_AVAILABLE = False
    logger.warning("asyncpg not available - PostgreSQL tools will be disabled")

try:
    import pandas as pd
    PANDAS_AVAILABLE = True
except ImportError:
    PANDAS_AVAILABLE = False


class PostgresTools:
    """PostgreSQL/PostGIS database tools"""

    def __init__(self):
        self.store = DataStore.get_instance()
        self.config = load_config()
        self.pool: Optional[Any] = None
        self.max_rows = self.config.get('max_rows', 100_000)

    async def _get_pool(self):
        """Get or create connection pool"""
        if not ASYNCPG_AVAILABLE:
            raise RuntimeError("asyncpg not installed - run: pip install asyncpg")

        if self.pool is None:
            postgres_url = self.config.get('postgres_url')
            if not postgres_url:
                raise RuntimeError(
                    "PostgreSQL not configured. Set POSTGRES_URL in "
                    "~/.config/claude/postgres.env"
                )
            self.pool = await asyncpg.create_pool(postgres_url, min_size=1, max_size=5)
        return self.pool

    async def pg_connect(self) -> Dict:
        """
        Test PostgreSQL connection and return status.

        Returns:
            Dict with connection status, version, and database info
        """
        if not ASYNCPG_AVAILABLE:
            return {
                'connected': False,
                'error': 'asyncpg not installed',
                'suggestion': 'pip install asyncpg'
            }

        postgres_url = self.config.get('postgres_url')
        if not postgres_url:
            return {
                'connected': False,
                'error': 'POSTGRES_URL not configured',
                'suggestion': 'Create ~/.config/claude/postgres.env with POSTGRES_URL=postgresql://...'
            }

        try:
            pool = await self._get_pool()
            async with pool.acquire() as conn:
                version = await conn.fetchval('SELECT version()')
                db_name = await conn.fetchval('SELECT current_database()')
                user = await conn.fetchval('SELECT current_user')

                # Check for PostGIS
                postgis_version = None
                try:
                    postgis_version = await conn.fetchval('SELECT PostGIS_Version()')
                except Exception:
                    pass

                return {
                    'connected': True,
                    'database': db_name,
                    'user': user,
                    'version': version.split(',')[0] if version else 'Unknown',
                    'postgis_version': postgis_version,
                    'postgis_available': postgis_version is not None
                }

        except Exception as e:
            logger.error(f"pg_connect failed: {e}")
            return {
                'connected': False,
                'error': str(e)
            }

    async def pg_query(
        self,
        query: str,
        params: Optional[List] = None,
        name: Optional[str] = None
    ) -> Dict:
        """
        Execute SELECT query and return results as data_ref.

        Args:
            query: SQL SELECT query
            params: Query parameters (positional, use $1, $2, etc.)
            name: Optional name for result data_ref

        Returns:
            Dict with data_ref for results or error
        """
        if not PANDAS_AVAILABLE:
            return {'error': 'pandas not available'}

        try:
            pool = await self._get_pool()
            async with pool.acquire() as conn:
                if params:
                    rows = await conn.fetch(query, *params)
                else:
                    rows = await conn.fetch(query)

            if not rows:
                return {
                    'data_ref': None,
                    'rows': 0,
                    'message': 'Query returned no results'
                }

            # Convert to DataFrame
            df = pd.DataFrame([dict(r) for r in rows])

            # Check row limit
            check = self.store.check_row_limit(len(df))
            if check['exceeded']:
                return {
                    'error': 'row_limit_exceeded',
                    **check,
                    'preview': df.head(100).to_dict(orient='records')
                }

            # Store result
            data_ref = self.store.store(df, name=name, source=f"pg_query: {query[:100]}...")
            return {
                'data_ref': data_ref,
                'rows': len(df),
                'columns': list(df.columns)
            }

        except Exception as e:
            logger.error(f"pg_query failed: {e}")
            return {'error': str(e)}

    async def pg_execute(
        self,
        query: str,
        params: Optional[List] = None
    ) -> Dict:
        """
        Execute INSERT/UPDATE/DELETE query.

        Args:
            query: SQL DML query
            params: Query parameters

        Returns:
            Dict with affected rows count
        """
        try:
            pool = await self._get_pool()
            async with pool.acquire() as conn:
                if params:
                    result = await conn.execute(query, *params)
                else:
                    result = await conn.execute(query)

            # Parse result (e.g., "INSERT 0 1" or "UPDATE 5")
            parts = result.split()
            affected = int(parts[-1]) if parts else 0

            return {
                'success': True,
                'command': parts[0] if parts else 'UNKNOWN',
                'affected_rows': affected
            }

        except Exception as e:
            logger.error(f"pg_execute failed: {e}")
            return {'error': str(e)}

    async def pg_tables(self, schema: str = 'public') -> Dict:
        """
        List all tables in schema.

        Args:
            schema: Schema name (default: public)

        Returns:
            Dict with list of tables
        """
        query = """
            SELECT
                table_name,
                table_type,
                (SELECT count(*) FROM information_schema.columns c
                 WHERE c.table_schema = t.table_schema
                 AND c.table_name = t.table_name) as column_count
            FROM information_schema.tables t
            WHERE table_schema = $1
            ORDER BY table_name
        """
        try:
            pool = await self._get_pool()
            async with pool.acquire() as conn:
                rows = await conn.fetch(query, schema)
                tables = [
                    {
                        'name': r['table_name'],
                        'type': r['table_type'],
                        'columns': r['column_count']
                    }
                    for r in rows
                ]
                return {
                    'schema': schema,
                    'count': len(tables),
                    'tables': tables
                }
        except Exception as e:
            logger.error(f"pg_tables failed: {e}")
            return {'error': str(e)}

    async def pg_columns(self, table: str, schema: str = 'public') -> Dict:
        """
        Get column information for a table.

        Args:
            table: Table name
            schema: Schema name (default: public)

        Returns:
            Dict with column details
        """
        query = """
            SELECT
                column_name,
                data_type,
                udt_name,
                is_nullable,
                column_default,
                character_maximum_length,
                numeric_precision
            FROM information_schema.columns
            WHERE table_schema = $1 AND table_name = $2
            ORDER BY ordinal_position
        """
        try:
            pool = await self._get_pool()
            async with pool.acquire() as conn:
                rows = await conn.fetch(query, schema, table)
                columns = [
                    {
                        'name': r['column_name'],
                        'type': r['data_type'],
                        'udt': r['udt_name'],
                        'nullable': r['is_nullable'] == 'YES',
                        'default': r['column_default'],
                        'max_length': r['character_maximum_length'],
                        'precision': r['numeric_precision']
                    }
                    for r in rows
                ]
                return {
                    'table': f'{schema}.{table}',
                    'column_count': len(columns),
                    'columns': columns
                }
        except Exception as e:
            logger.error(f"pg_columns failed: {e}")
            return {'error': str(e)}

    async def pg_schemas(self) -> Dict:
        """
        List all schemas in database.

        Returns:
            Dict with list of schemas
        """
        query = """
            SELECT schema_name
            FROM information_schema.schemata
            WHERE schema_name NOT IN ('pg_catalog', 'information_schema', 'pg_toast')
            ORDER BY schema_name
        """
        try:
            pool = await self._get_pool()
            async with pool.acquire() as conn:
                rows = await conn.fetch(query)
                schemas = [r['schema_name'] for r in rows]
                return {
                    'count': len(schemas),
                    'schemas': schemas
                }
        except Exception as e:
            logger.error(f"pg_schemas failed: {e}")
            return {'error': str(e)}

    async def st_tables(self, schema: str = 'public') -> Dict:
        """
        List PostGIS-enabled tables.

        Args:
            schema: Schema name (default: public)

        Returns:
            Dict with list of tables with geometry columns
        """
        query = """
            SELECT
                f_table_name as table_name,
                f_geometry_column as geometry_column,
                type as geometry_type,
                srid,
                coord_dimension
            FROM geometry_columns
            WHERE f_table_schema = $1
            ORDER BY f_table_name
        """
        try:
            pool = await self._get_pool()
            async with pool.acquire() as conn:
                rows = await conn.fetch(query, schema)
                tables = [
                    {
                        'table': r['table_name'],
                        'geometry_column': r['geometry_column'],
                        'geometry_type': r['geometry_type'],
                        'srid': r['srid'],
                        'dimensions': r['coord_dimension']
                    }
                    for r in rows
                ]
                return {
                    'schema': schema,
                    'count': len(tables),
                    'postgis_tables': tables
                }
        except Exception as e:
            if 'geometry_columns' in str(e):
                return {
                    'error': 'PostGIS not installed or extension not enabled',
                    'suggestion': 'Run: CREATE EXTENSION IF NOT EXISTS postgis;'
                }
            logger.error(f"st_tables failed: {e}")
            return {'error': str(e)}

    async def st_geometry_type(self, table: str, column: str, schema: str = 'public') -> Dict:
        """
        Get geometry type of a column.

        Args:
            table: Table name
            column: Geometry column name
            schema: Schema name

        Returns:
            Dict with geometry type information
        """
        # NOTE: identifiers are interpolated directly into the SQL here;
        # callers must pass trusted table/column/schema names.
        query = f"""
            SELECT DISTINCT ST_GeometryType({column}) as geom_type
            FROM {schema}.{table}
            WHERE {column} IS NOT NULL
            LIMIT 10
        """
        try:
            pool = await self._get_pool()
            async with pool.acquire() as conn:
                rows = await conn.fetch(query)
                types = [r['geom_type'] for r in rows]
                return {
                    'table': f'{schema}.{table}',
                    'column': column,
                    'geometry_types': types
                }
        except Exception as e:
            logger.error(f"st_geometry_type failed: {e}")
            return {'error': str(e)}

    async def st_srid(self, table: str, column: str, schema: str = 'public') -> Dict:
        """
        Get SRID of geometry column.

        Args:
            table: Table name
            column: Geometry column name
            schema: Schema name

        Returns:
            Dict with SRID information
        """
        query = f"""
            SELECT DISTINCT ST_SRID({column}) as srid
            FROM {schema}.{table}
            WHERE {column} IS NOT NULL
            LIMIT 1
        """
        try:
            pool = await self._get_pool()
            async with pool.acquire() as conn:
                row = await conn.fetchrow(query)
                srid = row['srid'] if row else None

                # Get SRID description
                srid_info = None
                if srid:
                    srid_query = """
                        SELECT srtext, proj4text
                        FROM spatial_ref_sys
                        WHERE srid = $1
                    """
                    srid_row = await conn.fetchrow(srid_query, srid)
                    if srid_row:
                        srid_info = {
                            'description': srid_row['srtext'][:200] if srid_row['srtext'] else None,
                            'proj4': srid_row['proj4text']
                        }

                return {
                    'table': f'{schema}.{table}',
                    'column': column,
                    'srid': srid,
                    'info': srid_info
                }
        except Exception as e:
            logger.error(f"st_srid failed: {e}")
            return {'error': str(e)}

    async def st_extent(self, table: str, column: str, schema: str = 'public') -> Dict:
        """
        Get bounding box of all geometries.

        Args:
            table: Table name
            column: Geometry column name
            schema: Schema name

        Returns:
            Dict with bounding box coordinates
        """
        query = f"""
            SELECT
                ST_XMin(extent) as xmin,
                ST_YMin(extent) as ymin,
                ST_XMax(extent) as xmax,
                ST_YMax(extent) as ymax
            FROM (
                SELECT ST_Extent({column}) as extent
                FROM {schema}.{table}
            ) sub
        """
        try:
            pool = await self._get_pool()
            async with pool.acquire() as conn:
                row = await conn.fetchrow(query)
                if row and row['xmin'] is not None:
                    return {
                        'table': f'{schema}.{table}',
                        'column': column,
                        'bbox': {
                            'xmin': float(row['xmin']),
                            'ymin': float(row['ymin']),
                            'xmax': float(row['xmax']),
                            'ymax': float(row['ymax'])
                        }
                    }
                return {
                    'table': f'{schema}.{table}',
                    'column': column,
                    'bbox': None,
                    'message': 'No geometries found or all NULL'
                }
        except Exception as e:
            logger.error(f"st_extent failed: {e}")
            return {'error': str(e)}

    async def close(self):
        """Close connection pool"""
        if self.pool:
            await self.pool.close()
            self.pool = None


def check_connection() -> None:
    """
    Check PostgreSQL connection for SessionStart hook.
    Prints warning to stderr if connection fails.
    """
    import sys

    config = load_config()
    if not config.get('postgres_url'):
        print(
            "[data-platform] PostgreSQL not configured (POSTGRES_URL not set)",
            file=sys.stderr
        )
        return

    async def test():
        try:
            if not ASYNCPG_AVAILABLE:
                print(
                    "[data-platform] asyncpg not installed - PostgreSQL tools unavailable",
                    file=sys.stderr
                )
                return

            conn = await asyncpg.connect(config['postgres_url'], timeout=5)
            await conn.close()
            print("[data-platform] PostgreSQL connection OK", file=sys.stderr)
        except Exception as e:
            print(
                f"[data-platform] PostgreSQL connection failed: {e}",
                file=sys.stderr
            )

    asyncio.run(test())
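A sketch of querying into the shared DataStore. It requires POSTGRES_URL and asyncpg per the checks above; the `orders` table and `total` column are illustrative only.

import asyncio
from mcp_server.postgres_tools import PostgresTools   # import path assumed from this diff

async def main():
    pg = PostgresTools()
    status = await pg.pg_connect()
    if not status.get('connected'):
        print(status)
        return
    result = await pg.pg_query('SELECT * FROM orders WHERE total > $1', params=[100])
    print(result)   # {'data_ref': ..., 'rows': ..., 'columns': [...]} on success
    await pg.close()

asyncio.run(main())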
mcp-servers/data-platform/mcp_server/server.py (new file, 795 lines)
@@ -0,0 +1,795 @@
"""
MCP Server entry point for Data Platform integration.

Provides pandas, PostgreSQL/PostGIS, and dbt tools to Claude Code via JSON-RPC 2.0 over stdio.
"""
import asyncio
import logging
import json
from mcp.server import Server
from mcp.server.stdio import stdio_server
from mcp.types import Tool, TextContent

from .config import DataPlatformConfig
from .data_store import DataStore
from .pandas_tools import PandasTools
from .postgres_tools import PostgresTools
from .dbt_tools import DbtTools

# Suppress noisy MCP validation warnings on stderr
logging.basicConfig(level=logging.INFO)
logging.getLogger("root").setLevel(logging.ERROR)
logging.getLogger("mcp").setLevel(logging.ERROR)
logger = logging.getLogger(__name__)


class DataPlatformMCPServer:
    """MCP Server for data platform integration"""

    def __init__(self):
        self.server = Server("data-platform-mcp")
        self.config = None
        self.pandas_tools = None
        self.postgres_tools = None
        self.dbt_tools = None

    async def initialize(self):
        """Initialize server and load configuration."""
        try:
            config_loader = DataPlatformConfig()
            self.config = config_loader.load()

            self.pandas_tools = PandasTools()
            self.postgres_tools = PostgresTools()
            self.dbt_tools = DbtTools()

            # Log available capabilities
            caps = []
            caps.append("pandas")
            if self.config.get('postgres_available'):
                caps.append("PostgreSQL")
            if self.config.get('dbt_available'):
                caps.append("dbt")

            logger.info(f"Data Platform MCP Server initialized with: {', '.join(caps)}")

        except Exception as e:
            logger.error(f"Failed to initialize: {e}")
            raise

    def setup_tools(self):
        """Register all available tools with the MCP server"""

        @self.server.list_tools()
        async def list_tools() -> list[Tool]:
            """Return list of available tools"""
            tools = [
                # pandas tools - always available
                Tool(
                    name="read_csv",
                    description="Load CSV file into DataFrame",
                    inputSchema={
                        "type": "object",
                        "properties": {
                            "file_path": {
                                "type": "string",
                                "description": "Path to CSV file"
                            },
                            "name": {
                                "type": "string",
                                "description": "Optional name for data_ref"
                            },
                            "chunk_size": {
                                "type": "integer",
                                "description": "Process in chunks of this size"
                            }
                        },
                        "required": ["file_path"]
                    }
                ),
                Tool(
                    name="read_parquet",
                    description="Load Parquet file into DataFrame",
                    inputSchema={
                        "type": "object",
                        "properties": {
                            "file_path": {
                                "type": "string",
                                "description": "Path to Parquet file"
                            },
                            "name": {
                                "type": "string",
                                "description": "Optional name for data_ref"
                            },
                            "columns": {
                                "type": "array",
                                "items": {"type": "string"},
                                "description": "Optional list of columns to load"
                            }
                        },
                        "required": ["file_path"]
                    }
                ),
                Tool(
                    name="read_json",
                    description="Load JSON/JSONL file into DataFrame",
                    inputSchema={
                        "type": "object",
                        "properties": {
                            "file_path": {
                                "type": "string",
                                "description": "Path to JSON file"
                            },
                            "name": {
                                "type": "string",
                                "description": "Optional name for data_ref"
                            },
                            "lines": {
                                "type": "boolean",
                                "default": False,
                                "description": "Read as JSON Lines format"
                            }
|
||||||
|
},
|
||||||
|
"required": ["file_path"]
|
||||||
|
}
|
||||||
|
),
|
||||||
|
Tool(
|
||||||
|
name="to_csv",
|
||||||
|
description="Export DataFrame to CSV file",
|
||||||
|
inputSchema={
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"data_ref": {
|
||||||
|
"type": "string",
|
||||||
|
"description": "Reference to stored DataFrame"
|
||||||
|
},
|
||||||
|
"file_path": {
|
||||||
|
"type": "string",
|
||||||
|
"description": "Output file path"
|
||||||
|
},
|
||||||
|
"index": {
|
||||||
|
"type": "boolean",
|
||||||
|
"default": False,
|
||||||
|
"description": "Include index column"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"required": ["data_ref", "file_path"]
|
||||||
|
}
|
||||||
|
),
|
||||||
|
Tool(
|
||||||
|
name="to_parquet",
|
||||||
|
description="Export DataFrame to Parquet file",
|
||||||
|
inputSchema={
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"data_ref": {
|
||||||
|
"type": "string",
|
||||||
|
"description": "Reference to stored DataFrame"
|
||||||
|
},
|
||||||
|
"file_path": {
|
||||||
|
"type": "string",
|
||||||
|
"description": "Output file path"
|
||||||
|
},
|
||||||
|
"compression": {
|
||||||
|
"type": "string",
|
||||||
|
"default": "snappy",
|
||||||
|
"description": "Compression codec"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"required": ["data_ref", "file_path"]
|
||||||
|
}
|
||||||
|
),
|
||||||
|
Tool(
|
||||||
|
name="describe",
|
||||||
|
description="Get statistical summary of DataFrame",
|
||||||
|
inputSchema={
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"data_ref": {
|
||||||
|
"type": "string",
|
||||||
|
"description": "Reference to stored DataFrame"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"required": ["data_ref"]
|
||||||
|
}
|
||||||
|
),
|
||||||
|
Tool(
|
||||||
|
name="head",
|
||||||
|
description="Get first N rows of DataFrame",
|
||||||
|
inputSchema={
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"data_ref": {
|
||||||
|
"type": "string",
|
||||||
|
"description": "Reference to stored DataFrame"
|
||||||
|
},
|
||||||
|
"n": {
|
||||||
|
"type": "integer",
|
||||||
|
"default": 10,
|
||||||
|
"description": "Number of rows"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"required": ["data_ref"]
|
||||||
|
}
|
||||||
|
),
|
||||||
|
Tool(
|
||||||
|
name="tail",
|
||||||
|
description="Get last N rows of DataFrame",
|
||||||
|
inputSchema={
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"data_ref": {
|
||||||
|
"type": "string",
|
||||||
|
"description": "Reference to stored DataFrame"
|
||||||
|
},
|
||||||
|
"n": {
|
||||||
|
"type": "integer",
|
||||||
|
"default": 10,
|
||||||
|
"description": "Number of rows"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"required": ["data_ref"]
|
||||||
|
}
|
||||||
|
),
|
||||||
|
Tool(
|
||||||
|
name="filter",
|
||||||
|
description="Filter DataFrame rows by condition",
|
||||||
|
inputSchema={
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"data_ref": {
|
||||||
|
"type": "string",
|
||||||
|
"description": "Reference to stored DataFrame"
|
||||||
|
},
|
||||||
|
"condition": {
|
||||||
|
"type": "string",
|
||||||
|
"description": "pandas query string (e.g., 'age > 30 and city == \"NYC\"')"
|
||||||
|
},
|
||||||
|
"name": {
|
||||||
|
"type": "string",
|
||||||
|
"description": "Optional name for result data_ref"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"required": ["data_ref", "condition"]
|
||||||
|
}
|
||||||
|
),
|
||||||
|
Tool(
|
||||||
|
name="select",
|
||||||
|
description="Select specific columns from DataFrame",
|
||||||
|
inputSchema={
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"data_ref": {
|
||||||
|
"type": "string",
|
||||||
|
"description": "Reference to stored DataFrame"
|
||||||
|
},
|
||||||
|
"columns": {
|
||||||
|
"type": "array",
|
||||||
|
"items": {"type": "string"},
|
||||||
|
"description": "List of column names to select"
|
||||||
|
},
|
||||||
|
"name": {
|
||||||
|
"type": "string",
|
||||||
|
"description": "Optional name for result data_ref"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"required": ["data_ref", "columns"]
|
||||||
|
}
|
||||||
|
),
|
||||||
|
Tool(
|
||||||
|
name="groupby",
|
||||||
|
description="Group DataFrame and aggregate",
|
||||||
|
inputSchema={
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"data_ref": {
|
||||||
|
"type": "string",
|
||||||
|
"description": "Reference to stored DataFrame"
|
||||||
|
},
|
||||||
|
"by": {
|
||||||
|
"oneOf": [
|
||||||
|
{"type": "string"},
|
||||||
|
{"type": "array", "items": {"type": "string"}}
|
||||||
|
],
|
||||||
|
"description": "Column(s) to group by"
|
||||||
|
},
|
||||||
|
"agg": {
|
||||||
|
"type": "object",
|
||||||
|
"description": "Aggregation dict (e.g., {\"sales\": \"sum\", \"count\": \"mean\"})"
|
||||||
|
},
|
||||||
|
"name": {
|
||||||
|
"type": "string",
|
||||||
|
"description": "Optional name for result data_ref"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"required": ["data_ref", "by", "agg"]
|
||||||
|
}
|
||||||
|
),
|
||||||
|
Tool(
|
||||||
|
name="join",
|
||||||
|
description="Join two DataFrames",
|
||||||
|
inputSchema={
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"left_ref": {
|
||||||
|
"type": "string",
|
||||||
|
"description": "Reference to left DataFrame"
|
||||||
|
},
|
||||||
|
"right_ref": {
|
||||||
|
"type": "string",
|
||||||
|
"description": "Reference to right DataFrame"
|
||||||
|
},
|
||||||
|
"on": {
|
||||||
|
"oneOf": [
|
||||||
|
{"type": "string"},
|
||||||
|
{"type": "array", "items": {"type": "string"}}
|
||||||
|
],
|
||||||
|
"description": "Column(s) to join on (if same name in both)"
|
||||||
|
},
|
||||||
|
"left_on": {
|
||||||
|
"oneOf": [
|
||||||
|
{"type": "string"},
|
||||||
|
{"type": "array", "items": {"type": "string"}}
|
||||||
|
],
|
||||||
|
"description": "Left join column(s)"
|
||||||
|
},
|
||||||
|
"right_on": {
|
||||||
|
"oneOf": [
|
||||||
|
{"type": "string"},
|
||||||
|
{"type": "array", "items": {"type": "string"}}
|
||||||
|
],
|
||||||
|
"description": "Right join column(s)"
|
||||||
|
},
|
||||||
|
"how": {
|
||||||
|
"type": "string",
|
||||||
|
"enum": ["inner", "left", "right", "outer"],
|
||||||
|
"default": "inner",
|
||||||
|
"description": "Join type"
|
||||||
|
},
|
||||||
|
"name": {
|
||||||
|
"type": "string",
|
||||||
|
"description": "Optional name for result data_ref"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"required": ["left_ref", "right_ref"]
|
||||||
|
}
|
||||||
|
),
|
||||||
|
Tool(
|
||||||
|
name="list_data",
|
||||||
|
description="List all stored DataFrames",
|
||||||
|
inputSchema={
|
||||||
|
"type": "object",
|
||||||
|
"properties": {}
|
||||||
|
}
|
||||||
|
),
|
||||||
|
Tool(
|
||||||
|
name="drop_data",
|
||||||
|
description="Remove a DataFrame from storage",
|
||||||
|
inputSchema={
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"data_ref": {
|
||||||
|
"type": "string",
|
||||||
|
"description": "Reference to drop"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"required": ["data_ref"]
|
||||||
|
}
|
||||||
|
),
|
||||||
|
# PostgreSQL tools
|
||||||
|
Tool(
|
||||||
|
name="pg_connect",
|
||||||
|
description="Test PostgreSQL connection and return status",
|
||||||
|
inputSchema={
|
||||||
|
"type": "object",
|
||||||
|
"properties": {}
|
||||||
|
}
|
||||||
|
),
|
||||||
|
Tool(
|
||||||
|
name="pg_query",
|
||||||
|
description="Execute SELECT query and return results as data_ref",
|
||||||
|
inputSchema={
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"query": {
|
||||||
|
"type": "string",
|
||||||
|
"description": "SQL SELECT query"
|
||||||
|
},
|
||||||
|
"params": {
|
||||||
|
"type": "array",
|
||||||
|
"items": {},
|
||||||
|
"description": "Query parameters (use $1, $2, etc.)"
|
||||||
|
},
|
||||||
|
"name": {
|
||||||
|
"type": "string",
|
||||||
|
"description": "Optional name for result data_ref"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"required": ["query"]
|
||||||
|
}
|
||||||
|
),
|
||||||
|
Tool(
|
||||||
|
name="pg_execute",
|
||||||
|
description="Execute INSERT/UPDATE/DELETE query",
|
||||||
|
inputSchema={
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"query": {
|
||||||
|
"type": "string",
|
||||||
|
"description": "SQL DML query"
|
||||||
|
},
|
||||||
|
"params": {
|
||||||
|
"type": "array",
|
||||||
|
"items": {},
|
||||||
|
"description": "Query parameters"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"required": ["query"]
|
||||||
|
}
|
||||||
|
),
|
||||||
|
Tool(
|
||||||
|
name="pg_tables",
|
||||||
|
description="List all tables in schema",
|
||||||
|
inputSchema={
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"schema": {
|
||||||
|
"type": "string",
|
||||||
|
"default": "public",
|
||||||
|
"description": "Schema name"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
),
|
||||||
|
Tool(
|
||||||
|
name="pg_columns",
|
||||||
|
description="Get column information for a table",
|
||||||
|
inputSchema={
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"table": {
|
||||||
|
"type": "string",
|
||||||
|
"description": "Table name"
|
||||||
|
},
|
||||||
|
"schema": {
|
||||||
|
"type": "string",
|
||||||
|
"default": "public",
|
||||||
|
"description": "Schema name"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"required": ["table"]
|
||||||
|
}
|
||||||
|
),
|
||||||
|
Tool(
|
||||||
|
name="pg_schemas",
|
||||||
|
description="List all schemas in database",
|
||||||
|
inputSchema={
|
||||||
|
"type": "object",
|
||||||
|
"properties": {}
|
||||||
|
}
|
||||||
|
),
|
||||||
|
# PostGIS tools
|
||||||
|
Tool(
|
||||||
|
name="st_tables",
|
||||||
|
description="List PostGIS-enabled tables",
|
||||||
|
inputSchema={
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"schema": {
|
||||||
|
"type": "string",
|
||||||
|
"default": "public",
|
||||||
|
"description": "Schema name"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
),
|
||||||
|
Tool(
|
||||||
|
name="st_geometry_type",
|
||||||
|
description="Get geometry type of a column",
|
||||||
|
inputSchema={
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"table": {
|
||||||
|
"type": "string",
|
||||||
|
"description": "Table name"
|
||||||
|
},
|
||||||
|
"column": {
|
||||||
|
"type": "string",
|
||||||
|
"description": "Geometry column name"
|
||||||
|
},
|
||||||
|
"schema": {
|
||||||
|
"type": "string",
|
||||||
|
"default": "public",
|
||||||
|
"description": "Schema name"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"required": ["table", "column"]
|
||||||
|
}
|
||||||
|
),
|
||||||
|
Tool(
|
||||||
|
name="st_srid",
|
||||||
|
description="Get SRID of geometry column",
|
||||||
|
inputSchema={
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"table": {
|
||||||
|
"type": "string",
|
||||||
|
"description": "Table name"
|
||||||
|
},
|
||||||
|
"column": {
|
||||||
|
"type": "string",
|
||||||
|
"description": "Geometry column name"
|
||||||
|
},
|
||||||
|
"schema": {
|
||||||
|
"type": "string",
|
||||||
|
"default": "public",
|
||||||
|
"description": "Schema name"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"required": ["table", "column"]
|
||||||
|
}
|
||||||
|
),
|
||||||
|
Tool(
|
||||||
|
name="st_extent",
|
||||||
|
description="Get bounding box of all geometries",
|
||||||
|
inputSchema={
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"table": {
|
||||||
|
"type": "string",
|
||||||
|
"description": "Table name"
|
||||||
|
},
|
||||||
|
"column": {
|
||||||
|
"type": "string",
|
||||||
|
"description": "Geometry column name"
|
||||||
|
},
|
||||||
|
"schema": {
|
||||||
|
"type": "string",
|
||||||
|
"default": "public",
|
||||||
|
"description": "Schema name"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"required": ["table", "column"]
|
||||||
|
}
|
||||||
|
),
|
||||||
|
# dbt tools
|
||||||
|
Tool(
|
||||||
|
name="dbt_parse",
|
||||||
|
description="Validate dbt project (pre-flight check)",
|
||||||
|
inputSchema={
|
||||||
|
"type": "object",
|
||||||
|
"properties": {}
|
||||||
|
}
|
||||||
|
),
|
||||||
|
Tool(
|
||||||
|
name="dbt_run",
|
||||||
|
description="Run dbt models with pre-validation",
|
||||||
|
inputSchema={
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"select": {
|
||||||
|
"type": "string",
|
||||||
|
"description": "Model selection (e.g., 'model_name', '+model_name', 'tag:daily')"
|
||||||
|
},
|
||||||
|
"exclude": {
|
||||||
|
"type": "string",
|
||||||
|
"description": "Models to exclude"
|
||||||
|
},
|
||||||
|
"full_refresh": {
|
||||||
|
"type": "boolean",
|
||||||
|
"default": False,
|
||||||
|
"description": "Rebuild incremental models"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
),
|
||||||
|
Tool(
|
||||||
|
name="dbt_test",
|
||||||
|
description="Run dbt tests",
|
||||||
|
inputSchema={
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"select": {
|
||||||
|
"type": "string",
|
||||||
|
"description": "Test selection"
|
||||||
|
},
|
||||||
|
"exclude": {
|
||||||
|
"type": "string",
|
||||||
|
"description": "Tests to exclude"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
),
|
||||||
|
Tool(
|
||||||
|
name="dbt_build",
|
||||||
|
description="Run dbt build (run + test) with pre-validation",
|
||||||
|
inputSchema={
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"select": {
|
||||||
|
"type": "string",
|
||||||
|
"description": "Model/test selection"
|
||||||
|
},
|
||||||
|
"exclude": {
|
||||||
|
"type": "string",
|
||||||
|
"description": "Resources to exclude"
|
||||||
|
},
|
||||||
|
"full_refresh": {
|
||||||
|
"type": "boolean",
|
||||||
|
"default": False,
|
||||||
|
"description": "Rebuild incremental models"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
),
|
||||||
|
Tool(
|
||||||
|
name="dbt_compile",
|
||||||
|
description="Compile dbt models to SQL without executing",
|
||||||
|
inputSchema={
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"select": {
|
||||||
|
"type": "string",
|
||||||
|
"description": "Model selection"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
),
|
||||||
|
Tool(
|
||||||
|
name="dbt_ls",
|
||||||
|
description="List dbt resources",
|
||||||
|
inputSchema={
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"select": {
|
||||||
|
"type": "string",
|
||||||
|
"description": "Resource selection"
|
||||||
|
},
|
||||||
|
"resource_type": {
|
||||||
|
"type": "string",
|
||||||
|
"enum": ["model", "test", "seed", "snapshot", "source"],
|
||||||
|
"description": "Filter by type"
|
||||||
|
},
|
||||||
|
"output": {
|
||||||
|
"type": "string",
|
||||||
|
"enum": ["name", "path", "json"],
|
||||||
|
"default": "name",
|
||||||
|
"description": "Output format"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
),
|
||||||
|
Tool(
|
||||||
|
name="dbt_docs_generate",
|
||||||
|
description="Generate dbt documentation",
|
||||||
|
inputSchema={
|
||||||
|
"type": "object",
|
||||||
|
"properties": {}
|
||||||
|
}
|
||||||
|
),
|
||||||
|
Tool(
|
||||||
|
name="dbt_lineage",
|
||||||
|
description="Get model dependencies and lineage",
|
||||||
|
inputSchema={
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"model": {
|
||||||
|
"type": "string",
|
||||||
|
"description": "Model name to analyze"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"required": ["model"]
|
||||||
|
}
|
||||||
|
)
|
||||||
|
]
|
||||||
|
return tools
|
||||||
|
|
||||||
|
@self.server.call_tool()
|
||||||
|
async def call_tool(name: str, arguments: dict) -> list[TextContent]:
|
||||||
|
"""Handle tool invocation."""
|
||||||
|
try:
|
||||||
|
# Route to appropriate tool handler
|
||||||
|
# pandas tools
|
||||||
|
if name == "read_csv":
|
||||||
|
result = await self.pandas_tools.read_csv(**arguments)
|
||||||
|
elif name == "read_parquet":
|
||||||
|
result = await self.pandas_tools.read_parquet(**arguments)
|
||||||
|
elif name == "read_json":
|
||||||
|
result = await self.pandas_tools.read_json(**arguments)
|
||||||
|
elif name == "to_csv":
|
||||||
|
result = await self.pandas_tools.to_csv(**arguments)
|
||||||
|
elif name == "to_parquet":
|
||||||
|
result = await self.pandas_tools.to_parquet(**arguments)
|
||||||
|
elif name == "describe":
|
||||||
|
result = await self.pandas_tools.describe(**arguments)
|
||||||
|
elif name == "head":
|
||||||
|
result = await self.pandas_tools.head(**arguments)
|
||||||
|
elif name == "tail":
|
||||||
|
result = await self.pandas_tools.tail(**arguments)
|
||||||
|
elif name == "filter":
|
||||||
|
result = await self.pandas_tools.filter(**arguments)
|
||||||
|
elif name == "select":
|
||||||
|
result = await self.pandas_tools.select(**arguments)
|
||||||
|
elif name == "groupby":
|
||||||
|
result = await self.pandas_tools.groupby(**arguments)
|
||||||
|
elif name == "join":
|
||||||
|
result = await self.pandas_tools.join(**arguments)
|
||||||
|
elif name == "list_data":
|
||||||
|
result = await self.pandas_tools.list_data()
|
||||||
|
elif name == "drop_data":
|
||||||
|
result = await self.pandas_tools.drop_data(**arguments)
|
||||||
|
# PostgreSQL tools
|
||||||
|
elif name == "pg_connect":
|
||||||
|
result = await self.postgres_tools.pg_connect()
|
||||||
|
elif name == "pg_query":
|
||||||
|
result = await self.postgres_tools.pg_query(**arguments)
|
||||||
|
elif name == "pg_execute":
|
||||||
|
result = await self.postgres_tools.pg_execute(**arguments)
|
||||||
|
elif name == "pg_tables":
|
||||||
|
result = await self.postgres_tools.pg_tables(**arguments)
|
||||||
|
elif name == "pg_columns":
|
||||||
|
result = await self.postgres_tools.pg_columns(**arguments)
|
||||||
|
elif name == "pg_schemas":
|
||||||
|
result = await self.postgres_tools.pg_schemas()
|
||||||
|
# PostGIS tools
|
||||||
|
elif name == "st_tables":
|
||||||
|
result = await self.postgres_tools.st_tables(**arguments)
|
||||||
|
elif name == "st_geometry_type":
|
||||||
|
result = await self.postgres_tools.st_geometry_type(**arguments)
|
||||||
|
elif name == "st_srid":
|
||||||
|
result = await self.postgres_tools.st_srid(**arguments)
|
||||||
|
elif name == "st_extent":
|
||||||
|
result = await self.postgres_tools.st_extent(**arguments)
|
||||||
|
# dbt tools
|
||||||
|
elif name == "dbt_parse":
|
||||||
|
result = await self.dbt_tools.dbt_parse()
|
||||||
|
elif name == "dbt_run":
|
||||||
|
result = await self.dbt_tools.dbt_run(**arguments)
|
||||||
|
elif name == "dbt_test":
|
||||||
|
result = await self.dbt_tools.dbt_test(**arguments)
|
||||||
|
elif name == "dbt_build":
|
||||||
|
result = await self.dbt_tools.dbt_build(**arguments)
|
||||||
|
elif name == "dbt_compile":
|
||||||
|
result = await self.dbt_tools.dbt_compile(**arguments)
|
||||||
|
elif name == "dbt_ls":
|
||||||
|
result = await self.dbt_tools.dbt_ls(**arguments)
|
||||||
|
elif name == "dbt_docs_generate":
|
||||||
|
result = await self.dbt_tools.dbt_docs_generate()
|
||||||
|
elif name == "dbt_lineage":
|
||||||
|
result = await self.dbt_tools.dbt_lineage(**arguments)
|
||||||
|
else:
|
||||||
|
raise ValueError(f"Unknown tool: {name}")
|
||||||
|
|
||||||
|
return [TextContent(
|
||||||
|
type="text",
|
||||||
|
text=json.dumps(result, indent=2, default=str)
|
||||||
|
)]
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Tool {name} failed: {e}")
|
||||||
|
return [TextContent(
|
||||||
|
type="text",
|
||||||
|
text=json.dumps({"error": str(e)}, indent=2)
|
||||||
|
)]
|
||||||
|
|
||||||
|
async def run(self):
|
||||||
|
"""Run the MCP server"""
|
||||||
|
await self.initialize()
|
||||||
|
self.setup_tools()
|
||||||
|
|
||||||
|
async with stdio_server() as (read_stream, write_stream):
|
||||||
|
await self.server.run(
|
||||||
|
read_stream,
|
||||||
|
write_stream,
|
||||||
|
self.server.create_initialization_options()
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
async def main():
|
||||||
|
"""Main entry point"""
|
||||||
|
server = DataPlatformMCPServer()
|
||||||
|
await server.run()
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
asyncio.run(main())
|
||||||
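For orientation, here is a rough sketch of the data_ref round-trip these tools implement, driving PandasTools directly instead of over JSON-RPC. The keyword names are inferred from the inputSchema definitions and the **arguments routing in call_tool above; sales.csv and the column names are hypothetical:

# Hypothetical walkthrough: load a CSV, filter it, aggregate it.
# Each step returns a dict describing the stored data_ref, not raw data.
import asyncio
from mcp_server.pandas_tools import PandasTools

async def demo():
    tools = PandasTools()
    loaded = await tools.read_csv(file_path="sales.csv", name="sales")
    big = await tools.filter(data_ref="sales", condition="amount > 100", name="big_sales")
    summary = await tools.groupby(data_ref="big_sales", by="region", agg={"amount": "sum"})
    print(loaded, big, summary, sep="\n")

asyncio.run(demo())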
mcp-servers/data-platform/pyproject.toml  (Normal file, 49 lines)
@@ -0,0 +1,49 @@
[build-system]
requires = ["setuptools>=61.0", "wheel"]
build-backend = "setuptools.build_meta"

[project]
name = "data-platform-mcp"
version = "1.0.0"
description = "MCP Server for data engineering with pandas, PostgreSQL/PostGIS, and dbt"
readme = "README.md"
license = {text = "MIT"}
requires-python = ">=3.10"
authors = [
    {name = "Leo Miranda"}
]
classifiers = [
    "Development Status :: 4 - Beta",
    "Intended Audience :: Developers",
    "License :: OSI Approved :: MIT License",
    "Programming Language :: Python :: 3",
    "Programming Language :: Python :: 3.10",
    "Programming Language :: Python :: 3.11",
    "Programming Language :: Python :: 3.12",
]
dependencies = [
    "mcp>=0.9.0",
    "pandas>=2.0.0",
    "pyarrow>=14.0.0",
    "asyncpg>=0.29.0",
    "geoalchemy2>=0.14.0",
    "shapely>=2.0.0",
    "dbt-core>=1.9.0",
    "dbt-postgres>=1.9.0",
    "python-dotenv>=1.0.0",
    "pydantic>=2.5.0",
]

[project.optional-dependencies]
dev = [
    "pytest>=7.4.3",
    "pytest-asyncio>=0.23.0",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["mcp_server*"]

[tool.pytest.ini_options]
asyncio_mode = "auto"
testpaths = ["tests"]
mcp-servers/data-platform/requirements.txt  (Normal file, 23 lines)
@@ -0,0 +1,23 @@
# MCP SDK
mcp>=0.9.0

# Data Processing
pandas>=2.0.0
pyarrow>=14.0.0

# PostgreSQL/PostGIS
asyncpg>=0.29.0
geoalchemy2>=0.14.0
shapely>=2.0.0

# dbt
dbt-core>=1.9.0
dbt-postgres>=1.9.0

# Utilities
python-dotenv>=1.0.0
pydantic>=2.5.0

# Testing
pytest>=7.4.3
pytest-asyncio>=0.23.0
mcp-servers/data-platform/run.sh  (Executable file, 21 lines)
@@ -0,0 +1,21 @@
#!/bin/bash
# Capture original working directory before any cd operations
# This should be the user's project directory when launched by Claude Code
export CLAUDE_PROJECT_DIR="${CLAUDE_PROJECT_DIR:-$PWD}"

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
CACHE_VENV="$HOME/.cache/claude-mcp-venvs/leo-claude-mktplace/data-platform/.venv"
LOCAL_VENV="$SCRIPT_DIR/.venv"

if [[ -f "$CACHE_VENV/bin/python" ]]; then
    PYTHON="$CACHE_VENV/bin/python"
elif [[ -f "$LOCAL_VENV/bin/python" ]]; then
    PYTHON="$LOCAL_VENV/bin/python"
else
    echo "ERROR: No venv found. Run: ./scripts/setup-venvs.sh" >&2
    exit 1
fi

cd "$SCRIPT_DIR"
export PYTHONPATH="$SCRIPT_DIR"
exec "$PYTHON" -m mcp_server.server "$@"
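run.sh is what an MCP client launches; a minimal smoke test of the stdio transport is sketched below. It assumes newline-delimited JSON-RPC framing and a protocolVersion string the installed mcp SDK accepts, both of which may differ across SDK versions:

# Hypothetical smoke test; run from the repository root.
import json
import subprocess

proc = subprocess.Popen(
    ["mcp-servers/data-platform/run.sh"],
    stdin=subprocess.PIPE, stdout=subprocess.PIPE, text=True,
)
init = {
    "jsonrpc": "2.0", "id": 1, "method": "initialize",
    "params": {
        "protocolVersion": "2024-11-05",  # assumed; match your SDK's version
        "capabilities": {},
        "clientInfo": {"name": "smoke-test", "version": "0.0.1"},
    },
}
proc.stdin.write(json.dumps(init) + "\n")  # one JSON-RPC message per line
proc.stdin.flush()
print(proc.stdout.readline())  # the server's initialize response
proc.terminate()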
mcp-servers/data-platform/tests/__init__.py  (Normal file, 3 lines)
@@ -0,0 +1,3 @@
"""
Tests for Data Platform MCP Server.
"""
mcp-servers/data-platform/tests/test_config.py  (Normal file, 239 lines)
@@ -0,0 +1,239 @@
"""
Unit tests for configuration loader.
"""
import pytest
from pathlib import Path
import os


def test_load_system_config(tmp_path, monkeypatch):
    """Test loading system-level PostgreSQL configuration"""
    # Import here to avoid import errors before setup
    from mcp_server.config import DataPlatformConfig

    # Mock home directory
    config_dir = tmp_path / '.config' / 'claude'
    config_dir.mkdir(parents=True)

    config_file = config_dir / 'postgres.env'
    config_file.write_text(
        "POSTGRES_URL=postgresql://user:pass@localhost:5432/testdb\n"
    )

    monkeypatch.setenv('HOME', str(tmp_path))
    monkeypatch.chdir(tmp_path)

    config = DataPlatformConfig()
    result = config.load()

    assert result['postgres_url'] == 'postgresql://user:pass@localhost:5432/testdb'
    assert result['postgres_available'] is True


def test_postgres_optional(tmp_path, monkeypatch):
    """Test that PostgreSQL configuration is optional"""
    from mcp_server.config import DataPlatformConfig

    # No postgres.env file
    monkeypatch.setenv('HOME', str(tmp_path))
    monkeypatch.chdir(tmp_path)

    # Clear any existing env vars
    monkeypatch.delenv('POSTGRES_URL', raising=False)

    config = DataPlatformConfig()
    result = config.load()

    assert result['postgres_url'] is None
    assert result['postgres_available'] is False


def test_project_config_override(tmp_path, monkeypatch):
    """Test that project config overrides system config"""
    from mcp_server.config import DataPlatformConfig

    # Set up system config
    system_config_dir = tmp_path / '.config' / 'claude'
    system_config_dir.mkdir(parents=True)

    system_config = system_config_dir / 'postgres.env'
    system_config.write_text(
        "POSTGRES_URL=postgresql://system:pass@localhost:5432/systemdb\n"
    )

    # Set up project config
    project_dir = tmp_path / 'project'
    project_dir.mkdir()

    project_config = project_dir / '.env'
    project_config.write_text(
        "POSTGRES_URL=postgresql://project:pass@localhost:5432/projectdb\n"
        "DBT_PROJECT_DIR=/path/to/dbt\n"
    )

    monkeypatch.setenv('HOME', str(tmp_path))
    monkeypatch.chdir(project_dir)

    config = DataPlatformConfig()
    result = config.load()

    # Project config should override
    assert result['postgres_url'] == 'postgresql://project:pass@localhost:5432/projectdb'
    assert result['dbt_project_dir'] == '/path/to/dbt'


def test_max_rows_config(tmp_path, monkeypatch):
    """Test max rows configuration"""
    from mcp_server.config import DataPlatformConfig

    project_dir = tmp_path / 'project'
    project_dir.mkdir()

    project_config = project_dir / '.env'
    project_config.write_text("DATA_PLATFORM_MAX_ROWS=50000\n")

    monkeypatch.setenv('HOME', str(tmp_path))
    monkeypatch.chdir(project_dir)

    config = DataPlatformConfig()
    result = config.load()

    assert result['max_rows'] == 50000


def test_default_max_rows(tmp_path, monkeypatch):
    """Test default max rows value"""
    from mcp_server.config import DataPlatformConfig

    monkeypatch.setenv('HOME', str(tmp_path))
    monkeypatch.chdir(tmp_path)

    # Clear any existing env vars
    monkeypatch.delenv('DATA_PLATFORM_MAX_ROWS', raising=False)

    config = DataPlatformConfig()
    result = config.load()

    assert result['max_rows'] == 100_000  # Default value


def test_dbt_auto_detection(tmp_path, monkeypatch):
    """Test automatic dbt project detection"""
    from mcp_server.config import DataPlatformConfig

    # Create project with dbt_project.yml
    project_dir = tmp_path / 'project'
    project_dir.mkdir()
    (project_dir / 'dbt_project.yml').write_text("name: test_project\n")

    monkeypatch.setenv('HOME', str(tmp_path))
    monkeypatch.chdir(project_dir)
    # Clear PWD and DBT_PROJECT_DIR to ensure auto-detection
    monkeypatch.delenv('PWD', raising=False)
    monkeypatch.delenv('DBT_PROJECT_DIR', raising=False)
    monkeypatch.delenv('CLAUDE_PROJECT_DIR', raising=False)

    config = DataPlatformConfig()
    result = config.load()

    assert result['dbt_project_dir'] == str(project_dir)
    assert result['dbt_available'] is True


def test_dbt_subdirectory_detection(tmp_path, monkeypatch):
    """Test dbt project detection in subdirectory"""
    from mcp_server.config import DataPlatformConfig

    # Create project with dbt in subdirectory
    project_dir = tmp_path / 'project'
    project_dir.mkdir()
    # Need a marker file for _find_project_directory to find the project
    (project_dir / '.git').mkdir()
    dbt_dir = project_dir / 'transform'
    dbt_dir.mkdir()
    (dbt_dir / 'dbt_project.yml').write_text("name: test_project\n")

    monkeypatch.setenv('HOME', str(tmp_path))
    monkeypatch.chdir(project_dir)
    # Clear env vars to ensure auto-detection
    monkeypatch.delenv('PWD', raising=False)
    monkeypatch.delenv('DBT_PROJECT_DIR', raising=False)
    monkeypatch.delenv('CLAUDE_PROJECT_DIR', raising=False)

    config = DataPlatformConfig()
    result = config.load()

    assert result['dbt_project_dir'] == str(dbt_dir)
    assert result['dbt_available'] is True


def test_no_dbt_project(tmp_path, monkeypatch):
    """Test when no dbt project exists"""
    from mcp_server.config import DataPlatformConfig

    project_dir = tmp_path / 'project'
    project_dir.mkdir()

    monkeypatch.setenv('HOME', str(tmp_path))
    monkeypatch.chdir(project_dir)

    # Clear any existing env vars
    monkeypatch.delenv('DBT_PROJECT_DIR', raising=False)

    config = DataPlatformConfig()
    result = config.load()

    assert result['dbt_project_dir'] is None
    assert result['dbt_available'] is False


def test_find_project_directory_from_env(tmp_path, monkeypatch):
    """Test finding project directory from CLAUDE_PROJECT_DIR env var"""
    from mcp_server.config import DataPlatformConfig

    project_dir = tmp_path / 'my-project'
    project_dir.mkdir()
    (project_dir / '.git').mkdir()

    monkeypatch.setenv('CLAUDE_PROJECT_DIR', str(project_dir))

    config = DataPlatformConfig()
    result = config._find_project_directory()

    assert result == project_dir


def test_find_project_directory_from_cwd(tmp_path, monkeypatch):
    """Test finding project directory from cwd with .env file"""
    from mcp_server.config import DataPlatformConfig

    project_dir = tmp_path / 'project'
    project_dir.mkdir()
    (project_dir / '.env').write_text("TEST=value")

    monkeypatch.chdir(project_dir)
    monkeypatch.delenv('CLAUDE_PROJECT_DIR', raising=False)
    monkeypatch.delenv('PWD', raising=False)

    config = DataPlatformConfig()
    result = config._find_project_directory()

    assert result == project_dir


def test_find_project_directory_none_when_no_markers(tmp_path, monkeypatch):
    """Test returns None when no project markers found"""
    from mcp_server.config import DataPlatformConfig

    empty_dir = tmp_path / 'empty'
    empty_dir.mkdir()

    monkeypatch.chdir(empty_dir)
    monkeypatch.delenv('CLAUDE_PROJECT_DIR', raising=False)
    monkeypatch.delenv('PWD', raising=False)
    monkeypatch.delenv('DBT_PROJECT_DIR', raising=False)

    config = DataPlatformConfig()
    result = config._find_project_directory()

    assert result is None
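These tests pick up the [tool.pytest.ini_options] block from pyproject.toml above (asyncio_mode = "auto", testpaths = ["tests"]), so a bare pytest invocation from mcp-servers/data-platform/ runs the whole suite. A minimal sketch of an equivalent programmatic entry point:

# Equivalent to running `pytest -v` from mcp-servers/data-platform/.
import sys
import pytest

sys.exit(pytest.main(["-v"]))  # testpaths comes from pyproject.toml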
mcp-servers/data-platform/tests/test_data_store.py  (Normal file, 240 lines)
@@ -0,0 +1,240 @@
"""
Unit tests for Arrow IPC DataFrame registry.
"""
import pytest
import pandas as pd
import pyarrow as pa


def test_store_pandas_dataframe():
    """Test storing pandas DataFrame"""
    from mcp_server.data_store import DataStore

    # Create fresh instance for test
    store = DataStore()
    store._dataframes = {}
    store._metadata = {}

    df = pd.DataFrame({'a': [1, 2, 3], 'b': ['x', 'y', 'z']})
    data_ref = store.store(df, name='test_df')

    assert data_ref == 'test_df'
    assert 'test_df' in store._dataframes
    assert store._metadata['test_df'].rows == 3
    assert store._metadata['test_df'].columns == 2


def test_store_arrow_table():
    """Test storing Arrow Table directly"""
    from mcp_server.data_store import DataStore

    store = DataStore()
    store._dataframes = {}
    store._metadata = {}

    table = pa.table({'x': [1, 2, 3], 'y': [4, 5, 6]})
    data_ref = store.store(table, name='arrow_test')

    assert data_ref == 'arrow_test'
    assert store._dataframes['arrow_test'].num_rows == 3


def test_store_auto_name():
    """Test auto-generated data_ref names"""
    from mcp_server.data_store import DataStore

    store = DataStore()
    store._dataframes = {}
    store._metadata = {}

    df = pd.DataFrame({'a': [1, 2]})
    data_ref = store.store(df)

    assert data_ref.startswith('df_')
    assert len(data_ref) == 11  # df_ + 8 hex chars


def test_get_dataframe():
    """Test retrieving stored DataFrame"""
    from mcp_server.data_store import DataStore

    store = DataStore()
    store._dataframes = {}
    store._metadata = {}

    df = pd.DataFrame({'a': [1, 2, 3]})
    store.store(df, name='get_test')

    result = store.get('get_test')
    assert result is not None
    assert result.num_rows == 3


def test_get_pandas():
    """Test retrieving as pandas DataFrame"""
    from mcp_server.data_store import DataStore

    store = DataStore()
    store._dataframes = {}
    store._metadata = {}

    df = pd.DataFrame({'a': [1, 2, 3], 'b': ['x', 'y', 'z']})
    store.store(df, name='pandas_test')

    result = store.get_pandas('pandas_test')
    assert isinstance(result, pd.DataFrame)
    assert list(result.columns) == ['a', 'b']
    assert len(result) == 3


def test_get_nonexistent():
    """Test getting nonexistent data_ref returns None"""
    from mcp_server.data_store import DataStore

    store = DataStore()
    store._dataframes = {}
    store._metadata = {}

    assert store.get('nonexistent') is None
    assert store.get_pandas('nonexistent') is None


def test_list_refs():
    """Test listing all stored DataFrames"""
    from mcp_server.data_store import DataStore

    store = DataStore()
    store._dataframes = {}
    store._metadata = {}

    store.store(pd.DataFrame({'a': [1, 2]}), name='df1')
    store.store(pd.DataFrame({'b': [3, 4, 5]}), name='df2')

    refs = store.list_refs()

    assert len(refs) == 2
    ref_names = [r['ref'] for r in refs]
    assert 'df1' in ref_names
    assert 'df2' in ref_names


def test_drop_dataframe():
    """Test dropping a DataFrame"""
    from mcp_server.data_store import DataStore

    store = DataStore()
    store._dataframes = {}
    store._metadata = {}

    store.store(pd.DataFrame({'a': [1]}), name='drop_test')
    assert store.get('drop_test') is not None

    result = store.drop('drop_test')
    assert result is True
    assert store.get('drop_test') is None


def test_drop_nonexistent():
    """Test dropping nonexistent data_ref"""
    from mcp_server.data_store import DataStore

    store = DataStore()
    store._dataframes = {}
    store._metadata = {}

    result = store.drop('nonexistent')
    assert result is False


def test_clear():
    """Test clearing all DataFrames"""
    from mcp_server.data_store import DataStore

    store = DataStore()
    store._dataframes = {}
    store._metadata = {}

    store.store(pd.DataFrame({'a': [1]}), name='df1')
    store.store(pd.DataFrame({'b': [2]}), name='df2')

    store.clear()

    assert len(store.list_refs()) == 0


def test_get_info():
    """Test getting DataFrame metadata"""
    from mcp_server.data_store import DataStore

    store = DataStore()
    store._dataframes = {}
    store._metadata = {}

    df = pd.DataFrame({'a': [1, 2, 3], 'b': ['x', 'y', 'z']})
    store.store(df, name='info_test', source='test source')

    info = store.get_info('info_test')

    assert info.ref == 'info_test'
    assert info.rows == 3
    assert info.columns == 2
    assert info.column_names == ['a', 'b']
    assert info.source == 'test source'
    assert info.memory_bytes > 0


def test_total_memory():
    """Test total memory calculation"""
    from mcp_server.data_store import DataStore

    store = DataStore()
    store._dataframes = {}
    store._metadata = {}

    store.store(pd.DataFrame({'a': range(100)}), name='df1')
    store.store(pd.DataFrame({'b': range(200)}), name='df2')

    total = store.total_memory_bytes()
    assert total > 0

    total_mb = store.total_memory_mb()
    assert total_mb >= 0


def test_check_row_limit():
    """Test row limit checking"""
    from mcp_server.data_store import DataStore

    store = DataStore()
    store._max_rows = 100

    # Under limit
    result = store.check_row_limit(50)
    assert result['exceeded'] is False

    # Over limit
    result = store.check_row_limit(150)
    assert result['exceeded'] is True
    assert 'suggestion' in result


def test_metadata_dtypes():
    """Test that dtypes are correctly recorded"""
    from mcp_server.data_store import DataStore

    store = DataStore()
    store._dataframes = {}
    store._metadata = {}

    df = pd.DataFrame({
        'int_col': [1, 2, 3],
        'float_col': [1.1, 2.2, 3.3],
        'str_col': ['a', 'b', 'c']
    })
    store.store(df, name='dtype_test')

    info = store.get_info('dtype_test')

    assert 'int_col' in info.dtypes
    assert 'float_col' in info.dtypes
    assert 'str_col' in info.dtypes
mcp-servers/data-platform/tests/test_dbt_tools.py  (Normal file, 318 lines)
@@ -0,0 +1,318 @@
"""
Unit tests for dbt MCP tools.
"""
import pytest
from unittest.mock import Mock, patch, MagicMock
import subprocess
import json
import tempfile
import os


@pytest.fixture
def mock_config(tmp_path):
    """Mock configuration with dbt project"""
    dbt_dir = tmp_path / 'dbt_project'
    dbt_dir.mkdir()
    (dbt_dir / 'dbt_project.yml').write_text('name: test_project\n')

    return {
        'dbt_project_dir': str(dbt_dir),
        'dbt_profiles_dir': str(tmp_path / '.dbt')
    }


@pytest.fixture
def dbt_tools(mock_config):
    """Create DbtTools instance with mocked config"""
    with patch('mcp_server.dbt_tools.load_config', return_value=mock_config):
        from mcp_server.dbt_tools import DbtTools

        tools = DbtTools()
        tools.project_dir = mock_config['dbt_project_dir']
        tools.profiles_dir = mock_config['dbt_profiles_dir']
        return tools


@pytest.mark.asyncio
async def test_dbt_parse_success(dbt_tools):
    """Test successful dbt parse"""
    mock_result = MagicMock()
    mock_result.returncode = 0
    mock_result.stdout = 'Parsed successfully'
    mock_result.stderr = ''

    with patch('subprocess.run', return_value=mock_result):
        result = await dbt_tools.dbt_parse()

    assert result['valid'] is True


@pytest.mark.asyncio
async def test_dbt_parse_failure(dbt_tools):
    """Test dbt parse with errors"""
    mock_result = MagicMock()
    mock_result.returncode = 1
    mock_result.stdout = ''
    mock_result.stderr = 'Compilation error: deprecated syntax'

    with patch('subprocess.run', return_value=mock_result):
        result = await dbt_tools.dbt_parse()

    assert result['valid'] is False
    assert 'deprecated' in str(result.get('details', '')).lower() or len(result.get('errors', [])) > 0


@pytest.mark.asyncio
async def test_dbt_run_with_prevalidation(dbt_tools):
    """Test dbt run includes pre-validation"""
    # First call is parse, second is run
    mock_parse = MagicMock()
    mock_parse.returncode = 0
    mock_parse.stdout = 'OK'
    mock_parse.stderr = ''

    mock_run = MagicMock()
    mock_run.returncode = 0
    mock_run.stdout = 'Completed successfully'
    mock_run.stderr = ''

    with patch('subprocess.run', side_effect=[mock_parse, mock_run]):
        result = await dbt_tools.dbt_run()

    assert result['success'] is True


@pytest.mark.asyncio
async def test_dbt_run_fails_validation(dbt_tools):
    """Test dbt run fails if validation fails"""
    mock_parse = MagicMock()
    mock_parse.returncode = 1
    mock_parse.stdout = ''
    mock_parse.stderr = 'Parse error'

    with patch('subprocess.run', return_value=mock_parse):
        result = await dbt_tools.dbt_run()

    assert 'error' in result
    assert 'Pre-validation failed' in result['error']


@pytest.mark.asyncio
async def test_dbt_run_with_selection(dbt_tools):
    """Test dbt run with model selection"""
    mock_parse = MagicMock()
    mock_parse.returncode = 0
    mock_parse.stdout = 'OK'
    mock_parse.stderr = ''

    mock_run = MagicMock()
    mock_run.returncode = 0
    mock_run.stdout = 'Completed'
    mock_run.stderr = ''

    calls = []

    def track_calls(*args, **kwargs):
        calls.append(args[0] if args else kwargs.get('args', []))
        if len(calls) == 1:
            return mock_parse
        return mock_run

    with patch('subprocess.run', side_effect=track_calls):
        result = await dbt_tools.dbt_run(select='dim_customers')

    # Verify --select was passed
    assert any('--select' in str(call) for call in calls)


@pytest.mark.asyncio
async def test_dbt_test(dbt_tools):
    """Test dbt test"""
    mock_result = MagicMock()
    mock_result.returncode = 0
    mock_result.stdout = 'All tests passed'
    mock_result.stderr = ''

    with patch('subprocess.run', return_value=mock_result):
        result = await dbt_tools.dbt_test()

    assert result['success'] is True


@pytest.mark.asyncio
async def test_dbt_build(dbt_tools):
    """Test dbt build with pre-validation"""
    mock_parse = MagicMock()
    mock_parse.returncode = 0
    mock_parse.stdout = 'OK'
    mock_parse.stderr = ''

    mock_build = MagicMock()
    mock_build.returncode = 0
    mock_build.stdout = 'Build complete'
    mock_build.stderr = ''

    with patch('subprocess.run', side_effect=[mock_parse, mock_build]):
        result = await dbt_tools.dbt_build()

    assert result['success'] is True


@pytest.mark.asyncio
async def test_dbt_compile(dbt_tools):
    """Test dbt compile"""
    mock_result = MagicMock()
    mock_result.returncode = 0
    mock_result.stdout = 'Compiled'
    mock_result.stderr = ''

    with patch('subprocess.run', return_value=mock_result):
        result = await dbt_tools.dbt_compile()

    assert result['success'] is True


@pytest.mark.asyncio
async def test_dbt_ls(dbt_tools):
    """Test dbt ls"""
    mock_result = MagicMock()
    mock_result.returncode = 0
    mock_result.stdout = 'dim_customers\ndim_products\nfct_orders\n'
    mock_result.stderr = ''

    with patch('subprocess.run', return_value=mock_result):
        result = await dbt_tools.dbt_ls()

    assert result['success'] is True
    assert result['count'] == 3
    assert 'dim_customers' in result['resources']


@pytest.mark.asyncio
async def test_dbt_docs_generate(dbt_tools, tmp_path):
    """Test dbt docs generate"""
    mock_result = MagicMock()
    mock_result.returncode = 0
    mock_result.stdout = 'Done'
    mock_result.stderr = ''

    # Create fake target directory
    target_dir = tmp_path / 'dbt_project' / 'target'
    target_dir.mkdir(parents=True)
    (target_dir / 'catalog.json').write_text('{}')
    (target_dir / 'manifest.json').write_text('{}')

    dbt_tools.project_dir = str(tmp_path / 'dbt_project')

    with patch('subprocess.run', return_value=mock_result):
        result = await dbt_tools.dbt_docs_generate()

    assert result['success'] is True
    assert result['catalog_generated'] is True
    assert result['manifest_generated'] is True


@pytest.mark.asyncio
async def test_dbt_lineage(dbt_tools, tmp_path):
    """Test dbt lineage"""
    # Create manifest
    target_dir = tmp_path / 'dbt_project' / 'target'
    target_dir.mkdir(parents=True)

    manifest = {
        'nodes': {
            'model.test.dim_customers': {
                'name': 'dim_customers',
                'resource_type': 'model',
                'schema': 'public',
                'database': 'testdb',
                'description': 'Customer dimension',
                'tags': ['daily'],
                'config': {'materialized': 'table'},
                'depends_on': {
                    'nodes': ['model.test.stg_customers']
                }
            },
            'model.test.stg_customers': {
                'name': 'stg_customers',
                'resource_type': 'model',
                'depends_on': {'nodes': []}
            },
            'model.test.fct_orders': {
                'name': 'fct_orders',
                'resource_type': 'model',
                'depends_on': {
                    'nodes': ['model.test.dim_customers']
                }
            }
        }
    }
    (target_dir / 'manifest.json').write_text(json.dumps(manifest))

    dbt_tools.project_dir = str(tmp_path / 'dbt_project')

    result = await dbt_tools.dbt_lineage('dim_customers')

    assert result['model'] == 'dim_customers'
    assert 'model.test.stg_customers' in result['upstream']
    assert 'model.test.fct_orders' in result['downstream']


@pytest.mark.asyncio
async def test_dbt_lineage_model_not_found(dbt_tools, tmp_path):
    """Test dbt lineage with nonexistent model"""
    target_dir = tmp_path / 'dbt_project' / 'target'
    target_dir.mkdir(parents=True)

    manifest = {
        'nodes': {
            'model.test.dim_customers': {
                'name': 'dim_customers',
                'resource_type': 'model'
            }
        }
    }
    (target_dir / 'manifest.json').write_text(json.dumps(manifest))

    dbt_tools.project_dir = str(tmp_path / 'dbt_project')

    result = await dbt_tools.dbt_lineage('nonexistent_model')

    assert 'error' in result
    assert 'not found' in result['error'].lower()


@pytest.mark.asyncio
async def test_dbt_no_project():
    """Test dbt tools when no project configured"""
    with patch('mcp_server.dbt_tools.load_config', return_value={'dbt_project_dir': None}):
        from mcp_server.dbt_tools import DbtTools

        tools = DbtTools()
        tools.project_dir = None

        result = await tools.dbt_run()

    assert 'error' in result
    assert 'not found' in result['error'].lower()


@pytest.mark.asyncio
async def test_dbt_timeout(dbt_tools):
    """Test dbt command timeout handling"""
    with patch('subprocess.run', side_effect=subprocess.TimeoutExpired('dbt', 300)):
        result = await dbt_tools.dbt_parse()

    assert 'error' in result
    assert 'timed out' in result['error'].lower()


@pytest.mark.asyncio
async def test_dbt_not_installed(dbt_tools):
    """Test handling when dbt is not installed"""
    with patch('subprocess.run', side_effect=FileNotFoundError()):
        result = await dbt_tools.dbt_parse()

    assert 'error' in result
    assert 'not found' in result['error'].lower()
301
mcp-servers/data-platform/tests/test_pandas_tools.py
Normal file
301
mcp-servers/data-platform/tests/test_pandas_tools.py
Normal file
@@ -0,0 +1,301 @@
"""
Unit tests for pandas MCP tools.
"""
import pytest
import pandas as pd
import tempfile
import os
from pathlib import Path


@pytest.fixture
def temp_csv(tmp_path):
    """Create a temporary CSV file for testing"""
    csv_path = tmp_path / 'test.csv'
    df = pd.DataFrame({
        'id': [1, 2, 3, 4, 5],
        'name': ['Alice', 'Bob', 'Charlie', 'Diana', 'Eve'],
        'value': [10.5, 20.0, 30.5, 40.0, 50.5]
    })
    df.to_csv(csv_path, index=False)
    return str(csv_path)


@pytest.fixture
def temp_parquet(tmp_path):
    """Create a temporary Parquet file for testing"""
    parquet_path = tmp_path / 'test.parquet'
    df = pd.DataFrame({
        'id': [1, 2, 3],
        'data': ['a', 'b', 'c']
    })
    df.to_parquet(parquet_path)
    return str(parquet_path)


@pytest.fixture
def temp_json(tmp_path):
    """Create a temporary JSON file for testing"""
    json_path = tmp_path / 'test.json'
    df = pd.DataFrame({
        'x': [1, 2],
        'y': [3, 4]
    })
    df.to_json(json_path, orient='records')
    return str(json_path)


@pytest.fixture
def pandas_tools():
    """Create PandasTools instance with fresh store"""
    from mcp_server.pandas_tools import PandasTools
    from mcp_server.data_store import DataStore

    # Reset store for test isolation
    store = DataStore.get_instance()
    store._dataframes = {}
    store._metadata = {}

    return PandasTools()


@pytest.mark.asyncio
async def test_read_csv(pandas_tools, temp_csv):
    """Test reading CSV file"""
    result = await pandas_tools.read_csv(temp_csv, name='csv_test')

    assert 'data_ref' in result
    assert result['data_ref'] == 'csv_test'
    assert result['rows'] == 5
    assert 'id' in result['columns']
    assert 'name' in result['columns']


@pytest.mark.asyncio
async def test_read_csv_nonexistent(pandas_tools):
    """Test reading nonexistent CSV file"""
    result = await pandas_tools.read_csv('/nonexistent/path.csv')

    assert 'error' in result
    assert 'not found' in result['error'].lower()


@pytest.mark.asyncio
async def test_read_parquet(pandas_tools, temp_parquet):
    """Test reading Parquet file"""
    result = await pandas_tools.read_parquet(temp_parquet, name='parquet_test')

    assert 'data_ref' in result
    assert result['rows'] == 3


@pytest.mark.asyncio
async def test_read_json(pandas_tools, temp_json):
    """Test reading JSON file"""
    result = await pandas_tools.read_json(temp_json, name='json_test')

    assert 'data_ref' in result
    assert result['rows'] == 2


@pytest.mark.asyncio
async def test_to_csv(pandas_tools, temp_csv, tmp_path):
    """Test exporting to CSV"""
    # First load some data
    await pandas_tools.read_csv(temp_csv, name='export_test')

    # Export to new file
    output_path = str(tmp_path / 'output.csv')
    result = await pandas_tools.to_csv('export_test', output_path)

    assert result['success'] is True
    assert os.path.exists(output_path)


@pytest.mark.asyncio
async def test_to_parquet(pandas_tools, temp_csv, tmp_path):
    """Test exporting to Parquet"""
    await pandas_tools.read_csv(temp_csv, name='parquet_export')

    output_path = str(tmp_path / 'output.parquet')
    result = await pandas_tools.to_parquet('parquet_export', output_path)

    assert result['success'] is True
    assert os.path.exists(output_path)


@pytest.mark.asyncio
async def test_describe(pandas_tools, temp_csv):
    """Test describe statistics"""
    await pandas_tools.read_csv(temp_csv, name='describe_test')

    result = await pandas_tools.describe('describe_test')

    assert 'data_ref' in result
    assert 'shape' in result
    assert result['shape']['rows'] == 5
    assert 'statistics' in result
    assert 'null_counts' in result


@pytest.mark.asyncio
async def test_head(pandas_tools, temp_csv):
    """Test getting first N rows"""
    await pandas_tools.read_csv(temp_csv, name='head_test')

    result = await pandas_tools.head('head_test', n=3)

    assert result['returned_rows'] == 3
    assert len(result['data']) == 3


@pytest.mark.asyncio
async def test_tail(pandas_tools, temp_csv):
    """Test getting last N rows"""
    await pandas_tools.read_csv(temp_csv, name='tail_test')

    result = await pandas_tools.tail('tail_test', n=2)

    assert result['returned_rows'] == 2


@pytest.mark.asyncio
async def test_filter(pandas_tools, temp_csv):
    """Test filtering rows"""
    await pandas_tools.read_csv(temp_csv, name='filter_test')

    result = await pandas_tools.filter('filter_test', 'value > 25')

    assert 'data_ref' in result
    assert result['rows'] == 3  # 30.5, 40.0, 50.5


@pytest.mark.asyncio
async def test_filter_invalid_condition(pandas_tools, temp_csv):
    """Test filter with invalid condition"""
    await pandas_tools.read_csv(temp_csv, name='filter_error')

    result = await pandas_tools.filter('filter_error', 'invalid_column > 0')

    assert 'error' in result


@pytest.mark.asyncio
async def test_select(pandas_tools, temp_csv):
    """Test selecting columns"""
    await pandas_tools.read_csv(temp_csv, name='select_test')

    result = await pandas_tools.select('select_test', ['id', 'name'])

    assert 'data_ref' in result
    assert result['columns'] == ['id', 'name']


@pytest.mark.asyncio
async def test_select_invalid_column(pandas_tools, temp_csv):
    """Test select with invalid column"""
    await pandas_tools.read_csv(temp_csv, name='select_error')

    result = await pandas_tools.select('select_error', ['id', 'nonexistent'])

    assert 'error' in result
    assert 'available_columns' in result


@pytest.mark.asyncio
async def test_groupby(pandas_tools, tmp_path):
    """Test groupby aggregation"""
    # Create test data with groups
    csv_path = tmp_path / 'groupby.csv'
    df = pd.DataFrame({
        'category': ['A', 'A', 'B', 'B'],
        'value': [10, 20, 30, 40]
    })
    df.to_csv(csv_path, index=False)

    await pandas_tools.read_csv(str(csv_path), name='groupby_test')

    result = await pandas_tools.groupby(
        'groupby_test',
        by='category',
        agg={'value': 'sum'}
    )

    assert 'data_ref' in result
    assert result['rows'] == 2  # Two groups: A, B


@pytest.mark.asyncio
async def test_join(pandas_tools, tmp_path):
    """Test joining DataFrames"""
    # Create left table
    left_path = tmp_path / 'left.csv'
    pd.DataFrame({
        'id': [1, 2, 3],
        'name': ['A', 'B', 'C']
    }).to_csv(left_path, index=False)

    # Create right table
    right_path = tmp_path / 'right.csv'
    pd.DataFrame({
        'id': [1, 2, 4],
        'value': [100, 200, 400]
    }).to_csv(right_path, index=False)

    await pandas_tools.read_csv(str(left_path), name='left')
    await pandas_tools.read_csv(str(right_path), name='right')

    result = await pandas_tools.join('left', 'right', on='id', how='inner')

    assert 'data_ref' in result
    assert result['rows'] == 2  # Only id 1 and 2 match


@pytest.mark.asyncio
async def test_list_data(pandas_tools, temp_csv):
    """Test listing all DataFrames"""
    await pandas_tools.read_csv(temp_csv, name='list_test1')
    await pandas_tools.read_csv(temp_csv, name='list_test2')

    result = await pandas_tools.list_data()

    assert result['count'] == 2
    refs = [df['ref'] for df in result['dataframes']]
    assert 'list_test1' in refs
    assert 'list_test2' in refs


@pytest.mark.asyncio
async def test_drop_data(pandas_tools, temp_csv):
    """Test dropping DataFrame"""
    await pandas_tools.read_csv(temp_csv, name='drop_test')

    result = await pandas_tools.drop_data('drop_test')

    assert result['success'] is True

    # Verify it's gone
    list_result = await pandas_tools.list_data()
    refs = [df['ref'] for df in list_result['dataframes']]
    assert 'drop_test' not in refs


@pytest.mark.asyncio
async def test_drop_nonexistent(pandas_tools):
    """Test dropping nonexistent DataFrame"""
    result = await pandas_tools.drop_data('nonexistent')

    assert 'error' in result


@pytest.mark.asyncio
async def test_operations_on_nonexistent(pandas_tools):
    """Test operations on nonexistent data_ref"""
    result = await pandas_tools.describe('nonexistent')
    assert 'error' in result

    result = await pandas_tools.head('nonexistent')
    assert 'error' in result

    result = await pandas_tools.filter('nonexistent', 'x > 0')
    assert 'error' in result
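The filter tests pass a plain string condition ('value > 25'), which maps naturally onto DataFrame.query. A minimal sketch of how such a tool method could evaluate the condition, assuming the query-based approach — the actual PandasTools internals are not shown in this diff:

import pandas as pd

def filter_rows(df: pd.DataFrame, condition: str) -> dict:
    """Apply a string condition via DataFrame.query, mirroring the tool's contract."""
    try:
        filtered = df.query(condition)  # raises on unknown columns or bad syntax
        return {'data_ref': 'filtered', 'rows': len(filtered)}
    except Exception as e:
        return {'error': str(e)}  # e.g. 'invalid_column > 0' lands here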
mcp-servers/data-platform/tests/test_postgres_tools.py (new file, 338 lines)
@@ -0,0 +1,338 @@
"""
Unit tests for PostgreSQL MCP tools.
"""
import pytest
from unittest.mock import Mock, AsyncMock, patch, MagicMock
import pandas as pd


@pytest.fixture
def mock_config():
    """Mock configuration"""
    return {
        'postgres_url': 'postgresql://test:test@localhost:5432/testdb',
        'max_rows': 100000
    }


@pytest.fixture
def postgres_tools(mock_config):
    """Create PostgresTools instance with mocked config"""
    with patch('mcp_server.postgres_tools.load_config', return_value=mock_config):
        from mcp_server.postgres_tools import PostgresTools
        from mcp_server.data_store import DataStore

        # Reset store
        store = DataStore.get_instance()
        store._dataframes = {}
        store._metadata = {}

        tools = PostgresTools()
        tools.config = mock_config
        return tools


@pytest.mark.asyncio
async def test_pg_connect_no_config():
    """Test pg_connect when no PostgreSQL configured"""
    with patch('mcp_server.postgres_tools.load_config', return_value={'postgres_url': None}):
        from mcp_server.postgres_tools import PostgresTools

        tools = PostgresTools()
        tools.config = {'postgres_url': None}

        result = await tools.pg_connect()

        assert result['connected'] is False
        assert 'not configured' in result['error'].lower()


@pytest.mark.asyncio
async def test_pg_connect_success(postgres_tools):
    """Test successful pg_connect"""
    mock_conn = AsyncMock()
    mock_conn.fetchval = AsyncMock(side_effect=[
        'PostgreSQL 15.1',  # version
        'testdb',           # database name
        'testuser',         # user
        None                # PostGIS check fails
    ])
    mock_conn.close = AsyncMock()

    # Create proper async context manager
    mock_cm = AsyncMock()
    mock_cm.__aenter__ = AsyncMock(return_value=mock_conn)
    mock_cm.__aexit__ = AsyncMock(return_value=None)

    mock_pool = MagicMock()
    mock_pool.acquire = MagicMock(return_value=mock_cm)

    # Use AsyncMock for create_pool since it's awaited
    with patch('asyncpg.create_pool', new=AsyncMock(return_value=mock_pool)):
        postgres_tools.pool = None
        result = await postgres_tools.pg_connect()

        assert result['connected'] is True
        assert result['database'] == 'testdb'


@pytest.mark.asyncio
async def test_pg_query_success(postgres_tools):
    """Test successful pg_query"""
    mock_rows = [
        {'id': 1, 'name': 'Alice'},
        {'id': 2, 'name': 'Bob'}
    ]

    mock_conn = AsyncMock()
    mock_conn.fetch = AsyncMock(return_value=mock_rows)

    mock_pool = AsyncMock()
    mock_pool.acquire = MagicMock(return_value=AsyncMock(
        __aenter__=AsyncMock(return_value=mock_conn),
        __aexit__=AsyncMock()
    ))

    postgres_tools.pool = mock_pool

    result = await postgres_tools.pg_query('SELECT * FROM users', name='users_data')

    assert 'data_ref' in result
    assert result['rows'] == 2


@pytest.mark.asyncio
async def test_pg_query_empty_result(postgres_tools):
    """Test pg_query with no results"""
    mock_conn = AsyncMock()
    mock_conn.fetch = AsyncMock(return_value=[])

    mock_pool = AsyncMock()
    mock_pool.acquire = MagicMock(return_value=AsyncMock(
        __aenter__=AsyncMock(return_value=mock_conn),
        __aexit__=AsyncMock()
    ))

    postgres_tools.pool = mock_pool

    result = await postgres_tools.pg_query('SELECT * FROM empty_table')

    assert result['data_ref'] is None
    assert result['rows'] == 0


@pytest.mark.asyncio
async def test_pg_execute_success(postgres_tools):
    """Test successful pg_execute"""
    mock_conn = AsyncMock()
    mock_conn.execute = AsyncMock(return_value='INSERT 0 3')

    mock_pool = AsyncMock()
    mock_pool.acquire = MagicMock(return_value=AsyncMock(
        __aenter__=AsyncMock(return_value=mock_conn),
        __aexit__=AsyncMock()
    ))

    postgres_tools.pool = mock_pool

    result = await postgres_tools.pg_execute('INSERT INTO users VALUES (1, 2, 3)')

    assert result['success'] is True
    assert result['affected_rows'] == 3
    assert result['command'] == 'INSERT'


@pytest.mark.asyncio
async def test_pg_tables(postgres_tools):
    """Test listing tables"""
    mock_rows = [
        {'table_name': 'users', 'table_type': 'BASE TABLE', 'column_count': 5},
        {'table_name': 'orders', 'table_type': 'BASE TABLE', 'column_count': 8}
    ]

    mock_conn = AsyncMock()
    mock_conn.fetch = AsyncMock(return_value=mock_rows)

    mock_pool = AsyncMock()
    mock_pool.acquire = MagicMock(return_value=AsyncMock(
        __aenter__=AsyncMock(return_value=mock_conn),
        __aexit__=AsyncMock()
    ))

    postgres_tools.pool = mock_pool

    result = await postgres_tools.pg_tables(schema='public')

    assert result['schema'] == 'public'
    assert result['count'] == 2
    assert len(result['tables']) == 2


@pytest.mark.asyncio
async def test_pg_columns(postgres_tools):
    """Test getting column info"""
    mock_rows = [
        {
            'column_name': 'id',
            'data_type': 'integer',
            'udt_name': 'int4',
            'is_nullable': 'NO',
            'column_default': "nextval('users_id_seq'::regclass)",
            'character_maximum_length': None,
            'numeric_precision': 32
        },
        {
            'column_name': 'name',
            'data_type': 'character varying',
            'udt_name': 'varchar',
            'is_nullable': 'YES',
            'column_default': None,
            'character_maximum_length': 255,
            'numeric_precision': None
        }
    ]

    mock_conn = AsyncMock()
    mock_conn.fetch = AsyncMock(return_value=mock_rows)

    mock_pool = AsyncMock()
    mock_pool.acquire = MagicMock(return_value=AsyncMock(
        __aenter__=AsyncMock(return_value=mock_conn),
        __aexit__=AsyncMock()
    ))

    postgres_tools.pool = mock_pool

    result = await postgres_tools.pg_columns(table='users')

    assert result['table'] == 'public.users'
    assert result['column_count'] == 2
    assert result['columns'][0]['name'] == 'id'
    assert result['columns'][0]['nullable'] is False


@pytest.mark.asyncio
async def test_pg_schemas(postgres_tools):
    """Test listing schemas"""
    mock_rows = [
        {'schema_name': 'public'},
        {'schema_name': 'app'}
    ]

    mock_conn = AsyncMock()
    mock_conn.fetch = AsyncMock(return_value=mock_rows)

    mock_pool = AsyncMock()
    mock_pool.acquire = MagicMock(return_value=AsyncMock(
        __aenter__=AsyncMock(return_value=mock_conn),
        __aexit__=AsyncMock()
    ))

    postgres_tools.pool = mock_pool

    result = await postgres_tools.pg_schemas()

    assert result['count'] == 2
    assert 'public' in result['schemas']


@pytest.mark.asyncio
async def test_st_tables(postgres_tools):
    """Test listing PostGIS tables"""
    mock_rows = [
        {
            'table_name': 'locations',
            'geometry_column': 'geom',
            'geometry_type': 'POINT',
            'srid': 4326,
            'coord_dimension': 2
        }
    ]

    mock_conn = AsyncMock()
    mock_conn.fetch = AsyncMock(return_value=mock_rows)

    mock_pool = AsyncMock()
    mock_pool.acquire = MagicMock(return_value=AsyncMock(
        __aenter__=AsyncMock(return_value=mock_conn),
        __aexit__=AsyncMock()
    ))

    postgres_tools.pool = mock_pool

    result = await postgres_tools.st_tables()

    assert result['count'] == 1
    assert result['postgis_tables'][0]['table'] == 'locations'
    assert result['postgis_tables'][0]['srid'] == 4326


@pytest.mark.asyncio
async def test_st_tables_no_postgis(postgres_tools):
    """Test st_tables when PostGIS not installed"""
    mock_conn = AsyncMock()
    mock_conn.fetch = AsyncMock(side_effect=Exception("relation \"geometry_columns\" does not exist"))

    # Create proper async context manager
    mock_cm = AsyncMock()
    mock_cm.__aenter__ = AsyncMock(return_value=mock_conn)
    mock_cm.__aexit__ = AsyncMock(return_value=None)

    mock_pool = MagicMock()
    mock_pool.acquire = MagicMock(return_value=mock_cm)

    postgres_tools.pool = mock_pool

    result = await postgres_tools.st_tables()

    assert 'error' in result
    assert 'PostGIS' in result['error']


@pytest.mark.asyncio
async def test_st_extent(postgres_tools):
    """Test getting geometry bounding box"""
    mock_row = {
        'xmin': -122.5,
        'ymin': 37.5,
        'xmax': -122.0,
        'ymax': 38.0
    }

    mock_conn = AsyncMock()
    mock_conn.fetchrow = AsyncMock(return_value=mock_row)

    mock_pool = AsyncMock()
    mock_pool.acquire = MagicMock(return_value=AsyncMock(
        __aenter__=AsyncMock(return_value=mock_conn),
        __aexit__=AsyncMock()
    ))

    postgres_tools.pool = mock_pool

    result = await postgres_tools.st_extent(table='locations', column='geom')

    assert result['bbox']['xmin'] == -122.5
    assert result['bbox']['ymax'] == 38.0


@pytest.mark.asyncio
async def test_error_handling(postgres_tools):
    """Test error handling for database errors"""
    mock_conn = AsyncMock()
    mock_conn.fetch = AsyncMock(side_effect=Exception("Connection refused"))

    # Create proper async context manager
    mock_cm = AsyncMock()
    mock_cm.__aenter__ = AsyncMock(return_value=mock_conn)
    mock_cm.__aexit__ = AsyncMock(return_value=None)

    mock_pool = MagicMock()
    mock_pool.acquire = MagicMock(return_value=mock_cm)

    postgres_tools.pool = mock_pool

    result = await postgres_tools.pg_query('SELECT 1')

    assert 'error' in result
    assert 'Connection refused' in result['error']
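Nearly every test above rebuilds the same acquire()/async-context-manager scaffolding. The pattern matters because asyncpg's pool.acquire() is a plain call that returns an async context manager, so acquire is stubbed with MagicMock rather than left as an awaitable AsyncMock attribute. A small factory, sketched here as a possible refactor rather than part of the diff:

from unittest.mock import AsyncMock, MagicMock

def make_mock_pool(mock_conn: AsyncMock) -> MagicMock:
    """Build a pool whose acquire() yields mock_conn under 'async with'."""
    mock_cm = AsyncMock()
    mock_cm.__aenter__ = AsyncMock(return_value=mock_conn)
    mock_cm.__aexit__ = AsyncMock(return_value=None)
    pool = MagicMock()  # plain mock: acquire() is called, not awaited
    pool.acquire = MagicMock(return_value=mock_cm)
    return pool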
@@ -389,7 +389,7 @@ def list_issues(self, state='open', labels=None, repo=None):
 
 ## License
 
-MIT License - Part of the Claude Code Marketplace project.
+MIT License - Part of the Leo Claude Marketplace project.
 
 ## Related Documentation
 
@@ -406,7 +406,7 @@ For issues or questions:
 
 ---
 
-**Built for**: Claude Code Marketplace - Project Management Plugins
+**Built for**: Leo Claude Marketplace - Project Management Plugins
 **Phase**: 1 (Complete)
 **Status**: ✅ Production Ready
 **Last Updated**: 2025-01-06
mcp-servers/gitea/mcp_server/config.py (new file, 227 lines)
@@ -0,0 +1,227 @@
"""
Configuration loader for Gitea MCP Server.

Implements hybrid configuration system:
- System-level: ~/.config/claude/gitea.env (credentials)
- Project-level: .env (repository specification)
- Auto-detection: Falls back to git remote URL parsing
"""
from pathlib import Path
from dotenv import load_dotenv
import os
import re
import subprocess
import logging
from typing import Dict, Optional

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


class GiteaConfig:
    """Hybrid configuration loader with mode detection"""

    def __init__(self):
        self.api_url: Optional[str] = None
        self.api_token: Optional[str] = None
        self.repo: Optional[str] = None
        self.mode: str = 'project'

    def load(self) -> Dict[str, Optional[str]]:
        """
        Load configuration from system and project levels.
        Project-level configuration overrides system-level.

        Returns:
            Dict containing api_url, api_token, repo, mode

        Raises:
            FileNotFoundError: If system config is missing
            ValueError: If required configuration is missing
        """
        # Load system config
        system_config = Path.home() / '.config' / 'claude' / 'gitea.env'
        if system_config.exists():
            load_dotenv(system_config)
            logger.info(f"Loaded system configuration from {system_config}")
        else:
            raise FileNotFoundError(
                f"System config not found: {system_config}\n"
                "Create it with: mkdir -p ~/.config/claude && "
                "cat > ~/.config/claude/gitea.env"
            )

        # Find project directory (MCP server cwd is plugin dir, not project dir)
        project_dir = self._find_project_directory()

        # Load project config (overrides system)
        if project_dir:
            project_config = project_dir / '.env'
            if project_config.exists():
                load_dotenv(project_config, override=True)
                logger.info(f"Loaded project configuration from {project_config}")

        # Extract values
        self.api_url = os.getenv('GITEA_API_URL')
        self.api_token = os.getenv('GITEA_API_TOKEN')
        self.repo = os.getenv('GITEA_REPO')  # Optional, must be owner/repo format

        # Auto-detect repo from git remote if not specified
        if not self.repo and project_dir:
            self.repo = self._detect_repo_from_git(project_dir)
            if self.repo:
                logger.info(f"Auto-detected repository from git remote: {self.repo}")

        # Detect mode
        if self.repo:
            self.mode = 'project'
            logger.info(f"Running in project mode: {self.repo}")
        else:
            self.mode = 'company'
            logger.info("Running in company-wide mode (PMO)")

        # Validate required variables
        self._validate()

        return {
            'api_url': self.api_url,
            'api_token': self.api_token,
            'repo': self.repo,
            'mode': self.mode
        }

    def _validate(self) -> None:
        """
        Validate that required configuration is present.

        Raises:
            ValueError: If required configuration is missing
        """
        required = {
            'GITEA_API_URL': self.api_url,
            'GITEA_API_TOKEN': self.api_token
        }

        missing = [key for key, value in required.items() if not value]

        if missing:
            raise ValueError(
                f"Missing required configuration: {', '.join(missing)}\n"
                "Check your ~/.config/claude/gitea.env file"
            )

    def _find_project_directory(self) -> Optional[Path]:
        """
        Find the user's project directory.

        The MCP server runs with cwd set to the plugin directory, not the
        user's project. We need to find the actual project directory using
        various heuristics.

        Returns:
            Path to project directory, or None if not found
        """
        # Strategy 1: Check CLAUDE_PROJECT_DIR environment variable
        project_dir = os.getenv('CLAUDE_PROJECT_DIR')
        if project_dir:
            path = Path(project_dir)
            if path.exists():
                logger.info(f"Found project directory from CLAUDE_PROJECT_DIR: {path}")
                return path

        # Strategy 2: Check PWD (original working directory before cwd override)
        pwd = os.getenv('PWD')
        if pwd:
            path = Path(pwd)
            # Verify it has .git or .env (indicates a project)
            if path.exists() and ((path / '.git').exists() or (path / '.env').exists()):
                logger.info(f"Found project directory from PWD: {path}")
                return path

        # Strategy 3: Check current working directory
        # This handles test scenarios and cases where cwd is actually the project
        cwd = Path.cwd()
        if (cwd / '.git').exists() or (cwd / '.env').exists():
            logger.info(f"Found project directory from cwd: {cwd}")
            return cwd

        # Strategy 4: Check if GITEA_REPO is already set (user configured it)
        # If so, we don't need to find the project directory for git detection
        if os.getenv('GITEA_REPO'):
            logger.debug("GITEA_REPO already set, skipping project directory detection")
            return None

        logger.debug("Could not determine project directory")
        return None

    def _detect_repo_from_git(self, project_dir: Optional[Path] = None) -> Optional[str]:
        """
        Auto-detect repository from git remote origin URL.

        Args:
            project_dir: Directory to run git command from (defaults to cwd)

        Supports URL formats:
        - SSH: ssh://git@host:port/owner/repo.git
        - SSH short: git@host:owner/repo.git
        - HTTPS: https://host/owner/repo.git
        - HTTP: http://host/owner/repo.git

        Returns:
            Repository in 'owner/repo' format, or None if detection fails
        """
        try:
            result = subprocess.run(
                ['git', 'remote', 'get-url', 'origin'],
                capture_output=True,
                text=True,
                timeout=5,
                cwd=str(project_dir) if project_dir else None
            )
            if result.returncode != 0:
                logger.debug("No git remote 'origin' found")
                return None

            url = result.stdout.strip()
            return self._parse_git_url(url)

        except subprocess.TimeoutExpired:
            logger.warning("Git command timed out")
            return None
        except FileNotFoundError:
            logger.debug("Git not available")
            return None
        except Exception as e:
            logger.debug(f"Failed to detect repo from git: {e}")
            return None

    def _parse_git_url(self, url: str) -> Optional[str]:
        """
        Parse git URL to extract owner/repo.

        Args:
            url: Git remote URL

        Returns:
            Repository in 'owner/repo' format, or None if parsing fails
        """
        # Remove .git suffix if present
        url = re.sub(r'\.git$', '', url)

        # SSH format: ssh://git@host:port/owner/repo
        ssh_match = re.match(r'ssh://[^/]+/(.+/.+)$', url)
        if ssh_match:
            return ssh_match.group(1)

        # SSH short format: git@host:owner/repo
        ssh_short_match = re.match(r'git@[^:]+:(.+/.+)$', url)
        if ssh_short_match:
            return ssh_short_match.group(1)

        # HTTPS/HTTP format: https://host/owner/repo
        http_match = re.match(r'https?://[^/]+/(.+/.+)$', url)
        if http_match:
            return http_match.group(1)

        logger.warning(f"Could not parse git URL: {url}")
        return None
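For concreteness, here is how _parse_git_url resolves each of the documented formats (hostnames invented for the example):

cfg = GiteaConfig()
assert cfg._parse_git_url('ssh://git@gitea.example.com:2222/acme/widget.git') == 'acme/widget'
assert cfg._parse_git_url('git@gitea.example.com:acme/widget.git') == 'acme/widget'
assert cfg._parse_git_url('https://gitea.example.com/acme/widget.git') == 'acme/widget'
assert cfg._parse_git_url('http://gitea.example.com/acme/widget') == 'acme/widget'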
@@ -53,6 +53,7 @@ class GiteaClient:
         self,
         state: str = 'open',
         labels: Optional[List[str]] = None,
+        milestone: Optional[str] = None,
         repo: Optional[str] = None
     ) -> List[Dict]:
         """
@@ -61,6 +62,7 @@ class GiteaClient:
         Args:
             state: Issue state (open, closed, all)
             labels: Filter by labels
+            milestone: Filter by milestone title (exact match)
             repo: Repository in 'owner/repo' format
 
         Returns:
@@ -71,6 +73,8 @@ class GiteaClient:
         params = {'state': state}
         if labels:
            params['labels'] = ','.join(labels)
+        if milestone:
+            params['milestones'] = milestone
         logger.info(f"Listing issues from {owner}/{target_repo} with state={state}")
         response = self.session.get(url, params=params)
         response.raise_for_status()
@@ -110,8 +114,14 @@ class GiteaClient:
 
     def _resolve_label_ids(self, label_names: List[str], owner: str, repo: str) -> List[int]:
         """Convert label names to label IDs."""
-        org_labels = self.get_org_labels(owner)
-        repo_labels = self.get_labels(f"{owner}/{repo}")
+        full_repo = f"{owner}/{repo}"
+
+        # Only fetch org labels if repo belongs to an organization
+        org_labels = []
+        if self.is_org_repo(full_repo):
+            org_labels = self.get_org_labels(owner)
+
+        repo_labels = self.get_labels(full_repo)
         all_labels = org_labels + repo_labels
         label_map = {label['name']: label['id'] for label in all_labels}
         label_ids = []
@@ -129,9 +139,24 @@ class GiteaClient:
         body: Optional[str] = None,
         state: Optional[str] = None,
         labels: Optional[List[str]] = None,
+        milestone: Optional[int] = None,
         repo: Optional[str] = None
     ) -> Dict:
-        """Update existing issue. Repo must be 'owner/repo' format."""
+        """
+        Update existing issue.
+
+        Args:
+            issue_number: Issue number to update
+            title: New title (optional)
+            body: New body (optional)
+            state: New state - 'open' or 'closed' (optional)
+            labels: New labels (optional)
+            milestone: Milestone ID to assign (optional)
+            repo: Repository in 'owner/repo' format
+
+        Returns:
+            Updated issue dictionary
+        """
         owner, target_repo = self._parse_repo(repo)
         url = f"{self.base_url}/repos/{owner}/{target_repo}/issues/{issue_number}"
         data = {}
@@ -143,6 +168,8 @@ class GiteaClient:
             data['state'] = state
         if labels is not None:
             data['labels'] = labels
+        if milestone is not None:
+            data['milestone'] = milestone
         logger.info(f"Updating issue #{issue_number} in {owner}/{target_repo}")
         response = self.session.patch(url, json=data)
         response.raise_for_status()
@@ -233,8 +260,11 @@ class GiteaClient:
         repo: Optional[str] = None
     ) -> Dict:
         """Get a specific wiki page by name."""
+        from urllib.parse import quote
         owner, target_repo = self._parse_repo(repo)
-        url = f"{self.base_url}/repos/{owner}/{target_repo}/wiki/page/{page_name}"
+        # URL-encode the page_name to handle special characters like ':'
+        encoded_page_name = quote(page_name, safe='')
+        url = f"{self.base_url}/repos/{owner}/{target_repo}/wiki/page/{encoded_page_name}"
         logger.info(f"Getting wiki page '{page_name}' from {owner}/{target_repo}")
         response = self.session.get(url)
         response.raise_for_status()
@@ -265,9 +295,13 @@ class GiteaClient:
         repo: Optional[str] = None
     ) -> Dict:
         """Update an existing wiki page."""
+        from urllib.parse import quote
         owner, target_repo = self._parse_repo(repo)
-        url = f"{self.base_url}/repos/{owner}/{target_repo}/wiki/page/{page_name}"
+        # URL-encode the page_name to handle special characters like ':'
+        encoded_page_name = quote(page_name, safe='')
+        url = f"{self.base_url}/repos/{owner}/{target_repo}/wiki/page/{encoded_page_name}"
         data = {
+            'title': page_name,  # CRITICAL: include title to preserve page name
             'content_base64': self._encode_base64(content)
         }
         logger.info(f"Updating wiki page '{page_name}' in {owner}/{target_repo}")
@@ -281,8 +315,11 @@ class GiteaClient:
         repo: Optional[str] = None
     ) -> bool:
         """Delete a wiki page."""
+        from urllib.parse import quote
         owner, target_repo = self._parse_repo(repo)
-        url = f"{self.base_url}/repos/{owner}/{target_repo}/wiki/page/{page_name}"
+        # URL-encode the page_name to handle special characters like ':'
+        encoded_page_name = quote(page_name, safe='')
+        url = f"{self.base_url}/repos/{owner}/{target_repo}/wiki/page/{encoded_page_name}"
         logger.info(f"Deleting wiki page '{page_name}' from {owner}/{target_repo}")
         response = self.session.delete(url)
         response.raise_for_status()
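A quick illustration of why safe='' is passed to quote(): by default quote() leaves '/' unescaped, and wiki page names can contain both ':' and '/' (the page name below is invented for the example):

from urllib.parse import quote

quote('RFC: 001/Design', safe='')  # 'RFC%3A%20001%2FDesign' -- fully encoded
quote('RFC: 001/Design')           # 'RFC%3A%20001/Design'   -- '/' kept, breaks the wiki route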
@@ -548,10 +585,33 @@ class GiteaClient:
         return response.json()
 
     def is_org_repo(self, repo: Optional[str] = None) -> bool:
-        """Check if repository belongs to an organization (not a user)."""
-        info = self.get_repo_info(repo)
-        owner_type = info.get('owner', {}).get('type', '')
-        return owner_type.lower() == 'organization'
+        """
+        Check if repository belongs to an organization (not a user).
+
+        Uses the /orgs/{owner} endpoint to reliably detect organizations,
+        as the owner.type field in repo info may be null in some Gitea versions.
+        """
+        owner, _ = self._parse_repo(repo)
+        return self._is_organization(owner)
+
+    def _is_organization(self, owner: str) -> bool:
+        """
+        Check if an owner is an organization by querying the orgs endpoint.
+
+        Args:
+            owner: The owner name to check
+
+        Returns:
+            True if owner is an organization, False if user or unknown
+        """
+        url = f"{self.base_url}/orgs/{owner}"
+        try:
+            response = self.session.get(url)
+            # 200 = organization exists, 404 = not an organization (user account)
+            return response.status_code == 200
+        except Exception as e:
+            logger.warning(f"Failed to check if {owner} is organization: {e}")
+            return False
 
     def get_branch_protection(
         self,
@@ -591,3 +651,199 @@ class GiteaClient:
         response = self.session.post(url, json=data)
         response.raise_for_status()
         return response.json()
+
+    def create_org_label(
+        self,
+        org: str,
+        name: str,
+        color: str,
+        description: Optional[str] = None
+    ) -> Dict:
+        """
+        Create a new label at the organization level.
+
+        Organization labels are shared across all repositories in the org.
+        Use this for workflow labels (Type, Priority, Complexity, Effort, etc.)
+
+        Args:
+            org: Organization name
+            name: Label name (e.g., 'Type/Bug', 'Priority/High')
+            color: Hex color code (with or without #)
+            description: Optional label description
+
+        Returns:
+            Created label dictionary
+        """
+        url = f"{self.base_url}/orgs/{org}/labels"
+        data = {
+            'name': name,
+            'color': color.lstrip('#')  # Remove # if present
+        }
+        if description:
+            data['description'] = description
+        logger.info(f"Creating organization label '{name}' in {org}")
+        response = self.session.post(url, json=data)
+        response.raise_for_status()
+        return response.json()
+
+    # ========================================
+    # PULL REQUEST OPERATIONS
+    # ========================================
+
+    def list_pull_requests(
+        self,
+        state: str = 'open',
+        sort: str = 'recentupdate',
+        labels: Optional[List[str]] = None,
+        repo: Optional[str] = None
+    ) -> List[Dict]:
+        """
+        List pull requests from Gitea repository.
+
+        Args:
+            state: PR state (open, closed, all)
+            sort: Sort order (oldest, recentupdate, leastupdate, mostcomment, leastcomment, priority)
+            labels: Filter by labels
+            repo: Repository in 'owner/repo' format
+
+        Returns:
+            List of pull request dictionaries
+        """
+        owner, target_repo = self._parse_repo(repo)
+        url = f"{self.base_url}/repos/{owner}/{target_repo}/pulls"
+        params = {'state': state, 'sort': sort}
+        if labels:
+            params['labels'] = ','.join(labels)
+        logger.info(f"Listing PRs from {owner}/{target_repo} with state={state}")
+        response = self.session.get(url, params=params)
+        response.raise_for_status()
+        return response.json()
+
+    def get_pull_request(
+        self,
+        pr_number: int,
+        repo: Optional[str] = None
+    ) -> Dict:
+        """Get specific pull request details."""
+        owner, target_repo = self._parse_repo(repo)
+        url = f"{self.base_url}/repos/{owner}/{target_repo}/pulls/{pr_number}"
+        logger.info(f"Getting PR #{pr_number} from {owner}/{target_repo}")
+        response = self.session.get(url)
+        response.raise_for_status()
+        return response.json()
+
+    def get_pr_diff(
+        self,
+        pr_number: int,
+        repo: Optional[str] = None
+    ) -> str:
+        """Get the diff for a pull request."""
+        owner, target_repo = self._parse_repo(repo)
+        url = f"{self.base_url}/repos/{owner}/{target_repo}/pulls/{pr_number}.diff"
+        logger.info(f"Getting diff for PR #{pr_number} from {owner}/{target_repo}")
+        response = self.session.get(url)
+        response.raise_for_status()
+        return response.text
+
+    def get_pr_comments(
+        self,
+        pr_number: int,
+        repo: Optional[str] = None
+    ) -> List[Dict]:
+        """Get comments on a pull request (uses issue comments endpoint)."""
+        owner, target_repo = self._parse_repo(repo)
+        # PRs share comment endpoint with issues in Gitea
+        url = f"{self.base_url}/repos/{owner}/{target_repo}/issues/{pr_number}/comments"
+        logger.info(f"Getting comments for PR #{pr_number} from {owner}/{target_repo}")
+        response = self.session.get(url)
+        response.raise_for_status()
+        return response.json()
+
+    def create_pr_review(
+        self,
+        pr_number: int,
+        body: str,
+        event: str = 'COMMENT',
+        comments: Optional[List[Dict]] = None,
+        repo: Optional[str] = None
+    ) -> Dict:
+        """
+        Create a review on a pull request.
+
+        Args:
+            pr_number: Pull request number
+            body: Review body/summary
+            event: Review action (APPROVE, REQUEST_CHANGES, COMMENT)
+            comments: Optional list of inline comments with path, position, body
+            repo: Repository in 'owner/repo' format
+
+        Returns:
+            Created review dictionary
+        """
+        owner, target_repo = self._parse_repo(repo)
+        url = f"{self.base_url}/repos/{owner}/{target_repo}/pulls/{pr_number}/reviews"
+        data = {
+            'body': body,
+            'event': event
+        }
+        if comments:
+            data['comments'] = comments
+        logger.info(f"Creating review on PR #{pr_number} in {owner}/{target_repo}")
+        response = self.session.post(url, json=data)
+        response.raise_for_status()
+        return response.json()
+
+    def add_pr_comment(
+        self,
+        pr_number: int,
+        body: str,
+        repo: Optional[str] = None
+    ) -> Dict:
+        """Add a general comment to a pull request (uses issue comment endpoint)."""
+        owner, target_repo = self._parse_repo(repo)
+        # PRs share comment endpoint with issues in Gitea
+        url = f"{self.base_url}/repos/{owner}/{target_repo}/issues/{pr_number}/comments"
+        data = {'body': body}
+        logger.info(f"Adding comment to PR #{pr_number} in {owner}/{target_repo}")
+        response = self.session.post(url, json=data)
+        response.raise_for_status()
+        return response.json()
+
+    def create_pull_request(
+        self,
+        title: str,
+        body: str,
+        head: str,
+        base: str,
+        labels: Optional[List[str]] = None,
+        repo: Optional[str] = None
+    ) -> Dict:
+        """
+        Create a new pull request.
+
+        Args:
+            title: PR title
+            body: PR description/body
+            head: Source branch name (the branch with changes)
+            base: Target branch name (the branch to merge into)
+            labels: Optional list of label names
+            repo: Repository in 'owner/repo' format
+
+        Returns:
+            Created pull request dictionary
+        """
+        owner, target_repo = self._parse_repo(repo)
+        url = f"{self.base_url}/repos/{owner}/{target_repo}/pulls"
+        data = {
+            'title': title,
+            'body': body,
+            'head': head,
+            'base': base
+        }
+        if labels:
+            label_ids = self._resolve_label_ids(labels, owner, target_repo)
+            data['labels'] = label_ids
+        logger.info(f"Creating PR '{title}' in {owner}/{target_repo}: {head} -> {base}")
+        response = self.session.post(url, json=data)
+        response.raise_for_status()
+        return response.json()
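A sketch of how these PR methods compose end-to-end; the client setup is elided, and the branch, label, and comment values are invented for the example:

# client = GiteaClient(...)  # construction depends on the server's config wiring
pr = client.create_pull_request(
    title='Add milestone filter',
    body='Adds milestone filtering to list_issues.',
    head='feature/milestone-filter',
    base='main',
    labels=['Type/Feature'],
)
client.create_pr_review(
    pr['number'],
    body='Looks good, one inline note.',
    event='COMMENT',
    comments=[{'path': 'client.py', 'position': 12, 'body': 'Consider a constant here.'}],
)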
@@ -17,6 +17,7 @@ from .tools.labels import LabelTools
 from .tools.wiki import WikiTools
 from .tools.milestones import MilestoneTools
 from .tools.dependencies import DependencyTools
+from .tools.pull_requests import PullRequestTools
 
 # Suppress noisy MCP validation warnings on stderr
 logging.basicConfig(level=logging.INFO)
@@ -25,6 +26,44 @@ logging.getLogger("mcp").setLevel(logging.ERROR)
 logger = logging.getLogger(__name__)
 
 
+def _coerce_types(arguments: dict) -> dict:
+    """
+    Coerce argument types to handle MCP serialization quirks.
+
+    MCP sometimes passes integers as strings and arrays as JSON strings.
+    This function normalizes them to the expected Python types.
+    """
+    coerced = {}
+    for key, value in arguments.items():
+        if value is None:
+            coerced[key] = value
+            continue
+
+        # Coerce integer fields
+        int_fields = {'issue_number', 'milestone_id', 'pr_number', 'depends_on', 'milestone', 'limit'}
+        if key in int_fields and isinstance(value, str):
+            try:
+                coerced[key] = int(value)
+                continue
+            except ValueError:
+                pass
+
+        # Coerce array fields that might be JSON strings
+        array_fields = {'labels', 'tags', 'issue_numbers', 'comments'}
+        if key in array_fields and isinstance(value, str):
+            try:
+                parsed = json.loads(value)
+                if isinstance(parsed, list):
+                    coerced[key] = parsed
+                    continue
+            except json.JSONDecodeError:
+                pass
+
+        coerced[key] = value
+
+    return coerced
+
+
 class GiteaMCPServer:
     """MCP Server for Gitea integration"""
 
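This coercion is what makes the "type": ["integer", "string"] schema loosening in the hunks below safe: a string that arrives where an integer or array is expected is normalized before dispatch. For example:

args = {'issue_number': '42', 'labels': '["Type/Bug", "Priority/High"]', 'title': 'Fix crash'}
_coerce_types(args)
# -> {'issue_number': 42, 'labels': ['Type/Bug', 'Priority/High'], 'title': 'Fix crash'}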
@@ -37,6 +76,7 @@ class GiteaMCPServer:
|
|||||||
self.wiki_tools = None
|
self.wiki_tools = None
|
||||||
self.milestone_tools = None
|
self.milestone_tools = None
|
||||||
self.dependency_tools = None
|
self.dependency_tools = None
|
||||||
|
self.pr_tools = None
|
||||||
|
|
||||||
async def initialize(self):
|
async def initialize(self):
|
||||||
"""
|
"""
|
||||||
@@ -55,6 +95,7 @@ class GiteaMCPServer:
|
|||||||
self.wiki_tools = WikiTools(self.client)
|
self.wiki_tools = WikiTools(self.client)
|
||||||
self.milestone_tools = MilestoneTools(self.client)
|
self.milestone_tools = MilestoneTools(self.client)
|
||||||
self.dependency_tools = DependencyTools(self.client)
|
self.dependency_tools = DependencyTools(self.client)
|
||||||
|
self.pr_tools = PullRequestTools(self.client)
|
||||||
|
|
||||||
logger.info(f"Gitea MCP Server initialized in {self.config['mode']} mode")
|
logger.info(f"Gitea MCP Server initialized in {self.config['mode']} mode")
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
@@ -85,6 +126,10 @@ class GiteaMCPServer:
|
|||||||
"items": {"type": "string"},
|
"items": {"type": "string"},
|
||||||
"description": "Filter by labels"
|
"description": "Filter by labels"
|
||||||
},
|
},
|
||||||
|
"milestone": {
|
||||||
|
"type": "string",
|
||||||
|
"description": "Filter by milestone title (exact match)"
|
||||||
|
},
|
||||||
"repo": {
|
"repo": {
|
||||||
"type": "string",
|
"type": "string",
|
||||||
"description": "Repository name (for PMO mode)"
|
"description": "Repository name (for PMO mode)"
|
||||||
@@ -99,7 +144,7 @@ class GiteaMCPServer:
|
|||||||
"type": "object",
|
"type": "object",
|
||||||
"properties": {
|
"properties": {
|
||||||
"issue_number": {
|
"issue_number": {
|
||||||
"type": "integer",
|
"type": ["integer", "string"],
|
||||||
"description": "Issue number"
|
"description": "Issue number"
|
||||||
},
|
},
|
||||||
"repo": {
|
"repo": {
|
||||||
@@ -144,7 +189,7 @@ class GiteaMCPServer:
|
|||||||
"type": "object",
|
"type": "object",
|
||||||
"properties": {
|
"properties": {
|
||||||
"issue_number": {
|
"issue_number": {
|
||||||
"type": "integer",
|
"type": ["integer", "string"],
|
||||||
"description": "Issue number"
|
"description": "Issue number"
|
||||||
},
|
},
|
||||||
"title": {
|
"title": {
|
||||||
@@ -165,6 +210,10 @@ class GiteaMCPServer:
|
|||||||
"items": {"type": "string"},
|
"items": {"type": "string"},
|
||||||
"description": "New labels"
|
"description": "New labels"
|
||||||
},
|
},
|
||||||
|
"milestone": {
|
||||||
|
"type": ["integer", "string"],
|
||||||
|
"description": "Milestone ID to assign"
|
||||||
|
},
|
||||||
"repo": {
|
"repo": {
|
||||||
"type": "string",
|
"type": "string",
|
||||||
"description": "Repository name (for PMO mode)"
|
"description": "Repository name (for PMO mode)"
|
||||||
@@ -180,7 +229,7 @@ class GiteaMCPServer:
|
|||||||
"type": "object",
|
"type": "object",
|
||||||
"properties": {
|
"properties": {
|
||||||
"issue_number": {
|
"issue_number": {
|
||||||
"type": "integer",
|
"type": ["integer", "string"],
|
||||||
"description": "Issue number"
|
"description": "Issue number"
|
||||||
},
|
},
|
||||||
"comment": {
|
"comment": {
|
||||||
@@ -217,6 +266,10 @@ class GiteaMCPServer:
|
|||||||
"context": {
|
"context": {
|
||||||
"type": "string",
|
"type": "string",
|
||||||
"description": "Issue title + description or sprint context"
|
"description": "Issue title + description or sprint context"
|
||||||
|
},
|
||||||
|
"repo": {
|
||||||
|
"type": "string",
|
||||||
|
"description": "Repository name (owner/repo format)"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"required": ["context"]
|
"required": ["context"]
|
||||||
@@ -371,7 +424,7 @@ class GiteaMCPServer:
|
|||||||
"description": "Tags to filter by (optional)"
|
"description": "Tags to filter by (optional)"
|
||||||
},
|
},
|
||||||
"limit": {
|
"limit": {
|
||||||
"type": "integer",
|
"type": ["integer", "string"],
|
||||||
"default": 20,
|
"default": 20,
|
||||||
"description": "Maximum results"
|
"description": "Maximum results"
|
||||||
},
|
},
|
||||||
@@ -382,6 +435,19 @@ class GiteaMCPServer:
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
),
|
),
|
||||||
|
Tool(
|
||||||
|
name="allocate_rfc_number",
|
||||||
|
description="Allocate the next available RFC number by scanning existing RFC wiki pages",
|
||||||
|
inputSchema={
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"repo": {
|
||||||
|
"type": "string",
|
||||||
|
"description": "Repository name (owner/repo format)"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
),
|
||||||
# Milestone Tools
|
# Milestone Tools
|
||||||
Tool(
|
Tool(
|
||||||
name="list_milestones",
|
name="list_milestones",
|
||||||
@@ -409,7 +475,7 @@ class GiteaMCPServer:
|
|||||||
"type": "object",
|
"type": "object",
|
||||||
"properties": {
|
"properties": {
|
||||||
"milestone_id": {
|
"milestone_id": {
|
||||||
"type": "integer",
|
"type": ["integer", "string"],
|
||||||
"description": "Milestone ID"
|
"description": "Milestone ID"
|
||||||
},
|
},
|
||||||
"repo": {
|
"repo": {
|
||||||
@@ -453,7 +519,7 @@ class GiteaMCPServer:
|
|||||||
"type": "object",
|
"type": "object",
|
||||||
"properties": {
|
"properties": {
|
||||||
"milestone_id": {
|
"milestone_id": {
|
||||||
"type": "integer",
|
"type": ["integer", "string"],
|
||||||
"description": "Milestone ID"
|
"description": "Milestone ID"
|
||||||
},
|
},
|
||||||
"title": {
|
"title": {
|
||||||
@@ -488,7 +554,7 @@ class GiteaMCPServer:
                 "type": "object",
                 "properties": {
                     "milestone_id": {
-                        "type": "integer",
+                        "type": ["integer", "string"],
                         "description": "Milestone ID"
                     },
                     "repo": {
@@ -507,7 +573,7 @@ class GiteaMCPServer:
                 "type": "object",
                 "properties": {
                     "issue_number": {
-                        "type": "integer",
+                        "type": ["integer", "string"],
                         "description": "Issue number"
                     },
                     "repo": {
@@ -525,11 +591,11 @@ class GiteaMCPServer:
                 "type": "object",
                 "properties": {
                     "issue_number": {
-                        "type": "integer",
+                        "type": ["integer", "string"],
                         "description": "Issue that will depend on another"
                     },
                     "depends_on": {
-                        "type": "integer",
+                        "type": ["integer", "string"],
                         "description": "Issue that blocks issue_number"
                     },
                     "repo": {
@@ -547,11 +613,11 @@ class GiteaMCPServer:
                 "type": "object",
                 "properties": {
                     "issue_number": {
-                        "type": "integer",
+                        "type": ["integer", "string"],
                         "description": "Issue that depends on another"
                     },
                     "depends_on": {
-                        "type": "integer",
+                        "type": ["integer", "string"],
                         "description": "Issue being depended on"
                     },
                     "repo": {
@@ -615,13 +681,13 @@ class GiteaMCPServer:
             ),
             Tool(
                 name="create_label",
-                description="Create a new label in the repository",
+                description="Create a new label in the repository (for repo-specific labels like Component/*, Tech/*)",
                 inputSchema={
                     "type": "object",
                     "properties": {
                         "name": {
                             "type": "string",
-                            "description": "Label name"
+                            "description": "Label name (e.g., 'Component/Backend', 'Tech/Python')"
                         },
                         "color": {
                             "type": "string",
@@ -638,6 +704,240 @@ class GiteaMCPServer:
                         },
                     "required": ["name", "color"]
                 }
+            ),
+            Tool(
+                name="create_org_label",
+                description="Create a new label at organization level (for workflow labels like Type/*, Priority/*, Complexity/*, Effort/*)",
+                inputSchema={
+                    "type": "object",
+                    "properties": {
+                        "org": {
+                            "type": "string",
+                            "description": "Organization name"
+                        },
+                        "name": {
+                            "type": "string",
+                            "description": "Label name (e.g., 'Type/Bug', 'Priority/High')"
+                        },
+                        "color": {
+                            "type": "string",
+                            "description": "Label color (hex code)"
+                        },
+                        "description": {
+                            "type": "string",
+                            "description": "Label description"
+                        }
+                    },
+                    "required": ["org", "name", "color"]
+                }
+            ),
+            Tool(
+                name="create_label_smart",
+                description="Create a label at the appropriate level (org or repo) based on category. Org: Type/*, Priority/*, Complexity/*, Effort/*, Risk/*, Source/*, Agent/*. Repo: Component/*, Tech/*",
+                inputSchema={
+                    "type": "object",
+                    "properties": {
+                        "name": {
+                            "type": "string",
+                            "description": "Label name (e.g., 'Type/Bug', 'Component/Backend')"
+                        },
+                        "color": {
+                            "type": "string",
+                            "description": "Label color (hex code)"
+                        },
+                        "description": {
+                            "type": "string",
+                            "description": "Label description"
+                        },
+                        "repo": {
+                            "type": "string",
+                            "description": "Repository name (owner/repo format)"
+                        }
+                    },
+                    "required": ["name", "color"]
+                }
+            ),
+            # Pull Request Tools
+            Tool(
+                name="list_pull_requests",
+                description="List pull requests from repository",
+                inputSchema={
+                    "type": "object",
+                    "properties": {
+                        "state": {
+                            "type": "string",
+                            "enum": ["open", "closed", "all"],
+                            "default": "open",
+                            "description": "PR state filter"
+                        },
+                        "sort": {
+                            "type": "string",
+                            "enum": ["oldest", "recentupdate", "leastupdate", "mostcomment", "leastcomment", "priority"],
+                            "default": "recentupdate",
+                            "description": "Sort order"
+                        },
+                        "labels": {
+                            "type": "array",
+                            "items": {"type": "string"},
+                            "description": "Filter by labels"
+                        },
+                        "repo": {
+                            "type": "string",
+                            "description": "Repository name (owner/repo format)"
+                        }
+                    }
+                }
+            ),
+            Tool(
+                name="get_pull_request",
+                description="Get specific pull request details",
+                inputSchema={
+                    "type": "object",
+                    "properties": {
+                        "pr_number": {
+                            "type": ["integer", "string"],
+                            "description": "Pull request number"
+                        },
+                        "repo": {
+                            "type": "string",
+                            "description": "Repository name (owner/repo format)"
+                        }
+                    },
+                    "required": ["pr_number"]
+                }
+            ),
+            Tool(
+                name="get_pr_diff",
+                description="Get the diff for a pull request",
+                inputSchema={
+                    "type": "object",
+                    "properties": {
+                        "pr_number": {
+                            "type": ["integer", "string"],
+                            "description": "Pull request number"
+                        },
+                        "repo": {
+                            "type": "string",
+                            "description": "Repository name (owner/repo format)"
+                        }
+                    },
+                    "required": ["pr_number"]
+                }
+            ),
+            Tool(
+                name="get_pr_comments",
+                description="Get comments on a pull request",
+                inputSchema={
+                    "type": "object",
+                    "properties": {
+                        "pr_number": {
+                            "type": ["integer", "string"],
+                            "description": "Pull request number"
+                        },
+                        "repo": {
+                            "type": "string",
+                            "description": "Repository name (owner/repo format)"
+                        }
+                    },
+                    "required": ["pr_number"]
+                }
+            ),
+            Tool(
+                name="create_pr_review",
+                description="Create a review on a pull request (approve, request changes, or comment)",
+                inputSchema={
+                    "type": "object",
+                    "properties": {
+                        "pr_number": {
+                            "type": ["integer", "string"],
+                            "description": "Pull request number"
+                        },
+                        "body": {
+                            "type": "string",
+                            "description": "Review body/summary"
+                        },
+                        "event": {
+                            "type": "string",
+                            "enum": ["APPROVE", "REQUEST_CHANGES", "COMMENT"],
+                            "default": "COMMENT",
+                            "description": "Review action"
+                        },
+                        "comments": {
+                            "type": "array",
+                            "items": {
+                                "type": "object",
+                                "properties": {
+                                    "path": {"type": "string"},
+                                    "position": {"type": ["integer", "string"]},
+                                    "body": {"type": "string"}
+                                }
+                            },
+                            "description": "Optional inline comments"
+                        },
+                        "repo": {
+                            "type": "string",
+                            "description": "Repository name (owner/repo format)"
+                        }
+                    },
+                    "required": ["pr_number", "body"]
+                }
+            ),
+            Tool(
+                name="add_pr_comment",
+                description="Add a general comment to a pull request",
+                inputSchema={
+                    "type": "object",
+                    "properties": {
+                        "pr_number": {
+                            "type": ["integer", "string"],
+                            "description": "Pull request number"
+                        },
+                        "body": {
+                            "type": "string",
+                            "description": "Comment text"
+                        },
+                        "repo": {
+                            "type": "string",
+                            "description": "Repository name (owner/repo format)"
+                        }
+                    },
+                    "required": ["pr_number", "body"]
+                }
+            ),
+            Tool(
+                name="create_pull_request",
+                description="Create a new pull request",
+                inputSchema={
+                    "type": "object",
+                    "properties": {
+                        "title": {
+                            "type": "string",
+                            "description": "PR title"
+                        },
+                        "body": {
+                            "type": "string",
+                            "description": "PR description/body"
+                        },
+                        "head": {
+                            "type": "string",
+                            "description": "Source branch name (the branch with changes)"
+                        },
+                        "base": {
+                            "type": "string",
+                            "description": "Target branch name (the branch to merge into)"
+                        },
+                        "labels": {
+                            "type": "array",
+                            "items": {"type": "string"},
+                            "description": "Optional list of label names"
+                        },
+                        "repo": {
+                            "type": "string",
+                            "description": "Repository name (owner/repo format)"
+                        }
+                    },
+                    "required": ["title", "body", "head", "base"]
+                }
             )
         ]
 
@@ -654,6 +954,9 @@ class GiteaMCPServer:
             List of TextContent with results
         """
         try:
+            # Coerce types to handle MCP serialization quirks
+            arguments = _coerce_types(arguments)
+
             # Route to appropriate tool handler
             if name == "list_issues":
                 result = await self.issue_tools.list_issues(**arguments)
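`_coerce_types` itself is defined outside this hunk, so its body is not visible here. A minimal sketch of what such a coercion pass might look like, assuming it only needs to turn numeric strings back into integers (hypothetical; the real helper may well consult the tool schema instead of guessing from the value shape):

```python
import re

_INT_RE = re.compile(r'-?\d+')

def _coerce_types(arguments: dict) -> dict:
    """Hypothetical sketch: normalize values that MCP clients serialized as strings."""
    coerced = {}
    for key, value in arguments.items():
        if isinstance(value, str) and _INT_RE.fullmatch(value):
            coerced[key] = int(value)  # "42" -> 42, "-7" -> -7
        else:
            coerced[key] = value
    return coerced
```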
@@ -690,6 +993,10 @@ class GiteaMCPServer:
                     limit=arguments.get('limit', 20),
                     repo=arguments.get('repo')
                 )
+            elif name == "allocate_rfc_number":
+                result = await self.wiki_tools.allocate_rfc_number(
+                    repo=arguments.get('repo')
+                )
             # Milestone tools
             elif name == "list_milestones":
                 result = await self.milestone_tools.list_milestones(**arguments)
@@ -726,6 +1033,35 @@ class GiteaMCPServer:
                     arguments.get('description'),
                     arguments.get('repo')
                 )
+            elif name == "create_org_label":
+                result = self.client.create_org_label(
+                    arguments['org'],
+                    arguments['name'],
+                    arguments['color'],
+                    arguments.get('description')
+                )
+            elif name == "create_label_smart":
+                result = await self.label_tools.create_label_smart(
+                    arguments['name'],
+                    arguments['color'],
+                    arguments.get('description'),
+                    arguments.get('repo')
+                )
+            # Pull Request tools
+            elif name == "list_pull_requests":
+                result = await self.pr_tools.list_pull_requests(**arguments)
+            elif name == "get_pull_request":
+                result = await self.pr_tools.get_pull_request(**arguments)
+            elif name == "get_pr_diff":
+                result = await self.pr_tools.get_pr_diff(**arguments)
+            elif name == "get_pr_comments":
+                result = await self.pr_tools.get_pr_comments(**arguments)
+            elif name == "create_pr_review":
+                result = await self.pr_tools.create_pr_review(**arguments)
+            elif name == "add_pr_comment":
+                result = await self.pr_tools.add_pr_comment(**arguments)
+            elif name == "create_pull_request":
+                result = await self.pr_tools.create_pull_request(**arguments)
             else:
                 raise ValueError(f"Unknown tool: {name}")
 
@@ -4,4 +4,8 @@ MCP tools for Gitea integration.
 This package provides MCP tool implementations for:
 - Issue operations (issues.py)
 - Label management (labels.py)
+- Wiki operations (wiki.py)
+- Milestone management (milestones.py)
+- Issue dependencies (dependencies.py)
+- Pull request operations (pull_requests.py)
 """
@@ -7,6 +7,7 @@ Provides async wrappers for issue CRUD operations with:
 - Comprehensive error handling
 """
 import asyncio
+import os
 import subprocess
 import logging
 from typing import List, Dict, Optional
@@ -27,19 +28,34 @@ class IssueTools:
         """
         self.gitea = gitea_client
 
+    def _get_project_directory(self) -> Optional[str]:
+        """
+        Get the user's project directory from environment.
+
+        Returns:
+            Project directory path or None if not set
+        """
+        return os.environ.get('CLAUDE_PROJECT_DIR')
+
     def _get_current_branch(self) -> str:
         """
-        Get current git branch.
+        Get current git branch from user's project directory.
 
+        Uses CLAUDE_PROJECT_DIR environment variable to determine the correct
+        directory for git operations, avoiding the bug where git runs from
+        the installed plugin directory instead of the user's project.
+
         Returns:
             Current branch name or 'unknown' if not in a git repo
         """
         try:
+            project_dir = self._get_project_directory()
             result = subprocess.run(
                 ['git', 'rev-parse', '--abbrev-ref', 'HEAD'],
                 capture_output=True,
                 text=True,
-                check=True
+                check=True,
+                cwd=project_dir  # Run git in project directory, not plugin directory
             )
             return result.stdout.strip()
         except subprocess.CalledProcessError:
@@ -66,7 +82,13 @@ class IssueTools:
             return operation in ['list_issues', 'get_issue', 'get_labels', 'create_issue']
 
         # Development branches (full access)
-        if branch in ['development', 'develop'] or branch.startswith(('feat/', 'feature/', 'dev/')):
+        # Include all common feature/fix branch patterns
+        dev_prefixes = (
+            'feat/', 'feature/', 'dev/',
+            'fix/', 'bugfix/', 'hotfix/',
+            'chore/', 'refactor/', 'docs/', 'test/'
+        )
+        if branch in ['development', 'develop'] or branch.startswith(dev_prefixes):
             return True
 
         # Unknown branch - be restrictive
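The widened prefix tuple can be exercised without a real Gitea client, since the branch check only consults git. A quick sketch, stubbing `_get_current_branch` (the stub and the `None` client are test conveniences here, not part of the diff):

```python
from mcp_server.tools.issues import IssueTools

tools = IssueTools(gitea_client=None)  # the client is unused by the branch check
tools._get_current_branch = lambda: 'bugfix/issue-42'  # stub out the git subprocess call

# bugfix/* is now treated as a development branch with full access
assert tools._check_branch_permissions('update_issue') is True
```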
@@ -76,6 +98,7 @@ class IssueTools:
         self,
         state: str = 'open',
         labels: Optional[List[str]] = None,
+        milestone: Optional[str] = None,
         repo: Optional[str] = None
     ) -> List[Dict]:
         """
@@ -84,6 +107,7 @@ class IssueTools:
         Args:
             state: Issue state (open, closed, all)
             labels: Filter by labels
+            milestone: Filter by milestone title (exact match)
             repo: Override configured repo (for PMO multi-repo)
 
         Returns:
@@ -102,7 +126,7 @@ class IssueTools:
         loop = asyncio.get_event_loop()
         return await loop.run_in_executor(
             None,
-            lambda: self.gitea.list_issues(state, labels, repo)
+            lambda: self.gitea.list_issues(state, labels, milestone, repo)
         )
 
     async def get_issue(
@@ -178,6 +202,7 @@ class IssueTools:
         body: Optional[str] = None,
         state: Optional[str] = None,
         labels: Optional[List[str]] = None,
+        milestone: Optional[int] = None,
         repo: Optional[str] = None
     ) -> Dict:
         """
@@ -189,6 +214,7 @@ class IssueTools:
             body: New body (optional)
             state: New state - 'open' or 'closed' (optional)
             labels: New labels (optional)
+            milestone: Milestone ID to assign (optional)
             repo: Override configured repo (for PMO multi-repo)
 
         Returns:
@@ -207,7 +233,7 @@ class IssueTools:
         loop = asyncio.get_event_loop()
         return await loop.run_in_executor(
             None,
-            lambda: self.gitea.update_issue(issue_number, title, body, state, labels, repo)
+            lambda: self.gitea.update_issue(issue_number, title, body, state, labels, milestone, repo)
         )
 
     async def add_comment(
mcp-servers/gitea/mcp_server/tools/labels.py  (new file, 377 lines)
@@ -0,0 +1,377 @@
+"""
+Label management tools for MCP server.
+
+Provides async wrappers for label operations with:
+- Label taxonomy retrieval
+- Intelligent label suggestion
+- Dynamic label detection
+"""
+import asyncio
+import logging
+import re
+from typing import List, Dict, Optional
+
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+
+class LabelTools:
+    """Async wrappers for Gitea label operations"""
+
+    def __init__(self, gitea_client):
+        """
+        Initialize label tools.
+
+        Args:
+            gitea_client: GiteaClient instance
+        """
+        self.gitea = gitea_client
+
+    async def get_labels(self, repo: Optional[str] = None) -> Dict[str, List[Dict]]:
+        """Get all labels (org + repo if org-owned, repo-only if user-owned)."""
+        loop = asyncio.get_event_loop()
+
+        target_repo = repo or self.gitea.repo
+        if not target_repo or '/' not in target_repo:
+            raise ValueError("Use 'owner/repo' format (e.g. 'org/repo-name')")
+
+        # Check if repo belongs to an organization or user
+        is_org = await loop.run_in_executor(
+            None,
+            lambda: self.gitea.is_org_repo(target_repo)
+        )
+
+        org_labels = []
+        if is_org:
+            org = target_repo.split('/')[0]
+            org_labels = await loop.run_in_executor(
+                None,
+                lambda: self.gitea.get_org_labels(org)
+            )
+
+        repo_labels = await loop.run_in_executor(
+            None,
+            lambda: self.gitea.get_labels(target_repo)
+        )
+
+        return {
+            'organization': org_labels,
+            'repository': repo_labels,
+            'total_count': len(org_labels) + len(repo_labels)
+        }
+
+    async def suggest_labels(self, context: str, repo: Optional[str] = None) -> List[str]:
+        """
+        Analyze context and suggest appropriate labels from the repository's actual labels.
+
+        This method fetches actual labels from the repository and matches them
+        dynamically, supporting any label naming convention (slash, colon-space, etc.).
+
+        Args:
+            context: Issue title + description or sprint context
+            repo: Repository in 'owner/repo' format (optional, uses default if not provided)
+
+        Returns:
+            List of suggested label names that exist in the repository
+        """
+        # Fetch actual labels from repository
+        target_repo = repo or self.gitea.repo
+        if not target_repo:
+            logger.warning("No repository specified, returning empty suggestions")
+            return []
+
+        try:
+            labels_data = await self.get_labels(target_repo)
+            all_labels = labels_data.get('organization', []) + labels_data.get('repository', [])
+            label_names = [label['name'] for label in all_labels]
+        except Exception as e:
+            logger.warning(f"Failed to fetch labels: {e}. Using fallback suggestions.")
+            label_names = []
+
+        # Build label lookup for dynamic matching
+        label_lookup = self._build_label_lookup(label_names)
+
+        suggested = []
+        context_lower = context.lower()
+
+        # Type detection (exclusive - only one)
+        type_label = None
+        if any(word in context_lower for word in ['bug', 'error', 'fix', 'broken', 'crash', 'fail']):
+            type_label = self._find_label(label_lookup, 'type', 'bug')
+        elif any(word in context_lower for word in ['refactor', 'extract', 'restructure', 'architecture', 'service extraction']):
+            type_label = self._find_label(label_lookup, 'type', 'refactor')
+        elif any(word in context_lower for word in ['feature', 'add', 'implement', 'new', 'create']):
+            type_label = self._find_label(label_lookup, 'type', 'feature')
+        elif any(word in context_lower for word in ['docs', 'documentation', 'readme', 'guide']):
+            type_label = self._find_label(label_lookup, 'type', 'documentation')
+        elif any(word in context_lower for word in ['test', 'testing', 'spec', 'coverage']):
+            type_label = self._find_label(label_lookup, 'type', 'test')
+        elif any(word in context_lower for word in ['chore', 'maintenance', 'update', 'upgrade']):
+            type_label = self._find_label(label_lookup, 'type', 'chore')
+        if type_label:
+            suggested.append(type_label)
+
+        # Priority detection
+        priority_label = None
+        if any(word in context_lower for word in ['critical', 'urgent', 'blocker', 'blocking', 'emergency']):
+            priority_label = self._find_label(label_lookup, 'priority', 'critical')
+        elif any(word in context_lower for word in ['high', 'important', 'asap', 'soon']):
+            priority_label = self._find_label(label_lookup, 'priority', 'high')
+        elif any(word in context_lower for word in ['low', 'nice-to-have', 'optional', 'later']):
+            priority_label = self._find_label(label_lookup, 'priority', 'low')
+        else:
+            priority_label = self._find_label(label_lookup, 'priority', 'medium')
+        if priority_label:
+            suggested.append(priority_label)
+
+        # Complexity detection
+        complexity_label = None
+        if any(word in context_lower for word in ['simple', 'trivial', 'easy', 'quick']):
+            complexity_label = self._find_label(label_lookup, 'complexity', 'simple')
+        elif any(word in context_lower for word in ['complex', 'difficult', 'challenging', 'intricate']):
+            complexity_label = self._find_label(label_lookup, 'complexity', 'complex')
+        else:
+            complexity_label = self._find_label(label_lookup, 'complexity', 'medium')
+        if complexity_label:
+            suggested.append(complexity_label)
+
+        # Effort detection (supports both "Effort" and "Efforts" naming)
+        effort_label = None
+        if any(word in context_lower for word in ['xs', 'tiny', '1 hour', '2 hours']):
+            effort_label = self._find_label(label_lookup, 'effort', 'xs')
+        elif any(word in context_lower for word in ['small', 's ', '1 day', 'half day']):
+            effort_label = self._find_label(label_lookup, 'effort', 's')
+        elif any(word in context_lower for word in ['medium', 'm ', '2 days', '3 days']):
+            effort_label = self._find_label(label_lookup, 'effort', 'm')
+        elif any(word in context_lower for word in ['large', 'l ', '1 week', '5 days']):
+            effort_label = self._find_label(label_lookup, 'effort', 'l')
+        elif any(word in context_lower for word in ['xl', 'extra large', '2 weeks', 'sprint']):
+            effort_label = self._find_label(label_lookup, 'effort', 'xl')
+        if effort_label:
+            suggested.append(effort_label)
+
+        # Component detection (based on keywords)
+        component_mappings = {
+            'backend': ['backend', 'server', 'api', 'database', 'service'],
+            'frontend': ['frontend', 'ui', 'interface', 'react', 'vue', 'component'],
+            'api': ['api', 'endpoint', 'rest', 'graphql', 'route'],
+            'database': ['database', 'db', 'sql', 'migration', 'schema', 'postgres'],
+            'auth': ['auth', 'authentication', 'login', 'oauth', 'token', 'session'],
+            'deploy': ['deploy', 'deployment', 'docker', 'kubernetes', 'ci/cd'],
+            'testing': ['test', 'testing', 'spec', 'jest', 'pytest', 'coverage'],
+            'docs': ['docs', 'documentation', 'readme', 'guide', 'wiki']
+        }
+
+        for component, keywords in component_mappings.items():
+            if any(keyword in context_lower for keyword in keywords):
+                label = self._find_label(label_lookup, 'component', component)
+                if label and label not in suggested:
+                    suggested.append(label)
+
+        # Tech stack detection
+        tech_mappings = {
+            'python': ['python', 'fastapi', 'django', 'flask', 'pytest'],
+            'javascript': ['javascript', 'js', 'node', 'npm', 'yarn'],
+            'docker': ['docker', 'dockerfile', 'container', 'compose'],
+            'postgresql': ['postgres', 'postgresql', 'psql', 'sql'],
+            'redis': ['redis', 'cache', 'session store'],
+            'vue': ['vue', 'vuejs', 'nuxt'],
+            'fastapi': ['fastapi', 'pydantic', 'starlette']
+        }
+
+        for tech, keywords in tech_mappings.items():
+            if any(keyword in context_lower for keyword in keywords):
+                label = self._find_label(label_lookup, 'tech', tech)
+                if label and label not in suggested:
+                    suggested.append(label)
+
+        # Source detection (based on git branch or context)
+        source_label = None
+        if 'development' in context_lower or 'dev/' in context_lower:
+            source_label = self._find_label(label_lookup, 'source', 'development')
+        elif 'staging' in context_lower or 'stage/' in context_lower:
+            source_label = self._find_label(label_lookup, 'source', 'staging')
+        elif 'production' in context_lower or 'prod' in context_lower:
+            source_label = self._find_label(label_lookup, 'source', 'production')
+        if source_label:
+            suggested.append(source_label)
+
+        # Risk detection
+        risk_label = None
+        if any(word in context_lower for word in ['breaking', 'breaking change', 'major', 'risky']):
+            risk_label = self._find_label(label_lookup, 'risk', 'high')
+        elif any(word in context_lower for word in ['safe', 'low risk', 'minor']):
+            risk_label = self._find_label(label_lookup, 'risk', 'low')
+        if risk_label:
+            suggested.append(risk_label)
+
+        logger.info(f"Suggested {len(suggested)} labels based on context and {len(label_names)} available labels")
+        return suggested
+
+    def _build_label_lookup(self, label_names: List[str]) -> Dict[str, Dict[str, str]]:
+        """
+        Build a lookup dictionary for label matching.
+
+        Supports various label formats:
+        - Slash format: Type/Bug, Priority/High
+        - Colon-space format: Type: Bug, Priority: High
+        - Colon format: Type:Bug
+
+        Args:
+            label_names: List of actual label names from repository
+
+        Returns:
+            Nested dict: {category: {value: actual_label_name}}
+        """
+        lookup: Dict[str, Dict[str, str]] = {}
+
+        for label in label_names:
+            # Try different separator patterns
+            # Pattern: Category<separator>Value
+            # Separators: "/", ": ", ":"
+            match = re.match(r'^([^/:]+)(?:/|:\s*|:)(.+)$', label)
+            if match:
+                category = match.group(1).lower().rstrip('s')  # Normalize: "Efforts" -> "effort"
+                value = match.group(2).lower()
+
+                if category not in lookup:
+                    lookup[category] = {}
+                lookup[category][value] = label
+
+        return lookup
+
+    def _find_label(self, lookup: Dict[str, Dict[str, str]], category: str, value: str) -> Optional[str]:
+        """
+        Find actual label name from lookup.
+
+        Args:
+            lookup: Label lookup dictionary
+            category: Category to search (e.g., 'type', 'priority')
+            value: Value to find (e.g., 'bug', 'high')
+
+        Returns:
+            Actual label name if found, None otherwise
+        """
+        category_lower = category.lower().rstrip('s')  # Normalize
+        value_lower = value.lower()
+
+        if category_lower in lookup and value_lower in lookup[category_lower]:
+            return lookup[category_lower][value_lower]
+
+        return None
+
+    # Organization-level label categories (workflow labels shared across repos)
+    ORG_LABEL_CATEGORIES = {'agent', 'complexity', 'effort', 'efforts', 'priority', 'risk', 'source', 'type'}
+
+    # Repository-level label categories (project-specific labels)
+    REPO_LABEL_CATEGORIES = {'component', 'tech'}
+
+    async def create_label_smart(
+        self,
+        name: str,
+        color: str,
+        description: Optional[str] = None,
+        repo: Optional[str] = None
+    ) -> Dict:
+        """
+        Create a label at the appropriate level (org or repo) based on category.
+        Skips if the label already exists (checks both org and repo levels).
+
+        Organization labels: Agent, Complexity, Effort, Priority, Risk, Source, Type
+        Repository labels: Component, Tech
+
+        Args:
+            name: Label name (e.g., 'Type/Bug', 'Component/Backend')
+            color: Hex color code
+            description: Optional label description
+            repo: Repository in 'owner/repo' format
+
+        Returns:
+            Created label dictionary with 'level' key, or 'skipped' if already exists
+        """
+        loop = asyncio.get_event_loop()
+
+        target_repo = repo or self.gitea.repo
+        if not target_repo or '/' not in target_repo:
+            raise ValueError("Use 'owner/repo' format (e.g. 'org/repo-name')")
+
+        owner = target_repo.split('/')[0]
+        is_org = await loop.run_in_executor(
+            None,
+            lambda: self.gitea.is_org_repo(target_repo)
+        )
+
+        # Fetch existing labels to check for duplicates
+        existing_labels = await self.get_labels(target_repo)
+        all_existing = existing_labels.get('organization', []) + existing_labels.get('repository', [])
+        existing_names = [label['name'].lower() for label in all_existing]
+
+        # Normalize the new label name for comparison
+        name_normalized = name.lower()
+
+        # Also check for format variations (Type/Bug vs Type: Bug)
+        name_variations = [name_normalized]
+        if '/' in name:
+            name_variations.append(name.replace('/', ': ').lower())
+            name_variations.append(name.replace('/', ':').lower())
+        elif ': ' in name:
+            name_variations.append(name.replace(': ', '/').lower())
+        elif ':' in name:
+            name_variations.append(name.replace(':', '/').lower())
+
+        # Check if label already exists in any format
+        for variation in name_variations:
+            if variation in existing_names:
+                logger.info(f"Label '{name}' already exists (found as '{variation}'), skipping")
+                return {
+                    'name': name,
+                    'skipped': True,
+                    'reason': "Label already exists",
+                    'level': 'existing'
+                }
+
+        # Parse category from label name
+        category = None
+        if '/' in name:
+            category = name.split('/')[0].lower().rstrip('s')
+        elif ':' in name:
+            category = name.split(':')[0].strip().lower().rstrip('s')
+
+        # If it's an org repo and the category is an org-level category, create at org level
+        if is_org and category in self.ORG_LABEL_CATEGORIES:
+            result = await loop.run_in_executor(
+                None,
+                lambda: self.gitea.create_org_label(owner, name, color, description)
+            )
+            # Handle unexpected response types (API may return list or non-dict)
+            if not isinstance(result, dict):
+                logger.error(f"Unexpected API response type for org label: {type(result)} - {result}")
+                return {
+                    'name': name,
+                    'error': True,
+                    'reason': f"API returned {type(result).__name__} instead of dict: {result}",
+                    'level': 'organization'
+                }
+            result['level'] = 'organization'
+            result['skipped'] = False
+            logger.info(f"Created organization label '{name}' in {owner}")
+        else:
+            # Create at repo level
+            result = await loop.run_in_executor(
+                None,
+                lambda: self.gitea.create_label(name, color, description, target_repo)
+            )
+            # Handle unexpected response types (API may return list or non-dict)
+            if not isinstance(result, dict):
+                logger.error(f"Unexpected API response type for repo label: {type(result)} - {result}")
+                return {
+                    'name': name,
+                    'error': True,
+                    'reason': f"API returned {type(result).__name__} instead of dict: {result}",
+                    'level': 'repository'
+                }
+            result['level'] = 'repository'
+            result['skipped'] = False
+            logger.info(f"Created repository label '{name}' in {target_repo}")
+
+        return result
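The separator-tolerant lookup above can be illustrated in a few lines. A small sketch run against the class as added in this file (the `Mock` client is a stand-in; `_build_label_lookup` never touches it):

```python
from unittest.mock import Mock
from mcp_server.tools.labels import LabelTools

tools = LabelTools(Mock())
lookup = tools._build_label_lookup(['Type/Bug', 'Priority: High', 'Efforts:XL'])

assert lookup['type']['bug'] == 'Type/Bug'              # slash format
assert lookup['priority']['high'] == 'Priority: High'   # colon-space format
assert lookup['effort']['xl'] == 'Efforts:XL'           # "Efforts" normalized to "effort"
assert tools._find_label(lookup, 'Effort', 'XL') == 'Efforts:XL'
```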
mcp-servers/gitea/mcp_server/tools/pull_requests.py  (new file, 335 lines)
@@ -0,0 +1,335 @@
+"""
+Pull request management tools for MCP server.
+
+Provides async wrappers for PR operations with:
+- Branch-aware security
+- PMO multi-repo support
+- Comprehensive error handling
+"""
+import asyncio
+import os
+import subprocess
+import logging
+from typing import List, Dict, Optional
+
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+
+class PullRequestTools:
+    """Async wrappers for Gitea pull request operations with branch detection"""
+
+    def __init__(self, gitea_client):
+        """
+        Initialize pull request tools.
+
+        Args:
+            gitea_client: GiteaClient instance
+        """
+        self.gitea = gitea_client
+
+    def _get_project_directory(self) -> Optional[str]:
+        """
+        Get the user's project directory from environment.
+
+        Returns:
+            Project directory path or None if not set
+        """
+        return os.environ.get('CLAUDE_PROJECT_DIR')
+
+    def _get_current_branch(self) -> str:
+        """
+        Get current git branch from user's project directory.
+
+        Uses CLAUDE_PROJECT_DIR environment variable to determine the correct
+        directory for git operations, avoiding the bug where git runs from
+        the installed plugin directory instead of the user's project.
+
+        Returns:
+            Current branch name or 'unknown' if not in a git repo
+        """
+        try:
+            project_dir = self._get_project_directory()
+            result = subprocess.run(
+                ['git', 'rev-parse', '--abbrev-ref', 'HEAD'],
+                capture_output=True,
+                text=True,
+                check=True,
+                cwd=project_dir  # Run git in project directory, not plugin directory
+            )
+            return result.stdout.strip()
+        except subprocess.CalledProcessError:
+            return "unknown"
+
+    def _check_branch_permissions(self, operation: str) -> bool:
+        """
+        Check if operation is allowed on current branch.
+
+        Args:
+            operation: Operation name (list_prs, create_review, etc.)
+
+        Returns:
+            True if operation is allowed, False otherwise
+        """
+        branch = self._get_current_branch()
+
+        # Read-only operations allowed everywhere
+        read_ops = ['list_pull_requests', 'get_pull_request', 'get_pr_diff', 'get_pr_comments']
+
+        # Production branches (read-only)
+        if branch in ['main', 'master'] or branch.startswith('prod/'):
+            return operation in read_ops
+
+        # Staging branches (read-only for PRs, can comment)
+        if branch == 'staging' or branch.startswith('stage/'):
+            return operation in read_ops + ['add_pr_comment']
+
+        # Development branches (full access)
+        # Include all common feature/fix branch patterns
+        dev_prefixes = (
+            'feat/', 'feature/', 'dev/',
+            'fix/', 'bugfix/', 'hotfix/',
+            'chore/', 'refactor/', 'docs/', 'test/'
+        )
+        if branch in ['development', 'develop'] or branch.startswith(dev_prefixes):
+            return True
+
+        # Unknown branch - be restrictive
+        return operation in read_ops
+
+    async def list_pull_requests(
+        self,
+        state: str = 'open',
+        sort: str = 'recentupdate',
+        labels: Optional[List[str]] = None,
+        repo: Optional[str] = None
+    ) -> List[Dict]:
+        """
+        List pull requests from repository (async wrapper).
+
+        Args:
+            state: PR state (open, closed, all)
+            sort: Sort order
+            labels: Filter by labels
+            repo: Override configured repo (for PMO multi-repo)
+
+        Returns:
+            List of pull request dictionaries
+
+        Raises:
+            PermissionError: If operation not allowed on current branch
+        """
+        if not self._check_branch_permissions('list_pull_requests'):
+            branch = self._get_current_branch()
+            raise PermissionError(
+                f"Cannot list PRs on branch '{branch}'. "
+                f"Switch to a development branch."
+            )
+
+        loop = asyncio.get_event_loop()
+        return await loop.run_in_executor(
+            None,
+            lambda: self.gitea.list_pull_requests(state, sort, labels, repo)
+        )
+
+    async def get_pull_request(
+        self,
+        pr_number: int,
+        repo: Optional[str] = None
+    ) -> Dict:
+        """
+        Get specific pull request details (async wrapper).
+
+        Args:
+            pr_number: Pull request number
+            repo: Override configured repo (for PMO multi-repo)
+
+        Returns:
+            Pull request dictionary
+
+        Raises:
+            PermissionError: If operation not allowed on current branch
+        """
+        if not self._check_branch_permissions('get_pull_request'):
+            branch = self._get_current_branch()
+            raise PermissionError(
+                f"Cannot get PR on branch '{branch}'. "
+                f"Switch to a development branch."
+            )
+
+        loop = asyncio.get_event_loop()
+        return await loop.run_in_executor(
+            None,
+            lambda: self.gitea.get_pull_request(pr_number, repo)
+        )
+
+    async def get_pr_diff(
+        self,
+        pr_number: int,
+        repo: Optional[str] = None
+    ) -> str:
+        """
+        Get pull request diff (async wrapper).
+
+        Args:
+            pr_number: Pull request number
+            repo: Override configured repo (for PMO multi-repo)
+
+        Returns:
+            Diff as string
+
+        Raises:
+            PermissionError: If operation not allowed on current branch
+        """
+        if not self._check_branch_permissions('get_pr_diff'):
+            branch = self._get_current_branch()
+            raise PermissionError(
+                f"Cannot get PR diff on branch '{branch}'. "
+                f"Switch to a development branch."
+            )
+
+        loop = asyncio.get_event_loop()
+        return await loop.run_in_executor(
+            None,
+            lambda: self.gitea.get_pr_diff(pr_number, repo)
+        )
+
+    async def get_pr_comments(
+        self,
+        pr_number: int,
+        repo: Optional[str] = None
+    ) -> List[Dict]:
+        """
+        Get comments on a pull request (async wrapper).
+
+        Args:
+            pr_number: Pull request number
+            repo: Override configured repo (for PMO multi-repo)
+
+        Returns:
+            List of comment dictionaries
+
+        Raises:
+            PermissionError: If operation not allowed on current branch
+        """
+        if not self._check_branch_permissions('get_pr_comments'):
+            branch = self._get_current_branch()
+            raise PermissionError(
+                f"Cannot get PR comments on branch '{branch}'. "
+                f"Switch to a development branch."
+            )
+
+        loop = asyncio.get_event_loop()
+        return await loop.run_in_executor(
+            None,
+            lambda: self.gitea.get_pr_comments(pr_number, repo)
+        )
+
+    async def create_pr_review(
+        self,
+        pr_number: int,
+        body: str,
+        event: str = 'COMMENT',
+        comments: Optional[List[Dict]] = None,
+        repo: Optional[str] = None
+    ) -> Dict:
+        """
+        Create a review on a pull request (async wrapper with branch check).
+
+        Args:
+            pr_number: Pull request number
+            body: Review body/summary
+            event: Review action (APPROVE, REQUEST_CHANGES, COMMENT)
+            comments: Optional list of inline comments
+            repo: Override configured repo (for PMO multi-repo)
+
+        Returns:
+            Created review dictionary
+
+        Raises:
+            PermissionError: If operation not allowed on current branch
+        """
+        if not self._check_branch_permissions('create_pr_review'):
+            branch = self._get_current_branch()
+            raise PermissionError(
+                f"Cannot create PR review on branch '{branch}'. "
+                f"Switch to a development branch to review PRs."
+            )
+
+        loop = asyncio.get_event_loop()
+        return await loop.run_in_executor(
+            None,
+            lambda: self.gitea.create_pr_review(pr_number, body, event, comments, repo)
+        )
+
+    async def add_pr_comment(
+        self,
+        pr_number: int,
+        body: str,
+        repo: Optional[str] = None
+    ) -> Dict:
+        """
+        Add a general comment to a pull request (async wrapper with branch check).
+
+        Args:
+            pr_number: Pull request number
+            body: Comment text
+            repo: Override configured repo (for PMO multi-repo)
+
+        Returns:
+            Created comment dictionary
+
+        Raises:
+            PermissionError: If operation not allowed on current branch
+        """
+        if not self._check_branch_permissions('add_pr_comment'):
+            branch = self._get_current_branch()
+            raise PermissionError(
+                f"Cannot add PR comment on branch '{branch}'. "
+                f"Switch to a development or staging branch to comment on PRs."
+            )
+
+        loop = asyncio.get_event_loop()
+        return await loop.run_in_executor(
+            None,
+            lambda: self.gitea.add_pr_comment(pr_number, body, repo)
+        )
+
+    async def create_pull_request(
+        self,
+        title: str,
+        body: str,
+        head: str,
+        base: str,
+        labels: Optional[List[str]] = None,
+        repo: Optional[str] = None
+    ) -> Dict:
+        """
+        Create a new pull request (async wrapper with branch check).
+
+        Args:
+            title: PR title
+            body: PR description/body
+            head: Source branch name (the branch with changes)
+            base: Target branch name (the branch to merge into)
+            labels: Optional list of label names
+            repo: Override configured repo (for PMO multi-repo)
+
+        Returns:
+            Created pull request dictionary
+
+        Raises:
+            PermissionError: If operation not allowed on current branch
+        """
+        if not self._check_branch_permissions('create_pull_request'):
+            branch = self._get_current_branch()
+            raise PermissionError(
+                f"Cannot create PR on branch '{branch}'. "
+                f"Switch to a development or feature branch to create PRs."
+            )
+
+        loop = asyncio.get_event_loop()
+        return await loop.run_in_executor(
+            None,
+            lambda: self.gitea.create_pull_request(title, body, head, base, labels, repo)
+        )
@@ -4,9 +4,11 @@ Wiki management tools for MCP server.
 Provides async wrappers for wiki operations to support lessons learned:
 - Page CRUD operations
 - Lessons learned creation and search
+- RFC number allocation
 """
 import asyncio
 import logging
+import re
 from typing import List, Dict, Optional
 
 logging.basicConfig(level=logging.INFO)
@@ -147,3 +149,39 @@
             lambda: self.gitea.search_lessons(query, tags, repo)
         )
         return results[:limit]
+
+    async def allocate_rfc_number(self, repo: Optional[str] = None) -> Dict:
+        """
+        Allocate the next available RFC number.
+
+        Scans existing wiki pages for the RFC-NNNN pattern and returns
+        the next sequential number.
+
+        Args:
+            repo: Repository in owner/repo format
+
+        Returns:
+            Dict with 'next_number' (int) and 'formatted' (str like 'RFC-0001')
+        """
+        pages = await self.list_wiki_pages(repo)
+
+        # Extract RFC numbers from page titles
+        rfc_numbers = []
+        rfc_pattern = re.compile(r'^RFC-(\d{4})')
+
+        for page in pages:
+            title = page.get('title', '')
+            match = rfc_pattern.match(title)
+            if match:
+                rfc_numbers.append(int(match.group(1)))
+
+        # Calculate next number
+        if rfc_numbers:
+            next_num = max(rfc_numbers) + 1
+        else:
+            next_num = 1
+
+        return {
+            'next_number': next_num,
+            'formatted': f'RFC-{next_num:04d}'
+        }
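The allocation logic is easy to verify in isolation. The zero-padded formatting and the anchored scan pattern behave as follows (plain Python, no Gitea client needed):

```python
import re

next_num = 12
assert f'RFC-{next_num:04d}' == 'RFC-0012'  # zero-padded to four digits

# The pattern is anchored at the start, so only titles beginning with
# RFC-NNNN are counted toward the next number:
rfc_pattern = re.compile(r'^RFC-(\d{4})')
assert rfc_pattern.match('RFC-0007-streaming-design')
assert rfc_pattern.match('Draft-RFC-0007') is None
```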
mcp-servers/gitea/run.sh  (new executable file, 21 lines)
@@ -0,0 +1,21 @@
+#!/bin/bash
+# Capture original working directory before any cd operations
+# This should be the user's project directory when launched by Claude Code
+export CLAUDE_PROJECT_DIR="${CLAUDE_PROJECT_DIR:-$PWD}"
+
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+CACHE_VENV="$HOME/.cache/claude-mcp-venvs/leo-claude-mktplace/gitea/.venv"
+LOCAL_VENV="$SCRIPT_DIR/.venv"
+
+if [[ -f "$CACHE_VENV/bin/python" ]]; then
+    PYTHON="$CACHE_VENV/bin/python"
+elif [[ -f "$LOCAL_VENV/bin/python" ]]; then
+    PYTHON="$LOCAL_VENV/bin/python"
+else
+    echo "ERROR: No venv found. Run: ./scripts/setup-venvs.sh" >&2
+    exit 1
+fi
+
+cd "$SCRIPT_DIR"
+export PYTHONPATH="$SCRIPT_DIR"
+exec "$PYTHON" -m mcp_server.server "$@"
@@ -149,3 +149,112 @@ def test_mode_detection_company(tmp_path, monkeypatch):
 
     assert result['mode'] == 'company'
     assert result['repo'] is None
+
+
+# ========================================
+# GIT URL PARSING TESTS
+# ========================================
+
+def test_parse_git_url_ssh_format():
+    """Test parsing SSH format git URL"""
+    config = GiteaConfig()
+
+    # SSH with port: ssh://git@host:port/owner/repo.git
+    url = "ssh://git@hotserv.tailc9b278.ts.net:2222/personal-projects/personal-portfolio.git"
+    result = config._parse_git_url(url)
+    assert result == "personal-projects/personal-portfolio"
+
+
+def test_parse_git_url_ssh_short_format():
+    """Test parsing SSH short format git URL"""
+    config = GiteaConfig()
+
+    # SSH short: git@host:owner/repo.git
+    url = "git@github.com:owner/repo.git"
+    result = config._parse_git_url(url)
+    assert result == "owner/repo"
+
+
+def test_parse_git_url_https_format():
+    """Test parsing HTTPS format git URL"""
+    config = GiteaConfig()
+
+    # HTTPS: https://host/owner/repo.git
+    url = "https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace.git"
+    result = config._parse_git_url(url)
+    assert result == "personal-projects/leo-claude-mktplace"
+
+
+def test_parse_git_url_http_format():
+    """Test parsing HTTP format git URL"""
+    config = GiteaConfig()
+
+    # HTTP: http://host/owner/repo.git
+    url = "http://gitea.hotserv.cloud/personal-projects/repo.git"
+    result = config._parse_git_url(url)
+    assert result == "personal-projects/repo"
+
+
+def test_parse_git_url_without_git_suffix():
+    """Test parsing git URL without .git suffix"""
+    config = GiteaConfig()
+
+    url = "https://github.com/owner/repo"
+    result = config._parse_git_url(url)
+    assert result == "owner/repo"
+
+
+def test_parse_git_url_invalid_format():
+    """Test parsing invalid git URL returns None"""
+    config = GiteaConfig()
+
+    url = "not-a-valid-url"
+    result = config._parse_git_url(url)
+    assert result is None
+
+
+def test_find_project_directory_from_env(tmp_path, monkeypatch):
+    """Test finding project directory from CLAUDE_PROJECT_DIR env var"""
+    project_dir = tmp_path / 'my-project'
+    project_dir.mkdir()
+    (project_dir / '.git').mkdir()
+
+    monkeypatch.setenv('CLAUDE_PROJECT_DIR', str(project_dir))
+
+    config = GiteaConfig()
+    result = config._find_project_directory()
+
+    assert result == project_dir
+
+
+def test_find_project_directory_from_cwd(tmp_path, monkeypatch):
+    """Test finding project directory from cwd with .env file"""
+    project_dir = tmp_path / 'project'
+    project_dir.mkdir()
+    (project_dir / '.env').write_text("GITEA_REPO=test/repo")
+
+    monkeypatch.chdir(project_dir)
+    # Clear env vars that might interfere
+    monkeypatch.delenv('CLAUDE_PROJECT_DIR', raising=False)
+    monkeypatch.delenv('PWD', raising=False)
+
+    config = GiteaConfig()
+    result = config._find_project_directory()
+
+    assert result == project_dir
+
+
+def test_find_project_directory_none_when_no_markers(tmp_path, monkeypatch):
+    """Test returns None when no project markers found"""
+    empty_dir = tmp_path / 'empty'
+    empty_dir.mkdir()
+
+    monkeypatch.chdir(empty_dir)
+    monkeypatch.delenv('CLAUDE_PROJECT_DIR', raising=False)
+    monkeypatch.delenv('PWD', raising=False)
+    monkeypatch.delenv('GITEA_REPO', raising=False)
+
+    config = GiteaConfig()
+    result = config._find_project_directory()
+
+    assert result is None
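The implementation of `GiteaConfig._parse_git_url` is not part of this diff, only its expected behavior as pinned down by the tests above. A hypothetical sketch that satisfies all six cases, offered purely as an illustration of the parsing rules (the regexes and function shape here are assumptions, not the project's code):

```python
import re
from typing import Optional

def _parse_git_url(url: str) -> Optional[str]:
    """Return 'owner/repo' from common git remote URL formats, else None."""
    # SSH short form: git@host:owner/repo(.git)
    m = re.match(r'^git@[^:]+:([^/]+/[^/]+?)(?:\.git)?$', url)
    if m:
        return m.group(1)
    # ssh:// or http(s):// with optional user and port:
    # scheme://[user@]host[:port]/owner/repo(.git)
    m = re.match(r'^(?:ssh|https?)://(?:[^@/]+@)?[^/]+/([^/]+/[^/]+?)(?:\.git)?$', url)
    if m:
        return m.group(1)
    return None  # e.g. "not-a-valid-url"
```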
@@ -222,3 +222,47 @@ def test_no_repo_specified_error(gitea_client):
        client.list_issues()

    assert "Repository not specified" in str(exc_info.value)
+
+
+# ========================================
+# ORGANIZATION DETECTION TESTS
+# ========================================
+
+def test_is_organization_true(gitea_client):
+    """Test _is_organization returns True for valid organization"""
+    mock_response = Mock()
+    mock_response.status_code = 200
+
+    with patch.object(gitea_client.session, 'get', return_value=mock_response):
+        result = gitea_client._is_organization('personal-projects')
+
+    assert result is True
+    gitea_client.session.get.assert_called_once_with(
+        'https://test.com/api/v1/orgs/personal-projects'
+    )
+
+
+def test_is_organization_false(gitea_client):
+    """Test _is_organization returns False for user account"""
+    mock_response = Mock()
+    mock_response.status_code = 404
+
+    with patch.object(gitea_client.session, 'get', return_value=mock_response):
+        result = gitea_client._is_organization('lmiranda')
+
+    assert result is False
+
+
+def test_is_org_repo_uses_orgs_endpoint(gitea_client):
+    """Test is_org_repo uses /orgs endpoint instead of owner.type"""
+    mock_response = Mock()
+    mock_response.status_code = 200
+
+    with patch.object(gitea_client.session, 'get', return_value=mock_response):
+        result = gitea_client.is_org_repo('personal-projects/repo')
+
+    assert result is True
+    # Should call /orgs/personal-projects, not /repos/.../
+    gitea_client.session.get.assert_called_once_with(
+        'https://test.com/api/v1/orgs/personal-projects'
+    )
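
The three tests above imply an owner-type probe of this shape; a sketch inferred from the assertions (the `self.base_url` attribute name is an assumption), not the committed implementation:

# Sketch inferred from the assertions above: a 200 from /orgs/{owner} marks an
# organization; is_org_repo only inspects the owner half of "owner/repo".
def _is_organization(self, owner: str) -> bool:
    response = self.session.get(f'{self.base_url}/api/v1/orgs/{owner}')
    return response.status_code == 200

def is_org_repo(self, repo: str) -> bool:
    owner = repo.split('/', 1)[0]
    return self._is_organization(owner)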
478 mcp-servers/gitea/tests/test_labels.py (Normal file)
@@ -0,0 +1,478 @@
"""
Unit tests for label tools with suggestion logic.
"""
import pytest
from unittest.mock import Mock, patch
from mcp_server.tools.labels import LabelTools


@pytest.fixture
def mock_gitea_client():
    """Fixture providing mocked Gitea client"""
    client = Mock()
    client.repo = 'test_org/test_repo'
    client.is_org_repo = Mock(return_value=True)
    return client


@pytest.fixture
def label_tools(mock_gitea_client):
    """Fixture providing LabelTools instance"""
    return LabelTools(mock_gitea_client)


@pytest.mark.asyncio
async def test_get_labels(label_tools):
    """Test getting all labels (org + repo)"""
    label_tools.gitea.get_org_labels = Mock(return_value=[
        {'name': 'Type/Bug'},
        {'name': 'Type/Feature'}
    ])
    label_tools.gitea.get_labels = Mock(return_value=[
        {'name': 'Component/Backend'},
        {'name': 'Component/Frontend'}
    ])

    result = await label_tools.get_labels()

    assert len(result['organization']) == 2
    assert len(result['repository']) == 2
    assert result['total_count'] == 4


# ========================================
# LABEL LOOKUP TESTS (NEW)
# ========================================

def test_build_label_lookup_slash_format():
    """Test building label lookup with slash format labels"""
    mock_client = Mock()
    mock_client.repo = 'test/repo'
    tools = LabelTools(mock_client)

    labels = ['Type/Bug', 'Type/Feature', 'Priority/High', 'Priority/Low']
    lookup = tools._build_label_lookup(labels)

    assert 'type' in lookup
    assert 'bug' in lookup['type']
    assert lookup['type']['bug'] == 'Type/Bug'
    assert lookup['type']['feature'] == 'Type/Feature'
    assert 'priority' in lookup
    assert lookup['priority']['high'] == 'Priority/High'


def test_build_label_lookup_colon_space_format():
    """Test building label lookup with colon-space format labels"""
    mock_client = Mock()
    mock_client.repo = 'test/repo'
    tools = LabelTools(mock_client)

    labels = ['Type: Bug', 'Type: Feature', 'Priority: High', 'Effort: M']
    lookup = tools._build_label_lookup(labels)

    assert 'type' in lookup
    assert 'bug' in lookup['type']
    assert lookup['type']['bug'] == 'Type: Bug'
    assert lookup['type']['feature'] == 'Type: Feature'
    assert 'priority' in lookup
    assert lookup['priority']['high'] == 'Priority: High'
    # Test singular "Effort" (not "Efforts")
    assert 'effort' in lookup
    assert lookup['effort']['m'] == 'Effort: M'


def test_build_label_lookup_efforts_normalization():
    """Test that 'Efforts' is normalized to 'effort' for matching"""
    mock_client = Mock()
    mock_client.repo = 'test/repo'
    tools = LabelTools(mock_client)

    labels = ['Efforts/XS', 'Efforts/S', 'Efforts/M']
    lookup = tools._build_label_lookup(labels)

    # 'Efforts' should be normalized to 'effort'
    assert 'effort' in lookup
    assert lookup['effort']['xs'] == 'Efforts/XS'


def test_find_label():
    """Test finding labels from lookup"""
    mock_client = Mock()
    mock_client.repo = 'test/repo'
    tools = LabelTools(mock_client)

    lookup = {
        'type': {'bug': 'Type: Bug', 'feature': 'Type: Feature'},
        'priority': {'high': 'Priority: High', 'low': 'Priority: Low'}
    }

    assert tools._find_label(lookup, 'type', 'bug') == 'Type: Bug'
    assert tools._find_label(lookup, 'priority', 'high') == 'Priority: High'
    assert tools._find_label(lookup, 'type', 'nonexistent') is None
    assert tools._find_label(lookup, 'nonexistent', 'bug') is None
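
A lookup shape consistent with all four tests above would be the following; a sketch only, since this diff never shows the implementation (the split regex and the 'Efforts' special case are inferred):

# Sketch inferred from the tests above, not part of this commit.
import re
from typing import Dict, List, Optional

def _build_label_lookup(labels: List[str]) -> Dict[str, Dict[str, str]]:
    """Map lowercased category -> lowercased value -> original label text."""
    lookup: Dict[str, Dict[str, str]] = {}
    for label in labels:
        parts = re.split(r'\s*[/:]\s*', label, maxsplit=1)  # 'Type/Bug' or 'Type: Bug'
        if len(parts) != 2:
            continue  # labels like 'bug' carry no category
        category, value = parts[0].lower(), parts[1].lower()
        if category == 'efforts':  # normalize plural to singular, per the test
            category = 'effort'
        lookup.setdefault(category, {})[value] = label
    return lookup

def _find_label(lookup, category: str, value: str) -> Optional[str]:
    return lookup.get(category, {}).get(value)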

# ========================================
# SUGGEST LABELS WITH DYNAMIC FORMAT TESTS
# ========================================

def _create_tools_with_labels(labels):
    """Helper to create LabelTools with mocked labels"""
    import asyncio
    mock_client = Mock()
    mock_client.repo = 'test/repo'
    mock_client.is_org_repo = Mock(return_value=False)
    mock_client.get_labels = Mock(return_value=[{'name': l} for l in labels])
    return LabelTools(mock_client)
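
The dozen suggestion tests that follow all exercise one mechanism; a minimal sketch of `suggest_labels` consistent with them (the keyword table here is illustrative and far smaller than whatever the real module ships):

# Sketch only — keyword rules and structure are assumptions, not this commit's code.
_KEYWORD_RULES = {
    ('type', 'bug'): ('bug', 'fix', 'broken'),
    ('priority', 'critical'): ('urgent', 'critical', 'blocker'),
    ('component', 'auth'): ('auth', 'login'),
}

async def suggest_labels(self, context: str) -> list:
    if not self.gitea.repo:
        return []  # matches test_suggest_labels_empty_repo
    labels = [l['name'] for l in self.gitea.get_labels(self.gitea.repo)]
    lookup = self._build_label_lookup(labels)
    text = context.lower()
    suggestions = []
    for (category, value), keywords in _KEYWORD_RULES.items():
        if any(word in text for word in keywords):
            match = self._find_label(lookup, category, value)
            if match and match not in suggestions:
                suggestions.append(match)
    return suggestions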
@pytest.mark.asyncio
async def test_suggest_labels_with_slash_format():
    """Test label suggestion with slash format labels"""
    labels = [
        'Type/Bug', 'Type/Feature', 'Type/Refactor',
        'Priority/Critical', 'Priority/High', 'Priority/Medium', 'Priority/Low',
        'Complexity/Simple', 'Complexity/Medium', 'Complexity/Complex',
        'Component/Auth'
    ]
    tools = _create_tools_with_labels(labels)

    context = "Fix critical bug in login authentication"
    suggestions = await tools.suggest_labels(context)

    assert 'Type/Bug' in suggestions
    assert 'Priority/Critical' in suggestions
    assert 'Component/Auth' in suggestions


@pytest.mark.asyncio
async def test_suggest_labels_with_colon_space_format():
    """Test label suggestion with colon-space format labels"""
    labels = [
        'Type: Bug', 'Type: Feature', 'Type: Refactor',
        'Priority: Critical', 'Priority: High', 'Priority: Medium', 'Priority: Low',
        'Complexity: Simple', 'Complexity: Medium', 'Complexity: Complex',
        'Effort: XS', 'Effort: S', 'Effort: M', 'Effort: L', 'Effort: XL'
    ]
    tools = _create_tools_with_labels(labels)

    context = "Fix critical bug for tiny 1 hour fix"
    suggestions = await tools.suggest_labels(context)

    # Should return colon-space format labels
    assert 'Type: Bug' in suggestions
    assert 'Priority: Critical' in suggestions
    assert 'Effort: XS' in suggestions


@pytest.mark.asyncio
async def test_suggest_labels_bug():
    """Test label suggestion for bug context"""
    labels = [
        'Type/Bug', 'Type/Feature',
        'Priority/Critical', 'Priority/High', 'Priority/Medium', 'Priority/Low',
        'Complexity/Simple', 'Complexity/Medium', 'Complexity/Complex',
        'Component/Auth'
    ]
    tools = _create_tools_with_labels(labels)

    context = "Fix critical bug in login authentication"
    suggestions = await tools.suggest_labels(context)

    assert 'Type/Bug' in suggestions
    assert 'Priority/Critical' in suggestions
    assert 'Component/Auth' in suggestions


@pytest.mark.asyncio
async def test_suggest_labels_feature():
    """Test label suggestion for feature context"""
    labels = ['Type/Feature', 'Priority/Medium', 'Complexity/Medium']
    tools = _create_tools_with_labels(labels)

    context = "Add new feature to implement user dashboard"
    suggestions = await tools.suggest_labels(context)

    assert 'Type/Feature' in suggestions
    assert any('Priority' in label for label in suggestions)


@pytest.mark.asyncio
async def test_suggest_labels_refactor():
    """Test label suggestion for refactor context"""
    labels = ['Type/Refactor', 'Priority/Medium', 'Complexity/Medium', 'Component/Backend']
    tools = _create_tools_with_labels(labels)

    context = "Refactor architecture to extract service layer"
    suggestions = await tools.suggest_labels(context)

    assert 'Type/Refactor' in suggestions
    assert 'Component/Backend' in suggestions


@pytest.mark.asyncio
async def test_suggest_labels_documentation():
    """Test label suggestion for documentation context"""
    labels = ['Type/Documentation', 'Priority/Medium', 'Complexity/Medium', 'Component/API', 'Component/Docs']
    tools = _create_tools_with_labels(labels)

    context = "Update documentation for API endpoints"
    suggestions = await tools.suggest_labels(context)

    assert 'Type/Documentation' in suggestions
    assert 'Component/API' in suggestions or 'Component/Docs' in suggestions


@pytest.mark.asyncio
async def test_suggest_labels_priority():
    """Test priority detection in suggestions"""
    labels = ['Type/Feature', 'Priority/Critical', 'Priority/High', 'Priority/Medium', 'Priority/Low', 'Complexity/Medium']
    tools = _create_tools_with_labels(labels)

    # Critical priority
    context = "Urgent blocker in production"
    suggestions = await tools.suggest_labels(context)
    assert 'Priority/Critical' in suggestions

    # High priority
    context = "Important feature needed asap"
    suggestions = await tools.suggest_labels(context)
    assert 'Priority/High' in suggestions

    # Low priority
    context = "Nice-to-have optional improvement"
    suggestions = await tools.suggest_labels(context)
    assert 'Priority/Low' in suggestions


@pytest.mark.asyncio
async def test_suggest_labels_complexity():
    """Test complexity detection in suggestions"""
    labels = ['Type/Feature', 'Priority/Medium', 'Complexity/Simple', 'Complexity/Medium', 'Complexity/Complex']
    tools = _create_tools_with_labels(labels)

    # Simple complexity
    context = "Simple quick fix for typo"
    suggestions = await tools.suggest_labels(context)
    assert 'Complexity/Simple' in suggestions

    # Complex complexity
    context = "Complex challenging architecture redesign"
    suggestions = await tools.suggest_labels(context)
    assert 'Complexity/Complex' in suggestions


@pytest.mark.asyncio
async def test_suggest_labels_efforts():
    """Test efforts detection in suggestions"""
    labels = ['Type/Feature', 'Priority/Medium', 'Complexity/Medium', 'Efforts/XS', 'Efforts/S', 'Efforts/M', 'Efforts/L', 'Efforts/XL']
    tools = _create_tools_with_labels(labels)

    # XS effort
    context = "Tiny fix that takes 1 hour"
    suggestions = await tools.suggest_labels(context)
    assert 'Efforts/XS' in suggestions

    # L effort
    context = "Large feature taking 1 week"
    suggestions = await tools.suggest_labels(context)
    assert 'Efforts/L' in suggestions


@pytest.mark.asyncio
async def test_suggest_labels_components():
    """Test component detection in suggestions"""
    labels = ['Type/Feature', 'Priority/Medium', 'Complexity/Medium', 'Component/Backend', 'Component/Frontend', 'Component/API', 'Component/Database']
    tools = _create_tools_with_labels(labels)

    # Backend component
    context = "Update backend API service"
    suggestions = await tools.suggest_labels(context)
    assert 'Component/Backend' in suggestions
    assert 'Component/API' in suggestions

    # Frontend component
    context = "Fix frontend UI component"
    suggestions = await tools.suggest_labels(context)
    assert 'Component/Frontend' in suggestions

    # Database component
    context = "Add database migration for schema"
    suggestions = await tools.suggest_labels(context)
    assert 'Component/Database' in suggestions


@pytest.mark.asyncio
async def test_suggest_labels_tech_stack():
    """Test tech stack detection in suggestions"""
    labels = ['Type/Feature', 'Priority/Medium', 'Complexity/Medium', 'Tech/Python', 'Tech/FastAPI', 'Tech/Docker', 'Tech/PostgreSQL']
    tools = _create_tools_with_labels(labels)

    # Python
    context = "Update Python FastAPI endpoint"
    suggestions = await tools.suggest_labels(context)
    assert 'Tech/Python' in suggestions
    assert 'Tech/FastAPI' in suggestions

    # Docker
    context = "Fix Dockerfile configuration"
    suggestions = await tools.suggest_labels(context)
    assert 'Tech/Docker' in suggestions

    # PostgreSQL
    context = "Optimize PostgreSQL query"
    suggestions = await tools.suggest_labels(context)
    assert 'Tech/PostgreSQL' in suggestions


@pytest.mark.asyncio
async def test_suggest_labels_source():
    """Test source detection in suggestions"""
    labels = ['Type/Feature', 'Priority/Medium', 'Complexity/Medium', 'Source/Development', 'Source/Staging', 'Source/Production']
    tools = _create_tools_with_labels(labels)

    # Development
    context = "Issue found in development environment"
    suggestions = await tools.suggest_labels(context)
    assert 'Source/Development' in suggestions

    # Production
    context = "Critical production issue"
    suggestions = await tools.suggest_labels(context)
    assert 'Source/Production' in suggestions


@pytest.mark.asyncio
async def test_suggest_labels_risk():
    """Test risk detection in suggestions"""
    labels = ['Type/Feature', 'Priority/Medium', 'Complexity/Medium', 'Risk/High', 'Risk/Low']
    tools = _create_tools_with_labels(labels)

    # High risk
    context = "Breaking change to major API"
    suggestions = await tools.suggest_labels(context)
    assert 'Risk/High' in suggestions

    # Low risk
    context = "Safe minor update with low risk"
    suggestions = await tools.suggest_labels(context)
    assert 'Risk/Low' in suggestions


@pytest.mark.asyncio
async def test_suggest_labels_multiple_categories():
    """Test that suggestions span multiple categories"""
    labels = [
        'Type/Bug', 'Type/Feature',
        'Priority/Critical', 'Priority/Medium',
        'Complexity/Complex', 'Complexity/Medium',
        'Component/Backend', 'Component/API', 'Component/Auth',
        'Tech/FastAPI', 'Tech/PostgreSQL',
        'Source/Production'
    ]
    tools = _create_tools_with_labels(labels)

    context = """
    Urgent critical bug in production backend API service.
    Need to fix broken authentication endpoint.
    This is a complex issue requiring FastAPI and PostgreSQL expertise.
    """

    suggestions = await tools.suggest_labels(context)

    # Should have Type
    assert any('Type/' in label for label in suggestions)

    # Should have Priority
    assert any('Priority/' in label for label in suggestions)

    # Should have Component
    assert any('Component/' in label for label in suggestions)

    # Should have Tech
    assert any('Tech/' in label for label in suggestions)

    # Should have Source
    assert any('Source/' in label for label in suggestions)


@pytest.mark.asyncio
async def test_suggest_labels_empty_repo():
    """Test suggestions when no repo specified and no labels available"""
    mock_client = Mock()
    mock_client.repo = None
    tools = LabelTools(mock_client)

    context = "Fix a bug"
    suggestions = await tools.suggest_labels(context)

    # Should return empty list when no repo
    assert suggestions == []


@pytest.mark.asyncio
async def test_suggest_labels_no_matching_labels():
    """Test suggestions return empty when no matching labels exist"""
    labels = ['Custom/Label', 'Other/Thing']  # No standard labels
    tools = _create_tools_with_labels(labels)

    context = "Fix a bug"
    suggestions = await tools.suggest_labels(context)

    # Should return empty list since no Type/Bug or similar exists
    assert len(suggestions) == 0


@pytest.mark.asyncio
async def test_get_labels_org_owned_repo():
    """Test getting labels for organization-owned repository"""
    mock_client = Mock()
    mock_client.repo = 'myorg/myrepo'
    mock_client.is_org_repo = Mock(return_value=True)
    mock_client.get_org_labels = Mock(return_value=[
        {'name': 'Type/Bug', 'id': 1},
        {'name': 'Type/Feature', 'id': 2}
    ])
    mock_client.get_labels = Mock(return_value=[
        {'name': 'Component/Backend', 'id': 3}
    ])

    tools = LabelTools(mock_client)
    result = await tools.get_labels()

    # Should fetch both org and repo labels
    mock_client.is_org_repo.assert_called_once_with('myorg/myrepo')
    mock_client.get_org_labels.assert_called_once_with('myorg')
    mock_client.get_labels.assert_called_once_with('myorg/myrepo')

    assert len(result['organization']) == 2
    assert len(result['repository']) == 1
    assert result['total_count'] == 3


@pytest.mark.asyncio
async def test_get_labels_user_owned_repo():
    """Test getting labels for user-owned repository (no org labels)"""
    mock_client = Mock()
    mock_client.repo = 'lmiranda/personal-portfolio'
    mock_client.is_org_repo = Mock(return_value=False)
    mock_client.get_labels = Mock(return_value=[
        {'name': 'bug', 'id': 1},
        {'name': 'enhancement', 'id': 2}
    ])

    tools = LabelTools(mock_client)
    result = await tools.get_labels()

    # Should check if org repo
    mock_client.is_org_repo.assert_called_once_with('lmiranda/personal-portfolio')

    # Should NOT call get_org_labels for user-owned repos
    mock_client.get_org_labels.assert_not_called()

    # Should still get repo labels
    mock_client.get_labels.assert_called_once_with('lmiranda/personal-portfolio')

    assert len(result['organization']) == 0
    assert len(result['repository']) == 2
    assert result['total_count'] == 2
@@ -294,4 +294,4 @@ logging.basicConfig(level=logging.DEBUG)

## License

-MIT License - Part of the Claude Code Marketplace (`support-claude-mktplace`).
+MIT License - Part of the Leo Claude Marketplace.
@@ -4,6 +4,7 @@ NetBox API client for interacting with NetBox REST API.
Provides a generic HTTP client with methods for all standard REST operations.
Individual tool modules use this client for their specific endpoints.
"""
+import json
import requests
import logging
from typing import List, Dict, Optional, Any, Union
@@ -83,7 +84,20 @@ class NetBoxClient:
        if response.status_code == 204 or not response.content:
            return None

-        return response.json()
+        # Parse JSON with diagnostic error handling
+        try:
+            return response.json()
+        except json.JSONDecodeError as e:
+            logger.error(
+                f"JSON decode failed. Status: {response.status_code}, "
+                f"Content-Length: {len(response.content)}, "
+                f"Content preview: {response.content[:200]!r}"
+            )
+            raise ValueError(
+                f"Invalid JSON response from NetBox: {e}. "
+                f"Status code: {response.status_code}, "
+                f"Content length: {len(response.content)} bytes"
+            ) from e

    def list(
        self,
@@ -103,7 +103,19 @@ TOOL_DEFINITIONS = {
        'properties': {
            'id': {'type': 'integer', 'description': 'Site ID'},
            'name': {'type': 'string', 'description': 'New name'},
-            'status': {'type': 'string', 'description': 'New status'}
+            'slug': {'type': 'string', 'description': 'New slug'},
+            'status': {'type': 'string', 'description': 'Status'},
+            'region': {'type': 'integer', 'description': 'Region ID'},
+            'group': {'type': 'integer', 'description': 'Site group ID'},
+            'tenant': {'type': 'integer', 'description': 'Tenant ID'},
+            'facility': {'type': 'string', 'description': 'Facility name'},
+            'time_zone': {'type': 'string', 'description': 'Time zone'},
+            'description': {'type': 'string', 'description': 'Description'},
+            'physical_address': {'type': 'string', 'description': 'Physical address'},
+            'shipping_address': {'type': 'string', 'description': 'Shipping address'},
+            'latitude': {'type': 'number', 'description': 'Latitude'},
+            'longitude': {'type': 'number', 'description': 'Longitude'},
+            'comments': {'type': 'string', 'description': 'Comments'}
        },
        'required': ['id']
    },
@@ -136,7 +148,14 @@ TOOL_DEFINITIONS = {
    },
    'dcim_update_location': {
        'description': 'Update an existing location',
-        'properties': {'id': {'type': 'integer', 'description': 'Location ID'}},
+        'properties': {
+            'id': {'type': 'integer', 'description': 'Location ID'},
+            'name': {'type': 'string', 'description': 'New name'},
+            'slug': {'type': 'string', 'description': 'New slug'},
+            'site': {'type': 'integer', 'description': 'Site ID'},
+            'parent': {'type': 'integer', 'description': 'Parent location ID'},
+            'description': {'type': 'string', 'description': 'Description'}
+        },
        'required': ['id']
    },
    'dcim_delete_location': {
@@ -171,7 +190,18 @@ TOOL_DEFINITIONS = {
    },
    'dcim_update_rack': {
        'description': 'Update an existing rack',
-        'properties': {'id': {'type': 'integer', 'description': 'Rack ID'}},
+        'properties': {
+            'id': {'type': 'integer', 'description': 'Rack ID'},
+            'name': {'type': 'string', 'description': 'New name'},
+            'site': {'type': 'integer', 'description': 'Site ID'},
+            'location': {'type': 'integer', 'description': 'Location ID'},
+            'status': {'type': 'string', 'description': 'Status'},
+            'role': {'type': 'integer', 'description': 'Role ID'},
+            'tenant': {'type': 'integer', 'description': 'Tenant ID'},
+            'u_height': {'type': 'integer', 'description': 'Rack height in U'},
+            'description': {'type': 'string', 'description': 'Description'},
+            'comments': {'type': 'string', 'description': 'Comments'}
+        },
        'required': ['id']
    },
    'dcim_delete_rack': {
@@ -198,7 +228,12 @@ TOOL_DEFINITIONS = {
    },
    'dcim_update_manufacturer': {
        'description': 'Update an existing manufacturer',
-        'properties': {'id': {'type': 'integer', 'description': 'Manufacturer ID'}},
+        'properties': {
+            'id': {'type': 'integer', 'description': 'Manufacturer ID'},
+            'name': {'type': 'string', 'description': 'New name'},
+            'slug': {'type': 'string', 'description': 'New slug'},
+            'description': {'type': 'string', 'description': 'Description'}
+        },
        'required': ['id']
    },
    'dcim_delete_manufacturer': {
@@ -230,7 +265,16 @@ TOOL_DEFINITIONS = {
    },
    'dcim_update_device_type': {
        'description': 'Update an existing device type',
-        'properties': {'id': {'type': 'integer', 'description': 'Device type ID'}},
+        'properties': {
+            'id': {'type': 'integer', 'description': 'Device type ID'},
+            'manufacturer': {'type': 'integer', 'description': 'Manufacturer ID'},
+            'model': {'type': 'string', 'description': 'Model name'},
+            'slug': {'type': 'string', 'description': 'New slug'},
+            'u_height': {'type': 'number', 'description': 'Height in rack units'},
+            'is_full_depth': {'type': 'boolean', 'description': 'Is full depth'},
+            'description': {'type': 'string', 'description': 'Description'},
+            'comments': {'type': 'string', 'description': 'Comments'}
+        },
        'required': ['id']
    },
    'dcim_delete_device_type': {
@@ -259,7 +303,14 @@ TOOL_DEFINITIONS = {
    },
    'dcim_update_device_role': {
        'description': 'Update an existing device role',
-        'properties': {'id': {'type': 'integer', 'description': 'Device role ID'}},
+        'properties': {
+            'id': {'type': 'integer', 'description': 'Device role ID'},
+            'name': {'type': 'string', 'description': 'New name'},
+            'slug': {'type': 'string', 'description': 'New slug'},
+            'color': {'type': 'string', 'description': 'Hex color code'},
+            'vm_role': {'type': 'boolean', 'description': 'Can be assigned to VMs'},
+            'description': {'type': 'string', 'description': 'Description'}
+        },
        'required': ['id']
    },
    'dcim_delete_device_role': {
@@ -290,7 +341,13 @@ TOOL_DEFINITIONS = {
    },
    'dcim_update_platform': {
        'description': 'Update an existing platform',
-        'properties': {'id': {'type': 'integer', 'description': 'Platform ID'}},
+        'properties': {
+            'id': {'type': 'integer', 'description': 'Platform ID'},
+            'name': {'type': 'string', 'description': 'New name'},
+            'slug': {'type': 'string', 'description': 'New slug'},
+            'manufacturer': {'type': 'integer', 'description': 'Manufacturer ID'},
+            'description': {'type': 'string', 'description': 'Description'}
+        },
        'required': ['id']
    },
    'dcim_delete_platform': {
@@ -326,7 +383,13 @@ TOOL_DEFINITIONS = {
            'status': {'type': 'string', 'description': 'Device status'},
            'rack': {'type': 'integer', 'description': 'Rack ID'},
            'position': {'type': 'number', 'description': 'Position in rack'},
-            'serial': {'type': 'string', 'description': 'Serial number'}
+            'serial': {'type': 'string', 'description': 'Serial number'},
+            'platform': {'type': 'integer', 'description': 'Platform ID'},
+            'primary_ip4': {'type': 'integer', 'description': 'Primary IPv4 address ID'},
+            'primary_ip6': {'type': 'integer', 'description': 'Primary IPv6 address ID'},
+            'asset_tag': {'type': 'string', 'description': 'Asset tag'},
+            'description': {'type': 'string', 'description': 'Description'},
+            'comments': {'type': 'string', 'description': 'Comments'}
        },
        'required': ['name', 'device_type', 'role', 'site']
    },
@@ -335,7 +398,17 @@ TOOL_DEFINITIONS = {
        'properties': {
            'id': {'type': 'integer', 'description': 'Device ID'},
            'name': {'type': 'string', 'description': 'New name'},
-            'status': {'type': 'string', 'description': 'New status'}
+            'status': {'type': 'string', 'description': 'New status'},
+            'platform': {'type': 'integer', 'description': 'Platform ID'},
+            'primary_ip4': {'type': 'integer', 'description': 'Primary IPv4 address ID'},
+            'primary_ip6': {'type': 'integer', 'description': 'Primary IPv6 address ID'},
+            'serial': {'type': 'string', 'description': 'Serial number'},
+            'asset_tag': {'type': 'string', 'description': 'Asset tag'},
+            'site': {'type': 'integer', 'description': 'Site ID'},
+            'rack': {'type': 'integer', 'description': 'Rack ID'},
+            'position': {'type': 'number', 'description': 'Position in rack'},
+            'description': {'type': 'string', 'description': 'Description'},
+            'comments': {'type': 'string', 'description': 'Comments'}
        },
        'required': ['id']
    },
@@ -370,7 +443,18 @@ TOOL_DEFINITIONS = {
    },
    'dcim_update_interface': {
        'description': 'Update an existing interface',
-        'properties': {'id': {'type': 'integer', 'description': 'Interface ID'}},
+        'properties': {
+            'id': {'type': 'integer', 'description': 'Interface ID'},
+            'name': {'type': 'string', 'description': 'New name'},
+            'type': {'type': 'string', 'description': 'Interface type'},
+            'enabled': {'type': 'boolean', 'description': 'Interface enabled'},
+            'mtu': {'type': 'integer', 'description': 'MTU'},
+            'mac_address': {'type': 'string', 'description': 'MAC address'},
+            'description': {'type': 'string', 'description': 'Description'},
+            'mode': {'type': 'string', 'description': 'VLAN mode'},
+            'untagged_vlan': {'type': 'integer', 'description': 'Untagged VLAN ID'},
+            'tagged_vlans': {'type': 'array', 'description': 'Tagged VLAN IDs'}
+        },
        'required': ['id']
    },
    'dcim_delete_interface': {
@@ -404,7 +488,15 @@ TOOL_DEFINITIONS = {
    },
    'dcim_update_cable': {
        'description': 'Update an existing cable',
-        'properties': {'id': {'type': 'integer', 'description': 'Cable ID'}},
+        'properties': {
+            'id': {'type': 'integer', 'description': 'Cable ID'},
+            'type': {'type': 'string', 'description': 'Cable type'},
+            'status': {'type': 'string', 'description': 'Cable status'},
+            'label': {'type': 'string', 'description': 'Cable label'},
+            'color': {'type': 'string', 'description': 'Cable color'},
+            'length': {'type': 'number', 'description': 'Cable length'},
+            'length_unit': {'type': 'string', 'description': 'Length unit'}
+        },
        'required': ['id']
    },
    'dcim_delete_cable': {
@@ -492,7 +584,15 @@ TOOL_DEFINITIONS = {
    },
    'ipam_update_vrf': {
        'description': 'Update an existing VRF',
-        'properties': {'id': {'type': 'integer', 'description': 'VRF ID'}},
+        'properties': {
+            'id': {'type': 'integer', 'description': 'VRF ID'},
+            'name': {'type': 'string', 'description': 'New name'},
+            'rd': {'type': 'string', 'description': 'Route distinguisher'},
+            'tenant': {'type': 'integer', 'description': 'Tenant ID'},
+            'enforce_unique': {'type': 'boolean', 'description': 'Enforce unique IPs'},
+            'description': {'type': 'string', 'description': 'Description'},
+            'comments': {'type': 'string', 'description': 'Comments'}
+        },
        'required': ['id']
    },
    'ipam_delete_vrf': {
@@ -531,7 +631,19 @@ TOOL_DEFINITIONS = {
    },
    'ipam_update_prefix': {
        'description': 'Update an existing prefix',
-        'properties': {'id': {'type': 'integer', 'description': 'Prefix ID'}},
+        'properties': {
+            'id': {'type': 'integer', 'description': 'Prefix ID'},
+            'prefix': {'type': 'string', 'description': 'Prefix in CIDR notation'},
+            'status': {'type': 'string', 'description': 'Status'},
+            'site': {'type': 'integer', 'description': 'Site ID'},
+            'vrf': {'type': 'integer', 'description': 'VRF ID'},
+            'vlan': {'type': 'integer', 'description': 'VLAN ID'},
+            'role': {'type': 'integer', 'description': 'Role ID'},
+            'tenant': {'type': 'integer', 'description': 'Tenant ID'},
+            'is_pool': {'type': 'boolean', 'description': 'Is a pool'},
+            'description': {'type': 'string', 'description': 'Description'},
+            'comments': {'type': 'string', 'description': 'Comments'}
+        },
        'required': ['id']
    },
    'ipam_delete_prefix': {
@@ -582,7 +694,18 @@ TOOL_DEFINITIONS = {
    },
    'ipam_update_ip_address': {
        'description': 'Update an existing IP address',
-        'properties': {'id': {'type': 'integer', 'description': 'IP address ID'}},
+        'properties': {
+            'id': {'type': 'integer', 'description': 'IP address ID'},
+            'address': {'type': 'string', 'description': 'IP address with prefix length'},
+            'status': {'type': 'string', 'description': 'Status'},
+            'vrf': {'type': 'integer', 'description': 'VRF ID'},
+            'tenant': {'type': 'integer', 'description': 'Tenant ID'},
+            'dns_name': {'type': 'string', 'description': 'DNS name'},
+            'description': {'type': 'string', 'description': 'Description'},
+            'comments': {'type': 'string', 'description': 'Comments'},
+            'assigned_object_type': {'type': 'string', 'description': 'Object type to assign to'},
+            'assigned_object_id': {'type': 'integer', 'description': 'Object ID to assign to'}
+        },
        'required': ['id']
    },
    'ipam_delete_ip_address': {
@@ -647,7 +770,18 @@ TOOL_DEFINITIONS = {
    },
    'ipam_update_vlan': {
        'description': 'Update an existing VLAN',
-        'properties': {'id': {'type': 'integer', 'description': 'VLAN ID'}},
+        'properties': {
+            'id': {'type': 'integer', 'description': 'VLAN ID'},
+            'vid': {'type': 'integer', 'description': 'VLAN ID number'},
+            'name': {'type': 'string', 'description': 'VLAN name'},
+            'status': {'type': 'string', 'description': 'Status'},
+            'site': {'type': 'integer', 'description': 'Site ID'},
+            'group': {'type': 'integer', 'description': 'VLAN group ID'},
+            'tenant': {'type': 'integer', 'description': 'Tenant ID'},
+            'role': {'type': 'integer', 'description': 'Role ID'},
+            'description': {'type': 'string', 'description': 'Description'},
+            'comments': {'type': 'string', 'description': 'Comments'}
+        },
        'required': ['id']
    },
    'ipam_delete_vlan': {
@@ -757,16 +891,17 @@ TOOL_DEFINITIONS = {
        'properties': {'id': {'type': 'integer', 'description': 'Provider ID'}},
        'required': ['id']
    },
-    'circuits_list_circuit_types': {
+    # NOTE: circuit_types tools shortened to meet 28-char limit
+    'circ_list_types': {
        'description': 'List all circuit types in NetBox',
        'properties': {'name': {'type': 'string', 'description': 'Filter by name'}}
    },
-    'circuits_get_circuit_type': {
+    'circ_get_type': {
        'description': 'Get a specific circuit type by ID',
        'properties': {'id': {'type': 'integer', 'description': 'Circuit type ID'}},
        'required': ['id']
    },
-    'circuits_create_circuit_type': {
+    'circ_create_type': {
        'description': 'Create a new circuit type',
        'properties': {
            'name': {'type': 'string', 'description': 'Type name'},
@@ -809,19 +944,20 @@ TOOL_DEFINITIONS = {
        'properties': {'id': {'type': 'integer', 'description': 'Circuit ID'}},
        'required': ['id']
    },
-    'circuits_list_circuit_terminations': {
+    # NOTE: circuit_terminations tools shortened to meet 28-char limit
+    'circ_list_terminations': {
        'description': 'List all circuit terminations in NetBox',
        'properties': {
            'circuit_id': {'type': 'integer', 'description': 'Filter by circuit ID'},
            'site_id': {'type': 'integer', 'description': 'Filter by site ID'}
        }
    },
-    'circuits_get_circuit_termination': {
+    'circ_get_termination': {
        'description': 'Get a specific circuit termination by ID',
        'properties': {'id': {'type': 'integer', 'description': 'Termination ID'}},
        'required': ['id']
    },
-    'circuits_create_circuit_termination': {
+    'circ_create_termination': {
        'description': 'Create a new circuit termination',
        'properties': {
            'circuit': {'type': 'integer', 'description': 'Circuit ID'},
@@ -832,16 +968,18 @@ TOOL_DEFINITIONS = {
    },

    # ==================== Virtualization Tools ====================
-    'virtualization_list_cluster_types': {
+    # NOTE: Tool names shortened from 'virtualization_' to 'virt_' to meet
+    # 28-char limit (Claude API 64-char limit minus 36-char prefix)
+    'virt_list_cluster_types': {
        'description': 'List all cluster types in NetBox',
        'properties': {'name': {'type': 'string', 'description': 'Filter by name'}}
    },
-    'virtualization_get_cluster_type': {
+    'virt_get_cluster_type': {
        'description': 'Get a specific cluster type by ID',
        'properties': {'id': {'type': 'integer', 'description': 'Cluster type ID'}},
        'required': ['id']
    },
-    'virtualization_create_cluster_type': {
+    'virt_create_cluster_type': {
        'description': 'Create a new cluster type',
        'properties': {
            'name': {'type': 'string', 'description': 'Type name'},
@@ -849,16 +987,16 @@ TOOL_DEFINITIONS = {
        },
        'required': ['name', 'slug']
    },
-    'virtualization_list_cluster_groups': {
+    'virt_list_cluster_groups': {
        'description': 'List all cluster groups in NetBox',
        'properties': {'name': {'type': 'string', 'description': 'Filter by name'}}
    },
-    'virtualization_get_cluster_group': {
+    'virt_get_cluster_group': {
        'description': 'Get a specific cluster group by ID',
        'properties': {'id': {'type': 'integer', 'description': 'Cluster group ID'}},
        'required': ['id']
    },
-    'virtualization_create_cluster_group': {
+    'virt_create_cluster_group': {
        'description': 'Create a new cluster group',
        'properties': {
            'name': {'type': 'string', 'description': 'Group name'},
@@ -866,7 +1004,7 @@ TOOL_DEFINITIONS = {
        },
        'required': ['name', 'slug']
    },
-    'virtualization_list_clusters': {
+    'virt_list_clusters': {
        'description': 'List all clusters in NetBox',
        'properties': {
            'name': {'type': 'string', 'description': 'Filter by name'},
@@ -875,12 +1013,12 @@ TOOL_DEFINITIONS = {
            'site_id': {'type': 'integer', 'description': 'Filter by site ID'}
        }
    },
-    'virtualization_get_cluster': {
+    'virt_get_cluster': {
        'description': 'Get a specific cluster by ID',
        'properties': {'id': {'type': 'integer', 'description': 'Cluster ID'}},
        'required': ['id']
    },
-    'virtualization_create_cluster': {
+    'virt_create_cluster': {
        'description': 'Create a new cluster',
        'properties': {
            'name': {'type': 'string', 'description': 'Cluster name'},
@@ -891,17 +1029,27 @@ TOOL_DEFINITIONS = {
        },
        'required': ['name', 'type']
    },
-    'virtualization_update_cluster': {
+    'virt_update_cluster': {
        'description': 'Update an existing cluster',
-        'properties': {'id': {'type': 'integer', 'description': 'Cluster ID'}},
+        'properties': {
+            'id': {'type': 'integer', 'description': 'Cluster ID'},
+            'name': {'type': 'string', 'description': 'New name'},
+            'type': {'type': 'integer', 'description': 'Cluster type ID'},
+            'group': {'type': 'integer', 'description': 'Cluster group ID'},
+            'site': {'type': 'integer', 'description': 'Site ID'},
+            'status': {'type': 'string', 'description': 'Status'},
+            'tenant': {'type': 'integer', 'description': 'Tenant ID'},
+            'description': {'type': 'string', 'description': 'Description'},
+            'comments': {'type': 'string', 'description': 'Comments'}
+        },
        'required': ['id']
    },
-    'virtualization_delete_cluster': {
+    'virt_delete_cluster': {
        'description': 'Delete a cluster',
        'properties': {'id': {'type': 'integer', 'description': 'Cluster ID'}},
        'required': ['id']
    },
-    'virtualization_list_virtual_machines': {
+    'virt_list_vms': {
        'description': 'List all virtual machines in NetBox',
        'properties': {
            'name': {'type': 'string', 'description': 'Filter by name'},
@@ -910,12 +1058,12 @@ TOOL_DEFINITIONS = {
            'status': {'type': 'string', 'description': 'Filter by status'}
        }
    },
-    'virtualization_get_virtual_machine': {
+    'virt_get_vm': {
        'description': 'Get a specific virtual machine by ID',
        'properties': {'id': {'type': 'integer', 'description': 'VM ID'}},
        'required': ['id']
    },
-    'virtualization_create_virtual_machine': {
+    'virt_create_vm': {
        'description': 'Create a new virtual machine',
        'properties': {
            'name': {'type': 'string', 'description': 'VM name'},
@@ -928,29 +1076,45 @@ TOOL_DEFINITIONS = {
        },
        'required': ['name']
    },
-    'virtualization_update_virtual_machine': {
+    'virt_update_vm': {
        'description': 'Update an existing virtual machine',
-        'properties': {'id': {'type': 'integer', 'description': 'VM ID'}},
+        'properties': {
+            'id': {'type': 'integer', 'description': 'VM ID'},
+            'name': {'type': 'string', 'description': 'New name'},
+            'status': {'type': 'string', 'description': 'Status'},
+            'cluster': {'type': 'integer', 'description': 'Cluster ID'},
+            'site': {'type': 'integer', 'description': 'Site ID'},
+            'role': {'type': 'integer', 'description': 'Role ID'},
+            'tenant': {'type': 'integer', 'description': 'Tenant ID'},
+            'platform': {'type': 'integer', 'description': 'Platform ID'},
+            'vcpus': {'type': 'number', 'description': 'Number of vCPUs'},
+            'memory': {'type': 'integer', 'description': 'Memory in MB'},
+            'disk': {'type': 'integer', 'description': 'Disk in GB'},
+            'primary_ip4': {'type': 'integer', 'description': 'Primary IPv4 address ID'},
+            'primary_ip6': {'type': 'integer', 'description': 'Primary IPv6 address ID'},
+            'description': {'type': 'string', 'description': 'Description'},
+            'comments': {'type': 'string', 'description': 'Comments'}
+        },
        'required': ['id']
    },
-    'virtualization_delete_virtual_machine': {
+    'virt_delete_vm': {
        'description': 'Delete a virtual machine',
        'properties': {'id': {'type': 'integer', 'description': 'VM ID'}},
        'required': ['id']
    },
-    'virtualization_list_vm_interfaces': {
+    'virt_list_vm_ifaces': {
        'description': 'List all VM interfaces in NetBox',
        'properties': {
            'virtual_machine_id': {'type': 'integer', 'description': 'Filter by VM ID'},
            'name': {'type': 'string', 'description': 'Filter by name'}
        }
    },
-    'virtualization_get_vm_interface': {
+    'virt_get_vm_iface': {
        'description': 'Get a specific VM interface by ID',
        'properties': {'id': {'type': 'integer', 'description': 'Interface ID'}},
        'required': ['id']
    },
-    'virtualization_create_vm_interface': {
+    'virt_create_vm_iface': {
        'description': 'Create a new VM interface',
        'properties': {
            'virtual_machine': {'type': 'integer', 'description': 'VM ID'},
@@ -1088,16 +1252,18 @@ TOOL_DEFINITIONS = {
    },

    # ==================== Wireless Tools ====================
-    'wireless_list_wireless_lan_groups': {
+    # NOTE: Tool names shortened from 'wireless_' to 'wlan_' to meet
+    # 28-char limit (Claude API 64-char limit minus 36-char prefix)
+    'wlan_list_groups': {
        'description': 'List all wireless LAN groups in NetBox',
        'properties': {'name': {'type': 'string', 'description': 'Filter by name'}}
    },
-    'wireless_get_wireless_lan_group': {
+    'wlan_get_group': {
        'description': 'Get a specific wireless LAN group by ID',
        'properties': {'id': {'type': 'integer', 'description': 'WLAN group ID'}},
        'required': ['id']
    },
-    'wireless_create_wireless_lan_group': {
+    'wlan_create_group': {
        'description': 'Create a new wireless LAN group',
        'properties': {
            'name': {'type': 'string', 'description': 'Group name'},
@@ -1105,7 +1271,7 @@ TOOL_DEFINITIONS = {
        },
        'required': ['name', 'slug']
    },
-    'wireless_list_wireless_lans': {
+    'wlan_list_lans': {
        'description': 'List all wireless LANs in NetBox',
        'properties': {
            'ssid': {'type': 'string', 'description': 'Filter by SSID'},
@@ -1113,12 +1279,12 @@ TOOL_DEFINITIONS = {
            'status': {'type': 'string', 'description': 'Filter by status'}
        }
    },
-    'wireless_get_wireless_lan': {
+    'wlan_get_lan': {
        'description': 'Get a specific wireless LAN by ID',
        'properties': {'id': {'type': 'integer', 'description': 'WLAN ID'}},
        'required': ['id']
    },
-    'wireless_create_wireless_lan': {
+    'wlan_create_lan': {
        'description': 'Create a new wireless LAN',
        'properties': {
            'ssid': {'type': 'string', 'description': 'SSID'},
@@ -1128,14 +1294,14 @@ TOOL_DEFINITIONS = {
        },
        'required': ['ssid']
    },
-    'wireless_list_wireless_links': {
+    'wlan_list_links': {
        'description': 'List all wireless links in NetBox',
        'properties': {
            'ssid': {'type': 'string', 'description': 'Filter by SSID'},
            'status': {'type': 'string', 'description': 'Filter by status'}
        }
    },
-    'wireless_get_wireless_link': {
+    'wlan_get_link': {
        'description': 'Get a specific wireless link by ID',
        'properties': {'id': {'type': 'integer', 'description': 'Link ID'}},
        'required': ['id']
@@ -1241,6 +1407,52 @@ TOOL_DEFINITIONS = {
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
# Map shortened tool names to (category, method_name) for routing.
|
||||||
|
# This is necessary because tool names were shortened to meet the 28-character
|
||||||
|
# limit imposed by Claude API's 64-character tool name limit minus the
|
||||||
|
# 36-character prefix used by Claude Code for MCP tools.
|
||||||
|
TOOL_NAME_MAP = {
|
||||||
|
# Virtualization tools (virt_ -> virtualization category)
|
||||||
|
'virt_list_cluster_types': ('virtualization', 'list_cluster_types'),
|
||||||
|
'virt_get_cluster_type': ('virtualization', 'get_cluster_type'),
|
||||||
|
'virt_create_cluster_type': ('virtualization', 'create_cluster_type'),
|
||||||
|
'virt_list_cluster_groups': ('virtualization', 'list_cluster_groups'),
|
||||||
|
'virt_get_cluster_group': ('virtualization', 'get_cluster_group'),
|
||||||
|
'virt_create_cluster_group': ('virtualization', 'create_cluster_group'),
|
||||||
|
'virt_list_clusters': ('virtualization', 'list_clusters'),
|
||||||
|
'virt_get_cluster': ('virtualization', 'get_cluster'),
|
||||||
|
'virt_create_cluster': ('virtualization', 'create_cluster'),
|
||||||
|
'virt_update_cluster': ('virtualization', 'update_cluster'),
|
||||||
|
'virt_delete_cluster': ('virtualization', 'delete_cluster'),
|
||||||
|
'virt_list_vms': ('virtualization', 'list_virtual_machines'),
|
||||||
|
'virt_get_vm': ('virtualization', 'get_virtual_machine'),
|
||||||
|
'virt_create_vm': ('virtualization', 'create_virtual_machine'),
|
||||||
|
'virt_update_vm': ('virtualization', 'update_virtual_machine'),
|
||||||
|
'virt_delete_vm': ('virtualization', 'delete_virtual_machine'),
|
||||||
|
'virt_list_vm_ifaces': ('virtualization', 'list_vm_interfaces'),
|
||||||
|
'virt_get_vm_iface': ('virtualization', 'get_vm_interface'),
|
||||||
|
'virt_create_vm_iface': ('virtualization', 'create_vm_interface'),
|
||||||
|
|
||||||
|
# Circuits tools (circ_ -> circuits category, for shortened names only)
|
||||||
|
'circ_list_types': ('circuits', 'list_circuit_types'),
|
||||||
|
'circ_get_type': ('circuits', 'get_circuit_type'),
|
||||||
|
'circ_create_type': ('circuits', 'create_circuit_type'),
|
||||||
|
'circ_list_terminations': ('circuits', 'list_circuit_terminations'),
|
||||||
|
'circ_get_termination': ('circuits', 'get_circuit_termination'),
|
||||||
|
'circ_create_termination': ('circuits', 'create_circuit_termination'),
|
||||||
|
|
||||||
|
# Wireless tools (wlan_ -> wireless category)
|
||||||
|
'wlan_list_groups': ('wireless', 'list_wireless_lan_groups'),
|
||||||
|
'wlan_get_group': ('wireless', 'get_wireless_lan_group'),
|
||||||
|
'wlan_create_group': ('wireless', 'create_wireless_lan_group'),
|
||||||
|
'wlan_list_lans': ('wireless', 'list_wireless_lans'),
|
||||||
|
'wlan_get_lan': ('wireless', 'get_wireless_lan'),
|
||||||
|
'wlan_create_lan': ('wireless', 'create_wireless_lan'),
|
||||||
|
'wlan_list_links': ('wireless', 'list_wireless_links'),
|
||||||
|
'wlan_get_link': ('wireless', 'get_wireless_link'),
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
class NetBoxMCPServer:
|
class NetBoxMCPServer:
|
||||||
"""MCP Server for NetBox integration"""
|
"""MCP Server for NetBox integration"""
|
||||||
|
|
||||||
@@ -1314,12 +1526,21 @@ class NetBoxMCPServer:
|
|||||||
)]
|
)]
|
||||||
|
|
||||||
async def _route_tool(self, name: str, arguments: dict):
|
async def _route_tool(self, name: str, arguments: dict):
|
||||||
"""Route tool call to appropriate handler."""
|
"""Route tool call to appropriate handler.
|
||||||
parts = name.split('_', 1)
|
|
||||||
if len(parts) != 2:
|
|
||||||
raise ValueError(f"Invalid tool name format: {name}")
|
|
||||||
|
|
||||||
category, method_name = parts[0], parts[1]
|
Tool names may be shortened (e.g., 'virt_list_vms' instead of
|
||||||
|
'virtualization_list_virtual_machines') to meet the 28-character
|
||||||
|
limit. TOOL_NAME_MAP handles the translation to actual method names.
|
||||||
|
"""
|
||||||
|
# Check if this is a mapped short name
|
||||||
|
if name in TOOL_NAME_MAP:
|
||||||
|
category, method_name = TOOL_NAME_MAP[name]
|
||||||
|
else:
|
||||||
|
# Fall back to original logic for unchanged tools
|
||||||
|
parts = name.split('_', 1)
|
||||||
|
if len(parts) != 2:
|
||||||
|
raise ValueError(f"Invalid tool name format: {name}")
|
||||||
|
category, method_name = parts[0], parts[1]
|
||||||
|
|
||||||
# Map category to tool class
|
# Map category to tool class
|
||||||
tool_map = {
|
tool_map = {
|
||||||
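Taken together, the two hunks above mean a tool name now resolves through the map first and only then through the old split-on-underscore convention. A minimal sketch of that resolution order (the two map entries are copied from the diff; `resolve` and the `dcim_list_devices` name are illustrative, not part of the commit):

```python
# Sketch of the name resolution the diff adds to _route_tool.
TOOL_NAME_MAP = {
    'virt_list_vms': ('virtualization', 'list_virtual_machines'),
    'wlan_get_group': ('wireless', 'get_wireless_lan_group'),
}

def resolve(name: str) -> tuple[str, str]:
    """Return (category, method_name) for a tool name."""
    if name in TOOL_NAME_MAP:
        return TOOL_NAME_MAP[name]      # shortened names take the mapped route
    parts = name.split('_', 1)          # unchanged tools stay category_method
    if len(parts) != 2:
        raise ValueError(f"Invalid tool name format: {name}")
    return parts[0], parts[1]

assert resolve('virt_list_vms') == ('virtualization', 'list_virtual_machines')
assert resolve('dcim_list_devices') == ('dcim', 'list_devices')  # fallback path
```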
mcp-servers/netbox/run.sh (new executable file, 21 lines)
@@ -0,0 +1,21 @@
#!/bin/bash
# Capture original working directory before any cd operations
# This should be the user's project directory when launched by Claude Code
export CLAUDE_PROJECT_DIR="${CLAUDE_PROJECT_DIR:-$PWD}"

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
CACHE_VENV="$HOME/.cache/claude-mcp-venvs/leo-claude-mktplace/netbox/.venv"
LOCAL_VENV="$SCRIPT_DIR/.venv"

if [[ -f "$CACHE_VENV/bin/python" ]]; then
    PYTHON="$CACHE_VENV/bin/python"
elif [[ -f "$LOCAL_VENV/bin/python" ]]; then
    PYTHON="$LOCAL_VENV/bin/python"
else
    echo "ERROR: No venv found. Run: ./scripts/setup-venvs.sh" >&2
    exit 1
fi

cd "$SCRIPT_DIR"
export PYTHONPATH="$SCRIPT_DIR"
exec "$PYTHON" -m mcp_server.server "$@"
mcp-servers/viz-platform/.doc-guardian-queue (new file, 5 lines)
@@ -0,0 +1,5 @@
2026-01-26T11:40:11 | mcp-servers | /home/lmiranda/claude-plugins-work/mcp-servers/viz-platform/registry/dmc_2_5.json | docs/COMMANDS-CHEATSHEET.md CLAUDE.md
2026-01-26T13:46:31 | mcp-servers | /home/lmiranda/claude-plugins-work/mcp-servers/viz-platform/tests/test_chart_tools.py | docs/COMMANDS-CHEATSHEET.md CLAUDE.md
2026-01-26T13:46:32 | mcp-servers | /home/lmiranda/claude-plugins-work/mcp-servers/viz-platform/tests/test_theme_tools.py | docs/COMMANDS-CHEATSHEET.md CLAUDE.md
2026-01-26T13:46:34 | mcp-servers | /home/lmiranda/claude-plugins-work/mcp-servers/viz-platform/tests/test_theme_tools.py | docs/COMMANDS-CHEATSHEET.md CLAUDE.md
2026-01-26T13:46:35 | mcp-servers | /home/lmiranda/claude-plugins-work/mcp-servers/viz-platform/tests/test_theme_tools.py | docs/COMMANDS-CHEATSHEET.md CLAUDE.md
mcp-servers/viz-platform/README.md (new file, 115 lines)
@@ -0,0 +1,115 @@
# viz-platform MCP Server

Model Context Protocol (MCP) server for Dash Mantine Components validation and visualization tools.

## Overview

This MCP server provides 21 tools for:
- **DMC Validation**: Version-locked component registry prevents Claude from hallucinating invalid props
- **Chart Creation**: Plotly-based visualization with theme integration
- **Layout Composition**: Dashboard layouts with responsive grids
- **Theme Management**: Design token-based theming system
- **Page Structure**: Multi-page Dash app generation

## Tools

### DMC Tools (3)

| Tool | Description |
|------|-------------|
| `list_components` | List available DMC components by category |
| `get_component_props` | Get valid props, types, and defaults for a component |
| `validate_component` | Validate component definition before use |

### Chart Tools (2)

| Tool | Description |
|------|-------------|
| `chart_create` | Create Plotly chart (line, bar, scatter, pie, histogram, area, heatmap) |
| `chart_configure_interaction` | Configure chart interactions (zoom, pan, hover) |

### Layout Tools (5)

| Tool | Description |
|------|-------------|
| `layout_create` | Create dashboard layout structure |
| `layout_add_filter` | Add filter components to layout |
| `layout_set_grid` | Configure responsive grid settings |
| `layout_get` | Retrieve layout configuration |
| `layout_add_section` | Add sections to layout |

### Theme Tools (6)

| Tool | Description |
|------|-------------|
| `theme_create` | Create new theme with design tokens |
| `theme_extend` | Extend existing theme with overrides |
| `theme_validate` | Validate theme completeness |
| `theme_export_css` | Export theme as CSS custom properties |
| `theme_list` | List available themes |
| `theme_activate` | Set active theme for visualizations |

### Page Tools (5)

| Tool | Description |
|------|-------------|
| `page_create` | Create new page structure |
| `page_add_navbar` | Add navigation bar to page |
| `page_set_auth` | Configure page authentication |
| `page_list` | List available pages |
| `page_get_app_config` | Get full app configuration |

## Configuration

### Environment Variables

| Variable | Required | Description |
|----------|----------|-------------|
| `DMC_VERSION` | No | Dash Mantine Components version (auto-detected if installed) |
| `VIZ_DEFAULT_THEME` | No | Default theme name |
| `CLAUDE_PROJECT_DIR` | No | Project directory for theme storage |

### Theme Storage

Themes can be stored at two levels:
- **User-level**: `~/.config/claude/themes/`
- **Project-level**: `{project}/.viz-platform/themes/`

Project-level themes take precedence.

## Component Registry

The server uses a static JSON registry for DMC component validation:
- Pre-generated from DMC source code
- Version-tagged (e.g., `dmc_2_5.json`)
- Prevents hallucination of non-existent props
- Fast, deterministic validation

Registry files are stored in `registry/` directory.

## Tests

94 tests with coverage:
- `test_config.py`: 82% coverage
- `test_component_registry.py`: 92% coverage
- `test_dmc_tools.py`: 88% coverage
- `test_chart_tools.py`: 68% coverage
- `test_theme_tools.py`: 99% coverage

Run tests:
```bash
cd mcp-servers/viz-platform
source .venv/bin/activate
pytest tests/ -v
```

## Dependencies

- Python 3.10+
- FastMCP
- plotly
- dash-mantine-components (optional, for version detection)

## Usage

This MCP server is used by the `viz-platform` plugin. See the plugin's commands in `plugins/viz-platform/commands/` for usage.
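The README's two-level theme storage implies a simple resolution order. A sketch of a lookup consistent with what it states (project-level wins); `find_theme` and the `.json` file extension are assumptions for illustration, not the server's actual API:

```python
import os
from pathlib import Path

def find_theme(name: str) -> Path | None:
    """Hypothetical resolver following the README's stated precedence."""
    project = os.environ.get("CLAUDE_PROJECT_DIR")
    candidates = []
    if project:  # project-level themes take precedence
        candidates.append(Path(project) / ".viz-platform" / "themes" / f"{name}.json")
    candidates.append(Path.home() / ".config" / "claude" / "themes" / f"{name}.json")
    for path in candidates:
        if path.exists():
            return path
    return None
```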
mcp-servers/viz-platform/mcp_server/__init__.py (new file, 7 lines)
@@ -0,0 +1,7 @@
"""
viz-platform MCP Server package.

Provides Dash Mantine Components validation and visualization tools to Claude Code.
"""

__version__ = "1.0.0"
mcp-servers/viz-platform/mcp_server/accessibility_tools.py (new file, 479 lines)
@@ -0,0 +1,479 @@
"""
Accessibility validation tools for color blindness and WCAG compliance.

Provides tools for validating color palettes against color blindness
simulations and WCAG contrast requirements.
"""
import logging
import math
from typing import Dict, List, Optional, Any, Tuple

logger = logging.getLogger(__name__)


# Color-blind safe palettes
SAFE_PALETTES = {
    "categorical": {
        "name": "Paul Tol's Qualitative",
        "colors": ["#4477AA", "#EE6677", "#228833", "#CCBB44", "#66CCEE", "#AA3377", "#BBBBBB"],
        "description": "Distinguishable for all types of color blindness"
    },
    "ibm": {
        "name": "IBM Design",
        "colors": ["#648FFF", "#785EF0", "#DC267F", "#FE6100", "#FFB000"],
        "description": "IBM's accessible color palette"
    },
    "okabe_ito": {
        "name": "Okabe-Ito",
        "colors": ["#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7", "#000000"],
        "description": "Optimized for all color vision deficiencies"
    },
    "tableau_colorblind": {
        "name": "Tableau Colorblind 10",
        "colors": ["#006BA4", "#FF800E", "#ABABAB", "#595959", "#5F9ED1",
                   "#C85200", "#898989", "#A2C8EC", "#FFBC79", "#CFCFCF"],
        "description": "Industry-standard accessible palette"
    }
}


# Simulation matrices for color blindness (LMS color space transformation)
# These approximate how colors appear to people with different types of color blindness
SIMULATION_MATRICES = {
    "deuteranopia": {
        # Green-blind (most common)
        "severity": "common",
        "population": "6% males, 0.4% females",
        "description": "Difficulty distinguishing red from green (green-blind)",
        "matrix": [
            [0.625, 0.375, 0.0],
            [0.700, 0.300, 0.0],
            [0.0, 0.300, 0.700]
        ]
    },
    "protanopia": {
        # Red-blind
        "severity": "common",
        "population": "2.5% males, 0.05% females",
        "description": "Difficulty distinguishing red from green (red-blind)",
        "matrix": [
            [0.567, 0.433, 0.0],
            [0.558, 0.442, 0.0],
            [0.0, 0.242, 0.758]
        ]
    },
    "tritanopia": {
        # Blue-blind (rare)
        "severity": "rare",
        "population": "0.01% total",
        "description": "Difficulty distinguishing blue from yellow",
        "matrix": [
            [0.950, 0.050, 0.0],
            [0.0, 0.433, 0.567],
            [0.0, 0.475, 0.525]
        ]
    }
}


class AccessibilityTools:
    """
    Color accessibility validation tools.

    Validates colors for WCAG compliance and color blindness accessibility.
    """

    def __init__(self, theme_store=None):
        """
        Initialize accessibility tools.

        Args:
            theme_store: Optional ThemeStore for theme color extraction
        """
        self.theme_store = theme_store

    def _hex_to_rgb(self, hex_color: str) -> Tuple[int, int, int]:
        """Convert hex color to RGB tuple."""
        hex_color = hex_color.lstrip('#')
        if len(hex_color) == 3:
            hex_color = ''.join([c * 2 for c in hex_color])
        return tuple(int(hex_color[i:i+2], 16) for i in (0, 2, 4))

    def _rgb_to_hex(self, rgb: Tuple[int, int, int]) -> str:
        """Convert RGB tuple to hex color."""
        return '#{:02x}{:02x}{:02x}'.format(
            max(0, min(255, int(rgb[0]))),
            max(0, min(255, int(rgb[1]))),
            max(0, min(255, int(rgb[2])))
        )

    def _get_relative_luminance(self, rgb: Tuple[int, int, int]) -> float:
        """
        Calculate relative luminance per WCAG 2.1.

        https://www.w3.org/WAI/GL/wiki/Relative_luminance
        """
        def channel_luminance(value: int) -> float:
            v = value / 255
            return v / 12.92 if v <= 0.03928 else ((v + 0.055) / 1.055) ** 2.4

        r, g, b = rgb
        return (
            0.2126 * channel_luminance(r) +
            0.7152 * channel_luminance(g) +
            0.0722 * channel_luminance(b)
        )

    def _get_contrast_ratio(self, color1: str, color2: str) -> float:
        """
        Calculate contrast ratio between two colors per WCAG 2.1.

        Returns ratio between 1:1 and 21:1.
        """
        rgb1 = self._hex_to_rgb(color1)
        rgb2 = self._hex_to_rgb(color2)

        l1 = self._get_relative_luminance(rgb1)
        l2 = self._get_relative_luminance(rgb2)

        lighter = max(l1, l2)
        darker = min(l1, l2)

        return (lighter + 0.05) / (darker + 0.05)

    def _simulate_color_blindness(
        self,
        hex_color: str,
        deficiency_type: str
    ) -> str:
        """
        Simulate how a color appears with a specific color blindness type.

        Uses linear RGB transformation approximation.
        """
        if deficiency_type not in SIMULATION_MATRICES:
            return hex_color

        rgb = self._hex_to_rgb(hex_color)
        matrix = SIMULATION_MATRICES[deficiency_type]["matrix"]

        # Apply transformation matrix
        r = rgb[0] * matrix[0][0] + rgb[1] * matrix[0][1] + rgb[2] * matrix[0][2]
        g = rgb[0] * matrix[1][0] + rgb[1] * matrix[1][1] + rgb[2] * matrix[1][2]
        b = rgb[0] * matrix[2][0] + rgb[1] * matrix[2][1] + rgb[2] * matrix[2][2]

        return self._rgb_to_hex((r, g, b))

    def _get_color_distance(self, color1: str, color2: str) -> float:
        """
        Calculate perceptual color distance (CIE76 approximation).

        Returns a value where < 20 means colors may be hard to distinguish.
        """
        rgb1 = self._hex_to_rgb(color1)
        rgb2 = self._hex_to_rgb(color2)

        # Simple Euclidean distance in RGB space (approximation)
        # For production, should use CIEDE2000
        return math.sqrt(
            (rgb1[0] - rgb2[0]) ** 2 +
            (rgb1[1] - rgb2[1]) ** 2 +
            (rgb1[2] - rgb2[2]) ** 2
        )

    async def accessibility_validate_colors(
        self,
        colors: List[str],
        check_types: Optional[List[str]] = None,
        min_contrast_ratio: float = 4.5
    ) -> Dict[str, Any]:
        """
        Validate a list of colors for accessibility.

        Args:
            colors: List of hex colors to validate
            check_types: Color blindness types to check (default: all)
            min_contrast_ratio: Minimum WCAG contrast ratio (default: 4.5 for AA)

        Returns:
            Dict with:
            - issues: List of accessibility issues found
            - simulations: How colors appear under each deficiency
            - recommendations: Suggestions for improvement
            - safe_palettes: Color-blind safe palette suggestions
        """
        check_types = check_types or list(SIMULATION_MATRICES.keys())
        issues = []
        simulations = {}

        # Normalize colors
        normalized_colors = [c.upper() if c.startswith('#') else f'#{c.upper()}' for c in colors]

        # Simulate each color blindness type
        for deficiency in check_types:
            if deficiency not in SIMULATION_MATRICES:
                continue

            simulated = [self._simulate_color_blindness(c, deficiency) for c in normalized_colors]
            simulations[deficiency] = {
                "original": normalized_colors,
                "simulated": simulated,
                "info": SIMULATION_MATRICES[deficiency]
            }

            # Check if any color pairs become indistinguishable
            for i in range(len(normalized_colors)):
                for j in range(i + 1, len(normalized_colors)):
                    distance = self._get_color_distance(simulated[i], simulated[j])
                    if distance < 30:  # Threshold for distinguishability
                        issues.append({
                            "type": "distinguishability",
                            "severity": "warning" if distance > 15 else "error",
                            "colors": [normalized_colors[i], normalized_colors[j]],
                            "affected_by": [deficiency],
                            "simulated_colors": [simulated[i], simulated[j]],
                            "distance": round(distance, 1),
                            "message": f"Colors may be hard to distinguish for {deficiency} ({SIMULATION_MATRICES[deficiency]['description']})"
                        })

        # Check contrast ratios against white and black backgrounds
        for color in normalized_colors:
            white_contrast = self._get_contrast_ratio(color, "#FFFFFF")
            black_contrast = self._get_contrast_ratio(color, "#000000")

            if white_contrast < min_contrast_ratio and black_contrast < min_contrast_ratio:
                issues.append({
                    "type": "contrast_ratio",
                    "severity": "error",
                    "colors": [color],
                    "white_contrast": round(white_contrast, 2),
                    "black_contrast": round(black_contrast, 2),
                    "required": min_contrast_ratio,
                    "message": f"Insufficient contrast against both white ({white_contrast:.1f}:1) and black ({black_contrast:.1f}:1) backgrounds"
                })

        # Generate recommendations
        recommendations = self._generate_recommendations(issues)

        # Calculate overall score
        error_count = sum(1 for i in issues if i["severity"] == "error")
        warning_count = sum(1 for i in issues if i["severity"] == "warning")

        if error_count == 0 and warning_count == 0:
            score = "A"
        elif error_count == 0 and warning_count <= 2:
            score = "B"
        elif error_count <= 2:
            score = "C"
        else:
            score = "D"

        return {
            "colors_checked": normalized_colors,
            "overall_score": score,
            "issue_count": len(issues),
            "issues": issues,
            "simulations": simulations,
            "recommendations": recommendations,
            "safe_palettes": SAFE_PALETTES
        }

    async def accessibility_validate_theme(
        self,
        theme_name: str
    ) -> Dict[str, Any]:
        """
        Validate a theme's colors for accessibility.

        Args:
            theme_name: Theme name to validate

        Returns:
            Dict with accessibility validation results
        """
        if not self.theme_store:
            return {
                "error": "Theme store not configured",
                "theme_name": theme_name
            }

        theme = self.theme_store.get_theme(theme_name)
        if not theme:
            available = self.theme_store.list_themes()
            return {
                "error": f"Theme '{theme_name}' not found. Available: {available}",
                "theme_name": theme_name
            }

        # Extract colors from theme
        colors = []
        tokens = theme.get("tokens", {})
        color_tokens = tokens.get("colors", {})

        def extract_colors(obj, prefix=""):
            """Recursively extract color values."""
            if isinstance(obj, str) and (obj.startswith('#') or len(obj) == 6):
                colors.append(obj if obj.startswith('#') else f'#{obj}')
            elif isinstance(obj, dict):
                for key, value in obj.items():
                    extract_colors(value, f"{prefix}.{key}")
            elif isinstance(obj, list):
                for item in obj:
                    extract_colors(item, prefix)

        extract_colors(color_tokens)

        # Validate extracted colors
        result = await self.accessibility_validate_colors(colors)
        result["theme_name"] = theme_name

        # Add theme-specific checks
        primary = color_tokens.get("primary")
        background = color_tokens.get("background", {})
        text = color_tokens.get("text", {})

        if primary and background:
            bg_color = background.get("base") if isinstance(background, dict) else background
            if bg_color:
                contrast = self._get_contrast_ratio(primary, bg_color)
                if contrast < 4.5:
                    result["issues"].append({
                        "type": "primary_contrast",
                        "severity": "error",
                        "colors": [primary, bg_color],
                        "ratio": round(contrast, 2),
                        "required": 4.5,
                        "message": f"Primary color has insufficient contrast ({contrast:.1f}:1) against background"
                    })

        return result

    async def accessibility_suggest_alternative(
        self,
        color: str,
        deficiency_type: str
    ) -> Dict[str, Any]:
        """
        Suggest accessible alternative colors.

        Args:
            color: Original hex color
            deficiency_type: Type of color blindness to optimize for

        Returns:
            Dict with alternative color suggestions
        """
        rgb = self._hex_to_rgb(color)

        suggestions = []

        # Suggest shifting hue while maintaining saturation and brightness
        # For red-green deficiency, shift toward blue or yellow
        if deficiency_type in ["deuteranopia", "protanopia"]:
            # Shift toward blue
            blue_shift = self._rgb_to_hex((
                max(0, rgb[0] - 50),
                max(0, rgb[1] - 30),
                min(255, rgb[2] + 80)
            ))
            suggestions.append({
                "color": blue_shift,
                "description": "Blue-shifted alternative",
                "preserves": "approximate brightness"
            })

            # Shift toward yellow/orange
            yellow_shift = self._rgb_to_hex((
                min(255, rgb[0] + 50),
                min(255, rgb[1] + 30),
                max(0, rgb[2] - 80)
            ))
            suggestions.append({
                "color": yellow_shift,
                "description": "Yellow-shifted alternative",
                "preserves": "approximate brightness"
            })

        elif deficiency_type == "tritanopia":
            # For blue-yellow deficiency, shift toward red or green
            red_shift = self._rgb_to_hex((
                min(255, rgb[0] + 60),
                max(0, rgb[1] - 20),
                max(0, rgb[2] - 40)
            ))
            suggestions.append({
                "color": red_shift,
                "description": "Red-shifted alternative",
                "preserves": "approximate brightness"
            })

        # Add safe palette suggestions
        for palette_name, palette in SAFE_PALETTES.items():
            # Find closest color in safe palette
            min_distance = float('inf')
            closest = None
            for safe_color in palette["colors"]:
                distance = self._get_color_distance(color, safe_color)
                if distance < min_distance:
                    min_distance = distance
                    closest = safe_color

            if closest:
                suggestions.append({
                    "color": closest,
                    "description": f"From {palette['name']} palette",
                    "palette": palette_name
                })

        return {
            "original_color": color,
            "deficiency_type": deficiency_type,
            "suggestions": suggestions[:5]  # Limit to 5 suggestions
        }

    def _generate_recommendations(self, issues: List[Dict[str, Any]]) -> List[str]:
        """Generate actionable recommendations based on issues."""
        recommendations = []

        # Check for distinguishability issues
        distinguishability_issues = [i for i in issues if i["type"] == "distinguishability"]
        if distinguishability_issues:
            affected_types = set()
            for issue in distinguishability_issues:
                affected_types.update(issue.get("affected_by", []))

            if "deuteranopia" in affected_types or "protanopia" in affected_types:
                recommendations.append(
                    "Avoid using red and green as the only differentiators - "
                    "add patterns, shapes, or labels"
                )

            recommendations.append(
                "Consider using a color-blind safe palette like Okabe-Ito or IBM Design"
            )

        # Check for contrast issues
        contrast_issues = [i for i in issues if i["type"] in ["contrast_ratio", "primary_contrast"]]
        if contrast_issues:
            recommendations.append(
                "Increase contrast by darkening colors for light backgrounds "
                "or lightening for dark backgrounds"
            )
            recommendations.append(
                "Use WCAG contrast checker tools to verify text readability"
            )

        # General recommendations
        if len(issues) > 0:
            recommendations.append(
                "Add secondary visual cues (icons, patterns, labels) "
                "to not rely solely on color"
            )

        if not recommendations:
            recommendations.append(
                "Color palette appears accessible! Consider adding patterns "
                "for additional distinguishability"
            )

        return recommendations
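The WCAG math in this file is easy to verify by hand. A minimal standalone sketch of the same luminance and contrast formulas, with two spot checks (the `#228be6` value is the Mantine blue from `chart_tools.py` in this same diff):

```python
# Hand-check of the WCAG 2.1 formulas used by _get_relative_luminance
# and _get_contrast_ratio above; standalone, no imports needed.
def channel(v255: int) -> float:
    v = v255 / 255
    return v / 12.92 if v <= 0.03928 else ((v + 0.055) / 1.055) ** 2.4

def luminance(r: int, g: int, b: int) -> float:
    return 0.2126 * channel(r) + 0.7152 * channel(g) + 0.0722 * channel(b)

def contrast(rgb1: tuple, rgb2: tuple) -> float:
    l1, l2 = luminance(*rgb1), luminance(*rgb2)
    return (max(l1, l2) + 0.05) / (min(l1, l2) + 0.05)

# Black on white is the maximum ratio: (1.0 + 0.05) / (0.0 + 0.05) = 21
print(round(contrast((255, 255, 255), (0, 0, 0)), 2))  # -> 21.0
# Mantine blue #228be6 on white lands around 3.56:1, below the 4.5:1 AA default
print(round(contrast((0x22, 0x8B, 0xE6), (255, 255, 255)), 2))  # -> ~3.56
```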
mcp-servers/viz-platform/mcp_server/chart_tools.py (new file, 533 lines)
@@ -0,0 +1,533 @@
"""
Chart creation tools using Plotly.

Provides tools for creating data visualizations with automatic theme integration.
"""
import base64
import logging
import os
from typing import Dict, List, Optional, Any, Union

logger = logging.getLogger(__name__)

# Check for kaleido availability
KALEIDO_AVAILABLE = False
try:
    import kaleido
    KALEIDO_AVAILABLE = True
except ImportError:
    logger.debug("kaleido not installed - chart export will be unavailable")


# Default color palette based on Mantine theme
DEFAULT_COLORS = [
    "#228be6",  # blue
    "#40c057",  # green
    "#fa5252",  # red
    "#fab005",  # yellow
    "#7950f2",  # violet
    "#fd7e14",  # orange
    "#20c997",  # teal
    "#f783ac",  # pink
    "#868e96",  # gray
    "#15aabf",  # cyan
]


class ChartTools:
    """
    Plotly-based chart creation tools.

    Creates charts that integrate with DMC theming system.
    """

    def __init__(self, theme_store=None):
        """
        Initialize chart tools.

        Args:
            theme_store: Optional ThemeStore for theme token resolution
        """
        self.theme_store = theme_store
        self._active_theme = None

    def set_theme(self, theme: Dict[str, Any]) -> None:
        """Set the active theme for chart styling."""
        self._active_theme = theme

    def _get_color_palette(self) -> List[str]:
        """Get color palette from theme or defaults."""
        if self._active_theme and 'colors' in self._active_theme:
            colors = self._active_theme['colors']
            # Extract primary colors from theme
            palette = []
            for key in ['primary', 'secondary', 'success', 'warning', 'error']:
                if key in colors:
                    palette.append(colors[key])
            if palette:
                return palette + DEFAULT_COLORS[len(palette):]
        return DEFAULT_COLORS

    def _resolve_color(self, color: Optional[str]) -> str:
        """Resolve a color token to actual color value."""
        if not color:
            return self._get_color_palette()[0]

        # Check if it's a theme token
        if self._active_theme and 'colors' in self._active_theme:
            colors = self._active_theme['colors']
            if color in colors:
                return colors[color]

        # Check if it's already a valid color
        if color.startswith('#') or color.startswith('rgb'):
            return color

        # Map common color names to palette
        color_map = {
            'blue': DEFAULT_COLORS[0],
            'green': DEFAULT_COLORS[1],
            'red': DEFAULT_COLORS[2],
            'yellow': DEFAULT_COLORS[3],
            'violet': DEFAULT_COLORS[4],
            'orange': DEFAULT_COLORS[5],
            'teal': DEFAULT_COLORS[6],
            'pink': DEFAULT_COLORS[7],
            'gray': DEFAULT_COLORS[8],
            'cyan': DEFAULT_COLORS[9],
        }
        return color_map.get(color, color)

    async def chart_create(
        self,
        chart_type: str,
        data: Dict[str, Any],
        options: Optional[Dict[str, Any]] = None
    ) -> Dict[str, Any]:
        """
        Create a Plotly chart.

        Args:
            chart_type: Type of chart (line, bar, scatter, pie, heatmap, histogram, area)
            data: Data specification with x, y values or labels/values for pie
            options: Optional chart options (title, color, layout settings)

        Returns:
            Dict with:
            - figure: Plotly figure JSON
            - chart_type: Type of chart created
            - error: Error message if creation failed
        """
        options = options or {}

        # Validate chart type
        valid_types = ['line', 'bar', 'scatter', 'pie', 'heatmap', 'histogram', 'area']
        if chart_type not in valid_types:
            return {
                "error": f"Invalid chart_type '{chart_type}'. Must be one of: {valid_types}",
                "chart_type": chart_type,
                "figure": None
            }

        try:
            # Build trace based on chart type
            trace = self._build_trace(chart_type, data, options)
            if 'error' in trace:
                return trace

            # Build layout
            layout = self._build_layout(options)

            # Create figure structure
            figure = {
                "data": [trace],
                "layout": layout
            }

            return {
                "figure": figure,
                "chart_type": chart_type,
                "trace_count": 1
            }

        except Exception as e:
            logger.error(f"Chart creation failed: {e}")
            return {
                "error": str(e),
                "chart_type": chart_type,
                "figure": None
            }

    def _build_trace(
        self,
        chart_type: str,
        data: Dict[str, Any],
        options: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Build Plotly trace for the chart type."""
        color = self._resolve_color(options.get('color'))
        palette = self._get_color_palette()

        # Common trace properties
        trace: Dict[str, Any] = {}

        if chart_type == 'line':
            trace = {
                "type": "scatter",
                "mode": "lines+markers",
                "x": data.get('x', []),
                "y": data.get('y', []),
                "line": {"color": color},
                "marker": {"color": color}
            }
            if 'name' in data:
                trace['name'] = data['name']

        elif chart_type == 'bar':
            trace = {
                "type": "bar",
                "x": data.get('x', []),
                "y": data.get('y', []),
                "marker": {"color": color}
            }
            if options.get('horizontal'):
                trace['orientation'] = 'h'
                trace['x'], trace['y'] = trace['y'], trace['x']
            if 'name' in data:
                trace['name'] = data['name']

        elif chart_type == 'scatter':
            trace = {
                "type": "scatter",
                "mode": "markers",
                "x": data.get('x', []),
                "y": data.get('y', []),
                "marker": {
                    "color": color,
                    "size": options.get('marker_size', 10)
                }
            }
            if 'size' in data:
                trace['marker']['size'] = data['size']
            if 'name' in data:
                trace['name'] = data['name']

        elif chart_type == 'pie':
            labels = data.get('labels', data.get('x', []))
            values = data.get('values', data.get('y', []))
            trace = {
                "type": "pie",
                "labels": labels,
                "values": values,
                "marker": {"colors": palette[:len(labels)]}
            }
            if options.get('donut'):
                trace['hole'] = options.get('hole', 0.4)

        elif chart_type == 'heatmap':
            trace = {
                "type": "heatmap",
                "z": data.get('z', data.get('values', [])),
                "x": data.get('x', []),
                "y": data.get('y', []),
                "colorscale": options.get('colorscale', 'Blues')
            }

        elif chart_type == 'histogram':
            trace = {
                "type": "histogram",
                "x": data.get('x', data.get('values', [])),
                "marker": {"color": color}
            }
            if 'nbins' in options:
                trace['nbinsx'] = options['nbins']

        elif chart_type == 'area':
            trace = {
                "type": "scatter",
                "mode": "lines",
                "x": data.get('x', []),
                "y": data.get('y', []),
                "fill": "tozeroy",
                "line": {"color": color},
                "fillcolor": color.replace(')', ', 0.3)').replace('rgb', 'rgba') if color.startswith('rgb') else color + '4D'
            }
            if 'name' in data:
                trace['name'] = data['name']

        else:
            return {"error": f"Unsupported chart type: {chart_type}"}

        return trace

    def _build_layout(self, options: Dict[str, Any]) -> Dict[str, Any]:
        """Build Plotly layout from options."""
        layout: Dict[str, Any] = {
            "autosize": True,
            "margin": {"l": 50, "r": 30, "t": 50, "b": 50}
        }

        # Title
        if 'title' in options:
            layout['title'] = {
                "text": options['title'],
                "x": 0.5,
                "xanchor": "center"
            }

        # Axis labels
        if 'x_label' in options:
            layout['xaxis'] = layout.get('xaxis', {})
            layout['xaxis']['title'] = options['x_label']

        if 'y_label' in options:
            layout['yaxis'] = layout.get('yaxis', {})
            layout['yaxis']['title'] = options['y_label']

        # Theme-based styling
        if self._active_theme:
            colors = self._active_theme.get('colors', {})
            bg = colors.get('background', {})

            if isinstance(bg, dict):
                layout['paper_bgcolor'] = bg.get('base', '#ffffff')
                layout['plot_bgcolor'] = bg.get('subtle', '#f8f9fa')
            elif isinstance(bg, str):
                layout['paper_bgcolor'] = bg
                layout['plot_bgcolor'] = bg

            text_color = colors.get('text', {})
            if isinstance(text_color, dict):
                layout['font'] = {'color': text_color.get('primary', '#212529')}
            elif isinstance(text_color, str):
                layout['font'] = {'color': text_color}

        # Additional layout options
        if 'showlegend' in options:
            layout['showlegend'] = options['showlegend']

        if 'height' in options:
            layout['height'] = options['height']

        if 'width' in options:
            layout['width'] = options['width']

        return layout

    async def chart_configure_interaction(
        self,
        figure: Dict[str, Any],
        interactions: Dict[str, Any]
    ) -> Dict[str, Any]:
        """
        Configure interactions for a chart.

        Args:
            figure: Plotly figure JSON to modify
            interactions: Interaction configuration:
                - hover_template: Custom hover text template
                - click_data: Enable click data capture
                - selection: Enable selection (box, lasso)
                - zoom: Enable/disable zoom

        Returns:
            Dict with:
            - figure: Updated figure JSON
            - interactions_added: List of interactions configured
            - error: Error message if configuration failed
        """
        if not figure or 'data' not in figure:
            return {
                "error": "Invalid figure: must contain 'data' key",
                "figure": figure,
                "interactions_added": []
            }

        try:
            interactions_added = []

            # Process each trace
            for i, trace in enumerate(figure['data']):
                # Hover template
                if 'hover_template' in interactions:
                    trace['hovertemplate'] = interactions['hover_template']
                    if i == 0:
                        interactions_added.append('hover_template')

                # Custom hover info
                if 'hover_info' in interactions:
                    trace['hoverinfo'] = interactions['hover_info']
                    if i == 0:
                        interactions_added.append('hover_info')

            # Layout-level interactions
            layout = figure.get('layout', {})

            # Click data (Dash callback integration)
            if interactions.get('click_data', False):
                layout['clickmode'] = 'event+select'
                interactions_added.append('click_data')

            # Selection mode
            if 'selection' in interactions:
                sel_mode = interactions['selection']
                if sel_mode in ['box', 'lasso', 'box+lasso']:
                    layout['dragmode'] = 'select' if sel_mode == 'box' else sel_mode
                    interactions_added.append(f'selection:{sel_mode}')

            # Zoom configuration
            if 'zoom' in interactions:
                if not interactions['zoom']:
                    layout['xaxis'] = layout.get('xaxis', {})
                    layout['yaxis'] = layout.get('yaxis', {})
                    layout['xaxis']['fixedrange'] = True
                    layout['yaxis']['fixedrange'] = True
                    interactions_added.append('zoom:disabled')
                else:
                    interactions_added.append('zoom:enabled')

            # Modebar configuration
            if 'modebar' in interactions:
                layout['modebar'] = interactions['modebar']
                interactions_added.append('modebar')

            figure['layout'] = layout

            return {
                "figure": figure,
                "interactions_added": interactions_added
            }

        except Exception as e:
            logger.error(f"Interaction configuration failed: {e}")
            return {
                "error": str(e),
                "figure": figure,
                "interactions_added": []
            }

    async def chart_export(
        self,
        figure: Dict[str, Any],
        format: str = "png",
        width: Optional[int] = None,
        height: Optional[int] = None,
        scale: float = 2.0,
        output_path: Optional[str] = None
    ) -> Dict[str, Any]:
        """
        Export a Plotly chart to a static image format.

        Args:
            figure: Plotly figure JSON to export
            format: Output format - png, svg, or pdf
            width: Image width in pixels (default: from figure or 1200)
            height: Image height in pixels (default: from figure or 800)
            scale: Resolution scale factor (default: 2 for retina)
            output_path: Optional file path to save the image

        Returns:
            Dict with:
            - image_data: Base64-encoded image (if no output_path)
            - file_path: Path to saved file (if output_path provided)
            - format: Export format used
            - dimensions: {width, height, scale}
            - error: Error message if export failed
        """
        # Validate format
        valid_formats = ['png', 'svg', 'pdf']
        format = format.lower()
        if format not in valid_formats:
            return {
                "error": f"Invalid format '{format}'. Must be one of: {valid_formats}",
                "format": format,
                "image_data": None
            }

        # Check kaleido availability
        if not KALEIDO_AVAILABLE:
            return {
                "error": "kaleido package not installed. Install with: pip install kaleido",
                "format": format,
                "image_data": None,
                "install_hint": "pip install kaleido"
            }

        # Validate figure
        if not figure or 'data' not in figure:
            return {
                "error": "Invalid figure: must contain 'data' key",
                "format": format,
                "image_data": None
            }

        try:
            import plotly.graph_objects as go
            import plotly.io as pio

            # Create Plotly figure object
            fig = go.Figure(figure)

            # Determine dimensions
            layout = figure.get('layout', {})
            export_width = width or layout.get('width') or 1200
            export_height = height or layout.get('height') or 800

            # Export to bytes
            image_bytes = pio.to_image(
                fig,
                format=format,
                width=export_width,
                height=export_height,
                scale=scale
            )

            result = {
                "format": format,
                "dimensions": {
                    "width": export_width,
                    "height": export_height,
                    "scale": scale,
                    "effective_width": int(export_width * scale),
                    "effective_height": int(export_height * scale)
                }
            }

            # Save to file or return base64
            if output_path:
                # Ensure directory exists
                output_dir = os.path.dirname(output_path)
                if output_dir and not os.path.exists(output_dir):
                    os.makedirs(output_dir, exist_ok=True)

                # Add extension if missing
                if not output_path.endswith(f'.{format}'):
                    output_path = f"{output_path}.{format}"

                with open(output_path, 'wb') as f:
                    f.write(image_bytes)

                result["file_path"] = output_path
                result["file_size_bytes"] = len(image_bytes)
            else:
                # Return as base64
                result["image_data"] = base64.b64encode(image_bytes).decode('utf-8')
                result["data_uri"] = f"data:image/{format};base64,{result['image_data']}"

            return result

        except ImportError as e:
            logger.error(f"Chart export failed - missing dependency: {e}")
            return {
                "error": f"Missing dependency for export: {e}",
                "format": format,
                "image_data": None,
                "install_hint": "pip install plotly kaleido"
            }
        except Exception as e:
            logger.error(f"Chart export failed: {e}")
            return {
                "error": str(e),
                "format": format,
                "image_data": None
            }
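For orientation, a hedged usage sketch of the new async chart API; the import path assumes the package layout shown in this diff, and the sample data and options are invented:

```python
import asyncio
from mcp_server.chart_tools import ChartTools  # package layout per this diff

async def main():
    tools = ChartTools()  # no theme store: falls back to the default palette
    result = await tools.chart_create(
        chart_type="bar",
        data={"x": ["Q1", "Q2", "Q3"], "y": [12, 30, 22]},
        options={"title": "Revenue", "color": "teal", "y_label": "kUSD"},
    )
    if result.get("error"):
        raise RuntimeError(result["error"])
    figure = result["figure"]           # plain Plotly figure JSON: {"data": [...], "layout": {...}}
    print(figure["data"][0]["marker"])  # -> {'color': '#20c997'}, 'teal' via the color map

asyncio.run(main())
```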
301
mcp-servers/viz-platform/mcp_server/component_registry.py
Normal file
301
mcp-servers/viz-platform/mcp_server/component_registry.py
Normal file
@@ -0,0 +1,301 @@
|
|||||||
|
"""
|
||||||
|
DMC Component Registry for viz-platform.
|
||||||
|
|
||||||
|
Provides version-locked component definitions to prevent Claude from
|
||||||
|
hallucinating invalid props. Uses static JSON registries pre-generated
|
||||||
|
from DMC source.
|
||||||
|
"""
|
||||||
|
import json
|
||||||
|
import logging
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Dict, List, Optional, Any
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class ComponentRegistry:
|
||||||
|
"""
|
||||||
|
Version-locked registry of Dash Mantine Components.
|
||||||
|
|
||||||
|
Loads component definitions from static JSON files and provides
|
||||||
|
lookup methods for validation tools.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, dmc_version: Optional[str] = None):
|
||||||
|
"""
|
||||||
|
Initialize the component registry.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
dmc_version: Installed DMC version (e.g., "0.14.7").
|
||||||
|
If None, will try to detect or use fallback.
|
||||||
|
"""
|
||||||
|
self.dmc_version = dmc_version
|
||||||
|
self.registry_dir = Path(__file__).parent.parent / 'registry'
|
||||||
|
self.components: Dict[str, Dict[str, Any]] = {}
|
||||||
|
self.categories: Dict[str, List[str]] = {}
|
||||||
|
self.loaded_version: Optional[str] = None
|
||||||
|
|
||||||
|
def load(self) -> bool:
|
||||||
|
"""
|
||||||
|
Load the component registry for the configured DMC version.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
True if registry loaded successfully, False otherwise
|
||||||
|
"""
|
||||||
|
registry_file = self._find_registry_file()
|
||||||
|
|
||||||
|
if not registry_file:
|
||||||
|
logger.warning(
|
||||||
|
f"No registry found for DMC {self.dmc_version}. "
|
||||||
|
"Component validation will be limited."
|
||||||
|
)
|
||||||
|
return False
|
||||||
|
|
||||||
|
try:
|
||||||
|
with open(registry_file, 'r') as f:
|
||||||
|
data = json.load(f)
|
||||||
|
|
||||||
|
self.loaded_version = data.get('version')
|
||||||
|
self.components = data.get('components', {})
|
||||||
|
self.categories = data.get('categories', {})
|
||||||
|
|
||||||
|
logger.info(
|
||||||
|
f"Loaded component registry v{self.loaded_version} "
|
||||||
|
f"with {len(self.components)} components"
|
||||||
|
)
|
||||||
|
return True
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Failed to load registry: {e}")
|
||||||
|
return False
|
||||||
|
|
||||||
|
def _find_registry_file(self) -> Optional[Path]:
|
||||||
|
"""
|
||||||
|
Find the best matching registry file for the DMC version.
|
||||||
|
|
||||||
|
Strategy:
|
||||||
|
1. Exact major.minor match (e.g., dmc_0_14.json for 0.14.7)
|
||||||
|
2. Fallback to latest available registry
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Path to registry file, or None if not found
|
||||||
|
"""
|
||||||
|
if not self.registry_dir.exists():
|
||||||
|
logger.warning(f"Registry directory not found: {self.registry_dir}")
|
||||||
|
return None
|
||||||
|
|
||||||
|
# Try exact major.minor match
|
||||||
|
if self.dmc_version:
|
||||||
|
parts = self.dmc_version.split('.')
|
||||||
|
if len(parts) >= 2:
|
||||||
|
major_minor = f"{parts[0]}_{parts[1]}"
|
||||||
|
exact_match = self.registry_dir / f"dmc_{major_minor}.json"
|
||||||
|
if exact_match.exists():
|
||||||
|
return exact_match
|
||||||
|
|
||||||
|
# Fallback: find latest registry
|
||||||
|
registry_files = list(self.registry_dir.glob("dmc_*.json"))
|
||||||
|
if registry_files:
|
||||||
|
# Sort by version and return latest
|
||||||
|
registry_files.sort(reverse=True)
|
||||||
|
fallback = registry_files[0]
|
||||||
|
if self.dmc_version:
|
||||||
|
logger.warning(
|
||||||
|
f"No exact match for DMC {self.dmc_version}, "
|
||||||
|
f"using fallback: {fallback.name}"
|
||||||
|
)
|
||||||
|
return fallback
|
||||||
|
|
||||||
|
return None
|
||||||
|
|
||||||
|
def get_component(self, name: str) -> Optional[Dict[str, Any]]:
|
||||||
|
"""
|
||||||
|
Get component definition by name.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
name: Component name (e.g., "Button", "TextInput")
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Component definition dict, or None if not found
|
||||||
|
"""
|
||||||
|
return self.components.get(name)
|
||||||
|
|
||||||
|
def get_component_props(self, name: str) -> Optional[Dict[str, Any]]:
|
||||||
|
"""
|
||||||
|
Get props schema for a component.
|
||||||
|
|
||||||
        Args:
            name: Component name

        Returns:
            Props dict with type info, or None if component not found
        """
        component = self.get_component(name)
        if component:
            return component.get('props', {})
        return None

    def list_components(self, category: Optional[str] = None) -> Dict[str, List[str]]:
        """
        List available components, optionally filtered by category.

        Args:
            category: Optional category filter (e.g., "inputs", "buttons")

        Returns:
            Dict of category -> component names
        """
        if category:
            if category in self.categories:
                return {category: self.categories[category]}
            return {}
        return self.categories

    def get_categories(self) -> List[str]:
        """
        Get list of available component categories.

        Returns:
            List of category names
        """
        return list(self.categories.keys())

    def validate_prop(
        self,
        component: str,
        prop_name: str,
        prop_value: Any
    ) -> Dict[str, Any]:
        """
        Validate a single prop value against the registry.

        Args:
            component: Component name
            prop_name: Prop name
            prop_value: Value to validate

        Returns:
            Dict with valid: bool, error: Optional[str]
        """
        props = self.get_component_props(component)
        if props is None:
            return {
                'valid': False,
                'error': f"Unknown component: {component}"
            }

        if prop_name not in props:
            # Check for similar prop names (typo detection)
            similar = self._find_similar_props(prop_name, props.keys())
            if similar:
                return {
                    'valid': False,
                    'error': f"Unknown prop '{prop_name}' for {component}. Did you mean '{similar}'?"
                }
            return {
                'valid': False,
                'error': f"Unknown prop '{prop_name}' for {component}"
            }

        prop_schema = props[prop_name]
        return self._validate_value(prop_value, prop_schema, prop_name)

    def _validate_value(
        self,
        value: Any,
        schema: Dict[str, Any],
        prop_name: str
    ) -> Dict[str, Any]:
        """
        Validate a value against a prop schema.

        Args:
            value: Value to validate
            schema: Prop schema from registry
            prop_name: Prop name (for error messages)

        Returns:
            Dict with valid: bool, error: Optional[str]
        """
        prop_type = schema.get('type', 'any')

        # The 'any' type is always valid
        if prop_type == 'any':
            return {'valid': True}

        # Check enum values
        if 'enum' in schema:
            if value not in schema['enum']:
                return {
                    'valid': False,
                    'error': f"Prop '{prop_name}' expects one of {schema['enum']}, got '{value}'"
                }
            return {'valid': True}

        # Type checking
        type_checks = {
            'string': lambda v: isinstance(v, str),
            'number': lambda v: isinstance(v, (int, float)),
            'integer': lambda v: isinstance(v, int),
            'boolean': lambda v: isinstance(v, bool),
            'array': lambda v: isinstance(v, list),
            'object': lambda v: isinstance(v, dict),
        }

        checker = type_checks.get(prop_type)
        if checker and not checker(value):
            return {
                'valid': False,
                'error': f"Prop '{prop_name}' expects type '{prop_type}', got '{type(value).__name__}'"
            }

        return {'valid': True}

    def _find_similar_props(
        self,
        prop_name: str,
        available_props: List[str]
    ) -> Optional[str]:
        """
        Find a similar prop name for typo suggestions.

        Uses a simple edit-distance heuristic.

        Args:
            prop_name: The (possibly misspelled) prop name
            available_props: List of valid prop names

        Returns:
            Most similar prop name, or None if no close match
        """
        prop_lower = prop_name.lower()

        for prop in available_props:
            # Exact match after lowercasing
            if prop.lower() == prop_lower:
                return prop
            # Common typos: extra/missing letter
            if abs(len(prop) - len(prop_name)) == 1:
                if prop_lower.startswith(prop.lower()[:3]):
                    return prop

        return None

    def is_loaded(self) -> bool:
        """Check if the registry is loaded."""
        return len(self.components) > 0


def load_registry(dmc_version: Optional[str] = None) -> ComponentRegistry:
    """
    Convenience function to load and return a component registry.

    Args:
        dmc_version: Optional DMC version string

    Returns:
        Loaded ComponentRegistry instance
    """
    registry = ComponentRegistry(dmc_version)
    registry.load()
    return registry
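
Usage sketch (illustrative, not part of the diff): assuming the package is importable as mcp_server and a registry JSON for the requested DMC version ships under registry/, the validate_prop contract looks like this. The "0.14.7" version string and the 'color' prop are assumptions for illustration.

    from mcp_server.component_registry import load_registry

    registry = load_registry("0.14.7")  # version string is illustrative

    # A known prop with an allowed value passes through _validate_value:
    print(registry.validate_prop("Button", "color", "red"))
    # -> {'valid': True}  (assuming the registry defines 'color' and allows 'red')

    # A one-letter typo triggers the _find_similar_props heuristic, which
    # matches names whose lengths differ by one and share a 3-letter prefix:
    print(registry.validate_prop("Button", "colr", "red"))
    # -> {'valid': False, 'error': "Unknown prop 'colr' for Button. Did you mean 'color'?"}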

mcp-servers/viz-platform/mcp_server/config.py (new file, 172 lines)
@@ -0,0 +1,172 @@
"""
Configuration loader for viz-platform MCP Server.

Implements hybrid configuration system:
- System-level: ~/.config/claude/viz-platform.env (theme preferences)
- Project-level: .env (DMC version overrides)
- Auto-detection: DMC package version from installed package
"""
from pathlib import Path
from dotenv import load_dotenv
import os
import logging
from typing import Any, Dict, Optional

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


class VizPlatformConfig:
    """Hybrid configuration loader for viz-platform tools"""

    def __init__(self):
        self.dmc_version: Optional[str] = None
        self.theme_dir_user: Path = Path.home() / '.config' / 'claude' / 'themes'
        self.theme_dir_project: Optional[Path] = None
        self.default_theme: Optional[str] = None

    def load(self) -> Dict[str, Any]:
        """
        Load configuration from system and project levels.

        Returns:
            Dict containing dmc_version, theme directories, and availability flags
        """
        # Load system config
        system_config = Path.home() / '.config' / 'claude' / 'viz-platform.env'
        if system_config.exists():
            load_dotenv(system_config)
            logger.info(f"Loaded system configuration from {system_config}")

        # Find project directory
        project_dir = self._find_project_directory()

        # Load project config (overrides system)
        if project_dir:
            project_config = project_dir / '.env'
            if project_config.exists():
                load_dotenv(project_config, override=True)
                logger.info(f"Loaded project configuration from {project_config}")

            # Set project theme directory
            self.theme_dir_project = project_dir / '.viz-platform' / 'themes'

        # Get DMC version (from env or auto-detect)
        self.dmc_version = os.getenv('DMC_VERSION') or self._detect_dmc_version()
        self.default_theme = os.getenv('VIZ_DEFAULT_THEME')

        # Ensure user theme directory exists
        self.theme_dir_user.mkdir(parents=True, exist_ok=True)

        return {
            'dmc_version': self.dmc_version,
            'dmc_available': self.dmc_version is not None,
            'theme_dir_user': str(self.theme_dir_user),
            'theme_dir_project': str(self.theme_dir_project) if self.theme_dir_project else None,
            'default_theme': self.default_theme,
            'project_dir': str(project_dir) if project_dir else None
        }

    def _detect_dmc_version(self) -> Optional[str]:
        """
        Auto-detect installed Dash Mantine Components version.

        Returns:
            Version string (e.g., "0.14.7") or None if not installed
        """
        try:
            from importlib.metadata import version
            dmc_version = version('dash-mantine-components')
            logger.info(f"Detected DMC version: {dmc_version}")
            return dmc_version
        except ImportError:
            logger.warning("dash-mantine-components not installed - using registry fallback")
            return None
        except Exception as e:
            logger.warning(f"Could not detect DMC version: {e}")
            return None

    def _find_project_directory(self) -> Optional[Path]:
        """
        Find the user's project directory.

        Returns:
            Path to project directory, or None if not found
        """
        # Strategy 1: Check CLAUDE_PROJECT_DIR environment variable
        project_dir = os.getenv('CLAUDE_PROJECT_DIR')
        if project_dir:
            path = Path(project_dir)
            if path.exists():
                logger.info(f"Found project directory from CLAUDE_PROJECT_DIR: {path}")
                return path

        # Strategy 2: Check PWD
        pwd = os.getenv('PWD')
        if pwd:
            path = Path(pwd)
            if path.exists() and (
                (path / '.git').exists() or
                (path / '.env').exists() or
                (path / '.viz-platform').exists()
            ):
                logger.info(f"Found project directory from PWD: {path}")
                return path

        # Strategy 3: Check current working directory
        cwd = Path.cwd()
        if (cwd / '.git').exists() or (cwd / '.env').exists() or (cwd / '.viz-platform').exists():
            logger.info(f"Found project directory from cwd: {cwd}")
            return cwd

        logger.debug("Could not determine project directory")
        return None


def load_config() -> Dict[str, Any]:
    """
    Convenience function to load configuration.

    Returns:
        Configuration dictionary
    """
    config = VizPlatformConfig()
    return config.load()


def check_dmc_version() -> Dict[str, Any]:
    """
    Check DMC installation status for SessionStart hook.

    Returns:
        Dict with installation status and version info
    """
    config = load_config()

    if not config.get('dmc_available'):
        return {
            'installed': False,
            'message': 'dash-mantine-components not installed. Run: pip install dash-mantine-components'
        }

    version = config.get('dmc_version', 'unknown')

    # Check for registry compatibility
    registry_path = Path(__file__).parent.parent / 'registry'
    major_minor = '.'.join(version.split('.')[:2]) if version else None
    registry_file = registry_path / f'dmc_{major_minor.replace(".", "_")}.json' if major_minor else None

    if registry_file and registry_file.exists():
        return {
            'installed': True,
            'version': version,
            'registry_available': True,
            'message': f'DMC {version} ready with component registry'
        }
    else:
        return {
            'installed': True,
            'version': version,
            'registry_available': False,
            'message': f'DMC {version} installed but no matching registry. Validation may be limited.'
        }
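
To make the precedence concrete, here is an illustrative sketch (file contents and values are assumptions, not from the diff): the system-level env file supplies defaults, the project .env wins because it is loaded with override=True, and an explicit DMC_VERSION short-circuits auto-detection.

    # ~/.config/claude/viz-platform.env   (system level)
    #     VIZ_DEFAULT_THEME=corporate
    # <project>/.env                      (project level, wins on conflict)
    #     DMC_VERSION=0.14.7
    from mcp_server.config import load_config, check_dmc_version

    cfg = load_config()
    print(cfg["dmc_version"])    # "0.14.7" from .env; otherwise the auto-detected version
    print(cfg["default_theme"])  # "corporate" from the system file

    # check_dmc_version() then looks for registry/dmc_0_14.json (major.minor,
    # dots replaced by underscores) to decide whether full validation is available.
    print(check_dmc_version()["message"])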

mcp-servers/viz-platform/mcp_server/dmc_tools.py (new file, 306 lines)
@@ -0,0 +1,306 @@
"""
DMC (Dash Mantine Components) validation tools.

Provides component constraint layer to prevent Claude from hallucinating invalid props.
"""
import logging
from typing import Dict, List, Optional, Any

from .component_registry import ComponentRegistry

logger = logging.getLogger(__name__)


class DMCTools:
    """
    DMC component validation tools.

    These tools provide the "constraint layer" that validates component usage
    against a version-locked registry of DMC components.
    """

    def __init__(self, registry: Optional[ComponentRegistry] = None):
        """
        Initialize DMC tools with component registry.

        Args:
            registry: ComponentRegistry instance. If None, creates one.
        """
        self.registry = registry
        self._initialized = False

    def initialize(self, dmc_version: Optional[str] = None) -> bool:
        """
        Initialize the registry if not already provided.

        Args:
            dmc_version: DMC version to load registry for

        Returns:
            True if initialized successfully
        """
        if self.registry is None:
            self.registry = ComponentRegistry(dmc_version)

        if not self.registry.is_loaded():
            self.registry.load()

        self._initialized = self.registry.is_loaded()
        return self._initialized

    async def list_components(
        self,
        category: Optional[str] = None
    ) -> Dict[str, Any]:
        """
        List available DMC components, optionally filtered by category.

        Args:
            category: Optional category filter (e.g., "inputs", "buttons", "navigation")

        Returns:
            Dict with:
            - components: Dict[category -> [component names]]
            - categories: List of available categories
            - version: Loaded DMC registry version
            - total_count: Total number of components
        """
        if not self._initialized:
            return {
                "error": "Registry not initialized",
                "components": {},
                "categories": [],
                "version": None,
                "total_count": 0
            }

        components = self.registry.list_components(category)
        all_categories = self.registry.get_categories()

        # Count total components
        total = sum(len(comps) for comps in components.values())

        return {
            "components": components,
            "categories": all_categories if not category else [category],
            "version": self.registry.loaded_version,
            "total_count": total
        }

    async def get_component_props(self, component: str) -> Dict[str, Any]:
        """
        Get props schema for a specific component.

        Args:
            component: Component name (e.g., "Button", "TextInput")

        Returns:
            Dict with:
            - component: Component name
            - description: Component description
            - props: Dict of prop name -> {type, default, enum, description}
            - prop_count: Number of props
            - required: List of required prop names
            Or error dict if component not found
        """
        if not self._initialized:
            return {
                "error": "Registry not initialized",
                "component": component,
                "props": {},
                "prop_count": 0
            }

        comp_def = self.registry.get_component(component)
        if not comp_def:
            # Try to suggest similar component name
            similar = self._find_similar_component(component)
            error_msg = f"Component '{component}' not found in registry"
            if similar:
                error_msg += f". Did you mean '{similar}'?"

            return {
                "error": error_msg,
                "component": component,
                "props": {},
                "prop_count": 0
            }

        props = comp_def.get('props', {})

        # Extract required props
        required = [
            name for name, schema in props.items()
            if schema.get('required', False)
        ]

        return {
            "component": component,
            "description": comp_def.get('description', ''),
            "props": props,
            "prop_count": len(props),
            "required": required,
            "version": self.registry.loaded_version
        }

    async def validate_component(
        self,
        component: str,
        props: Dict[str, Any]
    ) -> Dict[str, Any]:
        """
        Validate component props against registry.

        Args:
            component: Component name
            props: Props dict to validate

        Returns:
            Dict with:
            - valid: bool - True if all props are valid
            - errors: List of error messages
            - warnings: List of warning messages
            - validated_props: Number of props validated
            - component: Component name for reference
        """
        if not self._initialized:
            return {
                "valid": False,
                "errors": ["Registry not initialized"],
                "warnings": [],
                "validated_props": 0,
                "component": component
            }

        errors: List[str] = []
        warnings: List[str] = []

        # Check if component exists
        comp_def = self.registry.get_component(component)
        if not comp_def:
            similar = self._find_similar_component(component)
            error_msg = f"Unknown component: {component}"
            if similar:
                error_msg += f". Did you mean '{similar}'?"
            errors.append(error_msg)

            return {
                "valid": False,
                "errors": errors,
                "warnings": warnings,
                "validated_props": 0,
                "component": component
            }

        comp_props = comp_def.get('props', {})

        # Check for required props
        for prop_name, prop_schema in comp_props.items():
            if prop_schema.get('required', False) and prop_name not in props:
                errors.append(f"Missing required prop: '{prop_name}'")

        # Validate each provided prop
        for prop_name, prop_value in props.items():
            # Skip special props that are always allowed
            if prop_name in ('id', 'children', 'className', 'style', 'key'):
                continue

            result = self.registry.validate_prop(component, prop_name, prop_value)

            if not result.get('valid', True):
                error = result.get('error', f"Invalid prop: {prop_name}")
                # Type mismatches are soft failures (warnings); unknown props
                # and enum violations are hard errors.
                if "expects type" in error:
                    warnings.append(f"⚠️ {error}")
                else:
                    errors.append(f"❌ {error}")

        # Check for props that exist but might have common mistakes
        self._check_common_mistakes(component, props, warnings)

        return {
            "valid": len(errors) == 0,
            "errors": errors,
            "warnings": warnings,
            "validated_props": len(props),
            "component": component,
            "version": self.registry.loaded_version
        }

    def _find_similar_component(self, component: str) -> Optional[str]:
        """
        Find a similar component name for suggestions.

        Args:
            component: The (possibly misspelled) component name

        Returns:
            Similar component name, or None if no close match
        """
        if not self.registry:
            return None

        comp_lower = component.lower()
        all_components = []
        for comps in self.registry.categories.values():
            all_components.extend(comps)

        for comp in all_components:
            # Exact match after lowercasing
            if comp.lower() == comp_lower:
                return comp
            # Check if it's a prefix match
            if comp.lower().startswith(comp_lower) or comp_lower.startswith(comp.lower()):
                return comp
            # Check for common typos
            if abs(len(comp) - len(component)) <= 2:
                if comp_lower[:4] == comp.lower()[:4]:
                    return comp

        return None

    def _check_common_mistakes(
        self,
        component: str,
        props: Dict[str, Any],
        warnings: List[str]
    ) -> None:
        """
        Check for common prop usage mistakes and add warnings.

        Args:
            component: Component name
            props: Props being used
            warnings: List to append warnings to
        """
        # Common mistake: using 'onclick' instead of callback pattern
        if 'onclick' in (p.lower() for p in props):
            warnings.append(
                "⚠️ Dash uses callback patterns, not inline event handlers. "
                "Use 'n_clicks' prop with a callback instead."
            )

        # Common mistake: using 'class' instead of 'className'
        if 'class' in props:
            warnings.append(
                "⚠️ Use 'className' instead of 'class' for CSS classes."
            )

        # Button-specific checks
        if component == 'Button':
            if 'href' in props and 'component' not in props:
                warnings.append(
                    "⚠️ Button with 'href' should also set 'component=\"a\"' for proper anchor behavior."
                )

        # Input-specific checks
        if 'Input' in component:
            if 'value' in props and 'onChange' in props:
                warnings.append(
                    "⚠️ Dash uses 'value' prop with callbacks, not 'onChange'. "
                    "The value updates automatically through Dash callbacks."
                )
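
A quick end-to-end sketch of the constraint layer (illustrative; the Button props are assumptions, and the import path assumes the package is importable as mcp_server). The tool methods are coroutines, so they are driven with asyncio here.

    import asyncio
    from mcp_server.dmc_tools import DMCTools

    async def main():
        tools = DMCTools()
        tools.initialize()  # builds and loads the version-locked registry

        report = await tools.validate_component(
            "Button",
            {"children": "Save", "class": "btn", "onclick": "save()"},
        )
        # 'children' is skipped as an always-allowed prop; 'class' and
        # 'onclick' are flagged (typically as unknown props), and
        # _check_common_mistakes adds the className / n_clicks warnings.
        print(report["valid"])  # expected False
        for msg in report["errors"] + report["warnings"]:
            print(msg)

    asyncio.run(main())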

mcp-servers/viz-platform/mcp_server/layout_tools.py (new file, 553 lines)
@@ -0,0 +1,553 @@
"""
Layout composition tools for dashboard building.

Provides tools for creating structured layouts with grids, filters, and sections.
"""
import logging
from typing import Dict, List, Optional, Any
from uuid import uuid4

logger = logging.getLogger(__name__)


# Standard responsive breakpoints (Mantine/Bootstrap-aligned)
DEFAULT_BREAKPOINTS = {
    "xs": {
        "min_width": "0px",
        "max_width": "575px",
        "cols": 1,
        "spacing": "xs",
        "description": "Extra small devices (phones, portrait)"
    },
    "sm": {
        "min_width": "576px",
        "max_width": "767px",
        "cols": 2,
        "spacing": "sm",
        "description": "Small devices (phones, landscape)"
    },
    "md": {
        "min_width": "768px",
        "max_width": "991px",
        "cols": 6,
        "spacing": "md",
        "description": "Medium devices (tablets)"
    },
    "lg": {
        "min_width": "992px",
        "max_width": "1199px",
        "cols": 12,
        "spacing": "md",
        "description": "Large devices (desktops)"
    },
    "xl": {
        "min_width": "1200px",
        "max_width": None,
        "cols": 12,
        "spacing": "lg",
        "description": "Extra large devices (large desktops)"
    }
}


# Layout templates
TEMPLATES = {
    "dashboard": {
        "sections": ["header", "filters", "main", "footer"],
        "default_grid": {"cols": 12, "spacing": "md"},
        "description": "Standard dashboard with header, filters, main content, and footer"
    },
    "report": {
        "sections": ["title", "summary", "content", "appendix"],
        "default_grid": {"cols": 1, "spacing": "lg"},
        "description": "Report layout with title, summary, and content sections"
    },
    "form": {
        "sections": ["header", "fields", "actions"],
        "default_grid": {"cols": 2, "spacing": "md"},
        "description": "Form layout with header, fields, and action buttons"
    },
    "blank": {
        "sections": ["main"],
        "default_grid": {"cols": 12, "spacing": "md"},
        "description": "Blank canvas for custom layouts"
    }
}


# Filter type definitions
FILTER_TYPES = {
    "dropdown": {
        "component": "Select",
        "props": ["label", "data", "placeholder", "clearable", "searchable", "value"]
    },
    "multi_select": {
        "component": "MultiSelect",
        "props": ["label", "data", "placeholder", "clearable", "searchable", "value"]
    },
    "date_range": {
        "component": "DateRangePicker",
        "props": ["label", "placeholder", "value", "minDate", "maxDate"]
    },
    "date": {
        "component": "DatePicker",
        "props": ["label", "placeholder", "value", "minDate", "maxDate"]
    },
    "search": {
        "component": "TextInput",
        "props": ["label", "placeholder", "value", "icon"]
    },
    "checkbox_group": {
        "component": "CheckboxGroup",
        "props": ["label", "children", "value"]
    },
    "radio_group": {
        "component": "RadioGroup",
        "props": ["label", "children", "value"]
    },
    "slider": {
        "component": "Slider",
        "props": ["label", "min", "max", "step", "value", "marks"]
    },
    "range_slider": {
        "component": "RangeSlider",
        "props": ["label", "min", "max", "step", "value", "marks"]
    }
}


class LayoutTools:
    """
    Dashboard layout composition tools.

    Creates layouts that map to DMC Grid and AppShell components.
    """

    def __init__(self):
        """Initialize layout tools."""
        self._layouts: Dict[str, Dict[str, Any]] = {}

    async def layout_create(
        self,
        name: str,
        template: Optional[str] = None
    ) -> Dict[str, Any]:
        """
        Create a new layout container.

        Args:
            name: Unique name for the layout
            template: Optional template (dashboard, report, form, blank)

        Returns:
            Dict with:
            - layout_ref: Reference to use in other tools
            - template: Template used
            - sections: Available sections
            - grid: Default grid configuration
        """
        # Validate template
        template = template or "blank"
        if template not in TEMPLATES:
            return {
                "error": f"Invalid template '{template}'. Must be one of: {list(TEMPLATES.keys())}",
                "layout_ref": None
            }

        # Check for name collision
        if name in self._layouts:
            return {
                "error": f"Layout '{name}' already exists. Use a different name or modify existing.",
                "layout_ref": name
            }

        template_config = TEMPLATES[template]

        # Create layout structure
        layout = {
            "id": str(uuid4()),
            "name": name,
            "template": template,
            "sections": {section: {"items": []} for section in template_config["sections"]},
            "grid": template_config["default_grid"].copy(),
            "filters": [],
            "metadata": {
                "description": template_config["description"]
            }
        }

        self._layouts[name] = layout

        return {
            "layout_ref": name,
            "template": template,
            "sections": template_config["sections"],
            "grid": layout["grid"],
            "description": template_config["description"]
        }

    async def layout_add_filter(
        self,
        layout_ref: str,
        filter_type: str,
        options: Dict[str, Any]
    ) -> Dict[str, Any]:
        """
        Add a filter control to a layout.

        Args:
            layout_ref: Layout name to add filter to
            filter_type: Type of filter (dropdown, date_range, search, checkbox_group, etc.)
            options: Filter options (label, data for dropdown, placeholder, position)

        Returns:
            Dict with:
            - filter_id: Unique ID for the filter
            - component: DMC component that will be used
            - props: Props that were set
            - position: Where filter was placed
        """
        # Validate layout exists
        if layout_ref not in self._layouts:
            return {
                "error": f"Layout '{layout_ref}' not found. Create it first with layout_create.",
                "filter_id": None
            }

        # Validate filter type
        if filter_type not in FILTER_TYPES:
            return {
                "error": f"Invalid filter_type '{filter_type}'. Must be one of: {list(FILTER_TYPES.keys())}",
                "filter_id": None
            }

        filter_config = FILTER_TYPES[filter_type]
        layout = self._layouts[layout_ref]

        # Generate filter ID
        filter_id = f"filter_{filter_type}_{len(layout['filters'])}"

        # Extract relevant props
        props = {"id": filter_id}
        for prop in filter_config["props"]:
            if prop in options:
                props[prop] = options[prop]

        # Determine position
        position = options.get("position", "filters")
        if position not in layout["sections"]:
            # Default to first available section
            position = list(layout["sections"].keys())[0]

        # Create filter definition
        filter_def = {
            "id": filter_id,
            "type": filter_type,
            "component": filter_config["component"],
            "props": props,
            "position": position
        }

        layout["filters"].append(filter_def)
        layout["sections"][position]["items"].append({
            "type": "filter",
            "ref": filter_id
        })

        return {
            "filter_id": filter_id,
            "component": filter_config["component"],
            "props": props,
            "position": position,
            "layout_ref": layout_ref
        }

    async def layout_set_grid(
        self,
        layout_ref: str,
        grid: Dict[str, Any]
    ) -> Dict[str, Any]:
        """
        Configure the grid system for a layout.

        Args:
            layout_ref: Layout name to configure
            grid: Grid configuration:
                - cols: Number of columns (default 12)
                - spacing: Gap between items (xs, sm, md, lg, xl)
                - breakpoints: Responsive breakpoints {xs: cols, sm: cols, ...}
                - gutter: Gutter size

        Returns:
            Dict with:
            - grid: Updated grid configuration
            - layout_ref: Layout reference
        """
        # Validate layout exists
        if layout_ref not in self._layouts:
            return {
                "error": f"Layout '{layout_ref}' not found. Create it first with layout_create.",
                "grid": None
            }

        layout = self._layouts[layout_ref]

        # Validate spacing if provided
        valid_spacing = ["xs", "sm", "md", "lg", "xl"]
        if "spacing" in grid and grid["spacing"] not in valid_spacing:
            return {
                "error": f"Invalid spacing '{grid['spacing']}'. Must be one of: {valid_spacing}",
                "grid": layout["grid"]
            }

        # Validate cols
        if "cols" in grid:
            cols = grid["cols"]
            if not isinstance(cols, int) or cols < 1 or cols > 24:
                return {
                    "error": f"Invalid cols '{cols}'. Must be integer between 1 and 24.",
                    "grid": layout["grid"]
                }

        # Update grid configuration
        layout["grid"].update(grid)

        # Process breakpoints if provided
        if "breakpoints" in grid:
            bp = grid["breakpoints"]
            layout["grid"]["breakpoints"] = bp

        return {
            "grid": layout["grid"],
            "layout_ref": layout_ref
        }

    async def layout_get(self, layout_ref: str) -> Dict[str, Any]:
        """
        Get a layout's full configuration.

        Args:
            layout_ref: Layout name to retrieve

        Returns:
            Full layout configuration or error
        """
        if layout_ref not in self._layouts:
            return {
                "error": f"Layout '{layout_ref}' not found.",
                "layout": None
            }

        layout = self._layouts[layout_ref]

        return {
            "layout": layout,
            "filter_count": len(layout["filters"]),
            "sections": list(layout["sections"].keys())
        }

    async def layout_add_section(
        self,
        layout_ref: str,
        section_name: str,
        position: Optional[int] = None
    ) -> Dict[str, Any]:
        """
        Add a custom section to a layout.

        Args:
            layout_ref: Layout name
            section_name: Name for the new section
            position: Optional position index (appends if not specified)

        Returns:
            Dict with sections list and the new section name
        """
        if layout_ref not in self._layouts:
            return {
                "error": f"Layout '{layout_ref}' not found.",
                "sections": []
            }

        layout = self._layouts[layout_ref]

        if section_name in layout["sections"]:
            return {
                "error": f"Section '{section_name}' already exists.",
                "sections": list(layout["sections"].keys())
            }

        # Add new section
        layout["sections"][section_name] = {"items": []}

        return {
            "section_name": section_name,
            "sections": list(layout["sections"].keys()),
            "layout_ref": layout_ref
        }

    def get_available_templates(self) -> Dict[str, Any]:
        """Get list of available layout templates."""
        return {
            name: {
                "sections": config["sections"],
                "description": config["description"]
            }
            for name, config in TEMPLATES.items()
        }

    def get_available_filter_types(self) -> Dict[str, Any]:
        """Get list of available filter types."""
        return {
            name: {
                "component": config["component"],
                "props": config["props"]
            }
            for name, config in FILTER_TYPES.items()
        }

    async def layout_set_breakpoints(
        self,
        layout_ref: str,
        breakpoints: Dict[str, Dict[str, Any]],
        mobile_first: bool = True
    ) -> Dict[str, Any]:
        """
        Configure responsive breakpoints for a layout.

        Args:
            layout_ref: Layout name to configure
            breakpoints: Breakpoint configuration dict:
                {
                    "xs": {"cols": 1, "spacing": "xs"},
                    "sm": {"cols": 2, "spacing": "sm"},
                    "md": {"cols": 6, "spacing": "md"},
                    "lg": {"cols": 12, "spacing": "md"},
                    "xl": {"cols": 12, "spacing": "lg"}
                }
            mobile_first: If True, use min-width media queries (default)

        Returns:
            Dict with:
            - breakpoints: Complete breakpoint configuration
            - css_media_queries: Generated CSS media queries
            - mobile_first: Whether mobile-first approach is used
        """
        # Validate layout exists
        if layout_ref not in self._layouts:
            return {
                "error": f"Layout '{layout_ref}' not found. Create it first with layout_create.",
                "breakpoints": None
            }

        layout = self._layouts[layout_ref]

        # Validate breakpoint names
        valid_breakpoints = ["xs", "sm", "md", "lg", "xl"]
        for bp_name in breakpoints.keys():
            if bp_name not in valid_breakpoints:
                return {
                    "error": f"Invalid breakpoint '{bp_name}'. Must be one of: {valid_breakpoints}",
                    "breakpoints": layout.get("breakpoints")
                }

        # Merge with defaults
        merged_breakpoints = {}
        for bp_name in valid_breakpoints:
            default = DEFAULT_BREAKPOINTS[bp_name].copy()
            if bp_name in breakpoints:
                default.update(breakpoints[bp_name])
            merged_breakpoints[bp_name] = default

        # Validate spacing values
        valid_spacing = ["xs", "sm", "md", "lg", "xl"]
        for bp_name, bp_config in merged_breakpoints.items():
            if "spacing" in bp_config and bp_config["spacing"] not in valid_spacing:
                return {
                    "error": f"Invalid spacing '{bp_config['spacing']}' for breakpoint '{bp_name}'. Must be one of: {valid_spacing}",
                    "breakpoints": layout.get("breakpoints")
                }

        # Validate column counts
        for bp_name, bp_config in merged_breakpoints.items():
            if "cols" in bp_config:
                cols = bp_config["cols"]
                if not isinstance(cols, int) or cols < 1 or cols > 24:
                    return {
                        "error": f"Invalid cols '{cols}' for breakpoint '{bp_name}'. Must be integer between 1 and 24.",
                        "breakpoints": layout.get("breakpoints")
                    }

        # Generate CSS media queries
        css_queries = self._generate_media_queries(merged_breakpoints, mobile_first)

        # Store in layout
        layout["breakpoints"] = merged_breakpoints
        layout["mobile_first"] = mobile_first
        layout["responsive_css"] = css_queries

        return {
            "layout_ref": layout_ref,
            "breakpoints": merged_breakpoints,
            "mobile_first": mobile_first,
            "css_media_queries": css_queries
        }

    def _generate_media_queries(
        self,
        breakpoints: Dict[str, Dict[str, Any]],
        mobile_first: bool
    ) -> List[str]:
        """Generate CSS media queries for breakpoints."""
        queries = []
        bp_order = ["xs", "sm", "md", "lg", "xl"]

        if mobile_first:
            # Use min-width queries (mobile-first)
            for bp_name in bp_order[1:]:  # Skip xs (base styles)
                bp = breakpoints[bp_name]
                min_width = bp.get("min_width", DEFAULT_BREAKPOINTS[bp_name]["min_width"])
                if min_width and min_width != "0px":
                    queries.append(f"@media (min-width: {min_width}) {{ /* {bp_name} styles */ }}")
        else:
            # Use max-width queries (desktop-first)
            for bp_name in reversed(bp_order[:-1]):  # Skip xl (base styles)
                bp = breakpoints[bp_name]
                max_width = bp.get("max_width", DEFAULT_BREAKPOINTS[bp_name]["max_width"])
                if max_width:
                    queries.append(f"@media (max-width: {max_width}) {{ /* {bp_name} styles */ }}")

        return queries

    async def layout_get_breakpoints(self, layout_ref: str) -> Dict[str, Any]:
        """
        Get the breakpoint configuration for a layout.

        Args:
            layout_ref: Layout name

        Returns:
            Dict with breakpoint configuration
        """
        if layout_ref not in self._layouts:
            return {
                "error": f"Layout '{layout_ref}' not found.",
                "breakpoints": None
            }

        layout = self._layouts[layout_ref]

        return {
            "layout_ref": layout_ref,
            "breakpoints": layout.get("breakpoints", DEFAULT_BREAKPOINTS.copy()),
            "mobile_first": layout.get("mobile_first", True),
            "css_media_queries": layout.get("responsive_css", [])
        }

    def get_default_breakpoints(self) -> Dict[str, Any]:
        """Get the default breakpoint configuration."""
        return {
            "breakpoints": DEFAULT_BREAKPOINTS.copy(),
            "description": "Standard responsive breakpoints aligned with Mantine/Bootstrap",
            "mobile_first": True
        }
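
A composition sketch (illustrative; names and filter data are assumptions): create a layout from a template, attach a filter, then make it responsive. Only the overridden breakpoints need to be passed, since the rest fall back to DEFAULT_BREAKPOINTS.

    import asyncio
    from mcp_server.layout_tools import LayoutTools

    async def main():
        layouts = LayoutTools()

        created = await layouts.layout_create("sales", template="dashboard")
        print(created["sections"])  # ['header', 'filters', 'main', 'footer']

        # Filters map to DMC components via FILTER_TYPES (dropdown -> Select).
        added = await layouts.layout_add_filter(
            "sales",
            "dropdown",
            {"label": "Region", "data": ["EMEA", "APAC"], "position": "filters"},
        )
        print(added["filter_id"], added["component"])  # filter_dropdown_0 Select

        bp = await layouts.layout_set_breakpoints("sales", {"md": {"cols": 4}})
        print(bp["css_media_queries"][0])
        # -> "@media (min-width: 576px) { /* sm styles */ }"

    asyncio.run(main())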

mcp-servers/viz-platform/mcp_server/page_tools.py (new file, 366 lines)
@@ -0,0 +1,366 @@
"""
Multi-page app tools for viz-platform.

Provides tools for building complete Dash applications with routing and navigation.
"""
import logging
from typing import Dict, List, Optional, Any
from uuid import uuid4

logger = logging.getLogger(__name__)


# Navigation position options
NAV_POSITIONS = ["top", "left", "right"]

# Auth types supported
AUTH_TYPES = ["none", "basic", "oauth", "custom"]


class PageTools:
    """
    Multi-page Dash application tools.

    Creates page definitions, navigation, and auth configuration.
    """

    def __init__(self):
        """Initialize page tools."""
        self._pages: Dict[str, Dict[str, Any]] = {}
        self._navbars: Dict[str, Dict[str, Any]] = {}
        self._app_config: Dict[str, Any] = {
            "title": "Dash App",
            "suppress_callback_exceptions": True
        }

    async def page_create(
        self,
        name: str,
        path: str,
        layout_ref: Optional[str] = None,
        title: Optional[str] = None
    ) -> Dict[str, Any]:
        """
        Create a new page definition.

        Args:
            name: Unique page name (used as identifier)
            path: URL path for the page (e.g., "/", "/settings")
            layout_ref: Optional layout reference to use for the page
            title: Optional page title (defaults to name)

        Returns:
            Dict with:
            - page_ref: Reference to use in other tools
            - path: URL path
            - registered: Whether page was registered
        """
        # Validate path format
        if not path.startswith('/'):
            return {
                "error": f"Path must start with '/'. Got: {path}",
                "page_ref": None
            }

        # Check for name collision
        if name in self._pages:
            return {
                "error": f"Page '{name}' already exists. Use a different name.",
                "page_ref": name
            }

        # Check for path collision
        for page_name, page_data in self._pages.items():
            if page_data['path'] == path:
                return {
                    "error": f"Path '{path}' already used by page '{page_name}'.",
                    "page_ref": None
                }

        # Create page definition
        page = {
            "id": str(uuid4()),
            "name": name,
            "path": path,
            "title": title or name,
            "layout_ref": layout_ref,
            "auth": None,
            "metadata": {}
        }

        self._pages[name] = page

        return {
            "page_ref": name,
            "path": path,
            "title": page["title"],
            "layout_ref": layout_ref,
            "registered": True
        }

    async def page_add_navbar(
        self,
        pages: List[str],
        options: Optional[Dict[str, Any]] = None
    ) -> Dict[str, Any]:
        """
        Generate a navigation component linking pages.

        Args:
            pages: List of page names to include in navigation
            options: Navigation options:
                - position: "top", "left", or "right"
                - style: Style variant
                - brand: Brand/logo text or config
                - collapsible: Whether to collapse on mobile

        Returns:
            Dict with:
            - navbar_id: Navigation ID
            - pages: List of page links generated
            - component: DMC component structure
        """
        options = options or {}

        # Validate pages exist
        missing_pages = [p for p in pages if p not in self._pages]
        if missing_pages:
            return {
                "error": f"Pages not found: {missing_pages}. Create them first.",
                "navbar_id": None
            }

        # Validate position
        position = options.get("position", "top")
        if position not in NAV_POSITIONS:
            return {
                "error": f"Invalid position '{position}'. Must be one of: {NAV_POSITIONS}",
                "navbar_id": None
            }

        # Generate navbar ID
        navbar_id = f"navbar_{len(self._navbars)}"

        # Build page links
        page_links = []
        for page_name in pages:
            page = self._pages[page_name]
            page_links.append({
                "label": page["title"],
                "href": page["path"],
                "page_ref": page_name
            })

        # Build DMC component structure
        if position == "top":
            component = self._build_top_navbar(page_links, options)
        else:
            component = self._build_side_navbar(page_links, options, position)

        # Store navbar config
        self._navbars[navbar_id] = {
            "id": navbar_id,
            "position": position,
            "pages": pages,
            "options": options,
            "component": component
        }

        return {
            "navbar_id": navbar_id,
            "position": position,
            "pages": page_links,
            "component": component
        }

    async def page_set_auth(
        self,
        page_ref: str,
        auth_config: Dict[str, Any]
    ) -> Dict[str, Any]:
        """
        Configure authentication for a page.

        Args:
            page_ref: Page name to configure
            auth_config: Authentication configuration:
                - type: "none", "basic", "oauth", "custom"
                - required: Whether auth is required (default True)
                - roles: List of required roles (optional)
                - redirect: Redirect path for unauthenticated users

        Returns:
            Dict with:
            - page_ref: Page reference
            - auth_type: Type of auth configured
            - protected: Whether page is protected
        """
        # Validate page exists
        if page_ref not in self._pages:
            available = list(self._pages.keys())
            return {
                "error": f"Page '{page_ref}' not found. Available: {available}",
                "page_ref": page_ref
            }

        # Validate auth type
        auth_type = auth_config.get("type", "basic")
        if auth_type not in AUTH_TYPES:
            return {
                "error": f"Invalid auth type '{auth_type}'. Must be one of: {AUTH_TYPES}",
                "page_ref": page_ref
            }

        # Build auth config
        auth = {
            "type": auth_type,
            "required": auth_config.get("required", True),
            "roles": auth_config.get("roles", []),
            "redirect": auth_config.get("redirect", "/login")
        }

        # Handle OAuth-specific config
        if auth_type == "oauth":
            auth["provider"] = auth_config.get("provider", "generic")
            auth["scopes"] = auth_config.get("scopes", [])

        # Update page
        self._pages[page_ref]["auth"] = auth

        return {
            "page_ref": page_ref,
            "auth_type": auth_type,
            "protected": auth["required"],
            "roles": auth["roles"],
            "redirect": auth["redirect"]
        }

    async def page_list(self) -> Dict[str, Any]:
        """
        List all registered pages.

        Returns:
            Dict with pages and their configurations
        """
        pages_info = {}
        for name, page in self._pages.items():
            pages_info[name] = {
                "path": page["path"],
                "title": page["title"],
                "layout_ref": page["layout_ref"],
                "protected": page["auth"] is not None and page["auth"].get("required", False)
            }

        return {
            "pages": pages_info,
            "count": len(pages_info),
            "navbars": list(self._navbars.keys())
        }

    async def page_get_app_config(self) -> Dict[str, Any]:
        """
        Get the complete app configuration for Dash.

        Returns:
            Dict with app config including pages, navbars, and settings
        """
        # Build pages config
        pages_config = []
        for name, page in self._pages.items():
            pages_config.append({
                "name": name,
                "path": page["path"],
                "title": page["title"],
                "layout_ref": page["layout_ref"]
            })

        # Build routing config
        routes = {page["path"]: name for name, page in self._pages.items()}

        return {
            "app": self._app_config,
            "pages": pages_config,
            "routes": routes,
            "navbars": list(self._navbars.values()),
            "page_count": len(self._pages)
        }

    def _build_top_navbar(
        self,
        page_links: List[Dict[str, str]],
        options: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Build a top navigation bar component."""
        brand = options.get("brand", "App")

        # Build nav links
        nav_items = []
        for link in page_links:
            nav_items.append({
                "component": "NavLink",
                "props": {
                    "label": link["label"],
                    "href": link["href"]
                }
            })

        return {
            "component": "AppShell.Header",
            "children": [
                {
                    "component": "Group",
                    "props": {"justify": "space-between", "h": "100%", "px": "md"},
                    "children": [
                        {
                            "component": "Text",
                            "props": {"size": "lg", "fw": 700},
                            "children": brand
                        },
                        {
                            "component": "Group",
                            "props": {"gap": "sm"},
                            "children": nav_items
                        }
                    ]
                }
            ]
        }

    def _build_side_navbar(
        self,
        page_links: List[Dict[str, str]],
        options: Dict[str, Any],
        position: str
    ) -> Dict[str, Any]:
        """Build a side navigation bar component."""
        brand = options.get("brand", "App")

        # Build nav links
        nav_items = []
        for link in page_links:
            nav_items.append({
                "component": "NavLink",
                "props": {
                    "label": link["label"],
                    "href": link["href"]
                }
            })

        navbar_component = "AppShell.Navbar" if position == "left" else "AppShell.Aside"

        return {
            "component": navbar_component,
            "props": {"p": "md"},
            "children": [
                {
                    "component": "Text",
                    "props": {"size": "lg", "fw": 700, "mb": "md"},
                    "children": brand
                },
                {
                    "component": "Stack",
                    "props": {"gap": "xs"},
                    "children": nav_items
                }
            ]
        }
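
A routing sketch (illustrative; page names and the "Acme" brand are assumptions): register two pages, link them with a left-hand navbar, and protect one behind basic auth.

    import asyncio
    from mcp_server.page_tools import PageTools

    async def main():
        pages = PageTools()

        await pages.page_create("home", "/", title="Home")
        await pages.page_create("admin", "/admin", title="Admin")

        nav = await pages.page_add_navbar(
            ["home", "admin"], {"position": "left", "brand": "Acme"}
        )
        print(nav["navbar_id"])  # navbar_0, rendered as AppShell.Navbar

        # 'required' defaults to True, so the page becomes protected.
        await pages.page_set_auth("admin", {"type": "basic", "roles": ["admin"]})

        listing = await pages.page_list()
        print(listing["pages"]["admin"]["protected"])  # True

    asyncio.run(main())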

mcp-servers/viz-platform/mcp_server/server.py (new file, 928 lines)
@@ -0,0 +1,928 @@
|
|||||||
|
"""
|
||||||
|
MCP Server entry point for viz-platform integration.
|
||||||
|
|
||||||
|
Provides Dash Mantine Components validation, charting, layout, theming, and page tools
|
||||||
|
to Claude Code via JSON-RPC 2.0 over stdio.
|
||||||
|
"""
|
||||||
|
import asyncio
|
||||||
|
import logging
|
||||||
|
import json
|
||||||
|
from mcp.server import Server
|
||||||
|
from mcp.server.stdio import stdio_server
|
||||||
|
from mcp.types import Tool, TextContent
|
||||||
|
|
||||||
|
from .config import VizPlatformConfig
|
||||||
|
from .dmc_tools import DMCTools
|
||||||
|
from .chart_tools import ChartTools
|
||||||
|
from .layout_tools import LayoutTools
|
||||||
|
from .theme_tools import ThemeTools
|
||||||
|
from .page_tools import PageTools
|
||||||
|
from .accessibility_tools import AccessibilityTools
|
||||||
|
|
||||||
|
# Suppress noisy MCP validation warnings on stderr
|
||||||
|
logging.basicConfig(level=logging.INFO)
|
||||||
|
logging.getLogger("root").setLevel(logging.ERROR)
|
||||||
|
logging.getLogger("mcp").setLevel(logging.ERROR)
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class VizPlatformMCPServer:
|
||||||
|
"""MCP Server for visualization platform integration"""
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
self.server = Server("viz-platform-mcp")
|
||||||
|
self.config = None
|
||||||
|
self.dmc_tools = DMCTools()
|
||||||
|
self.chart_tools = ChartTools()
|
||||||
|
self.layout_tools = LayoutTools()
|
||||||
|
self.theme_tools = ThemeTools()
|
||||||
|
self.page_tools = PageTools()
|
||||||
|
self.accessibility_tools = AccessibilityTools(theme_store=self.theme_tools.store)
|
||||||
|
|
||||||
|
async def initialize(self):
|
||||||
|
"""Initialize server and load configuration."""
|
||||||
|
try:
|
||||||
|
config_loader = VizPlatformConfig()
|
||||||
|
self.config = config_loader.load()
|
||||||
|
|
||||||
|
# Initialize DMC tools with detected version
|
||||||
|
dmc_version = self.config.get('dmc_version')
|
||||||
|
self.dmc_tools.initialize(dmc_version)
|
||||||
|
|
||||||
|
# Log available capabilities
|
||||||
|
caps = []
|
||||||
|
if self.config.get('dmc_available'):
|
||||||
|
caps.append(f"DMC {dmc_version}")
|
||||||
|
if self.dmc_tools._initialized:
|
||||||
|
caps.append(f"Registry loaded ({self.dmc_tools.registry.loaded_version})")
|
||||||
|
else:
|
||||||
|
caps.append("DMC (not installed)")
|
||||||
|
|
||||||
|
logger.info(f"viz-platform MCP Server initialized with: {', '.join(caps)}")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Failed to initialize: {e}")
|
||||||
|
raise
|
||||||
|
|
||||||

    def setup_tools(self):
        """Register all available tools with the MCP server"""

        @self.server.list_tools()
        async def list_tools() -> list[Tool]:
            """Return list of available tools"""
            tools = []

            # DMC validation tools (Issue #172)
            tools.append(Tool(
                name="list_components",
                description=(
                    "List available Dash Mantine Components. "
                    "Returns components grouped by category with version info. "
                    "Use this to discover what components are available before building UI."
                ),
                inputSchema={
                    "type": "object",
                    "properties": {
                        "category": {
                            "type": "string",
                            "description": (
                                "Optional category filter. Available categories: "
                                "buttons, inputs, navigation, feedback, overlays, "
                                "typography, layout, data_display, charts, dates"
                            )
                        }
                    },
                    "required": []
                }
            ))

            tools.append(Tool(
                name="get_component_props",
                description=(
                    "Get the props schema for a specific DMC component. "
                    "Returns all available props with types, defaults, and allowed values. "
                    "ALWAYS use this before creating a component to ensure valid props."
                ),
                inputSchema={
                    "type": "object",
                    "properties": {
                        "component": {
                            "type": "string",
                            "description": "Component name (e.g., 'Button', 'TextInput', 'Select')"
                        }
                    },
                    "required": ["component"]
                }
            ))

            tools.append(Tool(
                name="validate_component",
                description=(
                    "Validate component props before use. "
                    "Checks for invalid props, type mismatches, and common mistakes. "
                    "Returns errors and warnings with suggestions for fixes."
                ),
                inputSchema={
                    "type": "object",
                    "properties": {
                        "component": {
                            "type": "string",
                            "description": "Component name to validate"
                        },
                        "props": {
                            "type": "object",
                            "description": "Props object to validate"
                        }
                    },
                    "required": ["component", "props"]
                }
            ))

            # Chart tools (Issue #173)
            tools.append(Tool(
                name="chart_create",
                description=(
                    "Create a Plotly chart for data visualization. "
                    "Supports line, bar, scatter, pie, heatmap, histogram, and area charts. "
                    "Automatically applies theme colors when a theme is active."
                ),
                inputSchema={
                    "type": "object",
                    "properties": {
                        "chart_type": {
                            "type": "string",
                            "enum": ["line", "bar", "scatter", "pie", "heatmap", "histogram", "area"],
                            "description": "Type of chart to create"
                        },
                        "data": {
                            "type": "object",
                            "description": (
                                "Data for the chart. For most charts: {x: [], y: []}. "
                                "For pie: {labels: [], values: []}. "
                                "For heatmap: {x: [], y: [], z: [[]]}"
                            )
                        },
                        "options": {
                            "type": "object",
                            "description": (
                                "Optional settings: title, x_label, y_label, color, "
                                "showlegend, height, width, horizontal (for bar)"
                            )
                        }
                    },
                    "required": ["chart_type", "data"]
                }
            ))

            tools.append(Tool(
                name="chart_configure_interaction",
                description=(
                    "Configure interactions on an existing chart. "
                    "Add hover templates, enable click data capture, selection modes, "
                    "and zoom behavior for Dash callback integration."
                ),
                inputSchema={
                    "type": "object",
                    "properties": {
                        "figure": {
                            "type": "object",
                            "description": "Plotly figure JSON to modify"
                        },
                        "interactions": {
                            "type": "object",
                            "description": (
                                "Interaction config: hover_template (string), "
                                "click_data (bool), selection ('box'|'lasso'), zoom (bool)"
                            )
                        }
                    },
                    "required": ["figure", "interactions"]
                }
            ))

            # Chart export tool (Issue #247)
            tools.append(Tool(
                name="chart_export",
                description=(
                    "Export a Plotly chart to static image format (PNG, SVG, PDF). "
                    "Requires kaleido package. Returns base64 image data or saves to file."
                ),
                inputSchema={
                    "type": "object",
                    "properties": {
                        "figure": {
                            "type": "object",
                            "description": "Plotly figure JSON to export"
                        },
                        "format": {
                            "type": "string",
                            "enum": ["png", "svg", "pdf"],
                            "description": "Output format (default: png)"
                        },
                        "width": {
                            "type": "integer",
                            "description": "Image width in pixels (default: 1200)"
                        },
                        "height": {
                            "type": "integer",
                            "description": "Image height in pixels (default: 800)"
                        },
                        "scale": {
                            "type": "number",
                            "description": "Resolution scale factor (default: 2 for retina)"
                        },
                        "output_path": {
                            "type": "string",
                            "description": "Optional file path to save image"
                        }
                    },
                    "required": ["figure"]
                }
            ))

            # Layout tools (Issue #174)
            tools.append(Tool(
                name="layout_create",
                description=(
                    "Create a new dashboard layout container. "
                    "Templates available: dashboard, report, form, blank. "
                    "Returns layout reference for use with other layout tools."
                ),
                inputSchema={
                    "type": "object",
                    "properties": {
                        "name": {
                            "type": "string",
                            "description": "Unique name for the layout"
                        },
                        "template": {
                            "type": "string",
                            "enum": ["dashboard", "report", "form", "blank"],
                            "description": "Layout template to use (default: blank)"
                        }
                    },
                    "required": ["name"]
                }
            ))

            tools.append(Tool(
                name="layout_add_filter",
                description=(
                    "Add a filter control to a layout. "
                    "Filter types: dropdown, multi_select, date_range, date, search, "
                    "checkbox_group, radio_group, slider, range_slider."
                ),
                inputSchema={
                    "type": "object",
                    "properties": {
                        "layout_ref": {
                            "type": "string",
                            "description": "Layout name to add filter to"
                        },
                        "filter_type": {
                            "type": "string",
                            "enum": ["dropdown", "multi_select", "date_range", "date",
                                     "search", "checkbox_group", "radio_group", "slider", "range_slider"],
                            "description": "Type of filter control"
                        },
                        "options": {
                            "type": "object",
                            "description": (
                                "Filter options: label, data (for dropdown), placeholder, "
                                "position (section name), value, etc."
                            )
                        }
                    },
                    "required": ["layout_ref", "filter_type", "options"]
                }
            ))

            tools.append(Tool(
                name="layout_set_grid",
                description=(
                    "Configure the grid system for a layout. "
                    "Uses DMC Grid component patterns with 12 or 24 column system."
                ),
                inputSchema={
                    "type": "object",
                    "properties": {
                        "layout_ref": {
                            "type": "string",
                            "description": "Layout name to configure"
                        },
                        "grid": {
                            "type": "object",
                            "description": (
                                "Grid config: cols (1-24), spacing (xs|sm|md|lg|xl), "
                                "breakpoints ({xs: cols, sm: cols, ...}), gutter"
                            )
                        }
                    },
                    "required": ["layout_ref", "grid"]
                }
            ))

            # Responsive breakpoints tool (Issue #249)
            tools.append(Tool(
                name="layout_set_breakpoints",
                description=(
                    "Configure responsive breakpoints for a layout. "
                    "Supports xs, sm, md, lg, xl breakpoints with mobile-first approach. "
                    "Each breakpoint can define cols, spacing, and other grid properties."
                ),
                inputSchema={
                    "type": "object",
                    "properties": {
                        "layout_ref": {
                            "type": "string",
                            "description": "Layout name to configure"
                        },
                        "breakpoints": {
                            "type": "object",
                            "description": (
                                "Breakpoint config: {xs: {cols, spacing}, sm: {...}, md: {...}, lg: {...}, xl: {...}}"
                            )
                        },
                        "mobile_first": {
                            "type": "boolean",
                            "description": "Use mobile-first (min-width) media queries (default: true)"
                        }
                    },
                    "required": ["layout_ref", "breakpoints"]
                }
            ))

            # Theme tools (Issue #175)
            tools.append(Tool(
                name="theme_create",
                description=(
                    "Create a new design theme with tokens. "
                    "Tokens include colors, spacing, typography, radii. "
                    "Missing tokens are filled from defaults."
                ),
                inputSchema={
                    "type": "object",
                    "properties": {
                        "name": {
                            "type": "string",
                            "description": "Unique theme name"
                        },
                        "tokens": {
                            "type": "object",
                            "description": (
                                "Design tokens: colors (primary, background, text), "
                                "spacing (xs-xl), typography (fontFamily, fontSize), radii (sm-xl)"
                            )
                        }
                    },
                    "required": ["name", "tokens"]
                }
            ))

            tools.append(Tool(
                name="theme_extend",
                description=(
                    "Create a new theme by extending an existing one. "
                    "Only specify the tokens you want to override."
                ),
                inputSchema={
                    "type": "object",
                    "properties": {
                        "base_theme": {
                            "type": "string",
                            "description": "Theme to extend (e.g., 'default')"
                        },
                        "overrides": {
                            "type": "object",
                            "description": "Token overrides to apply"
                        },
                        "new_name": {
                            "type": "string",
                            "description": "Name for the new theme (optional)"
                        }
                    },
                    "required": ["base_theme", "overrides"]
                }
            ))

            tools.append(Tool(
                name="theme_validate",
                description=(
                    "Validate a theme for completeness. "
                    "Checks for required tokens and common issues."
                ),
                inputSchema={
                    "type": "object",
                    "properties": {
                        "theme_name": {
                            "type": "string",
                            "description": "Theme to validate"
                        }
                    },
                    "required": ["theme_name"]
                }
            ))

            tools.append(Tool(
                name="theme_export_css",
                description=(
                    "Export a theme as CSS custom properties. "
                    "Generates :root CSS variables for all tokens."
                ),
                inputSchema={
                    "type": "object",
                    "properties": {
                        "theme_name": {
                            "type": "string",
                            "description": "Theme to export"
                        }
                    },
                    "required": ["theme_name"]
                }
            ))

            # Page tools (Issue #176)
            tools.append(Tool(
                name="page_create",
                description=(
                    "Create a new page for a multi-page Dash application. "
                    "Defines page routing and can link to a layout."
                ),
                inputSchema={
                    "type": "object",
                    "properties": {
                        "name": {
                            "type": "string",
                            "description": "Unique page name (identifier)"
                        },
                        "path": {
                            "type": "string",
                            "description": "URL path (e.g., '/', '/settings')"
                        },
                        "layout_ref": {
                            "type": "string",
                            "description": "Optional layout reference to use"
                        },
                        "title": {
                            "type": "string",
                            "description": "Page title (defaults to name)"
                        }
                    },
                    "required": ["name", "path"]
                }
            ))

            tools.append(Tool(
                name="page_add_navbar",
                description=(
                    "Generate navigation component linking pages. "
                    "Creates top or side navigation with DMC components."
                ),
                inputSchema={
                    "type": "object",
                    "properties": {
                        "pages": {
                            "type": "array",
                            "items": {"type": "string"},
                            "description": "List of page names to include"
                        },
                        "options": {
                            "type": "object",
                            "description": (
                                "Navigation options: position (top|left|right), "
                                "brand (app name), collapsible (bool)"
                            )
                        }
                    },
                    "required": ["pages"]
                }
            ))

            tools.append(Tool(
                name="page_set_auth",
                description=(
                    "Configure authentication for a page. "
                    "Sets auth requirements, roles, and redirect behavior."
                ),
                inputSchema={
                    "type": "object",
                    "properties": {
                        "page_ref": {
                            "type": "string",
                            "description": "Page name to configure"
                        },
                        "auth_config": {
                            "type": "object",
                            "description": (
                                "Auth config: type (none|basic|oauth|custom), "
                                "required (bool), roles (array), redirect (path)"
                            )
                        }
                    },
                    "required": ["page_ref", "auth_config"]
                }
            ))

            # Accessibility tools (Issue #248)
            tools.append(Tool(
                name="accessibility_validate_colors",
                description=(
                    "Validate colors for color blind accessibility. "
                    "Checks contrast ratios for deuteranopia, protanopia, tritanopia. "
                    "Returns issues, simulations, and accessible palette suggestions."
                ),
                inputSchema={
                    "type": "object",
                    "properties": {
                        "colors": {
                            "type": "array",
                            "items": {"type": "string"},
                            "description": "List of hex colors to validate (e.g., ['#228be6', '#40c057'])"
                        },
                        "check_types": {
                            "type": "array",
                            "items": {"type": "string"},
                            "description": "Color blindness types to check: deuteranopia, protanopia, tritanopia (default: all)"
                        },
                        "min_contrast_ratio": {
                            "type": "number",
                            "description": "Minimum WCAG contrast ratio (default: 4.5 for AA)"
                        }
                    },
                    "required": ["colors"]
                }
            ))

            tools.append(Tool(
                name="accessibility_validate_theme",
                description=(
                    "Validate a theme's colors for accessibility. "
                    "Extracts all colors from theme tokens and checks for color blind safety."
                ),
                inputSchema={
                    "type": "object",
                    "properties": {
                        "theme_name": {
                            "type": "string",
                            "description": "Theme name to validate"
                        }
                    },
                    "required": ["theme_name"]
                }
            ))

            tools.append(Tool(
                name="accessibility_suggest_alternative",
                description=(
                    "Suggest accessible alternative colors for a given color. "
                    "Provides alternatives optimized for specific color blindness types."
                ),
                inputSchema={
                    "type": "object",
                    "properties": {
                        "color": {
                            "type": "string",
                            "description": "Hex color to find alternatives for"
                        },
                        "deficiency_type": {
                            "type": "string",
                            "enum": ["deuteranopia", "protanopia", "tritanopia"],
                            "description": "Color blindness type to optimize for"
                        }
                    },
                    "required": ["color", "deficiency_type"]
                }
            ))

            return tools

        @self.server.call_tool()
        async def call_tool(name: str, arguments: dict) -> list[TextContent]:
            """Handle tool invocation."""
            try:
                # DMC validation tools
                if name == "list_components":
                    result = await self.dmc_tools.list_components(category=arguments.get('category'))
                    return [TextContent(type="text", text=json.dumps(result, indent=2))]

                elif name == "get_component_props":
                    component = arguments.get('component')
                    if not component:
                        return [TextContent(type="text", text=json.dumps({"error": "component is required"}, indent=2))]
                    result = await self.dmc_tools.get_component_props(component)
                    return [TextContent(type="text", text=json.dumps(result, indent=2))]

                elif name == "validate_component":
                    component = arguments.get('component')
                    props = arguments.get('props', {})
                    if not component:
                        return [TextContent(type="text", text=json.dumps({"error": "component is required"}, indent=2))]
                    result = await self.dmc_tools.validate_component(component, props)
                    return [TextContent(type="text", text=json.dumps(result, indent=2))]

                # Chart tools
                elif name == "chart_create":
                    chart_type = arguments.get('chart_type')
                    data = arguments.get('data', {})
                    options = arguments.get('options', {})
                    if not chart_type:
                        return [TextContent(type="text", text=json.dumps({"error": "chart_type is required"}, indent=2))]
                    result = await self.chart_tools.chart_create(chart_type, data, options)
                    return [TextContent(type="text", text=json.dumps(result, indent=2))]

                elif name == "chart_configure_interaction":
                    figure = arguments.get('figure')
                    interactions = arguments.get('interactions', {})
                    if not figure:
                        return [TextContent(type="text", text=json.dumps({"error": "figure is required"}, indent=2))]
                    result = await self.chart_tools.chart_configure_interaction(figure, interactions)
                    return [TextContent(type="text", text=json.dumps(result, indent=2))]

                elif name == "chart_export":
                    figure = arguments.get('figure')
                    if not figure:
                        return [TextContent(type="text", text=json.dumps({"error": "figure is required"}, indent=2))]
                    result = await self.chart_tools.chart_export(
                        figure=figure,
                        format=arguments.get('format', 'png'),
                        width=arguments.get('width'),
                        height=arguments.get('height'),
                        scale=arguments.get('scale', 2.0),
                        output_path=arguments.get('output_path')
                    )
                    return [TextContent(type="text", text=json.dumps(result, indent=2))]

                # Layout tools
                elif name == "layout_create":
                    layout_name = arguments.get('name')
                    template = arguments.get('template')
                    if not layout_name:
                        return [TextContent(type="text", text=json.dumps({"error": "name is required"}, indent=2))]
                    result = await self.layout_tools.layout_create(layout_name, template)
                    return [TextContent(type="text", text=json.dumps(result, indent=2))]

                elif name == "layout_add_filter":
                    layout_ref = arguments.get('layout_ref')
                    filter_type = arguments.get('filter_type')
                    options = arguments.get('options', {})
                    if not layout_ref or not filter_type:
                        return [TextContent(type="text", text=json.dumps({"error": "layout_ref and filter_type are required"}, indent=2))]
                    result = await self.layout_tools.layout_add_filter(layout_ref, filter_type, options)
                    return [TextContent(type="text", text=json.dumps(result, indent=2))]

                elif name == "layout_set_grid":
                    layout_ref = arguments.get('layout_ref')
                    grid = arguments.get('grid', {})
                    if not layout_ref:
                        return [TextContent(type="text", text=json.dumps({"error": "layout_ref is required"}, indent=2))]
                    result = await self.layout_tools.layout_set_grid(layout_ref, grid)
                    return [TextContent(type="text", text=json.dumps(result, indent=2))]

                elif name == "layout_set_breakpoints":
                    layout_ref = arguments.get('layout_ref')
                    breakpoints = arguments.get('breakpoints', {})
                    mobile_first = arguments.get('mobile_first', True)
                    if not layout_ref:
                        return [TextContent(type="text", text=json.dumps({"error": "layout_ref is required"}, indent=2))]
                    result = await self.layout_tools.layout_set_breakpoints(layout_ref, breakpoints, mobile_first)
                    return [TextContent(type="text", text=json.dumps(result, indent=2))]

                # Theme tools
                elif name == "theme_create":
                    theme_name = arguments.get('name')
                    tokens = arguments.get('tokens', {})
                    if not theme_name:
                        return [TextContent(type="text", text=json.dumps({"error": "name is required"}, indent=2))]
                    result = await self.theme_tools.theme_create(theme_name, tokens)
                    return [TextContent(type="text", text=json.dumps(result, indent=2))]

                elif name == "theme_extend":
                    base_theme = arguments.get('base_theme')
                    overrides = arguments.get('overrides', {})
                    new_name = arguments.get('new_name')
                    if not base_theme:
                        return [TextContent(type="text", text=json.dumps({"error": "base_theme is required"}, indent=2))]
                    result = await self.theme_tools.theme_extend(base_theme, overrides, new_name)
                    return [TextContent(type="text", text=json.dumps(result, indent=2))]

                elif name == "theme_validate":
                    theme_name = arguments.get('theme_name')
                    if not theme_name:
                        return [TextContent(type="text", text=json.dumps({"error": "theme_name is required"}, indent=2))]
                    result = await self.theme_tools.theme_validate(theme_name)
                    return [TextContent(type="text", text=json.dumps(result, indent=2))]

                elif name == "theme_export_css":
                    theme_name = arguments.get('theme_name')
                    if not theme_name:
                        return [TextContent(type="text", text=json.dumps({"error": "theme_name is required"}, indent=2))]
                    result = await self.theme_tools.theme_export_css(theme_name)
                    return [TextContent(type="text", text=json.dumps(result, indent=2))]

                # Page tools
                elif name == "page_create":
                    page_name = arguments.get('name')
                    path = arguments.get('path')
                    layout_ref = arguments.get('layout_ref')
                    title = arguments.get('title')
                    if not page_name or not path:
                        return [TextContent(type="text", text=json.dumps({"error": "name and path are required"}, indent=2))]
                    result = await self.page_tools.page_create(page_name, path, layout_ref, title)
                    return [TextContent(type="text", text=json.dumps(result, indent=2))]

                elif name == "page_add_navbar":
                    pages = arguments.get('pages', [])
                    options = arguments.get('options', {})
                    if not pages:
                        return [TextContent(type="text", text=json.dumps({"error": "pages list is required"}, indent=2))]
                    result = await self.page_tools.page_add_navbar(pages, options)
                    return [TextContent(type="text", text=json.dumps(result, indent=2))]

                elif name == "page_set_auth":
                    page_ref = arguments.get('page_ref')
                    auth_config = arguments.get('auth_config', {})
                    if not page_ref:
                        return [TextContent(type="text", text=json.dumps({"error": "page_ref is required"}, indent=2))]
                    result = await self.page_tools.page_set_auth(page_ref, auth_config)
                    return [TextContent(type="text", text=json.dumps(result, indent=2))]

                # Accessibility tools
                elif name == "accessibility_validate_colors":
                    colors = arguments.get('colors')
                    if not colors:
                        return [TextContent(type="text", text=json.dumps({"error": "colors list is required"}, indent=2))]
                    result = await self.accessibility_tools.accessibility_validate_colors(
                        colors=colors,
                        check_types=arguments.get('check_types'),
                        min_contrast_ratio=arguments.get('min_contrast_ratio', 4.5)
                    )
                    return [TextContent(type="text", text=json.dumps(result, indent=2))]

                elif name == "accessibility_validate_theme":
                    theme_name = arguments.get('theme_name')
                    if not theme_name:
                        return [TextContent(type="text", text=json.dumps({"error": "theme_name is required"}, indent=2))]
                    result = await self.accessibility_tools.accessibility_validate_theme(theme_name)
                    return [TextContent(type="text", text=json.dumps(result, indent=2))]

                elif name == "accessibility_suggest_alternative":
                    color = arguments.get('color')
                    deficiency_type = arguments.get('deficiency_type')
                    if not color or not deficiency_type:
                        return [TextContent(type="text", text=json.dumps({"error": "color and deficiency_type are required"}, indent=2))]
                    result = await self.accessibility_tools.accessibility_suggest_alternative(color, deficiency_type)
                    return [TextContent(type="text", text=json.dumps(result, indent=2))]

                raise ValueError(f"Unknown tool: {name}")

            except Exception as e:
                logger.error(f"Tool {name} failed: {e}")
                return [TextContent(type="text", text=json.dumps({"error": str(e)}, indent=2))]

    async def run(self):
        """Run the MCP server"""
        await self.initialize()
        self.setup_tools()

        async with stdio_server() as (read_stream, write_stream):
            await self.server.run(
                read_stream,
                write_stream,
                self.server.create_initialization_options()
            )


async def main():
    """Main entry point"""
    server = VizPlatformMCPServer()
    await server.run()


if __name__ == "__main__":
    asyncio.run(main())
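
Because the server above speaks MCP over stdio, a client drives it by spawning the process and opening a session. The following is a minimal client sketch using the `mcp` Python SDK; the launch command and module path are assumptions, while the tool name and arguments match the `chart_create` schema registered above:

    import asyncio
    from mcp import ClientSession, StdioServerParameters
    from mcp.client.stdio import stdio_client

    async def demo():
        # Assumed entry point; adjust to however the server is actually launched.
        params = StdioServerParameters(command="python", args=["-m", "mcp_server.mcp_server"])
        async with stdio_client(params) as (read, write):
            async with ClientSession(read, write) as session:
                await session.initialize()
                tools = await session.list_tools()
                print([t.name for t in tools.tools])  # list_components, chart_create, ...
                result = await session.call_tool(
                    "chart_create",
                    arguments={"chart_type": "line", "data": {"x": [1, 2, 3], "y": [4, 1, 7]}},
                )
                print(result.content[0].text)  # JSON payload produced by call_tool above

    asyncio.run(demo())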

259  mcp-servers/viz-platform/mcp_server/theme_store.py  Normal file
@@ -0,0 +1,259 @@
"""
Theme storage and persistence for viz-platform.

Handles saving/loading themes from user and project locations.
"""
import json
import logging
from pathlib import Path
from typing import Dict, List, Optional, Any

logger = logging.getLogger(__name__)


# Default theme based on Mantine defaults
DEFAULT_THEME = {
    "name": "default",
    "version": "1.0.0",
    "tokens": {
        "colors": {
            "primary": "#228be6",
            "secondary": "#868e96",
            "success": "#40c057",
            "warning": "#fab005",
            "error": "#fa5252",
            "info": "#15aabf",
            "background": {
                "base": "#ffffff",
                "subtle": "#f8f9fa",
                "dark": "#212529"
            },
            "text": {
                "primary": "#212529",
                "secondary": "#495057",
                "muted": "#868e96",
                "inverse": "#ffffff"
            },
            "border": "#dee2e6"
        },
        "spacing": {
            "xs": "4px",
            "sm": "8px",
            "md": "16px",
            "lg": "24px",
            "xl": "32px"
        },
        "typography": {
            "fontFamily": "Inter, -apple-system, BlinkMacSystemFont, Segoe UI, Roboto, sans-serif",
            "fontFamilyMono": "ui-monospace, SFMono-Regular, Menlo, Monaco, monospace",
            "fontSize": {
                "xs": "12px",
                "sm": "14px",
                "md": "16px",
                "lg": "18px",
                "xl": "20px"
            },
            "fontWeight": {
                "normal": 400,
                "medium": 500,
                "semibold": 600,
                "bold": 700
            },
            "lineHeight": {
                "tight": 1.25,
                "normal": 1.5,
                "relaxed": 1.75
            }
        },
        "radii": {
            "none": "0px",
            "sm": "4px",
            "md": "8px",
            "lg": "16px",
            "xl": "24px",
            "full": "9999px"
        },
        "shadows": {
            "none": "none",
            "sm": "0 1px 2px 0 rgb(0 0 0 / 0.05)",
            "md": "0 4px 6px -1px rgb(0 0 0 / 0.1)",
            "lg": "0 10px 15px -3px rgb(0 0 0 / 0.1)",
            "xl": "0 20px 25px -5px rgb(0 0 0 / 0.1)"
        },
        "transitions": {
            "fast": "150ms",
            "normal": "300ms",
            "slow": "500ms"
        }
    }
}
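
# For orientation (a sketch; the values are the defaults defined above),
# token lookups on this structure resolve like:
#
#   DEFAULT_THEME["tokens"]["colors"]["primary"]              -> "#228be6"
#   DEFAULT_THEME["tokens"]["colors"]["background"]["subtle"] -> "#f8f9fa"
#   DEFAULT_THEME["tokens"]["spacing"]["md"]                  -> "16px"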

# Required token categories for validation
REQUIRED_TOKEN_CATEGORIES = ["colors", "spacing", "typography", "radii"]


class ThemeStore:
    """
    Store and manage design themes.

    Handles persistence to user-level and project-level locations.
    """

    def __init__(self, project_dir: Optional[Path] = None):
        """
        Initialize theme store.

        Args:
            project_dir: Project directory for project-level themes
        """
        self.project_dir = project_dir
        self._themes: Dict[str, Dict[str, Any]] = {}
        self._active_theme: Optional[str] = None

        # Load default theme
        self._themes["default"] = DEFAULT_THEME.copy()

    @property
    def user_themes_dir(self) -> Path:
        """User-level themes directory."""
        return Path.home() / ".config" / "claude" / "themes"

    @property
    def project_themes_dir(self) -> Optional[Path]:
        """Project-level themes directory."""
        if self.project_dir:
            return self.project_dir / ".viz-platform" / "themes"
        return None

    def load_themes(self) -> int:
        """
        Load themes from user and project directories.

        Project themes take precedence over user themes.

        Returns:
            Number of themes loaded
        """
        count = 0

        # Load user themes
        if self.user_themes_dir.exists():
            for theme_file in self.user_themes_dir.glob("*.json"):
                try:
                    with open(theme_file, 'r') as f:
                        theme = json.load(f)
                    name = theme.get('name', theme_file.stem)
                    self._themes[name] = theme
                    count += 1
                    logger.debug(f"Loaded user theme: {name}")
                except Exception as e:
                    logger.warning(f"Failed to load theme {theme_file}: {e}")

        # Load project themes (override user themes)
        if self.project_themes_dir and self.project_themes_dir.exists():
            for theme_file in self.project_themes_dir.glob("*.json"):
                try:
                    with open(theme_file, 'r') as f:
                        theme = json.load(f)
                    name = theme.get('name', theme_file.stem)
                    self._themes[name] = theme
                    count += 1
                    logger.debug(f"Loaded project theme: {name}")
                except Exception as e:
                    logger.warning(f"Failed to load theme {theme_file}: {e}")

        return count

    def save_theme(
        self,
        theme: Dict[str, Any],
        location: str = "project"
    ) -> Path:
        """
        Save a theme to disk.

        Args:
            theme: Theme dict to save
            location: "user" or "project"

        Returns:
            Path where theme was saved
        """
        name = theme.get('name', 'unnamed')

        if location == "user":
            target_dir = self.user_themes_dir
        else:
            target_dir = self.project_themes_dir
            if not target_dir:
                target_dir = self.user_themes_dir

        target_dir.mkdir(parents=True, exist_ok=True)
        theme_path = target_dir / f"{name}.json"

        with open(theme_path, 'w') as f:
            json.dump(theme, f, indent=2)

        # Update in-memory store
        self._themes[name] = theme

        return theme_path

    def get_theme(self, name: str) -> Optional[Dict[str, Any]]:
        """Get a theme by name."""
        return self._themes.get(name)

    def list_themes(self) -> List[str]:
        """List all available theme names."""
        return list(self._themes.keys())

    def set_active_theme(self, name: str) -> bool:
        """
        Set the active theme.

        Args:
            name: Theme name to activate

        Returns:
            True if theme was activated
        """
        if name in self._themes:
            self._active_theme = name
            return True
        return False

    def get_active_theme(self) -> Optional[Dict[str, Any]]:
        """Get the currently active theme."""
        if self._active_theme:
            return self._themes.get(self._active_theme)
        return None

    def delete_theme(self, name: str) -> bool:
        """
        Delete a theme.

        Args:
            name: Theme name to delete

        Returns:
            True if theme was deleted
        """
        if name == "default":
            return False  # Cannot delete default theme

        if name in self._themes:
            del self._themes[name]

            # Remove file if exists
            for themes_dir in [self.user_themes_dir, self.project_themes_dir]:
                if themes_dir and themes_dir.exists():
                    theme_path = themes_dir / f"{name}.json"
                    if theme_path.exists():
                        theme_path.unlink()

            if self._active_theme == name:
                self._active_theme = None

            return True
        return False
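
A minimal usage sketch for ThemeStore; the import path is an assumption, while every method shown is defined in the file above:

    from pathlib import Path
    from mcp_server.theme_store import ThemeStore  # assumed import path

    store = ThemeStore(project_dir=Path("."))
    print(store.load_themes())              # user themes first, then project overrides
    theme = {"name": "brand", "version": "1.0.0",
             "tokens": {"colors": {"primary": "#0b7285"}}}
    path = store.save_theme(theme, location="project")
    print(path)                             # ./.viz-platform/themes/brand.json
    print(store.set_active_theme("brand"))  # True once the theme is in the store
    print(store.list_themes())              # ['default', 'brand']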

391  mcp-servers/viz-platform/mcp_server/theme_tools.py  Normal file
@@ -0,0 +1,391 @@
"""
Theme management tools for viz-platform.

Provides design token-based theming system for consistent visual styling.
"""
import copy
import logging
from typing import Dict, List, Optional, Any

from .theme_store import ThemeStore, DEFAULT_THEME, REQUIRED_TOKEN_CATEGORIES

logger = logging.getLogger(__name__)


class ThemeTools:
    """
    Design token-based theming tools.

    Creates and manages themes that integrate with DMC and Plotly.
    """

    def __init__(self, store: Optional[ThemeStore] = None):
        """
        Initialize theme tools.

        Args:
            store: Optional ThemeStore for persistence
        """
        self.store = store or ThemeStore()

    async def theme_create(
        self,
        name: str,
        tokens: Dict[str, Any]
    ) -> Dict[str, Any]:
        """
        Create a new theme with design tokens.

        Args:
            name: Unique theme name
            tokens: Design tokens dict with colors, spacing, typography, radii

        Returns:
            Dict with:
            - name: Theme name
            - tokens: Full token set (merged with defaults)
            - validation: Validation results
        """
        # Check for name collision
        if self.store.get_theme(name) and name != "default":
            return {
                "error": f"Theme '{name}' already exists. Use theme_extend to modify it.",
                "name": name
            }

        # Start with default tokens and merge provided ones
        theme_tokens = copy.deepcopy(DEFAULT_THEME["tokens"])
        theme_tokens = self._deep_merge(theme_tokens, tokens)

        # Create theme object
        theme = {
            "name": name,
            "version": "1.0.0",
            "tokens": theme_tokens
        }

        # Validate the theme
        validation = self._validate_tokens(theme_tokens)

        # Save to store
        self.store._themes[name] = theme

        return {
            "name": name,
            "tokens": theme_tokens,
            "validation": validation,
            "complete": validation["complete"]
        }

    async def theme_extend(
        self,
        base_theme: str,
        overrides: Dict[str, Any],
        new_name: Optional[str] = None
    ) -> Dict[str, Any]:
        """
        Create a new theme by extending an existing one.

        Args:
            base_theme: Name of theme to extend
            overrides: Token overrides to apply
            new_name: Optional name for the new theme (defaults to base_theme_extended)

        Returns:
            Dict with the new theme or error
        """
        # Get base theme
        base = self.store.get_theme(base_theme)
        if not base:
            available = self.store.list_themes()
            return {
                "error": f"Base theme '{base_theme}' not found. Available: {available}",
                "name": None
            }

        # Determine new name
        name = new_name or f"{base_theme}_extended"

        # Check for collision
        if self.store.get_theme(name) and name != base_theme:
            return {
                "error": f"Theme '{name}' already exists. Choose a different name.",
                "name": name
            }

        # Merge tokens
        theme_tokens = copy.deepcopy(base.get("tokens", {}))
        theme_tokens = self._deep_merge(theme_tokens, overrides)

        # Create theme
        theme = {
            "name": name,
            "version": "1.0.0",
            "extends": base_theme,
            "tokens": theme_tokens
        }

        # Validate
        validation = self._validate_tokens(theme_tokens)

        # Save to store
        self.store._themes[name] = theme

        return {
            "name": name,
            "extends": base_theme,
            "tokens": theme_tokens,
            "validation": validation,
            "complete": validation["complete"]
        }

    async def theme_validate(self, theme_name: str) -> Dict[str, Any]:
        """
        Validate a theme for completeness.

        Args:
            theme_name: Theme name to validate

        Returns:
            Dict with:
            - valid: bool
            - complete: bool (all optional tokens present)
            - missing: List of missing required tokens
            - warnings: List of warnings
        """
        theme = self.store.get_theme(theme_name)
        if not theme:
            available = self.store.list_themes()
            return {
                "error": f"Theme '{theme_name}' not found. Available: {available}",
                "valid": False
            }

        tokens = theme.get("tokens", {})
        validation = self._validate_tokens(tokens)

        return {
            "theme_name": theme_name,
            "valid": validation["valid"],
            "complete": validation["complete"],
            "missing_required": validation["missing_required"],
            "missing_optional": validation["missing_optional"],
            "warnings": validation["warnings"]
        }

    async def theme_export_css(self, theme_name: str) -> Dict[str, Any]:
        """
        Export a theme as CSS custom properties.

        Args:
            theme_name: Theme name to export

        Returns:
            Dict with:
            - css: CSS custom properties string
            - variables: List of variable names
        """
        theme = self.store.get_theme(theme_name)
        if not theme:
            available = self.store.list_themes()
            return {
                "error": f"Theme '{theme_name}' not found. Available: {available}",
                "css": None
            }

        tokens = theme.get("tokens", {})
        css_vars = []
        var_names = []

        # Convert tokens to CSS custom properties
        css_vars.append(f"/* Theme: {theme_name} */")
        css_vars.append(":root {")

        # Colors
        colors = tokens.get("colors", {})
        css_vars.append("  /* Colors */")
        for key, value in self._flatten_tokens(colors, "color").items():
            var_name = f"--{key}"
            css_vars.append(f"  {var_name}: {value};")
            var_names.append(var_name)

        # Spacing
        spacing = tokens.get("spacing", {})
        css_vars.append("\n  /* Spacing */")
        for key, value in spacing.items():
            var_name = f"--spacing-{key}"
            css_vars.append(f"  {var_name}: {value};")
            var_names.append(var_name)

        # Typography
        typography = tokens.get("typography", {})
        css_vars.append("\n  /* Typography */")
        for key, value in self._flatten_tokens(typography, "font").items():
            var_name = f"--{key}"
            css_vars.append(f"  {var_name}: {value};")
            var_names.append(var_name)

        # Radii
        radii = tokens.get("radii", {})
        css_vars.append("\n  /* Border Radius */")
        for key, value in radii.items():
            var_name = f"--radius-{key}"
            css_vars.append(f"  {var_name}: {value};")
            var_names.append(var_name)

        # Shadows
        shadows = tokens.get("shadows", {})
        if shadows:
            css_vars.append("\n  /* Shadows */")
            for key, value in shadows.items():
                var_name = f"--shadow-{key}"
                css_vars.append(f"  {var_name}: {value};")
                var_names.append(var_name)

        # Transitions
        transitions = tokens.get("transitions", {})
        if transitions:
            css_vars.append("\n  /* Transitions */")
            for key, value in transitions.items():
                var_name = f"--transition-{key}"
                css_vars.append(f"  {var_name}: {value};")
                var_names.append(var_name)

        css_vars.append("}")

        css_content = "\n".join(css_vars)

        return {
            "theme_name": theme_name,
            "css": css_content,
            "variable_count": len(var_names),
            "variables": var_names
        }

    async def theme_list(self) -> Dict[str, Any]:
        """
        List all available themes.

        Returns:
            Dict with theme names and active theme
        """
        themes = self.store.list_themes()
        active = self.store._active_theme

        theme_info = {}
        for name in themes:
            theme = self.store.get_theme(name)
            theme_info[name] = {
                "extends": theme.get("extends"),
                "version": theme.get("version", "1.0.0")
            }

        return {
            "themes": theme_info,
            "active_theme": active,
            "count": len(themes)
        }

    async def theme_activate(self, theme_name: str) -> Dict[str, Any]:
        """
        Set the active theme.

        Args:
            theme_name: Theme to activate

        Returns:
            Dict with activation status
        """
        if self.store.set_active_theme(theme_name):
            return {
                "active_theme": theme_name,
                "success": True
            }
        return {
            "error": f"Theme '{theme_name}' not found.",
            "success": False
        }

    def _validate_tokens(self, tokens: Dict[str, Any]) -> Dict[str, Any]:
        """Validate token structure and completeness."""
        missing_required = []
        missing_optional = []
        warnings = []

        # Check required categories
        for category in REQUIRED_TOKEN_CATEGORIES:
            if category not in tokens:
                missing_required.append(category)

        # Check colors structure
        colors = tokens.get("colors", {})
        required_colors = ["primary", "background", "text"]
        for color in required_colors:
            if color not in colors:
                missing_required.append(f"colors.{color}")

        # Check spacing
        spacing = tokens.get("spacing", {})
        required_spacing = ["xs", "sm", "md", "lg"]
        for size in required_spacing:
            if size not in spacing:
                missing_optional.append(f"spacing.{size}")

        # Check typography
        typography = tokens.get("typography", {})
        if "fontFamily" not in typography:
            missing_optional.append("typography.fontFamily")
        if "fontSize" not in typography:
            missing_optional.append("typography.fontSize")

        # Check radii
        radii = tokens.get("radii", {})
        if "sm" not in radii and "md" not in radii:
            missing_optional.append("radii.sm or radii.md")

        # Warnings for common issues
        if "shadows" not in tokens:
            warnings.append("No shadows defined - components may have no elevation")
        if "transitions" not in tokens:
            warnings.append("No transitions defined - animations will use defaults")

        return {
            "valid": len(missing_required) == 0,
            "complete": len(missing_required) == 0 and len(missing_optional) == 0,
            "missing_required": missing_required,
            "missing_optional": missing_optional,
            "warnings": warnings
        }
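
    # Example (a sketch) of the validator's output: tokens that pass every
    # category, color, spacing, typography, and radii check above, but omit
    # "shadows" and "transitions", come back as
    #
    #   {
    #       "valid": True,
    #       "complete": True,
    #       "missing_required": [],
    #       "missing_optional": [],
    #       "warnings": [
    #           "No shadows defined - components may have no elevation",
    #           "No transitions defined - animations will use defaults",
    #       ],
    #   }
    #
    # Warnings never affect "valid" or "complete".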

    def _deep_merge(
        self,
        base: Dict[str, Any],
        override: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Deep merge two dictionaries."""
        result = copy.deepcopy(base)

        for key, value in override.items():
            if key in result and isinstance(result[key], dict) and isinstance(value, dict):
                result[key] = self._deep_merge(result[key], value)
            else:
                result[key] = value

        return result
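
    # A sketch of the merge semantics above:
    #
    #   base     = {"colors": {"primary": "#228be6", "error": "#fa5252"}}
    #   override = {"colors": {"primary": "#0b7285"}}
    #   _deep_merge(base, override)
    #     -> {"colors": {"primary": "#0b7285", "error": "#fa5252"}}
    #
    # Nested dicts are merged key by key; any non-dict value replaces the base
    # value outright, and the deepcopy means the base dict is never mutated.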

    def _flatten_tokens(
        self,
        tokens: Dict[str, Any],
        prefix: str
    ) -> Dict[str, str]:
        """Flatten nested token dict for CSS export."""
        result = {}

        for key, value in tokens.items():
            if isinstance(value, dict):
                nested = self._flatten_tokens(value, f"{prefix}-{key}")
                result.update(nested)
            else:
                result[f"{prefix}-{key}"] = str(value)

        return result
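
A minimal driver sketch for ThemeTools (import path assumed, as above); it exports the built-in default theme and prints the generated :root block:

    import asyncio
    from mcp_server.theme_tools import ThemeTools  # assumed import path

    async def demo():
        tools = ThemeTools()
        result = await tools.theme_export_css("default")
        print(result["variable_count"], "variables")  # e.g. --color-primary, --spacing-md
        print(result["css"].splitlines()[0])          # "/* Theme: default */"

    asyncio.run(demo())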

45  mcp-servers/viz-platform/pyproject.toml  Normal file
@@ -0,0 +1,45 @@
[build-system]
requires = ["setuptools>=61.0", "wheel"]
build-backend = "setuptools.build_meta"

[project]
name = "viz-platform-mcp"
version = "1.0.0"
description = "MCP Server for visualization with Dash Mantine Components validation and theming"
readme = "README.md"
license = {text = "MIT"}
requires-python = ">=3.10"
authors = [
    {name = "Leo Miranda"}
]
classifiers = [
    "Development Status :: 4 - Beta",
    "Intended Audience :: Developers",
    "License :: OSI Approved :: MIT License",
    "Programming Language :: Python :: 3",
    "Programming Language :: Python :: 3.10",
    "Programming Language :: Python :: 3.11",
    "Programming Language :: Python :: 3.12",
]
dependencies = [
    "mcp>=0.9.0",
    "plotly>=5.18.0",
    "dash>=2.14.0",
    "dash-mantine-components>=2.0.0",
    "python-dotenv>=1.0.0",
    "pydantic>=2.5.0",
]

[project.optional-dependencies]
dev = [
    "pytest>=7.4.3",
    "pytest-asyncio>=0.23.0",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["mcp_server*"]

[tool.pytest.ini_options]
asyncio_mode = "auto"
testpaths = ["tests"]
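
With `asyncio_mode = "auto"`, async test functions under `tests/` need no explicit pytest-asyncio marker. A minimal test sketch under those assumptions (the file name and import path are hypothetical; the asserted behavior follows theme_extend as defined above):

    # tests/test_theme_tools.py (hypothetical test file)
    from mcp_server.theme_tools import ThemeTools  # assumed import path

    async def test_theme_extend_overrides_primary():
        tools = ThemeTools()
        result = await tools.theme_extend(
            "default", {"colors": {"primary": "#0b7285"}}, new_name="brand"
        )
        assert result["name"] == "brand"
        assert result["tokens"]["colors"]["primary"] == "#0b7285"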

668  mcp-servers/viz-platform/registry/dmc_2_5.json  Normal file
@@ -0,0 +1,668 @@
{
  "version": "2.5.1",
  "generated": "2026-01-26",
  "mantine_version": "7.x",
  "categories": {
    "buttons": ["Button", "ButtonGroup", "ActionIcon", "ActionIconGroup", "CopyButton", "CloseButton", "UnstyledButton"],
    "inputs": [
      "TextInput", "PasswordInput", "NumberInput", "Textarea", "Select", "MultiSelect",
      "Checkbox", "CheckboxGroup", "CheckboxCard", "Switch", "Radio", "RadioGroup", "RadioCard",
      "Slider", "RangeSlider", "ColorInput", "ColorPicker", "Autocomplete", "TagsInput",
      "PinInput", "Rating", "SegmentedControl", "Chip", "ChipGroup", "JsonInput",
      "NativeSelect", "FileInput", "Combobox"
    ],
    "navigation": ["Anchor", "Breadcrumbs", "Burger", "NavLink", "Pagination", "Stepper", "Tabs", "TabsList", "TabsTab", "TabsPanel"],
    "feedback": ["Alert", "Loader", "Notification", "NotificationContainer", "Progress", "RingProgress", "Skeleton"],
    "overlays": ["Modal", "Drawer", "DrawerStack", "Popover", "HoverCard", "Tooltip", "FloatingTooltip", "Menu", "MenuTarget", "MenuDropdown", "MenuItem", "Affix"],
    "typography": ["Text", "Title", "Highlight", "Mark", "Code", "CodeHighlight", "Blockquote", "List", "ListItem", "Kbd"],
    "layout": [
      "AppShell", "AppShellHeader", "AppShellNavbar", "AppShellAside", "AppShellFooter", "AppShellMain", "AppShellSection",
      "Container", "Center", "Stack", "Group", "Flex", "Grid", "GridCol", "SimpleGrid",
      "Paper", "Card", "CardSection", "Box", "Space", "Divider", "AspectRatio", "ScrollArea"
    ],
    "data_display": [
      "Accordion", "AccordionItem", "AccordionControl", "AccordionPanel",
      "Avatar", "AvatarGroup", "Badge", "Image", "BackgroundImage",
      "Indicator", "Spoiler", "Table", "ThemeIcon", "Timeline", "TimelineItem", "Tree"
    ],
    "charts": ["AreaChart", "BarChart", "LineChart", "PieChart", "DonutChart", "RadarChart", "ScatterChart", "BubbleChart", "CompositeChart", "Sparkline"],
    "dates": ["DatePicker", "DateTimePicker", "DateInput", "DatePickerInput", "MonthPicker", "YearPicker", "TimePicker", "TimeInput", "Calendar", "MiniCalendar", "DatesProvider"]
  },
  "components": {
    "Button": {
      "description": "Button component for user interactions",
      "props": {
        "children": {"type": "any", "description": "Button content"},
        "variant": {"type": "string", "enum": ["filled", "light", "outline", "transparent", "white", "subtle", "default", "gradient"], "default": "filled"},
        "color": {"type": "string", "default": "blue", "description": "Key of theme.colors or CSS color"},
        "size": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl", "compact-xs", "compact-sm", "compact-md", "compact-lg", "compact-xl"], "default": "sm"},
        "radius": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "sm"},
        "disabled": {"type": "boolean", "default": false},
        "loading": {"type": "boolean", "default": false},
        "loaderProps": {"type": "object"},
        "leftSection": {"type": "any", "description": "Content on the left side of label"},
        "rightSection": {"type": "any", "description": "Content on the right side of label"},
        "fullWidth": {"type": "boolean", "default": false},
        "gradient": {"type": "object", "description": "Gradient for gradient variant"},
        "justify": {"type": "string", "enum": ["center", "start", "end", "space-between"], "default": "center"},
        "autoContrast": {"type": "boolean", "default": false},
        "n_clicks": {"type": "integer", "default": 0, "description": "Dash callback trigger"}
      }
    },
    "ActionIcon": {
      "description": "Icon button without text label",
      "props": {
        "children": {"type": "any", "required": true, "description": "Icon element"},
        "variant": {"type": "string", "enum": ["filled", "light", "outline", "transparent", "white", "subtle", "default", "gradient"], "default": "subtle"},
        "color": {"type": "string", "default": "gray"},
        "size": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "md"},
        "radius": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "sm"},
        "disabled": {"type": "boolean", "default": false},
        "loading": {"type": "boolean", "default": false},
        "autoContrast": {"type": "boolean", "default": false},
        "n_clicks": {"type": "integer", "default": 0}
      }
    },
    "TextInput": {
      "description": "Text input field",
      "props": {
        "value": {"type": "string", "default": ""},
        "placeholder": {"type": "string"},
        "label": {"type": "any"},
        "description": {"type": "any"},
        "error": {"type": "any"},
        "disabled": {"type": "boolean", "default": false},
        "required": {"type": "boolean", "default": false},
        "size": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "sm"},
        "radius": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "sm"},
        "variant": {"type": "string", "enum": ["default", "filled", "unstyled"], "default": "default"},
        "leftSection": {"type": "any"},
        "rightSection": {"type": "any"},
        "withAsterisk": {"type": "boolean", "default": false},
        "debounce": {"type": "integer", "description": "Debounce delay in ms"},
        "leftSectionPointerEvents": {"type": "string", "enum": ["none", "all"], "default": "none"},
        "rightSectionPointerEvents": {"type": "string", "enum": ["none", "all"], "default": "none"}
      }
    },
    "NumberInput": {
      "description": "Numeric input with optional controls",
      "props": {
        "value": {"type": "number"},
        "placeholder": {"type": "string"},
        "label": {"type": "any"},
        "description": {"type": "any"},
        "error": {"type": "any"},
        "disabled": {"type": "boolean", "default": false},
        "required": {"type": "boolean", "default": false},
        "min": {"type": "number"},
        "max": {"type": "number"},
        "step": {"type": "number", "default": 1},
        "hideControls": {"type": "boolean", "default": false},
        "size": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "sm"},
        "radius": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "sm"},
        "allowNegative": {"type": "boolean", "default": true},
        "allowDecimal": {"type": "boolean", "default": true},
        "clampBehavior": {"type": "string", "enum": ["strict", "blur", "none"], "default": "blur"},
        "decimalScale": {"type": "integer"},
        "fixedDecimalScale": {"type": "boolean", "default": false},
        "thousandSeparator": {"type": "string"},
        "decimalSeparator": {"type": "string"},
        "prefix": {"type": "string"},
        "suffix": {"type": "string"}
      }
    },
    "Select": {
      "description": "Dropdown select input",
      "props": {
        "value": {"type": "string"},
        "data": {"type": "array", "required": true, "description": "Array of options: strings or {value, label} objects"},
        "placeholder": {"type": "string"},
        "label": {"type": "any"},
        "description": {"type": "any"},
        "error": {"type": "any"},
        "disabled": {"type": "boolean", "default": false},
        "required": {"type": "boolean", "default": false},
        "searchable": {"type": "boolean", "default": false},
        "clearable": {"type": "boolean", "default": false},
        "nothingFoundMessage": {"type": "string"},
        "size": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "sm"},
        "radius": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "sm"},
        "maxDropdownHeight": {"type": "number", "default": 250},
        "allowDeselect": {"type": "boolean", "default": true},
        "checkIconPosition": {"type": "string", "enum": ["left", "right"], "default": "left"},
        "comboboxProps": {"type": "object"},
        "withScrollArea": {"type": "boolean", "default": true}
      }
    },
    "MultiSelect": {
      "description": "Multiple selection dropdown",
      "props": {
        "value": {"type": "array", "default": []},
        "data": {"type": "array", "required": true},
        "placeholder": {"type": "string"},
        "label": {"type": "any"},
        "description": {"type": "any"},
        "error": {"type": "any"},
        "disabled": {"type": "boolean", "default": false},
        "required": {"type": "boolean", "default": false},
        "searchable": {"type": "boolean", "default": false},
        "clearable": {"type": "boolean", "default": false},
        "maxValues": {"type": "integer"},
        "hidePickedOptions": {"type": "boolean", "default": false},
        "size": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "sm"},
        "radius": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "sm"},
        "maxDropdownHeight": {"type": "number", "default": 250},
        "withCheckIcon": {"type": "boolean", "default": true}
      }
    },
    "Checkbox": {
      "description": "Checkbox input",
      "props": {
        "checked": {"type": "boolean", "default": false},
        "label": {"type": "any"},
        "description": {"type": "any"},
        "error": {"type": "any"},
        "disabled": {"type": "boolean", "default": false},
        "indeterminate": {"type": "boolean", "default": false},
        "size": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "sm"},
        "radius": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "sm"},
        "color": {"type": "string", "default": "blue"},
        "labelPosition": {"type": "string", "enum": ["left", "right"], "default": "right"},
        "autoContrast": {"type": "boolean", "default": false},
        "icon": {"type": "any"},
        "iconColor": {"type": "string"}
      }
    },
    "Switch": {
      "description": "Toggle switch input",
|
"props": {
|
||||||
|
"checked": {"type": "boolean", "default": false},
|
||||||
|
"label": {"type": "any"},
|
||||||
|
"description": {"type": "any"},
|
||||||
|
"error": {"type": "any"},
|
||||||
|
"disabled": {"type": "boolean", "default": false},
|
||||||
|
"size": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "sm"},
|
||||||
|
"radius": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "xl"},
|
||||||
|
"color": {"type": "string", "default": "blue"},
|
||||||
|
"onLabel": {"type": "any"},
|
||||||
|
"offLabel": {"type": "any"},
|
||||||
|
"thumbIcon": {"type": "any"},
|
||||||
|
"labelPosition": {"type": "string", "enum": ["left", "right"], "default": "right"}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"Slider": {
|
||||||
|
"description": "Slider input for numeric values",
|
||||||
|
"props": {
|
||||||
|
"value": {"type": "number"},
|
||||||
|
"min": {"type": "number", "default": 0},
|
||||||
|
"max": {"type": "number", "default": 100},
|
||||||
|
"step": {"type": "number", "default": 1},
|
||||||
|
"label": {"type": "any"},
|
||||||
|
"disabled": {"type": "boolean", "default": false},
|
||||||
|
"marks": {"type": "array"},
|
||||||
|
"size": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "md"},
|
||||||
|
"radius": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "xl"},
|
||||||
|
"color": {"type": "string", "default": "blue"},
|
||||||
|
"showLabelOnHover": {"type": "boolean", "default": true},
|
||||||
|
"labelAlwaysOn": {"type": "boolean", "default": false},
|
||||||
|
"thumbLabel": {"type": "string"},
|
||||||
|
"precision": {"type": "integer", "default": 0},
|
||||||
|
"inverted": {"type": "boolean", "default": false},
|
||||||
|
"thumbSize": {"type": "number"},
|
||||||
|
"restrictToMarks": {"type": "boolean", "default": false}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"Alert": {
|
||||||
|
"description": "Alert component for feedback messages",
|
||||||
|
"props": {
|
||||||
|
"children": {"type": "any"},
|
||||||
|
"title": {"type": "any"},
|
||||||
|
"color": {"type": "string", "default": "blue"},
|
||||||
|
"variant": {"type": "string", "enum": ["filled", "light", "outline", "default", "transparent", "white"], "default": "light"},
|
||||||
|
"radius": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "sm"},
|
||||||
|
"icon": {"type": "any"},
|
||||||
|
"withCloseButton": {"type": "boolean", "default": false},
|
||||||
|
"closeButtonLabel": {"type": "string"},
|
||||||
|
"autoContrast": {"type": "boolean", "default": false}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"Loader": {
|
||||||
|
"description": "Loading indicator",
|
||||||
|
"props": {
|
||||||
|
"color": {"type": "string", "default": "blue"},
|
||||||
|
"size": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "md"},
|
||||||
|
"type": {"type": "string", "enum": ["oval", "bars", "dots"], "default": "oval"}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"Progress": {
|
||||||
|
"description": "Progress bar",
|
||||||
|
"props": {
|
||||||
|
"value": {"type": "number", "required": true},
|
||||||
|
"color": {"type": "string", "default": "blue"},
|
||||||
|
"size": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "md"},
|
||||||
|
"radius": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "sm"},
|
||||||
|
"striped": {"type": "boolean", "default": false},
|
||||||
|
"animated": {"type": "boolean", "default": false},
|
||||||
|
"autoContrast": {"type": "boolean", "default": false},
|
||||||
|
"transitionDuration": {"type": "number", "default": 100}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"Modal": {
|
||||||
|
"description": "Modal dialog overlay",
|
||||||
|
"props": {
|
||||||
|
"children": {"type": "any"},
|
||||||
|
"opened": {"type": "boolean", "required": true},
|
||||||
|
"title": {"type": "any"},
|
||||||
|
"size": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl", "auto"], "default": "md"},
|
||||||
|
"radius": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "sm"},
|
||||||
|
"centered": {"type": "boolean", "default": false},
|
||||||
|
"fullScreen": {"type": "boolean", "default": false},
|
||||||
|
"withCloseButton": {"type": "boolean", "default": true},
|
||||||
|
"closeOnClickOutside": {"type": "boolean", "default": true},
|
||||||
|
"closeOnEscape": {"type": "boolean", "default": true},
|
||||||
|
"overlayProps": {"type": "object"},
|
||||||
|
"padding": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "md"},
|
||||||
|
"transitionProps": {"type": "object"},
|
||||||
|
"zIndex": {"type": "number", "default": 200},
|
||||||
|
"trapFocus": {"type": "boolean", "default": true},
|
||||||
|
"returnFocus": {"type": "boolean", "default": true},
|
||||||
|
"lockScroll": {"type": "boolean", "default": true}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"Drawer": {
|
||||||
|
"description": "Sliding panel drawer",
|
||||||
|
"props": {
|
||||||
|
"children": {"type": "any"},
|
||||||
|
"opened": {"type": "boolean", "required": true},
|
||||||
|
"title": {"type": "any"},
|
||||||
|
"position": {"type": "string", "enum": ["left", "right", "top", "bottom"], "default": "left"},
|
||||||
|
"size": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "md"},
|
||||||
|
"radius": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"]},
|
||||||
|
"withCloseButton": {"type": "boolean", "default": true},
|
||||||
|
"closeOnClickOutside": {"type": "boolean", "default": true},
|
||||||
|
"closeOnEscape": {"type": "boolean", "default": true},
|
||||||
|
"overlayProps": {"type": "object"},
|
||||||
|
"padding": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "md"},
|
||||||
|
"zIndex": {"type": "number", "default": 200},
|
||||||
|
"offset": {"type": "number", "default": 0},
|
||||||
|
"trapFocus": {"type": "boolean", "default": true},
|
||||||
|
"returnFocus": {"type": "boolean", "default": true},
|
||||||
|
"lockScroll": {"type": "boolean", "default": true}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"Tooltip": {
|
||||||
|
"description": "Tooltip on hover",
|
||||||
|
"props": {
|
||||||
|
"children": {"type": "any", "required": true},
|
||||||
|
"label": {"type": "any", "required": true},
|
||||||
|
"position": {"type": "string", "enum": ["top", "right", "bottom", "left", "top-start", "top-end", "right-start", "right-end", "bottom-start", "bottom-end", "left-start", "left-end"], "default": "top"},
|
||||||
|
"color": {"type": "string"},
|
||||||
|
"radius": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "sm"},
|
||||||
|
"withArrow": {"type": "boolean", "default": false},
|
||||||
|
"arrowSize": {"type": "number", "default": 4},
|
||||||
|
"arrowOffset": {"type": "number", "default": 5},
|
||||||
|
"offset": {"type": "number", "default": 5},
|
||||||
|
"multiline": {"type": "boolean", "default": false},
|
||||||
|
"disabled": {"type": "boolean", "default": false},
|
||||||
|
"openDelay": {"type": "number", "default": 0},
|
||||||
|
"closeDelay": {"type": "number", "default": 0},
|
||||||
|
"transitionProps": {"type": "object"},
|
||||||
|
"zIndex": {"type": "number", "default": 300}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"Text": {
|
||||||
|
"description": "Text component with styling",
|
||||||
|
"props": {
|
||||||
|
"children": {"type": "any"},
|
||||||
|
"size": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"]},
|
||||||
|
"c": {"type": "string", "description": "Color"},
|
||||||
|
"fw": {"type": "number", "description": "Font weight"},
|
||||||
|
"fs": {"type": "string", "enum": ["normal", "italic"], "description": "Font style"},
|
||||||
|
"td": {"type": "string", "enum": ["none", "underline", "line-through"], "description": "Text decoration"},
|
||||||
|
"tt": {"type": "string", "enum": ["none", "capitalize", "uppercase", "lowercase"], "description": "Text transform"},
|
||||||
|
"ta": {"type": "string", "enum": ["left", "center", "right", "justify"], "description": "Text align"},
|
||||||
|
"lineClamp": {"type": "integer"},
|
||||||
|
"truncate": {"type": "boolean", "default": false},
|
||||||
|
"inherit": {"type": "boolean", "default": false},
|
||||||
|
"gradient": {"type": "object"},
|
||||||
|
"span": {"type": "boolean", "default": false},
|
||||||
|
"lh": {"type": "string", "description": "Line height"}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"Title": {
|
||||||
|
"description": "Heading component",
|
||||||
|
"props": {
|
||||||
|
"children": {"type": "any"},
|
||||||
|
"order": {"type": "integer", "enum": [1, 2, 3, 4, 5, 6], "default": 1},
|
||||||
|
"size": {"type": "string"},
|
||||||
|
"c": {"type": "string", "description": "Color"},
|
||||||
|
"ta": {"type": "string", "enum": ["left", "center", "right", "justify"]},
|
||||||
|
"td": {"type": "string", "enum": ["none", "underline", "line-through"]},
|
||||||
|
"tt": {"type": "string", "enum": ["none", "capitalize", "uppercase", "lowercase"]},
|
||||||
|
"lineClamp": {"type": "integer"},
|
||||||
|
"truncate": {"type": "boolean", "default": false}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"Stack": {
|
||||||
|
"description": "Vertical stack layout",
|
||||||
|
"props": {
|
||||||
|
"children": {"type": "any"},
|
||||||
|
"gap": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "md"},
|
||||||
|
"align": {"type": "string", "enum": ["stretch", "center", "flex-start", "flex-end"], "default": "stretch"},
|
||||||
|
"justify": {"type": "string", "enum": ["flex-start", "flex-end", "center", "space-between", "space-around", "space-evenly"], "default": "flex-start"}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"Group": {
|
||||||
|
"description": "Horizontal group layout",
|
||||||
|
"props": {
|
||||||
|
"children": {"type": "any"},
|
||||||
|
"gap": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "md"},
|
||||||
|
"align": {"type": "string", "enum": ["stretch", "center", "flex-start", "flex-end"], "default": "center"},
|
||||||
|
"justify": {"type": "string", "enum": ["flex-start", "flex-end", "center", "space-between", "space-around"], "default": "flex-start"},
|
||||||
|
"grow": {"type": "boolean", "default": false},
|
||||||
|
"wrap": {"type": "string", "enum": ["wrap", "nowrap", "wrap-reverse"], "default": "wrap"},
|
||||||
|
"preventGrowOverflow": {"type": "boolean", "default": true}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"Flex": {
|
||||||
|
"description": "Flexbox container",
|
||||||
|
"props": {
|
||||||
|
"children": {"type": "any"},
|
||||||
|
"gap": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"]},
|
||||||
|
"rowGap": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"]},
|
||||||
|
"columnGap": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"]},
|
||||||
|
"align": {"type": "string", "enum": ["stretch", "center", "flex-start", "flex-end", "baseline"]},
|
||||||
|
"justify": {"type": "string", "enum": ["flex-start", "flex-end", "center", "space-between", "space-around", "space-evenly"]},
|
||||||
|
"wrap": {"type": "string", "enum": ["wrap", "nowrap", "wrap-reverse"], "default": "nowrap"},
|
||||||
|
"direction": {"type": "string", "enum": ["row", "column", "row-reverse", "column-reverse"], "default": "row"}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"Grid": {
|
||||||
|
"description": "Grid layout component",
|
||||||
|
"props": {
|
||||||
|
"children": {"type": "any"},
|
||||||
|
"columns": {"type": "integer", "default": 12},
|
||||||
|
"gutter": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "md"},
|
||||||
|
"grow": {"type": "boolean", "default": false},
|
||||||
|
"justify": {"type": "string", "enum": ["flex-start", "flex-end", "center", "space-between", "space-around"], "default": "flex-start"},
|
||||||
|
"align": {"type": "string", "enum": ["stretch", "center", "flex-start", "flex-end"], "default": "stretch"},
|
||||||
|
"overflow": {"type": "string", "enum": ["visible", "hidden"], "default": "visible"}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"SimpleGrid": {
|
||||||
|
"description": "Simple grid with equal columns",
|
||||||
|
"props": {
|
||||||
|
"children": {"type": "any"},
|
||||||
|
"cols": {"type": "integer", "default": 1},
|
||||||
|
"spacing": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "md"},
|
||||||
|
"verticalSpacing": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"]}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"Container": {
|
||||||
|
"description": "Centered container with max-width",
|
||||||
|
"props": {
|
||||||
|
"children": {"type": "any"},
|
||||||
|
"size": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "md"},
|
||||||
|
"fluid": {"type": "boolean", "default": false}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"Paper": {
|
||||||
|
"description": "Paper surface component",
|
||||||
|
"props": {
|
||||||
|
"children": {"type": "any"},
|
||||||
|
"shadow": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"]},
|
||||||
|
"radius": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "sm"},
|
||||||
|
"p": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "description": "Padding"},
|
||||||
|
"withBorder": {"type": "boolean", "default": false}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"Card": {
|
||||||
|
"description": "Card container",
|
||||||
|
"props": {
|
||||||
|
"children": {"type": "any"},
|
||||||
|
"shadow": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "sm"},
|
||||||
|
"radius": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "sm"},
|
||||||
|
"padding": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "md"},
|
||||||
|
"withBorder": {"type": "boolean", "default": false}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"Tabs": {
|
||||||
|
"description": "Tabbed interface",
|
||||||
|
"props": {
|
||||||
|
"children": {"type": "any"},
|
||||||
|
"value": {"type": "string"},
|
||||||
|
"defaultValue": {"type": "string"},
|
||||||
|
"orientation": {"type": "string", "enum": ["horizontal", "vertical"], "default": "horizontal"},
|
||||||
|
"variant": {"type": "string", "enum": ["default", "outline", "pills"], "default": "default"},
|
||||||
|
"color": {"type": "string", "default": "blue"},
|
||||||
|
"radius": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "sm"},
|
||||||
|
"placement": {"type": "string", "enum": ["left", "right"], "default": "left"},
|
||||||
|
"grow": {"type": "boolean", "default": false},
|
||||||
|
"inverted": {"type": "boolean", "default": false},
|
||||||
|
"keepMounted": {"type": "boolean", "default": true},
|
||||||
|
"activateTabWithKeyboard": {"type": "boolean", "default": true},
|
||||||
|
"allowTabDeactivation": {"type": "boolean", "default": false},
|
||||||
|
"autoContrast": {"type": "boolean", "default": false}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"Accordion": {
|
||||||
|
"description": "Collapsible content panels",
|
||||||
|
"props": {
|
||||||
|
"children": {"type": "any"},
|
||||||
|
"value": {"type": "any"},
|
||||||
|
"defaultValue": {"type": "any"},
|
||||||
|
"multiple": {"type": "boolean", "default": false},
|
||||||
|
"variant": {"type": "string", "enum": ["default", "contained", "filled", "separated"], "default": "default"},
|
||||||
|
"radius": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "sm"},
|
||||||
|
"chevronPosition": {"type": "string", "enum": ["left", "right"], "default": "right"},
|
||||||
|
"disableChevronRotation": {"type": "boolean", "default": false},
|
||||||
|
"transitionDuration": {"type": "number", "default": 200},
|
||||||
|
"chevronSize": {"type": "any"},
|
||||||
|
"order": {"type": "integer", "enum": [2, 3, 4, 5, 6]}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"Badge": {
|
||||||
|
"description": "Badge for status or labels",
|
||||||
|
"props": {
|
||||||
|
"children": {"type": "any"},
|
||||||
|
"color": {"type": "string", "default": "blue"},
|
||||||
|
"variant": {"type": "string", "enum": ["filled", "light", "outline", "dot", "gradient", "default", "transparent", "white"], "default": "filled"},
|
||||||
|
"size": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "md"},
|
||||||
|
"radius": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "xl"},
|
||||||
|
"fullWidth": {"type": "boolean", "default": false},
|
||||||
|
"leftSection": {"type": "any"},
|
||||||
|
"rightSection": {"type": "any"},
|
||||||
|
"autoContrast": {"type": "boolean", "default": false},
|
||||||
|
"circle": {"type": "boolean", "default": false}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"Avatar": {
|
||||||
|
"description": "User avatar image",
|
||||||
|
"props": {
|
||||||
|
"src": {"type": "string"},
|
||||||
|
"alt": {"type": "string"},
|
||||||
|
"children": {"type": "any", "description": "Fallback content"},
|
||||||
|
"color": {"type": "string", "default": "gray"},
|
||||||
|
"size": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "md"},
|
||||||
|
"radius": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "xl"},
|
||||||
|
"variant": {"type": "string", "enum": ["filled", "light", "outline", "gradient", "default", "transparent", "white"], "default": "filled"},
|
||||||
|
"autoContrast": {"type": "boolean", "default": false}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"Image": {
|
||||||
|
"description": "Image with fallback",
|
||||||
|
"props": {
|
||||||
|
"src": {"type": "string"},
|
||||||
|
"alt": {"type": "string"},
|
||||||
|
"w": {"type": "any", "description": "Width"},
|
||||||
|
"h": {"type": "any", "description": "Height"},
|
||||||
|
"radius": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"]},
|
||||||
|
"fit": {"type": "string", "enum": ["contain", "cover", "fill", "none", "scale-down"], "default": "cover"},
|
||||||
|
"fallbackSrc": {"type": "string"}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"Table": {
|
||||||
|
"description": "Data table component",
|
||||||
|
"props": {
|
||||||
|
"children": {"type": "any"},
|
||||||
|
"data": {"type": "object", "description": "Table data object with head, body, foot"},
|
||||||
|
"striped": {"type": "boolean", "default": false},
|
||||||
|
"highlightOnHover": {"type": "boolean", "default": false},
|
||||||
|
"withTableBorder": {"type": "boolean", "default": false},
|
||||||
|
"withColumnBorders": {"type": "boolean", "default": false},
|
||||||
|
"withRowBorders": {"type": "boolean", "default": true},
|
||||||
|
"verticalSpacing": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "xs"},
|
||||||
|
"horizontalSpacing": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "xs"},
|
||||||
|
"captionSide": {"type": "string", "enum": ["top", "bottom"], "default": "bottom"},
|
||||||
|
"stickyHeader": {"type": "boolean", "default": false},
|
||||||
|
"stickyHeaderOffset": {"type": "number", "default": 0}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"AreaChart": {
|
||||||
|
"description": "Area chart for time series data",
|
||||||
|
"props": {
|
||||||
|
"data": {"type": "array", "required": true},
|
||||||
|
"dataKey": {"type": "string", "required": true, "description": "X-axis data key"},
|
||||||
|
"series": {"type": "array", "required": true, "description": "Array of {name, color} objects"},
|
||||||
|
"h": {"type": "any", "description": "Chart height"},
|
||||||
|
"w": {"type": "any", "description": "Chart width"},
|
||||||
|
"curveType": {"type": "string", "enum": ["bump", "linear", "natural", "monotone", "step", "stepBefore", "stepAfter"], "default": "monotone"},
|
||||||
|
"connectNulls": {"type": "boolean", "default": true},
|
||||||
|
"withDots": {"type": "boolean", "default": true},
|
||||||
|
"withGradient": {"type": "boolean", "default": true},
|
||||||
|
"withLegend": {"type": "boolean", "default": false},
|
||||||
|
"withTooltip": {"type": "boolean", "default": true},
|
||||||
|
"withXAxis": {"type": "boolean", "default": true},
|
||||||
|
"withYAxis": {"type": "boolean", "default": true},
|
||||||
|
"gridAxis": {"type": "string", "enum": ["x", "y", "xy", "none"], "default": "x"},
|
||||||
|
"tickLine": {"type": "string", "enum": ["x", "y", "xy", "none"], "default": "y"},
|
||||||
|
"strokeDasharray": {"type": "string"},
|
||||||
|
"fillOpacity": {"type": "number", "default": 0.2},
|
||||||
|
"splitColors": {"type": "array"},
|
||||||
|
"areaChartProps": {"type": "object"},
|
||||||
|
"type": {"type": "string", "enum": ["default", "stacked", "percent", "split"], "default": "default"}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"BarChart": {
|
||||||
|
"description": "Bar chart for categorical data",
|
||||||
|
"props": {
|
||||||
|
"data": {"type": "array", "required": true},
|
||||||
|
"dataKey": {"type": "string", "required": true},
|
||||||
|
"series": {"type": "array", "required": true},
|
||||||
|
"h": {"type": "any"},
|
||||||
|
"w": {"type": "any"},
|
||||||
|
"orientation": {"type": "string", "enum": ["horizontal", "vertical"], "default": "vertical"},
|
||||||
|
"withLegend": {"type": "boolean", "default": false},
|
||||||
|
"withTooltip": {"type": "boolean", "default": true},
|
||||||
|
"withXAxis": {"type": "boolean", "default": true},
|
||||||
|
"withYAxis": {"type": "boolean", "default": true},
|
||||||
|
"gridAxis": {"type": "string", "enum": ["x", "y", "xy", "none"], "default": "x"},
|
||||||
|
"tickLine": {"type": "string", "enum": ["x", "y", "xy", "none"], "default": "y"},
|
||||||
|
"barProps": {"type": "object"},
|
||||||
|
"type": {"type": "string", "enum": ["default", "stacked", "percent", "waterfall"], "default": "default"}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"LineChart": {
|
||||||
|
"description": "Line chart for trends",
|
||||||
|
"props": {
|
||||||
|
"data": {"type": "array", "required": true},
|
||||||
|
"dataKey": {"type": "string", "required": true},
|
||||||
|
"series": {"type": "array", "required": true},
|
||||||
|
"h": {"type": "any"},
|
||||||
|
"w": {"type": "any"},
|
||||||
|
"curveType": {"type": "string", "enum": ["bump", "linear", "natural", "monotone", "step", "stepBefore", "stepAfter"], "default": "monotone"},
|
||||||
|
"connectNulls": {"type": "boolean", "default": true},
|
||||||
|
"withDots": {"type": "boolean", "default": true},
|
||||||
|
"withLegend": {"type": "boolean", "default": false},
|
||||||
|
"withTooltip": {"type": "boolean", "default": true},
|
||||||
|
"withXAxis": {"type": "boolean", "default": true},
|
||||||
|
"withYAxis": {"type": "boolean", "default": true},
|
||||||
|
"gridAxis": {"type": "string", "enum": ["x", "y", "xy", "none"], "default": "x"},
|
||||||
|
"strokeWidth": {"type": "number", "default": 2}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"PieChart": {
|
||||||
|
"description": "Pie chart for proportions",
|
||||||
|
"props": {
|
||||||
|
"data": {"type": "array", "required": true, "description": "Array of {name, value, color} objects"},
|
||||||
|
"h": {"type": "any"},
|
||||||
|
"w": {"type": "any"},
|
||||||
|
"withLabels": {"type": "boolean", "default": false},
|
||||||
|
"withLabelsLine": {"type": "boolean", "default": true},
|
||||||
|
"withTooltip": {"type": "boolean", "default": true},
|
||||||
|
"labelsPosition": {"type": "string", "enum": ["inside", "outside"], "default": "outside"},
|
||||||
|
"labelsType": {"type": "string", "enum": ["value", "percent"], "default": "value"},
|
||||||
|
"strokeWidth": {"type": "number", "default": 1},
|
||||||
|
"strokeColor": {"type": "string"},
|
||||||
|
"startAngle": {"type": "number", "default": 0},
|
||||||
|
"endAngle": {"type": "number", "default": 360}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"DonutChart": {
|
||||||
|
"description": "Donut chart (pie with hole)",
|
||||||
|
"props": {
|
||||||
|
"data": {"type": "array", "required": true},
|
||||||
|
"h": {"type": "any"},
|
||||||
|
"w": {"type": "any"},
|
||||||
|
"withLabels": {"type": "boolean", "default": false},
|
||||||
|
"withLabelsLine": {"type": "boolean", "default": true},
|
||||||
|
"withTooltip": {"type": "boolean", "default": true},
|
||||||
|
"thickness": {"type": "number", "default": 20},
|
||||||
|
"chartLabel": {"type": "any"},
|
||||||
|
"strokeWidth": {"type": "number", "default": 1},
|
||||||
|
"strokeColor": {"type": "string"},
|
||||||
|
"startAngle": {"type": "number", "default": 0},
|
||||||
|
"endAngle": {"type": "number", "default": 360},
|
||||||
|
"paddingAngle": {"type": "number", "default": 0}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"DatePicker": {
|
||||||
|
"description": "Date picker calendar",
|
||||||
|
"props": {
|
||||||
|
"value": {"type": "string"},
|
||||||
|
"type": {"type": "string", "enum": ["default", "range", "multiple"], "default": "default"},
|
||||||
|
"defaultValue": {"type": "any"},
|
||||||
|
"allowDeselect": {"type": "boolean", "default": false},
|
||||||
|
"allowSingleDateInRange": {"type": "boolean", "default": false},
|
||||||
|
"numberOfColumns": {"type": "integer", "default": 1},
|
||||||
|
"columnsToScroll": {"type": "integer", "default": 1},
|
||||||
|
"ariaLabels": {"type": "object"},
|
||||||
|
"hideOutsideDates": {"type": "boolean", "default": false},
|
||||||
|
"hideWeekdays": {"type": "boolean", "default": false},
|
||||||
|
"weekendDays": {"type": "array", "default": [0, 6]},
|
||||||
|
"renderDay": {"type": "any"},
|
||||||
|
"minDate": {"type": "string"},
|
||||||
|
"maxDate": {"type": "string"},
|
||||||
|
"size": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "sm"}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"DatePickerInput": {
|
||||||
|
"description": "Date picker input field",
|
||||||
|
"props": {
|
||||||
|
"value": {"type": "string"},
|
||||||
|
"label": {"type": "any"},
|
||||||
|
"description": {"type": "any"},
|
||||||
|
"error": {"type": "any"},
|
||||||
|
"placeholder": {"type": "string"},
|
||||||
|
"clearable": {"type": "boolean", "default": false},
|
||||||
|
"type": {"type": "string", "enum": ["default", "range", "multiple"], "default": "default"},
|
||||||
|
"valueFormat": {"type": "string", "default": "MMMM D, YYYY"},
|
||||||
|
"size": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "sm"},
|
||||||
|
"radius": {"type": "string", "enum": ["xs", "sm", "md", "lg", "xl"], "default": "sm"},
|
||||||
|
"disabled": {"type": "boolean", "default": false},
|
||||||
|
"required": {"type": "boolean", "default": false},
|
||||||
|
"minDate": {"type": "string"},
|
||||||
|
"maxDate": {"type": "string"},
|
||||||
|
"popoverProps": {"type": "object"},
|
||||||
|
"dropdownType": {"type": "string", "enum": ["popover", "modal"], "default": "popover"}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"DatesProvider": {
|
||||||
|
"description": "Provider for date localization settings",
|
||||||
|
"props": {
|
||||||
|
"children": {"type": "any", "required": true},
|
||||||
|
"settings": {"type": "object", "description": "Locale and formatting settings"}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
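For orientation, a minimal sketch of how a consumer might look up a component in this registry and validate an enum-typed prop at runtime. The registry filename and the validate_prop helper are illustrative assumptions, not part of the server's actual API:

import json
from pathlib import Path

# Assumed filename; the generator script below derives it from the DMC version.
registry = json.loads(Path("registry/dmc_2_0.json").read_text())

def validate_prop(component: str, prop: str, value):
    """Raise if a value falls outside the registry's enum for this prop (sketch)."""
    spec = registry["components"][component]["props"][prop]
    if "enum" in spec and value not in spec["enum"]:
        raise ValueError(f"{component}.{prop}: expected one of {spec['enum']}, got {value!r}")
    return value

validate_prop("Loader", "type", "bars")   # passes: "bars" is in ["oval", "bars", "dots"]
validate_prop("Loader", "type", "ring")   # raises ValueError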
16  mcp-servers/viz-platform/requirements.txt  Normal file
@@ -0,0 +1,16 @@
# MCP SDK
mcp>=0.9.0

# Visualization
plotly>=5.18.0
dash>=2.14.0
dash-mantine-components>=2.0.0
kaleido>=0.2.1  # For chart export (PNG, SVG, PDF)

# Utilities
python-dotenv>=1.0.0
pydantic>=2.5.0

# Testing
pytest>=7.4.3
pytest-asyncio>=0.23.0
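kaleido is the headless rendering engine Plotly uses for static export, which is what the chart-export comment above refers to. A minimal sketch of what the pin enables (the figure itself is a throwaway example):

import plotly.express as px

fig = px.line(x=[1, 2, 3], y=[2, 1, 3])
fig.write_image("chart.png")  # kaleido renders PNG/SVG/PDF without a browser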
21  mcp-servers/viz-platform/run.sh  Executable file
@@ -0,0 +1,21 @@
#!/bin/bash
# Capture original working directory before any cd operations
# This should be the user's project directory when launched by Claude Code
export CLAUDE_PROJECT_DIR="${CLAUDE_PROJECT_DIR:-$PWD}"

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
CACHE_VENV="$HOME/.cache/claude-mcp-venvs/leo-claude-mktplace/viz-platform/.venv"
LOCAL_VENV="$SCRIPT_DIR/.venv"

if [[ -f "$CACHE_VENV/bin/python" ]]; then
    PYTHON="$CACHE_VENV/bin/python"
elif [[ -f "$LOCAL_VENV/bin/python" ]]; then
    PYTHON="$LOCAL_VENV/bin/python"
else
    echo "ERROR: No venv found. Run: ./scripts/setup-venvs.sh" >&2
    exit 1
fi

cd "$SCRIPT_DIR"
export PYTHONPATH="$SCRIPT_DIR"
exec "$PYTHON" -m mcp_server.server "$@"
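The launcher prefers the shared cached venv and falls back to a repo-local one, so a quick manual smoke test is simply running `./mcp-servers/viz-platform/run.sh` from the repository root after `./scripts/setup-venvs.sh` has created a venv; when launched by Claude Code, the same script serves as the MCP server command and CLAUDE_PROJECT_DIR is either inherited or captured from $PWD.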
262  mcp-servers/viz-platform/scripts/generate-dmc-registry.py  Normal file
@@ -0,0 +1,262 @@
#!/usr/bin/env python3
"""
Generate DMC Component Registry from installed dash-mantine-components package.

This script introspects the installed DMC package and generates a JSON registry
file containing component definitions, props, types, and defaults.

Usage:
    python generate-dmc-registry.py [--output registry/dmc_X_Y.json]

Requirements:
    - dash-mantine-components must be installed
    - Run from the mcp-servers/viz-platform directory
"""
import argparse
import inspect
import json
import sys
from datetime import date
from pathlib import Path
from typing import Any, Dict, List, Optional, get_type_hints


def get_dmc_version() -> Optional[str]:
    """Get installed DMC version."""
    try:
        from importlib.metadata import version
        return version('dash-mantine-components')
    except Exception:
        return None


def get_component_categories() -> Dict[str, List[str]]:
    """Define component categories."""
    return {
        "buttons": ["Button", "ActionIcon", "CopyButton", "FileButton", "UnstyledButton"],
        "inputs": [
            "TextInput", "PasswordInput", "NumberInput", "Textarea",
            "Select", "MultiSelect", "Checkbox", "Switch", "Radio",
            "Slider", "RangeSlider", "ColorInput", "ColorPicker",
            "DateInput", "DatePicker", "TimeInput"
        ],
        "navigation": ["Anchor", "Breadcrumbs", "Burger", "NavLink", "Pagination", "Stepper", "Tabs"],
        "feedback": ["Alert", "Loader", "Notification", "Progress", "RingProgress", "Skeleton"],
        "overlays": ["Dialog", "Drawer", "HoverCard", "Menu", "Modal", "Popover", "Tooltip"],
        "typography": ["Blockquote", "Code", "Highlight", "Mark", "Text", "Title"],
        "layout": [
            "AppShell", "AspectRatio", "Center", "Container", "Flex",
            "Grid", "Group", "Paper", "SimpleGrid", "Space", "Stack"
        ],
        "data": [
            "Accordion", "Avatar", "Badge", "Card", "Image",
            "Indicator", "Kbd", "Spoiler", "Table", "ThemeIcon", "Timeline"
        ]
    }


def extract_prop_type(prop_info: Dict[str, Any]) -> Dict[str, Any]:
    """Extract prop type information from Dash component prop."""
    result = {"type": "any"}

    if 'type' not in prop_info:
        return result

    prop_type = prop_info['type']

    if isinstance(prop_type, dict):
        type_name = prop_type.get('name', 'any')

        # Map Dash types to JSON schema types
        type_mapping = {
            'string': 'string',
            'number': 'number',
            'bool': 'boolean',
            'boolean': 'boolean',
            'array': 'array',
            'object': 'object',
            'node': 'any',
            'element': 'any',
            'any': 'any',
            'func': 'any',
        }

        result['type'] = type_mapping.get(type_name, 'any')

        # Handle enums
        if type_name == 'enum' and 'value' in prop_type:
            values = prop_type['value']
            if isinstance(values, list):
                enum_values = []
                for v in values:
                    if isinstance(v, dict) and 'value' in v:
                        # Remove quotes from string values
                        val = v['value'].strip("'\"")
                        enum_values.append(val)
                    elif isinstance(v, str):
                        enum_values.append(v.strip("'\""))
                if enum_values:
                    result['enum'] = enum_values
                    result['type'] = 'string'

        # Handle union types
        elif type_name == 'union' and 'value' in prop_type:
            # For unions, just mark as any for simplicity
            result['type'] = 'any'

    elif isinstance(prop_type, str):
        result['type'] = prop_type

    return result

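# A worked example of the mapping above (illustrative values): Dash represents
# an enum prop roughly as
#   {'name': 'enum', 'value': [{'value': "'oval'"}, {'value': "'bars'"}]}
# and passing it in as {'type': <that dict>} makes extract_prop_type() return
#   {'type': 'string', 'enum': ['oval', 'bars']}
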
def extract_component_props(component_class) -> Dict[str, Any]:
    """Extract props from a Dash component class."""
    props = {}

    # Try to get _prop_names or similar
    if hasattr(component_class, '_prop_names'):
        prop_names = component_class._prop_names
    else:
        prop_names = []

    # Try to get _type attribute for prop definitions
    if hasattr(component_class, '_type'):
        prop_types = getattr(component_class, '_type', {})
    else:
        prop_types = {}

    # Get default values
    if hasattr(component_class, '_default_props'):
        defaults = component_class._default_props
    else:
        defaults = {}

    # Try to extract from _prop_descriptions
    if hasattr(component_class, '_prop_descriptions'):
        descriptions = component_class._prop_descriptions
    else:
        descriptions = {}

    for prop_name in prop_names:
        if prop_name.startswith('_'):
            continue

        prop_info = {}

        # Get type info if available
        if prop_name in prop_types:
            prop_info = extract_prop_type({'type': prop_types[prop_name]})
        else:
            prop_info = {'type': 'any'}

        # Add default if exists
        if prop_name in defaults:
            prop_info['default'] = defaults[prop_name]

        # Add description if exists
        if prop_name in descriptions:
            prop_info['description'] = descriptions[prop_name]

        props[prop_name] = prop_info

    return props

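# Note: _prop_names, _type, _default_props and _prop_descriptions are private
# Dash internals, hence the defensive hasattr checks above; when an attribute
# is missing, extraction degrades gracefully (props fall back to {'type': 'any'}
# with no default or description) instead of raising.
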
def generate_registry() -> Dict[str, Any]:
    """Generate the component registry from installed DMC."""
    try:
        import dash_mantine_components as dmc
    except ImportError:
        print("ERROR: dash-mantine-components not installed")
        print("Install with: pip install dash-mantine-components")
        sys.exit(1)

    version = get_dmc_version()
    categories = get_component_categories()

    registry = {
        "version": version,
        "generated": date.today().isoformat(),
        "categories": categories,
        "components": {}
    }

    # Get all components from categories
    all_components = set()
    for comp_list in categories.values():
        all_components.update(comp_list)

    # Extract props for each component
    for comp_name in sorted(all_components):
        if hasattr(dmc, comp_name):
            comp_class = getattr(dmc, comp_name)
            try:
                props = extract_component_props(comp_class)
                if props:
                    registry["components"][comp_name] = {
                        "description": comp_class.__doc__ or f"{comp_name} component",
                        "props": props
                    }
                    print(f"  Extracted: {comp_name} ({len(props)} props)")
            except Exception as e:
                print(f"  Warning: Failed to extract {comp_name}: {e}")
        else:
            print(f"  Warning: Component not found: {comp_name}")

    return registry


def main():
    parser = argparse.ArgumentParser(
        description="Generate DMC component registry from installed package"
    )
    parser.add_argument(
        '--output', '-o',
        type=str,
        help='Output file path (default: auto-generated based on version)'
    )
    parser.add_argument(
        '--dry-run',
        action='store_true',
        help='Print to stdout instead of writing file'
    )

    args = parser.parse_args()

    print("Generating DMC Component Registry...")
    print("=" * 50)

    registry = generate_registry()

    print("=" * 50)
    print(f"Generated registry for DMC {registry['version']}")
    print(f"Total components: {len(registry['components'])}")

    if args.dry_run:
        print(json.dumps(registry, indent=2))
        return

    # Determine output path
    if args.output:
        output_path = Path(args.output)
    else:
        version = registry['version']
        if version:
            major_minor = '_'.join(version.split('.')[:2])
            output_path = Path(__file__).parent.parent / 'registry' / f'dmc_{major_minor}.json'
        else:
            output_path = Path(__file__).parent.parent / 'registry' / 'dmc_unknown.json'

    # Create directory if needed
    output_path.parent.mkdir(parents=True, exist_ok=True)

    # Write registry
    with open(output_path, 'w') as f:
        json.dump(registry, indent=2, fp=f)

    print(f"Registry written to: {output_path}")


if __name__ == "__main__":
    main()
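Typical invocations, following the argparse definitions above:

python scripts/generate-dmc-registry.py              # writes registry/dmc_<major>_<minor>.json
python scripts/generate-dmc-registry.py --dry-run    # prints the registry JSON to stdout instead
python scripts/generate-dmc-registry.py -o out.json  # explicit output path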
1  mcp-servers/viz-platform/tests/__init__.py  Normal file
@@ -0,0 +1 @@
"""viz-platform MCP Server tests."""
Some files were not shown because too many files have changed in this diff.