From 9698e8724d79a975810a515c54d96a0f0c983914 Mon Sep 17 00:00:00 2001 From: lmiranda Date: Wed, 28 Jan 2026 12:02:26 -0500 Subject: [PATCH 1/2] feat(plugins): implement Sprint 4 commands (#241-#258) Sprint 4 - Plugin Commands implementation adding 18 new user-facing commands across 8 plugins as part of V5.2.0 Plugin Enhancements. **projman:** - #241: /sprint-diagram - Mermaid visualization of sprint issues **pr-review:** - #242: Confidence threshold config (PR_REVIEW_CONFIDENCE_THRESHOLD) - #243: /pr-diff - Formatted diff with inline review comments **data-platform:** - #244: /data-quality - DataFrame quality checks (nulls, duplicates, outliers) - #245: /lineage-viz - dbt lineage as Mermaid diagrams - #246: /dbt-test - Formatted dbt test runner **viz-platform:** - #247: /chart-export - Export charts to PNG/SVG/PDF via kaleido - #248: /accessibility-check - Color blind validation (WCAG contrast) - #249: /breakpoints - Responsive layout configuration **contract-validator:** - #250: /dependency-graph - Plugin dependency visualization **doc-guardian:** - #251: /changelog-gen - Generate changelog from conventional commits - #252: /doc-coverage - Documentation coverage metrics - #253: /stale-docs - Flag outdated documentation **claude-config-maintainer:** - #254: /config-diff - Track CLAUDE.md changes over time - #255: /config-lint - 31 lint rules for CLAUDE.md best practices **cmdb-assistant:** - #256: /cmdb-topology - Infrastructure topology diagrams - #257: /change-audit - NetBox audit trail queries - #258: /ip-conflicts - Detect IP conflicts and overlaps Closes #241, #242, #243, #244, #245, #246, #247, #248, #249, #250, #251, #252, #253, #254, #255, #256, #257, #258 Co-Authored-By: Claude Opus 4.5 --- .../mcp_server/accessibility_tools.py | 479 ++++++++++++++++++ .../viz-platform/mcp_server/chart_tools.py | 136 +++++ .../viz-platform/mcp_server/layout_tools.py | 186 +++++++ mcp-servers/viz-platform/mcp_server/server.py | 227 +++++++++ mcp-servers/viz-platform/requirements.txt | 1 + .../tests/test_accessibility_tools.py | 195 +++++++ plugins/claude-config-maintainer/README.md | 27 + .../commands/config-diff.md | 239 +++++++++ .../commands/config-lint.md | 334 ++++++++++++ .../cmdb-assistant/.claude-plugin/plugin.json | 2 +- plugins/cmdb-assistant/README.md | 18 +- .../cmdb-assistant/commands/change-audit.md | 163 ++++++ .../cmdb-assistant/commands/cmdb-topology.md | 182 +++++++ .../cmdb-assistant/commands/ip-conflicts.md | 226 +++++++++ plugins/contract-validator/README.md | 12 + .../commands/dependency-graph.md | 251 +++++++++ plugins/data-platform/README.md | 5 +- .../data-platform/commands/data-quality.md | 103 ++++ plugins/data-platform/commands/dbt-test.md | 119 +++++ plugins/data-platform/commands/lineage-viz.md | 125 +++++ plugins/doc-guardian/README.md | 5 + plugins/doc-guardian/claude-md-integration.md | 3 + .../doc-guardian/commands/changelog-gen.md | 109 ++++ plugins/doc-guardian/commands/doc-coverage.md | 128 +++++ plugins/doc-guardian/commands/stale-docs.md | 143 ++++++ plugins/pr-review/README.md | 29 +- plugins/pr-review/agents/coordinator.md | 11 +- plugins/pr-review/claude-md-integration.md | 14 +- plugins/pr-review/commands/pr-diff.md | 154 ++++++ plugins/pr-review/commands/pr-review.md | 8 +- .../review-patterns/confidence-scoring.md | 6 +- plugins/projman/commands/sprint-diagram.md | 180 +++++++ plugins/viz-platform/README.md | 46 +- .../commands/accessibility-check.md | 144 ++++++ plugins/viz-platform/commands/breakpoints.md | 193 +++++++ 
plugins/viz-platform/commands/chart-export.md | 114 +++++ 36 files changed, 4295 insertions(+), 22 deletions(-) create mode 100644 mcp-servers/viz-platform/mcp_server/accessibility_tools.py create mode 100644 mcp-servers/viz-platform/tests/test_accessibility_tools.py create mode 100644 plugins/claude-config-maintainer/commands/config-diff.md create mode 100644 plugins/claude-config-maintainer/commands/config-lint.md create mode 100644 plugins/cmdb-assistant/commands/change-audit.md create mode 100644 plugins/cmdb-assistant/commands/cmdb-topology.md create mode 100644 plugins/cmdb-assistant/commands/ip-conflicts.md create mode 100644 plugins/contract-validator/commands/dependency-graph.md create mode 100644 plugins/data-platform/commands/data-quality.md create mode 100644 plugins/data-platform/commands/dbt-test.md create mode 100644 plugins/data-platform/commands/lineage-viz.md create mode 100644 plugins/doc-guardian/commands/changelog-gen.md create mode 100644 plugins/doc-guardian/commands/doc-coverage.md create mode 100644 plugins/doc-guardian/commands/stale-docs.md create mode 100644 plugins/pr-review/commands/pr-diff.md create mode 100644 plugins/projman/commands/sprint-diagram.md create mode 100644 plugins/viz-platform/commands/accessibility-check.md create mode 100644 plugins/viz-platform/commands/breakpoints.md create mode 100644 plugins/viz-platform/commands/chart-export.md diff --git a/mcp-servers/viz-platform/mcp_server/accessibility_tools.py b/mcp-servers/viz-platform/mcp_server/accessibility_tools.py new file mode 100644 index 0000000..e523361 --- /dev/null +++ b/mcp-servers/viz-platform/mcp_server/accessibility_tools.py @@ -0,0 +1,479 @@ +""" +Accessibility validation tools for color blindness and WCAG compliance. + +Provides tools for validating color palettes against color blindness +simulations and WCAG contrast requirements. 
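+
+Typical entry points: accessibility_validate_colors() for ad-hoc color lists,
+accessibility_validate_theme() for colors stored in a theme, and
+accessibility_suggest_alternative() for remediation suggestions.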
+""" +import logging +import math +from typing import Dict, List, Optional, Any, Tuple + +logger = logging.getLogger(__name__) + + +# Color-blind safe palettes +SAFE_PALETTES = { + "categorical": { + "name": "Paul Tol's Qualitative", + "colors": ["#4477AA", "#EE6677", "#228833", "#CCBB44", "#66CCEE", "#AA3377", "#BBBBBB"], + "description": "Distinguishable for all types of color blindness" + }, + "ibm": { + "name": "IBM Design", + "colors": ["#648FFF", "#785EF0", "#DC267F", "#FE6100", "#FFB000"], + "description": "IBM's accessible color palette" + }, + "okabe_ito": { + "name": "Okabe-Ito", + "colors": ["#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7", "#000000"], + "description": "Optimized for all color vision deficiencies" + }, + "tableau_colorblind": { + "name": "Tableau Colorblind 10", + "colors": ["#006BA4", "#FF800E", "#ABABAB", "#595959", "#5F9ED1", + "#C85200", "#898989", "#A2C8EC", "#FFBC79", "#CFCFCF"], + "description": "Industry-standard accessible palette" + } +} + + +# Simulation matrices for color blindness (LMS color space transformation) +# These approximate how colors appear to people with different types of color blindness +SIMULATION_MATRICES = { + "deuteranopia": { + # Green-blind (most common) + "severity": "common", + "population": "6% males, 0.4% females", + "description": "Difficulty distinguishing red from green (green-blind)", + "matrix": [ + [0.625, 0.375, 0.0], + [0.700, 0.300, 0.0], + [0.0, 0.300, 0.700] + ] + }, + "protanopia": { + # Red-blind + "severity": "common", + "population": "2.5% males, 0.05% females", + "description": "Difficulty distinguishing red from green (red-blind)", + "matrix": [ + [0.567, 0.433, 0.0], + [0.558, 0.442, 0.0], + [0.0, 0.242, 0.758] + ] + }, + "tritanopia": { + # Blue-blind (rare) + "severity": "rare", + "population": "0.01% total", + "description": "Difficulty distinguishing blue from yellow", + "matrix": [ + [0.950, 0.050, 0.0], + [0.0, 0.433, 0.567], + [0.0, 0.475, 0.525] + ] + } +} + + +class AccessibilityTools: + """ + Color accessibility validation tools. + + Validates colors for WCAG compliance and color blindness accessibility. + """ + + def __init__(self, theme_store=None): + """ + Initialize accessibility tools. + + Args: + theme_store: Optional ThemeStore for theme color extraction + """ + self.theme_store = theme_store + + def _hex_to_rgb(self, hex_color: str) -> Tuple[int, int, int]: + """Convert hex color to RGB tuple.""" + hex_color = hex_color.lstrip('#') + if len(hex_color) == 3: + hex_color = ''.join([c * 2 for c in hex_color]) + return tuple(int(hex_color[i:i+2], 16) for i in (0, 2, 4)) + + def _rgb_to_hex(self, rgb: Tuple[int, int, int]) -> str: + """Convert RGB tuple to hex color.""" + return '#{:02x}{:02x}{:02x}'.format( + max(0, min(255, int(rgb[0]))), + max(0, min(255, int(rgb[1]))), + max(0, min(255, int(rgb[2]))) + ) + + def _get_relative_luminance(self, rgb: Tuple[int, int, int]) -> float: + """ + Calculate relative luminance per WCAG 2.1. + + https://www.w3.org/WAI/GL/wiki/Relative_luminance + """ + def channel_luminance(value: int) -> float: + v = value / 255 + return v / 12.92 if v <= 0.03928 else ((v + 0.055) / 1.055) ** 2.4 + + r, g, b = rgb + return ( + 0.2126 * channel_luminance(r) + + 0.7152 * channel_luminance(g) + + 0.0722 * channel_luminance(b) + ) + + def _get_contrast_ratio(self, color1: str, color2: str) -> float: + """ + Calculate contrast ratio between two colors per WCAG 2.1. + + Returns ratio between 1:1 and 21:1. 
+ """ + rgb1 = self._hex_to_rgb(color1) + rgb2 = self._hex_to_rgb(color2) + + l1 = self._get_relative_luminance(rgb1) + l2 = self._get_relative_luminance(rgb2) + + lighter = max(l1, l2) + darker = min(l1, l2) + + return (lighter + 0.05) / (darker + 0.05) + + def _simulate_color_blindness( + self, + hex_color: str, + deficiency_type: str + ) -> str: + """ + Simulate how a color appears with a specific color blindness type. + + Uses linear RGB transformation approximation. + """ + if deficiency_type not in SIMULATION_MATRICES: + return hex_color + + rgb = self._hex_to_rgb(hex_color) + matrix = SIMULATION_MATRICES[deficiency_type]["matrix"] + + # Apply transformation matrix + r = rgb[0] * matrix[0][0] + rgb[1] * matrix[0][1] + rgb[2] * matrix[0][2] + g = rgb[0] * matrix[1][0] + rgb[1] * matrix[1][1] + rgb[2] * matrix[1][2] + b = rgb[0] * matrix[2][0] + rgb[1] * matrix[2][1] + rgb[2] * matrix[2][2] + + return self._rgb_to_hex((r, g, b)) + + def _get_color_distance(self, color1: str, color2: str) -> float: + """ + Calculate perceptual color distance (CIE76 approximation). + + Returns a value where < 20 means colors may be hard to distinguish. + """ + rgb1 = self._hex_to_rgb(color1) + rgb2 = self._hex_to_rgb(color2) + + # Simple Euclidean distance in RGB space (approximation) + # For production, should use CIEDE2000 + return math.sqrt( + (rgb1[0] - rgb2[0]) ** 2 + + (rgb1[1] - rgb2[1]) ** 2 + + (rgb1[2] - rgb2[2]) ** 2 + ) + + async def accessibility_validate_colors( + self, + colors: List[str], + check_types: Optional[List[str]] = None, + min_contrast_ratio: float = 4.5 + ) -> Dict[str, Any]: + """ + Validate a list of colors for accessibility. + + Args: + colors: List of hex colors to validate + check_types: Color blindness types to check (default: all) + min_contrast_ratio: Minimum WCAG contrast ratio (default: 4.5 for AA) + + Returns: + Dict with: + - issues: List of accessibility issues found + - simulations: How colors appear under each deficiency + - recommendations: Suggestions for improvement + - safe_palettes: Color-blind safe palette suggestions + """ + check_types = check_types or list(SIMULATION_MATRICES.keys()) + issues = [] + simulations = {} + + # Normalize colors + normalized_colors = [c.upper() if c.startswith('#') else f'#{c.upper()}' for c in colors] + + # Simulate each color blindness type + for deficiency in check_types: + if deficiency not in SIMULATION_MATRICES: + continue + + simulated = [self._simulate_color_blindness(c, deficiency) for c in normalized_colors] + simulations[deficiency] = { + "original": normalized_colors, + "simulated": simulated, + "info": SIMULATION_MATRICES[deficiency] + } + + # Check if any color pairs become indistinguishable + for i in range(len(normalized_colors)): + for j in range(i + 1, len(normalized_colors)): + distance = self._get_color_distance(simulated[i], simulated[j]) + if distance < 30: # Threshold for distinguishability + issues.append({ + "type": "distinguishability", + "severity": "warning" if distance > 15 else "error", + "colors": [normalized_colors[i], normalized_colors[j]], + "affected_by": [deficiency], + "simulated_colors": [simulated[i], simulated[j]], + "distance": round(distance, 1), + "message": f"Colors may be hard to distinguish for {deficiency} ({SIMULATION_MATRICES[deficiency]['description']})" + }) + + # Check contrast ratios against white and black backgrounds + for color in normalized_colors: + white_contrast = self._get_contrast_ratio(color, "#FFFFFF") + black_contrast = self._get_contrast_ratio(color, "#000000") + 
+ if white_contrast < min_contrast_ratio and black_contrast < min_contrast_ratio: + issues.append({ + "type": "contrast_ratio", + "severity": "error", + "colors": [color], + "white_contrast": round(white_contrast, 2), + "black_contrast": round(black_contrast, 2), + "required": min_contrast_ratio, + "message": f"Insufficient contrast against both white ({white_contrast:.1f}:1) and black ({black_contrast:.1f}:1) backgrounds" + }) + + # Generate recommendations + recommendations = self._generate_recommendations(issues) + + # Calculate overall score + error_count = sum(1 for i in issues if i["severity"] == "error") + warning_count = sum(1 for i in issues if i["severity"] == "warning") + + if error_count == 0 and warning_count == 0: + score = "A" + elif error_count == 0 and warning_count <= 2: + score = "B" + elif error_count <= 2: + score = "C" + else: + score = "D" + + return { + "colors_checked": normalized_colors, + "overall_score": score, + "issue_count": len(issues), + "issues": issues, + "simulations": simulations, + "recommendations": recommendations, + "safe_palettes": SAFE_PALETTES + } + + async def accessibility_validate_theme( + self, + theme_name: str + ) -> Dict[str, Any]: + """ + Validate a theme's colors for accessibility. + + Args: + theme_name: Theme name to validate + + Returns: + Dict with accessibility validation results + """ + if not self.theme_store: + return { + "error": "Theme store not configured", + "theme_name": theme_name + } + + theme = self.theme_store.get_theme(theme_name) + if not theme: + available = self.theme_store.list_themes() + return { + "error": f"Theme '{theme_name}' not found. Available: {available}", + "theme_name": theme_name + } + + # Extract colors from theme + colors = [] + tokens = theme.get("tokens", {}) + color_tokens = tokens.get("colors", {}) + + def extract_colors(obj, prefix=""): + """Recursively extract color values.""" + if isinstance(obj, str) and (obj.startswith('#') or len(obj) == 6): + colors.append(obj if obj.startswith('#') else f'#{obj}') + elif isinstance(obj, dict): + for key, value in obj.items(): + extract_colors(value, f"{prefix}.{key}") + elif isinstance(obj, list): + for item in obj: + extract_colors(item, prefix) + + extract_colors(color_tokens) + + # Validate extracted colors + result = await self.accessibility_validate_colors(colors) + result["theme_name"] = theme_name + + # Add theme-specific checks + primary = color_tokens.get("primary") + background = color_tokens.get("background", {}) + text = color_tokens.get("text", {}) + + if primary and background: + bg_color = background.get("base") if isinstance(background, dict) else background + if bg_color: + contrast = self._get_contrast_ratio(primary, bg_color) + if contrast < 4.5: + result["issues"].append({ + "type": "primary_contrast", + "severity": "error", + "colors": [primary, bg_color], + "ratio": round(contrast, 2), + "required": 4.5, + "message": f"Primary color has insufficient contrast ({contrast:.1f}:1) against background" + }) + + return result + + async def accessibility_suggest_alternative( + self, + color: str, + deficiency_type: str + ) -> Dict[str, Any]: + """ + Suggest accessible alternative colors. 
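+
+        Suggestions are heuristic RGB shifts (toward blue/yellow for red-green
+        deficiencies, toward red for tritanopia) plus the closest match from
+        each color-blind safe palette, capped at five results.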
+ + Args: + color: Original hex color + deficiency_type: Type of color blindness to optimize for + + Returns: + Dict with alternative color suggestions + """ + rgb = self._hex_to_rgb(color) + + suggestions = [] + + # Suggest shifting hue while maintaining saturation and brightness + # For red-green deficiency, shift toward blue or yellow + if deficiency_type in ["deuteranopia", "protanopia"]: + # Shift toward blue + blue_shift = self._rgb_to_hex(( + max(0, rgb[0] - 50), + max(0, rgb[1] - 30), + min(255, rgb[2] + 80) + )) + suggestions.append({ + "color": blue_shift, + "description": "Blue-shifted alternative", + "preserves": "approximate brightness" + }) + + # Shift toward yellow/orange + yellow_shift = self._rgb_to_hex(( + min(255, rgb[0] + 50), + min(255, rgb[1] + 30), + max(0, rgb[2] - 80) + )) + suggestions.append({ + "color": yellow_shift, + "description": "Yellow-shifted alternative", + "preserves": "approximate brightness" + }) + + elif deficiency_type == "tritanopia": + # For blue-yellow deficiency, shift toward red or green + red_shift = self._rgb_to_hex(( + min(255, rgb[0] + 60), + max(0, rgb[1] - 20), + max(0, rgb[2] - 40) + )) + suggestions.append({ + "color": red_shift, + "description": "Red-shifted alternative", + "preserves": "approximate brightness" + }) + + # Add safe palette suggestions + for palette_name, palette in SAFE_PALETTES.items(): + # Find closest color in safe palette + min_distance = float('inf') + closest = None + for safe_color in palette["colors"]: + distance = self._get_color_distance(color, safe_color) + if distance < min_distance: + min_distance = distance + closest = safe_color + + if closest: + suggestions.append({ + "color": closest, + "description": f"From {palette['name']} palette", + "palette": palette_name + }) + + return { + "original_color": color, + "deficiency_type": deficiency_type, + "suggestions": suggestions[:5] # Limit to 5 suggestions + } + + def _generate_recommendations(self, issues: List[Dict[str, Any]]) -> List[str]: + """Generate actionable recommendations based on issues.""" + recommendations = [] + + # Check for distinguishability issues + distinguishability_issues = [i for i in issues if i["type"] == "distinguishability"] + if distinguishability_issues: + affected_types = set() + for issue in distinguishability_issues: + affected_types.update(issue.get("affected_by", [])) + + if "deuteranopia" in affected_types or "protanopia" in affected_types: + recommendations.append( + "Avoid using red and green as the only differentiators - " + "add patterns, shapes, or labels" + ) + + recommendations.append( + "Consider using a color-blind safe palette like Okabe-Ito or IBM Design" + ) + + # Check for contrast issues + contrast_issues = [i for i in issues if i["type"] in ["contrast_ratio", "primary_contrast"]] + if contrast_issues: + recommendations.append( + "Increase contrast by darkening colors for light backgrounds " + "or lightening for dark backgrounds" + ) + recommendations.append( + "Use WCAG contrast checker tools to verify text readability" + ) + + # General recommendations + if len(issues) > 0: + recommendations.append( + "Add secondary visual cues (icons, patterns, labels) " + "to not rely solely on color" + ) + + if not recommendations: + recommendations.append( + "Color palette appears accessible! 
Consider adding patterns " + "for additional distinguishability" + ) + + return recommendations diff --git a/mcp-servers/viz-platform/mcp_server/chart_tools.py b/mcp-servers/viz-platform/mcp_server/chart_tools.py index 4ba81d6..eab00bc 100644 --- a/mcp-servers/viz-platform/mcp_server/chart_tools.py +++ b/mcp-servers/viz-platform/mcp_server/chart_tools.py @@ -3,11 +3,21 @@ Chart creation tools using Plotly. Provides tools for creating data visualizations with automatic theme integration. """ +import base64 import logging +import os from typing import Dict, List, Optional, Any, Union logger = logging.getLogger(__name__) +# Check for kaleido availability +KALEIDO_AVAILABLE = False +try: + import kaleido + KALEIDO_AVAILABLE = True +except ImportError: + logger.debug("kaleido not installed - chart export will be unavailable") + # Default color palette based on Mantine theme DEFAULT_COLORS = [ @@ -395,3 +405,129 @@ class ChartTools: "figure": figure, "interactions_added": [] } + + async def chart_export( + self, + figure: Dict[str, Any], + format: str = "png", + width: Optional[int] = None, + height: Optional[int] = None, + scale: float = 2.0, + output_path: Optional[str] = None + ) -> Dict[str, Any]: + """ + Export a Plotly chart to a static image format. + + Args: + figure: Plotly figure JSON to export + format: Output format - png, svg, or pdf + width: Image width in pixels (default: from figure or 1200) + height: Image height in pixels (default: from figure or 800) + scale: Resolution scale factor (default: 2 for retina) + output_path: Optional file path to save the image + + Returns: + Dict with: + - image_data: Base64-encoded image (if no output_path) + - file_path: Path to saved file (if output_path provided) + - format: Export format used + - dimensions: {width, height, scale} + - error: Error message if export failed + """ + # Validate format + valid_formats = ['png', 'svg', 'pdf'] + format = format.lower() + if format not in valid_formats: + return { + "error": f"Invalid format '{format}'. Must be one of: {valid_formats}", + "format": format, + "image_data": None + } + + # Check kaleido availability + if not KALEIDO_AVAILABLE: + return { + "error": "kaleido package not installed. 
Install with: pip install kaleido", + "format": format, + "image_data": None, + "install_hint": "pip install kaleido" + } + + # Validate figure + if not figure or 'data' not in figure: + return { + "error": "Invalid figure: must contain 'data' key", + "format": format, + "image_data": None + } + + try: + import plotly.graph_objects as go + import plotly.io as pio + + # Create Plotly figure object + fig = go.Figure(figure) + + # Determine dimensions + layout = figure.get('layout', {}) + export_width = width or layout.get('width') or 1200 + export_height = height or layout.get('height') or 800 + + # Export to bytes + image_bytes = pio.to_image( + fig, + format=format, + width=export_width, + height=export_height, + scale=scale + ) + + result = { + "format": format, + "dimensions": { + "width": export_width, + "height": export_height, + "scale": scale, + "effective_width": int(export_width * scale), + "effective_height": int(export_height * scale) + } + } + + # Save to file or return base64 + if output_path: + # Ensure directory exists + output_dir = os.path.dirname(output_path) + if output_dir and not os.path.exists(output_dir): + os.makedirs(output_dir, exist_ok=True) + + # Add extension if missing + if not output_path.endswith(f'.{format}'): + output_path = f"{output_path}.{format}" + + with open(output_path, 'wb') as f: + f.write(image_bytes) + + result["file_path"] = output_path + result["file_size_bytes"] = len(image_bytes) + else: + # Return as base64 + result["image_data"] = base64.b64encode(image_bytes).decode('utf-8') + result["data_uri"] = f"data:image/{format};base64,{result['image_data']}" + + return result + + except ImportError as e: + logger.error(f"Chart export failed - missing dependency: {e}") + return { + "error": f"Missing dependency for export: {e}", + "format": format, + "image_data": None, + "install_hint": "pip install plotly kaleido" + } + except Exception as e: + logger.error(f"Chart export failed: {e}") + return { + "error": str(e), + "format": format, + "image_data": None + } diff --git a/mcp-servers/viz-platform/mcp_server/layout_tools.py b/mcp-servers/viz-platform/mcp_server/layout_tools.py index 1453ced..fcb716c 100644 --- a/mcp-servers/viz-platform/mcp_server/layout_tools.py +++ b/mcp-servers/viz-platform/mcp_server/layout_tools.py @@ -10,6 +10,46 @@ from uuid import uuid4 logger = logging.getLogger(__name__) +# Standard responsive breakpoints (Mantine/Bootstrap-aligned) +DEFAULT_BREAKPOINTS = { + "xs": { + "min_width": "0px", + "max_width": "575px", + "cols": 1, + "spacing": "xs", + "description": "Extra small devices (phones, portrait)" + }, + "sm": { + "min_width": "576px", + "max_width": "767px", + "cols": 2, + "spacing": "sm", + "description": "Small devices (phones, landscape)" + }, + "md": { + "min_width": "768px", + "max_width": "991px", + "cols": 6, + "spacing": "md", + "description": "Medium devices (tablets)" + }, + "lg": { + "min_width": "992px", + "max_width": "1199px", + "cols": 12, + "spacing": "md", + "description": "Large devices (desktops)" + }, + "xl": { + "min_width": "1200px", + "max_width": None, + "cols": 12, + "spacing": "lg", + "description": "Extra large devices (large desktops)" + } +} + + # Layout templates TEMPLATES = { "dashboard": { @@ -365,3 +405,149 @@ class LayoutTools: } for name, config in FILTER_TYPES.items() } + + async def layout_set_breakpoints( + self, + layout_ref: str, + breakpoints: Dict[str, Dict[str, Any]], + mobile_first: bool = True + ) -> Dict[str, Any]: + """ + Configure responsive breakpoints for a layout. 
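+
+        Breakpoints that are not supplied are filled in from DEFAULT_BREAKPOINTS,
+        and the merged configuration plus generated CSS media queries are stored
+        on the layout.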
+ + Args: + layout_ref: Layout name to configure + breakpoints: Breakpoint configuration dict: + { + "xs": {"cols": 1, "spacing": "xs"}, + "sm": {"cols": 2, "spacing": "sm"}, + "md": {"cols": 6, "spacing": "md"}, + "lg": {"cols": 12, "spacing": "md"}, + "xl": {"cols": 12, "spacing": "lg"} + } + mobile_first: If True, use min-width media queries (default) + + Returns: + Dict with: + - breakpoints: Complete breakpoint configuration + - css_media_queries: Generated CSS media queries + - mobile_first: Whether mobile-first approach is used + """ + # Validate layout exists + if layout_ref not in self._layouts: + return { + "error": f"Layout '{layout_ref}' not found. Create it first with layout_create.", + "breakpoints": None + } + + layout = self._layouts[layout_ref] + + # Validate breakpoint names + valid_breakpoints = ["xs", "sm", "md", "lg", "xl"] + for bp_name in breakpoints.keys(): + if bp_name not in valid_breakpoints: + return { + "error": f"Invalid breakpoint '{bp_name}'. Must be one of: {valid_breakpoints}", + "breakpoints": layout.get("breakpoints") + } + + # Merge with defaults + merged_breakpoints = {} + for bp_name in valid_breakpoints: + default = DEFAULT_BREAKPOINTS[bp_name].copy() + if bp_name in breakpoints: + default.update(breakpoints[bp_name]) + merged_breakpoints[bp_name] = default + + # Validate spacing values + valid_spacing = ["xs", "sm", "md", "lg", "xl"] + for bp_name, bp_config in merged_breakpoints.items(): + if "spacing" in bp_config and bp_config["spacing"] not in valid_spacing: + return { + "error": f"Invalid spacing '{bp_config['spacing']}' for breakpoint '{bp_name}'. Must be one of: {valid_spacing}", + "breakpoints": layout.get("breakpoints") + } + + # Validate column counts + for bp_name, bp_config in merged_breakpoints.items(): + if "cols" in bp_config: + cols = bp_config["cols"] + if not isinstance(cols, int) or cols < 1 or cols > 24: + return { + "error": f"Invalid cols '{cols}' for breakpoint '{bp_name}'. Must be integer between 1 and 24.", + "breakpoints": layout.get("breakpoints") + } + + # Generate CSS media queries + css_queries = self._generate_media_queries(merged_breakpoints, mobile_first) + + # Store in layout + layout["breakpoints"] = merged_breakpoints + layout["mobile_first"] = mobile_first + layout["responsive_css"] = css_queries + + return { + "layout_ref": layout_ref, + "breakpoints": merged_breakpoints, + "mobile_first": mobile_first, + "css_media_queries": css_queries + } + + def _generate_media_queries( + self, + breakpoints: Dict[str, Dict[str, Any]], + mobile_first: bool + ) -> List[str]: + """Generate CSS media queries for breakpoints.""" + queries = [] + bp_order = ["xs", "sm", "md", "lg", "xl"] + + if mobile_first: + # Use min-width queries (mobile-first) + for bp_name in bp_order[1:]: # Skip xs (base styles) + bp = breakpoints[bp_name] + min_width = bp.get("min_width", DEFAULT_BREAKPOINTS[bp_name]["min_width"]) + if min_width and min_width != "0px": + queries.append(f"@media (min-width: {min_width}) {{ /* {bp_name} styles */ }}") + else: + # Use max-width queries (desktop-first) + for bp_name in reversed(bp_order[:-1]): # Skip xl (base styles) + bp = breakpoints[bp_name] + max_width = bp.get("max_width", DEFAULT_BREAKPOINTS[bp_name]["max_width"]) + if max_width: + queries.append(f"@media (max-width: {max_width}) {{ /* {bp_name} styles */ }}") + + return queries + + async def layout_get_breakpoints(self, layout_ref: str) -> Dict[str, Any]: + """ + Get the breakpoint configuration for a layout. 
+ + Args: + layout_ref: Layout name + + Returns: + Dict with breakpoint configuration + """ + if layout_ref not in self._layouts: + return { + "error": f"Layout '{layout_ref}' not found.", + "breakpoints": None + } + + layout = self._layouts[layout_ref] + + return { + "layout_ref": layout_ref, + "breakpoints": layout.get("breakpoints", DEFAULT_BREAKPOINTS.copy()), + "mobile_first": layout.get("mobile_first", True), + "css_media_queries": layout.get("responsive_css", []) + } + + def get_default_breakpoints(self) -> Dict[str, Any]: + """Get the default breakpoint configuration.""" + return { + "breakpoints": DEFAULT_BREAKPOINTS.copy(), + "description": "Standard responsive breakpoints aligned with Mantine/Bootstrap", + "mobile_first": True + } diff --git a/mcp-servers/viz-platform/mcp_server/server.py b/mcp-servers/viz-platform/mcp_server/server.py index a48d8e3..2a7ce03 100644 --- a/mcp-servers/viz-platform/mcp_server/server.py +++ b/mcp-servers/viz-platform/mcp_server/server.py @@ -17,6 +17,7 @@ from .chart_tools import ChartTools from .layout_tools import LayoutTools from .theme_tools import ThemeTools from .page_tools import PageTools +from .accessibility_tools import AccessibilityTools # Suppress noisy MCP validation warnings on stderr logging.basicConfig(level=logging.INFO) @@ -36,6 +37,7 @@ class VizPlatformMCPServer: self.layout_tools = LayoutTools() self.theme_tools = ThemeTools() self.page_tools = PageTools() + self.accessibility_tools = AccessibilityTools(theme_store=self.theme_tools.store) async def initialize(self): """Initialize server and load configuration.""" @@ -198,6 +200,46 @@ class VizPlatformMCPServer: } )) + # Chart export tool (Issue #247) + tools.append(Tool( + name="chart_export", + description=( + "Export a Plotly chart to static image format (PNG, SVG, PDF). " + "Requires kaleido package. Returns base64 image data or saves to file." + ), + inputSchema={ + "type": "object", + "properties": { + "figure": { + "type": "object", + "description": "Plotly figure JSON to export" + }, + "format": { + "type": "string", + "enum": ["png", "svg", "pdf"], + "description": "Output format (default: png)" + }, + "width": { + "type": "integer", + "description": "Image width in pixels (default: 1200)" + }, + "height": { + "type": "integer", + "description": "Image height in pixels (default: 800)" + }, + "scale": { + "type": "number", + "description": "Resolution scale factor (default: 2 for retina)" + }, + "output_path": { + "type": "string", + "description": "Optional file path to save image" + } + }, + "required": ["figure"] + } + )) + # Layout tools (Issue #174) tools.append(Tool( name="layout_create", @@ -280,6 +322,36 @@ class VizPlatformMCPServer: } )) + # Responsive breakpoints tool (Issue #249) + tools.append(Tool( + name="layout_set_breakpoints", + description=( + "Configure responsive breakpoints for a layout. " + "Supports xs, sm, md, lg, xl breakpoints with mobile-first approach. " + "Each breakpoint can define cols, spacing, and other grid properties." 
+ ), + inputSchema={ + "type": "object", + "properties": { + "layout_ref": { + "type": "string", + "description": "Layout name to configure" + }, + "breakpoints": { + "type": "object", + "description": ( + "Breakpoint config: {xs: {cols, spacing}, sm: {...}, md: {...}, lg: {...}, xl: {...}}" + ) + }, + "mobile_first": { + "type": "boolean", + "description": "Use mobile-first (min-width) media queries (default: true)" + } + }, + "required": ["layout_ref", "breakpoints"] + } + )) + # Theme tools (Issue #175) tools.append(Tool( name="theme_create", @@ -451,6 +523,77 @@ class VizPlatformMCPServer: } )) + # Accessibility tools (Issue #248) + tools.append(Tool( + name="accessibility_validate_colors", + description=( + "Validate colors for color blind accessibility. " + "Checks contrast ratios for deuteranopia, protanopia, tritanopia. " + "Returns issues, simulations, and accessible palette suggestions." + ), + inputSchema={ + "type": "object", + "properties": { + "colors": { + "type": "array", + "items": {"type": "string"}, + "description": "List of hex colors to validate (e.g., ['#228be6', '#40c057'])" + }, + "check_types": { + "type": "array", + "items": {"type": "string"}, + "description": "Color blindness types to check: deuteranopia, protanopia, tritanopia (default: all)" + }, + "min_contrast_ratio": { + "type": "number", + "description": "Minimum WCAG contrast ratio (default: 4.5 for AA)" + } + }, + "required": ["colors"] + } + )) + + tools.append(Tool( + name="accessibility_validate_theme", + description=( + "Validate a theme's colors for accessibility. " + "Extracts all colors from theme tokens and checks for color blind safety." + ), + inputSchema={ + "type": "object", + "properties": { + "theme_name": { + "type": "string", + "description": "Theme name to validate" + } + }, + "required": ["theme_name"] + } + )) + + tools.append(Tool( + name="accessibility_suggest_alternative", + description=( + "Suggest accessible alternative colors for a given color. " + "Provides alternatives optimized for specific color blindness types." 
+ ), + inputSchema={ + "type": "object", + "properties": { + "color": { + "type": "string", + "description": "Hex color to find alternatives for" + }, + "deficiency_type": { + "type": "string", + "enum": ["deuteranopia", "protanopia", "tritanopia"], + "description": "Color blindness type to optimize for" + } + }, + "required": ["color", "deficiency_type"] + } + )) + return tools @self.server.call_tool() @@ -524,6 +667,26 @@ class VizPlatformMCPServer: text=json.dumps(result, indent=2) )] + elif name == "chart_export": + figure = arguments.get('figure') + if not figure: + return [TextContent( + type="text", + text=json.dumps({"error": "figure is required"}, indent=2) + )] + result = await self.chart_tools.chart_export( + figure=figure, + format=arguments.get('format', 'png'), + width=arguments.get('width'), + height=arguments.get('height'), + scale=arguments.get('scale', 2.0), + output_path=arguments.get('output_path') + ) + return [TextContent( + type="text", + text=json.dumps(result, indent=2) + )] + # Layout tools elif name == "layout_create": layout_name = arguments.get('name') @@ -568,6 +731,23 @@ class VizPlatformMCPServer: text=json.dumps(result, indent=2) )] + elif name == "layout_set_breakpoints": + layout_ref = arguments.get('layout_ref') + breakpoints = arguments.get('breakpoints', {}) + mobile_first = arguments.get('mobile_first', True) + if not layout_ref: + return [TextContent( + type="text", + text=json.dumps({"error": "layout_ref is required"}, indent=2) + )] + result = await self.layout_tools.layout_set_breakpoints( + layout_ref, breakpoints, mobile_first + ) + return [TextContent( + type="text", + text=json.dumps(result, indent=2) + )] + # Theme tools elif name == "theme_create": theme_name = arguments.get('name') @@ -669,6 +849,53 @@ class VizPlatformMCPServer: text=json.dumps(result, indent=2) )] + # Accessibility tools + elif name == "accessibility_validate_colors": + colors = arguments.get('colors') + if not colors: + return [TextContent( + type="text", + text=json.dumps({"error": "colors list is required"}, indent=2) + )] + result = await self.accessibility_tools.accessibility_validate_colors( + colors=colors, + check_types=arguments.get('check_types'), + min_contrast_ratio=arguments.get('min_contrast_ratio', 4.5) + ) + return [TextContent( + type="text", + text=json.dumps(result, indent=2) + )] + + elif name == "accessibility_validate_theme": + theme_name = arguments.get('theme_name') + if not theme_name: + return [TextContent( + type="text", + text=json.dumps({"error": "theme_name is required"}, indent=2) + )] + result = await self.accessibility_tools.accessibility_validate_theme(theme_name) + return [TextContent( + type="text", + text=json.dumps(result, indent=2) + )] + + elif name == "accessibility_suggest_alternative": + color = arguments.get('color') + deficiency_type = arguments.get('deficiency_type') + if not color or not deficiency_type: + return [TextContent( + type="text", + text=json.dumps({"error": "color and deficiency_type are required"}, indent=2) + )] + result = await self.accessibility_tools.accessibility_suggest_alternative( + color, deficiency_type + ) + return [TextContent( + type="text", + text=json.dumps(result, indent=2) + )] + raise ValueError(f"Unknown tool: {name}") except Exception as e: diff --git a/mcp-servers/viz-platform/requirements.txt b/mcp-servers/viz-platform/requirements.txt index 4b5159e..0c8fced 100644 --- a/mcp-servers/viz-platform/requirements.txt +++ b/mcp-servers/viz-platform/requirements.txt @@ -5,6 +5,7 @@ mcp>=0.9.0 
plotly>=5.18.0 dash>=2.14.0 dash-mantine-components>=2.0.0 +kaleido>=0.2.1 # For chart export (PNG, SVG, PDF) # Utilities python-dotenv>=1.0.0 diff --git a/mcp-servers/viz-platform/tests/test_accessibility_tools.py b/mcp-servers/viz-platform/tests/test_accessibility_tools.py new file mode 100644 index 0000000..a4108dc --- /dev/null +++ b/mcp-servers/viz-platform/tests/test_accessibility_tools.py @@ -0,0 +1,195 @@ +""" +Tests for accessibility validation tools. +""" +import pytest +from mcp_server.accessibility_tools import AccessibilityTools + + +@pytest.fixture +def tools(): + """Create AccessibilityTools instance.""" + return AccessibilityTools() + + +class TestHexToRgb: + """Tests for _hex_to_rgb method.""" + + def test_hex_to_rgb_6_digit(self, tools): + """Test 6-digit hex conversion.""" + assert tools._hex_to_rgb("#FF0000") == (255, 0, 0) + assert tools._hex_to_rgb("#00FF00") == (0, 255, 0) + assert tools._hex_to_rgb("#0000FF") == (0, 0, 255) + + def test_hex_to_rgb_3_digit(self, tools): + """Test 3-digit hex conversion.""" + assert tools._hex_to_rgb("#F00") == (255, 0, 0) + assert tools._hex_to_rgb("#0F0") == (0, 255, 0) + assert tools._hex_to_rgb("#00F") == (0, 0, 255) + + def test_hex_to_rgb_lowercase(self, tools): + """Test lowercase hex conversion.""" + assert tools._hex_to_rgb("#ff0000") == (255, 0, 0) + + +class TestContrastRatio: + """Tests for _get_contrast_ratio method.""" + + def test_black_white_contrast(self, tools): + """Test black on white has maximum contrast.""" + ratio = tools._get_contrast_ratio("#000000", "#FFFFFF") + assert ratio == pytest.approx(21.0, rel=0.01) + + def test_same_color_contrast(self, tools): + """Test same color has minimum contrast.""" + ratio = tools._get_contrast_ratio("#FF0000", "#FF0000") + assert ratio == pytest.approx(1.0, rel=0.01) + + def test_symmetric_contrast(self, tools): + """Test contrast ratio is symmetric.""" + ratio1 = tools._get_contrast_ratio("#228be6", "#FFFFFF") + ratio2 = tools._get_contrast_ratio("#FFFFFF", "#228be6") + assert ratio1 == pytest.approx(ratio2, rel=0.01) + + +class TestColorBlindnessSimulation: + """Tests for _simulate_color_blindness method.""" + + def test_deuteranopia_simulation(self, tools): + """Test deuteranopia (green-blind) simulation.""" + # Red and green should appear more similar + original_red = "#FF0000" + original_green = "#00FF00" + + simulated_red = tools._simulate_color_blindness(original_red, "deuteranopia") + simulated_green = tools._simulate_color_blindness(original_green, "deuteranopia") + + # They should be different from originals + assert simulated_red != original_red or simulated_green != original_green + + def test_protanopia_simulation(self, tools): + """Test protanopia (red-blind) simulation.""" + simulated = tools._simulate_color_blindness("#FF0000", "protanopia") + # Should return a modified color + assert simulated.startswith("#") + assert len(simulated) == 7 + + def test_tritanopia_simulation(self, tools): + """Test tritanopia (blue-blind) simulation.""" + simulated = tools._simulate_color_blindness("#0000FF", "tritanopia") + # Should return a modified color + assert simulated.startswith("#") + assert len(simulated) == 7 + + def test_unknown_deficiency_returns_original(self, tools): + """Test unknown deficiency type returns original color.""" + color = "#FF0000" + simulated = tools._simulate_color_blindness(color, "unknown") + assert simulated == color + + +class TestAccessibilityValidateColors: + """Tests for accessibility_validate_colors method.""" + + @pytest.mark.asyncio + 
async def test_validate_single_color(self, tools): + """Test validating a single color.""" + result = await tools.accessibility_validate_colors(["#228be6"]) + assert "colors_checked" in result + assert "overall_score" in result + assert "issues" in result + assert "safe_palettes" in result + + @pytest.mark.asyncio + async def test_validate_problematic_colors(self, tools): + """Test similar colors trigger warnings.""" + # Use colors that are very close in hue, which should be harder to distinguish + result = await tools.accessibility_validate_colors(["#FF5555", "#FF6666"]) + # Similar colors should trigger distinguishability warnings + assert "issues" in result + # The validation should at least run without errors + assert "colors_checked" in result + assert len(result["colors_checked"]) == 2 + + @pytest.mark.asyncio + async def test_validate_contrast_issue(self, tools): + """Test low contrast colors trigger contrast warnings.""" + # Yellow on white has poor contrast + result = await tools.accessibility_validate_colors(["#FFFF00"]) + # Check for contrast issues (yellow may have issues with both black and white) + assert "issues" in result + + @pytest.mark.asyncio + async def test_validate_with_specific_types(self, tools): + """Test validating for specific color blindness types.""" + result = await tools.accessibility_validate_colors( + ["#FF0000", "#00FF00"], + check_types=["deuteranopia"] + ) + assert "simulations" in result + assert "deuteranopia" in result["simulations"] + assert "protanopia" not in result["simulations"] + + @pytest.mark.asyncio + async def test_overall_score(self, tools): + """Test overall score is calculated.""" + result = await tools.accessibility_validate_colors(["#228be6", "#ffffff"]) + assert result["overall_score"] in ["A", "B", "C", "D"] + + @pytest.mark.asyncio + async def test_recommendations_generated(self, tools): + """Test recommendations are generated for issues.""" + result = await tools.accessibility_validate_colors(["#FF0000", "#00FF00"]) + assert "recommendations" in result + assert len(result["recommendations"]) > 0 + + +class TestAccessibilitySuggestAlternative: + """Tests for accessibility_suggest_alternative method.""" + + @pytest.mark.asyncio + async def test_suggest_alternative_deuteranopia(self, tools): + """Test suggesting alternatives for deuteranopia.""" + result = await tools.accessibility_suggest_alternative("#FF0000", "deuteranopia") + assert "original_color" in result + assert result["deficiency_type"] == "deuteranopia" + assert "suggestions" in result + assert len(result["suggestions"]) > 0 + + @pytest.mark.asyncio + async def test_suggest_alternative_tritanopia(self, tools): + """Test suggesting alternatives for tritanopia.""" + result = await tools.accessibility_suggest_alternative("#0000FF", "tritanopia") + assert "suggestions" in result + assert len(result["suggestions"]) > 0 + + @pytest.mark.asyncio + async def test_suggestions_include_safe_palettes(self, tools): + """Test suggestions include colors from safe palettes.""" + result = await tools.accessibility_suggest_alternative("#FF0000", "deuteranopia") + palette_suggestions = [ + s for s in result["suggestions"] + if "palette" in s + ] + assert len(palette_suggestions) > 0 + + +class TestSafePalettes: + """Tests for safe palette constants.""" + + def test_safe_palettes_exist(self, tools): + """Test that safe palettes are defined.""" + from mcp_server.accessibility_tools import SAFE_PALETTES + assert "categorical" in SAFE_PALETTES + assert "ibm" in SAFE_PALETTES + assert "okabe_ito" 
in SAFE_PALETTES + assert "tableau_colorblind" in SAFE_PALETTES + + def test_safe_palettes_have_colors(self, tools): + """Test that safe palettes have color lists.""" + from mcp_server.accessibility_tools import SAFE_PALETTES + for palette_name, palette in SAFE_PALETTES.items(): + assert "colors" in palette + assert len(palette["colors"]) > 0 + # All colors should be valid hex + for color in palette["colors"]: + assert color.startswith("#") diff --git a/plugins/claude-config-maintainer/README.md b/plugins/claude-config-maintainer/README.md index 7628fbf..6f64370 100644 --- a/plugins/claude-config-maintainer/README.md +++ b/plugins/claude-config-maintainer/README.md @@ -37,6 +37,33 @@ Create a new CLAUDE.md tailored to your project. /config-init ``` +### `/config-diff` +Show differences between current CLAUDE.md and previous versions. + +``` +/config-diff # Compare working copy vs last commit +/config-diff --commit=abc1234 # Compare against specific commit +/config-diff --from=v1.0 --to=v2.0 # Compare two commits +/config-diff --section="Critical Rules" # Focus on specific section +``` + +### `/config-lint` +Lint CLAUDE.md for common anti-patterns and best practices. + +``` +/config-lint # Run all lint checks +/config-lint --fix # Auto-fix fixable issues +/config-lint --rules=security # Check only security rules +/config-lint --severity=error # Show only errors +``` + +**Lint Rule Categories:** +- **Security (SEC)** - Hardcoded secrets, paths, credentials +- **Structure (STR)** - Header hierarchy, required sections +- **Content (CNT)** - Contradictions, duplicates, vague rules +- **Format (FMT)** - Consistency, code blocks, whitespace +- **Best Practice (BPR)** - Missing Quick Start, Critical Rules sections + ## Best Practices A good CLAUDE.md should be: diff --git a/plugins/claude-config-maintainer/commands/config-diff.md b/plugins/claude-config-maintainer/commands/config-diff.md new file mode 100644 index 0000000..b612518 --- /dev/null +++ b/plugins/claude-config-maintainer/commands/config-diff.md @@ -0,0 +1,239 @@ +--- +description: Show diff between current CLAUDE.md and last commit +--- + +# Compare CLAUDE.md Changes + +This command shows differences between your current CLAUDE.md file and previous versions, helping track configuration drift and review changes before committing. + +## What This Command Does + +1. **Detect CLAUDE.md Location** - Finds the project's CLAUDE.md file +2. **Compare Versions** - Shows diff against last commit or specified revision +3. **Highlight Sections** - Groups changes by affected sections +4. 
**Summarize Impact** - Explains what the changes mean for Claude's behavior + +## Usage + +``` +/config-diff +``` + +Compare against a specific commit: + +``` +/config-diff --commit=abc1234 +/config-diff --commit=HEAD~3 +``` + +Compare two specific commits: + +``` +/config-diff --from=abc1234 --to=def5678 +``` + +Show only specific sections: + +``` +/config-diff --section="Critical Rules" +/config-diff --section="Quick Start" +``` + +## Comparison Modes + +### Default: Working vs Last Commit +Shows uncommitted changes to CLAUDE.md: +``` +/config-diff +``` + +### Working vs Specific Commit +Shows changes since a specific point: +``` +/config-diff --commit=v1.0.0 +``` + +### Commit to Commit +Shows changes between two historical versions: +``` +/config-diff --from=v1.0.0 --to=v2.0.0 +``` + +### Branch Comparison +Shows CLAUDE.md differences between branches: +``` +/config-diff --branch=main +/config-diff --from=feature-branch --to=main +``` + +## Expected Output + +``` +CLAUDE.md Diff Report +===================== + +File: /path/to/project/CLAUDE.md +Comparing: Working copy vs HEAD (last commit) +Commit: abc1234 "Update build commands" (2 days ago) + +Summary: +- Lines added: 12 +- Lines removed: 5 +- Net change: +7 lines +- Sections affected: 3 + +Section Changes: +---------------- + +## Quick Start [MODIFIED] + - Added new environment variable requirement + - Updated test command with coverage flag + +## Critical Rules [ADDED CONTENT] + + New rule: "Never modify database migrations directly" + +## Architecture [UNCHANGED] + +## Common Operations [MODIFIED] + - Removed deprecated deployment command + - Added new Docker workflow + +Detailed Diff: +-------------- + +--- CLAUDE.md (HEAD) ++++ CLAUDE.md (working) + +@@ -15,7 +15,10 @@ + ## Quick Start + + ```bash ++export DATABASE_URL=postgres://... # Required + pip install -r requirements.txt +-pytest ++pytest --cov=src # Run with coverage + uvicorn main:app --reload + ``` + +@@ -45,6 +48,7 @@ + ## Critical Rules + + - Never modify `.env` files directly ++- Never modify database migrations directly + - Always run tests before committing + +Behavioral Impact: +------------------ + +These changes will affect Claude's behavior: + +1. [NEW REQUIREMENT] Claude will now export DATABASE_URL before running +2. [MODIFIED] Test command now includes coverage reporting +3. [NEW RULE] Claude will avoid direct migration modifications + +Review: Do these changes reflect your intended configuration? +``` + +## Section-Focused View + +When using `--section`, output focuses on specific areas: + +``` +/config-diff --section="Critical Rules" + +CLAUDE.md Section Diff: Critical Rules +====================================== + +--- HEAD ++++ Working + + ## Critical Rules + + - Never modify `.env` files directly ++- Never modify database migrations directly ++- Always use type hints in Python code + - Always run tests before committing +-- Keep functions under 50 lines + +Changes: + + 2 rules added + - 1 rule removed + +Impact: Claude will follow 2 new constraints and no longer enforce +the 50-line function limit. 
+``` + +## Options + +| Option | Description | +|--------|-------------| +| `--commit=REF` | Compare working copy against specific commit/tag | +| `--from=REF` | Starting point for comparison | +| `--to=REF` | Ending point for comparison (default: HEAD) | +| `--branch=NAME` | Compare against branch tip | +| `--section=NAME` | Show only changes to specific section | +| `--stat` | Show only statistics, no detailed diff | +| `--no-color` | Disable colored output | +| `--context=N` | Lines of context around changes (default: 3) | + +## Understanding the Output + +### Change Indicators + +| Symbol | Meaning | +|--------|---------| +| `+` | Line added | +| `-` | Line removed | +| `@@` | Location marker showing line numbers | +| `[MODIFIED]` | Section has changes | +| `[ADDED]` | New section created | +| `[REMOVED]` | Section deleted | +| `[UNCHANGED]` | No changes to section | + +### Impact Categories + +- **NEW REQUIREMENT** - Claude will now need to do something new +- **REMOVED REQUIREMENT** - Claude no longer needs to do something +- **MODIFIED** - Existing behavior changed +- **NEW RULE** - New constraint added +- **RELAXED RULE** - Constraint removed or softened + +## When to Use + +Run `/config-diff` when: +- Before committing CLAUDE.md changes +- Reviewing what changed after pulling updates +- Debugging unexpected Claude behavior +- Auditing configuration changes over time +- Comparing configurations across branches + +## Integration with Other Commands + +| Workflow | Commands | +|----------|----------| +| Review before commit | `/config-diff` then `git commit` | +| After optimization | `/config-optimize` then `/config-diff` | +| Audit history | `/config-diff --from=v1.0.0 --to=HEAD` | +| Branch comparison | `/config-diff --branch=main` | + +## Tips + +1. **Review before committing** - Always check what changed +2. **Track behavioral changes** - Focus on rules and requirements sections +3. **Use section filtering** - Large files benefit from focused diffs +4. **Compare across releases** - Use tags to track major changes +5. **Check after merges** - Ensure CLAUDE.md didn't get conflict artifacts + +## Troubleshooting + +### "No changes detected" +- CLAUDE.md matches the comparison target +- Check if you're comparing the right commits + +### "File not found in commit" +- CLAUDE.md didn't exist at that commit +- Use `git log -- CLAUDE.md` to find when it was created + +### "Not a git repository" +- This command requires git history +- Initialize git or use file backup comparison instead diff --git a/plugins/claude-config-maintainer/commands/config-lint.md b/plugins/claude-config-maintainer/commands/config-lint.md new file mode 100644 index 0000000..fd530fb --- /dev/null +++ b/plugins/claude-config-maintainer/commands/config-lint.md @@ -0,0 +1,334 @@ +--- +description: Lint CLAUDE.md for common anti-patterns and best practices +--- + +# Lint CLAUDE.md + +This command checks your CLAUDE.md file against best practices and detects common anti-patterns that can cause issues with Claude Code. + +## What This Command Does + +1. **Parse Structure** - Validates markdown structure and hierarchy +2. **Check Security** - Detects hardcoded paths, secrets, and sensitive data +3. **Validate Content** - Identifies anti-patterns and problematic instructions +4. **Verify Format** - Ensures consistent formatting and style +5. 
**Generate Report** - Provides actionable findings with fix suggestions + +## Usage + +``` +/config-lint +``` + +Lint with auto-fix: + +``` +/config-lint --fix +``` + +Check specific rules only: + +``` +/config-lint --rules=security,structure +``` + +## Linting Rules + +### Security Rules (SEC) + +| Rule | Description | Severity | +|------|-------------|----------| +| SEC001 | Hardcoded absolute paths | Warning | +| SEC002 | Potential secrets/API keys | Error | +| SEC003 | Hardcoded IP addresses | Warning | +| SEC004 | Exposed credentials patterns | Error | +| SEC005 | Hardcoded URLs with tokens | Error | +| SEC006 | Environment variable values (not names) | Warning | + +### Structure Rules (STR) + +| Rule | Description | Severity | +|------|-------------|----------| +| STR001 | Missing required sections | Error | +| STR002 | Invalid header hierarchy (h3 before h2) | Warning | +| STR003 | Orphaned content (text before first header) | Info | +| STR004 | Excessive nesting depth (>4 levels) | Warning | +| STR005 | Empty sections | Warning | +| STR006 | Missing section content | Warning | + +### Content Rules (CNT) + +| Rule | Description | Severity | +|------|-------------|----------| +| CNT001 | Contradictory instructions | Error | +| CNT002 | Vague or ambiguous rules | Warning | +| CNT003 | Overly long sections (>100 lines) | Info | +| CNT004 | Duplicate content | Warning | +| CNT005 | TODO/FIXME in production config | Warning | +| CNT006 | Outdated version references | Info | +| CNT007 | Broken internal links | Warning | + +### Format Rules (FMT) + +| Rule | Description | Severity | +|------|-------------|----------| +| FMT001 | Inconsistent header styles | Info | +| FMT002 | Inconsistent list markers | Info | +| FMT003 | Missing code block language | Info | +| FMT004 | Trailing whitespace | Info | +| FMT005 | Missing blank lines around headers | Info | +| FMT006 | Inconsistent indentation | Info | + +### Best Practice Rules (BPR) + +| Rule | Description | Severity | +|------|-------------|----------| +| BPR001 | No Quick Start section | Warning | +| BPR002 | No Critical Rules section | Warning | +| BPR003 | Instructions without examples | Info | +| BPR004 | Commands without explanation | Info | +| BPR005 | Rules without rationale | Info | +| BPR006 | Missing plugin integration docs | Info | + +## Expected Output + +``` +CLAUDE.md Lint Report +===================== + +File: /path/to/project/CLAUDE.md +Rules checked: 25 +Time: 0.3s + +Summary: + Errors: 2 + Warnings: 5 + Info: 3 + +Findings: +--------- + +[ERROR] SEC002: Potential secret detected (line 45) + │ api_key = "sk-1234567890abcdef" + │ ^^^^^^^^^^^^^^^^^^^^^^ + └─ Hardcoded API key found. Use environment variable reference instead. + + Suggested fix: + - api_key = "sk-1234567890abcdef" + + api_key = $OPENAI_API_KEY # Set in environment + +[ERROR] CNT001: Contradictory instructions (lines 23, 67) + │ Line 23: "Always run tests before committing" + │ Line 67: "Skip tests for documentation-only changes" + │ + └─ These rules conflict. Clarify the exception explicitly. + + Suggested fix: + + "Always run tests before committing, except for documentation-only + + changes (files in docs/ directory)" + +[WARNING] SEC001: Hardcoded absolute path (line 12) + │ Database location: /home/user/data/myapp.db + │ ^^^^^^^^^^^^^^^^^^^^^^^^ + └─ Absolute paths break portability. Use relative or variable. 
+ + Suggested fix: + - Database location: /home/user/data/myapp.db + + Database location: ./data/myapp.db # Or $DATA_DIR/myapp.db + +[WARNING] STR002: Invalid header hierarchy (line 34) + │ ### Subsection + │ (no preceding ## header) + │ + └─ H3 header without parent H2. Add H2 or promote to H2. + +[WARNING] CNT004: Duplicate content (lines 45-52, 89-96) + │ Same git workflow documented twice + │ + └─ Remove duplicate or consolidate into single section. + +[WARNING] STR005: Empty section (line 78) + │ ## Troubleshooting + │ (no content) + │ + └─ Add content or remove empty section. + +[WARNING] BPR002: No Critical Rules section + │ Missing "Critical Rules" or "Important Rules" section + │ + └─ Add a section highlighting must-follow rules for Claude. + +[INFO] FMT003: Missing code block language (line 56) + │ ``` + │ npm install + │ ``` + │ + └─ Specify language for syntax highlighting: ```bash + +[INFO] CNT003: Overly long section (lines 100-215) + │ "Architecture" section is 115 lines + │ + └─ Consider breaking into subsections or condensing. + +[INFO] FMT001: Inconsistent header styles + │ Line 10: "## Quick Start" + │ Line 25: "## Architecture:" + │ (colon suffix inconsistent) + │ + └─ Standardize header format throughout document. + +--- + +Auto-fixable: 4 issues (run with --fix) +Manual review required: 6 issues + +Run `/config-lint --fix` to apply automatic fixes. +``` + +## Options + +| Option | Description | +|--------|-------------| +| `--fix` | Automatically fix auto-fixable issues | +| `--rules=LIST` | Check only specified rule categories | +| `--ignore=LIST` | Skip specified rules (e.g., `--ignore=FMT001,FMT002`) | +| `--severity=LEVEL` | Show only issues at or above level (error/warning/info) | +| `--format=FORMAT` | Output format: `text` (default), `json`, `sarif` | +| `--config=FILE` | Use custom lint configuration | +| `--strict` | Treat warnings as errors | + +## Rule Categories + +Use `--rules` to focus on specific areas: + +``` +/config-lint --rules=security # Only security checks +/config-lint --rules=structure # Only structure checks +/config-lint --rules=security,content # Multiple categories +``` + +Available categories: +- `security` - SEC rules +- `structure` - STR rules +- `content` - CNT rules +- `format` - FMT rules +- `bestpractice` - BPR rules + +## Custom Configuration + +Create `.claude-lint.json` in project root: + +```json +{ + "rules": { + "SEC001": "warning", + "FMT001": "off", + "CNT003": { + "severity": "warning", + "maxLines": 150 + } + }, + "ignore": [ + "FMT*" + ], + "requiredSections": [ + "Quick Start", + "Critical Rules", + "Project Overview" + ] +} +``` + +## Anti-Pattern Examples + +### Hardcoded Secrets (SEC002) +```markdown +# BAD +API_KEY=sk-1234567890abcdef + +# GOOD +API_KEY=$OPENAI_API_KEY # Set via environment +``` + +### Hardcoded Paths (SEC001) +```markdown +# BAD +Config file: /home/john/projects/myapp/config.yml + +# GOOD +Config file: ./config.yml +Config file: $PROJECT_ROOT/config.yml +``` + +### Contradictory Rules (CNT001) +```markdown +# BAD +- Always use TypeScript +- JavaScript files are acceptable for scripts + +# GOOD +- Always use TypeScript for source code +- JavaScript (.js) is acceptable only for config files and scripts +``` + +### Vague Instructions (CNT002) +```markdown +# BAD +- Be careful with the database + +# GOOD +- Never run DELETE without WHERE clause +- Always backup before migrations +``` + +### Invalid Hierarchy (STR002) +```markdown +# BAD +# Main Title +### Skipped Level + +# GOOD +# Main Title +## Section 
+### Subsection +``` + +## When to Use + +Run `/config-lint` when: +- Before committing CLAUDE.md changes +- During code review for CLAUDE.md modifications +- Setting up CI/CD checks for configuration files +- After major edits to catch introduced issues +- Periodically as maintenance check + +## Integration with CI/CD + +Add to your CI pipeline: + +```yaml +# GitHub Actions example +- name: Lint CLAUDE.md + run: claude /config-lint --strict --format=sarif > lint-results.sarif + +- name: Upload SARIF + uses: github/codeql-action/upload-sarif@v2 + with: + sarif_file: lint-results.sarif +``` + +## Tips + +1. **Start with errors** - Fix errors before warnings +2. **Use --fix carefully** - Review auto-fixes before committing +3. **Configure per-project** - Different projects have different needs +4. **Integrate in CI** - Catch issues before they reach main +5. **Review periodically** - Run lint check monthly as maintenance + +## Related Commands + +| Command | Relationship | +|---------|--------------| +| `/config-analyze` | Deeper content analysis (complements lint) | +| `/config-optimize` | Applies fixes and improvements | +| `/config-diff` | Shows what changed (run lint before commit) | diff --git a/plugins/cmdb-assistant/.claude-plugin/plugin.json b/plugins/cmdb-assistant/.claude-plugin/plugin.json index 4105491..bff85eb 100644 --- a/plugins/cmdb-assistant/.claude-plugin/plugin.json +++ b/plugins/cmdb-assistant/.claude-plugin/plugin.json @@ -1,6 +1,6 @@ { "name": "cmdb-assistant", - "version": "1.1.0", + "version": "1.2.0", "description": "NetBox CMDB integration with data quality validation - query, create, update, and manage network devices, IP addresses, sites, and more with best practices enforcement", "author": { "name": "Leo Miranda", diff --git a/plugins/cmdb-assistant/README.md b/plugins/cmdb-assistant/README.md index ea05bb5..b8328a3 100644 --- a/plugins/cmdb-assistant/README.md +++ b/plugins/cmdb-assistant/README.md @@ -2,6 +2,12 @@ A Claude Code plugin for NetBox CMDB integration - query, create, update, and manage your network infrastructure directly from Claude Code. +## What's New in v1.2.0 + +- **`/cmdb-topology`**: Generate Mermaid diagrams showing infrastructure topology (rack view, network view, site overview) +- **`/change-audit`**: Query and analyze NetBox audit log for change tracking and compliance +- **`/ip-conflicts`**: Detect IP address conflicts and overlapping prefixes + ## What's New in v1.1.0 - **Data Quality Validation**: Hooks for SessionStart and PreToolUse that check data quality and warn about missing fields @@ -59,6 +65,9 @@ Add to your Claude Code plugins or marketplace configuration. 
| `/cmdb-audit [scope]` | Data quality analysis (all, vms, devices, naming, roles) | | `/cmdb-register` | Register current machine into NetBox with running apps | | `/cmdb-sync` | Sync machine state with NetBox (detect drift, update) | +| `/cmdb-topology ` | Generate Mermaid diagrams (rack, network, site, full) | +| `/change-audit [filters]` | Query NetBox audit log for change tracking | +| `/ip-conflicts [scope]` | Detect IP conflicts and overlapping prefixes | ## Agent @@ -140,9 +149,12 @@ cmdb-assistant/ │ ├── cmdb-device.md # Device management │ ├── cmdb-ip.md # IP management │ ├── cmdb-site.md # Site management -│ ├── cmdb-audit.md # Data quality audit (NEW) -│ ├── cmdb-register.md # Machine registration (NEW) -│ └── cmdb-sync.md # Machine sync (NEW) +│ ├── cmdb-audit.md # Data quality audit +│ ├── cmdb-register.md # Machine registration +│ ├── cmdb-sync.md # Machine sync +│ ├── cmdb-topology.md # Topology visualization (NEW) +│ ├── change-audit.md # Change audit log (NEW) +│ └── ip-conflicts.md # IP conflict detection (NEW) ├── hooks/ │ ├── hooks.json # Hook configuration │ ├── startup-check.sh # SessionStart validation diff --git a/plugins/cmdb-assistant/commands/change-audit.md b/plugins/cmdb-assistant/commands/change-audit.md new file mode 100644 index 0000000..d84e607 --- /dev/null +++ b/plugins/cmdb-assistant/commands/change-audit.md @@ -0,0 +1,163 @@ +--- +description: Audit NetBox changes with filtering by date, user, or object type +--- + +# CMDB Change Audit + +Query and analyze the NetBox audit log for change tracking and compliance. + +## Usage + +``` +/change-audit [filters] +``` + +**Filters:** +- `last days/hours` - Changes within time period +- `by ` - Changes by specific user +- `type ` - Changes to specific object type +- `action ` - Filter by action type +- `object ` - Search for changes to specific object + +## Instructions + +You are a change auditor that queries NetBox's object change log and generates audit reports. + +### MCP Tools + +Use these tools to query the audit log: + +- `extras_list_object_changes` - List changes with filters: + - `user_id` - Filter by user ID + - `changed_object_type` - Filter by object type (e.g., "dcim.device", "ipam.ipaddress") + - `action` - Filter by action: "create", "update", "delete" + +- `extras_get_object_change` - Get detailed change record by ID + +### Common Object Types + +| Category | Object Types | +|----------|--------------| +| DCIM | `dcim.device`, `dcim.interface`, `dcim.site`, `dcim.rack`, `dcim.cable` | +| IPAM | `ipam.ipaddress`, `ipam.prefix`, `ipam.vlan`, `ipam.vrf` | +| Virtualization | `virtualization.virtualmachine`, `virtualization.cluster` | +| Tenancy | `tenancy.tenant`, `tenancy.contact` | + +### Workflow + +1. **Parse user request** to determine filters +2. **Query object changes** using `extras_list_object_changes` +3. **Enrich data** by fetching detailed records if needed +4. **Analyze patterns** in the changes +5. 
**Generate report** in structured format + +### Report Format + +```markdown +## NetBox Change Audit Report + +**Generated:** [timestamp] +**Period:** [date range or "All time"] +**Filters:** [applied filters] + +### Summary + +| Metric | Count | +|--------|-------| +| Total Changes | X | +| Creates | Y | +| Updates | Z | +| Deletes | W | +| Unique Users | N | +| Object Types | M | + +### Changes by Action + +#### Created Objects (Y) + +| Time | User | Object Type | Object | Details | +|------|------|-------------|--------|---------| +| 2024-01-15 14:30 | admin | dcim.device | server-01 | Created device | +| ... | ... | ... | ... | ... | + +#### Updated Objects (Z) + +| Time | User | Object Type | Object | Changed Fields | +|------|------|-------------|--------|----------------| +| 2024-01-15 15:00 | john | ipam.ipaddress | 10.0.1.50/24 | status, description | +| ... | ... | ... | ... | ... | + +#### Deleted Objects (W) + +| Time | User | Object Type | Object | Details | +|------|------|-------------|--------|---------| +| 2024-01-14 09:00 | admin | dcim.interface | eth2 | Removed from server-01 | +| ... | ... | ... | ... | ... | + +### Changes by User + +| User | Creates | Updates | Deletes | Total | +|------|---------|---------|---------|-------| +| admin | 5 | 10 | 2 | 17 | +| john | 3 | 8 | 0 | 11 | + +### Changes by Object Type + +| Object Type | Creates | Updates | Deletes | Total | +|-------------|---------|---------|---------|-------| +| dcim.device | 2 | 5 | 0 | 7 | +| ipam.ipaddress | 4 | 3 | 1 | 8 | + +### Timeline + +``` +2024-01-15: ████████ 8 changes +2024-01-14: ████ 4 changes +2024-01-13: ██ 2 changes +``` + +### Notable Patterns + +- **Bulk operations:** [Identify if many changes happened in short time] +- **Unusual activity:** [Flag unexpected deletions or after-hours changes] +- **Missing audit trail:** [Note if expected changes are not logged] + +### Recommendations + +1. [Any security or process recommendations based on findings] +``` + +### Time Period Handling + +When user specifies "last N days": +- The NetBox API may not have direct date filtering in `extras_list_object_changes` +- Fetch recent changes and filter client-side by the `time` field +- Note any limitations in the report + +### Enriching Change Details + +For detailed audit, use `extras_get_object_change` with the change ID to see: +- `prechange_data` - Object state before change +- `postchange_data` - Object state after change +- `request_id` - Links related changes in same request + +### Security Audit Mode + +If user asks for "security audit" or "compliance report": +1. Focus on deletions and permission-sensitive changes +2. Highlight changes to critical objects (firewalls, VRFs, prefixes) +3. Flag changes outside business hours +4. 
Identify users with high change counts + +## Examples + +- `/change-audit` - Show recent changes (last 24 hours) +- `/change-audit last 7 days` - Changes in past week +- `/change-audit by admin` - All changes by admin user +- `/change-audit type dcim.device` - Device changes only +- `/change-audit action delete` - All deletions +- `/change-audit object server-01` - Changes to server-01 + +## User Request + +$ARGUMENTS diff --git a/plugins/cmdb-assistant/commands/cmdb-topology.md b/plugins/cmdb-assistant/commands/cmdb-topology.md new file mode 100644 index 0000000..427c067 --- /dev/null +++ b/plugins/cmdb-assistant/commands/cmdb-topology.md @@ -0,0 +1,182 @@ +--- +description: Generate infrastructure topology diagrams from NetBox data +--- + +# CMDB Topology Visualization + +Generate Mermaid diagrams showing infrastructure topology from NetBox. + +## Usage + +``` +/cmdb-topology [scope] +``` + +**Views:** +- `rack ` - Rack elevation showing devices and positions +- `network [site]` - Network topology showing device connections via cables +- `site ` - Site overview with racks and device counts +- `full` - Full infrastructure overview + +## Instructions + +You are a topology visualization assistant that queries NetBox and generates Mermaid diagrams. + +### View: Rack Elevation + +Generate a rack view showing devices and their positions. + +**Data Collection:** +1. Use `dcim_list_racks` to find the rack by name +2. Use `dcim_list_devices` with `rack_id` filter to get devices in rack +3. For each device, note: `position`, `u_height`, `face`, `name`, `role` + +**Mermaid Output:** + +```mermaid +graph TB + subgraph rack["Rack: (U)"] + direction TB + u42["U42: empty"] + u41["U41: empty"] + u40["U40: server-01 (Server)"] + u39["U39: server-01 (cont.)"] + u38["U38: switch-01 (Switch)"] + %% ... continue for all units + end +``` + +**For devices spanning multiple U:** +- Mark the top U with device name and role +- Mark subsequent Us as "(cont.)" for the same device +- Empty Us should show "empty" + +### View: Network Topology + +Generate a network diagram showing device connections. + +**Data Collection:** +1. Use `dcim_list_sites` if no site specified (get all) +2. Use `dcim_list_devices` with optional `site_id` filter +3. Use `dcim_list_cables` to get all connections +4. Use `dcim_list_interfaces` for each device to understand port names + +**Mermaid Output:** + +```mermaid +graph TD + subgraph site1["Site: Home"] + router1[("core-router-01
Router")] + switch1[["dist-switch-01
Switch"]] + server1["web-server-01
Server"] + server2["db-server-01
Server"] + end + + router1 -->|"eth0 - eth1"| switch1 + switch1 -->|"gi0/1 - eth0"| server1 + switch1 -->|"gi0/2 - eth0"| server2 +``` + +**Node shapes by role:** +- Router: `[(" ")]` (cylinder/database shape) +- Switch: `[[ ]]` (double brackets) +- Server: `[ ]` (rectangle) +- Firewall: `{{ }}` (hexagon) +- Other: `[ ]` (rectangle) + +**Edge labels:** Show interface names on both ends (A-side - B-side) + +### View: Site Overview + +Generate a site-level view showing racks and summary counts. + +**Data Collection:** +1. Use `dcim_get_site` to get site details +2. Use `dcim_list_racks` with `site_id` filter +3. Use `dcim_list_devices` with `site_id` filter for counts per rack + +**Mermaid Output:** + +```mermaid +graph TB + subgraph site["Site: Headquarters"] + subgraph row1["Row 1"] + rack1["Rack A1
12/42 U used<br/>5 devices"]
            rack2["Rack A2<br/>20/42 U used<br/>8 devices"]
        end
        subgraph row2["Row 2"]
            rack3["Rack B1<br/>8/42 U used
3 devices"] + end + end +``` + +### View: Full Infrastructure + +Generate a high-level view of all sites and their relationships. + +**Data Collection:** +1. Use `dcim_list_regions` to get hierarchy +2. Use `dcim_list_sites` to get all sites +3. Use `dcim_list_devices` with status filter for counts + +**Mermaid Output:** + +```mermaid +graph TB + subgraph region1["Region: Americas"] + site1["Headquarters
3 racks, 25 devices"]
        site2["Branch Office<br/>1 rack, 5 devices"]
    end
    subgraph region2["Region: Europe"]
        site3["EU Datacenter
10 racks, 100 devices"] + end + + site1 -.->|"WAN Link"| site3 +``` + +### Output Format + +Always provide: + +1. **Summary** - Brief description of what the diagram shows +2. **Mermaid Code Block** - The diagram code in a fenced code block +3. **Legend** - Explanation of shapes and colors used +4. **Data Notes** - Any data quality issues (e.g., devices without position, missing cables) + +**Example Output:** + +```markdown +## Network Topology: Home Site + +This diagram shows the network connections between 4 devices at the Home site. + +```mermaid +graph TD + router1[("core-router
Router")] + switch1[["main-switch
Switch"]] + server1["homelab-01
Server"] + + router1 -->|"eth0 - gi0/24"| switch1 + switch1 -->|"gi0/1 - eth0"| server1 +``` + +**Legend:** +- Cylinder shape: Routers +- Double brackets: Switches +- Rectangle: Servers + +**Data Notes:** +- 1 device (nas-01) has no cable connections documented +``` + +## Examples + +- `/cmdb-topology rack server-rack-01` - Show devices in server-rack-01 +- `/cmdb-topology network` - Show all network connections +- `/cmdb-topology network Home` - Show network topology for Home site only +- `/cmdb-topology site Headquarters` - Show rack overview for Headquarters +- `/cmdb-topology full` - Show full infrastructure overview + +## User Request + +$ARGUMENTS diff --git a/plugins/cmdb-assistant/commands/ip-conflicts.md b/plugins/cmdb-assistant/commands/ip-conflicts.md new file mode 100644 index 0000000..614b72d --- /dev/null +++ b/plugins/cmdb-assistant/commands/ip-conflicts.md @@ -0,0 +1,226 @@ +--- +description: Detect IP address conflicts and overlapping prefixes in NetBox +--- + +# CMDB IP Conflict Detection + +Scan NetBox IPAM data to identify IP address conflicts and overlapping prefixes. + +## Usage + +``` +/ip-conflicts [scope] +``` + +**Scopes:** +- `all` (default) - Full scan of all IP data +- `addresses` - Check for duplicate IP addresses only +- `prefixes` - Check for overlapping prefixes only +- `vrf ` - Scan specific VRF only +- `prefix ` - Scan within specific prefix + +## Instructions + +You are an IP conflict detection specialist that analyzes NetBox IPAM data for conflicts and issues. + +### Conflict Types to Detect + +#### 1. Duplicate IP Addresses + +Multiple IP address records with the same address (within same VRF). + +**Detection:** +1. Use `ipam_list_ip_addresses` to get all addresses +2. Group by address + VRF combination +3. Flag groups with more than one record + +**Exception:** Anycast addresses may legitimately appear multiple times - check the `role` field for "anycast". + +#### 2. Overlapping Prefixes + +Prefixes that contain the same address space (within same VRF). + +**Detection:** +1. Use `ipam_list_prefixes` to get all prefixes +2. For each prefix pair in the same VRF, check if one contains the other +3. Legitimate hierarchies should have proper parent-child relationships + +**Legitimate Overlaps:** +- Parent/child prefix hierarchy (e.g., 10.0.0.0/8 contains 10.0.1.0/24) +- Different VRFs (isolated routing tables) +- Marked as "container" status + +#### 3. IPs Outside Their Prefix + +IP addresses that don't fall within any defined prefix. + +**Detection:** +1. For each IP address, find the most specific prefix that contains it +2. Flag IPs with no matching prefix + +#### 4. Prefix Overlap Across VRFs (Informational) + +Same prefix appearing in multiple VRFs - not necessarily a conflict, but worth noting. + +### MCP Tools + +- `ipam_list_ip_addresses` - Get all IP addresses with filters: + - `address` - Filter by specific address + - `vrf_id` - Filter by VRF + - `parent` - Filter by parent prefix + - `status` - Filter by status + +- `ipam_list_prefixes` - Get all prefixes with filters: + - `prefix` - Filter by prefix CIDR + - `vrf_id` - Filter by VRF + - `within` - Find prefixes within a parent + - `contains` - Find prefixes containing an address + +- `ipam_list_vrfs` - List VRFs for context +- `ipam_get_ip_address` - Get detailed IP info including assigned device/interface +- `ipam_get_prefix` - Get detailed prefix info + +### Workflow + +1. 
**Data Collection** + - Fetch all IP addresses (or filtered set) + - Fetch all prefixes (or filtered set) + - Fetch VRFs for context + +2. **Duplicate Detection** + - Build address map: `{address+vrf: [records]}` + - Filter for entries with >1 record + +3. **Overlap Detection** + - For each VRF, compare prefixes pairwise + - Check using CIDR math: does prefix A contain prefix B or vice versa? + - Ignore legitimate hierarchies (status=container) + +4. **Orphan IP Detection** + - For each IP, find containing prefix + - Flag IPs with no prefix match + +5. **Generate Report** + +### Report Format + +```markdown +## IP Conflict Detection Report + +**Generated:** [timestamp] +**Scope:** [scope parameter] + +### Summary + +| Check | Status | Count | +|-------|--------|-------| +| Duplicate IPs | [PASS/FAIL] | X | +| Overlapping Prefixes | [PASS/FAIL] | Y | +| Orphan IPs | [PASS/FAIL] | Z | +| Total Issues | - | N | + +### Critical Issues + +#### Duplicate IP Addresses + +| Address | VRF | Count | Assigned To | +|---------|-----|-------|-------------| +| 10.0.1.50/24 | Global | 2 | server-01 (eth0), server-02 (eth0) | +| 192.168.1.100/24 | Global | 2 | router-01 (gi0/1), switch-01 (vlan10) | + +**Impact:** IP conflicts cause network connectivity issues. Devices will have intermittent connectivity. + +**Resolution:** +- Determine which device should have the IP +- Update or remove the duplicate assignment +- Consider IP reservation to prevent future conflicts + +#### Overlapping Prefixes + +| Prefix 1 | Prefix 2 | VRF | Type | +|----------|----------|-----|------| +| 10.0.0.0/24 | 10.0.0.0/25 | Global | Unstructured overlap | +| 192.168.0.0/16 | 192.168.1.0/24 | Production | Missing container flag | + +**Impact:** Overlapping prefixes can cause routing ambiguity and IP management confusion. + +**Resolution:** +- For legitimate hierarchies: Mark parent prefix as status="container" +- For accidental overlaps: Consolidate or re-address one prefix + +### Warnings + +#### IPs Without Prefix + +| Address | VRF | Assigned To | Nearest Prefix | +|---------|-----|-------------|----------------| +| 172.16.5.10/24 | Global | server-03 (eth0) | None found | + +**Impact:** IPs without a prefix bypass IPAM allocation controls. 
+ +**Resolution:** +- Create appropriate prefix to contain the IP +- Or update IP to correct address within existing prefix + +### Informational + +#### Same Prefix in Multiple VRFs + +| Prefix | VRFs | Purpose | +|--------|------|---------| +| 10.0.0.0/24 | Global, DMZ, Internal | [Check if intentional] | + +### Statistics + +| Metric | Value | +|--------|-------| +| Total IP Addresses | X | +| Total Prefixes | Y | +| Total VRFs | Z | +| Utilization (IPs/Prefix space) | W% | + +### Remediation Commands + +``` +# Remove duplicate IP (keep server-01's assignment) +ipam_delete_ip_address id=123 + +# Mark prefix as container +ipam_update_prefix id=456 status=container + +# Create missing prefix for orphan IP +ipam_create_prefix prefix=172.16.5.0/24 status=active +``` +``` + +### CIDR Math Reference + +For overlap detection, use these rules: +- Prefix A **contains** Prefix B if: A.network <= B.network AND A.broadcast >= B.broadcast +- Two prefixes **overlap** if: A.network <= B.broadcast AND B.network <= A.broadcast + +**Example:** +- 10.0.0.0/8 contains 10.0.1.0/24 (legitimate hierarchy) +- 10.0.0.0/24 and 10.0.0.128/25 overlap (10.0.0.128/25 is within 10.0.0.0/24) + +### Severity Levels + +| Issue | Severity | Description | +|-------|----------|-------------| +| Duplicate IP (same interface type) | CRITICAL | Active conflict, causes outages | +| Duplicate IP (different roles) | HIGH | Potential conflict | +| Overlapping prefixes (same status) | HIGH | IPAM management issue | +| Overlapping prefixes (container ok) | LOW | May need status update | +| Orphan IP | MEDIUM | Bypasses IPAM controls | + +## Examples + +- `/ip-conflicts` - Full scan for all conflicts +- `/ip-conflicts addresses` - Check only for duplicate IPs +- `/ip-conflicts prefixes` - Check only for overlapping prefixes +- `/ip-conflicts vrf Production` - Scan only Production VRF +- `/ip-conflicts prefix 10.0.0.0/8` - Scan within specific prefix range + +## User Request + +$ARGUMENTS diff --git a/plugins/contract-validator/README.md b/plugins/contract-validator/README.md index a5abf18..0fbea41 100644 --- a/plugins/contract-validator/README.md +++ b/plugins/contract-validator/README.md @@ -19,6 +19,7 @@ Contract-validator solves these by parsing plugin interfaces and validating comp - **Agent Extraction**: Parse CLAUDE.md Four-Agent Model tables and Agents sections - **Compatibility Checks**: Pairwise validation between all plugins in a marketplace - **Data Flow Validation**: Verify agent tool sequences have valid data producers/consumers +- **Dependency Visualization**: Generate Mermaid flowcharts showing plugin relationships - **Comprehensive Reports**: Markdown or JSON reports with actionable suggestions ## Installation @@ -44,6 +45,7 @@ pip install -r requirements.txt | `/validate-contracts` | Full marketplace compatibility validation | | `/check-agent` | Validate single agent definition | | `/list-interfaces` | Show all plugin interfaces | +| `/dependency-graph` | Generate Mermaid flowchart of plugin dependencies | ## Agents @@ -106,6 +108,16 @@ pip install -r requirements.txt # Data Flow: No issues detected ``` +``` +/dependency-graph + +# Output: Mermaid flowchart showing: +# - Plugins grouped by shared MCP servers +# - Data flow from data-platform to viz-platform +# - Required vs optional dependencies +# - Command counts per plugin +``` + ## Issue Types | Type | Severity | Description | diff --git a/plugins/contract-validator/commands/dependency-graph.md b/plugins/contract-validator/commands/dependency-graph.md new file 
mode 100644 index 0000000..01c1d0b --- /dev/null +++ b/plugins/contract-validator/commands/dependency-graph.md @@ -0,0 +1,251 @@ +# /dependency-graph - Generate Dependency Visualization + +Generate a Mermaid flowchart showing plugin dependencies, data flows, and tool relationships. + +## Usage + +``` +/dependency-graph [marketplace_path] [--format ] [--show-tools] +``` + +## Parameters + +- `marketplace_path` (optional): Path to marketplace root. Defaults to current project root. +- `--format` (optional): Output format - `mermaid` (default) or `text` +- `--show-tools` (optional): Include individual tool nodes in the graph + +## Workflow + +1. **Discover plugins**: + - Scan plugins directory for all plugins with `.claude-plugin/` marker + - Parse each plugin's README.md to extract interface + - Parse CLAUDE.md for agent definitions and tool sequences + +2. **Analyze dependencies**: + - Identify shared MCP servers (plugins using same server) + - Detect tool dependencies (which plugins produce vs consume data) + - Find agent tool references across plugins + - Categorize as required (ERROR if missing) or optional (WARNING if missing) + +3. **Build dependency graph**: + - Create nodes for each plugin + - Create edges for: + - Shared MCP servers (bidirectional) + - Data producers -> consumers (directional) + - Agent tool dependencies (directional) + - Mark edges as optional or required + +4. **Generate Mermaid output**: + - Create flowchart diagram syntax + - Style required dependencies with solid lines + - Style optional dependencies with dashed lines + - Group by MCP server or data flow + +## Output Format + +### Mermaid (default) + +```mermaid +flowchart TD + subgraph mcp_gitea["MCP: gitea"] + projman["projman"] + pr-review["pr-review"] + end + + subgraph mcp_data["MCP: data-platform"] + data-platform["data-platform"] + end + + subgraph mcp_viz["MCP: viz-platform"] + viz-platform["viz-platform"] + end + + %% Data flow dependencies + data-platform -->|"data_ref (required)"| viz-platform + + %% Optional dependencies + projman -.->|"lessons (optional)"| pr-review + + %% Styling + classDef required stroke:#e74c3c,stroke-width:2px + classDef optional stroke:#f39c12,stroke-dasharray:5 5 +``` + +### Text Format + +``` +DEPENDENCY GRAPH +================ + +Plugins: 12 +MCP Servers: 4 +Dependencies: 8 (5 required, 3 optional) + +MCP Server Groups: + gitea: projman, pr-review + data-platform: data-platform + viz-platform: viz-platform + netbox: cmdb-assistant + +Data Flow Dependencies: + data-platform -> viz-platform (data_ref) [REQUIRED] + data-platform -> data-platform (data_ref) [INTERNAL] + +Cross-Plugin Tool Usage: + projman.Planner uses: create_issue, search_lessons + pr-review.reviewer uses: get_pr_diff, create_pr_review +``` + +## Dependency Types + +| Type | Line Style | Meaning | +|------|------------|---------| +| Required | Solid (`-->`) | Plugin cannot function without this dependency | +| Optional | Dashed (`-.->`) | Plugin works but with reduced functionality | +| Internal | Dotted (`...>`) | Self-dependency within same plugin | +| Shared MCP | Double (`==>`) | Plugins share same MCP server instance | + +## Known Data Flow Patterns + +The command recognizes these producer/consumer relationships: + +### Data Producers +- `read_csv`, `read_parquet`, `read_json` - File loaders +- `pg_query`, `pg_execute` - Database queries +- `filter`, `select`, `groupby`, `join` - Transformations + +### Data Consumers +- `describe`, `head`, `tail` - Data inspection +- `to_csv`, `to_parquet` - File writers 
+- `chart_create` - Visualization + +### Cross-Plugin Flows +- `data-platform` produces `data_ref` -> `viz-platform` consumes for charts +- `projman` produces issues -> `pr-review` references in reviews +- `gitea` wiki -> `projman` lessons learned + +## Examples + +### Basic Usage + +``` +/dependency-graph +``` + +Generates Mermaid diagram for current marketplace. + +### With Tool Details + +``` +/dependency-graph --show-tools +``` + +Includes individual tool nodes showing which tools each plugin provides. + +### Text Summary + +``` +/dependency-graph --format text +``` + +Outputs text-based summary suitable for CLAUDE.md inclusion. + +### Specific Path + +``` +/dependency-graph ~/claude-plugins-work +``` + +Analyze marketplace at specified path. + +## Integration with Other Commands + +Use with `/validate-contracts` to: +1. Run `/dependency-graph` to visualize relationships +2. Run `/validate-contracts` to find issues in those relationships +3. Fix issues and regenerate graph to verify + +## Available Tools + +Use these MCP tools: +- `parse_plugin_interface` - Extract interface from plugin README.md +- `parse_claude_md_agents` - Extract agents and their tool sequences +- `generate_compatibility_report` - Get full interface data (JSON format for analysis) +- `validate_data_flow` - Verify data producer/consumer relationships + +## Implementation Notes + +### Detecting Shared MCP Servers + +Check each plugin's `.mcp.json` file for server definitions: + +```bash +# List all .mcp.json files in plugins +find plugins/ -name ".mcp.json" -exec cat {} \; +``` + +Plugins with identical MCP server names share that server. + +### Identifying Data Flows + +1. Parse tool categories from README.md +2. Map known producer tools to their output types +3. Map known consumer tools to their input requirements +4. Create edges where outputs match inputs + +### Optional vs Required + +- **Required**: Consumer tool has no default/fallback behavior +- **Optional**: Consumer works without producer (e.g., lessons search returns empty) + +Determination is based on: +- Issue severity from `validate_data_flow` (ERROR = required, WARNING = optional) +- Tool documentation stating "requires" vs "uses if available" + +## Sample Output + +For the leo-claude-mktplace: + +```mermaid +flowchart TD + subgraph gitea_mcp["Shared MCP: gitea"] + projman["projman
14 commands"]
        pr-review["pr-review<br/>6 commands"]
    end

    subgraph netbox_mcp["Shared MCP: netbox"]
        cmdb-assistant["cmdb-assistant<br/>3 commands"]
    end

    subgraph data_mcp["Shared MCP: data-platform"]
        data-platform["data-platform<br/>7 commands"]
    end

    subgraph viz_mcp["Shared MCP: viz-platform"]
        viz-platform["viz-platform
7 commands"] + end + + subgraph standalone["Standalone Plugins"] + doc-guardian["doc-guardian"] + code-sentinel["code-sentinel"] + clarity-assist["clarity-assist"] + git-flow["git-flow"] + claude-config-maintainer["claude-config-maintainer"] + contract-validator["contract-validator"] + end + + %% Data flow: data-platform -> viz-platform + data-platform -->|"data_ref"| viz-platform + + %% Cross-plugin: projman lessons -> pr-review context + projman -.->|"lessons"| pr-review + + %% Styling + classDef mcpGroup fill:#e8f4fd,stroke:#2196f3 + classDef standalone fill:#f5f5f5,stroke:#9e9e9e + classDef required stroke:#e74c3c,stroke-width:2px + classDef optional stroke:#f39c12,stroke-dasharray:5 5 + + class gitea_mcp,netbox_mcp,data_mcp,viz_mcp mcpGroup + class standalone standalone +``` diff --git a/plugins/data-platform/README.md b/plugins/data-platform/README.md index 43fe1fd..846c49d 100644 --- a/plugins/data-platform/README.md +++ b/plugins/data-platform/README.md @@ -49,10 +49,13 @@ DBT_PROFILES_DIR=~/.dbt | `/initial-setup` | Interactive setup wizard for PostgreSQL and dbt configuration | | `/ingest` | Load data from files or database | | `/profile` | Generate data profile and statistics | +| `/data-quality` | Data quality assessment with pass/warn/fail scoring | | `/schema` | Show database/DataFrame schema | | `/explain` | Explain dbt model lineage | -| `/lineage` | Visualize data dependencies | +| `/lineage` | Visualize data dependencies (ASCII) | +| `/lineage-viz` | Generate Mermaid flowchart for dbt lineage | | `/run` | Execute dbt models | +| `/dbt-test` | Run dbt tests with formatted results | ## Agents diff --git a/plugins/data-platform/commands/data-quality.md b/plugins/data-platform/commands/data-quality.md new file mode 100644 index 0000000..9d6cf3b --- /dev/null +++ b/plugins/data-platform/commands/data-quality.md @@ -0,0 +1,103 @@ +# /data-quality - Data Quality Assessment + +Comprehensive data quality check for DataFrames with pass/warn/fail scoring. + +## Usage + +``` +/data-quality [--strict] +``` + +## Workflow + +1. **Get data reference**: + - If no data_ref provided, use `list_data` to show available options + - Validate the data_ref exists + +2. **Null analysis**: + - Calculate null percentage per column + - **PASS**: < 5% nulls + - **WARN**: 5-20% nulls + - **FAIL**: > 20% nulls + +3. **Duplicate detection**: + - Check for fully duplicated rows + - **PASS**: 0% duplicates + - **WARN**: < 1% duplicates + - **FAIL**: >= 1% duplicates + +4. **Type consistency**: + - Identify mixed-type columns (object columns with mixed content) + - Flag columns that could be numeric but contain strings + - **PASS**: All columns have consistent types + - **FAIL**: Mixed types detected + +5. **Outlier detection** (numeric columns): + - Use IQR method (values beyond 1.5 * IQR) + - Report percentage of outliers per column + - **PASS**: < 1% outliers + - **WARN**: 1-5% outliers + - **FAIL**: > 5% outliers + +6. 
**Generate quality report**: + - Overall quality score (0-100) + - Per-column breakdown + - Recommendations for remediation + +## Report Format + +``` +=== Data Quality Report === +Dataset: sales_data +Rows: 10,000 | Columns: 15 +Overall Score: 82/100 [PASS] + +--- Column Analysis --- +| Column | Nulls | Dups | Type | Outliers | Status | +|--------------|-------|------|----------|----------|--------| +| customer_id | 0.0% | - | int64 | 0.2% | PASS | +| email | 2.3% | - | object | - | PASS | +| amount | 15.2% | - | float64 | 3.1% | WARN | +| created_at | 0.0% | - | datetime | - | PASS | + +--- Issues Found --- +[WARN] Column 'amount': 15.2% null values (threshold: 5%) +[WARN] Column 'amount': 3.1% outliers detected +[FAIL] 1.2% duplicate rows detected (12 rows) + +--- Recommendations --- +1. Investigate null values in 'amount' column +2. Review outliers in 'amount' - may be data entry errors +3. Remove or deduplicate 12 duplicate rows +``` + +## Options + +| Flag | Description | +|------|-------------| +| `--strict` | Use stricter thresholds (WARN at 1% nulls, FAIL at 5%) | + +## Examples + +``` +/data-quality sales_data +/data-quality df_customers --strict +``` + +## Scoring + +| Component | Weight | Scoring | +|-----------|--------|---------| +| Nulls | 30% | 100 - (avg_null_pct * 2) | +| Duplicates | 20% | 100 - (dup_pct * 50) | +| Type consistency | 25% | 100 if clean, 0 if mixed | +| Outliers | 25% | 100 - (avg_outlier_pct * 10) | + +Final score: Weighted average, capped at 0-100 + +## Available Tools + +Use these MCP tools: +- `describe` - Get statistical summary (for outlier detection) +- `head` - Preview data +- `list_data` - List available DataFrames diff --git a/plugins/data-platform/commands/dbt-test.md b/plugins/data-platform/commands/dbt-test.md new file mode 100644 index 0000000..31f889a --- /dev/null +++ b/plugins/data-platform/commands/dbt-test.md @@ -0,0 +1,119 @@ +# /dbt-test - Run dbt Tests + +Execute dbt tests with formatted pass/fail results. + +## Usage + +``` +/dbt-test [selection] [--warn-only] +``` + +## Workflow + +1. **Pre-validation** (MANDATORY): + - Use `dbt_parse` to validate project first + - If validation fails, show errors and STOP + +2. **Execute tests**: + - Use `dbt_test` with provided selection + - Capture all test results + +3. **Format results**: + - Group by test type (schema vs. 
data) + - Show pass/fail status with counts + - Display failure details + +## Report Format + +``` +=== dbt Test Results === +Project: my_project +Selection: tag:critical + +--- Summary --- +Total: 24 tests +PASS: 22 (92%) +FAIL: 1 (4%) +WARN: 1 (4%) +SKIP: 0 (0%) + +--- Schema Tests (18) --- +[PASS] unique_dim_customers_customer_id +[PASS] not_null_dim_customers_customer_id +[PASS] not_null_dim_customers_email +[PASS] accepted_values_dim_customers_status +[FAIL] relationships_fct_orders_customer_id + +--- Data Tests (6) --- +[PASS] assert_positive_order_amounts +[PASS] assert_valid_dates +[WARN] assert_recent_orders (threshold: 7 days) + +--- Failure Details --- +Test: relationships_fct_orders_customer_id +Type: schema (relationships) +Model: fct_orders +Message: 15 records failed referential integrity check +Query: SELECT * FROM fct_orders WHERE customer_id NOT IN (SELECT customer_id FROM dim_customers) + +--- Warning Details --- +Test: assert_recent_orders +Type: data +Message: No orders in last 7 days (expected for dev environment) +Severity: warn +``` + +## Selection Syntax + +| Pattern | Meaning | +|---------|---------| +| (none) | Run all tests | +| `model_name` | Tests for specific model | +| `+model_name` | Tests for model and upstream | +| `tag:critical` | Tests with tag | +| `test_type:schema` | Only schema tests | +| `test_type:data` | Only data tests | + +## Options + +| Flag | Description | +|------|-------------| +| `--warn-only` | Treat failures as warnings (don't fail CI) | + +## Examples + +``` +/dbt-test # Run all tests +/dbt-test dim_customers # Tests for specific model +/dbt-test tag:critical # Run critical tests only +/dbt-test +fct_orders # Test model and its upstream +``` + +## Test Types + +### Schema Tests +Built-in tests defined in `schema.yml`: +- `unique` - No duplicate values +- `not_null` - No null values +- `accepted_values` - Value in allowed list +- `relationships` - Foreign key integrity + +### Data Tests +Custom SQL tests in `tests/` directory: +- Return rows that fail the assertion +- Zero rows = pass, any rows = fail + +## Exit Codes + +| Code | Meaning | +|------|---------| +| 0 | All tests passed | +| 1 | One or more tests failed | +| 2 | dbt error (parse failure, etc.) | + +## Available Tools + +Use these MCP tools: +- `dbt_parse` - Pre-validation (ALWAYS RUN FIRST) +- `dbt_test` - Execute tests (REQUIRED) +- `dbt_build` - Alternative: run + test together diff --git a/plugins/data-platform/commands/lineage-viz.md b/plugins/data-platform/commands/lineage-viz.md new file mode 100644 index 0000000..949e711 --- /dev/null +++ b/plugins/data-platform/commands/lineage-viz.md @@ -0,0 +1,125 @@ +# /lineage-viz - Mermaid Lineage Visualization + +Generate Mermaid flowchart syntax for dbt model lineage. + +## Usage + +``` +/lineage-viz [--direction TB|LR] [--depth N] +``` + +## Workflow + +1. **Get lineage data**: + - Use `dbt_lineage` to fetch model dependencies + - Capture upstream sources and downstream consumers + +2. **Build Mermaid graph**: + - Create nodes for each model/source + - Style nodes by materialization type + - Add directional arrows for dependencies + +3. 
**Output**: + - Render Mermaid flowchart syntax + - Include copy-paste ready code block + +## Output Format + +```mermaid +flowchart LR + subgraph Sources + raw_customers[(raw_customers)] + raw_orders[(raw_orders)] + end + + subgraph Staging + stg_customers[stg_customers] + stg_orders[stg_orders] + end + + subgraph Marts + dim_customers{{dim_customers}} + fct_orders{{fct_orders}} + end + + raw_customers --> stg_customers + raw_orders --> stg_orders + stg_customers --> dim_customers + stg_orders --> fct_orders + dim_customers --> fct_orders +``` + +## Node Styles + +| Materialization | Mermaid Shape | Example | +|-----------------|---------------|---------| +| source | Cylinder `[( )]` | `raw_data[(raw_data)]` | +| view | Rectangle `[ ]` | `stg_model[stg_model]` | +| table | Double braces `{{ }}` | `dim_model{{dim_model}}` | +| incremental | Hexagon `{{ }}` | `fct_model{{fct_model}}` | +| ephemeral | Dashed `[/ /]` | `tmp_model[/tmp_model/]` | + +## Options + +| Flag | Description | +|------|-------------| +| `--direction TB` | Top-to-bottom layout (default: LR = left-to-right) | +| `--depth N` | Limit lineage depth (default: unlimited) | + +## Examples + +``` +/lineage-viz dim_customers +/lineage-viz fct_orders --direction TB +/lineage-viz rpt_revenue --depth 2 +``` + +## Usage Tips + +1. **Paste in documentation**: Copy the output directly into README.md or docs +2. **GitHub/GitLab rendering**: Both platforms render Mermaid natively +3. **Mermaid Live Editor**: Paste at https://mermaid.live for interactive editing + +## Example Output + +For `/lineage-viz fct_orders`: + +~~~markdown +```mermaid +flowchart LR + %% Sources + raw_customers[(raw_customers)] + raw_orders[(raw_orders)] + raw_products[(raw_products)] + + %% Staging + stg_customers[stg_customers] + stg_orders[stg_orders] + stg_products[stg_products] + + %% Marts + dim_customers{{dim_customers}} + dim_products{{dim_products}} + fct_orders{{fct_orders}} + + %% Dependencies + raw_customers --> stg_customers + raw_orders --> stg_orders + raw_products --> stg_products + stg_customers --> dim_customers + stg_products --> dim_products + stg_orders --> fct_orders + dim_customers --> fct_orders + dim_products --> fct_orders + + %% Highlight target model + style fct_orders fill:#f96,stroke:#333,stroke-width:2px +``` +~~~ + +## Available Tools + +Use these MCP tools: +- `dbt_lineage` - Get model dependencies (REQUIRED) +- `dbt_ls` - List dbt resources +- `dbt_docs_generate` - Generate full manifest if needed diff --git a/plugins/doc-guardian/README.md b/plugins/doc-guardian/README.md index c5c1e91..e877c45 100644 --- a/plugins/doc-guardian/README.md +++ b/plugins/doc-guardian/README.md @@ -22,6 +22,9 @@ doc-guardian monitors your code changes via hooks: |---------|-------------| | `/doc-audit` | Full project scan - reports all drift without changing anything | | `/doc-sync` | Apply all pending documentation updates in one commit | +| `/changelog-gen` | Generate changelog from conventional commits in Keep-a-Changelog format | +| `/doc-coverage` | Calculate documentation coverage percentage for functions and classes | +| `/stale-docs` | Detect documentation files that are stale relative to their associated code | ## Hooks @@ -33,6 +36,8 @@ doc-guardian monitors your code changes via hooks: - **Version Drift**: Python 3.9 in docs but 3.11 in pyproject.toml - **Missing Docs**: Public functions without docstrings - **Stale Examples**: CLI examples that no longer work +- **Low Coverage**: Undocumented functions and classes +- **Stale Files**: 
Documentation that hasn't been updated alongside code changes ## Installation diff --git a/plugins/doc-guardian/claude-md-integration.md b/plugins/doc-guardian/claude-md-integration.md index 6a481c0..bffb2f8 100644 --- a/plugins/doc-guardian/claude-md-integration.md +++ b/plugins/doc-guardian/claude-md-integration.md @@ -11,6 +11,9 @@ This project uses doc-guardian for automatic documentation synchronization. - Pending updates are queued silently during work - Run `/doc-sync` to apply all pending documentation updates - Run `/doc-audit` for a full project documentation review +- Run `/changelog-gen` to generate changelog from conventional commits +- Run `/doc-coverage` to check documentation coverage metrics +- Run `/stale-docs` to find documentation that may be outdated ### Documentation Files Tracked - README.md (root and subdirectories) diff --git a/plugins/doc-guardian/commands/changelog-gen.md b/plugins/doc-guardian/commands/changelog-gen.md new file mode 100644 index 0000000..07ea113 --- /dev/null +++ b/plugins/doc-guardian/commands/changelog-gen.md @@ -0,0 +1,109 @@ +--- +description: Generate changelog from conventional commits in Keep-a-Changelog format +--- + +# Changelog Generation + +Generate a changelog entry from conventional commits. + +## Process + +1. **Identify Commit Range** + - Default: commits since last tag + - Optional: specify range (e.g., `v1.0.0..HEAD`) + - Detect if this is first release (no previous tags) + +2. **Parse Conventional Commits** + Extract from commit messages following the pattern: + ``` + (): + + [optional body] + + [optional footer(s)] + ``` + + **Recognized Types:** + | Type | Changelog Section | + |------|------------------| + | `feat` | Added | + | `fix` | Fixed | + | `docs` | Documentation | + | `perf` | Performance | + | `refactor` | Changed | + | `style` | Changed | + | `test` | Testing | + | `build` | Build | + | `ci` | CI/CD | + | `chore` | Maintenance | + | `BREAKING CHANGE` | Breaking Changes | + +3. **Group by Type** + Organize commits into Keep-a-Changelog sections: + - Breaking Changes (if any `!` suffix or `BREAKING CHANGE` footer) + - Added (feat) + - Changed (refactor, style, perf) + - Deprecated + - Removed + - Fixed (fix) + - Security + +4. **Format Entries** + For each commit: + - Extract scope (if present) as prefix + - Use description as entry text + - Link to commit hash if repository URL available + - Include PR/issue references from footer + +5. 
**Output Format** +```markdown +## [Unreleased] + +### Breaking Changes +- **scope**: Description of breaking change + +### Added +- **scope**: New feature description +- Another feature without scope + +### Changed +- **scope**: Refactoring description + +### Fixed +- **scope**: Bug fix description + +### Documentation +- Updated README with new examples +``` + +## Options + +| Flag | Description | Default | +|------|-------------|---------| +| `--from ` | Start from specific tag | Latest tag | +| `--to ` | End at specific ref | HEAD | +| `--version ` | Set version header | [Unreleased] | +| `--include-merge` | Include merge commits | false | +| `--group-by-scope` | Group by scope within sections | false | + +## Integration + +The generated output is designed to be copied directly into CHANGELOG.md: +- Follows [Keep a Changelog](https://keepachangelog.com) format +- Compatible with semantic versioning +- Excludes non-user-facing commits (chore, ci, test by default) + +## Example Usage + +``` +/changelog-gen +/changelog-gen --from v1.0.0 --version 1.1.0 +/changelog-gen --include-merge --group-by-scope +``` + +## Non-Conventional Commits + +Commits not following conventional format are: +- Listed under "Other" section +- Flagged for manual categorization +- Skipped if `--strict` flag is used diff --git a/plugins/doc-guardian/commands/doc-coverage.md b/plugins/doc-guardian/commands/doc-coverage.md new file mode 100644 index 0000000..500766e --- /dev/null +++ b/plugins/doc-guardian/commands/doc-coverage.md @@ -0,0 +1,128 @@ +--- +description: Calculate documentation coverage percentage for functions and classes +--- + +# Documentation Coverage + +Analyze codebase to calculate documentation coverage metrics. + +## Process + +1. **Scan Source Files** + Identify all documentable items: + + **Python:** + - Functions (def) + - Classes + - Methods + - Module-level docstrings + + **JavaScript/TypeScript:** + - Functions (function, arrow functions) + - Classes + - Methods + - JSDoc comments + + **Other Languages:** + - Adapt patterns for Go, Rust, etc. + +2. **Determine Documentation Status** + For each item, check: + - Has docstring/JSDoc comment + - Docstring is non-empty and meaningful (not just `pass` or `TODO`) + - Parameters are documented (for detailed mode) + - Return type is documented (for detailed mode) + +3. **Calculate Metrics** + ``` + Coverage = (Documented Items / Total Items) * 100 + ``` + + **Levels:** + - Basic: Item has any docstring + - Standard: Docstring describes purpose + - Complete: All parameters and return documented + +4. 
**Output Format** +``` +## Documentation Coverage Report + +### Summary +- Total documentable items: 156 +- Documented: 142 +- Coverage: 91.0% + +### By Type +| Type | Total | Documented | Coverage | +|------|-------|------------|----------| +| Functions | 89 | 85 | 95.5% | +| Classes | 23 | 21 | 91.3% | +| Methods | 44 | 36 | 81.8% | + +### By Directory +| Path | Total | Documented | Coverage | +|------|-------|------------|----------| +| src/api/ | 34 | 32 | 94.1% | +| src/utils/ | 28 | 28 | 100.0% | +| src/models/ | 45 | 38 | 84.4% | +| tests/ | 49 | 44 | 89.8% | + +### Undocumented Items +- [ ] src/api/handlers.py:45 `create_order()` +- [ ] src/api/handlers.py:78 `update_order()` +- [ ] src/models/user.py:23 `UserModel.validate()` +``` + +## Options + +| Flag | Description | Default | +|------|-------------|---------| +| `--path ` | Scan specific directory | Project root | +| `--exclude ` | Exclude files matching pattern | `**/test_*,**/*_test.*` | +| `--include-private` | Include private members (_prefixed) | false | +| `--include-tests` | Include test files | false | +| `--min-coverage ` | Fail if below threshold | none | +| `--format ` | Output format (table, json, markdown) | table | +| `--detailed` | Check parameter/return docs | false | + +## Thresholds + +Common coverage targets: +| Level | Coverage | Description | +|-------|----------|-------------| +| Minimal | 60% | Basic documentation exists | +| Good | 80% | Most public APIs documented | +| Excellent | 95% | Comprehensive documentation | + +## CI Integration + +Use `--min-coverage` to enforce standards: +```bash +# Fail if coverage drops below 80% +claude /doc-coverage --min-coverage 80 +``` + +Exit codes: +- 0: Coverage meets threshold (or no threshold set) +- 1: Coverage below threshold + +## Example Usage + +``` +/doc-coverage +/doc-coverage --path src/ +/doc-coverage --min-coverage 85 --exclude "**/generated/**" +/doc-coverage --detailed --include-private +``` + +## Language Detection + +File extensions mapped to documentation patterns: +| Extension | Language | Doc Format | +|-----------|----------|------------| +| .py | Python | Docstrings (""") | +| .js, .ts | JavaScript/TypeScript | JSDoc (/** */) | +| .go | Go | // comments above | +| .rs | Rust | /// doc comments | +| .rb | Ruby | # comments, YARD | +| .java | Java | Javadoc (/** */) | diff --git a/plugins/doc-guardian/commands/stale-docs.md b/plugins/doc-guardian/commands/stale-docs.md new file mode 100644 index 0000000..fff8b8d --- /dev/null +++ b/plugins/doc-guardian/commands/stale-docs.md @@ -0,0 +1,143 @@ +--- +description: Detect documentation files that are stale relative to their associated code +--- + +# Stale Documentation Detection + +Identify documentation files that may be outdated based on commit history. + +## Process + +1. **Map Documentation to Code** + Build relationships between docs and code: + + | Doc File | Related Code | + |----------|--------------| + | README.md | All files in same directory | + | API.md | src/api/**/* | + | CLAUDE.md | Configuration files, scripts | + | docs/module.md | src/module/**/* | + | Component.md | Component.tsx, Component.css | + +2. **Analyze Commit History** + For each doc file: + - Find last commit that modified the doc + - Find last commit that modified related code + - Count commits to code since doc was updated + +3. **Calculate Staleness** + ``` + Commits Behind = Code Commits Since Doc Update + Days Behind = Days Since Doc Update - Days Since Code Update + ``` + +4. 
**Apply Threshold** + Default: Flag if documentation is 10+ commits behind related code + + **Staleness Levels:** + | Commits Behind | Level | Action | + |----------------|-------|--------| + | 0-5 | Fresh | No action needed | + | 6-10 | Aging | Review recommended | + | 11-20 | Stale | Update needed | + | 20+ | Critical | Immediate attention | + +5. **Output Format** +``` +## Stale Documentation Report + +### Critical (20+ commits behind) +| File | Last Updated | Commits Behind | Related Code | +|------|--------------|----------------|--------------| +| docs/api.md | 2024-01-15 | 34 | src/api/**/* | + +### Stale (11-20 commits behind) +| File | Last Updated | Commits Behind | Related Code | +|------|--------------|----------------|--------------| +| README.md | 2024-02-20 | 15 | package.json, src/index.ts | + +### Aging (6-10 commits behind) +| File | Last Updated | Commits Behind | Related Code | +|------|--------------|----------------|--------------| +| CONTRIBUTING.md | 2024-03-01 | 8 | .github/*, scripts/* | + +### Summary +- Critical: 1 file +- Stale: 1 file +- Aging: 1 file +- Fresh: 12 files +- Total documentation files: 15 +``` + +## Options + +| Flag | Description | Default | +|------|-------------|---------| +| `--threshold ` | Commits behind to flag as stale | 10 | +| `--days` | Use days instead of commits | false | +| `--path ` | Scan specific directory | Project root | +| `--doc-pattern ` | Pattern for doc files | `**/*.md,**/README*` | +| `--ignore ` | Ignore specific docs | `CHANGELOG.md,LICENSE` | +| `--show-fresh` | Include fresh docs in output | false | +| `--format ` | Output format (table, json) | table | + +## Relationship Detection + +How docs are mapped to code: + +1. **Same Directory** + - `src/api/README.md` relates to `src/api/**/*` + +2. **Name Matching** + - `docs/auth.md` relates to `**/auth.*`, `**/auth/**` + +3. **Explicit Links** + - Parse `[link](path)` in docs to find related files + +4. **Import Analysis** + - Track which modules are referenced in code examples + +## Configuration + +Create `.doc-guardian.yml` to customize mappings: +```yaml +stale-docs: + threshold: 10 + mappings: + - doc: docs/deployment.md + code: + - Dockerfile + - docker-compose.yml + - .github/workflows/deploy.yml + - doc: ARCHITECTURE.md + code: + - src/**/* + ignore: + - CHANGELOG.md + - LICENSE + - vendor/** +``` + +## Example Usage + +``` +/stale-docs +/stale-docs --threshold 5 +/stale-docs --days --threshold 30 +/stale-docs --path docs/ --show-fresh +``` + +## Integration with doc-audit + +`/stale-docs` focuses specifically on commit-based staleness, while `/doc-audit` checks content accuracy. 
Use both for comprehensive documentation health: + +``` +/doc-audit # Check for broken references and content drift +/stale-docs # Check for files that may need review +``` + +## Exit Codes + +- 0: No critical or stale documentation +- 1: Stale documentation found (useful for CI) +- 2: Critical documentation found diff --git a/plugins/pr-review/README.md b/plugins/pr-review/README.md index d882dfc..d1666f3 100644 --- a/plugins/pr-review/README.md +++ b/plugins/pr-review/README.md @@ -13,6 +13,7 @@ pr-review conducts comprehensive code reviews using specialized agents for secur | `/pr-review ` | Full multi-agent review | | `/pr-summary ` | Quick summary without full review | | `/pr-findings ` | Filter findings by category/confidence | +| `/pr-diff ` | View diff with inline comment annotations | | `/initial-setup` | Full interactive setup wizard | | `/project-init` | Quick project setup (system already configured) | | `/project-sync` | Sync configuration with current git remote | @@ -51,14 +52,38 @@ Requires Gitea MCP server configuration. ## Configuration +Environment variables can be set in your project's `.env` file or shell environment. + +| Variable | Default | Description | +|----------|---------|-------------| +| `PR_REVIEW_CONFIDENCE_THRESHOLD` | `0.7` | Minimum confidence score (0.0-1.0) for reporting findings. Findings below this threshold are filtered out to reduce noise. | +| `PR_REVIEW_AUTO_SUBMIT` | `false` | Automatically submit review to Gitea without confirmation prompt | + +### Example Configuration + ```bash -# Minimum confidence to report (default: 0.5) -PR_REVIEW_CONFIDENCE_THRESHOLD=0.5 +# Project .env file + +# Only show high-confidence findings (MEDIUM and HIGH) +PR_REVIEW_CONFIDENCE_THRESHOLD=0.7 # Auto-submit review to Gitea (default: false) PR_REVIEW_AUTO_SUBMIT=false ``` +### Confidence Threshold Details + +The confidence threshold filters which findings appear in review output: + +| Threshold | Effect | +|-----------|--------| +| `0.9` | Only definite issues (HIGH confidence) | +| `0.7` | Likely issues and above (MEDIUM+HIGH) - **recommended** | +| `0.5` | Include possible concerns (LOW+MEDIUM+HIGH) | +| `0.3` | Include speculative findings | + +Lower thresholds show more findings but may include false positives. Higher thresholds reduce noise but may miss some valid concerns. + ## Usage Examples ### Full Review diff --git a/plugins/pr-review/agents/coordinator.md b/plugins/pr-review/agents/coordinator.md index 718484d..3b3f9e7 100644 --- a/plugins/pr-review/agents/coordinator.md +++ b/plugins/pr-review/agents/coordinator.md @@ -120,10 +120,13 @@ Focus on findings that: ### Respect Confidence Thresholds -Never report findings below 0.5 confidence. Be transparent about uncertainty: -- 0.9+ → "This is definitely an issue" -- 0.7-0.89 → "This is likely an issue" -- 0.5-0.69 → "This might be an issue" +Filter findings based on `PR_REVIEW_CONFIDENCE_THRESHOLD` (default: 0.7). Be transparent about uncertainty: +- 0.9+ → "This is definitely an issue" (HIGH) +- 0.7-0.89 → "This is likely an issue" (MEDIUM) +- 0.5-0.69 → "This might be an issue" (LOW) +- < threshold → Filtered from output + +With the default threshold of 0.7, only MEDIUM and HIGH confidence findings are reported. 
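A minimal sketch of the intended filtering, assuming findings carry a numeric `confidence` field and that the threshold is read from `PR_REVIEW_CONFIDENCE_THRESHOLD` (the function and field names here are illustrative, not part of the plugin's actual code):

```python
import os

# 0.7 is the documented default when the variable is unset.
THRESHOLD = float(os.getenv("PR_REVIEW_CONFIDENCE_THRESHOLD", "0.7"))

def label(confidence: float) -> str:
    """Map a confidence score to the labels used in review output."""
    if confidence >= 0.9:
        return "HIGH"
    if confidence >= 0.7:
        return "MEDIUM"
    return "LOW"

def filter_findings(findings: list[dict]) -> list[dict]:
    """Drop findings below the configured threshold and attach labels."""
    return [
        {**finding, "label": label(finding["confidence"])}
        for finding in findings
        if finding["confidence"] >= THRESHOLD
    ]
```

Raising or lowering the environment variable shifts which labels survive this filter, matching the ranges listed above.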
### Avoid Noise diff --git a/plugins/pr-review/claude-md-integration.md b/plugins/pr-review/claude-md-integration.md index 87e235c..f9f45a4 100644 --- a/plugins/pr-review/claude-md-integration.md +++ b/plugins/pr-review/claude-md-integration.md @@ -15,6 +15,7 @@ This project uses the pr-review plugin for automated code review. | `/pr-review ` | Full multi-agent review | | `/pr-summary ` | Quick change summary | | `/pr-findings ` | Filter review findings | +| `/pr-diff ` | View diff with inline comments | ### Review Categories @@ -26,11 +27,16 @@ Reviews analyze: ### Confidence Threshold -Findings below 0.5 confidence are suppressed. +Configure via `PR_REVIEW_CONFIDENCE_THRESHOLD` (default: 0.7). -- HIGH (0.9+): Definite issue -- MEDIUM (0.7-0.89): Likely issue -- LOW (0.5-0.69): Possible concern +| Range | Label | Action | +|-------|-------|--------| +| 0.9 - 1.0 | HIGH | Must address | +| 0.7 - 0.89 | MEDIUM | Should address | +| 0.5 - 0.69 | LOW | Consider addressing | +| < threshold | (filtered) | Not reported | + +With default threshold of 0.7, only MEDIUM and HIGH findings are shown. ### Verdict Rules diff --git a/plugins/pr-review/commands/pr-diff.md b/plugins/pr-review/commands/pr-diff.md new file mode 100644 index 0000000..346b02e --- /dev/null +++ b/plugins/pr-review/commands/pr-diff.md @@ -0,0 +1,154 @@ +# /pr-diff - Annotated PR Diff Viewer + +## Purpose + +Display the PR diff with inline annotations from review comments, making it easy to see what feedback has been given alongside the code changes. + +## Usage + +``` +/pr-diff [--repo owner/repo] [--context ] +``` + +### Options + +``` +--repo Override repository (default: from .env) +--context Lines of context around changes (default: 3) +--no-comments Show diff without comment annotations +--file Filter to specific files (glob pattern) +``` + +## Behavior + +### Step 1: Fetch PR Data + +Using Gitea MCP tools: +1. `get_pr_diff` - Unified diff of all changes +2. `get_pr_comments` - All review comments on the PR + +### Step 2: Parse and Annotate + +Parse the diff and overlay comments at their respective file/line positions: + +``` +═══════════════════════════════════════════════════ +PR #123 Diff - Add user authentication +═══════════════════════════════════════════════════ + +Branch: feat/user-auth → development +Files: 12 changed (+234 / -45) + +─────────────────────────────────────────────────── +src/api/users.ts (+85 / -12) +─────────────────────────────────────────────────── + +@@ -42,6 +42,15 @@ export async function getUser(id: string) { + 42 │ const db = getDatabase(); + 43 │ + 44 │- const user = db.query("SELECT * FROM users WHERE id = " + id); + │ ┌───────────────────────────────────────────────────────────── + │ │ COMMENT by @reviewer (2h ago): + │ │ This is a SQL injection vulnerability. Use parameterized + │ │ queries instead: `db.query("SELECT * FROM users WHERE id = ?", [id])` + │ └───────────────────────────────────────────────────────────── + 45 │+ const query = "SELECT * FROM users WHERE id = ?"; + 46 │+ const user = db.query(query, [id]); + 47 │ + 48 │ if (!user) { + 49 │ throw new NotFoundError("User not found"); + 50 │ } + +@@ -78,3 +87,12 @@ export async function updateUser(id: string, data: UserInput) { + 87 │+ // Validate input before update + 88 │+ validateUserInput(data); + 89 │+ + 90 │+ const result = db.query( + 91 │+ "UPDATE users SET name = ?, email = ? 
WHERE id = ?", + 92 │+ [data.name, data.email, id] + 93 │+ ); + │ ┌───────────────────────────────────────────────────────────── + │ │ COMMENT by @maintainer (1h ago): + │ │ Good use of parameterized query here! + │ │ + │ │ REPLY by @author (30m ago): + │ │ Thanks! Applied the same pattern throughout. + │ └───────────────────────────────────────────────────────────── + +─────────────────────────────────────────────────── +src/components/LoginForm.tsx (+65 / -0) [NEW FILE] +─────────────────────────────────────────────────── + +@@ -0,0 +1,65 @@ + 1 │+import React, { useState } from 'react'; + 2 │+import { useAuth } from '../context/AuthContext'; + 3 │+ + 4 │+export function LoginForm() { + 5 │+ const [email, setEmail] = useState(''); + 6 │+ const [password, setPassword] = useState(''); + 7 │+ const { login } = useAuth(); + +... (remaining diff content) + +═══════════════════════════════════════════════════ +Comment Summary: 5 comments, 2 resolved +═══════════════════════════════════════════════════ +``` + +### Step 3: Filter by Confidence (Optional) + +If `PR_REVIEW_CONFIDENCE_THRESHOLD` is set, also annotate with high-confidence findings from previous reviews: + +``` + 44 │- const user = db.query("SELECT * FROM users WHERE id = " + id); + │ ┌─── REVIEW FINDING (0.95 HIGH) ───────────────────────────── + │ │ [SEC-001] SQL Injection Vulnerability + │ │ Use parameterized queries to prevent injection attacks. + │ └───────────────────────────────────────────────────────────── + │ ┌─── COMMENT by @reviewer ──────────────────────────────────── + │ │ This is a SQL injection vulnerability... + │ └───────────────────────────────────────────────────────────── +``` + +## Output Formats + +### Default (Annotated Diff) + +Full diff with inline comments as shown above. 
+ +### Plain (--no-comments) + +``` +/pr-diff 123 --no-comments + +# Standard unified diff output without annotations +``` + +### File Filter (--file) + +``` +/pr-diff 123 --file "src/api/*" + +# Shows diff only for files matching pattern +``` + +## Use Cases + +- **Review preparation**: See the full context of changes with existing feedback +- **Followup work**: Understand what was commented on and where +- **Discussion context**: View threaded conversations alongside the code +- **Progress tracking**: See which comments have been resolved + +## Configuration + +| Variable | Default | Description | +|----------|---------|-------------| +| `PR_REVIEW_CONFIDENCE_THRESHOLD` | `0.7` | Minimum confidence for showing review findings | + +## Related Commands + +| Command | Purpose | +|---------|---------| +| `/pr-summary` | Quick overview without diff | +| `/pr-review` | Full multi-agent review | +| `/pr-findings` | Filter review findings by category | diff --git a/plugins/pr-review/commands/pr-review.md b/plugins/pr-review/commands/pr-review.md index d899690..053e3ae 100644 --- a/plugins/pr-review/commands/pr-review.md +++ b/plugins/pr-review/commands/pr-review.md @@ -46,14 +46,16 @@ Collect findings from all agents, each with: ### Step 4: Filter by Confidence -Only display findings with confidence >= 0.5: +Filter findings based on `PR_REVIEW_CONFIDENCE_THRESHOLD` (default: 0.7): | Confidence | Label | Description | |------------|-------|-------------| | 0.9 - 1.0 | HIGH | Definite issue, must address | | 0.7 - 0.89 | MEDIUM | Likely issue, should address | | 0.5 - 0.69 | LOW | Possible concern, consider addressing | -| < 0.5 | (suppressed) | Too uncertain to report | +| < threshold | (filtered) | Below configured threshold | + +**Note:** With the default threshold of 0.7, only MEDIUM and HIGH confidence findings are shown. Adjust `PR_REVIEW_CONFIDENCE_THRESHOLD` to include more or fewer findings. ### Step 5: Generate Report @@ -135,5 +137,5 @@ Full review report with: | Variable | Default | Description | |----------|---------|-------------| -| `PR_REVIEW_CONFIDENCE_THRESHOLD` | `0.5` | Minimum confidence to report | +| `PR_REVIEW_CONFIDENCE_THRESHOLD` | `0.7` | Minimum confidence to report (0.0-1.0) | | `PR_REVIEW_AUTO_SUBMIT` | `false` | Auto-submit to Gitea | diff --git a/plugins/pr-review/skills/review-patterns/confidence-scoring.md b/plugins/pr-review/skills/review-patterns/confidence-scoring.md index 5ef19b7..2cd4131 100644 --- a/plugins/pr-review/skills/review-patterns/confidence-scoring.md +++ b/plugins/pr-review/skills/review-patterns/confidence-scoring.md @@ -73,10 +73,12 @@ Base confidence by pattern: ## Threshold Configuration -The default threshold is 0.5. This can be adjusted: +The default threshold is 0.7 (showing MEDIUM and HIGH confidence findings). 
This can be adjusted: ```bash -PR_REVIEW_CONFIDENCE_THRESHOLD=0.7 # Only high-confidence +PR_REVIEW_CONFIDENCE_THRESHOLD=0.9 # Only definite issues (HIGH) +PR_REVIEW_CONFIDENCE_THRESHOLD=0.7 # Likely issues and above (MEDIUM+HIGH) - default +PR_REVIEW_CONFIDENCE_THRESHOLD=0.5 # Include possible concerns (LOW+) PR_REVIEW_CONFIDENCE_THRESHOLD=0.3 # Include more speculative ``` diff --git a/plugins/projman/commands/sprint-diagram.md b/plugins/projman/commands/sprint-diagram.md new file mode 100644 index 0000000..2c07d2d --- /dev/null +++ b/plugins/projman/commands/sprint-diagram.md @@ -0,0 +1,180 @@ +--- +description: Generate Mermaid diagram of sprint issues with dependencies and status +--- + +# Sprint Diagram + +This command generates a visual Mermaid diagram showing the current sprint's issues, their dependencies, and execution flow. + +## What This Command Does + +1. **Fetch Sprint Issues** - Gets all issues for the current sprint milestone +2. **Fetch Dependencies** - Retrieves dependency relationships between issues +3. **Generate Mermaid Syntax** - Creates flowchart showing issue flow +4. **Apply Status Styling** - Colors nodes based on issue state (open/closed/in-progress) +5. **Show Execution Order** - Visualizes parallel batches and critical path + +## Usage + +``` +/sprint-diagram +/sprint-diagram --milestone "Sprint 4" +``` + +## MCP Tools Used + +**Issue Tools:** +- `list_issues(state="all")` - Fetch all sprint issues +- `list_milestones()` - Find current sprint milestone + +**Dependency Tools:** +- `list_issue_dependencies(issue_number)` - Get dependencies for each issue +- `get_execution_order(issue_numbers)` - Get parallel execution batches + +## Implementation Steps + +1. **Get Current Milestone:** + ``` + milestones = list_milestones(state="open") + current_sprint = milestones[0] # Most recent open milestone + ``` + +2. **Fetch Sprint Issues:** + ``` + issues = list_issues(state="all", milestone=current_sprint.title) + ``` + +3. **Fetch Dependencies for Each Issue:** + ```python + dependencies = {} + for issue in issues: + deps = list_issue_dependencies(issue.number) + dependencies[issue.number] = deps + ``` + +4. **Generate Mermaid Diagram:** + ```mermaid + flowchart TD + subgraph Sprint["Sprint 4 - Commands"] + 241["#241: sprint-diagram"] + 242["#242: confidence threshold"] + 243["#243: pr-diff"] + + 241 --> 242 + 242 --> 243 + end + + classDef completed fill:#90EE90,stroke:#228B22 + classDef inProgress fill:#FFD700,stroke:#DAA520 + classDef open fill:#ADD8E6,stroke:#4682B4 + classDef blocked fill:#FFB6C1,stroke:#DC143C + + class 241 completed + class 242 inProgress + class 243 open + ``` + +## Expected Output + +``` +Sprint Diagram: Sprint 4 - Commands +=================================== + +```mermaid +flowchart TD + subgraph batch1["Batch 1 - No Dependencies"] + 241["#241: sprint-diagram
projman"] + 242["#242: confidence threshold
pr-review"] + 244["#244: data-quality
data-platform"] + 247["#247: chart-export
viz-platform"] + 250["#250: dependency-graph
contract-validator"] + 251["#251: changelog-gen
doc-guardian"] + 254["#254: config-diff
config-maintainer"] + 256["#256: cmdb-topology
cmdb-assistant"] + end + + subgraph batch2["Batch 2 - After Batch 1"] + 243["#243: pr-diff
pr-review"] + 245["#245: lineage-viz
data-platform"] + 248["#248: color blind
viz-platform"] + 252["#252: doc-coverage
doc-guardian"] + 255["#255: linting
config-maintainer"] + 257["#257: change-audit
cmdb-assistant"] + end + + subgraph batch3["Batch 3 - Final"] + 246["#246: dbt-test
data-platform"] + 249["#249: responsive
viz-platform"] + 253["#253: stale-docs
doc-guardian"] + 258["#258: IP conflict
cmdb-assistant"] + end + + batch1 --> batch2 + batch2 --> batch3 + + classDef completed fill:#90EE90,stroke:#228B22 + classDef inProgress fill:#FFD700,stroke:#DAA520 + classDef open fill:#ADD8E6,stroke:#4682B4 + + class 241,242 completed + class 243,244 inProgress +``` + +## Status Legend + +| Status | Color | Description | +|--------|-------|-------------| +| Completed | Green | Issue closed | +| In Progress | Yellow | Currently being worked on | +| Open | Blue | Ready to start | +| Blocked | Red | Waiting on dependencies | + +## Diagram Types + +### Default: Dependency Flow +Shows how issues depend on each other with arrows indicating blockers. + +### Batch View (--batch) +Groups issues by execution batch for parallel work visualization. + +### Plugin View (--by-plugin) +Groups issues by plugin for component-level overview. + +## When to Use + +- **Sprint Planning**: Visualize scope and dependencies +- **Daily Standups**: Show progress at a glance +- **Documentation**: Include in wiki pages +- **Stakeholder Updates**: Visual progress reports + +## Integration + +The generated Mermaid diagram can be: +- Pasted into GitHub/Gitea issues +- Rendered in wiki pages +- Included in PRs for context +- Used in sprint retrospectives + +## Example + +``` +User: /sprint-diagram + +Generating sprint diagram... + +Milestone: Sprint 4 - Commands (18 issues) +Fetching dependencies... +Building diagram... + +```mermaid +flowchart LR + 241[sprint-diagram] --> |enables| 242[confidence] + 242 --> 243[pr-diff] + + style 241 fill:#90EE90 + style 242 fill:#ADD8E6 + style 243 fill:#ADD8E6 +``` + +Open: 16 | In Progress: 1 | Completed: 1 +``` diff --git a/plugins/viz-platform/README.md b/plugins/viz-platform/README.md index 9aced87..e9dc274 100644 --- a/plugins/viz-platform/README.md +++ b/plugins/viz-platform/README.md @@ -51,7 +51,10 @@ DMC_VERSION=0.14.7 | `/initial-setup` | Interactive setup wizard for DMC and theme preferences | | `/component {name}` | Inspect component props and validation | | `/chart {type}` | Create a Plotly chart | +| `/chart-export {format}` | Export chart to PNG, SVG, or PDF | | `/dashboard {template}` | Create a dashboard layout | +| `/breakpoints {layout}` | Configure responsive breakpoints | +| `/accessibility-check` | Validate colors for color blind users | | `/theme {name}` | Apply an existing theme | | `/theme-new {name}` | Create a new custom theme | | `/theme-css {name}` | Export theme as CSS | @@ -75,15 +78,16 @@ Prevent invalid component props before runtime. | `get_component_props` | Get detailed prop specifications | | `validate_component` | Validate a component configuration | -### Charts (2 tools) +### Charts (3 tools) Create Plotly charts with theme integration. | Tool | Description | |------|-------------| | `chart_create` | Create a chart (line, bar, scatter, pie, etc.) | | `chart_configure_interaction` | Configure chart interactivity | +| `chart_export` | Export chart to PNG, SVG, or PDF | -### Layouts (5 tools) +### Layouts (6 tools) Build dashboard structures with filters and grids. | Tool | Description | @@ -91,9 +95,19 @@ Build dashboard structures with filters and grids. | `layout_create` | Create a layout structure | | `layout_add_filter` | Add filter components | | `layout_set_grid` | Configure responsive grid | +| `layout_set_breakpoints` | Configure responsive breakpoints (xs-xl) | | `layout_add_section` | Add content sections | | `layout_get` | Retrieve layout details | +### Accessibility (3 tools) +Validate colors for accessibility and color blindness. 
+ +| Tool | Description | +|------|-------------| +| `accessibility_validate_colors` | Check colors for color blind accessibility | +| `accessibility_validate_theme` | Validate a theme's color accessibility | +| `accessibility_suggest_alternative` | Suggest accessible color alternatives | + ### Themes (6 tools) Manage design tokens and styling. @@ -188,9 +202,37 @@ viz-platform works seamlessly with data-platform: | `tabs` | Multi-page dashboards | | `split` | Comparisons, master-detail | +## Responsive Breakpoints + +The plugin supports mobile-first responsive design with standard breakpoints: + +| Breakpoint | Min Width | Description | +|------------|-----------|-------------| +| `xs` | 0px | Extra small (mobile portrait) | +| `sm` | 576px | Small (mobile landscape) | +| `md` | 768px | Medium (tablet) | +| `lg` | 992px | Large (desktop) | +| `xl` | 1200px | Extra large (large desktop) | + +Example: +``` +/breakpoints my-dashboard +# Configure cols, spacing per breakpoint +``` + +## Color Accessibility + +The plugin validates colors for color blindness: +- **Deuteranopia** (green-blind) - 6% of males +- **Protanopia** (red-blind) - 2.5% of males +- **Tritanopia** (blue-blind) - 0.01% of population + +Includes WCAG contrast ratio checking and accessible palette suggestions. + ## Requirements - Python 3.10+ - dash-mantine-components >= 0.14.0 - plotly >= 5.18.0 - dash >= 2.14.0 +- kaleido >= 0.2.1 (for chart export) diff --git a/plugins/viz-platform/commands/accessibility-check.md b/plugins/viz-platform/commands/accessibility-check.md new file mode 100644 index 0000000..c446f79 --- /dev/null +++ b/plugins/viz-platform/commands/accessibility-check.md @@ -0,0 +1,144 @@ +--- +description: Validate color accessibility for color blind users +--- + +# Accessibility Check + +Validate theme or chart colors for color blind accessibility, checking contrast ratios and suggesting alternatives. + +## Usage + +``` +/accessibility-check {target} +``` + +## Arguments + +- `target` (optional): "theme" or "chart" - defaults to active theme + +## Examples + +``` +/accessibility-check +/accessibility-check theme +/accessibility-check chart +``` + +## Tool Mapping + +This command uses the `accessibility_validate_colors` MCP tool: + +```python +accessibility_validate_colors( + colors=["#228be6", "#40c057", "#fa5252"], # Colors to check + check_types=["deuteranopia", "protanopia", "tritanopia"], + min_contrast_ratio=4.5 # WCAG AA standard +) +``` + +Or validate a full theme: +```python +accessibility_validate_theme( + theme_name="corporate" +) +``` + +## Workflow + +1. **User invokes**: `/accessibility-check theme` +2. **Tool analyzes**: Theme color palette +3. **Tool simulates**: Color perception for each deficiency type +4. **Tool checks**: Contrast ratios between color pairs +5. 
**Tool returns**: Issues found and alternative suggestions + +## Color Blindness Types + +| Type | Affected Colors | Population | +|------|-----------------|------------| +| **Deuteranopia** | Red-Green (green-blind) | ~6% males, 0.4% females | +| **Protanopia** | Red-Green (red-blind) | ~2.5% males, 0.05% females | +| **Tritanopia** | Blue-Yellow | ~0.01% total | + +## Output Example + +```json +{ + "theme_name": "corporate", + "overall_score": "B", + "issues": [ + { + "type": "contrast", + "severity": "warning", + "colors": ["#fa5252", "#40c057"], + "affected_by": ["deuteranopia", "protanopia"], + "message": "Red and green may be indistinguishable for red-green color blind users", + "suggestion": "Use blue (#228be6) instead of green to differentiate from red" + }, + { + "type": "contrast_ratio", + "severity": "error", + "colors": ["#fab005", "#ffffff"], + "ratio": 2.1, + "required": 4.5, + "message": "Insufficient contrast for WCAG AA compliance", + "suggestion": "Darken yellow to #e6a200 for ratio of 4.5+" + } + ], + "recommendations": [ + "Add patterns or shapes to distinguish data series, not just color", + "Include labels directly on chart elements", + "Consider using a color-blind safe palette" + ], + "safe_palettes": { + "categorical": ["#4477AA", "#EE6677", "#228833", "#CCBB44", "#66CCEE", "#AA3377", "#BBBBBB"], + "sequential": ["#FEE0D2", "#FC9272", "#DE2D26"], + "diverging": ["#4575B4", "#FFFFBF", "#D73027"] + } +} +``` + +## WCAG Contrast Standards + +| Level | Ratio | Use Case | +|-------|-------|----------| +| AA (normal text) | 4.5:1 | Body text, labels | +| AA (large text) | 3:1 | Headings, 14pt+ bold | +| AAA (enhanced) | 7:1 | Highest accessibility | + +## Color-Blind Safe Palettes + +The tool can suggest complete color-blind safe palettes: + +### IBM Design Colors +Designed for accessibility: +``` +#648FFF #785EF0 #DC267F #FE6100 #FFB000 +``` + +### Tableau Colorblind 10 +Industry-standard accessible palette: +``` +#006BA4 #FF800E #ABABAB #595959 #5F9ED1 +#C85200 #898989 #A2C8EC #FFBC79 #CFCFCF +``` + +### Okabe-Ito +Optimized for all types of color blindness: +``` +#E69F00 #56B4E9 #009E73 #F0E442 #0072B2 +#D55E00 #CC79A7 #000000 +``` + +## Related Commands + +- `/theme-new {name}` - Create accessible theme from the start +- `/theme-validate {name}` - General theme validation +- `/chart {type}` - Create chart (check colors after) + +## Best Practices + +1. **Don't rely on color alone** - Use shapes, patterns, or labels +2. **Test with simulation** - View your visualizations through color blindness simulators +3. **Use sufficient contrast** - Minimum 4.5:1 for text, 3:1 for large elements +4. **Limit color count** - Fewer colors = easier to distinguish +5. **Use semantic colors** - Blue for information, red for errors (with icons) diff --git a/plugins/viz-platform/commands/breakpoints.md b/plugins/viz-platform/commands/breakpoints.md new file mode 100644 index 0000000..e86539e --- /dev/null +++ b/plugins/viz-platform/commands/breakpoints.md @@ -0,0 +1,193 @@ +--- +description: Configure responsive breakpoints for dashboard layouts +--- + +# Configure Breakpoints + +Configure responsive breakpoints for a layout to support mobile-first design across different screen sizes. 
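For orientation, here is a minimal sketch (illustrative only, not the MCP server's implementation) of how a mobile-first breakpoint configuration like the ones below could be resolved for a given viewport width. The helper names and the min-width table are assumptions based on the standard breakpoints documented in this plugin:

```python
# Standard min-widths assumed throughout these docs.
BREAKPOINT_MIN_WIDTHS = {"xs": 0, "sm": 576, "md": 768, "lg": 992, "xl": 1200}

def active_breakpoint(viewport_width: int) -> str:
    """Return the largest breakpoint whose min-width fits the viewport."""
    active = "xs"
    for name, min_width in BREAKPOINT_MIN_WIDTHS.items():
        if viewport_width >= min_width:
            active = name
    return active

def resolve_cols(viewport_width: int, breakpoints: dict) -> int:
    """Mobile-first lookup: fall back to the nearest smaller breakpoint that is defined."""
    order = ["xs", "sm", "md", "lg", "xl"]
    active = active_breakpoint(viewport_width)
    for name in reversed(order[: order.index(active) + 1]):
        if name in breakpoints and "cols" in breakpoints[name]:
            return breakpoints[name]["cols"]
    return 12  # assumed default grid width

# Example: resolve_cols(800, {"xs": {"cols": 1}, "md": {"cols": 6}}) -> 6
```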
+ +## Usage + +``` +/breakpoints {layout_ref} +``` + +## Arguments + +- `layout_ref` (required): Layout name to configure breakpoints for + +## Examples + +``` +/breakpoints my-dashboard +/breakpoints sales-report +``` + +## Tool Mapping + +This command uses the `layout_set_breakpoints` MCP tool: + +```python +layout_set_breakpoints( + layout_ref="my-dashboard", + breakpoints={ + "xs": {"cols": 1, "spacing": "xs"}, # < 576px (mobile) + "sm": {"cols": 2, "spacing": "sm"}, # >= 576px (large mobile) + "md": {"cols": 6, "spacing": "md"}, # >= 768px (tablet) + "lg": {"cols": 12, "spacing": "md"}, # >= 992px (desktop) + "xl": {"cols": 12, "spacing": "lg"} # >= 1200px (large desktop) + }, + mobile_first=True +) +``` + +## Workflow + +1. **User invokes**: `/breakpoints my-dashboard` +2. **Agent asks**: Which breakpoints to customize? (shows current settings) +3. **Agent asks**: Mobile column count? (xs, typically 1-2) +4. **Agent asks**: Tablet column count? (md, typically 4-6) +5. **Agent applies**: Breakpoint configuration +6. **Agent returns**: Complete responsive configuration + +## Breakpoint Sizes + +| Name | Min Width | Common Devices | +|------|-----------|----------------| +| `xs` | 0px | Small phones (portrait) | +| `sm` | 576px | Large phones, small tablets | +| `md` | 768px | Tablets (portrait) | +| `lg` | 992px | Tablets (landscape), laptops | +| `xl` | 1200px | Desktops, large screens | + +## Mobile-First Approach + +When `mobile_first=True` (default), styles cascade up: +- Define base styles for `xs` (mobile) +- Override only what changes at larger breakpoints +- Smaller CSS footprint, better performance + +```python +# Mobile-first example +{ + "xs": {"cols": 1}, # Stack everything on mobile + "md": {"cols": 6}, # Two-column on tablet + "lg": {"cols": 12} # Full grid on desktop +} +``` + +When `mobile_first=False`, styles cascade down: +- Define base styles for `xl` (desktop) +- Override for smaller screens +- Traditional "desktop-first" approach + +## Grid Configuration per Breakpoint + +Each breakpoint can configure: + +| Property | Description | Values | +|----------|-------------|--------| +| `cols` | Grid column count | 1-24 | +| `spacing` | Gap between items | xs, sm, md, lg, xl | +| `gutter` | Outer padding | xs, sm, md, lg, xl | +| `grow` | Items grow to fill | true, false | + +## Common Patterns + +### Dashboard (Charts & Filters) +```python +{ + "xs": {"cols": 1, "spacing": "xs"}, # Full-width cards + "sm": {"cols": 2, "spacing": "sm"}, # 2 cards per row + "md": {"cols": 3, "spacing": "md"}, # 3 cards per row + "lg": {"cols": 4, "spacing": "md"}, # 4 cards per row + "xl": {"cols": 6, "spacing": "lg"} # 6 cards per row +} +``` + +### Data Table +```python +{ + "xs": {"cols": 1, "scroll": true}, # Horizontal scroll + "md": {"cols": 1, "scroll": false}, # Full table visible + "lg": {"cols": 1} # Same as md +} +``` + +### Form Layout +```python +{ + "xs": {"cols": 1}, # Single column + "md": {"cols": 2}, # Two columns + "lg": {"cols": 3} # Three columns +} +``` + +### Sidebar Layout +```python +{ + "xs": {"sidebar": "hidden"}, # No sidebar on mobile + "md": {"sidebar": "collapsed"}, # Icon-only sidebar + "lg": {"sidebar": "expanded"} # Full sidebar +} +``` + +## Component Span + +Control how many columns a component spans at each breakpoint: + +```python +# A chart that spans full width on mobile, half on desktop +{ + "component": "sales-chart", + "span": { + "xs": 1, # Full width (1/1) + "md": 3, # Half width (3/6) + "lg": 6 # Half width (6/12) + } +} +``` + +## DMC Grid 
Integration + +This maps to Dash Mantine Components Grid: + +```python +dmc.Grid( + children=[ + dmc.GridCol( + children=[chart], + span={"base": 12, "sm": 6, "lg": 4} # Responsive span + ) + ], + gutter="md" +) +``` + +## Output + +```json +{ + "layout_ref": "my-dashboard", + "breakpoints": { + "xs": {"cols": 1, "spacing": "xs", "min_width": "0px"}, + "sm": {"cols": 2, "spacing": "sm", "min_width": "576px"}, + "md": {"cols": 6, "spacing": "md", "min_width": "768px"}, + "lg": {"cols": 12, "spacing": "md", "min_width": "992px"}, + "xl": {"cols": 12, "spacing": "lg", "min_width": "1200px"} + }, + "mobile_first": true, + "css_media_queries": [ + "@media (min-width: 576px) { ... }", + "@media (min-width: 768px) { ... }", + "@media (min-width: 992px) { ... }", + "@media (min-width: 1200px) { ... }" + ] +} +``` + +## Related Commands + +- `/dashboard {template}` - Create layout with default breakpoints +- `/layout-set-grid` - Configure grid without responsive settings +- `/theme {name}` - Theme includes default spacing values diff --git a/plugins/viz-platform/commands/chart-export.md b/plugins/viz-platform/commands/chart-export.md new file mode 100644 index 0000000..16f15f0 --- /dev/null +++ b/plugins/viz-platform/commands/chart-export.md @@ -0,0 +1,114 @@ +--- +description: Export a Plotly chart to PNG, SVG, or PDF format +--- + +# Export Chart + +Export a Plotly chart to static image formats for sharing, embedding, or printing. + +## Usage + +``` +/chart-export {format} +``` + +## Arguments + +- `format` (required): Output format - one of: png, svg, pdf + +## Examples + +``` +/chart-export png +/chart-export svg +/chart-export pdf +``` + +## Tool Mapping + +This command uses the `chart_export` MCP tool: + +```python +chart_export( + figure=figure_json, # Plotly figure JSON from chart_create + format="png", # Output format: png, svg, pdf + width=1200, # Optional: image width in pixels + height=800, # Optional: image height in pixels + scale=2, # Optional: resolution scale factor + output_path=None # Optional: save to file path +) +``` + +## Workflow + +1. **User invokes**: `/chart-export png` +2. **Agent asks**: Which chart to export? (if multiple charts in context) +3. **Agent asks**: Image dimensions? (optional, uses chart defaults) +4. **Agent exports**: Chart with `chart_export` tool +5. 
**Agent returns**: Base64 image data or file path + +## Output Formats + +| Format | Best For | File Size | +|--------|----------|-----------| +| `png` | Web, presentations, general use | Medium | +| `svg` | Scalable graphics, editing | Small | +| `pdf` | Print, documents, archival | Large | + +## Resolution Options + +### Width & Height +Specify exact pixel dimensions: +```python +chart_export(figure, format="png", width=1920, height=1080) +``` + +### Scale Factor +Increase resolution for high-DPI displays: +```python +chart_export(figure, format="png", scale=3) # 3x resolution +``` + +Common scale values: +- `1` - Standard resolution (72 DPI) +- `2` - Retina/HiDPI (144 DPI) +- `3` - Print quality (216 DPI) +- `4` - High-quality print (288 DPI) + +## Output Options + +### Return as Base64 +Default behavior - returns base64-encoded image data: +```python +result = chart_export(figure, format="png") +# result["image_data"] contains base64 string +``` + +### Save to File +Specify output path to save directly: +```python +chart_export(figure, format="png", output_path="/path/to/chart.png") +# result["file_path"] contains the saved path +``` + +## Requirements + +This tool requires the `kaleido` package for rendering: +```bash +pip install kaleido +``` + +Kaleido is a cross-platform library that renders Plotly figures without a browser. + +## Error Handling + +Common issues: +- **Kaleido not installed**: Install with `pip install kaleido` +- **Invalid figure**: Ensure figure is valid Plotly JSON +- **Permission denied**: Check write permissions for output path + +## Related Commands + +- `/chart {type}` - Create a chart +- `/theme {name}` - Apply theme before export +- `/dashboard` - Create layout containing charts From a0d1b38c6e83d0d5707a653d9914dce3a53f0445 Mon Sep 17 00:00:00 2001 From: lmiranda Date: Wed, 28 Jan 2026 12:56:35 -0500 Subject: [PATCH 2/2] docs(changelog): add Sprint 4 changes to [Unreleased] - 18 new commands across 8 plugins - viz-platform accessibility tools and chart export - MCP project directory detection fix - Links to wiki implementation and lessons learned Sprint 4 - Commands milestone closed. Co-Authored-By: Claude Opus 4.5 --- CHANGELOG.md | 50 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 50 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4734c96..d934e13 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,56 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). ### Added +#### Sprint 4: Commands (V5.2.0 Plugin Enhancements) +Implementation of 18 new user-facing commands across 8 plugins. 
+ +**projman v3.3.0:** +- **`/sprint-diagram`** - Generate Mermaid diagram of sprint issues with dependencies and status + +**pr-review v1.1.0:** +- **`/pr-diff`** - Formatted diff with inline review comments and annotations +- **Confidence threshold config** - `PR_REVIEW_CONFIDENCE_THRESHOLD` env var (default: 0.7) + +**data-platform v1.2.0:** +- **`/data-quality`** - DataFrame quality checks (nulls, duplicates, types, outliers) with pass/warn/fail scoring +- **`/lineage-viz`** - dbt lineage visualization as Mermaid diagrams +- **`/dbt-test`** - Formatted dbt test runner with summary and failure details + +**viz-platform v1.1.0:** +- **`/chart-export`** - Export charts to PNG, SVG, PDF via kaleido +- **`/accessibility-check`** - Color blind validation (WCAG contrast ratios) +- **`/breakpoints`** - Responsive layout breakpoint configuration +- **New MCP tools**: `chart_export`, `accessibility_validate_colors`, `accessibility_validate_theme`, `accessibility_suggest_alternative`, `layout_set_breakpoints` +- **New dependency**: kaleido>=0.2.1 for chart rendering + +**contract-validator v1.2.0:** +- **`/dependency-graph`** - Mermaid visualization of plugin dependencies with data flow + +**doc-guardian v1.1.0:** +- **`/changelog-gen`** - Generate changelog from conventional commits +- **`/doc-coverage`** - Documentation coverage metrics by function/class +- **`/stale-docs`** - Flag documentation behind code changes + +**claude-config-maintainer v1.1.0:** +- **`/config-diff`** - Track CLAUDE.md changes over time with behavioral impact analysis +- **`/config-lint`** - 31 lint rules for CLAUDE.md (security, structure, content, format, best practices) + +**cmdb-assistant v1.2.0:** +- **`/cmdb-topology`** - Infrastructure topology diagrams (rack, network, site views) +- **`/change-audit`** - NetBox audit trail queries with filtering +- **`/ip-conflicts`** - Detect IP conflicts and overlapping prefixes + +**Sprint Completed:** +- Milestone: Sprint 4 - Commands (closed 2026-01-28) +- Issues: #241-#258 (18/18 closed) +- Wiki: [Change V5.2.0: Plugin Enhancements (Sprint 4 Commands)](https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/wiki/Change-V5.2.0%3A-Plugin-Enhancements-%28Sprint-4-Commands%29) +- Lessons: [Sprint 4 - Plugin Commands Implementation](https://gitea.hotserv.cloud/personal-projects/leo-claude-mktplace/wiki/lessons/sprints/sprint-4---plugin-commands-implementation) + +### Fixed +- **MCP:** Project directory detection - all run.sh scripts now capture `CLAUDE_PROJECT_DIR` from PWD before changing directories + +--- + #### Sprint 3: Hooks (V5.2.0 Plugin Enhancements) Implementation of 6 foundational hooks across 4 plugins.