feat(contract-validator): Complete Sprint 2 - Contract Validator Plugin #195
mcp-servers/contract-validator/tests/__init__.py (new file, 1 line)
@@ -0,0 +1 @@
# Tests for contract-validator MCP server
mcp-servers/contract-validator/tests/test_parse_tools.py (new file, 193 lines)
@@ -0,0 +1,193 @@
"""
Unit tests for parse tools.
"""
import pytest
from pathlib import Path


@pytest.fixture
def parse_tools():
    """Create ParseTools instance"""
    from mcp_server.parse_tools import ParseTools
    return ParseTools()


@pytest.fixture
def sample_readme(tmp_path):
    """Create a sample README.md for testing"""
    readme = tmp_path / "README.md"
    readme.write_text("""# Test Plugin

A test plugin for validation.

## Features

- **Feature One**: Does something
- **Feature Two**: Does something else

## Commands

| Command | Description |
|---------|-------------|
| `/test-cmd` | Test command |
| `/another-cmd` | Another test command |

## Agents

| Agent | Description |
|-------|-------------|
| `test-agent` | A test agent |

## Tools Summary

### Category A (3 tools)
`tool_a`, `tool_b`, `tool_c`

### Category B (2 tools)
`tool_d`, `tool_e`
""")
    return str(tmp_path)


@pytest.fixture
def sample_claude_md(tmp_path):
    """Create a sample CLAUDE.md for testing"""
    claude_md = tmp_path / "CLAUDE.md"
    claude_md.write_text("""# CLAUDE.md

## Project Overview

### Four-Agent Model (test)

| Agent | Personality | Responsibilities |
|-------|-------------|------------------|
| **Planner** | Thoughtful | Planning via `create_issue`, `search_lessons` |
| **Executor** | Focused | Implementation via `write`, `edit` |

## Workflow

1. Planner creates issues
2. Executor implements code
""")
    return str(claude_md)


@pytest.mark.asyncio
async def test_parse_plugin_interface_basic(parse_tools, sample_readme):
    """Test basic plugin interface parsing"""
    result = await parse_tools.parse_plugin_interface(sample_readme)

    assert "error" not in result
    # Plugin name extraction strips "Plugin" suffix
    assert result["plugin_name"] == "Test"
    assert "A test plugin" in result["description"]


@pytest.mark.asyncio
async def test_parse_plugin_interface_commands(parse_tools, sample_readme):
    """Test command extraction from README"""
    result = await parse_tools.parse_plugin_interface(sample_readme)

    commands = result["commands"]
    assert len(commands) == 2
    assert commands[0]["name"] == "/test-cmd"
    assert commands[1]["name"] == "/another-cmd"


@pytest.mark.asyncio
async def test_parse_plugin_interface_agents(parse_tools, sample_readme):
    """Test agent extraction from README"""
    result = await parse_tools.parse_plugin_interface(sample_readme)

    agents = result["agents"]
    assert len(agents) == 1
    assert agents[0]["name"] == "test-agent"


@pytest.mark.asyncio
async def test_parse_plugin_interface_tools(parse_tools, sample_readme):
    """Test tool extraction from README"""
    result = await parse_tools.parse_plugin_interface(sample_readme)

    tools = result["tools"]
    tool_names = [t["name"] for t in tools]
    assert "tool_a" in tool_names
    assert "tool_b" in tool_names
    assert "tool_e" in tool_names
    assert len(tools) >= 5


@pytest.mark.asyncio
async def test_parse_plugin_interface_categories(parse_tools, sample_readme):
    """Test tool category extraction"""
    result = await parse_tools.parse_plugin_interface(sample_readme)

    categories = result["tool_categories"]
    assert "Category A" in categories
    assert "Category B" in categories
    assert "tool_a" in categories["Category A"]


@pytest.mark.asyncio
async def test_parse_plugin_interface_features(parse_tools, sample_readme):
    """Test feature extraction"""
    result = await parse_tools.parse_plugin_interface(sample_readme)

    features = result["features"]
    assert "Feature One" in features
    assert "Feature Two" in features


@pytest.mark.asyncio
async def test_parse_plugin_interface_not_found(parse_tools, tmp_path):
    """Test error when README not found"""
    result = await parse_tools.parse_plugin_interface(str(tmp_path / "nonexistent"))

    assert "error" in result
    assert "not found" in result["error"].lower()


@pytest.mark.asyncio
async def test_parse_claude_md_agents(parse_tools, sample_claude_md):
    """Test agent extraction from CLAUDE.md"""
    result = await parse_tools.parse_claude_md_agents(sample_claude_md)

    assert "error" not in result
    assert result["agent_count"] == 2

    agents = result["agents"]
    agent_names = [a["name"] for a in agents]
    assert "Planner" in agent_names
    assert "Executor" in agent_names


@pytest.mark.asyncio
async def test_parse_claude_md_tool_refs(parse_tools, sample_claude_md):
    """Test tool reference extraction from agents"""
    result = await parse_tools.parse_claude_md_agents(sample_claude_md)

    agents = {a["name"]: a for a in result["agents"]}
    planner = agents["Planner"]

    assert "create_issue" in planner["tool_refs"]
    assert "search_lessons" in planner["tool_refs"]


@pytest.mark.asyncio
async def test_parse_claude_md_not_found(parse_tools, tmp_path):
    """Test error when CLAUDE.md not found"""
    result = await parse_tools.parse_claude_md_agents(str(tmp_path / "CLAUDE.md"))

    assert "error" in result
    assert "not found" in result["error"].lower()


@pytest.mark.asyncio
async def test_parse_plugin_with_direct_file(parse_tools, sample_readme):
    """Test parsing with direct file path instead of directory"""
    readme_path = Path(sample_readme) / "README.md"
    result = await parse_tools.parse_plugin_interface(str(readme_path))

    assert "error" not in result
    # Plugin name extraction strips "Plugin" suffix
    assert result["plugin_name"] == "Test"
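
Note: taken together, the assertions above pin down the dict shape that
parse_plugin_interface must return. A minimal sketch of that contract for
reference — the field names are inferred from the tests; the real
mcp_server.parse_tools module is not part of this diff:

from typing import TypedDict


class ParsedInterface(TypedDict):
    plugin_name: str       # "# Test Plugin" title with the "Plugin" suffix stripped
    description: str       # first paragraph under the title
    commands: list[dict]   # rows of the "## Commands" table, e.g. {"name": "/test-cmd", ...}
    agents: list[dict]     # rows of the "## Agents" table
    tools: list[dict]      # every backticked name under "## Tools Summary"
    tool_categories: dict  # {"Category A": ["tool_a", "tool_b", "tool_c"], ...}
    features: dict         # keyed by bolded feature name; the tests only check membership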
mcp-servers/contract-validator/tests/test_report_tools.py (new file, 261 lines)
@@ -0,0 +1,261 @@
"""
Unit tests for report tools.
"""
import pytest
from pathlib import Path


@pytest.fixture
def report_tools():
    """Create ReportTools instance"""
    from mcp_server.report_tools import ReportTools
    return ReportTools()


@pytest.fixture
def sample_marketplace(tmp_path):
    """Create a sample marketplace structure"""
    import json

    plugins_dir = tmp_path / "plugins"
    plugins_dir.mkdir()

    # Plugin 1
    plugin1 = plugins_dir / "plugin-one"
    plugin1.mkdir()
    plugin1_meta = plugin1 / ".claude-plugin"
    plugin1_meta.mkdir()
    (plugin1_meta / "plugin.json").write_text(json.dumps({"name": "plugin-one"}))
    (plugin1 / "README.md").write_text("""# plugin-one

First test plugin.

## Commands

| Command | Description |
|---------|-------------|
| `/cmd-one` | Command one |

## Tools Summary

### Tools (2 tools)
`tool_a`, `tool_b`
""")

    # Plugin 2
    plugin2 = plugins_dir / "plugin-two"
    plugin2.mkdir()
    plugin2_meta = plugin2 / ".claude-plugin"
    plugin2_meta.mkdir()
    (plugin2_meta / "plugin.json").write_text(json.dumps({"name": "plugin-two"}))
    (plugin2 / "README.md").write_text("""# plugin-two

Second test plugin.

## Commands

| Command | Description |
|---------|-------------|
| `/cmd-two` | Command two |

## Tools Summary

### Tools (2 tools)
`tool_c`, `tool_d`
""")

    # Plugin 3 (with conflict)
    plugin3 = plugins_dir / "plugin-three"
    plugin3.mkdir()
    plugin3_meta = plugin3 / ".claude-plugin"
    plugin3_meta.mkdir()
    (plugin3_meta / "plugin.json").write_text(json.dumps({"name": "plugin-three"}))
    (plugin3 / "README.md").write_text("""# plugin-three

Third test plugin with conflict.

## Commands

| Command | Description |
|---------|-------------|
| `/cmd-one` | Conflicting command |

## Tools Summary

### Tools (1 tool)
`tool_e`
""")

    return str(tmp_path)


@pytest.fixture
def marketplace_no_plugins(tmp_path):
    """Create marketplace with no plugins"""
    plugins_dir = tmp_path / "plugins"
    plugins_dir.mkdir()
    return str(tmp_path)


@pytest.fixture
def marketplace_no_dir(tmp_path):
    """Create path without plugins directory"""
    return str(tmp_path)


@pytest.mark.asyncio
async def test_generate_report_json_format(report_tools, sample_marketplace):
    """Test JSON format report generation"""
    result = await report_tools.generate_compatibility_report(
        sample_marketplace, "json"
    )

    assert "error" not in result
    assert "generated_at" in result
    assert "summary" in result
    assert "plugins" in result
    assert result["summary"]["total_plugins"] == 3


@pytest.mark.asyncio
async def test_generate_report_markdown_format(report_tools, sample_marketplace):
    """Test markdown format report generation"""
    result = await report_tools.generate_compatibility_report(
        sample_marketplace, "markdown"
    )

    assert "error" not in result
    assert "report" in result
    assert "# Contract Validation Report" in result["report"]
    assert "## Summary" in result["report"]


@pytest.mark.asyncio
async def test_generate_report_finds_conflicts(report_tools, sample_marketplace):
    """Test that report finds command conflicts"""
    result = await report_tools.generate_compatibility_report(
        sample_marketplace, "json"
    )

    assert "error" not in result
    assert result["summary"]["errors"] > 0
    assert result["summary"]["total_issues"] > 0


@pytest.mark.asyncio
async def test_generate_report_counts_correctly(report_tools, sample_marketplace):
    """Test summary counts are correct"""
    result = await report_tools.generate_compatibility_report(
        sample_marketplace, "json"
    )

    summary = result["summary"]
    assert summary["total_plugins"] == 3
    assert summary["total_commands"] == 3  # 3 commands total
    assert summary["total_tools"] == 5  # a, b, c, d, e


@pytest.mark.asyncio
async def test_generate_report_no_plugins(report_tools, marketplace_no_plugins):
    """Test error when no plugins found"""
    result = await report_tools.generate_compatibility_report(
        marketplace_no_plugins, "json"
    )

    assert "error" in result
    assert "no plugins" in result["error"].lower()


@pytest.mark.asyncio
async def test_generate_report_no_plugins_dir(report_tools, marketplace_no_dir):
    """Test error when plugins directory doesn't exist"""
    result = await report_tools.generate_compatibility_report(
        marketplace_no_dir, "json"
    )

    assert "error" in result
    assert "not found" in result["error"].lower()


@pytest.mark.asyncio
async def test_list_issues_all(report_tools, sample_marketplace):
    """Test listing all issues"""
    result = await report_tools.list_issues(sample_marketplace, "all", "all")

    assert "error" not in result
    assert "issues" in result
    assert result["total_issues"] > 0


@pytest.mark.asyncio
async def test_list_issues_filter_by_severity(report_tools, sample_marketplace):
    """Test filtering issues by severity"""
    all_result = await report_tools.list_issues(sample_marketplace, "all", "all")
    error_result = await report_tools.list_issues(sample_marketplace, "error", "all")

    # Error count should be less than or equal to all
    assert error_result["total_issues"] <= all_result["total_issues"]

    # All issues should have error severity
    for issue in error_result["issues"]:
        sev = issue.get("severity", "")
        if hasattr(sev, 'value'):
            sev = sev.value
        assert "error" in str(sev).lower()


@pytest.mark.asyncio
async def test_list_issues_filter_by_type(report_tools, sample_marketplace):
    """Test filtering issues by type"""
    result = await report_tools.list_issues(
        sample_marketplace, "all", "interface_mismatch"
    )

    # All issues should have matching type
    for issue in result["issues"]:
        itype = issue.get("issue_type", "")
        if hasattr(itype, 'value'):
            itype = itype.value
        assert "interface_mismatch" in str(itype).lower()


@pytest.mark.asyncio
async def test_list_issues_combined_filters(report_tools, sample_marketplace):
    """Test combined severity and type filters"""
    result = await report_tools.list_issues(
        sample_marketplace, "error", "interface_mismatch"
    )

    assert "error" not in result
    # Should have command conflict errors
    assert result["total_issues"] > 0


@pytest.mark.asyncio
async def test_report_markdown_has_all_sections(report_tools, sample_marketplace):
    """Test markdown report contains all expected sections"""
    result = await report_tools.generate_compatibility_report(
        sample_marketplace, "markdown"
    )

    report = result["report"]
    assert "## Summary" in report
    assert "## Plugins" in report
    # Compatibility section only if there are checks
    assert "Plugin One" in report or "plugin-one" in report.lower()


@pytest.mark.asyncio
async def test_report_includes_suggestions(report_tools, sample_marketplace):
    """Test that issues include suggestions"""
    result = await report_tools.generate_compatibility_report(
        sample_marketplace, "json"
    )

    issues = result.get("all_issues", [])
    # Find an issue with a suggestion
    issues_with_suggestions = [
        i for i in issues
        if i.get("suggestion")
    ]
    assert len(issues_with_suggestions) > 0
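
Note: the severity/type filter tests normalize values with hasattr(x, "value")
so they pass whether the report layer serializes issues as plain strings or as
Enum members. A small self-contained illustration of that pattern — the
Severity enum below is hypothetical, not the module's actual type:

from enum import Enum


class Severity(Enum):
    ERROR = "error"
    WARNING = "warning"
    INFO = "info"


def normalize(sev) -> str:
    # Collapse str-or-Enum severities to a lowercase string, as the tests do.
    return str(sev.value if hasattr(sev, "value") else sev).lower()


assert normalize(Severity.ERROR) == "error"
assert normalize("ERROR") == "error"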
mcp-servers/contract-validator/tests/test_validation_tools.py (new file, 256 lines)
@@ -0,0 +1,256 @@
"""
Unit tests for validation tools.
"""
import pytest
from pathlib import Path


@pytest.fixture
def validation_tools():
    """Create ValidationTools instance"""
    from mcp_server.validation_tools import ValidationTools
    return ValidationTools()


@pytest.fixture
def plugin_a(tmp_path):
    """Create first test plugin"""
    plugin_dir = tmp_path / "plugin-a"
    plugin_dir.mkdir()
    (plugin_dir / ".claude-plugin").mkdir()

    readme = plugin_dir / "README.md"
    readme.write_text("""# Plugin A

Test plugin A.

## Commands

| Command | Description |
|---------|-------------|
| `/setup-a` | Setup A |
| `/shared-cmd` | Shared command |

## Tools Summary

### Core (2 tools)
`tool_one`, `tool_two`
""")
    return str(plugin_dir)


@pytest.fixture
def plugin_b(tmp_path):
    """Create second test plugin"""
    plugin_dir = tmp_path / "plugin-b"
    plugin_dir.mkdir()
    (plugin_dir / ".claude-plugin").mkdir()

    readme = plugin_dir / "README.md"
    readme.write_text("""# Plugin B

Test plugin B.

## Commands

| Command | Description |
|---------|-------------|
| `/setup-b` | Setup B |
| `/shared-cmd` | Shared command (conflict!) |

## Tools Summary

### Core (2 tools)
`tool_two`, `tool_three`
""")
    return str(plugin_dir)


@pytest.fixture
def plugin_no_conflict(tmp_path):
    """Create plugin with no conflicts"""
    plugin_dir = tmp_path / "plugin-c"
    plugin_dir.mkdir()
    (plugin_dir / ".claude-plugin").mkdir()

    readme = plugin_dir / "README.md"
    readme.write_text("""# Plugin C

Test plugin C.

## Commands

| Command | Description |
|---------|-------------|
| `/unique-cmd` | Unique command |

## Tools Summary

### Core (1 tool)
`unique_tool`
""")
    return str(plugin_dir)


@pytest.fixture
def claude_md_with_agents(tmp_path):
    """Create CLAUDE.md with agent definitions"""
    claude_md = tmp_path / "CLAUDE.md"
    claude_md.write_text("""# CLAUDE.md

### Four-Agent Model

| Agent | Personality | Responsibilities |
|-------|-------------|------------------|
| **TestAgent** | Careful | Uses `tool_one`, `tool_two`, `missing_tool` |
| **ValidAgent** | Thorough | Uses `tool_one` only |
| **EmptyAgent** | Unknown | General tasks |
""")
    return str(claude_md)


@pytest.mark.asyncio
async def test_validate_compatibility_command_conflict(validation_tools, plugin_a, plugin_b):
    """Test detection of command name conflicts"""
    result = await validation_tools.validate_compatibility(plugin_a, plugin_b)

    assert "error" not in result
    assert result["compatible"] is False

    # Find the command conflict issue
    error_issues = [i for i in result["issues"] if i["severity"].value == "error"]
    assert len(error_issues) > 0
    assert any("/shared-cmd" in str(i["message"]) for i in error_issues)


@pytest.mark.asyncio
async def test_validate_compatibility_tool_overlap(validation_tools, plugin_a, plugin_b):
    """Test detection of tool name overlaps"""
    result = await validation_tools.validate_compatibility(plugin_a, plugin_b)

    assert "tool_two" in result["shared_tools"]


@pytest.mark.asyncio
async def test_validate_compatibility_unique_tools(validation_tools, plugin_a, plugin_b):
    """Test identification of unique tools per plugin"""
    result = await validation_tools.validate_compatibility(plugin_a, plugin_b)

    assert "tool_one" in result["a_only_tools"]
    assert "tool_three" in result["b_only_tools"]


@pytest.mark.asyncio
async def test_validate_compatibility_no_conflict(validation_tools, plugin_a, plugin_no_conflict):
    """Test compatible plugins"""
    result = await validation_tools.validate_compatibility(plugin_a, plugin_no_conflict)

    assert "error" not in result
    assert result["compatible"] is True


@pytest.mark.asyncio
async def test_validate_compatibility_missing_plugin(validation_tools, plugin_a, tmp_path):
    """Test error when plugin not found"""
    result = await validation_tools.validate_compatibility(
        plugin_a,
        str(tmp_path / "nonexistent")
    )

    assert "error" in result


@pytest.mark.asyncio
async def test_validate_agent_refs_with_missing_tools(validation_tools, claude_md_with_agents, plugin_a):
    """Test detection of missing tool references"""
    result = await validation_tools.validate_agent_refs(
        "TestAgent",
        claude_md_with_agents,
        [plugin_a]
    )

    assert "error" not in result
    assert result["valid"] is False
    assert "missing_tool" in result["tool_refs_missing"]


@pytest.mark.asyncio
async def test_validate_agent_refs_valid_agent(validation_tools, claude_md_with_agents, plugin_a):
    """Test valid agent with all tools found"""
    result = await validation_tools.validate_agent_refs(
        "ValidAgent",
        claude_md_with_agents,
        [plugin_a]
    )

    assert "error" not in result
    assert result["valid"] is True
    assert "tool_one" in result["tool_refs_found"]


@pytest.mark.asyncio
async def test_validate_agent_refs_empty_agent(validation_tools, claude_md_with_agents, plugin_a):
    """Test agent with no tool references"""
    result = await validation_tools.validate_agent_refs(
        "EmptyAgent",
        claude_md_with_agents,
        [plugin_a]
    )

    assert "error" not in result
    # Should have info issue about undocumented references
    info_issues = [i for i in result["issues"] if i["severity"].value == "info"]
    assert len(info_issues) > 0


@pytest.mark.asyncio
async def test_validate_agent_refs_agent_not_found(validation_tools, claude_md_with_agents, plugin_a):
    """Test error when agent not found"""
    result = await validation_tools.validate_agent_refs(
        "NonexistentAgent",
        claude_md_with_agents,
        [plugin_a]
    )

    assert "error" in result
    assert "not found" in result["error"].lower()


@pytest.mark.asyncio
async def test_validate_data_flow_valid(validation_tools, tmp_path):
    """Test data flow validation with valid flow"""
    claude_md = tmp_path / "CLAUDE.md"
    claude_md.write_text("""# CLAUDE.md

### Four-Agent Model

| Agent | Personality | Responsibilities |
|-------|-------------|------------------|
| **DataAgent** | Analytical | Load with `read_csv`, analyze with `describe`, export with `to_csv` |
""")

    result = await validation_tools.validate_data_flow("DataAgent", str(claude_md))

    assert "error" not in result
    assert result["valid"] is True


@pytest.mark.asyncio
async def test_validate_data_flow_missing_producer(validation_tools, tmp_path):
    """Test data flow with consumer but no producer"""
    claude_md = tmp_path / "CLAUDE.md"
    claude_md.write_text("""# CLAUDE.md

### Four-Agent Model

| Agent | Personality | Responsibilities |
|-------|-------------|------------------|
| **BadAgent** | Careless | Just runs `describe`, `head`, `tail` without loading |
""")

    result = await validation_tools.validate_data_flow("BadAgent", str(claude_md))

    assert "error" not in result
    # Should have warning about missing producer
    warning_issues = [i for i in result["issues"] if i["severity"].value == "warning"]
    assert len(warning_issues) > 0
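
Note: the unique/shared tool assertions imply plain set arithmetic over the two
plugins' tool names. A sketch under that assumption — the key names come from
the tests, but the real ValidationTools implementation is not in this diff:

def tool_overlap(a_tools: set, b_tools: set) -> dict:
    # Bookkeeping implied by shared_tools / a_only_tools / b_only_tools.
    return {
        "shared_tools": sorted(a_tools & b_tools),
        "a_only_tools": sorted(a_tools - b_tools),
        "b_only_tools": sorted(b_tools - a_tools),
    }


# Using the tool sets from the plugin_a / plugin_b fixtures above:
overlap = tool_overlap({"tool_one", "tool_two"}, {"tool_two", "tool_three"})
assert overlap["shared_tools"] == ["tool_two"]
assert overlap["a_only_tools"] == ["tool_one"]
assert overlap["b_only_tools"] == ["tool_three"]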